turbo ai
0.3.11
Идиоматический способ создания приложений CHATGPT с использованием асинхровых генераторов в Python

В API CHATGPT используется новый формат ввода под названием CHATML. В клиенте Python от Openai формат используется примерно так:
messages = [
{ "role" : "system" , "content" : "Greet the user!" },
{ "role" : "user" , "content" : "Hello world!" },
]Идея здесь состоит в том, чтобы постепенно создавать сообщения с помощью асинхрового генератора, а затем использовать их для генерации завершений. Асинхронные генераторы невероятно универсальны и простая абстракция для выполнения такого рода вещей. Они также могут быть составлены вместе очень легко.
# Equivalent turbo-chat generator
async def example ():
yield System ( content = "Greet the user!" )
yield User ( content = "Hello World!" )
# To run generation, just yield Generate(),
# the lib will take care of correctly running the app, and
# return the value back here.
output = yield Generate ()
print ( output . content )Смотрите более подробный пример ниже.
pip install turbo-chat from typing import AsyncGenerator , Union
from turbo_chat import (
turbo ,
System ,
User ,
Assistant ,
GetInput ,
Generate ,
run ,
)
# Get user
async def get_user ( id ):
return { "zodiac" : "pisces" }
# Set user zodiac mixin
# Notice that no `@turbo()` decorator used here
async def set_user_zodiac ( user_id : int ):
user_data : dict = await get_user ( user_id )
zodiac : str = user_data [ "zodiac" ]
yield User ( content = f"My zodiac sign is { zodiac } " )
# Horoscope app
@ turbo ( temperature = 0.0 )
async def horoscope ( user_id : int ):
yield System ( content = "You are a fortune teller" )
# Yield from mixin
async for output in set_user_zodiac ( user_id ):
yield output
# Prompt runner to ask for user input
input = yield GetInput ( message = "What do you want to know?" )
# Yield the input
yield User ( content = input )
# Generate (overriding the temperature)
value = yield Generate ( temperature = 0.9 )
# Let's run this
app : AsyncGenerator [ Union [ Assistant , GetInput ], str ] = horoscope ({ "user_id" : 1 })
_input = None
while not ( result := await ( app . run ( _input )). done :
if result . needs_input :
# Prompt user with the input message
_input = input ( result . content )
continue
print ( result . content )
# Output
# >>> What do you want to know? Tell me my fortune
# >>> As an AI language model, I cannot predict the future or provide supernatural fortune-telling. However, I can offer guidance and advice based on your current situation and past experiences. Is there anything specific you would like me to help you with?
#Вы также можете настроить, как сообщения сохраняются между выполнением.
from turbo_chat import turbo , BaseMemory
class RedisMemory ( BaseMemory ):
"""Implement BaseMemory methods here"""
async def setup ( self , ** kwargs ) -> None :
...
async def append ( self , item ) -> None :
...
async def clear ( self ) -> None :
...
# Now use the memory in a turbo_chat app
@ turbo ( memory_class = RedisMemory )
async def app ():
... @ turbo ()
async def app ( some_param : Any , memory : BaseMemory ):
messages = await memory . get ()
... @ turbo ()
async def example ():
yield System ( content = "You are a good guy named John" )
yield User ( content = "What is your name?" )
result = yield Generate ( forward = False )
yield User ( content = "How are you doing?" )
result = yield Generate ()
b = example ()
results = [ output async for output in b ]
assert len ( results ) == 1 Вы также можете подключиться к классу BaseCache , чтобы создать пользовательский кэш.
cache = SimpleCache ()
@ turbo ( cache = cache )
async def example ():
yield System ( content = "You are a good guy named John" )
yield User ( content = "What is your name?" )
result = yield Generate ()
b = example ()
results = [ output async for output in b ]
assert len ( cache . cache ) == 1