turbo-chat
0.3.11
Idiomatic way to build ChatGPT apps using async generators in Python

The ChatGPT API uses a new input format called ChatML. In OpenAI's Python client, the format is used something like this:
messages = [
    {"role": "system", "content": "Greet the user!"},
    {"role": "user", "content": "Hello world!"},
]

The idea here is to incrementally create the messages using an async generator and then use it to generate completions. Async generators are an incredibly versatile and simple abstraction for doing this sort of thing. They can also be composed together very easily.
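For reference, this is how those messages would be sent to the ChatGPT endpoint with the pre-1.0 OpenAI Python client. This is plain OpenAI usage shown only for comparison, not part of turbo-chat, and it assumes OPENAI_API_KEY is set in the environment:

import openai  # pre-1.0 client; reads OPENAI_API_KEY from the environment

messages = [
    {"role": "system", "content": "Greet the user!"},
    {"role": "user", "content": "Hello world!"},
]

# Send the ChatML-style messages and print the assistant's reply
response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=messages)
print(response["choices"][0]["message"]["content"])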
# Equivalent turbo-chat generator
async def example():
    yield System(content="Greet the user!")
    yield User(content="Hello World!")

    # To run generation, just yield Generate();
    # the lib will take care of correctly running the app, and
    # return the value back here.
    output = yield Generate()
    print(output.content)

See a more detailed example below.
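The comment in the snippet above hints at how this works: a runner iterates the generator and uses asend() to pass the model's completion back in at the yield Generate() point. Below is a simplified, pure-Python sketch of that mechanism; it is not turbo-chat's actual runner, and the Message class, run() loop, and fake reply are made up for illustration:

import asyncio
from dataclasses import dataclass

# Hypothetical stand-ins for the library's message/marker types (illustration only).
@dataclass
class Message:
    role: str
    content: str

class Generate:
    """Marker object meaning: call the model with the messages collected so far."""

async def example():
    yield Message("system", "Greet the user!")
    yield Message("user", "Hello world!")
    # The runner resumes us here with the completion as the value of the yield.
    output = yield Generate()
    print("assistant said:", output.content)

async def run(app):
    history, value = [], None
    try:
        while True:
            item = await app.asend(value)  # resume the generator, passing a value back in
            if isinstance(item, Generate):
                # A real runner would call the chat API with `history` here.
                value = Message("assistant", f"(fake reply to {len(history)} messages)")
                history.append(value)
            else:
                history.append(item)
                value = None
    except StopAsyncIteration:
        return history

asyncio.run(run(example()))

In turbo-chat, the run helpers shown in the detailed example below play this runner role for you.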
Install with pip:

pip install turbo-chat

from typing import AsyncGenerator, Union
from turbo_chat import (
    turbo,
    System,
    User,
    Assistant,
    GetInput,
    Generate,
    run,
)

# Get user
async def get_user(id):
    return {"zodiac": "pisces"}

# Set user zodiac mixin
# Notice that no `@turbo()` decorator is used here
async def set_user_zodiac(user_id: int):
    user_data: dict = await get_user(user_id)
    zodiac: str = user_data["zodiac"]
    yield User(content=f"My zodiac sign is {zodiac}")

# Horoscope app
@turbo(temperature=0.0)
async def horoscope(user_id: int):
    yield System(content="You are a fortune teller")

    # Yield from mixin
    async for output in set_user_zodiac(user_id):
        yield output

    # Prompt runner to ask for user input
    input = yield GetInput(message="What do you want to know?")

    # Yield the input
    yield User(content=input)

    # Generate (overriding the temperature)
    value = yield Generate(temperature=0.9)
# Let's run this
app: AsyncGenerator[Union[Assistant, GetInput], str] = horoscope({"user_id": 1})

_input = None
while not (result := await app.run(_input)).done:
    if result.needs_input:
        # Prompt user with the input message
        _input = input(result.content)
        continue

    print(result.content)
# Output
# >>> What do you want to know? Tell me my fortune
# >>> As an AI language model, I cannot predict the future or provide supernatural fortune-telling. However, I can offer guidance and advice based on your current situation and past experiences. Is there anything specific you would like me to help you with?
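Note that the loop above awaits at the top level. To run it as a regular script, wrap it in a coroutine and drive it with asyncio.run; this is the same loop as above and assumes the imports and horoscope definition from the example:

import asyncio

async def main() -> None:
    app = horoscope({"user_id": 1})
    _input = None
    while not (result := await app.run(_input)).done:
        if result.needs_input:
            # Ask on the console and feed the answer back into the app
            _input = input(result.content)
            continue
        print(result.content)

if __name__ == "__main__":
    asyncio.run(main())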
You can also customize how the messages are persisted in-between runs.
from typing import Any

from turbo_chat import turbo, BaseMemory

class RedisMemory(BaseMemory):
    """Implement BaseMemory methods here"""

    async def setup(self, **kwargs) -> None:
        ...

    async def append(self, item) -> None:
        ...

    async def clear(self) -> None:
        ...

# Now use the memory in a turbo_chat app
@turbo(memory_class=RedisMemory)
async def app():
    ...

The memory instance is also available as a parameter inside the app:

@turbo()
async def app(some_param: Any, memory: BaseMemory):
    messages = await memory.get()
    ...
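For a concrete (if naive) illustration, here is a minimal memory backend that keeps everything in a plain Python list. It assumes the methods shown above plus a get() method (inferred from the await memory.get() call); treat it as a sketch, not the library's reference implementation:

from typing import Any, List

from turbo_chat import BaseMemory, turbo

class ListMemory(BaseMemory):
    """Naive memory backend that keeps messages in an in-process list."""

    async def setup(self, **kwargs) -> None:
        # Called before the app runs; initialize the backing store.
        self.items: List[Any] = []

    async def append(self, item) -> None:
        # Persist a single message produced during the run.
        self.items.append(item)

    async def get(self) -> List[Any]:
        # Return everything persisted so far.
        return self.items

    async def clear(self) -> None:
        # Forget all persisted messages.
        self.items = []

@turbo(memory_class=ListMemory)
async def app_with_list_memory():
    ...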
By default, each Generate() result is forwarded back to the caller; pass forward=False to keep an intermediate generation internal, so the example below yields only the final one:

@turbo()
async def example():
    yield System(content="You are a good guy named John")
    yield User(content="What is your name?")
    result = yield Generate(forward=False)

    yield User(content="How are you doing?")
    result = yield Generate()

b = example()
results = [output async for output in b]

assert len(results) == 1

You can also subclass the BaseCache class to create a custom cache.
# `SimpleCache` is assumed here to be importable from turbo_chat like the other helpers
from turbo_chat import SimpleCache

cache = SimpleCache()

@turbo(cache=cache)
async def example():
    yield System(content="You are a good guy named John")
    yield User(content="What is your name?")
    result = yield Generate()

b = example()
results = [output async for output in b]

assert len(cache.cache) == 1