turbo-chat
0.3.11
使用 Python 中的异步生成器构建 ChatGPT 应用的惯用方法

ChatGPT API 使用一种称为 ChatML 的新输入格式。在 OpenAI 的 Python 客户端中，该格式的用法类似于：
messages = [
{ "role" : "system" , "content" : "Greet the user!" },
{ "role" : "user" , "content" : "Hello world!" },
]这里的想法是使用异步生成器逐步构建消息,然后使用它来生成完成。异步发电机对于做这种事情的多功能和简单抽象。它们也可以很容易地组成。
# Equivalent turbo-chat generator
async def example ():
yield System ( content = "Greet the user!" )
yield User ( content = "Hello World!" )
# To run generation, just yield Generate(),
# the lib will take care of correctly running the app, and
# return the value back here.
output = yield Generate ()
print ( output . content )请参阅下面的更多详细示例。
pip install turbo-chat

from typing import AsyncGenerator, Union
from turbo_chat import (
turbo ,
System ,
User ,
Assistant ,
GetInput ,
Generate ,
run ,
)
# Get user
async def get_user(id):
    """Look up a user record by *id* (stubbed with fixed data for the example)."""
    return {"zodiac": "pisces"}
# Set user zodiac mixin
# Notice that no `@turbo()` decorator used here
async def set_user_zodiac(user_id: int):
    """Mixin generator: fetch the user's zodiac sign and yield it as a User message."""
    user_data: dict = await get_user(user_id)
    zodiac: str = user_data["zodiac"]
    yield User(content=f"My zodiac sign is {zodiac}")
# Horoscope app
@turbo(temperature=0.0)
async def horoscope(user_id: int):
    """Fortune-teller app: seeds the chat with the user's zodiac sign,
    asks what they want to know, and generates a reply."""
    yield System(content="You are a fortune teller")

    # Yield from mixin
    async for output in set_user_zodiac(user_id):
        yield output

    # Prompt runner to ask for user input.
    # (local renamed from `input` so it no longer shadows the builtin)
    question = yield GetInput(message="What do you want to know?")

    # Yield the input
    yield User(content=question)

    # Generate (overriding the temperature)
    value = yield Generate(temperature=0.9)
# Let's run this
app : AsyncGenerator [ Union [ Assistant , GetInput ], str ] = horoscope ({ "user_id" : 1 })
_input = None
while not ( result := await ( app . run ( _input )). done :
if result . needs_input :
# Prompt user with the input message
_input = input ( result . content )
continue
print ( result . content )
# Output
# >>> What do you want to know? Tell me my fortune
# >>> As an AI language model, I cannot predict the future or provide supernatural fortune-telling. However, I can offer guidance and advice based on your current situation and past experiences. Is there anything specific you would like me to help you with?
# 您还可以自定义消息在多次执行之间如何持久化。
from turbo_chat import turbo , BaseMemory
class RedisMemory(BaseMemory):
    """Implement BaseMemory methods here"""

    async def setup(self, **kwargs) -> None:
        ...

    async def append(self, item) -> None:
        ...

    async def clear(self) -> None:
        ...
# Now use the memory in a turbo_chat app
@ turbo ( memory_class = RedisMemory )
async def app ():
... @ turbo ()
async def app ( some_param : Any , memory : BaseMemory ):
messages = await memory . get ()
... @ turbo ()
async def example ():
yield System ( content = "You are a good guy named John" )
yield User ( content = "What is your name?" )
result = yield Generate ( forward = False )
yield User ( content = "How are you doing?" )
result = yield Generate ()
b = example ()
results = [ output async for output in b ]
assert len ( results ) == 1您还可以子类BaseCache类以创建自定义缓存。
cache = SimpleCache ()
@ turbo ( cache = cache )
async def example ():
yield System ( content = "You are a good guy named John" )
yield User ( content = "What is your name?" )
result = yield Generate ()
b = example ()
results = [ output async for output in b ]
assert len ( cache . cache ) == 1