turbo ai
0.3.11
An idiomatic way to build ChatGPT apps using async generators in Python

The ChatGPT API uses a new input format called ChatML. In OpenAI's Python client, the format looks something like this:
messages = [
    {"role": "system", "content": "Greet the user!"},
    {"role": "user", "content": "Hello world!"},
]

The idea here is to use async generators to build up the messages incrementally and then use them to generate a completion. Async generators are an incredibly versatile and simple abstraction for this kind of task, and they can be composed together very easily.
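To make that composability concrete, here is a small plain-Python sketch (independent of turbo-chat, using only the standard library) in which one async generator's messages are spliced into another with async for:

import asyncio

async def greetings():
    yield {"role": "system", "content": "Greet the user!"}
    yield {"role": "user", "content": "Hello world!"}

async def conversation():
    # Splice the messages from another generator into this one
    async for message in greetings():
        yield message
    yield {"role": "user", "content": "How are you?"}

async def main():
    messages = [m async for m in conversation()]
    print(messages)  # the full message list, built incrementally

asyncio.run(main())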
# Equivalent turbo-chat generator
async def example():
    yield System(content="Greet the user!")
    yield User(content="Hello World!")

    # To run generation, just yield Generate(),
    # the lib will take care of correctly running the app, and
    # return the value back here.
    output = yield Generate()
    print(output.content)

See the more detailed example below.
pip install turbo-chat

from typing import AsyncGenerator, Union

from turbo_chat import (
    turbo,
    System,
    User,
    Assistant,
    GetInput,
    Generate,
    run,
)
# Get user
async def get_user(id):
    return {"zodiac": "pisces"}

# Set user zodiac mixin
# Notice that no `@turbo()` decorator is used here
async def set_user_zodiac(user_id: int):
    user_data: dict = await get_user(user_id)
    zodiac: str = user_data["zodiac"]
    yield User(content=f"My zodiac sign is {zodiac}")

# Horoscope app
@turbo(temperature=0.0)
async def horoscope(user_id: int):
    yield System(content="You are a fortune teller")

    # Yield from mixin
    async for output in set_user_zodiac(user_id):
        yield output

    # Prompt runner to ask for user input
    input = yield GetInput(message="What do you want to know?")

    # Yield the input
    yield User(content=input)

    # Generate (overriding the temperature)
    value = yield Generate(temperature=0.9)

# Let's run this
app: AsyncGenerator[Union[Assistant, GetInput], str] = horoscope(user_id=1)

_input = None

# Note: the top-level await assumes an async context such as a notebook;
# see the standalone wrapper after the sample output.
while not (result := await app.run(_input)).done:
    if result.needs_input:
        # Prompt user with the input message
        _input = input(result.content)
        continue

    print(result.content)
# Output
# >>> What do you want to know? Tell me my fortune
# >>> As an AI language model, I cannot predict the future or provide supernatural fortune-telling. However, I can offer guidance and advice based on your current situation and past experiences. Is there anything specific you would like me to help you with?
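The loop above awaits app.run, so it has to execute inside a coroutine (for example, a notebook cell). As a minimal sketch, a standalone script could wrap it with asyncio.run; this reuses only the app.run / done / needs_input / content API shown above:

import asyncio

async def main():
    app = horoscope(user_id=1)

    _input = None
    while not (result := await app.run(_input)).done:
        if result.needs_input:
            # Prompt on stdin with the message from GetInput
            _input = input(result.content)
            continue

        print(result.content)

asyncio.run(main())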
You can also customize how messages are persisted in between runs:
from turbo_chat import turbo, BaseMemory

class RedisMemory(BaseMemory):
    """Implement BaseMemory methods here"""

    async def setup(self, **kwargs) -> None:
        ...

    async def append(self, item) -> None:
        ...

    async def clear(self) -> None:
        ...

# Now use the memory in a turbo_chat app
@turbo(memory_class=RedisMemory)
async def app():
    ...
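For illustration, a minimal in-memory implementation might look like the sketch below. It assumes the BaseMemory interface consists of the setup / append / clear methods stubbed above plus the get method used in the next snippet; check the library source for the actual abstract methods.

from turbo_chat import BaseMemory

class ListMemory(BaseMemory):
    """Minimal sketch: keeps messages in a plain Python list."""

    async def setup(self, **kwargs) -> None:
        # Called before the app runs; create the backing store
        self.messages: list = []

    async def append(self, item) -> None:
        # Persist a single new message
        self.messages.append(item)

    async def get(self) -> list:
        # Return every message stored so far
        return self.messages

    async def clear(self) -> None:
        # Drop the stored conversation history
        self.messages = []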
The memory object can also be accessed inside the app:

from typing import Any

@turbo()
async def app(some_param: Any, memory: BaseMemory):
    messages = await memory.get()
    ...
Note that when forward=False is passed to Generate(), the generated result is not forwarded back to the caller:

@turbo()
async def example():
    yield System(content="You are a good guy named John")
    yield User(content="What is your name?")
    result = yield Generate(forward=False)

    yield User(content="How are you doing?")
    result = yield Generate()

b = example()
results = [output async for output in b]

# Only the second Generate() result is yielded to the caller
assert len(results) == 1

You can also create a custom cache by subclassing the BaseCache class:
cache = SimpleCache()

@turbo(cache=cache)
async def example():
    yield System(content="You are a good guy named John")
    yield User(content="What is your name?")
    result = yield Generate()

b = example()
results = [output async for output in b]

assert len(cache.cache) == 1
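The BaseCache interface itself is not shown in this README, so the snippet below is only a rough sketch under the assumption of async get / set methods backed by the same cache dict the assert above inspects; the real abstract methods may differ.

from turbo_chat import BaseCache

class DictCache(BaseCache):
    """Rough sketch only: the actual BaseCache interface may differ."""

    def __init__(self):
        self.cache: dict = {}  # same attribute the assert above inspects

    async def get(self, key):
        # Return a previously generated result, or None on a miss
        return self.cache.get(key)

    async def set(self, key, value) -> None:
        # Store a generation result under the given key
        self.cache[key] = value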