turbo ai
0.3.11
An idiomatic way to build ChatGPT apps using async generators in Python.

The ChatGPT API uses a new input format called ChatML. In the OpenAI Python client, the format is used something like this:
```python
messages = [
    {"role": "system", "content": "Greet the user!"},
    {"role": "user", "content": "Hello world!"},
]
```
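For context, this is roughly how such a message list is consumed by the pre-v1 `openai` client itself (plain OpenAI usage, not part of turbo-chat):

```python
# For comparison: legacy (pre-1.0) openai client call, not turbo-chat.
import openai

response = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=messages,
)
print(response["choices"][0]["message"]["content"])
```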
The idea here is to build up the messages incrementally using an async generator, and then use that to create the completion. Async generators are incredibly versatile and simple to use for this kind of thing. They can also be composed very easily.

```python
# Equivalent turbo-chat generator
async def example():
    yield System(content="Greet the user!")
    yield User(content="Hello World!")

    # To run generation, just yield Generate();
    # the lib will take care of correctly running the app, and
    # return the value back here.
    output = yield Generate()
    print(output.content)
```

See a more detailed example below.
```
pip install turbo-chat
```

```python
from typing import AsyncGenerator, Union

from turbo_chat import (
    turbo,
    System,
    User,
    Assistant,
    GetInput,
    Generate,
    run,
)


# Get user
async def get_user(id):
    return {"zodiac": "pisces"}


# Set user zodiac mixin
# Notice that no `@turbo()` decorator is used here
async def set_user_zodiac(user_id: int):
    user_data: dict = await get_user(user_id)
    zodiac: str = user_data["zodiac"]

    yield User(content=f"My zodiac sign is {zodiac}")


# Horoscope app
@turbo(temperature=0.0)
async def horoscope(user_id: int):
    yield System(content="You are a fortune teller")

    # Yield from mixin
    async for output in set_user_zodiac(user_id):
        yield output

    # Prompt runner to ask for user input
    input = yield GetInput(message="What do you want to know?")

    # Yield the input
    yield User(content=input)

    # Generate (overriding the temperature)
    value = yield Generate(temperature=0.9)


# Let's run this (inside an async context; see the asyncio wrapper below)
app: AsyncGenerator[Union[Assistant, GetInput], str] = horoscope({"user_id": 1})

_input = None
while not (result := await app.run(_input)).done:
    if result.needs_input:
        # Prompt user with the input message
        _input = input(result.content)
        continue

    print(result.content)

# Output
# >>> What do you want to know? Tell me my fortune
# >>> As an AI language model, I cannot predict the future or provide
# >>> supernatural fortune-telling. However, I can offer guidance and advice
# >>> based on your current situation and past experiences. Is there anything
# >>> specific you would like me to help you with?
```
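Note that the driver loop above uses `await` at the top level; in a standalone script it would live inside an async function driven by the standard `asyncio` entry point, something like:

```python
# Standard asyncio boilerplate for running the driver loop as a script.
import asyncio


async def main() -> None:
    app = horoscope({"user_id": 1})

    _input = None
    while not (result := await app.run(_input)).done:
        if result.needs_input:
            # input() blocks the event loop; fine for a simple demo
            _input = input(result.content)
            continue

        print(result.content)


asyncio.run(main())
```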
You can also customize how the messages are persisted in-between executions.
```python
from turbo_chat import turbo, BaseMemory


class RedisMemory(BaseMemory):
    """Implement BaseMemory methods here"""

    async def setup(self, **kwargs) -> None:
        ...

    async def append(self, item) -> None:
        ...

    async def clear(self) -> None:
        ...


# Now use the memory in a turbo_chat app
@turbo(memory_class=RedisMemory)
async def app():
    ...
```

The memory object can also be accessed directly inside an app:

```python
from typing import Any

from turbo_chat import turbo, BaseMemory


@turbo()
async def app(some_param: Any, memory: BaseMemory):
    messages = await memory.get()
    ...
```
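For illustration, here is a minimal sketch of a concrete subclass, assuming `BaseMemory` expects exactly the methods shown in these snippets (`setup`, `append`, `clear`, plus the `get` used above). This is a hypothetical example, not part of the library:

```python
# Hypothetical sketch: a list-backed memory implementing the methods
# stubbed out in RedisMemory above. The method names mirror the snippets
# in this README; check BaseMemory for the authoritative interface.
from turbo_chat import BaseMemory


class ListMemory(BaseMemory):
    """Keeps messages in a plain in-process list."""

    async def setup(self, **kwargs) -> None:
        self.messages = []

    async def append(self, item) -> None:
        self.messages.append(item)

    async def get(self) -> list:
        return self.messages

    async def clear(self) -> None:
        self.messages = []
```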
Results from a `Generate` are forwarded to the app's consumer by default; passing `forward=False` keeps a result internal, as the assertion below illustrates:

```python
@turbo()
async def example():
    yield System(content="You are a good guy named John")
    yield User(content="What is your name?")
    result = yield Generate(forward=False)

    yield User(content="How are you doing?")
    result = yield Generate()


b = example()
results = [output async for output in b]

# Only the second Generate's result reaches the caller
assert len(results) == 1
```

You can also subclass the `BaseCache` class to create a custom cache (see the sketch at the end of this section).
For example, using `SimpleCache`:

```python
from turbo_chat import SimpleCache  # assuming SimpleCache is exported by turbo_chat

cache = SimpleCache()


@turbo(cache=cache)
async def example():
    yield System(content="You are a good guy named John")
    yield User(content="What is your name?")
    result = yield Generate()


b = example()
results = [output async for output in b]

# One generation result was stored in the cache
assert len(cache.cache) == 1
```
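As a hypothetical sketch of the subclassing route, the cache below keeps results in a plain dict. The `get`/`set`/`clear` method names here are assumptions made for illustration, not taken from turbo-chat; check `BaseCache` for the actual interface.

```python
# Hypothetical sketch only: the method names are assumed, not turbo-chat's API.
import json

from turbo_chat import BaseCache


class DictCache(BaseCache):
    """Caches generation results in an in-process dict."""

    cache: dict = {}

    async def get(self, key):
        # Serialize the key (e.g. a message list) so it can be used as a dict key
        return self.cache.get(json.dumps(key, default=str))

    async def set(self, key, value) -> None:
        self.cache[json.dumps(key, default=str)] = value

    async def clear(self) -> None:
        self.cache.clear()
```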