A free & unlimited unofficial Python SDK for the OpenAI API, providing seamless integration and easy-to-use methods for interacting with OpenAI's latest powerful AI models, including GPT-4o (including the gpt-4o-audio-preview & gpt-4o-realtime-preview models), GPT-4, GPT-3.5 Turbo, DALL·E 3, Whisper & Text-to-Speech (TTS) models — for free.

Use gpt-4o-audio-preview to receive both audio and text responses. Install the package via pip:
pip install -U openai-unofficial

from openai_unofficial import OpenAIUnofficial
# Initialize the client
client = OpenAIUnofficial()

# Basic chat completion
response = client.chat.completions.create(
    messages=[{"role": "user", "content": "Say hello!"}],
    model="gpt-4o"
)
print(response.choices[0].message.content)
from openai_unofficial import OpenAIUnofficial

client = OpenAIUnofficial()
# Fetch the model catalog; the response mirrors OpenAI's /models payload,
# a dict with the model entries under the 'data' key
models = client.list_models()
print("Available Models:")
for model in models['data']:
    print(f"- {model['id']}")
from openai_unofficial import OpenAIUnofficial

client = OpenAIUnofficial()
response = client.chat.completions.create(
    messages=[{"role": "user", "content": "Tell me a joke."}],
    model="gpt-4o"
)
print("ChatBot:", response.choices[0].message.content)
from openai_unofficial import OpenAIUnofficial

client = OpenAIUnofficial()
# Multimodal input: a single user message carrying both text and an image URL
response = client.chat.completions.create(
    messages=[{
        "role": "user",
        "content": [
            {"type": "text", "text": "What's in this image?"},
            {
                "type": "image_url",
                "image_url": {
                    "url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg",
                }
            },
        ],
    }],
    model="gpt-4o-mini-2024-07-18"
)
print("Response:", response.choices[0].message.content)
from openai_unofficial import OpenAIUnofficial

client = OpenAIUnofficial()
completion_stream = client.chat.completions.create(
    messages=[{"role": "user", "content": "Write a short story in 3 sentences."}],
    model="gpt-4o-mini-2024-07-18",
    stream=True
)
# Print tokens as they arrive; delta.content can be None on some chunks,
# so guard before printing
for chunk in completion_stream:
    content = chunk.choices[0].delta.content
    if content:
        print(content, end='', flush=True)
from openai_unofficial import OpenAIUnofficial

client = OpenAIUnofficial()
# Generate speech audio; the call returns raw audio bytes
audio_data = client.audio.create(
    input_text="This is a test of the TTS capabilities!",
    model="tts-1-hd",
    voice="nova"
)
with open("tts_output.mp3", "wb") as f:
    f.write(audio_data)
print("TTS Audio saved as tts_output.mp3")
from openai_unofficial import OpenAIUnofficial

client = OpenAIUnofficial()
# Request both text and audio modalities from the audio-preview model
response = client.chat.completions.create(
    messages=[{"role": "user", "content": "Tell me a fun fact."}],
    model="gpt-4o-audio-preview",
    modalities=["text", "audio"],
    audio={"voice": "fable", "format": "wav"}
)
message = response.choices[0].message
print("Text Response:", message.content)
# The audio payload arrives base64-encoded under 'data'; decode before writing
if message.audio and 'data' in message.audio:
    from base64 import b64decode
    with open("audio_preview.wav", "wb") as f:
        f.write(b64decode(message.audio['data']))
    print("Audio saved as audio_preview.wav")
from openai_unofficial import OpenAIUnofficial

client = OpenAIUnofficial()
# Generate an image with DALL·E 3; the result exposes a hosted URL
response = client.image.create(
    prompt="A futuristic cityscape at sunset",
    model="dall-e-3",
    size="1024x1024"
)
print("Image URL:", response.data[0].url)
from openai_unofficial import OpenAIUnofficial

client = OpenAIUnofficial()
# Open in binary mode; the file handle is passed directly to the API
with open("speech.mp3", "rb") as audio_file:
    transcription = client.audio.transcribe(
        file=audio_file,
        model="whisper-1"
    )
print("Transcription:", transcription.text)

The SDK supports OpenAI's function-calling capabilities, allowing you to define and use tools/functions in your conversations. Here are examples of function calling and tool usage:
⚠️ Important Note: In the current version (0.1.2), complex or multiple function calling is not yet fully supported. The SDK currently supports basic function-calling capabilities; support for multiple function calls and more complex tool-usage patterns will be added in upcoming releases.
from openai_unofficial import OpenAIUnofficial
import json

client = OpenAIUnofficial()

# Define your functions as tools (OpenAI tool-calling JSON schema)
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_current_weather",
            "description": "Get the current weather in a given location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city and state, e.g., San Francisco, CA"
                    },
                    "unit": {
                        "type": "string",
                        "enum": ["celsius", "fahrenheit"],
                        "description": "The temperature unit"
                    }
                },
                "required": ["location"]
            }
        }
    }
]

# Function to actually get weather data
def get_current_weather(location: str, unit: str = "celsius") -> str:
    # This is a mock function - replace with an actual weather API call
    return f"The current weather in {location} is 22°{unit[0].upper()}"

# Initial conversation message
messages = [
    {"role": "user", "content": "What's the weather like in London?"}
]

# First API call: let the model decide whether to call the tool
response = client.chat.completions.create(
    model="gpt-4o-mini-2024-07-18",
    messages=messages,
    tools=tools,
    tool_choice="auto"
)

# Append the assistant's message (which may contain tool calls) to the history
assistant_message = response.choices[0].message
messages.append(assistant_message.to_dict())

# Check if the model wants to call a function
if assistant_message.tool_calls:
    # Process each tool call
    for tool_call in assistant_message.tool_calls:
        function_name = tool_call.function.name
        # Arguments arrive as a JSON string; parse before dispatching
        function_args = json.loads(tool_call.function.arguments)
        # Call the function and get the result
        function_response = get_current_weather(**function_args)
        # Append the function response to messages, keyed by tool_call_id
        messages.append({
            "role": "tool",
            "tool_call_id": tool_call.id,
            "name": function_name,
            "content": function_response
        })
    # Second API call: get the final natural-language response
    final_response = client.chat.completions.create(
        model="gpt-4o-mini-2024-07-18",
        messages=messages
    )
print("Final Response:", final_response.choices[0].message.content)

Contributions are welcome! Please follow these steps:
git checkout -b feature/my-feature
git commit -am 'Add new feature'
git push origin feature/my-feature

Please make sure your code follows the project's coding standards and passes all tests.
This project is licensed under the MIT License — see the LICENSE file for details.
Note : This SDK is unofficial and not affiliated with OpenAI.
If you encounter any problems or have suggestions, please open an issue on GitHub.
Here is a partial list of models the SDK currently supports. For the complete list, check the /models endpoint:
Chat Models:
gpt-4, gpt-4-turbo, gpt-4o, gpt-4o-mini, gpt-3.5-turbo, gpt-3.5-turbo-16k, gpt-3.5-turbo-instruct, gpt-4o-realtime-preview, gpt-4o-audio-preview

Image Generation Models:
dall-e-2, dall-e-3

Text-to-Speech (TTS) Models:
tts-1, tts-1-hd, tts-1-1106, tts-1-hd-1106

Audio Models:
whisper-1

Embedding Models:
text-embedding-ada-002, text-embedding-3-small, text-embedding-3-large