This library provides unofficial Go clients for the OpenAI API. We support: ChatGPT, GPT-3/GPT-4, DALL·E 2/3, Whisper.

Installation:

go get github.com/sashabaranov/go-openai

Currently, go-openai requires Go version 1.18 or greater.

ChatGPT example usage:
package main

import (
    "context"
    "fmt"

    openai "github.com/sashabaranov/go-openai"
)

func main() {
    client := openai.NewClient("your token")
    resp, err := client.CreateChatCompletion(
        context.Background(),
        openai.ChatCompletionRequest{
            Model: openai.GPT3Dot5Turbo,
            Messages: []openai.ChatCompletionMessage{
                {
                    Role:    openai.ChatMessageRoleUser,
                    Content: "Hello!",
                },
            },
        },
    )
    if err != nil {
        fmt.Printf("ChatCompletion error: %v\n", err)
        return
    }

    fmt.Println(resp.Choices[0].Message.Content)
}

Note: your API key is sensitive information. Do not share it with anyone.
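Rather than hardcoding the token, you can read it from the environment. A minimal sketch (the OPENAI_API_KEY variable name is only a convention here, not something the library requires):

// A minimal sketch: read the key from the environment instead of hardcoding it.
// Requires the "os" import; the variable name is a convention, not a library requirement.
apiKey := os.Getenv("OPENAI_API_KEY")
if apiKey == "" {
    panic("OPENAI_API_KEY is not set")
}
client := openai.NewClient(apiKey)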
ChatGPT streaming completion example:

package main

import (
    "context"
    "errors"
    "fmt"
    "io"

    openai "github.com/sashabaranov/go-openai"
)

func main() {
    c := openai.NewClient("your token")
    ctx := context.Background()

    req := openai.ChatCompletionRequest{
        Model:     openai.GPT3Dot5Turbo,
        MaxTokens: 20,
        Messages: []openai.ChatCompletionMessage{
            {
                Role:    openai.ChatMessageRoleUser,
                Content: "Lorem ipsum",
            },
        },
        Stream: true,
    }
    stream, err := c.CreateChatCompletionStream(ctx, req)
    if err != nil {
        fmt.Printf("ChatCompletionStream error: %v\n", err)
        return
    }
    defer stream.Close()

    fmt.Printf("Stream response: ")
    for {
        response, err := stream.Recv()
        if errors.Is(err, io.EOF) {
            fmt.Println("\nStream finished")
            return
        }

        if err != nil {
            fmt.Printf("\nStream error: %v\n", err)
            return
        }

        fmt.Print(response.Choices[0].Delta.Content)
    }
}

GPT-3 completion example:

package main
import (
    "context"
    "fmt"

    openai "github.com/sashabaranov/go-openai"
)

func main() {
    c := openai.NewClient("your token")
    ctx := context.Background()

    req := openai.CompletionRequest{
        Model:     openai.GPT3Babbage002,
        MaxTokens: 5,
        Prompt:    "Lorem ipsum",
    }
    resp, err := c.CreateCompletion(ctx, req)
    if err != nil {
        fmt.Printf("Completion error: %v\n", err)
        return
    }
    fmt.Println(resp.Choices[0].Text)
}

GPT-3 streaming completion example:

package main
import (
    "context"
    "errors"
    "fmt"
    "io"

    openai "github.com/sashabaranov/go-openai"
)

func main() {
    c := openai.NewClient("your token")
    ctx := context.Background()

    req := openai.CompletionRequest{
        Model:     openai.GPT3Babbage002,
        MaxTokens: 5,
        Prompt:    "Lorem ipsum",
        Stream:    true,
    }
    stream, err := c.CreateCompletionStream(ctx, req)
    if err != nil {
        fmt.Printf("CompletionStream error: %v\n", err)
        return
    }
    defer stream.Close()

    for {
        response, err := stream.Recv()
        if errors.Is(err, io.EOF) {
            fmt.Println("Stream finished")
            return
        }

        if err != nil {
            fmt.Printf("Stream error: %v\n", err)
            return
        }

        fmt.Printf("Stream response: %v\n", response)
    }
}

Audio speech-to-text (Whisper) example:

package main
import (
    "context"
    "fmt"

    openai "github.com/sashabaranov/go-openai"
)

func main() {
    c := openai.NewClient("your token")
    ctx := context.Background()

    req := openai.AudioRequest{
        Model:    openai.Whisper1,
        FilePath: "recording.mp3",
    }
    resp, err := c.CreateTranscription(ctx, req)
    if err != nil {
        fmt.Printf("Transcription error: %v\n", err)
        return
    }
    fmt.Println(resp.Text)
}

Audio captions (SRT) example:

package main
import (
    "context"
    "fmt"
    "os"

    openai "github.com/sashabaranov/go-openai"
)

func main() {
    c := openai.NewClient(os.Getenv("OPENAI_KEY"))

    req := openai.AudioRequest{
        Model:    openai.Whisper1,
        FilePath: os.Args[1],
        Format:   openai.AudioResponseFormatSRT,
    }
    resp, err := c.CreateTranscription(context.Background(), req)
    if err != nil {
        fmt.Printf("Transcription error: %v\n", err)
        return
    }
    f, err := os.Create(os.Args[1] + ".srt")
    if err != nil {
        fmt.Printf("Could not open file: %v\n", err)
        return
    }
    defer f.Close()
    if _, err := f.WriteString(resp.Text); err != nil {
        fmt.Printf("Error writing to file: %v\n", err)
        return
    }
}

DALL·E 2 image generation example:

package main
import (
    "bytes"
    "context"
    "encoding/base64"
    "fmt"
    "image/png"
    "os"

    openai "github.com/sashabaranov/go-openai"
)

func main() {
    c := openai.NewClient("your token")
    ctx := context.Background()

    // Sample image by link
    reqUrl := openai.ImageRequest{
        Prompt:         "Parrot on a skateboard performs a trick, cartoon style, natural light, high detail",
        Size:           openai.CreateImageSize256x256,
        ResponseFormat: openai.CreateImageResponseFormatURL,
        N:              1,
    }
    respUrl, err := c.CreateImage(ctx, reqUrl)
    if err != nil {
        fmt.Printf("Image creation error: %v\n", err)
        return
    }
    fmt.Println(respUrl.Data[0].URL)

    // Example image as base64
    reqBase64 := openai.ImageRequest{
        Prompt:         "Portrait of a humanoid parrot in a classic costume, high detail, realistic light, unreal engine",
        Size:           openai.CreateImageSize256x256,
        ResponseFormat: openai.CreateImageResponseFormatB64JSON,
        N:              1,
    }
    respBase64, err := c.CreateImage(ctx, reqBase64)
    if err != nil {
        fmt.Printf("Image creation error: %v\n", err)
        return
    }

    imgBytes, err := base64.StdEncoding.DecodeString(respBase64.Data[0].B64JSON)
    if err != nil {
        fmt.Printf("Base64 decode error: %v\n", err)
        return
    }

    r := bytes.NewReader(imgBytes)
    imgData, err := png.Decode(r)
    if err != nil {
        fmt.Printf("PNG decode error: %v\n", err)
        return
    }

    file, err := os.Create("example.png")
    if err != nil {
        fmt.Printf("File creation error: %v\n", err)
        return
    }
    defer file.Close()

    if err := png.Encode(file, imgData); err != nil {
        fmt.Printf("PNG encode error: %v\n", err)
        return
    }

    fmt.Println("The image was saved as example.png")
}

Configuring proxy (a code fragment; it requires the "net/http" and "net/url" imports):

config := openai.DefaultConfig("token")
proxyUrl, err := url.Parse("http://localhost:{port}")
if err != nil {
    panic(err)
}
transport := &http.Transport{
    Proxy: http.ProxyURL(proxyUrl),
}
config.HTTPClient = &http.Client{
    Transport: transport,
}

c := openai.NewClientWithConfig(config)

See also: https://pkg.go.dev/github.com/sashabaranov/go-openai#ClientConfig
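ClientConfig can adjust more than the HTTP client; for instance, it can point the client at any OpenAI-compatible endpoint. A minimal sketch, assuming the BaseURL field documented at the link above (the URL shown is a hypothetical local server):

// A minimal sketch, assuming the ClientConfig.BaseURL field from the godoc above.
config := openai.DefaultConfig("token")
config.BaseURL = "http://localhost:8080/v1" // hypothetical OpenAI-compatible server
c := openai.NewClientWithConfig(config)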
ChatGPT support context example (an interactive loop that keeps the message history):

package main
import (
    "bufio"
    "context"
    "fmt"
    "os"
    "strings"

    "github.com/sashabaranov/go-openai"
)

func main() {
    client := openai.NewClient("your token")
    messages := make([]openai.ChatCompletionMessage, 0)
    reader := bufio.NewReader(os.Stdin)
    fmt.Println("Conversation")
    fmt.Println("---------------------")

    for {
        fmt.Print("-> ")
        text, _ := reader.ReadString('\n')
        // convert CRLF to LF
        text = strings.Replace(text, "\n", "", -1)
        messages = append(messages, openai.ChatCompletionMessage{
            Role:    openai.ChatMessageRoleUser,
            Content: text,
        })

        resp, err := client.CreateChatCompletion(
            context.Background(),
            openai.ChatCompletionRequest{
                Model:    openai.GPT3Dot5Turbo,
                Messages: messages,
            },
        )

        if err != nil {
            fmt.Printf("ChatCompletion error: %v\n", err)
            continue
        }

        content := resp.Choices[0].Message.Content
        messages = append(messages, openai.ChatCompletionMessage{
            Role:    openai.ChatMessageRoleAssistant,
            Content: content,
        })
        fmt.Println(content)
    }
}

Azure OpenAI ChatGPT example:

package main
import (
    "context"
    "fmt"

    openai "github.com/sashabaranov/go-openai"
)

func main() {
    config := openai.DefaultAzureConfig("your Azure OpenAI Key", "https://your Azure OpenAI Endpoint")
    // If you use a deployment name different from the model name, you can customize the AzureModelMapperFunc function
    // config.AzureModelMapperFunc = func(model string) string {
    //     azureModelMapping := map[string]string{
    //         "gpt-3.5-turbo": "your gpt-3.5-turbo deployment name",
    //     }
    //     return azureModelMapping[model]
    // }

    client := openai.NewClientWithConfig(config)
    resp, err := client.CreateChatCompletion(
        context.Background(),
        openai.ChatCompletionRequest{
            Model: openai.GPT3Dot5Turbo,
            Messages: []openai.ChatCompletionMessage{
                {
                    Role:    openai.ChatMessageRoleUser,
                    Content: "Hello Azure OpenAI!",
                },
            },
        },
    )
    if err != nil {
        fmt.Printf("ChatCompletion error: %v\n", err)
        return
    }

    fmt.Println(resp.Choices[0].Message.Content)
}

Embedding semantic similarity example:

package main
import (
    "context"
    "log"

    openai "github.com/sashabaranov/go-openai"
)

func main() {
    client := openai.NewClient("your-token")

    // Create an EmbeddingRequest for the user query
    queryReq := openai.EmbeddingRequest{
        Input: []string{"How many chucks would a woodchuck chuck"},
        Model: openai.AdaEmbeddingV2,
    }

    // Create an embedding for the user query
    queryResponse, err := client.CreateEmbeddings(context.Background(), queryReq)
    if err != nil {
        log.Fatal("Error creating query embedding:", err)
    }

    // Create an EmbeddingRequest for the target text
    targetReq := openai.EmbeddingRequest{
        Input: []string{"How many chucks would a woodchuck chuck if the woodchuck could chuck wood"},
        Model: openai.AdaEmbeddingV2,
    }

    // Create an embedding for the target text
    targetResponse, err := client.CreateEmbeddings(context.Background(), targetReq)
    if err != nil {
        log.Fatal("Error creating target embedding:", err)
    }

    // Now that we have the embeddings for the user query and the target text, we
    // can calculate their similarity.
    queryEmbedding := queryResponse.Data[0]
    targetEmbedding := targetResponse.Data[0]
    similarity, err := queryEmbedding.DotProduct(&targetEmbedding)
    if err != nil {
        log.Fatal("Error calculating dot product:", err)
    }

    log.Printf("The similarity score between the query and the target is %f", similarity)
}

Azure OpenAI embeddings example:

package main
import (
    "context"
    "fmt"

    openai "github.com/sashabaranov/go-openai"
)

func main() {
    config := openai.DefaultAzureConfig("your Azure OpenAI Key", "https://your Azure OpenAI Endpoint")
    config.APIVersion = "2023-05-15" // optional update to latest API version

    // If you use a deployment name different from the model name, you can customize the AzureModelMapperFunc function
    // config.AzureModelMapperFunc = func(model string) string {
    //     azureModelMapping := map[string]string{
    //         "gpt-3.5-turbo": "your gpt-3.5-turbo deployment name",
    //     }
    //     return azureModelMapping[model]
    // }

    input := "Text to vectorize"

    client := openai.NewClientWithConfig(config)
    resp, err := client.CreateEmbeddings(
        context.Background(),
        openai.EmbeddingRequest{
            Input: []string{input},
            Model: openai.AdaEmbeddingV2,
        })
    if err != nil {
        fmt.Printf("CreateEmbeddings error: %v\n", err)
        return
    }

    vectors := resp.Data[0].Embedding // []float32 with 1536 dimensions
    fmt.Println(vectors[:10], "...", vectors[len(vectors)-10:])
}

JSON Schema for function calling:

It is now possible for chat completions to choose to call a function for more information (see the OpenAI developer docs on function calling).

In order to describe the kind of functions that can be called, a JSON schema must be provided. Many JSON schema libraries exist and are more advanced than what we can offer in this library; however, we have included a simple jsonschema package for those who want to use this feature without formatting their own JSON schema payload.

The developer docs give this JSON schema definition as an example:
{
  "name": "get_current_weather",
  "description": "Get the current weather in a given location",
  "parameters": {
    "type": "object",
    "properties": {
      "location": {
        "type": "string",
        "description": "The city and state, e.g. San Francisco, CA"
      },
      "unit": {
        "type": "string",
        "enum": ["celsius", "fahrenheit"]
      }
    },
    "required": ["location"]
  }
}

Using the jsonschema package, this schema could be created using structs as such:
FunctionDefinition{
    Name: "get_current_weather",
    Parameters: jsonschema.Definition{
        Type: jsonschema.Object,
        Properties: map[string]jsonschema.Definition{
            "location": {
                Type:        jsonschema.String,
                Description: "The city and state, e.g. San Francisco, CA",
            },
            "unit": {
                Type: jsonschema.String,
                Enum: []string{"celsius", "fahrenheit"},
            },
        },
        Required: []string{"location"},
    },
}

The Parameters field of a FunctionDefinition can accept either of the above styles, or even a nested struct from another library (as long as it can be marshalled into JSON).
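As a hedged sketch of how such a definition is typically passed to the model (via the Tools field of ChatCompletionRequest; verify the exact types against the package docs for your version):

// A minimal sketch: pass the function definition via Tools, then inspect
// ToolCalls in the response. Uses the client and ctx from the examples above.
weatherFunc := openai.FunctionDefinition{
    Name: "get_current_weather",
    // Parameters: the jsonschema.Definition constructed above...
}
resp, err := client.CreateChatCompletion(ctx, openai.ChatCompletionRequest{
    Model: openai.GPT3Dot5Turbo,
    Messages: []openai.ChatCompletionMessage{
        {Role: openai.ChatMessageRoleUser, Content: "What's the weather in Boston?"},
    },
    Tools: []openai.Tool{
        {Type: openai.ToolTypeFunction, Function: &weatherFunc},
    },
})
if err == nil {
    for _, call := range resp.Choices[0].Message.ToolCalls {
        fmt.Println(call.Function.Name, call.Function.Arguments) // arguments arrive as a JSON string
    }
}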
Error handling:

OpenAI maintains clear documentation on how to handle API errors.

Example:
e := &openai.APIError{}
if errors.As(err, &e) {
    switch e.HTTPStatusCode {
    case 401:
        // invalid auth or key (do not retry)
    case 429:
        // rate limiting or engine overload (wait and retry)
    case 500:
        // openai server error (retry)
    default:
        // unhandled
    }
}
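A small retry predicate can be built on that classification. A hedged sketch (the helper name and the policy are illustrative, not part of the library):

// retryable reports whether a failed request is worth retrying, per the
// status-code classification above. Illustrative helper, not a library API.
func retryable(err error) bool {
    e := &openai.APIError{}
    if errors.As(err, &e) {
        switch e.HTTPStatusCode {
        case 429, 500:
            return true // rate limit or server error: wait and retry
        }
    }
    return false // 401 and anything unclassified: do not retry
}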
Fine-tune a model example:

package main
import (
    "context"
    "fmt"

    "github.com/sashabaranov/go-openai"
)

func main() {
    client := openai.NewClient("your token")
    ctx := context.Background()

    // create a .jsonl file with your training data for conversational model
    // {"prompt": "<prompt text>", "completion": "<ideal generated text>"}
    // {"prompt": "<prompt text>", "completion": "<ideal generated text>"}
    // {"prompt": "<prompt text>", "completion": "<ideal generated text>"}

    // chat models are trained using the following file format:
    // {"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "What's the capital of France?"}, {"role": "assistant", "content": "Paris, as if everyone doesn't know that already."}]}
    // {"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "Who wrote 'Romeo and Juliet'?"}, {"role": "assistant", "content": "Oh, just some guy named William Shakespeare. Ever heard of him?"}]}
    // {"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "How far is the Moon from Earth?"}, {"role": "assistant", "content": "Around 384,400 kilometers. Give or take a few, like that really matters."}]}

    // you can use openai cli tool to validate the data
    // For more info - https://platform.openai.com/docs/guides/fine-tuning

    file, err := client.CreateFile(ctx, openai.FileRequest{
        FilePath: "training_prepared.jsonl",
        Purpose:  "fine-tune",
    })
    if err != nil {
        fmt.Printf("Upload JSONL file error: %v\n", err)
        return
    }

    // create a fine tuning job
    // Streams events until the job is done (this often takes minutes, but can take hours if there are many jobs in the queue or your dataset is large)
    // use below get method to know the status of your model
    fineTuningJob, err := client.CreateFineTuningJob(ctx, openai.FineTuningJobRequest{
        TrainingFile: file.ID,
        Model:        "davinci-002", // gpt-3.5-turbo-0613, babbage-002.
    })
    if err != nil {
        fmt.Printf("Creating new fine tune model error: %v\n", err)
        return
    }

    fineTuningJob, err = client.RetrieveFineTuningJob(ctx, fineTuningJob.ID)
    if err != nil {
        fmt.Printf("Getting fine tune model error: %v\n", err)
        return
    }
    fmt.Println(fineTuningJob.FineTunedModel)

    // once the status of fineTuningJob is `succeeded`, you can use your fine tune model in Completion Request or Chat Completion Request
    // resp, err := client.CreateCompletion(ctx, openai.CompletionRequest{
    //     Model:  fineTuningJob.FineTunedModel,
    //     Prompt: "your prompt",
    // })
    // if err != nil {
    //     fmt.Printf("Create completion error %v\n", err)
    //     return
    // }
    //
    // fmt.Println(resp.Choices[0].Text)
}

Structured Outputs example:

package main
import (
    "context"
    "fmt"
    "log"

    "github.com/sashabaranov/go-openai"
    "github.com/sashabaranov/go-openai/jsonschema"
)

func main() {
    client := openai.NewClient("your token")
    ctx := context.Background()

    type Result struct {
        Steps []struct {
            Explanation string `json:"explanation"`
            Output      string `json:"output"`
        } `json:"steps"`
        FinalAnswer string `json:"final_answer"`
    }
    var result Result
    schema, err := jsonschema.GenerateSchemaForType(result)
    if err != nil {
        log.Fatalf("GenerateSchemaForType error: %v", err)
    }
    resp, err := client.CreateChatCompletion(ctx, openai.ChatCompletionRequest{
        Model: openai.GPT4oMini,
        Messages: []openai.ChatCompletionMessage{
            {
                Role:    openai.ChatMessageRoleSystem,
                Content: "You are a helpful math tutor. Guide the user through the solution step by step.",
            },
            {
                Role:    openai.ChatMessageRoleUser,
                Content: "how can I solve 8x + 7 = -23",
            },
        },
        ResponseFormat: &openai.ChatCompletionResponseFormat{
            Type: openai.ChatCompletionResponseFormatTypeJSONSchema,
            JSONSchema: &openai.ChatCompletionResponseFormatJSONSchema{
                Name:   "math_reasoning",
                Schema: schema,
                Strict: true,
            },
        },
    })
    if err != nil {
        log.Fatalf("CreateChatCompletion error: %v", err)
    }
    err = schema.Unmarshal(resp.Choices[0].Message.Content, &result)
    if err != nil {
        log.Fatalf("Unmarshal schema error: %v", err)
    }
    fmt.Println(result)
}

Why don't I get the same answer when I specify a temperature field of 0?

Even when you specify a temperature field of 0, it doesn't guarantee that you'll always get the same response. Several factors come into play:

1. Go OpenAI behavior: when the temperature field is 0, its omitempty tag removes the field from the request, so the API applies its default value instead.
2. OpenAI behavior: OpenAI does not guarantee producing the same results even with identical parameters.

Due to the factors mentioned above, different answers may be returned even for the same question.

Workarounds:

1. Use seed in conjunction with the system_fingerprint response field, alongside temperature management.
2. Specify math.SmallestNonzeroFloat32 in the temperature field instead of 0: this mimics the behavior of setting it to 0.

By adopting these strategies, you can expect more consistent results.
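A minimal sketch of these workarounds in a request (it assumes a library version where ChatCompletionRequest has a Seed pointer field, and it requires the "math" import):

// Illustrative: mimic temperature 0 despite the omitempty behavior described above.
seed := 42 // pair a fixed seed with the system_fingerprint response field
req := openai.ChatCompletionRequest{
    Model:       openai.GPT3Dot5Turbo,
    Temperature: math.SmallestNonzeroFloat32, // survives omitempty, behaves like 0
    Seed:        &seed,                       // assumes the Seed field exists in your version
    Messages: []openai.ChatCompletionMessage{
        {Role: openai.ChatMessageRoleUser, Content: "Hello!"},
    },
}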
Related issues:

The omitempty option of the request struct will generate an incorrect request when the parameter is 0.
Does Go OpenAI provide a method to count tokens?

No, Go OpenAI does not offer a feature to count tokens, and there are no plans to provide such a feature in the future. However, if there is a way to implement a token counting feature with zero dependencies, it might be possible to merge that feature into Go OpenAI. Otherwise, it would be more appropriate to implement it in a dedicated library or repository.

For counting tokens, third-party tokenizer implementations may be useful (see the hedged sketch below).

Related issues:

Is it possible to join the implementation of GPT3 Tokenizer
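As one hedged example, token counting can be done outside this library with the third-party tiktoken-go package (API as described in that project's own docs at github.com/pkoukk/tiktoken-go; verify against the version you install):

package main

// Illustrative only: counts tokens via the third-party tiktoken-go package,
// which is not part of go-openai.
import (
    "fmt"

    "github.com/pkoukk/tiktoken-go"
)

func main() {
    tkm, err := tiktoken.EncodingForModel("gpt-3.5-turbo")
    if err != nil {
        panic(err)
    }
    tokens := tkm.Encode("Hello, world!", nil, nil)
    fmt.Println(len(tokens))
}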
Contributing:

By following the Contributing Guidelines, we hope to ensure that your contributions are made smoothly and efficiently.
Thank you:

We want to take a moment to express our deepest gratitude to the contributors and sponsors of this project. To all of you: thank you. You've helped us achieve more than we ever imagined possible. Can't wait to see where we go next, together!