This library provides unofficial Go clients for the OpenAI API. We support ChatGPT, GPT-3/GPT-4, DALL·E 2/3, and Whisper.

Installation:
go get github.com/sashabaranov/go-openai
Currently, go-openai requires Go version 1.18 or greater.
ChatGPT example usage:

package main

import (
	"context"
	"fmt"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	client := openai.NewClient("your token")
	resp, err := client.CreateChatCompletion(
		context.Background(),
		openai.ChatCompletionRequest{
			Model: openai.GPT3Dot5Turbo,
			Messages: []openai.ChatCompletionMessage{
				{
					Role:    openai.ChatMessageRoleUser,
					Content: "Hello!",
				},
			},
		},
	)
	if err != nil {
		fmt.Printf("ChatCompletion error: %v\n", err)
		return
	}

	fmt.Println(resp.Choices[0].Message.Content)
}

Note: your API key is sensitive information. Do not share it with anyone.
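One way to keep the key out of your source code is to read it from the environment; a minimal sketch (the OPENAI_API_KEY variable name is only a common convention, not something the library requires):

package main

import (
	"fmt"
	"os"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	// Read the key from the environment instead of hard-coding it.
	apiKey := os.Getenv("OPENAI_API_KEY")
	if apiKey == "" {
		fmt.Println("OPENAI_API_KEY is not set")
		return
	}
	client := openai.NewClient(apiKey)
	_ = client // use the client exactly as in the example above
}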
ChatGPT streaming completion:

package main

import (
	"context"
	"errors"
	"fmt"
	"io"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	c := openai.NewClient("your token")
	ctx := context.Background()

	req := openai.ChatCompletionRequest{
		Model:     openai.GPT3Dot5Turbo,
		MaxTokens: 20,
		Messages: []openai.ChatCompletionMessage{
			{
				Role:    openai.ChatMessageRoleUser,
				Content: "Lorem ipsum",
			},
		},
		Stream: true,
	}
	stream, err := c.CreateChatCompletionStream(ctx, req)
	if err != nil {
		fmt.Printf("ChatCompletionStream error: %v\n", err)
		return
	}
	defer stream.Close()

	fmt.Printf("Stream response: ")
	for {
		response, err := stream.Recv()
		if errors.Is(err, io.EOF) {
			fmt.Println("\nStream finished")
			return
		}
		if err != nil {
			fmt.Printf("\nStream error: %v\n", err)
			return
		}
		fmt.Print(response.Choices[0].Delta.Content)
	}
}

GPT-3 completion:

package main
import (
	"context"
	"fmt"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	c := openai.NewClient("your token")
	ctx := context.Background()

	req := openai.CompletionRequest{
		Model:     openai.GPT3Babbage002,
		MaxTokens: 5,
		Prompt:    "Lorem ipsum",
	}
	resp, err := c.CreateCompletion(ctx, req)
	if err != nil {
		fmt.Printf("Completion error: %v\n", err)
		return
	}
	fmt.Println(resp.Choices[0].Text)
}

GPT-3 streaming completion:

package main
import (
	"context"
	"errors"
	"fmt"
	"io"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	c := openai.NewClient("your token")
	ctx := context.Background()

	req := openai.CompletionRequest{
		Model:     openai.GPT3Babbage002,
		MaxTokens: 5,
		Prompt:    "Lorem ipsum",
		Stream:    true,
	}
	stream, err := c.CreateCompletionStream(ctx, req)
	if err != nil {
		fmt.Printf("CompletionStream error: %v\n", err)
		return
	}
	defer stream.Close()

	for {
		response, err := stream.Recv()
		if errors.Is(err, io.EOF) {
			fmt.Println("Stream finished")
			return
		}
		if err != nil {
			fmt.Printf("Stream error: %v\n", err)
			return
		}
		fmt.Printf("Stream response: %v\n", response)
	}
}

Audio Speech-To-Text (Whisper) transcription:

package main
import (
	"context"
	"fmt"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	c := openai.NewClient("your token")
	ctx := context.Background()

	req := openai.AudioRequest{
		Model:    openai.Whisper1,
		FilePath: "recording.mp3",
	}
	resp, err := c.CreateTranscription(ctx, req)
	if err != nil {
		fmt.Printf("Transcription error: %v\n", err)
		return
	}
	fmt.Println(resp.Text)
}

Audio captions (SRT output):

package main
import (
	"context"
	"fmt"
	"os"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	c := openai.NewClient(os.Getenv("OPENAI_KEY"))

	req := openai.AudioRequest{
		Model:    openai.Whisper1,
		FilePath: os.Args[1],
		Format:   openai.AudioResponseFormatSRT,
	}
	resp, err := c.CreateTranscription(context.Background(), req)
	if err != nil {
		fmt.Printf("Transcription error: %v\n", err)
		return
	}
	f, err := os.Create(os.Args[1] + ".srt")
	if err != nil {
		fmt.Printf("Could not open file: %v\n", err)
		return
	}
	defer f.Close()
	if _, err := f.WriteString(resp.Text); err != nil {
		fmt.Printf("Error writing to file: %v\n", err)
		return
	}
}

DALL-E 2 image generation:

package main
import (
	"bytes"
	"context"
	"encoding/base64"
	"fmt"
	"image/png"
	"os"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	c := openai.NewClient("your token")
	ctx := context.Background()

	// Sample image by link
	reqUrl := openai.ImageRequest{
		Prompt:         "Parrot on a skateboard performs a trick, cartoon style, natural light, high detail",
		Size:           openai.CreateImageSize256x256,
		ResponseFormat: openai.CreateImageResponseFormatURL,
		N:              1,
	}
	respUrl, err := c.CreateImage(ctx, reqUrl)
	if err != nil {
		fmt.Printf("Image creation error: %v\n", err)
		return
	}
	fmt.Println(respUrl.Data[0].URL)

	// Example image as base64
	reqBase64 := openai.ImageRequest{
		Prompt:         "Portrait of a humanoid parrot in a classic costume, high detail, realistic light, unreal engine",
		Size:           openai.CreateImageSize256x256,
		ResponseFormat: openai.CreateImageResponseFormatB64JSON,
		N:              1,
	}
	respBase64, err := c.CreateImage(ctx, reqBase64)
	if err != nil {
		fmt.Printf("Image creation error: %v\n", err)
		return
	}

	imgBytes, err := base64.StdEncoding.DecodeString(respBase64.Data[0].B64JSON)
	if err != nil {
		fmt.Printf("Base64 decode error: %v\n", err)
		return
	}

	r := bytes.NewReader(imgBytes)
	imgData, err := png.Decode(r)
	if err != nil {
		fmt.Printf("PNG decode error: %v\n", err)
		return
	}

	file, err := os.Create("example.png")
	if err != nil {
		fmt.Printf("File creation error: %v\n", err)
		return
	}
	defer file.Close()

	if err := png.Encode(file, imgData); err != nil {
		fmt.Printf("PNG encode error: %v\n", err)
		return
	}
	fmt.Println("The image was saved as example.png")
}

Configuring proxy:

config := openai.DefaultConfig("token")
proxyUrl, err := url.Parse("http://localhost:{port}")
if err != nil {
	panic(err)
}
transport := &http.Transport{
	Proxy: http.ProxyURL(proxyUrl),
}
config.HTTPClient = &http.Client{
	Transport: transport,
}

c := openai.NewClientWithConfig(config)

See also: https://pkg.go.dev/github.com/sashabaranov/go-openai#ClientConfig
ChatGPT support context (a simple conversation loop):

package main

import (
	"bufio"
	"context"
	"fmt"
	"os"
	"strings"

	"github.com/sashabaranov/go-openai"
)

func main() {
	client := openai.NewClient("your token")
	messages := make([]openai.ChatCompletionMessage, 0)
	reader := bufio.NewReader(os.Stdin)
	fmt.Println("Conversation")
	fmt.Println("---------------------")

	for {
		fmt.Print("-> ")
		text, _ := reader.ReadString('\n')
		// convert CRLF to LF
		text = strings.Replace(text, "\n", "", -1)
		messages = append(messages, openai.ChatCompletionMessage{
			Role:    openai.ChatMessageRoleUser,
			Content: text,
		})

		resp, err := client.CreateChatCompletion(
			context.Background(),
			openai.ChatCompletionRequest{
				Model:    openai.GPT3Dot5Turbo,
				Messages: messages,
			},
		)
		if err != nil {
			fmt.Printf("ChatCompletion error: %v\n", err)
			continue
		}

		content := resp.Choices[0].Message.Content
		messages = append(messages, openai.ChatCompletionMessage{
			Role:    openai.ChatMessageRoleAssistant,
			Content: content,
		})
		fmt.Println(content)
	}
}

Azure OpenAI ChatGPT:

package main
import (
	"context"
	"fmt"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	config := openai.DefaultAzureConfig("your Azure OpenAI Key", "https://your Azure OpenAI Endpoint")
	// If you use a deployment name different from the model name, you can customize the AzureModelMapperFunc function
	// config.AzureModelMapperFunc = func(model string) string {
	//     azureModelMapping := map[string]string{
	//         "gpt-3.5-turbo": "your gpt-3.5-turbo deployment name",
	//     }
	//     return azureModelMapping[model]
	// }

	client := openai.NewClientWithConfig(config)
	resp, err := client.CreateChatCompletion(
		context.Background(),
		openai.ChatCompletionRequest{
			Model: openai.GPT3Dot5Turbo,
			Messages: []openai.ChatCompletionMessage{
				{
					Role:    openai.ChatMessageRoleUser,
					Content: "Hello Azure OpenAI!",
				},
			},
		},
	)
	if err != nil {
		fmt.Printf("ChatCompletion error: %v\n", err)
		return
	}
	fmt.Println(resp.Choices[0].Message.Content)
}

Embedding semantic similarity:

package main
import (
	"context"
	"log"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	client := openai.NewClient("your-token")

	// Create an EmbeddingRequest for the user query
	queryReq := openai.EmbeddingRequest{
		Input: []string{"How many chucks would a woodchuck chuck"},
		Model: openai.AdaEmbeddingV2,
	}

	// Create an embedding for the user query
	queryResponse, err := client.CreateEmbeddings(context.Background(), queryReq)
	if err != nil {
		log.Fatal("Error creating query embedding:", err)
	}

	// Create an EmbeddingRequest for the target text
	targetReq := openai.EmbeddingRequest{
		Input: []string{"How many chucks would a woodchuck chuck if the woodchuck could chuck wood"},
		Model: openai.AdaEmbeddingV2,
	}

	// Create an embedding for the target text
	targetResponse, err := client.CreateEmbeddings(context.Background(), targetReq)
	if err != nil {
		log.Fatal("Error creating target embedding:", err)
	}

	// Now that we have the embeddings for the user query and the target text, we
	// can calculate their similarity.
	queryEmbedding := queryResponse.Data[0]
	targetEmbedding := targetResponse.Data[0]
	similarity, err := queryEmbedding.DotProduct(&targetEmbedding)
	if err != nil {
		log.Fatal("Error calculating dot product:", err)
	}
	log.Printf("The similarity score between the query and the target is %f", similarity)
}

Azure OpenAI embeddings:

package main
import (
	"context"
	"fmt"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	config := openai.DefaultAzureConfig("your Azure OpenAI Key", "https://your Azure OpenAI Endpoint")
	config.APIVersion = "2023-05-15" // optional update to latest API version

	// If you use a deployment name different from the model name, you can customize the AzureModelMapperFunc function
	// config.AzureModelMapperFunc = func(model string) string {
	//     azureModelMapping := map[string]string{
	//         "gpt-3.5-turbo": "your gpt-3.5-turbo deployment name",
	//     }
	//     return azureModelMapping[model]
	// }

	input := "Text to vectorize"

	client := openai.NewClientWithConfig(config)
	resp, err := client.CreateEmbeddings(
		context.Background(),
		openai.EmbeddingRequest{
			Input: []string{input},
			Model: openai.AdaEmbeddingV2,
		})
	if err != nil {
		fmt.Printf("CreateEmbeddings error: %v\n", err)
		return
	}

	vectors := resp.Data[0].Embedding // []float32 with 1536 dimensions
	fmt.Println(vectors[:10], "...", vectors[len(vectors)-10:])
}

Functions:

It is now possible for chat completions to choose to call a function to get more information (see the developer docs).
In order to describe the types of functions that can be called, a JSON schema must be provided. Many JSON schema libraries exist and are more advanced than what we can offer in this library; however, we have included a simple jsonschema package for those who want to use this feature without formatting their own JSON schema payload.
The developer docs give this JSON schema definition as an example:
{
  "name": "get_current_weather",
  "description": "Get the current weather in a given location",
  "parameters": {
    "type": "object",
    "properties": {
      "location": {
        "type": "string",
        "description": "The city and state, e.g. San Francisco, CA"
      },
      "unit": {
        "type": "string",
        "enum": ["celsius", "fahrenheit"]
      }
    },
    "required": ["location"]
  }
}

Using the jsonschema package, this schema could be created using structs as such:
FunctionDefinition{
	Name: "get_current_weather",
	Parameters: jsonschema.Definition{
		Type: jsonschema.Object,
		Properties: map[string]jsonschema.Definition{
			"location": {
				Type:        jsonschema.String,
				Description: "The city and state, e.g. San Francisco, CA",
			},
			"unit": {
				Type: jsonschema.String,
				Enum: []string{"celsius", "fahrenheit"},
			},
		},
		Required: []string{"location"},
	},
}

The Parameters field of a FunctionDefinition can accept either of the above styles, or even a nested struct from another library (as long as it can be marshalled into JSON). A complete request wiring this definition into a chat completion is sketched below.
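As an illustration, a sketch of a full request that offers this function to the model via the tools API of recent go-openai versions (the user question and the handling of the returned arguments are illustrative only):

package main

import (
	"context"
	"fmt"

	openai "github.com/sashabaranov/go-openai"
	"github.com/sashabaranov/go-openai/jsonschema"
)

func main() {
	client := openai.NewClient("your token")

	// The function definition built with the jsonschema package, as above.
	f := openai.FunctionDefinition{
		Name:        "get_current_weather",
		Description: "Get the current weather in a given location",
		Parameters: jsonschema.Definition{
			Type: jsonschema.Object,
			Properties: map[string]jsonschema.Definition{
				"location": {
					Type:        jsonschema.String,
					Description: "The city and state, e.g. San Francisco, CA",
				},
			},
			Required: []string{"location"},
		},
	}

	resp, err := client.CreateChatCompletion(
		context.Background(),
		openai.ChatCompletionRequest{
			Model: openai.GPT3Dot5Turbo,
			Messages: []openai.ChatCompletionMessage{
				{Role: openai.ChatMessageRoleUser, Content: "What's the weather like in Boston?"},
			},
			Tools: []openai.Tool{
				{Type: openai.ToolTypeFunction, Function: &f},
			},
		},
	)
	if err != nil {
		fmt.Printf("ChatCompletion error: %v\n", err)
		return
	}

	// If the model chose to call the function, each tool call carries the
	// function name and a JSON string of arguments matching the schema.
	for _, call := range resp.Choices[0].Message.ToolCalls {
		fmt.Println(call.Function.Name, call.Function.Arguments)
	}
}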
Error handling:

OpenAI maintains clear documentation on how to handle API errors. Example:
e := &openai.APIError{}
if errors.As(err, &e) {
switch e.HTTPStatusCode {
case 401:
// invalid auth or key (do not retry)
case 429:
// rate limiting or engine overload (wait and retry)
case 500:
// openai server error (retry)
default:
// unhandled
}
}
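Building on the status codes above, a caller might automatically retry rate-limited requests. A minimal sketch; retryOnRateLimit is a hypothetical helper (the attempt count and linear backoff are arbitrary choices, not library behavior):

package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	client := openai.NewClient("your token")

	var resp openai.ChatCompletionResponse
	err := retryOnRateLimit(3, func() error {
		var err error
		resp, err = client.CreateChatCompletion(context.Background(), openai.ChatCompletionRequest{
			Model: openai.GPT3Dot5Turbo,
			Messages: []openai.ChatCompletionMessage{
				{Role: openai.ChatMessageRoleUser, Content: "Hello!"},
			},
		})
		return err
	})
	if err != nil {
		fmt.Printf("ChatCompletion error: %v\n", err)
		return
	}
	fmt.Println(resp.Choices[0].Message.Content)
}

// retryOnRateLimit retries fn while the API reports rate limiting (HTTP 429),
// sleeping a little longer before each new attempt.
func retryOnRateLimit(attempts int, fn func() error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if err = fn(); err == nil {
			return nil
		}
		e := &openai.APIError{}
		if !errors.As(err, &e) || e.HTTPStatusCode != 429 {
			return err // not retryable per the table above
		}
		time.Sleep(time.Duration(i+1) * time.Second) // simple linear backoff
	}
	return err
}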
Fine tune model:

package main

import (
	"context"
	"fmt"

	"github.com/sashabaranov/go-openai"
)

func main() {
	client := openai.NewClient("your token")
	ctx := context.Background()

	// create a .jsonl file with your training data for conversational model
	// {"prompt": "<prompt text>", "completion": "<ideal generated text>"}
	// {"prompt": "<prompt text>", "completion": "<ideal generated text>"}
	// {"prompt": "<prompt text>", "completion": "<ideal generated text>"}

	// chat models are trained using the following file format:
	// {"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "What's the capital of France?"}, {"role": "assistant", "content": "Paris, as if everyone doesn't know that already."}]}
	// {"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "Who wrote 'Romeo and Juliet'?"}, {"role": "assistant", "content": "Oh, just some guy named William Shakespeare. Ever heard of him?"}]}
	// {"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "How far is the Moon from Earth?"}, {"role": "assistant", "content": "Around 384,400 kilometers. Give or take a few, like that really matters."}]}

	// you can use openai cli tool to validate the data
	// For more info - https://platform.openai.com/docs/guides/fine-tuning

	file, err := client.CreateFile(ctx, openai.FileRequest{
		FilePath: "training_prepared.jsonl",
		Purpose:  "fine-tune",
	})
	if err != nil {
		fmt.Printf("Upload JSONL file error: %v\n", err)
		return
	}

	// create a fine tuning job
	// Streams events until the job is done (this often takes minutes, but can take hours if there are many jobs in the queue or your dataset is large)
	// use below get method to know the status of your model
	fineTuningJob, err := client.CreateFineTuningJob(ctx, openai.FineTuningJobRequest{
		TrainingFile: file.ID,
		Model:        "davinci-002", // gpt-3.5-turbo-0613, babbage-002.
	})
	if err != nil {
		fmt.Printf("Creating new fine tune model error: %v\n", err)
		return
	}

	fineTuningJob, err = client.RetrieveFineTuningJob(ctx, fineTuningJob.ID)
	if err != nil {
		fmt.Printf("Getting fine tune model error: %v\n", err)
		return
	}
	fmt.Println(fineTuningJob.FineTunedModel)

	// once the status of fineTuningJob is `succeeded`, you can use your fine tune model in Completion Request or Chat Completion Request
	// resp, err := client.CreateCompletion(ctx, openai.CompletionRequest{
	//     Model:  fineTuningJob.FineTunedModel,
	//     Prompt: "your prompt",
	// })
	// if err != nil {
	//     fmt.Printf("Create completion error %v\n", err)
	//     return
	// }
	//
	// fmt.Println(resp.Choices[0].Text)
}

Structured Outputs:

package main
import (
	"context"
	"fmt"
	"log"

	"github.com/sashabaranov/go-openai"
	"github.com/sashabaranov/go-openai/jsonschema"
)

func main() {
	client := openai.NewClient("your token")
	ctx := context.Background()

	type Result struct {
		Steps []struct {
			Explanation string `json:"explanation"`
			Output      string `json:"output"`
		} `json:"steps"`
		FinalAnswer string `json:"final_answer"`
	}
	var result Result
	schema, err := jsonschema.GenerateSchemaForType(result)
	if err != nil {
		log.Fatalf("GenerateSchemaForType error: %v", err)
	}
	resp, err := client.CreateChatCompletion(ctx, openai.ChatCompletionRequest{
		Model: openai.GPT4oMini,
		Messages: []openai.ChatCompletionMessage{
			{
				Role:    openai.ChatMessageRoleSystem,
				Content: "You are a helpful math tutor. Guide the user through the solution step by step.",
			},
			{
				Role:    openai.ChatMessageRoleUser,
				Content: "how can I solve 8x + 7 = -23",
			},
		},
		ResponseFormat: &openai.ChatCompletionResponseFormat{
			Type: openai.ChatCompletionResponseFormatTypeJSONSchema,
			JSONSchema: &openai.ChatCompletionResponseFormatJSONSchema{
				Name:   "math_reasoning",
				Schema: schema,
				Strict: true,
			},
		},
	})
	if err != nil {
		log.Fatalf("CreateChatCompletion error: %v", err)
	}
	err = schema.Unmarshal(resp.Choices[0].Message.Content, &result)
	if err != nil {
		log.Fatalf("Unmarshal schema error: %v", err)
	}
	fmt.Println(result)
}

FAQ: Why don't I get the same answer when I specify a temperature field of 0 and ask the same question?

Even when you specify a temperature field of 0, it doesn't guarantee that you'll always get the same response. Several factors come into play:
1. Go OpenAI behavior: when you specify a temperature field of 0, the omitempty tag causes that field to be removed from the request, so the OpenAI API applies its default value of 1.
2. Token count for input/output: with a large number of input and output tokens, a temperature of 0 can still produce non-deterministic behavior.
Due to the factors mentioned above, different answers may be returned even for the same question.

Workarounds:
1. Specify math.SmallestNonzeroFloat32 in the temperature field instead of 0: this mimics setting the temperature to 0 while surviving the omitempty check.
2. Use the seed request field in conjunction with the system_fingerprint response field, alongside temperature management.

By adopting these strategies, you can expect more consistent results. A sketch combining both follows.
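A minimal sketch, assuming your go-openai version exposes the Seed request field and the SystemFingerprint response field (both tied to newer OpenAI models); the seed value 42 is arbitrary:

package main

import (
	"context"
	"fmt"
	"math"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	client := openai.NewClient("your token")

	seed := 42 // any fixed value; reuse it across calls for best-effort determinism
	resp, err := client.CreateChatCompletion(context.Background(), openai.ChatCompletionRequest{
		Model: openai.GPT3Dot5Turbo,
		// math.SmallestNonzeroFloat32 is effectively 0 but survives omitempty.
		Temperature: math.SmallestNonzeroFloat32,
		Seed:        &seed,
		Messages: []openai.ChatCompletionMessage{
			{Role: openai.ChatMessageRoleUser, Content: "Hello!"},
		},
	})
	if err != nil {
		fmt.Printf("ChatCompletion error: %v\n", err)
		return
	}
	// If system_fingerprint changes between calls, the backend configuration
	// changed, and responses may differ even with the same seed.
	fmt.Println(resp.SystemFingerprint, resp.Choices[0].Message.Content)
}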
Related issues:
omitempty option of request struct will generate incorrect request when parameter is 0.
Does Go OpenAI provide a method to count tokens? No, Go OpenAI does not offer a feature to count tokens, and there are no plans to provide such a feature in the future. However, if there is a way to implement a token counting feature with zero dependencies, it might be possible to merge that feature into Go OpenAI. Otherwise, it would be more appropriate to implement it in a dedicated library or repository.
For counting tokens, third-party tokenizer ports such as tiktoken-go can be helpful; a hedged sketch follows the related issue below.
Related issues:
Is it possible to join the implementation of GPT3 Tokenizer?
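As an illustration, a token counting sketch built on the third-party tiktoken-go package (github.com/pkoukk/tiktoken-go); its EncodingForModel and Encode API is assumed from that project's documentation and is not part of Go OpenAI:

package main

import (
	"fmt"

	"github.com/pkoukk/tiktoken-go"
)

func main() {
	// Look up the tokenizer matching the target model.
	tkm, err := tiktoken.EncodingForModel("gpt-3.5-turbo")
	if err != nil {
		fmt.Printf("EncodingForModel error: %v\n", err)
		return
	}
	// Encode returns the token IDs; their count is the token count.
	tokens := tkm.Encode("Hello, world!", nil, nil)
	fmt.Println("token count:", len(tokens))
}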
By following the Contributing Guidelines, we hope to ensure that your contributions are made smoothly and efficiently.
We want to take a moment to express our deepest gratitude to the contributors and sponsors of this project:
To all of you: thank you. You've helped us achieve more than we ever imagined possible. We can't wait to see where we go next, together!