This library provides an unofficial Go client for the OpenAI API. We support ChatGPT, GPT-3/GPT-4, DALL-E 2, and Whisper.

Installation:

go get github.com/sashabaranov/go-openai

Currently, go-openai requires Go version 1.18 or greater.
ChatGPT example usage:

package main

import (
	"context"
	"fmt"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	client := openai.NewClient("your token")
	resp, err := client.CreateChatCompletion(
		context.Background(),
		openai.ChatCompletionRequest{
			Model: openai.GPT3Dot5Turbo,
			Messages: []openai.ChatCompletionMessage{
				{
					Role:    openai.ChatMessageRoleUser,
					Content: "Hello!",
				},
			},
		},
	)
	if err != nil {
		fmt.Printf("ChatCompletion error: %v\n", err)
		return
	}

	fmt.Println(resp.Choices[0].Message.Content)
}

Note: your API key is sensitive information. Do not share it with anyone.
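One common way to keep the key out of source code is to read it from an environment variable, as the captions example later in this document does with OPENAI_KEY. A minimal sketch (the variable name OPENAI_API_KEY is your choice; requires the "os" import):

client := openai.NewClient(os.Getenv("OPENAI_API_KEY"))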
ChatGPT streaming completion:

package main

import (
	"context"
	"errors"
	"fmt"
	"io"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	c := openai.NewClient("your token")
	ctx := context.Background()

	req := openai.ChatCompletionRequest{
		Model:     openai.GPT3Dot5Turbo,
		MaxTokens: 20,
		Messages: []openai.ChatCompletionMessage{
			{
				Role:    openai.ChatMessageRoleUser,
				Content: "Lorem ipsum",
			},
		},
		Stream: true,
	}
	stream, err := c.CreateChatCompletionStream(ctx, req)
	if err != nil {
		fmt.Printf("ChatCompletionStream error: %v\n", err)
		return
	}
	defer stream.Close()

	fmt.Printf("Stream response: ")
	for {
		response, err := stream.Recv()
		if errors.Is(err, io.EOF) {
			fmt.Println("\nStream finished")
			return
		}

		if err != nil {
			fmt.Printf("\nStream error: %v\n", err)
			return
		}

		// Print each streamed delta as it arrives.
		fmt.Print(response.Choices[0].Delta.Content)
	}
}

GPT-3 completion:

package main
import (
	"context"
	"fmt"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	c := openai.NewClient("your token")
	ctx := context.Background()

	req := openai.CompletionRequest{
		Model:     openai.GPT3Babbage002,
		MaxTokens: 5,
		Prompt:    "Lorem ipsum",
	}
	resp, err := c.CreateCompletion(ctx, req)
	if err != nil {
		fmt.Printf("Completion error: %v\n", err)
		return
	}
	fmt.Println(resp.Choices[0].Text)
}

GPT-3 streaming completion:

package main
import (
	"context"
	"errors"
	"fmt"
	"io"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	c := openai.NewClient("your token")
	ctx := context.Background()

	req := openai.CompletionRequest{
		Model:     openai.GPT3Babbage002,
		MaxTokens: 5,
		Prompt:    "Lorem ipsum",
		Stream:    true,
	}
	stream, err := c.CreateCompletionStream(ctx, req)
	if err != nil {
		fmt.Printf("CompletionStream error: %v\n", err)
		return
	}
	defer stream.Close()

	for {
		response, err := stream.Recv()
		if errors.Is(err, io.EOF) {
			fmt.Println("Stream finished")
			return
		}

		if err != nil {
			fmt.Printf("Stream error: %v\n", err)
			return
		}

		fmt.Printf("Stream response: %v\n", response)
	}
}

Audio speech-to-text (Whisper) transcription:

package main
import (
	"context"
	"fmt"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	c := openai.NewClient("your token")
	ctx := context.Background()

	req := openai.AudioRequest{
		Model:    openai.Whisper1,
		FilePath: "recording.mp3",
	}
	resp, err := c.CreateTranscription(ctx, req)
	if err != nil {
		fmt.Printf("Transcription error: %v\n", err)
		return
	}
	fmt.Println(resp.Text)
}

Audio captions (SRT output):

package main
import (
	"context"
	"fmt"
	"os"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	c := openai.NewClient(os.Getenv("OPENAI_KEY"))

	req := openai.AudioRequest{
		Model:    openai.Whisper1,
		FilePath: os.Args[1],
		Format:   openai.AudioResponseFormatSRT,
	}
	resp, err := c.CreateTranscription(context.Background(), req)
	if err != nil {
		fmt.Printf("Transcription error: %v\n", err)
		return
	}
	f, err := os.Create(os.Args[1] + ".srt")
	if err != nil {
		fmt.Printf("Could not open file: %v\n", err)
		return
	}
	defer f.Close()
	if _, err := f.WriteString(resp.Text); err != nil {
		fmt.Printf("Error writing to file: %v\n", err)
		return
	}
}

DALL-E 2 image generation:

package main
import (
	"bytes"
	"context"
	"encoding/base64"
	"fmt"
	"image/png"
	"os"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	c := openai.NewClient("your token")
	ctx := context.Background()

	// Sample image by link
	reqUrl := openai.ImageRequest{
		Prompt:         "Parrot on a skateboard performs a trick, cartoon style, natural light, high detail",
		Size:           openai.CreateImageSize256x256,
		ResponseFormat: openai.CreateImageResponseFormatURL,
		N:              1,
	}
	respUrl, err := c.CreateImage(ctx, reqUrl)
	if err != nil {
		fmt.Printf("Image creation error: %v\n", err)
		return
	}
	fmt.Println(respUrl.Data[0].URL)

	// Example image as base64
	reqBase64 := openai.ImageRequest{
		Prompt:         "Portrait of a humanoid parrot in a classic costume, high detail, realistic light, unreal engine",
		Size:           openai.CreateImageSize256x256,
		ResponseFormat: openai.CreateImageResponseFormatB64JSON,
		N:              1,
	}
	respBase64, err := c.CreateImage(ctx, reqBase64)
	if err != nil {
		fmt.Printf("Image creation error: %v\n", err)
		return
	}

	imgBytes, err := base64.StdEncoding.DecodeString(respBase64.Data[0].B64JSON)
	if err != nil {
		fmt.Printf("Base64 decode error: %v\n", err)
		return
	}

	r := bytes.NewReader(imgBytes)
	imgData, err := png.Decode(r)
	if err != nil {
		fmt.Printf("PNG decode error: %v\n", err)
		return
	}

	file, err := os.Create("example.png")
	if err != nil {
		fmt.Printf("File creation error: %v\n", err)
		return
	}
	defer file.Close()

	if err := png.Encode(file, imgData); err != nil {
		fmt.Printf("PNG encode error: %v\n", err)
		return
	}

	fmt.Println("The image was saved as example.png")
}

Configuring an HTTP proxy (a snippet rather than a full program; it assumes the "net/http" and "net/url" packages are imported, and {port} is your proxy's port):

config := openai.DefaultConfig("token")
proxyUrl, err := url.Parse("http://localhost:{port}")
if err != nil {
	panic(err)
}
transport := &http.Transport{
	Proxy: http.ProxyURL(proxyUrl),
}
config.HTTPClient = &http.Client{
	Transport: transport,
}

c := openai.NewClientWithConfig(config)

See also: https://pkg.go.dev/github.com/sashabaranov/go-openai#ClientConfig
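The same ClientConfig can also point the client at a different endpoint. A hedged sketch (BaseURL is a ClientConfig field; the URL below is a hypothetical API-compatible server, not part of the docs above):

config := openai.DefaultConfig("token")
config.BaseURL = "http://localhost:8080/v1" // hypothetical self-hosted, API-compatible endpoint
c := openai.NewClientWithConfig(config)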
ChatGPT conversation with context:

package main

import (
	"bufio"
	"context"
	"fmt"
	"os"
	"strings"

	"github.com/sashabaranov/go-openai"
)

func main() {
	client := openai.NewClient("your token")
	messages := make([]openai.ChatCompletionMessage, 0)
	reader := bufio.NewReader(os.Stdin)
	fmt.Println("Conversation")
	fmt.Println("---------------------")

	for {
		fmt.Print("-> ")
		text, _ := reader.ReadString('\n')
		// convert CRLF to LF
		text = strings.Replace(text, "\n", "", -1)
		messages = append(messages, openai.ChatCompletionMessage{
			Role:    openai.ChatMessageRoleUser,
			Content: text,
		})

		resp, err := client.CreateChatCompletion(
			context.Background(),
			openai.ChatCompletionRequest{
				Model:    openai.GPT3Dot5Turbo,
				Messages: messages,
			},
		)

		if err != nil {
			fmt.Printf("ChatCompletion error: %v\n", err)
			continue
		}

		content := resp.Choices[0].Message.Content
		messages = append(messages, openai.ChatCompletionMessage{
			Role:    openai.ChatMessageRoleAssistant,
			Content: content,
		})
		fmt.Println(content)
	}
}

Azure OpenAI ChatGPT:

package main
import (
	"context"
	"fmt"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	config := openai.DefaultAzureConfig("your Azure OpenAI Key", "https://your Azure OpenAI Endpoint")
	// If you use a deployment name different from the model name, you can customize the AzureModelMapperFunc function
	// config.AzureModelMapperFunc = func(model string) string {
	// 	azureModelMapping := map[string]string{
	// 		"gpt-3.5-turbo": "your gpt-3.5-turbo deployment name",
	// 	}
	// 	return azureModelMapping[model]
	// }

	client := openai.NewClientWithConfig(config)
	resp, err := client.CreateChatCompletion(
		context.Background(),
		openai.ChatCompletionRequest{
			Model: openai.GPT3Dot5Turbo,
			Messages: []openai.ChatCompletionMessage{
				{
					Role:    openai.ChatMessageRoleUser,
					Content: "Hello Azure OpenAI!",
				},
			},
		},
	)
	if err != nil {
		fmt.Printf("ChatCompletion error: %v\n", err)
		return
	}

	fmt.Println(resp.Choices[0].Message.Content)
}

Embedding semantic similarity:

package main
import (
	"context"
	"log"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	client := openai.NewClient("your-token")

	// Create an EmbeddingRequest for the user query
	queryReq := openai.EmbeddingRequest{
		Input: []string{"How many chucks would a woodchuck chuck"},
		Model: openai.AdaEmbeddingV2,
	}

	// Create an embedding for the user query
	queryResponse, err := client.CreateEmbeddings(context.Background(), queryReq)
	if err != nil {
		log.Fatal("Error creating query embedding:", err)
	}

	// Create an EmbeddingRequest for the target text
	targetReq := openai.EmbeddingRequest{
		Input: []string{"How many chucks would a woodchuck chuck if the woodchuck could chuck wood"},
		Model: openai.AdaEmbeddingV2,
	}

	// Create an embedding for the target text
	targetResponse, err := client.CreateEmbeddings(context.Background(), targetReq)
	if err != nil {
		log.Fatal("Error creating target embedding:", err)
	}

	// Now that we have the embeddings for the user query and the target text, we
	// can calculate their similarity.
	queryEmbedding := queryResponse.Data[0]
	targetEmbedding := targetResponse.Data[0]
	similarity, err := queryEmbedding.DotProduct(&targetEmbedding)
	if err != nil {
		log.Fatal("Error calculating dot product:", err)
	}
	log.Printf("The similarity score between the query and the target is %f", similarity)
}

Azure OpenAI embeddings:

package main
import (
	"context"
	"fmt"

	openai "github.com/sashabaranov/go-openai"
)

func main() {
	config := openai.DefaultAzureConfig("your Azure OpenAI Key", "https://your Azure OpenAI Endpoint")
	config.APIVersion = "2023-05-15" // optional update to latest API version

	// If you use a deployment name different from the model name, you can customize the AzureModelMapperFunc function
	// config.AzureModelMapperFunc = func(model string) string {
	// 	azureModelMapping := map[string]string{
	// 		"gpt-3.5-turbo": "your gpt-3.5-turbo deployment name",
	// 	}
	// 	return azureModelMapping[model]
	// }

	input := "Text to vectorize"

	client := openai.NewClientWithConfig(config)
	resp, err := client.CreateEmbeddings(
		context.Background(),
		openai.EmbeddingRequest{
			Input: []string{input},
			Model: openai.AdaEmbeddingV2,
		})
	if err != nil {
		fmt.Printf("CreateEmbeddings error: %v\n", err)
		return
	}

	vectors := resp.Data[0].Embedding // []float32 with 1536 dimensions
	fmt.Println(vectors[:10], "...", vectors[len(vectors)-10:])
}

It is now possible for chat completions to choose to call a function for more information (see the developer docs here).
In order to describe the kind of functions that can be called, a JSON schema must be provided. Many JSON schema libraries exist and are more advanced than what we can offer in this library; however, we have included a simple jsonschema package for those who want to use this feature without formatting their own JSON schema payload.

The developer docs give this JSON schema definition as an example:
{
  "name": "get_current_weather",
  "description": "Get the current weather in a given location",
  "parameters": {
    "type": "object",
    "properties": {
      "location": {
        "type": "string",
        "description": "The city and state, e.g. San Francisco, CA"
      },
      "unit": {
        "type": "string",
        "enum": ["celsius", "fahrenheit"]
      }
    },
    "required": ["location"]
  }
}

Using the jsonschema package, this schema could be created using structs as such:
FunctionDefinition{
	Name: "get_current_weather",
	Parameters: jsonschema.Definition{
		Type: jsonschema.Object,
		Properties: map[string]jsonschema.Definition{
			"location": {
				Type:        jsonschema.String,
				Description: "The city and state, e.g. San Francisco, CA",
			},
			"unit": {
				Type: jsonschema.String,
				Enum: []string{"celsius", "fahrenheit"},
			},
		},
		Required: []string{"location"},
	},
}

The Parameters field of FunctionDefinition can accept either of the above styles, or even a nested struct from another library (as long as it can be marshalled into JSON).
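As a minimal sketch of how such a definition might be wired into a request (this exact snippet is not from the developer docs above; it assumes the definition is stored in a variable named fnDef), go-openai's Tool and ToolTypeFunction types attach it to a chat completion:

fnDef := openai.FunctionDefinition{
	Name: "get_current_weather",
	// Parameters as defined above.
}
req := openai.ChatCompletionRequest{
	Model: openai.GPT3Dot5Turbo,
	Messages: []openai.ChatCompletionMessage{
		{Role: openai.ChatMessageRoleUser, Content: "What's the weather in San Francisco?"},
	},
	Tools: []openai.Tool{
		{Type: openai.ToolTypeFunction, Function: &fnDef},
	},
}
// If the model chooses to call the function, the call appears in
// resp.Choices[0].Message.ToolCalls, with Arguments as a JSON string
// matching the schema above.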
OpenAI maintains clear documentation on how to handle API errors.

Example:
e := &openai.APIError{}
if errors.As(err, &e) {
switch e.HTTPStatusCode {
case 401:
// invalid auth or key (do not retry)
case 429:
// rate limiting or engine overload (wait and retry)
case 500:
// openai server error (retry)
default:
// unhandled
}
}
Fine-tune a model:

package main

import (
	"context"
	"fmt"

	"github.com/sashabaranov/go-openai"
)

func main() {
	client := openai.NewClient("your token")
	ctx := context.Background()

	// create a .jsonl file with your training data for conversational model
	// {"prompt": "<prompt text>", "completion": "<ideal generated text>"}
	// {"prompt": "<prompt text>", "completion": "<ideal generated text>"}
	// {"prompt": "<prompt text>", "completion": "<ideal generated text>"}

	// chat models are trained using the following file format:
	// {"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "What's the capital of France?"}, {"role": "assistant", "content": "Paris, as if everyone doesn't know that already."}]}
	// {"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "Who wrote 'Romeo and Juliet'?"}, {"role": "assistant", "content": "Oh, just some guy named William Shakespeare. Ever heard of him?"}]}
	// {"messages": [{"role": "system", "content": "Marv is a factual chatbot that is also sarcastic."}, {"role": "user", "content": "How far is the Moon from Earth?"}, {"role": "assistant", "content": "Around 384,400 kilometers. Give or take a few, like that really matters."}]}

	// you can use openai cli tool to validate the data
	// For more info - https://platform.openai.com/docs/guides/fine-tuning
	file, err := client.CreateFile(ctx, openai.FileRequest{
		FilePath: "training_prepared.jsonl",
		Purpose:  "fine-tune",
	})
	if err != nil {
		fmt.Printf("Upload JSONL file error: %v\n", err)
		return
	}

	// create a fine tuning job
	// Streams events until the job is done (this often takes minutes, but can take hours if there are many jobs in the queue or your dataset is large)
	// use below get method to know the status of your model
	fineTuningJob, err := client.CreateFineTuningJob(ctx, openai.FineTuningJobRequest{
		TrainingFile: file.ID,
		Model:        "davinci-002", // gpt-3.5-turbo-0613, babbage-002.
	})
	if err != nil {
		fmt.Printf("Creating new fine tune model error: %v\n", err)
		return
	}

	fineTuningJob, err = client.RetrieveFineTuningJob(ctx, fineTuningJob.ID)
	if err != nil {
		fmt.Printf("Getting fine tune model error: %v\n", err)
		return
	}
	fmt.Println(fineTuningJob.FineTunedModel)

	// once the status of fineTuningJob is `succeeded`, you can use your fine tune model in Completion Request or Chat Completion Request
	// resp, err := client.CreateCompletion(ctx, openai.CompletionRequest{
	// 	Model:  fineTuningJob.FineTunedModel,
	// 	Prompt: "your prompt",
	// })
	// if err != nil {
	// 	fmt.Printf("Create completion error %v\n", err)
	// 	return
	// }
	//
	// fmt.Println(resp.Choices[0].Text)
}

Structured Outputs:

package main
import (
	"context"
	"fmt"
	"log"

	"github.com/sashabaranov/go-openai"
	"github.com/sashabaranov/go-openai/jsonschema"
)

func main() {
	client := openai.NewClient("your token")
	ctx := context.Background()

	type Result struct {
		Steps []struct {
			Explanation string `json:"explanation"`
			Output      string `json:"output"`
		} `json:"steps"`
		FinalAnswer string `json:"final_answer"`
	}
	var result Result
	schema, err := jsonschema.GenerateSchemaForType(result)
	if err != nil {
		log.Fatalf("GenerateSchemaForType error: %v", err)
	}
	resp, err := client.CreateChatCompletion(ctx, openai.ChatCompletionRequest{
		Model: openai.GPT4oMini,
		Messages: []openai.ChatCompletionMessage{
			{
				Role:    openai.ChatMessageRoleSystem,
				Content: "You are a helpful math tutor. Guide the user through the solution step by step.",
			},
			{
				Role:    openai.ChatMessageRoleUser,
				Content: "how can I solve 8x + 7 = -23",
			},
		},
		ResponseFormat: &openai.ChatCompletionResponseFormat{
			Type: openai.ChatCompletionResponseFormatTypeJSONSchema,
			JSONSchema: &openai.ChatCompletionResponseFormatJSONSchema{
				Name:   "math_reasoning",
				Schema: schema,
				Strict: true,
			},
		},
	})
	if err != nil {
		log.Fatalf("CreateChatCompletion error: %v", err)
	}
	err = schema.Unmarshal(resp.Choices[0].Message.Content, &result)
	if err != nil {
		log.Fatalf("Unmarshal schema error: %v", err)
	}
	fmt.Println(result)
}

Even when you specify a temperature field of 0, it does not guarantee that you will always get the same response. Several factors come into play.
Due to the factors mentioned above, different answers may be returned even for the same question.

Solutions:
1. Specify a seed in conjunction with the system_fingerprint response field, alongside temperature management.
2. Specify math.SmallestNonzeroFloat32 in the temperature field instead of 0; this mimics the behavior of setting it to 0.

By adopting these strategies, you can expect more consistent results. A short sketch of both workarounds follows this list.
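A minimal sketch of those two workarounds (the literal values are illustrative; Seed and Temperature are fields of the go-openai ChatCompletionRequest, and the snippet assumes the "math" package is imported):

seed := 42
req := openai.ChatCompletionRequest{
	Model: openai.GPT3Dot5Turbo,
	Seed:  &seed, // compare resp.SystemFingerprint across calls to detect backend changes
	// math.SmallestNonzeroFloat32 behaves like 0 but is not dropped by
	// omitempty (see the related issue below).
	Temperature: math.SmallestNonzeroFloat32,
	Messages: []openai.ChatCompletionMessage{
		{Role: openai.ChatMessageRoleUser, Content: "Hello!"},
	},
}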
Related issues:
The omitempty option in the request structs generates an incorrect request when a parameter is 0.
No, go-openai does not offer a feature to count tokens, and there are no plans to provide such a feature in the future. However, if there's a way to implement a token counting feature with zero dependencies, it might be possible to merge that feature into go-openai. Otherwise, it would be more appropriate to implement it in a dedicated library or repository (a crude illustrative sketch follows the related issue below).

For counting tokens, you might find the following links helpful:

Related issues:
Is it possible to join the implementation of GPT3 Tokenizer
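For a rough sense of scale only, a zero-dependency approximation is sometimes used in place of a real tokenizer. This heuristic is not part of go-openai and is only loosely accurate for English text:

// estimateTokens is a crude heuristic, not a real tokenizer: a common
// rule of thumb is roughly 4 characters per token for English text.
func estimateTokens(s string) int {
	return len(s)/4 + 1
}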
By following the contributing guidelines, we hope to ensure that your contributions are made smoothly and efficiently.

We want to take a moment to express our deepest gratitude to the contributors and sponsors of this project:

To all of you: thank you. You've helped us achieve more than we ever imagined possible. Can't wait to see where we go next, together!