diff --git a/code/config.example.yaml b/code/config.example.yaml
index c249d65..b4dc899 100644
--- a/code/config.example.yaml
+++ b/code/config.example.yaml
@@ -21,6 +21,8 @@ API_URL: https://api.openai.com
 HTTP_PROXY: ""
 # Timeout, in seconds, for regular HTTP requests to OpenAI; defaults to 550 seconds if not configured
 OPENAI_HTTP_CLIENT_TIMEOUT:
+# OpenAI model to use; for more options, see the /v1/chat/completions section of https://platform.openai.com/docs/models/model-endpoint-compatibility
+OPENAI_MODEL: gpt-3.5-turbo
 # AZURE OPENAI
 AZURE_ON: true # set to true to use Azure rather than OpenAI
diff --git a/code/handlers/handler.go b/code/handlers/handler.go
index 6ed789a..d9a0ea8 100644
--- a/code/handlers/handler.go
+++ b/code/handlers/handler.go
@@ -101,7 +101,6 @@ func (m MessageHandler) msgReceivedHandler(ctx context.Context, event *larkim.P2
 		&MessageAction{
 			chatgpt: chatgpt.NewGpt3(&m.config),
 		}, //message handling
-	}
 	chain(data, actions...)
diff --git a/code/handlers/msg.go b/code/handlers/msg.go
index fb7f545..9a7526e 100644
--- a/code/handlers/msg.go
+++ b/code/handlers/msg.go
@@ -675,8 +675,11 @@ func updateTextCard(ctx context.Context, msg string,
 	}
 	return nil
 }
-func updateFinalCard(ctx context.Context, msg string,
-	msgId *string) error {
+func updateFinalCard(
+	ctx context.Context,
+	msg string,
+	msgId *string,
+) error {
 	newCard, _ := newSendCardWithOutHeader(
 		withMainText(msg))
 	err := PatchCard(ctx, msgId, newCard)
diff --git a/code/initialization/config.go b/code/initialization/config.go
index ce8fd12..8e2079e 100644
--- a/code/initialization/config.go
+++ b/code/initialization/config.go
@@ -36,6 +36,7 @@ type Config struct {
 	AccessControlEnable                bool
 	AccessControlMaxCountPerUserPerDay int
 	OpenAIHttpClientTimeOut            int
+	OpenaiModel                        string
 }
 
 var (
@@ -94,6 +95,7 @@ func LoadConfig(cfg string) *Config {
 		AccessControlEnable:                getViperBoolValue("ACCESS_CONTROL_ENABLE", false),
 		AccessControlMaxCountPerUserPerDay: getViperIntValue("ACCESS_CONTROL_MAX_COUNT_PER_USER_PER_DAY", 0),
 		OpenAIHttpClientTimeOut:            getViperIntValue("OPENAI_HTTP_CLIENT_TIMEOUT", 550),
+		OpenaiModel:                        getViperStringValue("OPENAI_MODEL", "gpt-3.5-turbo"),
 	}
 
 	return config
diff --git a/code/services/chatgpt/gpt3.go b/code/services/chatgpt/gpt3.go
index 8cd85e9..6870e51 100644
--- a/code/services/chatgpt/gpt3.go
+++ b/code/services/chatgpt/gpt3.go
@@ -58,7 +58,7 @@ func (c *ChatGPT) StreamChatWithHistory(ctx context.Context, msg []openai.ChatCo
 	client := openai.NewClientWithConfig(config)
 	//pp.Printf("client: %v", client)
 	req := openai.ChatCompletionRequest{
-		Model:       openai.GPT3Dot5Turbo,
+		Model:       c.config.OpenaiModel,
 		Messages:    msg,
 		N:           1,
 		Temperature: 0.7,
diff --git a/code/services/openai/common.go b/code/services/openai/common.go
index b8609d5..5e6e7cd 100644
--- a/code/services/openai/common.go
+++ b/code/services/openai/common.go
@@ -37,6 +37,7 @@ type ChatGPT struct {
 	Lb          *loadbalancer.LoadBalancer
 	ApiKey      []string
 	ApiUrl      string
+	ApiModel    string
 	HttpProxy   string
 	Platform    PlatForm
 	AzureConfig AzureConfig
@@ -224,6 +225,7 @@ func NewChatGPT(config initialization.Config) *ChatGPT {
 		ApiUrl:    config.OpenaiApiUrl,
 		HttpProxy: config.HttpProxy,
 		Platform:  platform,
+		ApiModel:  config.OpenaiModel,
 		AzureConfig: AzureConfig{
 			BaseURL:      AzureApiUrlV1,
 			ResourceName: config.AzureResourceName,
diff --git a/code/services/openai/gpt3.go b/code/services/openai/gpt3.go
index 1d51fbf..bdb39b8 100644
--- a/code/services/openai/gpt3.go
+++ b/code/services/openai/gpt3.go
@@ -7,7 +7,6 @@ import (
 const (
	maxTokens   = 2000
 	temperature = 0.7
-	engine      = "gpt-3.5-turbo"
 )
 
 type Messages struct {
@@ -45,7 +44,7 @@ type ChatGPTRequestBody struct {
 func (gpt *ChatGPT) Completions(msg []Messages) (resp Messages, err error) {
 	requestBody := ChatGPTRequestBody{
-		Model:       engine,
+		Model:       gpt.ApiModel,
 		Messages:    msg,
 		MaxTokens:   maxTokens,
 		Temperature: temperature,
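
Usage sketch (an illustrative note, not part of the patch): with this change the chat model is read from the OPENAI_MODEL key instead of a hard-coded constant. A minimal config.yaml override might look like the following, where gpt-4 stands in for any model listed under /v1/chat/completions on the model-endpoint-compatibility page:

# Hypothetical config.yaml excerpt; gpt-4 is only an example model name,
# not a recommendation. Any /v1/chat/completions-compatible model works.
OPENAI_MODEL: gpt-4

If the key is omitted, getViperStringValue("OPENAI_MODEL", "gpt-3.5-turbo") supplies the old default, so existing deployments keep the previous behavior unchanged.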