From ebe617b7335dafbb42a760b6dcc2da5e09949242 Mon Sep 17 00:00:00 2001 From: lyf <1910527151@qq.com> Date: Wed, 16 Oct 2024 16:24:02 +0800 Subject: [PATCH] fix max_completion_tokens --- app/client/platforms/openai.ts | 13 ++++++++----- app/store/config.ts | 4 ++-- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/app/client/platforms/openai.ts b/app/client/platforms/openai.ts index a2263361143..014e7ff0884 100644 --- a/app/client/platforms/openai.ts +++ b/app/client/platforms/openai.ts @@ -63,7 +63,7 @@ export interface RequestPayload { presence_penalty: number; frequency_penalty: number; top_p: number; - max_tokens?: number; + max_completion_tokens?: number; } export interface DalleRequestPayload { @@ -228,13 +228,16 @@ export class ChatGPTApi implements LLMApi { presence_penalty: !isO1 ? modelConfig.presence_penalty : 0, frequency_penalty: !isO1 ? modelConfig.frequency_penalty : 0, top_p: !isO1 ? modelConfig.top_p : 1, - // max_tokens: Math.max(modelConfig.max_tokens, 1024), - // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore. + // max_completion_tokens: Math.max(modelConfig.max_completion_tokens, 1024), + // Please do not ask me why not send max_completion_tokens, no reason, this param is just shit, I dont want to explain anymore. 
}; - // add max_tokens to vision model + // add max_completion_tokens to vision model if (visionModel) { - requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000); + requestPayload["max_completion_tokens"] = Math.max( + modelConfig.max_completion_tokens, + 4000, + ); } } diff --git a/app/store/config.ts b/app/store/config.ts index f9ddce4a80c..336d590a272 100644 --- a/app/store/config.ts +++ b/app/store/config.ts @@ -65,7 +65,7 @@ export const DEFAULT_CONFIG = { providerName: "OpenAI" as ServiceProvider, temperature: 0.5, top_p: 1, - max_tokens: 4000, + max_completion_tokens: 4000, presence_penalty: 0, frequency_penalty: 0, sendMemory: true, @@ -127,7 +127,7 @@ export const ModalConfigValidator = { model(x: string) { return x as ModelType; }, - max_tokens(x: number) { + max_completion_tokens(x: number) { return limitNumber(x, 0, 512000, 1024); }, presence_penalty(x: number) {