diff --git a/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentModelSelection/index.jsx b/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentModelSelection/index.jsx
index 4e0a9592c1..a16e1689c0 100644
--- a/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentModelSelection/index.jsx
+++ b/frontend/src/pages/WorkspaceSettings/AgentConfig/AgentModelSelection/index.jsx
@@ -6,12 +6,19 @@ import { useTranslation } from "react-i18next";
 import { Link, useParams } from "react-router-dom";
 
 // These models do NOT support function calling
+// and therefore are not supported for agents.
 function supportedModel(provider, model = "") {
   if (provider !== "openai") return true;
   return (
-    ["gpt-3.5-turbo-0301", "gpt-4-turbo-2024-04-09", "gpt-4-turbo"].includes(
-      model
-    ) === false
+    [
+      "gpt-3.5-turbo-0301",
+      "gpt-4-turbo-2024-04-09",
+      "gpt-4-turbo",
+      "o1-preview",
+      "o1-preview-2024-09-12",
+      "o1-mini",
+      "o1-mini-2024-09-12",
+    ].includes(model) === false
   );
 }
 
diff --git a/server/utils/AiProviders/modelMap.js b/server/utils/AiProviders/modelMap.js
index 99d78dc142..84e480b317 100644
--- a/server/utils/AiProviders/modelMap.js
+++ b/server/utils/AiProviders/modelMap.js
@@ -52,6 +52,10 @@ const MODEL_MAP = {
     "gpt-4-turbo-preview": 128_000,
     "gpt-4": 8_192,
     "gpt-4-32k": 32_000,
+    "o1-preview": 128_000,
+    "o1-preview-2024-09-12": 128_000,
+    "o1-mini": 128_000,
+    "o1-mini-2024-09-12": 128_000,
   },
   deepseek: {
     "deepseek-chat": 128_000,
diff --git a/server/utils/AiProviders/openAi/index.js b/server/utils/AiProviders/openAi/index.js
index b0e52dc2b9..4f6bc2219b 100644
--- a/server/utils/AiProviders/openAi/index.js
+++ b/server/utils/AiProviders/openAi/index.js
@@ -23,6 +23,14 @@ class OpenAiLLM {
     this.defaultTemp = 0.7;
   }
 
+  /**
+   * Check if the model is an o1 model.
+   * @returns {boolean}
+   */
+  get isO1Model() {
+    return this.model.startsWith("o1");
+  }
+
   #appendContext(contextTexts = []) {
     if (!contextTexts || !contextTexts.length) return "";
     return (
@@ -36,6 +44,7 @@
   }
 
   streamingEnabled() {
+    if (this.isO1Model) return false;
     return "streamGetChatCompletion" in this;
   }
 
@@ -98,8 +107,11 @@
     userPrompt = "",
     attachments = [], // This is the specific attachment for only this prompt
   }) {
+    // o1 models do not support the "system" role; as a workaround,
+    // we use the "user" role in its place for now.
+    // https://community.openai.com/t/o1-models-do-not-support-system-role-in-chat-completion/953880
     const prompt = {
-      role: "system",
+      role: this.isO1Model ? "user" : "system",
       content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
     };
     return [
@@ -122,7 +134,7 @@
       .create({
         model: this.model,
         messages,
-        temperature,
+        temperature: this.isO1Model ? 1 : temperature, // o1 models only accept temperature 1
       })
       .catch((e) => {
         throw new Error(e.message);
       });
@@ -143,7 +155,7 @@
       model: this.model,
       stream: true,
       messages,
-      temperature,
+      temperature: this.isO1Model ? 1 : temperature, // o1 models only accept temperature 1
     });
     return streamRequest;
   }
diff --git a/server/utils/helpers/customModels.js b/server/utils/helpers/customModels.js
index f3430cecc0..086144bfe9 100644
--- a/server/utils/helpers/customModels.js
+++ b/server/utils/helpers/customModels.js
@@ -128,7 +128,7 @@ async function openAiModels(apiKey = null) {
   });
 
   const gpts = allModels
-    .filter((model) => model.id.startsWith("gpt"))
+    .filter((model) => model.id.startsWith("gpt") || model.id.startsWith("o1"))
    .filter(
      (model) => !model.id.includes("vision") && !model.id.includes("instruct")
    )
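
Reviewer note: the standalone sketch below (not part of the patch) shows how the three o1 branches introduced above combine into a single chat-completion payload. The helper names buildO1AwareRequest and isO1Model are hypothetical, chosen for illustration; only the startsWith("o1") check mirrors the isO1Model getter added in this diff.

// Minimal sketch, assuming the same o1 constraints the patch handles:
// no "system" role, fixed temperature of 1, and no streaming.
function isO1Model(model) {
  return model.startsWith("o1"); // same check as the isO1Model getter above
}

function buildO1AwareRequest({ model, systemPrompt, userPrompt, temperature }) {
  const o1 = isO1Model(model);
  return {
    model,
    messages: [
      // o1 models reject the "system" role, so the system prompt is
      // downgraded to a "user" message (see constructPrompt above).
      { role: o1 ? "user" : "system", content: systemPrompt },
      { role: "user", content: userPrompt },
    ],
    temperature: o1 ? 1 : temperature, // o1 models only accept temperature 1
    // Streaming is never requested for o1: streamingEnabled() returns
    // false, so callers fall back to the non-streaming path.
  };
}

// Example:
// buildO1AwareRequest({ model: "o1-mini", systemPrompt: "Be terse.", userPrompt: "Hi", temperature: 0.7 })
// => { model: "o1-mini", messages: [{ role: "user", ... }, ...], temperature: 1 }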