diff --git a/app/components/chat/UserMessage.tsx b/app/components/chat/UserMessage.tsx index 62e054b21..2b1343b5f 100644 --- a/app/components/chat/UserMessage.tsx +++ b/app/components/chat/UserMessage.tsx @@ -1,7 +1,7 @@ // @ts-nocheck // Preventing TS checks with files presented in the video for a better presentation. import { modificationsRegex } from '~/utils/diff'; -import { MODEL_REGEX } from '~/utils/constants'; +import { MODEL_REGEX, PROVIDER_REGEX } from '~/utils/constants'; import { Markdown } from './Markdown'; interface UserMessageProps { @@ -17,5 +17,5 @@ export function UserMessage({ content }: UserMessageProps) { } function sanitizeUserMessage(content: string) { - return content.replace(modificationsRegex, '').replace(MODEL_REGEX, '').trim(); + return content.replace(modificationsRegex, '').replace(MODEL_REGEX, '').replace(PROVIDER_REGEX, '').trim(); } diff --git a/app/lib/.server/llm/stream-text.ts b/app/lib/.server/llm/stream-text.ts index 70bb3a917..4c4d74ae4 100644 --- a/app/lib/.server/llm/stream-text.ts +++ b/app/lib/.server/llm/stream-text.ts @@ -4,7 +4,7 @@ import { streamText as _streamText, convertToCoreMessages } from 'ai'; import { getModel } from '~/lib/.server/llm/model'; import { MAX_TOKENS } from './constants'; import { getSystemPrompt } from './prompts'; -import { MODEL_LIST, DEFAULT_MODEL, DEFAULT_PROVIDER } from '~/utils/constants'; +import { MODEL_LIST, DEFAULT_MODEL, DEFAULT_PROVIDER, MODEL_REGEX, PROVIDER_REGEX } from '~/utils/constants'; interface ToolResult<Name extends string, Args, Result> { toolCallId: string; @@ -25,21 +25,18 @@ export type Messages = Message[]; export type StreamingOptions = Omit<Parameters<typeof _streamText>[0], 'model'>; function extractPropertiesFromMessage(message: Message): { model: string; provider: string; content: string } { - const modelRegex = /^\[Model: (.*?)\]\n\n/; - const providerRegex = /\[Provider: (.*?)\]\n\n/; - // Extract model - const modelMatch = message.content.match(modelRegex); + const modelMatch = message.content.match(MODEL_REGEX); const model 
= modelMatch ? modelMatch[1] : DEFAULT_MODEL; // Extract provider - const providerMatch = message.content.match(providerRegex); + const providerMatch = message.content.match(PROVIDER_REGEX); const provider = providerMatch ? providerMatch[1] : DEFAULT_PROVIDER; // Remove model and provider lines from content const cleanedContent = message.content - .replace(modelRegex, '') - .replace(providerRegex, '') + .replace(MODEL_REGEX, '') + .replace(PROVIDER_REGEX, '') .trim(); return { model, provider, content: cleanedContent }; diff --git a/app/utils/constants.ts b/app/utils/constants.ts index d003df496..ab7fe1bd2 100644 --- a/app/utils/constants.ts +++ b/app/utils/constants.ts @@ -4,6 +4,7 @@ export const WORK_DIR_NAME = 'project'; export const WORK_DIR = `/home/${WORK_DIR_NAME}`; export const MODIFICATIONS_TAG_NAME = 'bolt_file_modifications'; export const MODEL_REGEX = /^\[Model: (.*?)\]\n\n/; +export const PROVIDER_REGEX = /\[Provider: (.*?)\]\n\n/; export const DEFAULT_MODEL = 'claude-3-5-sonnet-20240620'; export const DEFAULT_PROVIDER = 'Anthropic'; @@ -19,7 +20,7 @@ const staticModels: ModelInfo[] = [ { name: 'qwen/qwen-110b-chat', label: 'OpenRouter Qwen 110b Chat (OpenRouter)', provider: 'OpenRouter' }, { name: 'cohere/command', label: 'Cohere Command (OpenRouter)', provider: 'OpenRouter' }, { name: 'gemini-1.5-flash-latest', label: 'Gemini 1.5 Flash', provider: 'Google' }, - { name: 'gemini-1.5-pro-latest', label: 'Gemini 1.5 Pro', provider: 'Google'}, + { name: 'gemini-1.5-pro-latest', label: 'Gemini 1.5 Pro', provider: 'Google' }, { name: 'llama-3.1-70b-versatile', label: 'Llama 3.1 70b (Groq)', provider: 'Groq' }, { name: 'llama-3.1-8b-instant', label: 'Llama 3.1 8b (Groq)', provider: 'Groq' }, { name: 'llama-3.2-11b-vision-preview', label: 'Llama 3.2 11b (Groq)', provider: 'Groq' }, @@ -32,8 +33,8 @@ const staticModels: ModelInfo[] = [ { name: 'gpt-4-turbo', label: 'GPT-4 Turbo', provider: 'OpenAI' }, { name: 'gpt-4', label: 'GPT-4', provider: 'OpenAI' }, { 
name: 'gpt-3.5-turbo', label: 'GPT-3.5 Turbo', provider: 'OpenAI' }, - { name: 'deepseek-coder', label: 'Deepseek-Coder', provider: 'Deepseek'}, - { name: 'deepseek-chat', label: 'Deepseek-Chat', provider: 'Deepseek'}, + { name: 'deepseek-coder', label: 'Deepseek-Coder', provider: 'Deepseek' }, + { name: 'deepseek-chat', label: 'Deepseek-Chat', provider: 'Deepseek' }, { name: 'open-mistral-7b', label: 'Mistral 7B', provider: 'Mistral' }, { name: 'open-mixtral-8x7b', label: 'Mistral 8x7B', provider: 'Mistral' }, { name: 'open-mixtral-8x22b', label: 'Mistral 8x22B', provider: 'Mistral' }, @@ -54,11 +55,11 @@ const getOllamaBaseUrl = () => { // Frontend always uses localhost return defaultBaseUrl; } - + // Backend: Check if we're running in Docker const isDocker = process.env.RUNNING_IN_DOCKER === 'true'; - - return isDocker + + return isDocker ? defaultBaseUrl.replace("localhost", "host.docker.internal") : defaultBaseUrl; }; @@ -80,32 +81,32 @@ } async function getOpenAILikeModels(): Promise<ModelInfo[]> { - try { - const base_url =import.meta.env.OPENAI_LIKE_API_BASE_URL || ""; - if (!base_url) { + try { + const base_url = import.meta.env.OPENAI_LIKE_API_BASE_URL || ""; + if (!base_url) { return []; - } - const api_key = import.meta.env.OPENAI_LIKE_API_KEY ?? ""; - const response = await fetch(`${base_url}/models`, { - headers: { - Authorization: `Bearer ${api_key}`, - } - }); + } + const api_key = import.meta.env.OPENAI_LIKE_API_KEY ?? 
""; + const response = await fetch(`${base_url}/models`, { + headers: { + Authorization: `Bearer ${api_key}`, + } + }); const res = await response.json() as any; return res.data.map((model: any) => ({ name: model.id, label: model.id, provider: 'OpenAILike', })); - }catch (e) { - return [] - } + } catch (e) { + return [] + } } async function initializeModelList(): Promise<void> { const ollamaModels = await getOllamaModels(); const openAiLikeModels = await getOpenAILikeModels(); - MODEL_LIST = [...ollamaModels,...openAiLikeModels, ...staticModels]; + MODEL_LIST = [...ollamaModels, ...openAiLikeModels, ...staticModels]; } initializeModelList().then(); export { getOllamaModels, getOpenAILikeModels, initializeModelList };