diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 000000000..2f8f89bc3
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,26 @@
+# Ignore Git and GitHub files
+.git
+.github/
+
+# Ignore Husky configuration files
+.husky/
+
+# Ignore documentation and metadata files
+CONTRIBUTING.md
+LICENSE
+README.md
+
+# Ignore environment examples and sensitive info
+.env
+*.local
+*.example
+
+# Ignore node modules, logs and cache files
+**/*.log
+**/node_modules
+**/dist
+**/build
+**/.cache
+logs
+dist-ssr
+.DS_Store
diff --git a/.env.example b/.env.example
index 234dba955..83f29aedc 100644
--- a/.env.example
+++ b/.env.example
@@ -1,4 +1,4 @@
-# Rename this file to .env.local once you have filled in the below environment variables!
+# Rename this file to .env once you have filled in the below environment variables!

# Get your GROQ API Key here -
# https://console.groq.com/keys
@@ -43,5 +43,10 @@ OPENAI_LIKE_API_KEY=
# You only need this environment variable set if you want to use Mistral models
MISTRAL_API_KEY=

+# Get your xAI API key
+# https://x.ai/api
+# You only need this environment variable set if you want to use xAI models
+XAI_API_KEY=
+
# Include this environment variable if you want more logging for debugging locally
VITE_LOG_LEVEL=debug
diff --git a/.gitignore b/.gitignore
index f141cc0ef..69d279030 100644
--- a/.gitignore
+++ b/.gitignore
@@ -29,3 +29,5 @@ dist-ssr
*.vars
.wrangler
_worker.bundle
+
+Modelfile
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index ad3b1951a..1bf3bfb77 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -8,6 +8,7 @@ First off, thank you for considering contributing to Bolt.new! This fork aims to
- [Pull Request Guidelines](#pull-request-guidelines)
- [Coding Standards](#coding-standards)
- [Development Setup](#development-setup)
+- [Deployment with Docker](#docker-deployment-documentation)
- [Project Structure](#project-structure)

## Code of Conduct
@@ -88,11 +89,113 @@ pnpm run dev

**Note**: You will need Google Chrome Canary to run this locally if you use Chrome! It's an easy install and a good browser for web development anyway.

-## Questions?
+## Testing

-For any questions about contributing, please:
-1. Check existing documentation
-2. Search through issues
-3. Create a new issue with the question label
+Run the test suite with:

-Thank you for contributing to Bolt.new! 🚀
\ No newline at end of file
+```bash
+pnpm test
+```
+
+## Deployment
+
+To deploy the application to Cloudflare Pages:
+
+```bash
+pnpm run deploy
+```
+
+Make sure you have the necessary permissions and Wrangler is correctly configured for your Cloudflare account.
+
+# Docker Deployment Documentation
+
+This guide outlines various methods for building and deploying the application using Docker.
+
+## Build Methods
+
+### 1. Using Helper Scripts
+
+NPM scripts are provided for convenient building:
+
+```bash
+# Development build
+npm run dockerbuild
+
+# Production build
+npm run dockerbuild:prod
+```
+
+### 2. Direct Docker Build Commands
+
+You can use Docker's target feature to specify the build environment:
+
+```bash
+# Development build
+docker build . --target bolt-ai-development
+
+# Production build
+docker build . --target bolt-ai-production
+```
+
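+If you build with the direct commands above, it helps to also tag the image so the `docker run` commands later in this guide can find it. A suggested invocation (the `bolt-ai:*` tags here are assumptions that match the image names used elsewhere in this documentation):
+
+```bash
+# Tag the development build so it can be run as bolt-ai:development
+docker build . --target bolt-ai-development -t bolt-ai:development
+
+# Tag the production build so it can be run as bolt-ai:production
+docker build . --target bolt-ai-production -t bolt-ai:production
+```
+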
+### 3. Docker Compose with Profiles
+
+Use Docker Compose profiles to manage different environments:
+
+```bash
+# Development environment
+docker-compose --profile development up
+
+# Production environment
+docker-compose --profile production up
+```
+
+## Running the Application
+
+After building using any of the methods above, run the container with:
+
+```bash
+# Development
+docker run -p 5173:5173 --env-file .env.local bolt-ai:development
+
+# Production
+docker run -p 5173:5173 --env-file .env.local bolt-ai:production
+```
+
+## Deployment with Coolify
+
+[Coolify](https://github.com/coollabsio/coolify) provides a straightforward deployment process:
+
+1. Import your Git repository as a new project
+2. Select your target environment (development/production)
+3. Choose "Docker Compose" as the Build Pack
+4. Configure deployment domains
+5. Set the custom start command:
+   ```bash
+   docker compose --profile production up
+   ```
+6. Configure environment variables
+   - Add necessary AI API keys
+   - Adjust other environment variables as needed
+7. Deploy the application
+
+## VS Code Integration
+
+The `docker-compose.yaml` configuration is compatible with VS Code dev containers:
+
+1. Open the command palette in VS Code
+2. Select the dev container configuration
+3. Choose the "development" profile from the context menu
+
+A minimal dev-container configuration is sketched below.
+
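+The repository does not ship a dev-container definition, so the following is only a sketch of a hypothetical `.devcontainer/devcontainer.json`. The `bolt-ai-dev` service name and the `/app` workspace folder come from `docker-compose.yaml`; everything else is an assumption:
+
+```json
+{
+  "name": "bolt-ai development",
+  "dockerComposeFile": "../docker-compose.yaml",
+  "service": "bolt-ai-dev",
+  "workspaceFolder": "/app",
+  "forwardPorts": [5173]
+}
+```
+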
+## Environment Files
+
+Ensure you have the appropriate `.env.local` file configured before running the containers. This file should contain:
+- API keys
+- Environment-specific configurations
+- Other required environment variables
+
+## Notes
+
+- Port 5173 is exposed and mapped for both development and production environments
+- Environment variables are loaded from `.env.local`
+- Different profiles (development/production) can be used for different deployment scenarios
+- The configuration supports both local development and production deployment
diff --git a/Dockerfile b/Dockerfile
index de88d11e9..3b5a74cde 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,29 +1,67 @@
-# Use an official Node.js runtime as the base image
-FROM node:20.15.1
+ARG BASE=node:20.18.0
+FROM ${BASE} AS base

-# Set the working directory in the container
WORKDIR /app

-# Install pnpm
-RUN npm install -g pnpm@9.4.0
+# Install dependencies (this step is cached as long as the dependencies don't change)
+COPY package.json pnpm-lock.yaml ./

-# Copy package.json and pnpm-lock.yaml (if available)
-COPY package.json pnpm-lock.yaml* ./
+RUN corepack enable pnpm && pnpm install

-# Install dependencies
-RUN pnpm install
-
-# Copy the rest of the application code
+# Copy the rest of your app's source code
COPY . .

-# Build the application
-RUN pnpm run build
+# Expose the port the app runs on
+EXPOSE 5173
+
+# Production image
+FROM base AS bolt-ai-production
+
+# Define environment variables with default values or let them be overridden
+ARG GROQ_API_KEY
+ARG OPENAI_API_KEY
+ARG ANTHROPIC_API_KEY
+ARG OPEN_ROUTER_API_KEY
+ARG GOOGLE_GENERATIVE_AI_API_KEY
+ARG OLLAMA_API_BASE_URL
+ARG VITE_LOG_LEVEL=debug
+
+ENV WRANGLER_SEND_METRICS=false \
+    GROQ_API_KEY=${GROQ_API_KEY} \
+    OPENAI_API_KEY=${OPENAI_API_KEY} \
+    ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY} \
+    OPEN_ROUTER_API_KEY=${OPEN_ROUTER_API_KEY} \
+    GOOGLE_GENERATIVE_AI_API_KEY=${GOOGLE_GENERATIVE_AI_API_KEY} \
+    OLLAMA_API_BASE_URL=${OLLAMA_API_BASE_URL} \
+    VITE_LOG_LEVEL=${VITE_LOG_LEVEL}
+
+# Pre-configure wrangler to disable metrics
+RUN mkdir -p /root/.config/.wrangler && \
+    echo '{"enabled":false}' > /root/.config/.wrangler/metrics.json
+
+RUN pnpm run build
+
+CMD ["pnpm", "run", "dockerstart"]
+
+# Development image
+FROM base AS bolt-ai-development

-# Make sure bindings.sh is executable
-RUN chmod +x bindings.sh
+# Define the same environment variables for development
+ARG GROQ_API_KEY
+ARG OPENAI_API_KEY
+ARG ANTHROPIC_API_KEY
+ARG OPEN_ROUTER_API_KEY
+ARG GOOGLE_GENERATIVE_AI_API_KEY
+ARG OLLAMA_API_BASE_URL
+ARG VITE_LOG_LEVEL=debug

-# Expose the port the app runs on (adjust if you specified a different port)
-EXPOSE 3000
+ENV GROQ_API_KEY=${GROQ_API_KEY} \
+    OPENAI_API_KEY=${OPENAI_API_KEY} \
+    ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY} \
+    OPEN_ROUTER_API_KEY=${OPEN_ROUTER_API_KEY} \
+    GOOGLE_GENERATIVE_AI_API_KEY=${GOOGLE_GENERATIVE_AI_API_KEY} \
+    OLLAMA_API_BASE_URL=${OLLAMA_API_BASE_URL} \
+    VITE_LOG_LEVEL=${VITE_LOG_LEVEL}

-# Start the application
-CMD ["pnpm", "run", "start"]
\ No newline at end of file
+RUN mkdir -p /app/run
+CMD pnpm run dev --host
diff --git a/README.md b/README.md
index 262682204..fb70e7566 100644
--- a/README.md
+++ b/README.md
@@ -20,6 +20,7 @@ This fork of Bolt.new allows you to choose the LLM that you use for each prompt!
- ✅ Publish projects directly to GitHub (@goncaloalves)
- ⬜ Prevent Bolt from rewriting files as often (Done but need to review PR still)
- ⬜ **HIGH PRIORITY** - Better prompting for smaller LLMs (code window sometimes doesn't start)
+- ⬜ **HIGH PRIORITY** - Load local projects into the app
- ⬜ **HIGH PRIORITY** - Attach images to prompts
- ⬜ **HIGH PRIORITY** - Run agents in the backend as opposed to a single model call
- ⬜ LM Studio Integration
@@ -27,12 +28,17 @@ This fork of Bolt.new allows you to choose the LLM that you use for each prompt!
- ⬜ Azure Open AI API Integration
- ⬜ HuggingFace Integration
- ⬜ Perplexity Integration
+- ⬜ Vertex AI Integration
+- ⬜ Cohere Integration
- ⬜ Deploy directly to Vercel/Netlify/other similar platforms
-- ⬜ Load local projects into the app
- ⬜ Ability to revert code to earlier version
- ⬜ Prompt caching
+- ⬜ Better prompt enhancing
- ⬜ Ability to enter API keys in the UI
- ⬜ Have LLM plan the project in a MD file for better results/transparency
+- ⬜ VSCode Integration with git-like confirmations
+- ⬜ Upload documents for knowledge - UI design templates, a code base to reference coding style, etc.
+- ⬜ Voice prompting

# Bolt.new: AI-Powered Full-Stack Web Development in the Browser

Whether you’re an experienced developer, a PM, or a designer, Bolt.new allows

For developers interested in building their own AI-powered development tools with WebContainers, check out the open-source Bolt codebase in this repo!
-## Prerequisites
+## Setup

-Before you begin, ensure you have the following installed:
+Many of you are new to installing software from GitHub. If you have any installation troubles, reach out and submit an "issue" using the links above, or feel free to enhance this documentation by forking it, editing the instructions, and opening a pull request.

-- Node.js (v20.15.1)
-- pnpm (v9.4.0)
+1. Install Git from https://git-scm.com/downloads

-## Setup
+2. Install Node.js from https://nodejs.org/en/download/

-1. Clone the repository (if you haven't already):
+Pay attention to the installer notes after completion.

-```bash
+On all operating systems, the path to Node.js should automatically be added to your system path. But you can check your path if you want to be sure. On Windows, you can search for "edit the system environment variables", select "Environment Variables..." once you are in the system properties, and then check for a path to Node in your "Path" system variable. On a Mac or Linux machine, the installer will tell you to check that /usr/local/bin is in your $PATH. To determine whether /usr/local/bin is included in $PATH, open your Terminal and run:
+
+```
+echo $PATH
+```
+
+If you see /usr/local/bin in the output, then you're good to go.
+
+3. Clone the repository (if you haven't already) by opening a Terminal window (or CMD with admin permissions) and then typing in this:
+
+```
git clone https://github.com/coleam00/bolt.new-any-llm.git
```

-2. Install dependencies:
+4. Rename .env.example to .env and add your LLM API keys. You will find this file on a Mac at "[your name]/bolt.new-any-llm/.env.example". For Windows and Linux the path will be similar.

-```bash
-pnpm install
+![image](https://github.com/user-attachments/assets/7e6a532c-2268-401f-8310-e8d20c731328)
+
+If you can't see the file indicated above, it's likely you can't view hidden files. On Mac, open a Terminal window and enter the commands below (the second command relaunches Finder so the change takes effect). On Windows, you will see the hidden files option in File Explorer Settings. A quick Google search will help you if you are stuck here.
+
+```
+defaults write com.apple.finder AppleShowAllFiles YES
+killall Finder
```

-3. Rename `.env.example` to .env.local and add your LLM API keys (you only have to set the ones you want to use and Ollama doesn't need an API key because it runs locally on your computer):
+**NOTE**: you only have to set the keys for the providers you want to use, and Ollama doesn't need an API key because it runs locally on your computer:
+
+Get your GROQ API Key here: https://console.groq.com/keys
+
+Get your OpenAI API Key by following these instructions: https://help.openai.com/en/articles/4936850-where-do-i-find-my-openai-api-key
+
+Get your Anthropic API Key in your account settings: https://console.anthropic.com/settings/keys

```
GROQ_API_KEY=XXX
@@ -90,7 +115,98 @@ Optionally, you can set the debug level:
VITE_LOG_LEVEL=debug
```

-**Important**: Never commit your `.env.local` file to version control. It's already included in .gitignore.
+**Important**: Never commit your `.env` file to version control. It's already included in .gitignore.
+
+## Run with Docker
+
+Prerequisites:
+
+Git and Node.js as mentioned above, as well as Docker: https://www.docker.com/
+
+### 1a. Using Helper Scripts
+
+NPM scripts are provided for convenient building:
+
+```bash
+# Development build
+npm run dockerbuild
+
+# Production build
+npm run dockerbuild:prod
+```
+
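+For reference, these helper scripts are thin wrappers around `docker build`; they are defined in `package.json` (abridged):
+
+```json
+{
+  "scripts": {
+    "dockerbuild": "docker build -t bolt-ai:development -t bolt-ai:latest --target bolt-ai-development .",
+    "dockerbuild:prod": "docker build -t bolt-ai:production -t bolt-ai:latest --target bolt-ai-production ."
+  }
+}
+```
+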
+### 1b. Direct Docker Build Commands (alternative to using NPM scripts)
+
+You can use Docker's target feature to specify the build environment instead of using NPM scripts if you wish:
+
+```bash
+# Development build
+docker build . --target bolt-ai-development
+
+# Production build
+docker build . --target bolt-ai-production
+```
+
+### 2. Docker Compose with Profiles to Run the Container
+
+Use Docker Compose profiles to manage different environments:
+
+```bash
+# Development environment
+docker-compose --profile development up
+
+# Production environment
+docker-compose --profile production up
+```
+
+When you run the Docker Compose command with the development profile, any changes you
+make to the code on your machine will automatically be reflected in the site running
+in the container (i.e. hot reloading still applies!).
+
+## Run Without Docker
+
+1. Install dependencies using Terminal (or CMD in Windows with admin permissions):
+
+```
+pnpm install
+```
+
+If you get an error saying "command not found: pnpm" or similar, that means pnpm isn't installed. You can install it with:
+
+```
+sudo npm install -g pnpm
+```
+
+2. Start the application with the command:
+
+```bash
+pnpm run dev
+```
+
+## Super Important Note on Running Ollama Models
+
+By default, Ollama models have a context window of only 2048 tokens, even for large models that can easily handle far more.
+That is not a large enough window to handle the Bolt.new/oTToDev prompt! You have to create a version of any model you want
+to use that specifies a larger context window. Luckily, it's super easy to do.
+
+All you have to do is:
+
+- Create a file called "Modelfile" (no file extension) anywhere on your computer
+- Put in the two lines:
+
+```
+FROM [Ollama model ID such as qwen2.5-coder:7b]
+PARAMETER num_ctx 32768
+```
+
+- Run the command:
+
+```
+ollama create [your new model ID, can be whatever you want (example: qwen2.5-coder-extra-ctx:7b)] -f Modelfile
+```
+
+Now you have a new Ollama model that isn't limited to the default 2048-token context window.
+You'll see this new model in the list of Ollama models along with all the others you pulled!
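+
+To confirm the model was created, you can list your local models (the model ID below assumes the example name used above):
+
+```
+# The new model should appear alongside the models you pulled
+ollama list
+
+# Optionally inspect it, including the num_ctx parameter
+ollama show qwen2.5-coder-extra-ctx:7b
+```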

## Adding New LLMs:
diff --git a/app/lib/.server/llm/api-key.ts b/app/lib/.server/llm/api-key.ts
index e2764c1dc..9293a5694 100644
--- a/app/lib/.server/llm/api-key.ts
+++ b/app/lib/.server/llm/api-key.ts
@@ -25,6 +25,8 @@ export function getAPIKey(cloudflareEnv: Env, provider: string) {
      return env.MISTRAL_API_KEY || cloudflareEnv.MISTRAL_API_KEY;
    case "OpenAILike":
      return env.OPENAI_LIKE_API_KEY || cloudflareEnv.OPENAI_LIKE_API_KEY;
+    case "xAI":
+      return env.XAI_API_KEY || cloudflareEnv.XAI_API_KEY;
    default:
      return "";
  }
@@ -35,7 +37,11 @@ export function getBaseURL(cloudflareEnv: Env, provider: string) {
    case 'OpenAILike':
      return env.OPENAI_LIKE_API_BASE_URL || cloudflareEnv.OPENAI_LIKE_API_BASE_URL;
    case 'Ollama':
-      return env.OLLAMA_API_BASE_URL || cloudflareEnv.OLLAMA_API_BASE_URL || "http://localhost:11434";
+      let baseUrl = env.OLLAMA_API_BASE_URL || cloudflareEnv.OLLAMA_API_BASE_URL || "http://localhost:11434";
+      if (env.RUNNING_IN_DOCKER === 'true') {
+        baseUrl = baseUrl.replace("localhost", "host.docker.internal");
+      }
+      return baseUrl;
    default:
      return "";
  }
diff --git a/app/lib/.server/llm/model.ts b/app/lib/.server/llm/model.ts
index 390d57aeb..b3a208b34 100644
--- a/app/lib/.server/llm/model.ts
+++ b/app/lib/.server/llm/model.ts
@@ -58,7 +58,10 @@ export function getGroqModel(apiKey: string, model: string) {
}

export function getOllamaModel(baseURL: string, model: string) {
-  let Ollama = ollama(model);
+  let Ollama = ollama(model, {
+    numCtx: 32768,
+  });
+
  Ollama.config.baseURL = `${baseURL}/api`;
  return Ollama;
}
@@ -80,6 +83,15 @@ export function getOpenRouterModel(apiKey: string, model: string) {
  return openRouter.chat(model);
}

+export function getXAIModel(apiKey: string, model: string) {
+  const openai = createOpenAI({
+    baseURL: 'https://api.x.ai/v1',
+    apiKey,
+  });
+
+  return openai(model);
+}
+
export function getModel(provider: string, model: string, env: Env) {
  const apiKey = getAPIKey(env, provider);
  const baseURL = getBaseURL(env, provider);
@@ -101,6 +113,8 @@
      return getDeepseekModel(apiKey, model)
    case 'Mistral':
      return getMistralModel(apiKey, model);
+    case 'xAI':
+      return getXAIModel(apiKey, model);
    default:
      return getOllamaModel(baseURL, model);
  }
diff --git a/app/utils/constants.ts b/app/utils/constants.ts
index b48cb3442..b3120f91d 100644
--- a/app/utils/constants.ts
+++ b/app/utils/constants.ts
@@ -15,6 +15,7 @@ const staticModels: ModelInfo[] = [
  { name: 'deepseek/deepseek-coder', label: 'Deepseek-Coder V2 236B (OpenRouter)', provider: 'OpenRouter' },
  { name: 'google/gemini-flash-1.5', label: 'Google Gemini Flash 1.5 (OpenRouter)', provider: 'OpenRouter' },
  { name: 'google/gemini-pro-1.5', label: 'Google Gemini Pro 1.5 (OpenRouter)', provider: 'OpenRouter' },
+  { name: 'x-ai/grok-beta', label: "xAI Grok Beta (OpenRouter)", provider: 'OpenRouter' },
  { name: 'mistralai/mistral-nemo', label: 'OpenRouter Mistral Nemo (OpenRouter)', provider: 'OpenRouter' },
  { name: 'qwen/qwen-110b-chat', label: 'OpenRouter Qwen 110b Chat (OpenRouter)', provider: 'OpenRouter' },
  { name: 'cohere/command', label: 'Cohere Command (OpenRouter)', provider: 'OpenRouter' },
@@ -32,6 +33,7 @@ const staticModels: ModelInfo[] = [
  { name: 'gpt-4-turbo', label: 'GPT-4 Turbo', provider: 'OpenAI' },
  { name: 'gpt-4', label: 'GPT-4', provider: 'OpenAI' },
  { name: 'gpt-3.5-turbo', label: 'GPT-3.5 Turbo', provider: 'OpenAI' },
+  { name: 'grok-beta', label: "xAI Grok Beta", provider: 'xAI' },
  { name: 'deepseek-coder', label: 'Deepseek-Coder', provider: 'Deepseek'},
  { name: 'deepseek-chat', label: 'Deepseek-Chat', provider: 'Deepseek'},
  { name: 'open-mistral-7b', label: 'Mistral 7B', provider: 'Mistral' },
@@ -47,9 +49,25 @@ const staticModels: ModelInfo[] = [

export let MODEL_LIST: ModelInfo[] = [...staticModels];

+const getOllamaBaseUrl = () => {
+  const defaultBaseUrl = import.meta.env.OLLAMA_API_BASE_URL || 'http://localhost:11434';
+  // Check if we're in the browser
+  if (typeof window !== 'undefined') {
+    // Frontend always uses localhost
+    return defaultBaseUrl;
+  }
+
+  // Backend: Check if we're running in Docker
+  const isDocker = process.env.RUNNING_IN_DOCKER === 'true';
+
+  return isDocker
+    ? defaultBaseUrl.replace("localhost", "host.docker.internal")
+    : defaultBaseUrl;
+};
+
async function getOllamaModels(): Promise<ModelInfo[]> {
  try {
-    const base_url = import.meta.env.OLLAMA_API_BASE_URL || "http://localhost:11434";
+    const base_url = getOllamaBaseUrl();
    const response = await fetch(`${base_url}/api/tags`);
    const data = await response.json() as OllamaApiResponse;
diff --git a/docker-compose.yaml b/docker-compose.yaml
new file mode 100644
index 000000000..c391dd732
--- /dev/null
+++ b/docker-compose.yaml
@@ -0,0 +1,61 @@
+services:
+  bolt-ai:
+    image: bolt-ai:production
+    build:
+      context: .
+      dockerfile: Dockerfile
+      target: bolt-ai-production
+    ports:
+      - "5173:5173"
+    env_file: ".env.local"
+    environment:
+      - NODE_ENV=production
+      - COMPOSE_PROFILES=production
+      # Not strictly needed, but serves as hints for Coolify
+      - PORT=5173
+      - GROQ_API_KEY=${GROQ_API_KEY}
+      - OPENAI_API_KEY=${OPENAI_API_KEY}
+      - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY}
+      - OPEN_ROUTER_API_KEY=${OPEN_ROUTER_API_KEY}
+      - GOOGLE_GENERATIVE_AI_API_KEY=${GOOGLE_GENERATIVE_AI_API_KEY}
+      - OLLAMA_API_BASE_URL=${OLLAMA_API_BASE_URL}
+      - VITE_LOG_LEVEL=${VITE_LOG_LEVEL:-debug}
+      - RUNNING_IN_DOCKER=true
+    extra_hosts:
+      - "host.docker.internal:host-gateway"
+    command: pnpm run dockerstart
+    profiles:
+      - production # This service only runs in the production profile
+
+  bolt-ai-dev:
+    image: bolt-ai:development
+    build:
+      target: bolt-ai-development
+    environment:
+      - NODE_ENV=development
+      - VITE_HMR_PROTOCOL=ws
+      - VITE_HMR_HOST=localhost
+      - VITE_HMR_PORT=5173
+      - CHOKIDAR_USEPOLLING=true
+      - WATCHPACK_POLLING=true
+      - PORT=5173
+      - GROQ_API_KEY=${GROQ_API_KEY}
+      - OPENAI_API_KEY=${OPENAI_API_KEY}
+      - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY}
+      - OPEN_ROUTER_API_KEY=${OPEN_ROUTER_API_KEY}
+      - GOOGLE_GENERATIVE_AI_API_KEY=${GOOGLE_GENERATIVE_AI_API_KEY}
+      - OLLAMA_API_BASE_URL=${OLLAMA_API_BASE_URL}
+      - VITE_LOG_LEVEL=${VITE_LOG_LEVEL:-debug}
+      - RUNNING_IN_DOCKER=true
+    extra_hosts:
+      - "host.docker.internal:host-gateway"
+    volumes:
+      - type: bind
+        source: .
+        target: /app
+        consistency: cached
+      - /app/node_modules
+    ports:
+      - "5173:5173" # Same port, no conflict as only one runs at a time
+    command: pnpm run dev --host 0.0.0.0
+    profiles: ["development", "default"] # Make development the default profile
diff --git a/docker-compose.yml b/docker-compose.yml
deleted file mode 100644
index 21ef4c097..000000000
--- a/docker-compose.yml
+++ /dev/null
@@ -1,24 +0,0 @@
-services:
-  bolt-app:
-    build:
-      context: .
-      dockerfile: Dockerfile
-    ports:
-      - "3000:3000"
-    environment:
-      - NODE_ENV=production
-      # Add any other environment variables your app needs
-      # - OPENAI_API_KEY=${OPENAI_API_KEY}
-      # - ANTHROPIC_API_KEY=${ANTHROPIC_API_KEY}
-      # - GROQ_API_KEY=${GROQ_API_KEY}
-      # - OPEN_ROUTER_API_KEY=${OPEN_ROUTER_API_KEY}
-    volumes:
-      # This volume is for development purposes, allowing live code updates
-      # Comment out or remove for production
-      - .:/app
-      # This volume is to prevent node_modules from being overwritten by the above volume
-      - /app/node_modules
-    command: pnpm run start
-
-volumes:
-  node_modules:
\ No newline at end of file
diff --git a/package.json b/package.json
index edb2b8dad..3f93eb606 100644
--- a/package.json
+++ b/package.json
@@ -13,7 +13,11 @@
    "test:watch": "vitest",
    "lint": "eslint --cache --cache-location ./node_modules/.cache/eslint .",
    "lint:fix": "npm run lint -- --fix",
-    "start": "bindings=$(./bindings.sh) && wrangler pages dev ./build/client $bindings --ip 0.0.0.0 --port 3000",
+    "start": "bindings=$(./bindings.sh) && wrangler pages dev ./build/client $bindings",
+    "dockerstart": "bindings=$(./bindings.sh) && wrangler pages dev ./build/client $bindings --ip 0.0.0.0 --port 5173 --no-show-interactive-dev-session",
+    "dockerrun": "docker run -it -d --name bolt-ai-live -p 5173:5173 --env-file .env.local bolt-ai",
+    "dockerbuild:prod": "docker build -t bolt-ai:production -t bolt-ai:latest --target bolt-ai-production .",
+    "dockerbuild": "docker build -t bolt-ai:development -t bolt-ai:latest --target bolt-ai-development .",
    "typecheck": "tsc",
    "typegen": "wrangler types",
    "preview": "pnpm run build && pnpm run start"
@@ -110,5 +114,6 @@
  },
  "resolutions": {
    "@typescript-eslint/utils": "^8.0.0-alpha.30"
-  }
+  },
+  "packageManager": "pnpm@9.12.2+sha512.22721b3a11f81661ae1ec68ce1a7b879425a1ca5b991c975b074ac220b187ce56c708fe5db69f4c962c989452eee76c82877f4ee80f474cebd61ee13461b6228"
}
diff --git a/wrangler.toml b/wrangler.toml
index 09f2e3a88..93c41604e 100644
--- a/wrangler.toml
+++ b/wrangler.toml
@@ -3,3 +3,4 @@ name = "bolt"
compatibility_flags = ["nodejs_compat"]
compatibility_date = "2024-07-01"
pages_build_output_dir = "./build/client"
+send_metrics = false