diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..a6c9bb9
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,6 @@
+FROM python:3.9-slim
+WORKDIR /app
+COPY requirements.txt .
+RUN pip install --no-cache-dir -r requirements.txt
+COPY . .
+CMD ["python", "-m", "routellm.openai_server", "--routers", "mf", "--strong-model", "gpt-4-1106-preview", "--weak-model", "anyscale/mistralai/Mixtral-8x7B-Instruct-v0.1"]
diff --git a/README.md b/README.md
index 45cbe52..a486192 100644
--- a/README.md
+++ b/README.md
@@ -30,6 +30,25 @@ cd RouteLLM
 pip install -e .[serve,eval]
 ```
 
+## Docker
+
+To run RouteLLM inside Docker:
+```
+git clone https://github.com/lm-sys/RouteLLM.git
+cd RouteLLM
+```
+Add your keys and models in the **environment** section of docker-compose.yml:
+```
+environment:
+  - OPENAI_API_KEY=your_strong_model_key
+  - ANYSCALE_API_KEY=your_weak_model_key
+command: python -m routellm.openai_server --routers mf --strong-model openai/gpt-4o --weak-model anyscale/mistralai/Mixtral-8x7B-Instruct-v0.1
+```
+Now bring the service up with docker-compose:
+```
+docker-compose up
+```
+
 ## Quickstart
 
 Let's walkthrough replacing an existing OpenAI client to route queries between LLMs instead of using only a single model.
diff --git a/docker-compose.yml b/docker-compose.yml
new file mode 100644
index 0000000..7d232b5
--- /dev/null
+++ b/docker-compose.yml
@@ -0,0 +1,15 @@
+version: '3.8'
+
+services:
+  routellm:
+    build:
+      context: .
+      dockerfile: Dockerfile
+    ports:
+      - 6060:6060
+    volumes:
+      - ./config.example.yaml:/app/config.example.yaml # Mount the config file
+    environment:
+      - OPENAI_API_KEY=your_openai_api_key # Replace with your actual OpenAI API key
+      - GROQ_API_KEY=your_groq_api_key # Replace with your actual GROQ_API_KEY, ANYSCALE_API_KEY, etc.
+    command: python -m routellm.openai_server --routers mf --strong-model openai/gpt-4o --weak-model groq/llama3-8b-8192
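
Once `docker-compose up` reports the server as running, the container can be exercised like any OpenAI-compatible endpoint. The sketch below is illustrative rather than part of the diff: it assumes the compose service is reachable on the host at port 6060 (as published above) and reuses the calibrated `mf` threshold `0.11593` shown in the project README; substitute whatever threshold you calibrated for your own traffic.

```python
# Minimal sketch: querying the dockerized RouteLLM server through its
# OpenAI-compatible endpoint. Assumes the compose service is up and the
# port mapping 6060:6060 from docker-compose.yml above.
from openai import OpenAI

client = OpenAI(
    api_key="no_api_key",                 # the local server does not validate this value
    base_url="http://localhost:6060/v1",  # port published in docker-compose.yml
)

response = client.chat.completions.create(
    # "router-mf-0.11593" selects the mf router with threshold 0.11593;
    # the threshold controls the cost/quality trade-off between the
    # strong and weak models configured in the server command.
    model="router-mf-0.11593",
    messages=[{"role": "user", "content": "Hello!"}],
)
print(response.choices[0].message.content)
```

Because the model string encodes both the router and its threshold, a single running container can serve several cost/quality trade-offs: clients just vary the `router-<name>-<threshold>` model string per request, with no server restart needed.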