# router_config_template.yaml — forked from BerriAI/litellm
---
# Global settings for the litellm module
litellm_settings:
  drop_params: true  # drop unsupported params instead of erroring
  # failure_callbacks: ["sentry"]

# Model-specific settings
model_list:  # refer to https://docs.litellm.ai/docs/routing
  # Two entries share model_name "gpt-3.5-turbo" on purpose: the router
  # load-balances requests for that name across both deployments.
  - model_name: gpt-3.5-turbo
    litellm_params:  # parameters for litellm.completion()
      model: azure/chatgpt-v-2  # azure/<your-deployment-name>
      api_key: your_azure_api_key
      api_version: your_azure_api_version
      api_base: your_azure_api_base
      tpm: 240000  # [OPTIONAL] tokens/min — used to load balance between deployments
      rpm: 1800  # [OPTIONAL] requests/min — used to load balance between deployments
  - model_name: mistral
    litellm_params:
      model: ollama/mistral
      api_base: my_ollama_api_base
  - model_name: gpt-3.5-turbo
    litellm_params:
      model: gpt-3.5-turbo
      api_key: your_openai_api_key
      tpm: 1000000  # [OPTIONAL] REPLACE with your openai tpm
      rpm: 9000  # [OPTIONAL] REPLACE with your openai rpm

# Redis connection settings — presumably used by the router for shared
# cross-instance state; confirm against the litellm proxy docs.
environment_variables:
  REDIS_HOST: your_redis_host
  REDIS_PASSWORD: your_redis_password
  REDIS_PORT: your_redis_port