-
Notifications
You must be signed in to change notification settings - Fork 0
/
Makefile
133 lines (114 loc) · 3.92 KB
/
Makefile
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
#!make
PROJECT_NAME = momconnect-aaq
# Re-sources conda's shell hook, activates base, then activates the env whose
# name is appended at the call site: `$(CONDA_ACTIVATE) <env-name>`.
CONDA_ACTIVATE=source $$(conda info --base)/etc/profile.d/conda.sh ; conda activate ; conda activate
ENDPOINT_URL = localhost:8000
# Capture the caller's OPENAI_API_KEY once at parse time (`:=` so the shell
# call runs exactly once, not on every expansion).
OPENAI_API_KEY := $(shell printenv OPENAI_API_KEY)

# None of these targets produce a file of the same name; declare them phony
# so a stray file (e.g. `clean`) can never shadow them and they always run.
.PHONY: clean fresh-env setup-dev teardown-dev setup-db teardown-db \
        setup-redis teardown-redis setup-llm-proxy teardown-llm-proxy \
        build-embeddings-arm setup-embeddings-arm setup-embeddings \
        teardown-embeddings
## Main targets
# Remove Python bytecode files and per-tool cache directories from the tree.
clean:
	find . -type f -name "*.py[co]" -delete
	find . -type d -name "__pycache__" -delete
	for cache in .pytest_cache .mypy_cache .ruff_cache; do \
		find . -type d -name "$$cache" -exec rm -rf {} + ; \
	done
# Note: Run `make fresh-env psycopg2-binary=true` to manually replace psycopg with psycopg2-binary
# Recreate the project's conda environment from scratch: remove it, create a
# fresh Python 3.10 env, install backend + dev requirements, and install the
# pre-commit hooks. `python=3.10` (single =) matches the latest 3.10.x patch
# release; the previous `python==3.10` demands the literal version string
# "3.10", which conda frequently cannot resolve.
fresh-env:
	conda remove --name $(PROJECT_NAME) --all -y
	conda create --name $(PROJECT_NAME) python=3.10 -y
	$(CONDA_ACTIVATE) $(PROJECT_NAME); \
	pip install -r core_backend/requirements.txt --ignore-installed; \
	pip install -r requirements-dev.txt --ignore-installed; \
	pre-commit install
	if [ "$(psycopg2-binary)" = "true" ]; then \
	$(CONDA_ACTIVATE) $(PROJECT_NAME); \
	pip uninstall -y psycopg2==2.9.9; \
	pip install psycopg2-binary==2.9.9; \
	fi
# Dev requirements
# Aggregate targets: bring up / tear down the local dev services.
# setup-embeddings-arm is commented out — presumably opt-in because it needs
# the locally built arm64 image from `build-embeddings-arm`; confirm.
setup-dev: setup-db setup-redis #setup-embeddings-arm
# NOTE(review): teardown-dev removes embeddings even though setup-dev does not
# start them by default — confirm this asymmetry is intended.
teardown-dev: teardown-db teardown-redis teardown-embeddings
## Helper targets
# guard-%: abort the build unless the variable named by the pattern stem is
# non-empty. Use as a prerequisite, e.g. `my-target: guard-SOME_VAR`.
guard-%:
	@test -n '${${*}}' || { echo 'ERROR: environment variable $* not set' && exit 1; }
# Dev db
# Recreate a local pgvector-enabled Postgres container, then run the alembic
# migrations against it.
# NOTE(review): `docker system prune -f` removes ALL stopped containers and
# dangling images system-wide, not just this project's — confirm intended.
# The `source` builtin below assumes the recipe shell is bash — TODO confirm
# (make's default /bin/sh may not support it on all platforms).
setup-db:
	-@docker stop postgres-local
	-@docker rm postgres-local
	@docker system prune -f
	@sleep 2
	@docker run --name postgres-local \
		--env-file "$(CURDIR)/deployment/docker-compose/.core_backend.env" \
		-p 5432:5432 \
		-d pgvector/pgvector:pg16
	set -a && \
	source "$(CURDIR)/deployment/docker-compose/.base.env" && \
	source "$(CURDIR)/deployment/docker-compose/.core_backend.env" && \
	set +a && \
	cd core_backend && \
	python -m alembic upgrade head
# Stop and remove the local Postgres container. The `-` prefixes let the
# target succeed even when the container is already stopped or absent,
# matching the tolerant `-@docker stop`/`-@docker rm` used by setup-db.
teardown-db:
	-@docker stop postgres-local
	-@docker rm postgres-local
# Recreate a local Redis container for dev use.
# NOTE(review): `docker system prune -f` removes ALL stopped containers and
# dangling images system-wide, not just this project's — confirm intended.
setup-redis:
	-@docker stop redis-local
	-@docker rm redis-local
	@docker system prune -f
	@sleep 2
	@docker run --name redis-local \
		-p 6379:6379 \
		-d redis:6.0-alpine
# Stop and remove the local Redis container.
# Fix: the rule header previously read `make teardown-redis:`, which declared
# TWO targets (`make` and `teardown-redis`) sharing this recipe.
teardown-redis:
	@docker stop redis-local
	@docker rm redis-local
# Dev LiteLLM Proxy server
# Recreate the LiteLLM proxy container with the repo's config and GCP
# credentials mounted in. Because it is started with `--rm`, the container
# is auto-removed as soon as it stops (so a later `docker rm` will error).
# NOTE(review): `docker system prune -f` removes ALL stopped containers and
# dangling images system-wide, not just this project's — confirm intended.
setup-llm-proxy:
	-@docker stop litellm-proxy
	-@docker rm litellm-proxy
	@docker system prune -f
	@sleep 2
	@docker pull ghcr.io/berriai/litellm:main-v1.40.10
	@docker run \
		--name litellm-proxy \
		--rm \
		-v "$(CURDIR)/deployment/docker-compose/litellm_proxy_config.yaml":/app/config.yaml \
		-v "$(CURDIR)/deployment/docker-compose/.gcp_credentials.json":/app/credentials.json \
		--env-file "$(CURDIR)/deployment/docker-compose/.litellm_proxy.env" \
		-p 4000:4000 \
		-d ghcr.io/berriai/litellm:main-v1.40.10 \
		--config /app/config.yaml --detailed_debug
# Stop the LiteLLM proxy container. setup-llm-proxy starts it with `--rm`, so
# it is auto-removed on stop and the explicit `docker rm` here always failed;
# the `-` prefixes make both commands best-effort instead of hard failures.
teardown-llm-proxy:
	-@docker stop litellm-proxy
	-@docker rm litellm-proxy
# Build the huggingface text-embeddings-inference image for linux/arm64
# (e.g. Apple Silicon) from a fresh upstream checkout, then delete the clone.
# Fix: removed a dead `@cd ..` line — each recipe line runs in its own shell,
# so the `cd` affected nothing (and no prior line had changed directory).
# NOTE(review): the clone fails if `text-embeddings-inference/` already exists
# from an interrupted run — confirm whether a preceding `rm -rf` is wanted.
build-embeddings-arm:
	@git clone https://github.com/huggingface/text-embeddings-inference.git
	@docker build text-embeddings-inference -f text-embeddings-inference/Dockerfile \
		--platform=linux/arm64 \
		-t text-embeddings-inference-arm
	@rm -rf text-embeddings-inference
# Recreate the embeddings container from the locally built arm64 image.
# Fix: the recipe passed `--api-key $(LITELLM_API_KEY)` while the guard
# prerequisite validates HUGGINGFACE_EMBEDDINGS_API_KEY — the guard was
# checking a variable the recipe never used. Now uses the guarded variable.
setup-embeddings-arm: guard-HUGGINGFACE_MODEL guard-HUGGINGFACE_EMBEDDINGS_API_KEY
	-@docker stop huggingface-embeddings
	-@docker rm huggingface-embeddings
	@docker system prune -f
	@sleep 2
	@docker run \
		--name huggingface-embeddings \
		-p 8081:80 \
		-v "$(PWD)/data:/data" \
		-d text-embeddings-inference-arm \
		--model-id $(HUGGINGFACE_MODEL) \
		--api-key $(HUGGINGFACE_EMBEDDINGS_API_KEY)
# Recreate the embeddings container from the upstream CPU image.
# Fixes: (1) passed `--api-key $(LITELLM_API_KEY)` while the guard validates
# HUGGINGFACE_EMBEDDINGS_API_KEY — now uses the guarded variable; (2) added
# `-d` so the container detaches like every other setup-* target here —
# without it `docker run` blocks make in the foreground.
setup-embeddings: guard-HUGGINGFACE_MODEL guard-HUGGINGFACE_EMBEDDINGS_API_KEY
	-@docker stop huggingface-embeddings
	-@docker rm huggingface-embeddings
	@docker system prune -f
	@sleep 2
	@docker run \
		--name huggingface-embeddings \
		-p 8081:80 \
		-v "$(PWD)/data:/data" \
		-d --pull always ghcr.io/huggingface/text-embeddings-inference:cpu-1.5 \
		--model-id $(HUGGINGFACE_MODEL) \
		--api-key $(HUGGINGFACE_EMBEDDINGS_API_KEY)
# Stop and remove the embeddings container. The `-` prefixes let the target
# succeed when the container is already stopped or absent, matching the
# tolerant stop/rm handling used by the setup-* targets.
teardown-embeddings:
	-@docker stop huggingface-embeddings
	-@docker rm huggingface-embeddings