# docker-compose.yml
version: "3.8"
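# Values such as ${PG_PORT} are interpolated by Docker Compose at parse time from
# the shell environment or a .env file next to this compose file.
# Typical local invocations (assumed examples; adjust profiles as needed):
#   docker compose up --build
#   docker compose --profile analytics --profile mock-services up --build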
services:
  postgres:
    image: postgis/postgis:12-3.0-alpine
    restart: unless-stopped
    ports:
      - ${PG_PORT}:5432
    command: "postgres -c listen_addresses='*'"
    healthcheck:
      test: pg_isready --username=${PG_USERNAME} --dbname=${PG_DATABASE} --quiet
      interval: 15s
      timeout: 5s
      retries: 5
    environment:
      POSTGRES_DB: ${PG_DATABASE}
      POSTGRES_PASSWORD: ${PG_PASSWORD}
      POSTGRES_USER: ${PG_USERNAME}
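  # Metabase analytics dashboard: optional, only started when the "analytics"
  # profile is enabled (e.g. `docker compose --profile analytics up`).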
  metabase:
    # if changing, also check infrastructure/application/index.ts
    image: metabase/metabase:v0.50.26
    profiles: ["analytics"]
    ports:
      - "${METABASE_PORT}:${METABASE_PORT}"
    depends_on:
      postgres:
        condition: service_healthy
    restart: unless-stopped
    healthcheck:
      # check the port Metabase actually listens on (set via MB_JETTY_PORT below)
      test:
        [
          "CMD",
          "wget",
          "--spider",
          "--quiet",
          "http://localhost:${METABASE_PORT}/api/health",
        ]
      interval: 15s
      timeout: 3s
      retries: 10
    environment:
      MB_DB_TYPE: postgres
      MB_DB_CONNECTION_URI: postgres://${PG_USERNAME}:${PG_PASSWORD}@postgres/${PG_DATABASE}
      MB_JETTY_PORT: "${METABASE_PORT}"
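  # Hasura GraphQL engine: serves a GraphQL API over the postgres database;
  # project metadata and migrations are mounted from ./hasura.planx.uk.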
  hasura:
    build:
      context: ./hasura.planx.uk
    depends_on:
      postgres:
        condition: service_healthy
    healthcheck:
      # https://github.com/hasura/graphql-engine/issues/3371#issuecomment-902515112
      test:
        [
          "CMD",
          "bash",
          "-c",
          "exec 5<>/dev/tcp/127.0.0.1/8080 && echo -e 'GET /healthz HTTP/1.1\n\n' >&5 && cat <&5 | head -n 1 | grep 200",
        ]
      interval: 15s
      timeout: 3s
      retries: 10
    volumes:
      - "./hasura.planx.uk/metadata:/hasura-metadata"
      - "./hasura.planx.uk/migrations:/hasura-migrations"
    restart: unless-stopped
    environment:
      HASURA_GRAPHQL_ADMIN_SECRET: ${HASURA_GRAPHQL_ADMIN_SECRET}
      HASURA_GRAPHQL_CORS_DOMAIN: ${HASURA_GRAPHQL_CORS_DOMAIN}
      HASURA_GRAPHQL_DATABASE_URL: postgres://${PG_USERNAME}:${PG_PASSWORD}@postgres/${PG_DATABASE}
      HASURA_GRAPHQL_ENABLE_CONSOLE: "true"
      HASURA_GRAPHQL_ENABLED_LOG_TYPES: startup, http-log, webhook-log, websocket-log, query-log
      HASURA_GRAPHQL_JWT_SECRET: '{ "type": "HS256", "key": "${JWT_SECRET}" }'
      HASURA_GRAPHQL_UNAUTHORIZED_ROLE: "public"
      HASURA_PLANX_API_KEY: ${HASURA_PLANX_API_KEY}
      HASURA_PLANX_API_URL: ${HASURA_PLANX_API_URL}
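  # Proxy in front of hasura: downstream services (api, sharedb) reach Hasura
  # through this container rather than connecting to it directly.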
  hasura-proxy:
    build:
      context: ./hasura.planx.uk/proxy
    depends_on:
      hasura:
        condition: service_healthy
    restart: always
    ports:
      - ${HASURA_PROXY_PORT}:${HASURA_PROXY_PORT}
    environment:
      HASURA_PROXY_PORT: ${HASURA_PROXY_PORT}
      HASURA_NETWORK_LOCATION: "hasura"
    healthcheck:
      test: ["CMD", "wget", "--spider", "--quiet", "localhost:${HASURA_PROXY_PORT}/healthz"]
      interval: 15s
      timeout: 3s
      retries: 3
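  # PlanX API server (./api.planx.uk): talks to Hasura via hasura-proxy and to the
  # external integrations configured through the environment variables below.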
  api:
    restart: unless-stopped
    build:
      context: ./api.planx.uk
      target: production
    depends_on:
      hasura-proxy:
        condition: service_healthy
    ports:
      - ${API_PORT}:${API_PORT}
    healthcheck:
      test:
        ["CMD", "wget", "--spider", "--quiet", "http://localhost:${API_PORT}"]
      interval: 30s
      timeout: 3s
      retries: 5
    environment:
      AIRBRAKE_PROJECT_ID: ${AIRBRAKE_PROJECT_ID}
      AIRBRAKE_PROJECT_KEY: ${AIRBRAKE_PROJECT_KEY}
      API_URL_EXT: ${API_URL_EXT}
      APP_ENVIRONMENT: ${APP_ENVIRONMENT}
      AWS_ACCESS_KEY: ${AWS_ACCESS_KEY}
      AWS_S3_BUCKET: ${AWS_S3_BUCKET}
      AWS_S3_REGION: ${AWS_S3_REGION}
      AWS_SECRET_KEY: ${AWS_SECRET_KEY}
      # This is controlled via generateCORSAllowList() in staging and production
      CORS_ALLOWLIST: ${EDITOR_URL_EXT}, ${API_URL_EXT}, https://login.live.com, https://login.microsoftonline.com
      EDITOR_URL_EXT: ${EDITOR_URL_EXT}
      ENCRYPTION_KEY: ${ENCRYPTION_KEY}
      FILE_API_KEY_BARNET: ${FILE_API_KEY_BARNET}
      FILE_API_KEY_LAMBETH: ${FILE_API_KEY_LAMBETH}
      FILE_API_KEY_SOUTHWARK: ${FILE_API_KEY_SOUTHWARK}
      FILE_API_KEY_EPSOM_EWELL: ${FILE_API_KEY_EPSOM_EWELL}
      FILE_API_KEY_MEDWAY: ${FILE_API_KEY_MEDWAY}
      FILE_API_KEY_GATESHEAD: ${FILE_API_KEY_GATESHEAD}
      FILE_API_KEY_NEXUS: ${FILE_API_KEY_NEXUS}
      FILE_API_KEY: ${FILE_API_KEY}
      GOOGLE_CLIENT_ID: ${GOOGLE_CLIENT_ID}
      GOOGLE_CLIENT_SECRET: ${GOOGLE_CLIENT_SECRET}
      MICROSOFT_CLIENT_ID: ${MICROSOFT_CLIENT_ID}
      MICROSOFT_CLIENT_SECRET: ${MICROSOFT_CLIENT_SECRET}
      GOVUK_NOTIFY_API_KEY: ${GOVUK_NOTIFY_API_KEY}
      HASURA_GRAPHQL_ADMIN_SECRET: ${HASURA_GRAPHQL_ADMIN_SECRET}
      HASURA_GRAPHQL_URL: http://hasura-proxy:${HASURA_PROXY_PORT}/v1/graphql
      HASURA_METADATA_URL: http://hasura-proxy:${HASURA_PROXY_PORT}/v1/metadata
      HASURA_PLANX_API_KEY: ${HASURA_PLANX_API_KEY}
      HASURA_SCHEMA_URL: http://hasura-proxy:${HASURA_PROXY_PORT}/v2/query
      IDOX_NEXUS_CLIENT: ${IDOX_NEXUS_CLIENT}
      IDOX_NEXUS_SUBMISSION_URL: ${IDOX_NEXUS_SUBMISSION_URL}
      IDOX_NEXUS_TOKEN_URL: ${IDOX_NEXUS_TOKEN_URL}
      JWT_SECRET: ${JWT_SECRET}
      MAPBOX_ACCESS_TOKEN: ${MAPBOX_ACCESS_TOKEN}
      METABASE_API_KEY: ${METABASE_API_KEY}
      METABASE_URL_EXT: ${METABASE_URL_EXT}
      MINIO_PORT: ${MINIO_PORT}
      ORDNANCE_SURVEY_API_KEY: ${ORDNANCE_SURVEY_API_KEY}
      PORT: ${API_PORT}
      SESSION_SECRET: ${SESSION_SECRET}
      SLACK_WEBHOOK_URL: ${SLACK_WEBHOOK_URL}
      UNIFORM_SUBMISSION_URL: ${UNIFORM_SUBMISSION_URL}
      UNIFORM_TOKEN_URL: ${UNIFORM_TOKEN_URL}
      # Local authority config
      # Lambeth
      UNIFORM_CLIENT_LAMBETH: ${UNIFORM_CLIENT_LAMBETH}
      # Southwark
      UNIFORM_CLIENT_SOUTHWARK: ${UNIFORM_CLIENT_SOUTHWARK}
      # Buckinghamshire
      UNIFORM_CLIENT_AYLESBURY_VALE: ${UNIFORM_CLIENT_AYLESBURY_VALE}
      UNIFORM_CLIENT_CHILTERN: ${UNIFORM_CLIENT_CHILTERN}
      UNIFORM_CLIENT_WYCOMBE: ${UNIFORM_CLIENT_WYCOMBE}
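  # ShareDB backend (real-time collaborative editing); connects directly to
  # postgres via PG_URL and sits behind the same JWT secret as the API.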
  sharedb:
    restart: unless-stopped
    build:
      context: ./sharedb.planx.uk
      target: production
    volumes:
      - "./sharedb.planx.uk:/sharedb"
      - "/sharedb/node_modules"
    depends_on:
      hasura-proxy:
        condition: service_healthy
    ports:
      - ${SHAREDB_PORT}:8000
    environment:
      JWT_SECRET: ${JWT_SECRET}
      PG_URL: postgres://${PG_USERNAME}:${PG_PASSWORD}@postgres/${PG_DATABASE}
  # used as an S3 service mock
  minio:
    image: minio/minio:RELEASE.2021-08-31T05-46-54Z
    profiles: ["mock-services"]
    ports:
      - ${MINIO_PORT}:9000
      - ${MINIO_ADMIN_PORT}:9001
    volumes:
      - ./minio-data:/data
    environment:
      # use these credentials to log in to the dashboard or to upload files as an S3 replacement
      MINIO_ROOT_USER: ${AWS_ACCESS_KEY}
      MINIO_ROOT_PASSWORD: ${AWS_SECRET_KEY}
    entrypoint: sh
    command: -c "mkdir -p /data/${AWS_S3_BUCKET} && minio server --console-address ':9001' /data"
    deploy:
      restart_policy:
        condition: on-failure
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
      interval: 30s
      timeout: 20s
      retries: 3
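# Example: start the MinIO mock instead of real S3 (assumed invocation; requires
# MINIO_PORT, MINIO_ADMIN_PORT and the AWS_* variables to be set in .env):
#   docker compose --profile mock-services up minio
# The MinIO web console is then available on ${MINIO_ADMIN_PORT} (container port 9001).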