version: '3.7'

## Network created by this file
networks:
  laradock_backend:
    driver: bridge
    attachable: true

# ## Pre-existing external network
# networks:
#   laradock_backend:
#     external: true
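
## Use exactly one of the two network blocks above: the first creates the
## bridge network itself; the commented-out alternative attaches to a
## network created outside this file (e.g. by laradock).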
services:
  etcd1:
    image: "quay.io/coreos/etcd:v3.3"
    container_name: etcd1
    command: etcd -name etcd1 -data-dir=/etcddata -advertise-client-urls http://0.0.0.0:2379 -listen-client-urls http://0.0.0.0:2379 -listen-peer-urls http://0.0.0.0:2380 -initial-cluster-token etcd-cluster -initial-cluster "etcd1=http://etcd1:2380,etcd2=http://etcd2:2380,etcd3=http://etcd3:2380" -initial-cluster-state new
    ports:
      - "23791:2379"
      - "23801:2380"
    volumes:
      - /root/.laradock/data/etcddata1/data:/etcddata
    networks:
      - laradock_backend
  etcd2:
    image: "quay.io/coreos/etcd:v3.3"
    container_name: etcd2
    command: etcd -name etcd2 -data-dir=/etcddata -advertise-client-urls http://0.0.0.0:2379 -listen-client-urls http://0.0.0.0:2379 -listen-peer-urls http://0.0.0.0:2380 -initial-cluster-token etcd-cluster -initial-cluster "etcd1=http://etcd1:2380,etcd2=http://etcd2:2380,etcd3=http://etcd3:2380" -initial-cluster-state new
    ports:
      - "23792:2379"
      - "23802:2380"
    volumes:
      - /root/.laradock/data/etcddata2/data:/etcddata
    networks:
      - laradock_backend
  etcd3:
    image: "quay.io/coreos/etcd:v3.3"
    container_name: etcd3
    command: etcd -name etcd3 -data-dir=/etcddata -advertise-client-urls http://0.0.0.0:2379 -listen-client-urls http://0.0.0.0:2379 -listen-peer-urls http://0.0.0.0:2380 -initial-cluster-token etcd-cluster -initial-cluster "etcd1=http://etcd1:2380,etcd2=http://etcd2:2380,etcd3=http://etcd3:2380" -initial-cluster-state new
    ports:
      - "23793:2379"
      - "23803:2380"
    volumes:
      - /root/.laradock/data/etcddata3/data:/etcddata
    networks:
      - laradock_backend
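
  ## A quick way to confirm the three-node etcd cluster formed (an
  ## illustrative check, not part of this file; the v3.3 image defaults to
  ## the v2 etcdctl API):
  ##   docker exec etcd1 etcdctl cluster-health
  ## or, from the host, against a mapped client port:
  ##   curl http://localhost:23791/health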
  ####### zookeeper #######
  zoo1:
    image: zookeeper
    restart: unless-stopped
    hostname: zoo1
    container_name: zoo1
    ports:
      - 2182:2181
    environment:
      ZOO_MY_ID: 1
      ZOO_SERVERS: server.1=0.0.0.0:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=zoo3:2888:3888;2181
    volumes:
      - /root/.laradock/data/zookeeper1/data:/data
      - /root/.laradock/data/zookeeper1/datalog:/datalog
    networks:
      - laradock_backend
  zoo2:
    image: zookeeper
    restart: unless-stopped
    hostname: zoo2
    container_name: zoo2
    ports:
      - 2183:2181
    environment:
      ZOO_MY_ID: 2
      ZOO_SERVERS: server.1=zoo1:2888:3888;2181 server.2=0.0.0.0:2888:3888;2181 server.3=zoo3:2888:3888;2181
    volumes:
      - /root/.laradock/data/zookeeper2/data:/data
      - /root/.laradock/data/zookeeper2/datalog:/datalog
    networks:
      - laradock_backend
  zoo3:
    image: zookeeper
    restart: unless-stopped
    hostname: zoo3
    container_name: zoo3
    ports:
      - 2184:2181
    environment:
      ZOO_MY_ID: 3
      ZOO_SERVERS: server.1=zoo1:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=0.0.0.0:2888:3888;2181
    volumes:
      - /root/.laradock/data/zookeeper3/data:/data
      - /root/.laradock/data/zookeeper3/datalog:/datalog
    networks:
      - laradock_backend
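
  ## Each ZOO_SERVERS entry follows the ZooKeeper 3.5+ form
  ## server.<id>=<host>:<peer-port>:<election-port>;<client-port>.
  ## To verify the ensemble elected a leader (illustrative command,
  ## assuming the official image keeps zkServer.sh on PATH):
  ##   docker exec zoo1 zkServer.sh status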
  ####### kafka #######
  kafka1:
    image: wurstmeister/kafka
    restart: unless-stopped
    container_name: kafka1
    ports:
      - "9093:9092"
    external_links:
      - zoo1
      - zoo2
      - zoo3
    depends_on:
      - zoo1
      - zoo2
      - zoo3
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_ADVERTISED_HOST_NAME: 10.0.41.145 ## Change to: host machine IP
      KAFKA_ADVERTISED_PORT: 9093 ## Change to: host-mapped port
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://10.0.41.145:9093 ## Port bound for publish/subscribe. Change to: host machine IP
      KAFKA_ZOOKEEPER_CONNECT: "zoo1:2181,zoo2:2181,zoo3:2181"
    volumes:
      - "/root/.laradock/data/kafka1/docker.sock:/var/run/docker.sock"
      - "/root/.laradock/data/kafka1/data/:/kafka"
    networks:
      - laradock_backend
  kafka2:
    image: wurstmeister/kafka
    restart: unless-stopped
    container_name: kafka2
    ports:
      - "9094:9092"
    external_links:
      - zoo1
      - zoo2
      - zoo3
    depends_on:
      - zoo1
      - zoo2
      - zoo3
    environment:
      KAFKA_BROKER_ID: 2
      KAFKA_ADVERTISED_HOST_NAME: 10.0.41.145 ## Change to: host machine IP
      KAFKA_ADVERTISED_PORT: 9094 ## Change to: host-mapped port
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://10.0.41.145:9094 ## Change to: host machine IP
      KAFKA_ZOOKEEPER_CONNECT: "zoo1:2181,zoo2:2181,zoo3:2181"
    volumes:
      - "/root/.laradock/data/kafka2/docker.sock:/var/run/docker.sock"
      - "/root/.laradock/data/kafka2/data/:/kafka"
    networks:
      - laradock_backend
  kafka3:
    image: wurstmeister/kafka
    restart: unless-stopped
    container_name: kafka3
    ports:
      - "9095:9092"
    external_links:
      - zoo1
      - zoo2
      - zoo3
    depends_on:
      - zoo1
      - zoo2
      - zoo3
    environment:
      KAFKA_BROKER_ID: 3
      KAFKA_ADVERTISED_HOST_NAME: 10.0.41.145 ## Change to: host machine IP
      KAFKA_ADVERTISED_PORT: 9095 ## Change to: host-mapped port
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://10.0.41.145:9095 ## Change to: host machine IP
      KAFKA_ZOOKEEPER_CONNECT: "zoo1:2181,zoo2:2181,zoo3:2181"
    volumes:
      - "/root/.laradock/data/kafka3/docker.sock:/var/run/docker.sock"
      - "/root/.laradock/data/kafka3/data/:/kafka"
    networks:
      - laradock_backend
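
  ## Each broker advertises the host IP plus its own mapped port, so clients
  ## outside the compose network can reach all three. A hedged smoke test
  ## using the scripts bundled in the wurstmeister image (script names and
  ## flags vary by Kafka version; --zookeeper applies to older releases):
  ##   docker exec kafka1 kafka-topics.sh --create --topic smoke-test \
  ##     --zookeeper zoo1:2181 --partitions 3 --replication-factor 3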
  kafka-manager: # service name; any custom name works, e.g. elasticsearch, redis, mysql, etc.
    image: sheepkiller/kafka-manager:latest # start the container from this image (repository, tag, or image ID); if it is missing locally, Compose pulls it automatically
    restart: unless-stopped # restart policy after the container exits; useful for keeping the service running. In production, always or unless-stopped is recommended.
    privileged: true # allow privileged commands inside the container
    container_name: kafka-manager # fixed name for the app container
    hostname: kafka-manager
    #domainname: your_website.com # search domain inside the container
    #hostname: test # hostname inside the container
    #mac_address: 08-00-27-00-0C-0A # MAC address inside the container
    depends_on:
      - kafka1
      - kafka2
      - kafka3
    ports:
      - "19000:9000"
    links: # link to containers created by this compose file
      - kafka1
      - kafka2
      - kafka3
    external_links: # link to containers created outside this compose file
      - zoo1
      - zoo2
      - zoo3
    environment:
      ZK_HOSTS: zoo1:2181,zoo2:2181,zoo3:2181 ## Change to: host machine IP
      TZ: CST-8
    networks:
      - laradock_backend
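
  ## Once up, the Kafka Manager UI is served on the mapped port, i.e.
  ## http://<host-ip>:19000; register a cluster there using the ZK_HOSTS
  ## addresses above.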
  ##### redis #####
  redis:
    image: redis
    restart: always
    command: --appendonly yes
    ports:
      - 6379:6379
    volumes:
      - "/root/.laradock/data/redis/data/:/data"
    networks:
      - laradock_backend
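
  ## --appendonly yes enables AOF persistence, so data survives restarts via
  ## the mounted /data volume. A quick liveness check (the container name is
  ## generated by Compose, since none is set here):
  ##   docker exec <redis-container> redis-cli ping   # expects PONG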
  ##### goim-example #####
  comet:
    build:
      context: .
      dockerfile: Dockerfile
    image: poembro/goim
    restart: always
    #env_file: .env # read variables from the given file(s) into the container environment; can be a single value or a list of files. If several files define the same variable, later files win, and values under environment override env_file.
    environment:
      REGION: sh # region
      ZONE: sh001 # machine ID
      DEPLOY_ENV: prod # production; set dev for development
      WEIGHT: 10 # weight
      ADDRS: 47.111.69.116,0.0.0.117 # public IPs
      OFFLINE: "true"
      DEBUG: "true"
      GOLANG_PROTOBUF_REGISTRATION_CONFLICT: warn
    ports:
      - "3101:3101"
      - "3102:3102"
      - "3109:3009"
    depends_on: # container start-up order (expresses dependencies between containers; ignored when deploying with swarm under the v3 format)
      - etcd1
      - etcd2
      - etcd3
      - redis
      - zoo1
      - zoo2
      - zoo3
      - kafka1
      - kafka2
      - kafka3
    entrypoint: # force the comet command at container start (equivalent to docker run --entrypoint)
      - /webser/go_wepapp/goim-example/comet
      - -conf=/webser/go_wepapp/goim-example/comet.toml
      - -region=sh
      - -zone=sh001
      - -deploy.env=prod
      - -host=10.0.41.145
      - -addrs=47.98.236.219
      - -weight=10
      - -offline=true
      - -debug=true
      - -log_dir=/webser/go_wepapp/goim-example
      - -v=1
      - -alsologtostderr
    networks:
      - laradock_backend
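
  ## Note: region/zone/weight and the public addresses appear both as
  ## environment variables and as entrypoint flags above; the binary is
  ## invoked with the explicit flags, so keep the two in sync when editing.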
  logic:
    build:
      context: .
      dockerfile: Dockerfile
    image: poembro/goim
    restart: always
    environment: #env_file: .env
      REGION: sh # region
      ZONE: sh001 # machine ID
      DEPLOY_ENV: prod # production; set dev for development
      GOLANG_PROTOBUF_REGISTRATION_CONFLICT: warn
    ports:
      - "3111:3111"
      - "3119:3119"
    depends_on:
      - etcd1
      - etcd2
      - etcd3
      - redis
      - zoo1
      - zoo2
      - zoo3
      - kafka1
      - kafka2
      - kafka3
    entrypoint:
      - /webser/go_wepapp/goim-example/logic
      - -conf=/webser/go_wepapp/goim-example/logic.toml
      - -region=sh
      - -zone=sh001
      - -deploy.env=prod
      - -host=10.0.41.145
      - -weight=10
      - -log_dir=/webser/go_wepapp/goim-example
      - -v=1
      - -alsologtostderr
    networks:
      - laradock_backend
  job:
    build:
      context: .
      dockerfile: Dockerfile
    image: poembro/goim
    restart: always
    #env_file: .env
    environment:
      REGION: sh # region
      ZONE: sh001 # machine ID
      DEPLOY_ENV: prod # production; set dev for development
      GOLANG_PROTOBUF_REGISTRATION_CONFLICT: warn
    depends_on:
      - etcd1
      - etcd2
      - etcd3
      - redis
      - zoo1
      - zoo2
      - zoo3
      - kafka1
      - kafka2
      - kafka3
    entrypoint:
      - /webser/go_wepapp/goim-example/job
      - -conf=/webser/go_wepapp/goim-example/job.toml
      - -region=sh
      - -zone=sh001
      - -deploy.env=prod
      - -host=10.0.41.145
      - -log_dir=/webser/go_wepapp/goim-example
      - -v=1
      - -alsologtostderr
    networks:
      - laradock_backend
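
## Bring the whole stack up (typical invocation; first adjust the
## KAFKA_ADVERTISED_* addresses and the -host/-addrs entrypoint flags to
## match your host):
##   docker-compose up -d --build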