# ci.yml (forked from ByConity/ByConity)
name: CI
on:
# Triggers the workflow on push and pull request events, but only for the "master" branch
push:
branches: [ "master" ]
pull_request:
branches: [ "master" ]
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
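# Runs for the same pull request (or the same ref, for pushes) share one concurrency
# group, so a newer run cancels any run of this workflow that is still in progress.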
jobs:
scm_build:
name: Build binary and run stateless tests
# Pin by digest instead of tag if registry/network issues occur; original image: byconity/byconity-ci:24Jan16
container: byconity/byconity-ci:24Jan16
runs-on: self-hosted
steps:
- name: Preload docker images
run: |
cd /docker_image_archive && ls -1 *.tar | xargs --no-run-if-empty -L 1 docker image load -i
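# The next step publishes its settings through $GITHUB_ENV so they are visible to
# every later step. Each *_PORT value is obtained by binding a socket to port 0,
# which makes the OS hand out a free ephemeral port; the socket is closed again
# before the services start, so a small race window remains.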
- name: Set environment variables and git
run: |
export COMPOSE_PROJECT_NAME=byconity-$(cat /etc/hostname)
echo "COMPOSE_PROJECT_NAME=${COMPOSE_PROJECT_NAME}" >> $GITHUB_ENV
echo "BINARY_VOL=${COMPOSE_PROJECT_NAME}_bin_volume" >> $GITHUB_ENV
echo "HDFS_VOL=${COMPOSE_PROJECT_NAME}_hdfs_volume" >> $GITHUB_ENV
echo "CONFIG_VOL=${COMPOSE_PROJECT_NAME}_config_volume" >> $GITHUB_ENV
echo "CONFIG_VOL_FOR_S3=${COMPOSE_PROJECT_NAME}_config_volume_for_s3" >> $GITHUB_ENV
echo "SCRIPTS_VOL=${COMPOSE_PROJECT_NAME}_scripts_volume" >> $GITHUB_ENV
echo "CLICKHOUSE_PORT_TCP=$(python -c 'import socket; s=socket.socket(); s.bind(("", 0)); print(s.getsockname()[1]); s.close()')" >> $GITHUB_ENV
echo "CLICKHOUSE_PORT_HTTP=$(python -c 'import socket; s=socket.socket(); s.bind(("", 0)); print(s.getsockname()[1]); s.close()')" >> $GITHUB_ENV
echo "CLICKHOUSE_PORT_HDFS=$(python -c 'import socket; s=socket.socket(); s.bind(("", 0)); print(s.getsockname()[1]); s.close()')" >> $GITHUB_ENV
echo "CNCH_WRITE_WORKER_PORT_TCP=$(python -c 'import socket; s=socket.socket(); s.bind(("", 0)); print(s.getsockname()[1]); s.close()')" >> $GITHUB_ENV
echo "FS_PORT=$(python -c 'import socket; s=socket.socket(); s.bind(("", 0)); print(s.getsockname()[1]); s.close()')" >> $GITHUB_ENV
echo "HIVE_SERVER_PORT=$(python -c 'import socket; s=socket.socket(); s.bind(("", 0)); print(s.getsockname()[1]); s.close()')" >> $GITHUB_ENV
echo "HMS_PORT=$(python -c 'import socket; s=socket.socket(); s.bind(("", 0)); print(s.getsockname()[1]); s.close()')" >> $GITHUB_ENV
echo "HIVE_NAMENODE_PORT=$(python -c 'import socket; s=socket.socket(); s.bind(("", 0)); print(s.getsockname()[1]); s.close()')" >> $GITHUB_ENV
git config --global --add safe.directory $GITHUB_WORKSPACE
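# The git proxy is only configured on runners that presumably lack direct access to
# github.com; the runner excluded below is assumed to have direct connectivity.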
- name: Setup git proxy
if: ${{ runner.name != 'ec2-aws-id4-10.10.129.157' }}
run: |
git config --global http.proxy http://${{ secrets.HTTP_PROXY }}
- name: Check out repository code
uses: actions/checkout@v4
with:
submodules: recursive
path: '.'
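# actions/checkout with "submodules: recursive" already initialises and updates all
# submodules, which should make the explicit sync/update commands commented out in
# the build step below redundant.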
- name: Build binary with build_bin.sh
env:
CUSTOM_CMAKE_BUILD_TYPE: "Release"
run: |
# git -C "$GITHUB_WORKSPACE" submodule sync
# git -C "$GITHUB_WORKSPACE" submodule update --init --recursive --force
./build_bin.sh
- name: Run unit tests
run: |
bash $GITHUB_WORKSPACE/unittest.sh
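# Sanity check on the release binary: compatibility_check.py checks its glibc
# compatibility (hence the step name), catching builds that would require a newer
# glibc than intended.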
- name: glibc_compatibility_check
run: |
python3 $GITHUB_WORKSPACE/tests/ci/compatibility_check.py --binary $GITHUB_WORKSPACE/build/programs/clickhouse
- name: Prepare CI deployment directory and volumes
run: |
mkdir /CI/
cd /CI/
cp -r $GITHUB_WORKSPACE/docker/ci-deploy/* ./
cp $GITHUB_WORKSPACE/docker/ci-deploy/.env ./
mkdir bin
cp -r $GITHUB_WORKSPACE/build/programs/* ./bin
sed -i "s|COMPOSE\_PROJECT\_NAME|${COMPOSE_PROJECT_NAME}|g" hdfs_config/conf/core-site.xml
sed -i "s|COMPOSE\_PROJECT\_NAME|${COMPOSE_PROJECT_NAME}|g" config/fdb.cluster
sed -i "s|COMPOSE\_PROJECT\_NAME|${COMPOSE_PROJECT_NAME}|g" config/cnch-config.yml
sed -i "s|COMPOSE\_PROJECT\_NAME|${COMPOSE_PROJECT_NAME}|g" config/daemon-manager.yml
sed -i "s|COMPOSE\_PROJECT\_NAME|${COMPOSE_PROJECT_NAME}|g" config_for_s3_storage/fdb.cluster
sed -i "s|COMPOSE\_PROJECT\_NAME|${COMPOSE_PROJECT_NAME}|g" config_for_s3_storage/cnch-config.yml
sed -i "s|COMPOSE\_PROJECT\_NAME|${COMPOSE_PROJECT_NAME}|g" config_for_s3_storage/daemon-manager.yml
echo "Create volumes for ByConity Cluster"
/bin/bash create_docker_volume_ci.sh
mkdir -p /test_output
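# In the cluster steps below, the server address is taken from the gateway IP of the
# container's bridge network: the gateway is the host side of that network, where the
# compose-published ports are reachable. The curl loop doubles as a readiness probe.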
- name: Create ByConity Cluster S3
run: |
echo "Bring up ByConity Cluster S3"
cd /CI/
docker compose -f docker-compose-s3.yml up -d
sleep 20
export CLICKHOUSE_HOST=$(docker inspect -f '{{range.NetworkSettings.Networks}}{{.Gateway}}{{end}}' ${COMPOSE_PROJECT_NAME}_server-s3-0)
echo "CLICKHOUSE_HOST=${CLICKHOUSE_HOST}" >> $GITHUB_ENV
export CNCH_WRITE_WORKER_HOST=$(docker inspect -f '{{range.NetworkSettings.Networks}}{{.Gateway}}{{end}}' ${COMPOSE_PROJECT_NAME}_worker-write-s3-0)
echo "CNCH_WRITE_WORKER_HOST=${CNCH_WRITE_WORKER_HOST}" >> $GITHUB_ENV
echo "Check connection to ByConity Server at ${CLICKHOUSE_HOST}:${CLICKHOUSE_PORT_HTTP}"
curl --retry 11 --retry-delay 5 --retry-connrefused --retry-max-time 120 --max-time 120 $CLICKHOUSE_HOST:$CLICKHOUSE_PORT_HTTP
echo "Check status of cluster"
docker ps --filter name=$COMPOSE_PROJECT_NAME
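# Every functional-test step below follows the same pattern: copy the test runner
# (tests/clickhouse-test) plus the relevant queries/ subtree into a scratch dir, run
# it with suite-specific client settings from EXTRA_OPTIONS, post-process the log with
# process_functional_tests_result.py, and move the raw log into /test_output under a
# unique name. continue-on-error keeps the remaining suites running after a failure.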
- name: Run ByConity FunctionalStateless for S3 (w/o optimizer)
continue-on-error: true
run: |
mkdir -p ci_stateless_without_optimizer_s3
cd ci_stateless_without_optimizer_s3
export EXTRA_OPTIONS='enable_optimizer_fallback=0 enable_optimizer=0'
cp -r $GITHUB_WORKSPACE/docker/test/stateless/process_functional_tests_result.py ./
cp -r $GITHUB_WORKSPACE/tests/clickhouse-test ./
mkdir queries
cp -r $GITHUB_WORKSPACE/tests/queries/4_cnch_stateless queries/
cp -r $GITHUB_WORKSPACE/tests/queries/shell_config.sh queries/
cp $GITHUB_WORKSPACE/tests/queries/skip_list.json queries/
mkdir -p test_output
./clickhouse-test --s3 --use-skip-list -b $GITHUB_WORKSPACE/build/programs/clickhouse --stop --hung-check --jobs 4 --order asc --print-time --client-option ${EXTRA_OPTIONS} 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee -a test_output/test_result.txt
./process_functional_tests_result.py --in-results-dir ./test_output/ --out-results-file /test_output/stateless_without_optimizer_s3_test_results.txt --out-status-file /test_output/check_status.tsv || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv
mv test_output/test_result.txt /test_output/wo_optimizer_s3_test_result.txt
cd ..
rm -rf ci_stateless_without_optimizer_s3
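# Log collection for the S3 run: for every ByConity runtime container of this compose
# project, copy /var/log/byconity into /test_output/<container-name>/log so the logs
# are uploaded with the artifacts even when tests failed (if: always()).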
- name: After S3 tests
if: always()
continue-on-error: true
run: |
docker ps -a --format "{{.ID}} {{.Names}} {{.Image}}" --filter name=$COMPOSE_PROJECT_NAME | grep "byconity/debian:buster-runit-fdb7.1.27" | awk {'print $1"\t"$2'} | xargs -n 2 sh -c 'mkdir -p /test_output/$2; docker cp -q $1:/var/log/byconity /test_output/$2/log' sh || true
- name: Stop ByConity Cluster S3
continue-on-error: true
run: |
cd /CI/
echo "Bring down ByConity Cluster S3"
docker compose -f docker-compose-s3.yml down
docker ps --filter name=$COMPOSE_PROJECT_NAME
docker volume prune -f
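# The same cluster topology is now brought up again, this time backed by HDFS
# (the default docker-compose.yml), and the remaining suites run against it.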
- name: Create ByConity Cluster HDFS
run: |
echo "Bring up ByConity Cluster HDFS"
cd /CI/
docker compose up -d
sleep 10
hdfs_config/script/create_users.sh
export CLICKHOUSE_HOST=$(docker inspect -f '{{range.NetworkSettings.Networks}}{{.Gateway}}{{end}}' ${COMPOSE_PROJECT_NAME}_server-0)
echo "CLICKHOUSE_HOST=${CLICKHOUSE_HOST}" >> $GITHUB_ENV
export CNCH_WRITE_WORKER_HOST=$(docker inspect -f '{{range.NetworkSettings.Networks}}{{.Gateway}}{{end}}' ${COMPOSE_PROJECT_NAME}_worker-write-0)
echo "CNCH_WRITE_WORKER_HOST=${CNCH_WRITE_WORKER_HOST}" >> $GITHUB_ENV
echo "Check connection to ByConity Server at ${CLICKHOUSE_HOST}:${CLICKHOUSE_PORT_HTTP}"
curl --retry 10 --retry-delay 5 --retry-connrefused --retry-max-time 120 --max-time 120 $CLICKHOUSE_HOST:$CLICKHOUSE_PORT_HTTP
echo "Check status of cluster"
docker ps --filter name=$COMPOSE_PROJECT_NAME
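# Hypothetical local repro sketch (not part of CI): with the cluster up and the
# CLICKHOUSE_HOST / CLICKHOUSE_PORT_* variables exported as above, a single test
# can be re-run by passing its name pattern to the runner, e.g.
#   ./clickhouse-test -b $GITHUB_WORKSPACE/build/programs/clickhouse \
#     --client-option enable_optimizer=0 <test_name>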
- name: Run ByConity External Table test
continue-on-error: true
run: |
mkdir -p ci_external_table
cd ci_external_table
export EXTRA_OPTIONS='enable_optimizer_fallback=0'
cp -r $GITHUB_WORKSPACE/docker/test/stateless/process_functional_tests_result.py ./
cp -r $GITHUB_WORKSPACE/tests/clickhouse-test ./
mkdir queries
cp -r $GITHUB_WORKSPACE/tests/queries/4_cnch_external_table queries/
cp -r $GITHUB_WORKSPACE/tests/queries/shell_config.sh queries/
mkdir -p test_output
./clickhouse-test -b $GITHUB_WORKSPACE/build/programs/clickhouse --stop --hung-check --jobs 4 --order asc --print-time --client-option ${EXTRA_OPTIONS} 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee -a test_output/test_result.txt
./process_functional_tests_result.py --in-results-dir ./test_output/ --out-results-file /test_output/external_table_test_results.txt --out-status-file /test_output/check_status.tsv || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv
mv test_output/test_result.txt /test_output/external_table_test_result.txt
cd ..
rm -rf ci_external_table
- name: Run ByConity FunctionalStateless (w/o optimizer)
continue-on-error: true
run: |
mkdir -p ci_stateless_without_optimizer
cd ci_stateless_without_optimizer
export EXTRA_OPTIONS='enable_optimizer_fallback=0 enable_optimizer=0'
cp -r $GITHUB_WORKSPACE/docker/test/stateless/process_functional_tests_result.py ./
cp -r $GITHUB_WORKSPACE/tests/clickhouse-test ./
mkdir queries
cp -r $GITHUB_WORKSPACE/tests/queries/4_cnch_stateless queries/
cp -r $GITHUB_WORKSPACE/tests/queries/shell_config.sh queries/
mkdir -p test_output
./clickhouse-test -b $GITHUB_WORKSPACE/build/programs/clickhouse --stop --hung-check --jobs 4 --order asc --print-time --client-option ${EXTRA_OPTIONS} 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee -a test_output/test_result.txt
./process_functional_tests_result.py --in-results-dir ./test_output/ --out-results-file /test_output/stateless_without_optimizer_test_results.txt --out-status-file /test_output/check_status.tsv || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv
mv test_output/test_result.txt /test_output/wo_optimizer_test_result.txt
cd ..
rm -rf ci_stateless_without_optimizer
- name: Run ByConity FunctionalStateless no tenant (w/o optimizer)
continue-on-error: true
run: |
mkdir -p ci_stateless_without_optimizer_no_tenant
cd ci_stateless_without_optimizer_no_tenant
export EXTRA_OPTIONS='enable_optimizer_fallback=0 enable_optimizer=0'
cp -r $GITHUB_WORKSPACE/docker/test/stateless/process_functional_tests_result.py ./
cp -r $GITHUB_WORKSPACE/tests/clickhouse-test ./
mkdir queries
cp -r $GITHUB_WORKSPACE/tests/queries/4_cnch_stateless_no_tenant queries/
cp -r $GITHUB_WORKSPACE/tests/queries/shell_config.sh queries/
mkdir -p test_output
./clickhouse-test -b $GITHUB_WORKSPACE/build/programs/clickhouse --stop --hung-check --jobs 4 --order asc --print-time --client-option ${EXTRA_OPTIONS} 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee -a test_output/test_result.txt
./process_functional_tests_result.py --in-results-dir ./test_output/ --out-results-file /test_output/stateless_without_optimizer_no_tenant_test_results.txt --out-status-file /test_output/check_status.tsv || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv
mv test_output/test_result.txt /test_output/wo_optimizer_no_tenant_test_result.txt
cd ..
rm -rf ci_stateless_without_optimizer_no_tenant
- name: Run ByConity FunctionalStateless (w/ optimizer)
continue-on-error: true
run: |
mkdir -p ci_stateless_with_optimizer
cd ci_stateless_with_optimizer
cp -r $GITHUB_WORKSPACE/docker/test/stateless/process_functional_tests_result.py ./
cp -r $GITHUB_WORKSPACE/tests/clickhouse-test ./
mkdir queries
cp -r $GITHUB_WORKSPACE/tests/queries/4_cnch_stateless queries/
cp -r $GITHUB_WORKSPACE/tests/queries/shell_config.sh queries/
mkdir -p test_output
export EXTRA_OPTIONS='enable_optimizer_fallback=0 enable_optimizer=1'
./clickhouse-test -b $GITHUB_WORKSPACE/build/programs/clickhouse --stop --hung-check --jobs 4 --order asc --print-time --client-option ${EXTRA_OPTIONS} 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee -a test_output/test_result.txt
./process_functional_tests_result.py --in-results-dir ./test_output/ --out-results-file /test_output/stateless_with_optimizer_test_results.txt --out-status-file /test_output/check_status.tsv || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv
mv test_output/test_result.txt /test_output/w_optimizer_test_result.txt
cd ..
rm -rf ci_stateless_with_optimizer
- name: Run ByConity FunctionalStateless (w/ optimizer + bsp_mode)
continue-on-error: true
run: |
mkdir -p ci_stateless_with_optimizer_bsp
cd ci_stateless_with_optimizer_bsp
cp -r $GITHUB_WORKSPACE/docker/test/stateless/process_functional_tests_result.py ./
cp -r $GITHUB_WORKSPACE/tests/clickhouse-test ./
mkdir queries
cp -r $GITHUB_WORKSPACE/tests/queries/4_cnch_stateless queries/
cp -r $GITHUB_WORKSPACE/tests/queries/shell_config.sh queries/
mkdir -p test_output
export EXTRA_OPTIONS='enable_optimizer_fallback=0 enable_optimizer=1 bsp_mode=1'
./clickhouse-test -b $GITHUB_WORKSPACE/build/programs/clickhouse --stop --hung-check --jobs 4 --order asc --print-time --client-option ${EXTRA_OPTIONS} 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee -a test_output/test_result.txt
./process_functional_tests_result.py --in-results-dir ./test_output/ --out-results-file /test_output/stateless_with_optimizer_bsp_test_results.txt --out-status-file /test_output/check_status.tsv || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv
mv test_output/test_result.txt /test_output/w_optimizer_bsp_test_result.txt
cd ..
rm -rf ci_stateless_with_optimizer_bsp
- name: Run ByConity FunctionalStateless no tenant (w/ optimizer)
continue-on-error: true
run: |
printf '%s\n' "$CNCH_WRITE_WORKER_HOST"
mkdir -p ci_stateless_with_optimizer_no_tenant
cd ci_stateless_with_optimizer_no_tenant
cp -r $GITHUB_WORKSPACE/docker/test/stateless/process_functional_tests_result.py ./
cp -r $GITHUB_WORKSPACE/tests/clickhouse-test ./
mkdir queries
cp -r $GITHUB_WORKSPACE/tests/queries/4_cnch_stateless_no_tenant queries/
cp -r $GITHUB_WORKSPACE/tests/queries/shell_config.sh queries/
mkdir -p test_output
export EXTRA_OPTIONS='enable_optimizer_fallback=0 enable_optimizer=1'
./clickhouse-test -b $GITHUB_WORKSPACE/build/programs/clickhouse --stop --hung-check --jobs 4 --order asc --print-time --client-option ${EXTRA_OPTIONS} 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee -a test_output/test_result.txt
./process_functional_tests_result.py --in-results-dir ./test_output/ --out-results-file /test_output/stateless_no_tenant_with_optimizer_test_results.txt --out-status-file /test_output/check_status.tsv || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv
mv test_output/test_result.txt /test_output/no_tenant_w_optimizer_test_result.txt
cd ..
rm -rf ci_stateless_with_optimizer_no_tenant
- name: Run ByConity FunctionalStateless no tenant (w/ optimizer bsp)
continue-on-error: true
run: |
printf '%s\n' "$CNCH_WRITE_WORKER_HOST"
mkdir -p ci_stateless_with_optimizer_bsp_no_tenant
cd ci_stateless_with_optimizer_bsp_no_tenant
cp -r $GITHUB_WORKSPACE/docker/test/stateless/process_functional_tests_result.py ./
cp -r $GITHUB_WORKSPACE/tests/clickhouse-test ./
mkdir queries
cp -r $GITHUB_WORKSPACE/tests/queries/4_cnch_stateless_no_tenant queries/
echo "Skip 48028_explain_format_json and 48036_verify_nullable_type_after_eliminate_join_by_fk_in_cte"
rm -f ./queries/4_cnch_stateless_no_tenant/48028_explain_format_json.* ./queries/4_cnch_stateless_no_tenant/48036_verify_nullable_type_after_eliminate_join_by_fk_in_cte.*
cp -r $GITHUB_WORKSPACE/tests/queries/shell_config.sh queries/
mkdir -p test_output
export EXTRA_OPTIONS='enable_optimizer_fallback=0 enable_optimizer=1 bsp_mode=1'
./clickhouse-test -b $GITHUB_WORKSPACE/build/programs/clickhouse --stop --hung-check --jobs 4 --order asc --print-time --client-option ${EXTRA_OPTIONS} 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee -a test_output/test_result.txt
./process_functional_tests_result.py --in-results-dir ./test_output/ --out-results-file /test_output/stateless_no_tenant_with_optimizer_bsp_test_results.txt --out-status-file /test_output/check_status.tsv || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv
mv test_output/test_result.txt /test_output/no_tenant_w_optimizer_bsp_test_result.txt
cd ..
rm -rf ci_stateless_with_optimizer_bsp_no_tenant
- name: Run ByConity ClickhouseSQL FunctionalStateless (w/o optimizer)
continue-on-error: true
run: |
mkdir -p ci_clickhouse_sql
cd ci_clickhouse_sql
export EXTRA_OPTIONS='enable_optimizer_fallback=0 enable_optimizer=0'
cp -r $GITHUB_WORKSPACE/docker/test/stateless/process_functional_tests_result.py ./
cp -r $GITHUB_WORKSPACE/tests/clickhouse-test ./
mkdir queries
cp -r $GITHUB_WORKSPACE/tests/queries/7_clickhouse_sql queries/
cp -r $GITHUB_WORKSPACE/tests/queries/shell_config.sh queries/
echo "Running test without optimizer"
mkdir -p test_output
./clickhouse-test -b $GITHUB_WORKSPACE/build/programs/clickhouse --stop --hung-check --jobs 1 --order asc --print-time --client-option ${EXTRA_OPTIONS} 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee -a test_output/test_result.txt
./process_functional_tests_result.py --in-results-dir ./test_output/ --out-results-file /test_output/clickhouse_sql_test_results.txt --out-status-file /test_output/check_status.tsv || echo -e "failure\tCannot parse results" > /test_output/check_status.tsv
mv test_output/test_result.txt /test_output/ChSQL_tenant_wo_optimizer_test_result.txt
cd ..
rm -rf ci_clickhouse_sql
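# Final log collection, after which /test_output is renamed to /Artifacts so the
# upload-artifact step picks up everything (logs and parsed results) in one pass.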
- name: After test
if: always()
continue-on-error: true
run: |
docker ps -a --format "{{.ID}} {{.Names}} {{.Image}}" --filter name=$COMPOSE_PROJECT_NAME | grep "byconity/debian:buster-runit-fdb7.1.27" | awk {'print $1"\t"$2'} | xargs -n 2 sh -c 'mkdir -p /test_output/$2; docker cp -q $1:/var/log/byconity /test_output/$2/log' sh || true
mv /test_output /Artifacts || true
- name: Upload Artifact
if: always()
continue-on-error: true
uses: actions/upload-artifact@v3
with:
name: Artifacts
path: /Artifacts
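# result.sh presumably aggregates the per-suite results collected above; unlike the
# test steps it has no continue-on-error, so its exit code gates the job status.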
- name: Analyse Result
run: |
/bin/bash $GITHUB_WORKSPACE/docker/test/result.sh
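# Teardown runs unconditionally (if: always()) so the self-hosted runner is not left
# with stale containers, networks, or volumes between workflow runs.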
- name: Cleanup docker compose HDFS and S3
if: always()
run: |
cd /CI/
docker compose down --volumes || true
docker compose -f docker-compose-s3.yml down --volumes || true
docker container prune -f
docker network prune -f
docker volume prune -f