merge_tp_partitions.sh
#!/bin/bash
# Merge checkpoint partitions along the tensor-parallel dimension.

LOAD_CHECKPOINT_PATH=<Specify the path of the checkpoint to load>
SAVE_CHECKPOINT_PATH=<Specify the path to save the merged checkpoint>
TOKENIZER_MODEL_PATH=<Specify the tokenizer model path>

# Required by Megatron-style scripts for correct communication/computation overlap.
export CUDA_DEVICE_MAX_CONNECTIONS=1

if [ ! -d "$SAVE_CHECKPOINT_PATH" ]; then
    mkdir -p "$SAVE_CHECKPOINT_PATH"
fi
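
# Merge the TP=2 partitions into a single TP=1 checkpoint; the pipeline-parallel
# layout (PP=4) is kept unchanged. The model arguments below must match the
# configuration the checkpoint was trained with.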
python tools/merge_tp_partitions.py \
--tensor-model-parallel-size 2 \
--target-tensor-model-parallel-size 1 \
--pipeline-model-parallel-size 4 \
--target-pipeline-model-parallel-size 4 \
--tokenizer-type YuanTokenizer \
--tokenizer-model-path "$TOKENIZER_MODEL_PATH" \
--num-layers 42 \
--hidden-size 8192 \
--num-attention-heads 64 \
--seq-length 4096 \
--max-position-embeddings 4096 \
--use-lf-gate \
--lf-conv2d-group 1 \
--lf-conv2d-num-pad 1 \
--position-embedding-type rope \
--no-embedding-dropout \
--flash-attn-drop 0.1 \
--fim-rate 0.5 \
--fim-spm-rate 0.5 \
--attention-dropout 0 \
--norm-dtype RMSNorm \
--hidden-dropout 0 \
--disable-bias-linear \
--reset-position-ids \
--use-flash-attn \
--swiglu \
--adam-beta1 0.9 \
--adam-beta2 0.95 \
--bf16 \
--DDP-impl local \
--use-cpu-initialization \
--micro-batch-size 1 \
--save-interval 1 \
--recompute-method block \
--recompute-granularity full \
--recompute-num-layers 1 \
--load "$LOAD_CHECKPOINT_PATH" \
--save "$SAVE_CHECKPOINT_PATH" \
--global-batch-size 1152 \
--lr 0.00009 \
--train-iters 63578 \
--lr-decay-iters 63578 \
--lr-decay-style cosine \
--min-lr 1.8e-5 \
--weight-decay 1e-1 \
--no-load-optim \
--process-checkpoint \
--use-distributed-optimizer
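
# Report the on-disk size of the merged checkpoint.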
du -sh "$SAVE_CHECKPOINT_PATH"
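
# Optional sanity check, a sketch only: it assumes a Megatron-style layout in
# which each merged pipeline stage lands under iter_*/mp_rank_* inside
# $SAVE_CHECKPOINT_PATH. Adjust the glob to whatever layout the merge script
# actually produces.
ls -lh "$SAVE_CHECKPOINT_PATH"/iter_*/mp_rank_* 2>/dev/null \
    || echo "No mp_rank_* dirs found; inspect $SAVE_CHECKPOINT_PATH manually."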