#!/bin/sh
#
# Execute from the current working directory
#$ -cwd
#
# This is a long-running job
#$ -l day
#
# Can use up to 16GB of memory
#$ -l vf=16G
#
#$ -l gpus=1
#$ -l gmem=32
#
#
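# Note: the "#$" lines above are SGE scheduler directives; a typical way to
# launch this script on such a cluster would be "qsub script_train"
# (assuming SGE is the scheduler in use).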
nvidia-smi
# python3 main.py --model convnext_base --experiment latlng \
# --finetune ./convnext_base_22k_224.pth --dis_criterion mse \
# --log_dir ./logs/latlng_raw --output_dir ./logs/latlng_raw \
# --input_size 224 --drop_path 0.0 --batch_size 64 \
# --data_path ./data --lr 5e-5 --min_lr 1e-6 --num_workers 0 --auto_resume True
# python3 main.py --model convnext_base --experiment latlng \
# --finetune ./logs/ConvNextB_cls_final/checkpoint-best.pth --dis_criterion mse \
# --log_dir ./logs/latlng_pretrain --output_dir ./logs/latlng_pretrain \
# --input_size 224 --drop_path 0.0 --batch_size 64 \
# --data_path ./data --lr 5e-5 --min_lr 1e-6 --num_workers 0 --auto_resume False
### final BiT script
python3 main.py --model BiT-M-R101x1 --experiment country \
--finetune BiT-M-R101x1-run2-caltech101.npz --imagenet_default_mean_and_std False \
--log_dir ./logs/BitR101x1_cls --output_dir ./logs/BitR101x1_cls \
--input_size 224 --drop_path 0.0 --batch_size 32 \
--data_path ./data --lr 3e-5 --min_lr 1e-6 --num_workers 0 --auto_resume False \
--resume ./logs/BitR101x1_cls/checkpoint-best.pth --eval True
python main.py --model BiT-M-R101x1 --experiment country \
--imagenet_default_mean_and_std False \
--input_size 224 --drop_path 0.0 --batch_size 32 \
--data_path ./data --lr 3e-5 --min_lr 1e-6 --num_workers 0 --auto_resume False \
--resume ./logs/BitR101x1_cls/checkpoint-23.pth --eval True
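# The two uncommented BiT commands above appear to be evaluation-only runs
# (--eval True), loading weights from the checkpoint given to --resume rather
# than training from scratch.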
### final convnext_base
# python3 main.py --model convnext_base --experiment country \
# --finetune ./convnext_base_22k_224.pth \
# --log_dir ./logs/ConvNextB_cls_final --output_dir ./logs/ConvNextB_cls_final \
# --input_size 224 --drop_path 0.1 --batch_size 16 \
# --data_path ./data --lr 5e-5 --min_lr 1e-6 --num_workers 0 --auto_resume False
python main.py --model convnext_base --experiment country \
--finetune ./convnext_base_22k_224.pth \
--input_size 224 --drop_path 0.0 --batch_size 32 \
--data_path ./data --lr 3e-5 --min_lr 1e-6 --num_workers 0 --auto_resume False \
--resume ./logs/cls_final/checkpoint-20.pth --eval True
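# Likewise an evaluation-only run (--eval True), presumably restoring the
# final ConvNeXt-Base country classifier from checkpoint-20.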
### final convnext dis prediction
# for i in {1..10}
# do
# python3 main.py --model convnext_base --experiment dis_freeze \
# --finetune ./convnext_base_22k_224.pth --device cuda --weight_decay 0.05 \
# --log_dir ./logs/ConvNextB_dis_sigmoid_final --output_dir ./logs/ConvNextB_dis_sigmoid_final \
# --input_size 224 --drop_path 0.0 --batch_size 64 --warmup_steps 500 \
# --data_path ./data --lr 1e-4 --min_lr 1e-6 --num_workers 0 --auto_resume True
# done
# python3 main.py --model convnext_base --experiment dis_freeze \
# --finetune ./convnext_base_22k_224.pth --device cuda \
# --log_dir ./logs/ConvNextB_dis_debug --output_dir ./logs/ConvNextB_dis_debug \
# --input_size 224 --drop_path 0.0 --batch_size 64 --warmup_steps 500 \
# --data_path ./data --lr 1e-4 --min_lr 1e-6 --num_workers 0 --auto_resume True
# python3 main.py --model convnext_base --experiment dis --dis_criterion smoothl1 \
# --finetune ./logs/ConvNextB_cls_final/checkpoint-best.pth --device cuda \
# --log_dir ./logs/ConvNextB_dis_contrast --output_dir ./logs/ConvNextB_dis_contrast \
# --input_size 224 --drop_path 0.0 --batch_size 128 --warmup_epochs 5 \
# --data_path ./data --lr 1e-3 --min_lr 1e-5 --num_workers 0 --auto_resume True \
# --resume ./logs/ConvNextB_dis_contrast/checkpoint-4.pth --eval True
# python3 main.py --model convnext_base --experiment dis \
# --finetune ./convnext_base_22k_224.pth --device cpu \
# --log_dir ./logs/ConvNextB_dis_debug --output_dir ./logs/ConvNextB_dis_debug \
# --input_size 224 --drop_path 0.0 --batch_size 64 --warmup_steps 500 \
# --data_path ./data --lr 1e-4 --min_lr 1e-6 --num_workers 0 --auto_resume False
### ablation
# python3 main.py --model convnext_base --experiment euclidean \
# --finetune ./convnext_base_22k_224.pth \
# --log_dir ./logs/ConvNextB_euclidean_final --output_dir ./logs/ConvNextB_euclidean_final \
# --input_size 224 --drop_path 0.0 --batch_size 16 \
# --data_path ./data --lr 1e-3 --min_lr 1e-5 --num_workers 0
# python3 main.py --model convnext_base --experiment dis_freeze \
# --finetune ./convnext_base_22k_224.pth --device cuda \
# --log_dir ./logs/ConvNextB_dis_debug --output_dir ./logs/ConvNextB_dis_debug \
# --input_size 224 --drop_path 0.0 --batch_size 32 --warmup_steps 10 \
# --data_path ./data --lr 1e-4 --min_lr 1e-6 --num_workers 0 --auto_resume False