AnymalTerrainPPO.yaml
params:
  seed: ${...seed}

  algo:
    name: a2c_continuous

  model:
    name: continuous_a2c_logstd

  network:
    name: actor_critic
    separate: True

    space:
      continuous:
        mu_activation: None
        sigma_activation: None
        mu_init:
          name: default
        sigma_init:
          name: const_initializer
          val: 0. # std = 1.
        fixed_sigma: True
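        # fixed_sigma: True keeps the log-std as a single learned parameter
        # that is not conditioned on the observation (assumed rl_games
        # semantics); with val: 0. it starts at exp(0) = 1.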
    mlp:
      units: [512, 256, 128]
      activation: elu
      d2rl: False

      initializer:
        name: default
      regularizer:
        name: None

    # rnn:
    #   name: lstm
    #   units: 128
    #   layers: 1
    #   before_mlp: True
    #   concat_input: True
    #   layer_norm: False
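
    # To try the recurrent variant, uncomment the rnn block above; the
    # seq_length value under config below takes effect only when the rnn
    # is enabled.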

  load_checkpoint: ${if:${...checkpoint},True,False} # flag which sets whether to load the checkpoint
  load_path: ${...checkpoint} # path to the checkpoint to load

  config:
    name: ${resolve_default:AnymalTerrain,${....experiment}}
    full_experiment_name: ${.name}
    device: ${....rl_device}
    device_name: ${....rl_device}
    env_name: rlgpu
    multi_gpu: ${....multi_gpu}
    ppo: True
    mixed_precision: False # True
    normalize_input: True
    normalize_value: True
    normalize_advantage: True
    value_bootstrap: True
    clip_actions: False
    num_actors: ${....task.env.numEnvs}
    reward_shaper:
      scale_value: 1.0
    gamma: 0.99
    tau: 0.95
    e_clip: 0.2
    entropy_coef: 0.001
    learning_rate: 3.e-4 # overwritten by adaptive lr_schedule
    lr_schedule: adaptive
    kl_threshold: 0.008 # target kl for adaptive lr
    truncate_grads: True
    grad_norm: 1.
    horizon_length: 48
    minibatch_size: 16384
    mini_epochs: 5
    critic_coef: 2
    clip_value: True
    seq_length: 4 # only for rnn
    bounds_loss_coef: 0.
    max_epochs: ${resolve_default:2000,${....max_iterations}}
    save_best_after: 100
    score_to_win: 20000
    save_frequency: 50
    print_stats: True
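
For reference, the lr_schedule: adaptive entry makes rl_games adjust the learning rate each epoch from the observed policy KL divergence, targeting the kl_threshold above, which is why learning_rate: 3.e-4 is annotated as only a starting value. A minimal sketch of the rule, paraphrased from rl_games' AdaptiveScheduler (the 1.5 factor and the min/max learning-rate clamps are assumptions and may differ between versions):

    # Sketch of the KL-adaptive learning-rate rule selected by lr_schedule: adaptive.
    # Constants are assumed defaults, not guaranteed to match your rl_games version.
    def adaptive_lr(current_lr: float,
                    kl_dist: float,
                    kl_threshold: float = 0.008,  # matches kl_threshold above
                    min_lr: float = 1e-6,
                    max_lr: float = 1e-2) -> float:
        """Shrink the lr when the update overshoots the KL target, grow it when
        the update is too timid, and leave it alone inside the target band."""
        if kl_dist > 2.0 * kl_threshold:
            return max(current_lr / 1.5, min_lr)
        if kl_dist < 0.5 * kl_threshold:
            return min(current_lr * 1.5, max_lr)
        return current_lr

    # Example: starting from the configured 3e-4, an epoch with KL 0.02
    # (above 2 * 0.008) reduces the lr by the 1.5 factor.
    print(adaptive_lr(3e-4, kl_dist=0.02))  # 0.0002

With this rule the learning rate settles wherever updates stay near the 0.008 KL target, so the configured value mainly determines behavior in the first few epochs.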