# dqn_p_noises_various_lrs.py
from ray import tune
import numpy as np
from collections import OrderedDict
timesteps_total = 20_000  # total training timesteps per run
num_seeds = 10  # number of dummy seeds to grid over, i.e. runs per configuration
var_env_configs = OrderedDict(
    {
        "state_space_size": [8],  # alternatives: [8, 10, 12, 14] or [2**i for i in range(1, 6)]
        "action_space_size": [8],  # alternatives: [2, 4, 8, 16] or [2**i for i in range(1, 6)]
        "delay": [0],
        "sequence_length": [1],  # alternative: [i for i in range(1, 4)]
        "reward_density": [0.25],  # alternative: np.linspace(0.0, 1.0, num=5)
        "make_denser": [False],
        "terminal_state_density": [0.25],  # alternative: np.linspace(0.1, 1.0, num=5)
        "transition_noise": [0, 0.01, 0.02, 0.10, 0.25],  # transition noise levels swept here
        "reward_noise": [0],  # std dev of the Gaussian reward noise
        "dummy_seed": [i for i in range(num_seeds)],  # one run per seed value
    }
)
var_agent_configs = OrderedDict(
    {
        # 16 learning rates, log-spaced from 1e-1 down to 1e-6
        # (coarser alternative: grid_search([1e-2, 1e-4, 1e-6]))
        "lr": list(np.power(10.0, np.linspace(-1, -6, 16))),
    }
)
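
# Sanity check of the learning-rate grid above (illustrative only; _lrs is a
# hypothetical local name, not used by the experiment runner): 16 values
# log-spaced from 1e-1 down to 1e-6, roughly 1e-1, 4.6e-2, 2.2e-2, 1e-2, ...,
# 2.2e-6, 1e-6.
_lrs = np.power(10.0, np.linspace(-1, -6, 16))
assert len(_lrs) == 16
assert np.isclose(_lrs[0], 1e-1) and np.isclose(_lrs[-1], 1e-6)
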
var_configs = OrderedDict(
{
"env": var_env_configs,
"agent": var_agent_configs,
}
)
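
# With grid search over every list in var_configs, the total number of runs is
# the product of the list lengths: 5 transition-noise levels x 10 dummy seeds
# x 16 learning rates = 800 configurations (singleton lists contribute a factor
# of 1). A small illustrative count (_num_grid_configs is a hypothetical name):
_num_grid_configs = 1
for _space in var_configs.values():
    for _values in _space.values():
        _num_grid_configs *= len(_values)
assert _num_grid_configs == 5 * 10 * 16  # == 800 runs
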
env_config = {
"env": "RLToy-v0",
"horizon": 100,
"env_config": {
"seed": 0, # seed
"state_space_type": "discrete",
"action_space_type": "discrete",
"generate_random_mdp": True,
"repeats_in_sequences": False,
"reward_scale": 1.0,
"completely_connected": True,
},
}
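
# A minimal sketch (hypothetical names, not part of the experiment runner) of
# what one concrete env config could look like, assuming the runner picks one
# value from each list in var_env_configs and merges it over the base
# env_config["env_config"]:
_one_grid_point = {k: v[0] for k, v in var_env_configs.items()}
_example_env_config = {**env_config["env_config"], **_one_grid_point}
# e.g. _example_env_config["transition_noise"] == 0, ["state_space_size"] == 8
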
algorithm = "DQN"
agent_config = {
"adam_epsilon": 1e-4,
"beta_annealing_fraction": 1.0,
"buffer_size": 20_000,
"double_q": False,
"dueling": False,
"exploration_final_eps": 0.01,
"exploration_fraction": 0.1,
"final_prioritized_replay_beta": 1.0,
"hiddens": None,
"learning_starts": 1000,
# "lr": 1e-4, # "lr": grid_search([1e-2, 1e-4, 1e-6]),
"n_step": 1,
"noisy": False,
"num_atoms": 1,
"prioritized_replay": False,
"prioritized_replay_alpha": 0.5,
"sample_batch_size": 4,
"schedule_max_timesteps": 20000,
"target_network_update_freq": 800,
"timesteps_per_iteration": 1000,
"min_iter_time_s": 0,
"train_batch_size": 32,
}
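
# The exploration settings above define a linear epsilon-greedy schedule:
# epsilon is annealed to exploration_final_eps (0.01) over the first
# exploration_fraction (10%) of schedule_max_timesteps, i.e. the first 2000 of
# 20000 steps, then held constant. A minimal sketch of that schedule, assuming
# the usual initial epsilon of 1.0 (_epsilon_at is a hypothetical helper, not
# part of the original script):
def _epsilon_at(step, total=20000, fraction=0.1, final_eps=0.01, initial_eps=1.0):
    """Linearly anneal epsilon over the first `fraction * total` steps."""
    anneal_steps = fraction * total
    if step >= anneal_steps:
        return final_eps
    return initial_eps + (final_eps - initial_eps) * (step / anneal_steps)

# _epsilon_at(0) == 1.0, _epsilon_at(1000) ~= 0.505, _epsilon_at(2000) == 0.01
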
model_config = {
"model": {
"fcnet_hiddens": [256, 256],
"custom_preprocessor": "ohe",
"custom_options": {}, # extra options to pass to your preprocessor
"fcnet_activation": "tanh",
"use_lstm": False,
"max_seq_len": 20,
"lstm_cell_size": 256,
"lstm_use_prev_action_reward": False,
},
}
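
# "custom_preprocessor": "ohe" above presumably refers to a one-hot encoding
# preprocessor registered with RLlib elsewhere in the repo (the registration is
# not part of this file). A minimal numpy sketch of what such an encoder
# computes for a discrete state (_one_hot is a hypothetical helper):
def _one_hot(state, state_space_size=8):
    """Return a one-hot vector of length `state_space_size` for integer `state`."""
    vec = np.zeros(state_space_size, dtype=np.float32)
    vec[state] = 1.0
    return vec

# _one_hot(3) -> array([0., 0., 0., 1., 0., 0., 0., 0.], dtype=float32)
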
eval_config = {
"evaluation_interval": 1, # I think this means every x training_iterations
"evaluation_config": {
"explore": False,
"exploration_fraction": 0,
"exploration_final_eps": 0,
"evaluation_num_episodes": 10,
"horizon": 100,
"env_config": {
"dummy_eval": True, # hack Used to check if we are in evaluation mode or training mode inside Ray callback on_episode_end() to be able to write eval stats
"transition_noise": 0
if "state_space_type" in env_config["env_config"]
and env_config["env_config"]["state_space_type"] == "discrete"
else tune.function(lambda a: a.normal(0, 0)),
"reward_noise": tune.function(lambda a: a.normal(0, 0)),
"action_loss_weight": 0.0,
},
},
}
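
# A minimal sketch of how these pieces could be combined into one flat
# RLlib-style config with tune.grid_search over the varied values. Illustrative
# only; the repo's experiment runner may assemble the final config differently
# (names prefixed with "_" are hypothetical):
_full_config = dict(agent_config)
_full_config.update(model_config)
_full_config.update(eval_config)
_full_config["env"] = env_config["env"]
_full_config["horizon"] = env_config["horizon"]
_full_config["env_config"] = {
    **env_config["env_config"],
    **{k: tune.grid_search(v) for k, v in var_env_configs.items()},
}
# agent-level varied values (here, "lr") sit at the top level of the config
_full_config.update({k: tune.grid_search(v) for k, v in var_agent_configs.items()})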