rllib_ppo.py
import ray
from ray import tune
from ray.rllib.algorithms.ppo import PPOConfig
from ray.rllib.policy.policy import PolicySpec
from ray.tune import register_env

import rllib_setup

def train(jobs, machine, rollout_workers=10, allocation_mechanism="FirstPrice"):
    env_name = "VJS" + allocation_mechanism
    alg_name = "PPO"
    # Register the environment under a name that Tune/RLlib can resolve by string.
    register_env(
        env_name,
        lambda config: rllib_setup.get_env_continuous(
            jobs, machine, allocation_mechanism
        ),
    )
    # Throwaway instance, used only to read the spaces and agent ids below.
    test_env = rllib_setup.get_env_continuous(jobs, machine, allocation_mechanism)

    def policies(agent_ids):
        # One policy per agent; all agents share the same observation/action spaces.
        obs_space = test_env.observation_space
        act_space = test_env.action_space
        return {
            str(i): (
                None,  # None -> use the algorithm's default policy class
                obs_space,
                act_space,
                {},
                # config=config.overrides(agent_id=int(i[8:])),
            )
            for i in agent_ids
        }

    config = (
        PPOConfig()
        .rollouts(num_rollout_workers=rollout_workers, rollout_fragment_length=30)
        .training(vf_clip_param=1080)
        .resources(num_gpus=1)
        .multi_agent(
            policies=policies(test_env._agent_ids),
            policy_mapping_fn=lambda agent_id, episode, **kwargs: str(agent_id),
        )
        .environment(env=env_name, disable_env_checking=True)
        .evaluation(evaluation_interval=10)
    )
    # print(config.to_dict())
    # Build an Algorithm object from the config and run one training iteration:
    # algo = config.build(env=env_name)
    tune.run(
        alg_name,
        name="PPO" + str(machine) + "_" + str(jobs),
        stop={"episodes_total": 10000},
        checkpoint_freq=10,
        config=config.to_dict(),
    )
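

# Minimal sketch of how train() might be invoked. The job/machine counts and the
# explicit ray.init() call are illustrative assumptions, not values taken from
# this repository.
if __name__ == "__main__":
    ray.init()
    train(jobs=10, machine=3, rollout_workers=10, allocation_mechanism="FirstPrice")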