STATICbattle_simulation.py
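"""Training driver for three NetworkAgents in a battle-royale simulation.

Builds three agents on three fully connected virtual machines, trains for N
episodes, periodically refits and checkpoints agent A's value network, and
plots the average match length and average reward over training.
"""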
import battle_royale as b
import machine as m
import newnetworkagent as n
import matplotlib.pyplot as plt
from tensorflow.keras.models import load_model  # only needed when resuming from a saved model

def no_delay():
    # Zero-latency link: every message between machines is delivered instantly.
    return 0
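
# Training hyperparameters: episode count, initial learning (alpha) and
# exploration (epsilon) rates, and their decay factors.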
N = 1_000
ALPHA = 1.0
EPSILON = 0.8
ALPHA_DECAY = 0.999
EPSILON_DECAY = 0.999
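
# Three learning agents, one per simulated machine.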
a1 = n.NetworkAgent(None, "A", epsilon=EPSILON, alpha=ALPHA, decay_alpha=ALPHA_DECAY, decay_epsilon=EPSILON_DECAY)
a2 = n.NetworkAgent(None, "B", epsilon=EPSILON, alpha=ALPHA, decay_alpha=ALPHA_DECAY, decay_epsilon=EPSILON_DECAY)
a3 = n.NetworkAgent(None, "C", epsilon=EPSILON, alpha=ALPHA, decay_alpha=ALPHA_DECAY, decay_epsilon=EPSILON_DECAY)
agents = [a1, a2, a3]

# Each agent is constructed with world=None and gets its own BattleRoyale
# instance (containing all three agents) assigned afterwards.
w1 = b.BattleRoyale(agents)
w2 = b.BattleRoyale(agents)
w3 = b.BattleRoyale(agents)
a1.world = w1
a2.world = w2
a3.world = w3

m1 = m.Machine(a1, "VM1")
m2 = m.Machine(a2, "VM2")
m3 = m.Machine(a3, "VM3")
machines = [m1, m2, m3]
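
# Fully connect the machines: one zero-delay Connection per direction.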
c1_2 = m.Connection(m1, m2, no_delay)
c1_3 = m.Connection(m1, m3, no_delay)
m1.add_connection(m2, c1_2)
m1.add_connection(m3, c1_3)

c2_1 = m.Connection(m2, m1, no_delay)
c2_3 = m.Connection(m2, m3, no_delay)
m2.add_connection(m1, c2_1)
m2.add_connection(m3, c2_3)

c3_1 = m.Connection(m3, m1, no_delay)
c3_2 = m.Connection(m3, m2, no_delay)
m3.add_connection(m1, c3_1)
m3.add_connection(m2, c3_2)
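
# Optional: resume training from previously saved models by uncommenting
# the block below.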
# a1.value_approximator.model = load_model("model\\STATIC"+m1.name)
# a2.value_approximator.model = load_model("model\\STATIC"+m2.name)
# a3.value_approximator.model = load_model("model\\STATIC"+m3.name)
# a1.has_model = True
# a2.has_model = True
# a3.has_model = True

# Logged series for plotting: checkpoint episode, average match length, and
# average reward, plus the running accumulators between checkpoints.
x = []
y = []
y_r = []
avg_t = 0
avg_r = 0
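
# Main training loop: run N episodes; every `interval` episodes, log the
# running averages, refit agent A's value network, and save a checkpoint.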
for i in range(N):
    interval = 50  # episodes between logging/checkpoint steps
    print(i)
    quit = False
    t = 0
    # One episode: advance the simulation in 30-tick steps until the battle
    # finishes or a 10,000-tick cap is reached.
    while t < 10_000 and not quit:
        machine = m1
        machine.activate(t)
        if machine.world.episode_complete:
            quit = True
        t += 30
    avg_t += t
    avg_r += m1.agent.reward
    m1.world.reset()
    if i % interval == 0 and i != 0:
        print(i, avg_t / interval, avg_r / interval / 4)
        x.append(i)
        y.append(avg_t / interval)
        y_r.append(avg_r / interval / 4)
        avg_t = 0
        avg_r = 0
        print(i)
        m1.agent.refit_model()
        m1.agent.value_approximator.model.save("model/STATICNEW" + machine.name)
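
# Plot the average match length and average reward curves logged during training.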
plt.plot(x, y)
plt.xlabel("Episode")
plt.ylabel("Average length of match")
plt.show()

plt.plot(x, y_r)
plt.xlabel("Episode")
plt.ylabel("Average reward across all agents")
plt.show()

# Save the final model once more after training completes.
machine = m1
machine.agent.value_approximator.model.save("model/STATICNEW" + machine.name)