-
Notifications
You must be signed in to change notification settings - Fork 0
/
scale_tests.py
executable file
·237 lines (205 loc) · 8.8 KB
/
scale_tests.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
import argparse
import math
import os
import time
import sqlite3
import subprocess
import sys
import glob
import parsl
from parsl.config import Config
from parsl.executors import HighThroughputExecutor
from parsl.app.app import python_app, bash_app
from parsl.launchers import SimpleLauncher
from parsl.launchers import SingleNodeLauncher
from parsl.addresses import address_by_hostname, address_by_interface
from parsl.launchers import AprunLauncher
from parsl.providers import TorqueProvider, CobaltProvider
# Command-line interface: scaling-sweep bounds, trial counts, and scheduler knobs.
parser = argparse.ArgumentParser()
_cli_options = [
    ("-i", "--min_workers", int, 1, "minimum workers"),
    ("-a", "--max_workers", int, 1, "maximum workers"),
    ("-r", "--trials", int, 5, "number of trials per batch submission"),
    ("-t", "--tasks_per_trial", int, 1, "number of tasks per trial"),
    ("-c", "--cores_per_node", int, 12, "cores per node"),
    ("-w", "--walltime", str, '00:40:00', "walltime"),
    ("-q", "--queue", str, 'pubnet-debug', "queue"),
]
for _short, _long, _type, _default, _help in _cli_options:
    parser.add_argument(_short, _long, type=_type, default=_default, help=_help)
args = parser.parse_args()
# parsl.set_stream_logger()
# Results store: one row per trial, written after each batch of tasks returns.
# NOTE: "if not exists" means a pre-existing data.db keeps its old schema.
db = sqlite3.connect('data.db')
db.execute("""create table if not exists tasks(
    executor text,
    start_submit float,
    end_submit float,
    returned float,
    connected_workers int,
    tasks_per_trial int,
    tag text)"""
)
# Scaling sweep: for each worker count (doubling from min_workers up to
# max_workers), stand up a fresh parsl DataFlowKernel, wait for the expected
# managers to connect, prime the workers, then time batches of no-op / sleep
# tasks and record the results in the sqlite database.
target_workers = args.min_workers
while target_workers <= args.max_workers:
    #subprocess.call("qstat -u $USER | awk '{print $1}' | grep -o [0-9]* | xargs qdel", shell=True)
    # Walltime is currently pinned; the per-trial scaled estimate was disabled.
    needed_time = 2500
    #needed_time = args.tasks_per_trial * args.trials * 2 / target_workers
    #if needed_time <= 1800: needed_time = 1800
    walltime = time.strftime('%H:%M:%S', time.gmtime(needed_time))
    print("The walltime for {} workers is {}".format(target_workers, walltime))

    # Map target_workers onto nodes: one partial node when the count does not
    # divide evenly, otherwise fully-packed nodes.
    if target_workers % args.cores_per_node != 0:
        nodes_per_block = 1
        tasks_per_node = target_workers % args.cores_per_node
    else:
        nodes_per_block = int(target_workers / args.cores_per_node)
        tasks_per_node = args.cores_per_node

    config = Config(
        executors=[
            HighThroughputExecutor(
                label="funcx_local",
                # worker_debug=True,
                worker_mode="singularity_reuse",
                container_image=os.path.expanduser("~/sing-run.simg"),
                cores_per_worker=int(args.cores_per_node / tasks_per_node),
                max_workers=1,
                address=address_by_interface("eth0"),
                provider=CobaltProvider(
                    launcher=SingleNodeLauncher(),
                    init_blocks=1,
                    max_blocks=1,
                    queue=args.queue,
                    account='DLHub',
                    worker_init="source activate funcx_5"
                ),
            )
        ],
        run_dir="/home/tskluzac/FuncX/evaluation/runinfo",
        strategy=None,
    )

    # Fresh DFK per worker count so each sweep point is isolated.
    parsl.clear()
    dfk = parsl.load(config)
    executor = list(dfk.executors.values())[0]

    # Benchmark apps. Defined inside the loop so they bind to the current DFK.
    @python_app
    def noop():
        pass

    @python_app
    def sleep10ms():
        import time
        time.sleep(0.01)

    @python_app
    def sleep100ms():
        import time
        time.sleep(0.1)

    @python_app
    def sleep1000ms():
        import time
        time.sleep(1.0)

    @python_app
    def sleep10s():
        import time
        time.sleep(10.0)

    @python_app
    def sleep100s():
        import time
        time.sleep(100.0)

    # Wait until the expected number of managers connect, then prime the
    # workers (first-task overhead, e.g. container start-up) before timing.
    attempt = 0
    #cmd = 'ls {} | wc -l'.format(os.path.join(executor.run_dir, executor.label, '*', '*worker*'))
    path = os.path.join(executor.run_dir, executor.label, '*', '*worker*')
    print("Priming...")
    while True:
        #connected_workers = int(subprocess.check_output(cmd, shell=True))
        #connected_workers = len(glob.glob(path, recursive=True))
        connected_managers = len(executor.connected_workers)
        if connected_managers < nodes_per_block:
            print('attempt {}: waiting for {} managers, but only found {}'.format(attempt, nodes_per_block, connected_managers))
            time.sleep(30)
            attempt += 1
        else:
            # BUG FIX: the original primed with bash_dials_1(), a bash app
            # whose definition was commented out, so this raised NameError.
            # Prime with noop() instead — one task per worker.
            tasks = [noop() for _ in range(0, target_workers)]
            [t.result() for t in tasks]
            dfk.tasks = {}
            break

    for app in [noop, sleep10ms, sleep100ms, sleep1000ms]:
        #for app in [noop, sleep10ms, sleep100ms, sleep1000ms, sleep10s, sleep100s]:
        sum1 = sum2 = 0  # accumulated submit / round-trip times (ms)
        for trial in range(args.trials):
            try:
                start_submit = time.time()
                tasks = [app() for _ in range(0, args.tasks_per_trial)]
                end_submit = time.time()
                [t.result() for t in tasks]
                returned = time.time()
                data = (
                    executor.label,
                    start_submit,
                    end_submit,
                    returned,
                    target_workers,
                    args.tasks_per_trial,
                    app.__name__
                )
                print('inserting {}'.format(str(data)))
                db.execute("""
                    insert into
                    tasks(executor, start_submit, end_submit, returned, connected_workers, tasks_per_trial, tag)
                    values (?, ?, ?, ?, ?, ?, ?)""", data
                )
                db.commit()
                t1 = (end_submit - start_submit) * 1000
                t2 = (returned - start_submit) * 1000
                sum1 += t1
                sum2 += t2
                print("Submitted time is %.6f ms" % t1)
                print("Running time is %.6f ms\n" % t2)
            except Exception as e:
                # Best-effort: log and continue with the next trial.
                print(e)
            # Clear task bookkeeping so DFK state does not grow across trials.
            dfk.tasks = {}
        # NOTE: failed trials contribute 0, so these averages assume all
        # args.trials succeeded.
        print("The average submitted time of {} is {}".format(app.__name__, sum1/args.trials))
        print("The average running time of {} is {}".format(app.__name__, sum2/args.trials))

    target_workers *= 2
    executor.shutdown()
    del dfk