forked from OSC/bc_example_jupyter
-
Notifications
You must be signed in to change notification settings - Fork 1
/
form.yml.erb
137 lines (135 loc) · 4.17 KB
/
form.yml.erb
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
<%-
require 'open3'
begin
  # Partitions this app may submit to, with their per-partition limits.
  # The three arrays are parallel: oodqueues[i] allows maxcores[i] cores
  # and hasgpu[i] says whether a GPU can be requested.
  oodqueues = ['classroomgpu', 'cryogars', 'ondemand', 'ondemand-p100']
  maxcores = [14, 32, 4, 14]
  hasgpu = [true, true, false, true]

  # Ask Slurm for every partition name visible to the submitting user.
  script = '/cm/shared/apps/slurm/current/bin/sinfo -h --format="%P"'

  # Queues available to this user, with matching core/GPU limits
  # (populated below; consumed by the ERB in the YAML body).
  partitions = []
  pmaxcores = []
  phasgpu = []

  # Run sinfo through bash and capture stdout + exit status.
  output, status = Open3.capture2('bash', stdin_data: script)
  if status.success?
    output.split("\n").each do |queue|
      # sinfo marks the default partition with a trailing '*'; strip it
      # BEFORE the membership test, otherwise the default partition
      # (e.g. "ondemand*") never matches and is silently dropped.
      queue = queue.gsub("*", "")
      next unless oodqueues.include?(queue)

      qindex = oodqueues.index(queue)
      partitions.push(queue)
      pmaxcores.push(maxcores[qindex])
      phasgpu.push(hasgpu[qindex])
    end
  else
    # sinfo ran but exited non-zero; the form shows an error instead
    # of a partition menu.
    partition_error = "Error"
  end
rescue => e
  # Any unexpected failure (e.g. bash/sinfo missing) is surfaced the
  # same way as a non-zero exit.
  partition_error = e.message.strip
end
-%>
---
# Cluster this batch-connect app submits jobs to.
cluster: "borah"

# Order in which the fields appear on the interactive-app form.
form:
  - version
  - extra_jupyter_args
  - custom_queue
  - num_cores
  - enable_gpu
  - bc_num_hours
  - jupyterlab_switch
  - bc_email_on_started
  - extra_commands

attributes:
  # Jupyter module/version to load for the session.
  version:
    widget: select
    label: "Jupyter version"
    help: "This defines the version of Jupyter you want to load."
    options:
      - ["4.9.2", "jupyter/4.9.2"]
      - ["5.0.0", "jupyter/5.0.0"]

  # Slurm partition menu, populated dynamically from the ERB header above.
  # Help text only documents partitions the user can actually see.
  custom_queue:
    label: Partition
    widget: select
    help: |
      <%- if partitions.include?("classroomgpu") -%>
      - **classroomgpu** <br>
      These are HPC nodes with 2 [NVIDIA Tesla P100 GPUs], 28 cores, and
      251 GB of memory.
      By selecting the "classroomgpu" partition, you will be given up to 14
      cores and 1 GPU.
      [NVIDIA Tesla P100 GPUs]: http://www.nvidia.com/object/tesla-p100.html
      <%- end -%>
      <%- if partitions.include?("cryogars") -%>
      - **cryogars** <br>
      This node has 2 [NVIDIA A30 GPUs], 64 cores, and 1 TB of memory.
      By selecting the "cryogars" partition, you will be given up to 32 cores
      and 1 GPU.
      [NVIDIA A30 GPUs]: https://www.nvidia.com/a30
      <%- end -%>
      - **ondemand** <br>
      Standard Borah nodes have 48 cores and 186 GB of memory.
      By selecting the "ondemand" partition, you will be given up to 4 cores.
      - **ondemand-p100** <br>
      These are HPC nodes with 2 [NVIDIA Tesla P100 GPUs], 28 cores, and
      251 GB of memory.
      By selecting the "ondemand-p100" partition, you will be given up to 14
      cores and 1 GPU.
      [NVIDIA Tesla P100 GPUs]: http://www.nvidia.com/object/tesla-p100.html
      <%- if partition_error || partitions.blank? -%>
      <div class="text-danger">Error while fetching Partition. Please contact
      support!</div>
      <%- else -%>
    # One option per discovered partition; data-* entries drive the
    # dynamic form (cap the core picker, hide the GPU checkbox on
    # partitions without GPUs).
    options:
      <%- partitions.zip(pmaxcores, phasgpu).each do |q, maxcore, gpu| -%>
      - [
          "<%= q %>", "<%= q %>",
          data-max-num-cores: <%= maxcore %>,
          <%- unless gpu -%>
          data-hide-enable-gpu: true,
          data-set-enable-gpu: 0
          <%- end -%>
        ]
      <%- end -%>
      <%- end -%>

  # Any extra command line arguments to feed to the `jupyter notebook ...`
  # command that launches the Jupyter notebook within the batch job
  extra_jupyter_args: ""

  extra_commands:
    widget: text_area
    label: "Additional commands to run"
    help: |
      Additional commands to run, e.g., loading a module or setting an
      environment variable. If you're not sure, just leave this blank.

  # Core count; the effective max is overridden per-partition via
  # data-max-num-cores on the custom_queue options.
  num_cores:
    widget: number_field
    label: "Number of cores"
    value: 1
    min: 1
    max: 1

  enable_gpu:
    widget: check_box
    label: "Enable GPU"
    help:

  # Walltime in hours.
  bc_num_hours:
    value: 1
    min: 1
    max: 4

  jupyterlab_switch:
    widget: "check_box"
    value: 1
    label: Use JupyterLab instead of Jupyter Notebook?
    help: |
      JupyterLab is the next generation of Jupyter with an IDE-like experience,
      and is completely compatible with existing Jupyter Notebooks.