## Get your hostname with the following command:
## $ hostname -f
##
## Configure environment per hostname:
## [hostname1]
## ...
##
## Use the same environment for multiple hostnames:
## [hostname2, hostname3, ...]
## ...
##
## Using a group:
## [hostname1, hostname2, ... : group]
## [group]
## ...
##
## Using an asterisk (*) as a wildcard in hostnames (IMPORTANT: only one * is allowed per hostname)
##
## [host*name1]
##
## [*hostname2, hostname3*]
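##
## Example (a minimal sketch; the hostnames, group name 'mylab', and values
## below are hypothetical and only illustrate the syntax described above):
##
## [node*.mylab.edu, login1.mylab.edu : mylab]
## [mylab]
## conda_env = bds_atac
## nth = 4           # number of threads for each pipeline
## system = slurm    # submit pipeline tasks through SLURM on these hosts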
# Stanford Kundaje group clusters (not managed by SGE)
[vayu, mitra, durga]
conda_env = bds_atac
conda_env_py3 = bds_atac_py3
conda_bin_dir = /software/miniconda3/bin
species_file = $script_dir/species/kundaje.conf
unlimited_mem_wt = true # unlimited max. memory and walltime on Kundaje clusters
nice = 20
nth = 3
# Stanford Kundaje group clusters (controlled with SGE)
[nandi, kali, amold, wotan, kadru, surya, indra, brahma]
conda_env = bds_atac
conda_env_py3 = bds_atac_py3
conda_bin_dir = /software/miniconda3/bin
species_file = $script_dir/species/kundaje.conf
unlimited_mem_wt = true # unlimited max. memory and walltime on Kundaje clusters
system = sge
nice = 20
nth = 3
# Stanford NEW SCG
[*.scg.stanford.edu, dper730xd*, hppsl230s*, dper910*, sgiuv*, sgisummit*, smsx10srw*]
conda_env = bds_atac
conda_env_py3 = bds_atac_py3
species_file = $script_dir/species/scg.conf
nth = 4 # number of threads for each pipeline
system = slurm # force to use SLURM on SCG
q_for_slurm_account = true # use --account instead of -p (partition)
cluster_task_delay = 10 # for NFS delayed write
# Stanford OLD SCG4
[scg*.stanford.edu, scg*.local, carmack.stanford.edu, crick.stanford.edu]
conda_env = bds_atac
conda_env_py3 = bds_atac_py3
species_file = $script_dir/species/scg.conf
nth = 4 # number of threads for each pipeline
system = sge # force to use SGE (Sun Grid Engine) on SCG3/4 even if a user does not explicitly specify it on the command line with 'bds -s sge atac.bds ...'
cluster_task_delay = 10 # for NFS delayed write
# Stanford Sherlock clusters
[sherlock*.stanford.edu, sh-*.local, sh-*.int, sh-ln*.stanford.edu]
conda_env = bds_atac
conda_env_py3 = bds_atac_py3
species_file = $script_dir/species/sherlock.conf
nth = 4 # number of threads for each pipeline
system = slurm
cluster_task_delay = 30 # for NFS delayed write
# default (fallback for hostnames not matched above)
[default]
conda_env = bds_atac
conda_env_py3 = bds_atac_py3
species_file = # use your own species file here. (DEF_SPECIES_FILE: DO NOT REMOVE THIS COMMENT!)
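## Example only (hypothetical path; a minimal sketch of pointing the default
## environment at your own species file, kept commented out here):
## species_file = $script_dir/species/my_species.conf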