## Get hostname with the following command:
## $ hostname -f
##
## Configure an environment per hostname:
## [hostname1]
## ...
##
## Use the same environment for multiple hostnames:
## [hostname2, hostname3, ...]
## ...
##
## Using a group:
## [hostname1, hostname2, ... : group]
## [group]
## ...
##
## Using an asterisk (wildcard) in hostnames (IMPORTANT: only one * is allowed per hostname):
##
## [host*name1]
##
## [*hostname2, hostname3*]
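##
## Example (hypothetical: nodeA1, nodeA2 and my_cluster are placeholder names,
## shown only to illustrate the group syntax above; the keys mirror the real
## sections below):
## [nodeA1, nodeA2 : my_cluster]
## [my_cluster]
## conda_env = aquas_chipseq
## nth = 4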
# Stanford Kundaje group clusters (not managed by SGE)
[vayu, mitra, durga]
conda_env = aquas_chipseq
conda_env_py3 = aquas_chipseq_py3
conda_bin_dir = /software/miniconda3/bin
species_file = $script_dir/species/kundaje.conf
unlimited_mem_wt = true # unlimited max. memory and walltime on Kundaje clusters
nice = 10
nth = 4
# Stanford Kundaje group clusters (managed by SGE)
[nandi, kali, amold, wotan, kadru, surya, indra, brahma]
conda_env = aquas_chipseq
conda_env_py3 = aquas_chipseq_py3
conda_bin_dir = /software/miniconda3/bin
species_file = $script_dir/species/kundaje.conf
unlimited_mem_wt = true # unlimited max. memory and walltime on Kundaje clusters
system = sge # force to use SGE (Sun Grid Engine)
nice = 20
nth = 4
# Stanford NEW SCG
[*.scg.stanford.edu, dper730xd*, hppsl230s*, dper910*, sgiuv*, sgisummit*, smsx10srw*]
conda_env = aquas_chipseq
conda_env_py3 = aquas_chipseq_py3
species_file = $script_dir/species/scg.conf
nth = 4 # number of threads for each pipeline run
system = slurm # force to use SLURM on the new SCG
q_for_slurm_account = true # use --account instead of -p (partition)
cluster_task_delay = 10 # to account for delayed NFS writes
# Stanford OLD SCG : login nodes, compute nodes, file transfer servers
[scg*.stanford.edu, scg*.local, carmack.stanford.edu, crick.stanford.edu]
conda_env = aquas_chipseq
conda_env_py3 = aquas_chipseq_py3
species_file = $script_dir/species/scg.conf
nth = 8 # number of threads for each pipeline run
wt_spp = 72h # walltime for spp
system = sge # force to use SGE (Sun Grid Engine) on SCG3/4 even if the user does not explicitly specify SGE on the command line with 'bds -s sge chipseq.bds ...'
cluster_task_delay = 10
# Stanford Sherlock clusters
[sherlock*.stanford.edu, sh-*.local, sh-*.int, sh-ln*.stanford.edu]
conda_env = aquas_chipseq
conda_env_py3 = aquas_chipseq_py3
species_file = $script_dir/species/sherlock.conf
nth = 8 # number of threads for each pipeline run
wt_spp = 47h # walltime for spp
system = slurm # force to use SLURM
cluster_task_delay = 30
# default (used if no section matching the hostname is found)
[default]
conda_env = aquas_chipseq
conda_env_py3 = aquas_chipseq_py3
species_file = # use your own species file here. (DEF_SPECIES_FILE: DO NOT REMOVE THIS COMMENT!)
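## Example of a user-defined species file (hypothetical path, for illustration only):
## species_file = $script_dir/species/my_species.conf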