Add sunspot #196

Open
wants to merge 1 commit into main
84 changes: 84 additions & 0 deletions mache/machines/sunspot.cfg
@@ -0,0 +1,84 @@
# Options related to deploying an e3sm-unified conda environment on supported
# machines
[e3sm_unified]

# the unix group for permissions for the e3sm-unified conda environment
group = e3sm

# the compiler set to use for system libraries and MPAS builds
compiler = oneapi-ifx

# the system MPI library to use with the oneapi-ifx compiler
mpi = mpich

# the path to the directory where activation scripts, the base environment, and
# system libraries will be deployed
base_path = TODO

# whether to use system modules for hdf5, netcdf-c, netcdf-fortran and pnetcdf
# (spack modules are used otherwise)
use_system_hdf5_netcdf = True


# config options related to data needed by diagnostics software such as
# e3sm_diags and MPAS-Analysis
[diagnostics]

# The base path to the diagnostics directory
base_path = TODO

# the unix group for permissions for diagnostics
group = e3sm


# config options associated with web portals
[web_portal]

# The path to the base of the web portals
base_path = TODO

# The base URL that corresponds to the base path
base_url = TODO


# The parallel section describes options related to running jobs in parallel
[parallel]

# parallel system of execution: slurm, cobalt or single_node
system = slurm

# the executable (e.g. mpirun or srun) used to run a task in parallel
parallel_executable = srun

# cores per node on the machine (with hyperthreading)
cores_per_node = 208

# account for running diagnostics jobs
account = CSC249ADSE15_CNDA

# available constraint(s) (default is the first)
constraints = gpu

# quality of service (default is the first)
qos = regular, debug, premium

# Config options related to spack environments
[spack]

# whether to load modules from the spack yaml file before loading the spack
# environment
modules_before = False

# whether to load modules from the spack yaml file after loading the spack
# environment
modules_after = False

# whether the machine uses cray compilers
cray_compilers = True


# config options related to synchronizing files
[sync]

# the full hostname of the machine
hostname = TODO
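
For context (not part of the diff): the file above is a standard INI-style config, so a minimal sketch of reading it with Python's built-in `configparser` looks like the following. The section and option names come straight from the diff; treating the file as a plain INI file parsed this way is an assumption about how mache consumes it, and the path is relative to the repository root.

```python
from configparser import ConfigParser

# Parse the machine config added in this PR (assumed path within the repo).
config = ConfigParser()
config.read('mache/machines/sunspot.cfg')

# Options still marked TODO in the diff come back as the literal string 'TODO'.
unified_group = config.get('e3sm_unified', 'group')                       # 'e3sm'
compiler = config.get('e3sm_unified', 'compiler')                         # 'oneapi-ifx'
use_system = config.getboolean('e3sm_unified', 'use_system_hdf5_netcdf')  # True

cores_per_node = config.getint('parallel', 'cores_per_node')              # 208
account = config.get('parallel', 'account')                               # 'CSC249ADSE15_CNDA'
# The comments in the file say the default is the first entry in a list.
qos_default = config.get('parallel', 'qos').split(',')[0].strip()         # 'regular'

print(compiler, cores_per_node, account, qos_default)
```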
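The `[parallel]` section is the part most tools would act on directly. Below is a hypothetical sketch of how a launch command could be assembled from those options; `launch_command` is an illustrative helper, not mache's actual job-launch API, and the node calculation assumes `cores_per_node` (hyperthreaded cores, per the comment in the file) is the right divisor.

```python
from configparser import ConfigParser


def launch_command(config_path: str, ntasks: int) -> list[str]:
    """Build an illustrative parallel launch command from the [parallel] section."""
    config = ConfigParser()
    config.read(config_path)

    executable = config.get('parallel', 'parallel_executable')    # 'srun' in this file
    cores_per_node = config.getint('parallel', 'cores_per_node')  # 208
    account = config.get('parallel', 'account')

    # Round up to whole nodes, counting hyperthreaded cores as the file does.
    nodes = -(-ntasks // cores_per_node)

    if executable == 'srun':
        return [executable, f'--account={account}', f'--nodes={nodes}',
                f'--ntasks={ntasks}']
    # Fall back to a generic mpirun-style invocation.
    return [executable, '-n', str(ntasks)]


# Example: 416 tasks fit on 2 nodes at 208 cores per node.
print(' '.join(launch_command('mache/machines/sunspot.cfg', 416)))
```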