diff --git a/.gitignore b/.gitignore
new file mode 100644
index 00000000..611a79e9
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,141 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# pycharm
+.idea/
diff --git a/README.rst b/README.rst
new file mode 100644
index 00000000..be233f5a
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,107 @@
+========================
+mache: Machines for E3SM
+========================
+
+A package for providing configuration data related to E3SM supported machines.
+
+Example usage:
+
+.. code-block:: python
+
+ #!/usr/bin/env python
+ from mache import MachineInfo
+
+ machinfo = MachineInfo(machine='anvil')
+ print(machinfo)
+
+This loads machine info for Anvil and prints it:
+
+.. code-block:: none
+
+ Machine: anvil
+ E3SM Supported Machine? True
+ Compilers: intel, gnu
+ MPI libraries: mvapich, impi
+ OS: LINUX
+ E3SM-Unified:
+ Base path: /lcrc/soft/climate/e3sm-unified
+ E3SM-Unified is not currently loaded
+ Diagnostics:
+ Base path: /lcrc/group/e3sm/diagnostics
+
+If you are on the login node of one of the following E3SM supported machines,
+you don't need to provide the machine name. It will be recognized from the
+host name:
+
+* acme1
+
+* andes
+
+* anvil
+
+* badger
+
+* chrysalis
+
+* compy
+
+* cooley
+
+* cori-haswell (but you will get a warning)
+
+* grizzly
+
+If you are on a compute node or want info about a machine you're not currently
+on, give the ``machine`` name in all lowercase.
+
+
+Attributes
+----------
+
+The attributes currently available are:
+
+machine : str
+ The name of an E3SM supported machine
+
+config : configparser.ConfigParser
+ Config options for this machine
+
+e3sm_supported : bool
+ Whether this machine supports running E3SM itself, and therefore has
+ a list of compilers, MPI libraries, and the modules needed to load them
+
+compilers : list
+ A list of compilers for this machine if ``e3sm_supported == True``
+
+mpilibs : list
+ A list of MPI libraries for this machine if ``e3sm_supported == True``
+
+os : str
+ The machine's operating system if ``e3sm_supported == True``
+
+e3sm_unified_mpi : {'nompi', 'system', None}
+ Which MPI type is included in the E3SM-Unified environment (if one is
+ loaded)
+
+e3sm_unified_base : str
+ The base path where E3SM-Unified and its activation scripts are
+ installed if ``e3sm_unified`` is not ``None``
+
+e3sm_unified_activation : str
+ The activation script used to activate E3SM-Unified if ``e3sm_unified``
+ is not ``None``
+
+diagnostics_base : str
+ The base directory for diagnostics data
+
+License
+-------
+
+Copyright (c) 2021, Energy Exascale Earth System Model Project
+All rights reserved
+
+SPDX-License-Identifier: (BSD-3-Clause)
+
+See `LICENSE <./LICENSE>`_ for details
+
+Unlimited Open Source - BSD 3-clause Distribution ``LLNL-CODE-819717``
diff --git a/mache/__init__.py b/mache/__init__.py
new file mode 100644
index 00000000..7e0e3db1
--- /dev/null
+++ b/mache/__init__.py
@@ -0,0 +1,4 @@
+from mache.machine_info import MachineInfo
+
+__version_info__ = (1, 0, 0)
+__version__ = '.'.join(str(vi) for vi in __version_info__)
diff --git a/mache/cime_machine_config/__init__.py b/mache/cime_machine_config/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/mache/cime_machine_config/config_compilers.xml b/mache/cime_machine_config/config_compilers.xml
new file mode 100644
index 00000000..6e335c73
--- /dev/null
+++ b/mache/cime_machine_config/config_compilers.xml
@@ -0,0 +1,2391 @@
+
+
+
+
+
+ FALSE
+
+
+
+
+ -h omp
+ -g -O0
+
+
+
+ -DFORTRANUNDERSCORE -DNO_R16 -DCPRCRAY
+
+
+ -s real64
+
+
+ -f free -N 255 -h byteswapio -em
+ -h omp
+ -M1077
+ -g -O0
+
+
+ -O0
+
+ TRUE
+
+ -Wl,--allow-multiple-definition -h byteswapio
+ -h omp
+
+ TRUE
+ FORTRAN
+
+
+
+
+ -mcmodel=medium
+ -fopenmp
+ -g -Wall -fbacktrace -fcheck=bounds -ffpe-trap=invalid,zero,overflow
+ -O
+ -std=c99
+
+
+ -std=c++14
+ -fopenmp
+ -g -Wall -fbacktrace
+ -O
+
+
+ -D CISM_GNU=ON
+
+
+
+ -DFORTRANUNDERSCORE -DNO_R16 -DCPRGNU
+
+
+ -DYAKL_DEBUG
+
+ FORTRAN
+
+ -fdefault-real-8
+
+
+
+ -mcmodel=medium -fconvert=big-endian -ffree-line-length-none -ffixed-line-length-none
+ -fopenmp
+
+ -g -Wall -fbacktrace -fcheck=bounds -ffpe-trap=zero,overflow
+ -O
+
+
+ -O0
+
+
+ -ffixed-form
+
+
+ -ffree-form
+
+ FALSE
+
+ -fopenmp
+
+ mpicc
+ mpicxx
+ mpif90
+ gcc
+ g++
+ gfortran
+ TRUE
+
+
+
+
+ -mcmodel=medium
+ -fopenmp
+ -g -Wall -fbacktrace -fcheck=bounds -ffpe-trap=invalid,zero,overflow
+ -O
+ -std=c99
+
+
+ -std=c++14
+ -fopenmp
+ -g -Wall -fbacktrace
+ -O
+
+
+
+ -DFORTRANUNDERSCORE -DNO_R16 -DCPRGNU
+
+
+ -DYAKL_DEBUG
+
+ FORTRAN
+
+ -fdefault-real-8
+
+
+
+ -mcmodel=medium -fconvert=big-endian -ffree-line-length-none -ffixed-line-length-none
+ -fopenmp
+
+ -g -Wall -fbacktrace -fcheck=bounds -ffpe-trap=zero,overflow
+ -O
+
+
+ -O0
+
+
+ -ffixed-form
+
+
+ -ffree-form
+
+ FALSE
+
+ -fopenmp
+
+ mpicc
+ mpicxx
+ mpif90
+ gcc
+ g++
+ gfortran
+ TRUE
+
+
+
+
+ -g -qfullpath -qmaxmem=-1 -qphsinfo
+ -O3
+ -qsmp=omp
+ -qsmp=omp:noopt
+
+
+ -g -qfullpath -qmaxmem=-1 -qphsinfo
+ -O2
+ -qsmp=omp
+ -qsmp=omp:noopt
+
+
+
+ -DFORTRAN_SAME -DCPRIBM
+ -DUSE_CBOOL
+
+ -WF,-D
+
+ -qrealsize=8
+
+
+ -g -qfullpath -qmaxmem=-1 -qphsinfo
+ -O2 -qstrict -Q
+ -qsmp=omp
+ -qsmp=omp:noopt
+ -qinitauto=7FF7FFFF -qflttrap=ov:zero:inv:en
+
+
+ -O0
+
+
+ -qsuffix=f=f -qfixed=132
+
+
+ -qsuffix=f=f90:cpp=F90
+
+ TRUE
+
+
+
+
+ -O2 -fp-model precise -std=gnu99
+ -qopenmp
+ -O2 -debug minimal
+ -O0 -g
+
+
+ -std=c++14 -fp-model source
+ -qopenmp
+ -O0 -g
+ -O2
+
+
+
+ -DFORTRANUNDERSCORE -DNO_R16 -DCPRINTEL
+
+
+ -cxxlib
+
+ FORTRAN
+
+ -r8
+
+
+ -convert big_endian -assume byterecl -ftz -traceback -assume realloc_lhs -fp-model source
+ -qopenmp
+ -O0 -g -check uninit -check bounds -check pointers -fpe0 -check noarg_temp_created
+ -O2 -debug minimal
+
+
+ -O0
+
+
+ -fixed -132
+
+
+ -free
+
+ TRUE
+
+ -qopenmp
+
+ mpicc
+ mpicxx
+ mpif90
+ icc
+ icpc
+ ifort
+ TRUE
+
+
+
+
+ -g
+ -std=c99
+
+
+ -DFORTRANUNDERSCORE -DNO_CRAY_POINTERS -DNO_SHR_VMATH -DCPRNAG
+
+
+ -r8
+
+
+
+
+
+
+
+ -wmismatch=mpi_send,mpi_recv,mpi_bcast,mpi_allreduce,mpi_reduce,mpi_isend,mpi_irecv,mpi_irsend,mpi_rsend,mpi_gatherv,mpi_gather,mpi_scatterv,mpi_allgather,mpi_alltoallv,mpi_file_read_all,mpi_file_write_all,mpibcast,mpiscatterv,mpi_alltoallw,nfmpi_get_vara_all,NFMPI_IPUT_VARA,NFMPI_GET_VAR_ALL,NFMPI_PUT_VARA,NFMPI_PUT_ATT_REAL,NFMPI_PUT_ATT_DOUBLE,NFMPI_PUT_ATT_INT,NFMPI_GET_ATT_REAL,NFMPI_GET_ATT_INT,NFMPI_GET_ATT_DOUBLE,NFMPI_PUT_VARA_DOUBLE_ALL,NFMPI_PUT_VARA_REAL_ALL,NFMPI_PUT_VARA_INT_ALL -convert=BIG_ENDIAN
+
+ -ieee=full -O2
+ -g -time -f2003 -ieee=stop
+
+
+ -C=all -g -time -f2003 -ieee=stop
+ -gline
+ -openmp
+
+
+ -fixed
+
+
+ -free
+
+ FALSE
+
+ -openmp
+
+ mpicc
+ mpif90
+ gcc
+ nagfor
+
+
+
+
+ -mp
+
+
+
+ -DFORTRANUNDERSCORE -DNO_R16 -DCPRPATHSCALE
+
+
+ -r8
+
+
+ -O -extend_source -ftpp -fno-second-underscore -funderscoring -byteswapio
+ -mp
+ -g -trapuv -Wuninitialized
+
+
+ -O0
+
+ FALSE
+
+ -mp
+
+ mpicc
+ mpif90
+
+
+
+
+ -time
+ -mp
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ -DFORTRANUNDERSCORE -DNO_SHR_VMATH -DNO_R16 -DCPRPGI
+
+ CXX
+
+ -r8
+
+
+ -i4 -time -Mstack_arrays -Mextend -byteswapio -Mflushz -Kieee -Mallocatable=03
+ -mp
+ -O0 -g -Ktrap=fp -Mbounds -Kieee
+ -Mnovect
+ -Mnovect
+ -Mnovect
+ -Mnovect
+ -Mnovect
+ -Mnovect
+
+
+ -O0
+
+
+ -Mfixed
+
+
+ -Mfree
+
+
+
+ FALSE
+
+ -time -Wl,--allow-multiple-definition
+ -mp
+
+ mpicc
+ mpicxx
+ mpif90
+ pgcc
+ pgc++
+ pgf95
+
+
+
+
+ -time
+ -mp
+
+
+ -DFORTRANUNDERSCORE -DNO_SHR_VMATH -DNO_R16 -DUSE_CUDA_FORTRAN -DCPRPGI
+
+ CXX
+
+ -r8
+
+
+ -i4 -time -Mstack_arrays -Mextend -byteswapio -Mflushz -Kieee
+ -mp
+
+ -O0 -g -Ktrap=fp -Mbounds -Kieee
+ -Mnovect
+ -Mnovect
+ -Mnovect
+ -Mnovect
+ -Mnovect
+ -Mnovect
+
+
+ -O0
+
+
+ -Mfixed
+
+
+ -Mfree
+
+ FALSE
+
+ -time -Wl,--allow-multiple-definition -acc
+ -mp
+
+ mpicc
+ mpicxx
+ mpif90
+ pgcc
+ pgc++
+ pgf95
+
+
+
+
+ -qarch=auto -qtune=auto -qcache=auto
+
+ /usr/bin/bash
+
+ -qarch=auto -qtune=auto -qcache=auto -qsclk=micro
+ -qspill=6000
+
+
+ -qsigtrap=xl__trcedump
+ -bdatapsize:64K -bstackpsize:64K -btextpsize:32K
+
+ mpcc_r
+ mpxlf2003_r
+ cc_r
+ xlf2003_r
+
+ -lmassv -lessl
+ -lmass
+
+
+
+
+
+ -O3 -qstrict
+ -qtune=440 -qarch=440d
+
+
+ --build=powerpc-bgp-linux --host=powerpc64-suse-linux
+
+
+ -DLINUX -DnoI8
+
+
+ -qtune=440 -qarch=440d
+ -O3 -qstrict -Q
+ -qinitauto=FF911299 -qflttrap=ov:zero:inv:en
+ -qextname=flush
+
+
+ -Wl,--relax -Wl,--allow-multiple-definition
+
+
+ -L/bgl/BlueLight/ppcfloor/bglsys/lib -lmpich.rts -lmsglayer.rts -lrts.rts -ldevices.rts
+
+ blrts_xlc
+ blrts_xlf2003
+ mpich.rts
+ /bgl/BlueLight/ppcfloor/bglsys
+ blrts_xlc
+ blrts_xlf2003
+
+
+
+
+ -qtune=450 -qarch=450 -I/bgsys/drivers/ppcfloor/arch/include/
+
+
+ --build=powerpc-bgp-linux --host=powerpc64-suse-linux
+
+
+ -DLINUX -DnoI8
+
+
+ -qspillsize=2500 -qtune=450 -qarch=450
+ -qextname=flush
+
+
+ -Wl,--relax -Wl,--allow-multiple-definition
+
+
+
+
+
+ -qsmp=omp:nested_par -qsuppress=1520-045
+ -qsmp=omp:nested_par:noopt -qsuppress=1520-045
+
+
+ --build=powerpc-bgp-linux --host=powerpc64-suse-linux
+
+
+ -DLINUX
+
+
+ -g -qfullpath -qmaxmem=-1 -qspillsize=2500 -qextname=flush -qphsinfo
+ -O3 -qstrict -Q
+ -qsmp=omp:nested_par -qsuppress=1520-045
+ -qsmp=omp:nested_par:noopt -qsuppress=1520-045
+
+
+ -Wl,--relax -Wl,--allow-multiple-definition
+
+
+
+
+
+ -DCMAKE_SYSTEM_NAME=Catamount
+
+
+ -DLINUX
+ -DHAVE_NANOTIME -DBIT64 -DHAVE_VPRINTF -DHAVE_BACKTRACE -DHAVE_SLASHPROC -DHAVE_COMM_F2C -DHAVE_TIMES -DHAVE_GETTIMEOFDAY
+
+ cc
+ CC
+ ftn
+ mpich
+ $ENV{MPICH_DIR}
+ $ENV{NETCDF_DIR}
+ lustre
+ $ENV{PARALLEL_NETCDF_DIR}
+ cc
+ CC
+ ftn
+
+
+
+
+ -DSYSDARWIN
+
+
+ -all_load
+
+
+
+
+
+ -heap-arrays
+
+
+ -mkl=cluster
+ -mkl=cluster
+ -mkl=cluster
+ -mkl=cluster
+ -mkl=cluster
+ -mkl=cluster
+ -mkl
+
+
+
+
+
+ -mcmodel medium -shared-intel
+
+
+
+
+ /projects/install/rhel6-x86_64/ACME/AlbanyTrilinos/Albany/build/install
+
+ -O2
+
+
+ -lstdc++
+
+
+ -O2
+
+
+ $SHELL{$ENV{NETCDF_PATH}/bin/nf-config --flibs} -lblas -llapack
+
+ $ENV{NETCDF_PATH}
+ $ENV{PNETCDF_PATH}
+ $ENV{HDF5_PATH}
+ $ENV{SZIP_PATH}
+ $ENV{ZLIB_PATH}
+
+
+
+
+ -DHAVE_NANOTIME -DBIT64 -DHAVE_SLASHPROC -DHAVE_GETTIMEOFDAY
+
+ gpfs
+
+ $SHELL{$ENV{NETCDF_FORTRAN_PATH}/bin/nf-config --flibs} -L$ENV{MKLROOT}/lib/intel64 -Wl,--no-as-needed -lmkl_intel_lp64 -lmkl_intel_thread -lmkl_core -liomp5 -lpthread -lm -ldl
+ $SHELL{$ENV{NETCDF_C_PATH}/bin/nc-config --libs}
+
+
+ -lstdc++
+
+ $ENV{NETCDF_C_PATH}
+ $ENV{NETCDF_FORTRAN_PATH}
+ $ENV{PNETCDF_PATH}
+
+
+
+
+ TRUE
+
+ -DHAVE_VPRINTF -DHAVE_GETTIMEOFDAY -DHAVE_BACKTRACE
+
+ $ENV{NETCDF_HOME}
+
+ -fno-unsafe-math-optimizations
+ -g -fbacktrace -fbounds-check -ffpe-trap=invalid,zero,overflow
+
+
+ -L$ENV{NETCDF_HOME}/lib/ -lnetcdff -lnetcdf -lcurl -llapack -lblas
+
+ gcc
+ gfortran
+
+
+
+
+ -static-intel
+ -heap-arrays
+
+
+ -DHAVE_SLASHPROC
+
+
+ -O2 -debug minimal -qno-opt-dynamic-align
+ $SHELL{$ENV{NETCDF_FORTRAN_PATH}/bin/nf-config --fflags}
+ -static-intel
+ -heap-arrays
+
+
+ -static-intel
+
+
+ -static-intel
+
+ mpiicc
+ mpiicpc
+ mpiifort
+ gpfs
+
+ $SHELL{$ENV{NETCDF_FORTRAN_PATH}/bin/nf-config --flibs}
+ $SHELL{$ENV{NETCDF_C_PATH}/bin/nc-config --libs}
+ -Wl,--start-group $ENV{MKLROOT}/lib/intel64/libmkl_intel_lp64.a $ENV{MKLROOT}/lib/intel64/libmkl_sequential.a $ENV{MKLROOT}/lib/intel64/libmkl_core.a -Wl,--end-group -lpthread -lm -ldl
+
+ $ENV{NETCDF_C_PATH}
+ $ENV{NETCDF_FORTRAN_PATH}
+ $ENV{PNETCDF_PATH}
+
+
+
+
+ -DHAVE_SLASHPROC
+
+
+ -static-intel
+ -march=core-avx2
+ -O3
+
+
+ -static-intel
+ -axCORE-AVX2
+ -O3
+
+
+ -static-intel
+ -axCORE-AVX2
+ -O3 -qno-opt-dynamic-align
+
+ gpfs
+
+ $SHELL{$ENV{NETCDF_FORTRAN_PATH}/bin/nf-config --flibs} $SHELL{$ENV{NETCDF_C_PATH}/bin/nc-config --libs} -mkl
+
+ $ENV{NETCDF_C_PATH}
+ $ENV{NETCDF_FORTRAN_PATH}
+ $ENV{PNETCDF_PATH}
+
+ -static-intel
+
+ mpiicc
+ mpiicpc
+ mpiifort
+
+
+
+
+ -DHAVE_NANOTIME -DBIT64 -DHAVE_SLASHPROC -DHAVE_GETTIMEOFDAY
+
+ gpfs
+
+ $SHELL{$ENV{NETCDF_FORTRAN_PATH}/bin/nf-config --flibs} $SHELL{$ENV{NETCDF_C_PATH}/bin/nc-config --libs}
+ -Wl,--start-group $ENV{MKLROOT}/lib/intel64/libmkl_gf_lp64.a $ENV{MKLROOT}/lib/intel64/libmkl_sequential.a $ENV{MKLROOT}/lib/intel64/libmkl_core.a -Wl,--end-group -lpthread -lm -ldl
+
+
+ -lstdc++
+
+ $ENV{NETCDF_C_PATH}
+ $ENV{NETCDF_FORTRAN_PATH}
+ $ENV{PNETCDF_PATH}
+
+
+
+ /soft/climate/AlbanyTrilinos_06262017/Albany/buildintel/install
+
+ -DHAVE_SLASHPROC
+
+
+ -lstdc++
+
+
+ -O2 -debug minimal -qno-opt-dynamic-align
+
+ mpiicc
+ mpiicpc
+ mpiifort
+
+ $SHELL{nf-config --flibs} -mkl
+
+ $ENV{NETCDF_C_PATH}
+ $ENV{NETCDF_FORTRAN_PATH}
+ $ENV{PNETCDF_PATH}
+
+
+
+
+ -DHAVE_SLASHPROC
+
+
+ -lstdc++
+
+
+ $SHELL{$ENV{NETCDF_FORTRAN_PATH}/bin/nf-config --flibs} $SHELL{$ENV{NETCDF_C_PATH}/bin/nc-config --libs}
+ -L$ENV{MKLROOT}/lib/intel64 -Wl,--no-as-needed -lmkl_gf_lp64 -lmkl_sequential -lmkl_core -lpthread -lm -ldl
+
+ $ENV{NETCDF_C_PATH}
+ $ENV{NETCDF_FORTRAN_PATH}
+ $ENV{PNETCDF_PATH}
+
+
+
+
+ -DGPU
+ -DHAVE_NANOTIME -DBIT64 -DHAVE_SLASHPROC -DHAVE_GETTIMEOFDAY
+
+ gpfs
+
+ -O2 -Mvect=nosimd
+
+
+ -O2 -Mvect=nosimd -DSUMMITDEV_PGI -DHAVE_IEEE_ARITHMETIC -DNO_R16
+
+
+ -DSUMMITDEV_PGI -DHAVE_IEEE_ARITHMETIC -DNO_R16
+
+
+ -Minline -ta=tesla:ccall,fastmath,loadcache:L1,unroll,fma,managed,deepcopy,nonvvm -Mcuda -Minfo=accel
+
+
+ $SHELL{$ENV{NETCDF_FORTRAN_PATH}/bin/nf-config --flibs} -llapack -lblas
+ $SHELL{$ENV{NETCDF_C_PATH}/bin/nc-config --libs}
+
+ TRUE
+ FORTRAN
+
+ -lstdc++
+
+ $ENV{NETCDF_C_PATH}
+ $ENV{NETCDF_FORTRAN_PATH}
+ $ENV{PNETCDF_PATH}
+
+
+
+
+ -DGPU
+ -DHAVE_NANOTIME -DBIT64 -DHAVE_SLASHPROC -DHAVE_GETTIMEOFDAY
+
+ gpfs
+
+ -O2 -Mvect=nosimd
+
+
+ -O2 -Mvect=nosimd -DSUMMITDEV_PGI -DHAVE_IEEE_ARITHMETIC -DNO_R16
+
+
+ -DSUMMITDEV_PGI -DHAVE_IEEE_ARITHMETIC -DNO_R16
+
+
+ -Minline -ta=tesla:ccall,fastmath,loadcache:L1,unroll,fma,managed,deepcopy,nonvvm -Mcuda -Minfo=accel
+
+
+ $SHELL{$ENV{NETCDF_FORTRAN_PATH}/bin/nf-config --flibs} -llapack -lblas
+ $SHELL{$ENV{NETCDF_C_PATH}/bin/nc-config --libs}
+
+ TRUE
+ FORTRAN
+
+ -lstdc++
+
+ $ENV{NETCDF_C_PATH}
+ $ENV{NETCDF_FORTRAN_PATH}
+ $ENV{PNETCDF_PATH}
+
+
+
+
+ -fopenmp
+
+
+ -D CISM_GNU=ON
+
+
+ -DFORTRANUNDERSCORE -DNO_R16
+
+
+ FORTRAN
+
+ -fdefault-real-8
+
+
+
+ -O -fconvert=big-endian -ffree-line-length-none -ffixed-line-length-none -fno-range-check
+ -fopenmp
+ -g -Wall
+
+
+
+ -ffixed-form
+
+
+ -ffree-form
+
+ /software/dev_tools/swtree/cs400_centos7.2_pe2016-08/hdf5-parallel/1.8.17/centos7.2_gnu5.3.0
+ /software/dev_tools/swtree/cs400_centos7.2_pe2016-08/netcdf-hdf5parallel/4.3.3.1/centos7.2_gnu5.3.0
+ /software/dev_tools/swtree/cs400_centos7.2_pe2016-08/pnetcdf/1.9.0/centos7.2_gnu5.3.0
+ /software/tools/compilers/intel_2017/mkl/lib/intel64
+
+ -fopenmp
+
+
+
+ -L$NETCDF_PATH/lib -Wl,-rpath=$NETCDF_PATH/lib -lnetcdff -lnetcdf
+
+ mpicc
+ mpic++
+ mpif90
+ gcc
+ gcpp
+ gfortran
+ TRUE
+
+
+
+
+ --enable-filesystem-hints=lustre
+
+
+ -DLINUX
+
+
+ -g -traceback -O0 -fpe0 -check all -check noarg_temp_created -ftrapuv
+
+ $ENV{NETCDF_HOME}
+ lustre
+ $ENV{PNETCDFROOT}
+
+ -L$NETCDF_PATH/lib -lnetcdf -lnetcdff -L$ENV{MKL_PATH}/lib/intel64 -lmkl_rt
+ -mkl=cluster
+ -mkl
+
+
+
+
+
+ -DnoI8
+
+
+ -C=all -g -O0 -v
+ -C=all -g -nan -O0 -v
+
+ $ENV{MPI_LIB}
+ $ENV{NETCDF_ROOT}
+ lustre
+ $ENV{PNETCDFROOT}
+
+ -L$ENV{NETCDF_ROOT}/lib -lnetcdf -lnetcdff -L$ENV{MKL_PATH} -lmkl_rt
+
+
+
+
+
+ -O2
+
+
+ -DLINUX
+
+
+ -O2
+ -g -traceback -O0 -fpe0 -check all -check noarg_temp_created -ftrapuv -init=snan
+
+ $ENV{NETCDF_PATH}
+ lustre
+ $ENV{PNETCDFROOT}
+
+ -L$NETCDF_PATH/lib -lnetcdf -lnetcdff -lpmi -L$ENV{MKL_PATH} -lmkl_rt
+
+
+
+
+
+ -O2
+
+
+ -DLINUX
+
+
+ -O2
+ -g -traceback -O0 -fpe0 -check all -check noarg_temp_created -ftrapuv
+
+ $ENV{NETCDF_HOME}
+ lustre
+ $ENV{PNETCDFROOT}
+
+ -L$NETCDF_PATH/lib -lnetcdf -lnetcdff -lpmi -L$ENV{MKL_PATH} -lmkl_rt
+
+
+
+
+
+ -O2 -kind=byte
+
+
+ -DLINUX
+
+
+ -O2 -kind=byte
+ -C=all -g -O0 -v
+
+ $ENV{NETCDF_HOME}
+ lustre
+ $ENV{PNETCDFROOT}
+
+ -L$NETCDF_PATH/lib -lnetcdf -lnetcdff -lpmi -L$ENV{MKL_PATH} -lmkl_rt
+
+
+
+
+
+ -O2
+
+
+ -DLINUX
+
+
+ -O2
+ -g -traceback -O0 -fpe0 -check all -check noarg_temp_created -ftrapuv
+ -C -Mbounds -traceback -Mchkfpstk -Mchkstk -Mdalign -Mdepchk -Mextend -Miomutex -Mrecursive -Ktrap=fp -O0 -g -byteswapio -Meh_frame
+
+ $ENV{NETCDF_HOME}
+ lustre
+ $ENV{PNETCDFROOT}
+
+ -L$NETCDF_PATH/lib -lnetcdf -lnetcdff -lpmi -L$ENV{MPI_LIB} -lmpich
+
+
+
+
+
+ -O2
+ -DHAVE_SLASHPROC
+
+
+ -DLINUX
+
+
+ -O2
+ -g -traceback -O0 -fpe0 -check all -check noarg_temp_created -ftrapuv -init=snan
+
+ $ENV{NETCDF_HOME}
+ $ENV{PNETCDF_HOME}
+ lustre
+
+ -lpmi -L$NETCDF_PATH/lib -lnetcdf -lnetcdff -L$ENV{MKL_PATH}/lib/intel64/ -lmkl_rt $ENV{PNETCDF_LIBRARIES}
+
+ mpiicc
+ mpiicpc
+ mpiifort
+
+
+
+
+ -O2
+ -DHAVE_SLASHPROC
+
+
+ -lstdc++
+
+ FORTRAN
+
+ -DLINUX
+
+
+ -O2
+ -C -Mbounds -traceback -Mchkfpstk -Mchkstk -Mdalign -Mdepchk -Mextend -Miomutex -Mrecursive -Ktrap=fp -O0 -g -byteswapio -Meh_frame
+ -Mnovect
+ -Mnovect
+
+ $ENV{NETCDF_HOME}
+ lustre
+ $ENV{PNETCDF_HOME}
+
+ -lpmi -L$NETCDF_PATH/lib -lnetcdf -lnetcdff -L$ENV{MKL_PATH}/lib/intel64/ -lmkl_rt $ENV{PNETCDF_LIBRARIES}
+
+ mpipgcc
+ mpipgcxx
+ mpipgf90
+ TRUE
+
+
+
+ /global/cfs/cdirs/e3sm/software/albany-trilinos/albany-install-2020-08-07
+
+ -convert big_endian -assume byterecl -ftz -traceback -assume realloc_lhs -fp-model consistent -fimf-use-svml
+ -O2 -debug minimal -qno-opt-dynamic-align
+
+
+ -std=c++14 -fp-model consistent
+ -qopenmp
+ -O0 -g
+ -O2
+
+ $ENV{PETSC_DIR}
+ icc
+ icpc
+ ifort
+
+ -L$ENV{NETCDF_DIR} -lnetcdff -Wl,--as-needed,-L$ENV{NETCDF_DIR}/lib -lnetcdff -lnetcdf
+ -mkl -lpthread
+
+
+
+
+
+ -O2
+
+
+ -O2
+
+
+ -lstdc++
+
+
+
+
+ /global/homes/m/mperego/e3sm-software/albany-trilinos/albany-install-2021-01-05
+
+ --host=cray
+
+
+ -axMIC-AVX512 -xCORE-AVX2
+
+
+ -DARCH_MIC_KNL
+
+
+ -convert big_endian -assume byterecl -ftz -traceback -assume realloc_lhs -fp-model consistent -fimf-use-svml
+ -O2 -debug minimal -qno-opt-dynamic-align
+ -xMIC-AVX512
+ -DHAVE_ERF_INTRINSICS
+
+
+ -std=c++14 -fp-model consistent
+ -qopenmp
+ -O0 -g
+ -O2
+
+ mpiicc
+ mpiicpc
+ mpiifort
+
+ impi
+ $ENV{PETSC_DIR}
+ icc
+ icpc
+ ifort
+
+ -L$ENV{NETCDF_DIR} -lnetcdff -Wl,--as-needed,-L$ENV{NETCDF_DIR}/lib -lnetcdff -lnetcdf
+ -mkl -lpthread
+
+
+
+
+
+ -O2
+
+
+ -O2
+
+
+ -lstdc++
+
+
+
+
+ /projects/ccsm/AlbanyTrilinos_20190904/albany-build/install
+
+ -O2
+
+ /projects/ccsm/esmf-6.3.0rp1/lib/libO/Linux.intel.64.openmpi.default
+
+ -O2
+
+ /opt/openmpi-1.8-intel
+ $ENV{NETCDFROOT}
+ /projects/ccsm/pfunit/3.2.9/mpi-serial
+ lustre
+ $ENV{PNETCDFROOT}
+
+ $SHELL{$NETCDF_PATH/bin/nf-config --flibs} -L/projects/ccsm/BLAS-intel -lblas_LINUX
+ -mkl=cluster
+ -mkl
+
+
+
+
+
+ -O2 -fp-model precise -I/soft/intel/x86_64/2013/composer_xe_2013/composer_xe_2013_sp1.3.174/mkl/include
+ -openmp
+
+
+ -DFORTRANUNDERSCORE -DNO_R16
+ -DCPRINTEL
+
+
+ -cxxlib
+
+ FORTRAN
+
+ -r8
+
+
+ -fp-model source -convert big_endian -assume byterecl -ftz -traceback -assume realloc_lhs -I/soft/intel/x86_64/2013/composer_xe_2013/composer_xe_2013_sp1.3.174/mkl/include
+ -openmp
+ -O0 -g -check uninit -check bounds -check pointers -fpe0
+ -O2
+
+
+ -fixed -132
+
+
+ -free
+
+
+ -openmp
+
+ mpiicc
+ mpiicpc
+ mpiifort
+ icc
+ icpc
+ ifort
+
+ -L/soft/netcdf/fortran-4.4-intel-sp1-update3-parallel/lib -lnetcdff -L/soft/hdf5/hdf5-1.8.13-intel-2013-sp1-update3-impi-5.0.0.028/lib -openmp -fPIC -lnetcdf -lnetcdf -L/soft/intel/x86_64/2013/composer_xe_2013/composer_xe_2013_sp1.3.174/mkl/lib/intel64 -lmkl_intel_lp64 -lmkl_core -lmkl_intel_thread -lpthread -lm
+
+ TRUE
+
+
+
+
+ -DHAVE_VPRINTF -DHAVE_GETTIMEOFDAY
+
+ /global/software/sl-6.x86_64/modules/intel/2016.1.150/lapack/3.6.0-intel/lib
+ $ENV{NETCDF_DIR}
+
+ -lnetcdff -lnetcdf -mkl
+
+
+
+
+
+ -DHAVE_VPRINTF -DHAVE_GETTIMEOFDAY
+
+ /global/software/sl-6.x86_64/modules/intel/2016.1.150/lapack/3.6.0-intel/lib
+ $ENV{NETCDF_DIR}
+
+ -lnetcdff -lnetcdf -mkl
+
+
+
+
+ $ENV{LAPACK_DIR}/lib
+
+ -O2
+
+
+ -lstdc++ -lmpi_cxx
+
+
+ -O2
+ -I$ENV{NETCDF_DIR}/include
+
+ $ENV{NETCDF_DIR}
+ $ENV{PNETCDF_DIR}
+
+ -L/global/software/sl-7.x86_64/modules/gcc/6.3.0/netcdf/4.4.1.1-gcc-p/lib -lnetcdff -lnetcdf -lnetcdf -lblas -llapack
+
+
+
+
+ $ENV{NETCDF_C_PATH}
+ $ENV{NETCDF_FORTRAN_PATH}
+ $ENV{PNETCDF_PATH}
+
+ $SHELL{$NETCDF_FORTRAN_PATH/bin/nf-config --flibs}
+
+
+
+
+ $ENV{HDF5_PATH}
+ $ENV{NETCDF_PATH}
+ $ENV{PNETCDF_PATH}
+
+ $SHELL{$NETCDF_PATH/bin/nf-config --flibs} -lblas -llapack
+
+
+
+
+
+ -framework Accelerate
+
+ $ENV{NETCDF_PATH}
+
+ -L$NETCDF_PATH/lib -lnetcdff -lnetcdf
+
+
+
+
+ /projects/install/rhel6-x86_64/ACME/AlbanyTrilinos/Albany/build/install
+
+ -DHAVE_SLASHPROC
+
+
+ -O2
+
+
+ -lstdc++
+
+
+ -O2
+ -I$ENV{NETCDFROOT}/include
+
+ $ENV{NETCDFROOT}
+ $ENV{SEMS_PFUNIT_ROOT}
+
+ $SHELL{$NETCDF_PATH/bin/nf-config --flibs} -lblas -llapack
+
+
+
+
+ /projects/install/rhel6-x86_64/ACME/AlbanyTrilinos/Albany/build/install
+
+ -DHAVE_SLASHPROC
+
+
+ -O2
+
+
+ -lstdc++ -lmpi_cxx
+
+
+ -O2
+
+ $ENV{NETCDFROOT}
+ $ENV{PNETCDFROOT}
+
+ $SHELL{$NETCDF_PATH/bin/nf-config --flibs} -lblas -llapack
+
+
+
+
+ /projects/install/rhel7-x86_64/ACME/AlbanyTrilinos/Albany/build/install
+
+ -DHAVE_SLASHPROC
+
+
+ -O2
+
+
+ -lstdc++
+
+
+ -O2
+ -I$ENV{NETCDFROOT}/include
+
+ $ENV{NETCDFROOT}
+ $ENV{SEMS_PFUNIT_ROOT}
+
+ $SHELL{$NETCDF_PATH/bin/nf-config --flibs} -lblas -llapack
+
+
+
+
+ /projects/install/rhel7-x86_64/ACME/AlbanyTrilinos/Albany/build/install
+
+ -DHAVE_SLASHPROC
+
+
+ -O2
+
+
+ -lstdc++ -lmpi_cxx
+
+
+ -O2
+
+ $ENV{NETCDFROOT}
+ $ENV{PNETCDFROOT}
+
+ $SHELL{$NETCDF_PATH/bin/nf-config --flibs} -lblas -llapack
+
+
+
+
+ $ENV{E3SM_SRCROOT}/externals/kokkos/bin/nvcc_wrapper
+ $ENV{E3SM_SRCROOT}/externals/kokkos/bin/nvcc_wrapper
+ --arch=Pascal60 --with-cuda=$ENV{CUDA_ROOT} --with-cuda-options=enable_lambda
+
+ -expt-extended-lambda -DCUDA_BUILD
+
+ $ENV{NETCDF_FORTRAN_PATH}
+
+ $SHELL{$NETCDF_PATH/bin/nf-config --flibs} -lblas -llapack -lcudart -lstdc++
+
+
+
+
+
+ -DFORTRANUNDERSCORE -DNO_R16 -DCPRINTEL
+
+
+ -xCORE_AVX512 -mkl -std=gnu99
+ -O3 -g -debug minimal
+ -O0 -g
+
+
+ -xCORE_AVX512 -mkl -std=c++14
+ -O3 -g -debug minimal
+ -O0 -g
+ -qopenmp
+
+
+ -convert big_endian -assume byterecl -ftz -traceback -assume realloc_lhs -xCORE_AVX512 -mkl
+ -qopenmp
+ -O3 -g -debug minimal
+ -O0 -g -check uninit -check bounds -check pointers -fpe0 -check noarg_temp_created
+
+
+ -mkl -lstdc++
+ -qopenmp
+ -L$(NETCDF_FORTRAN_PATH)/lib64
+
+
+ -fixed -132
+
+
+ -free
+
+ TRUE
+
+ -r8
+
+ ifort
+ icc
+ icpc
+ FORTRAN
+
+ -cxxlib
+
+ TRUE
+
+
+
+ /projects/ccsm/AlbanyTrilinos_20190904/albany-build/install
+
+ -DHAVE_SLASHPROC
+
+
+ -O2
+
+ /projects/ccsm/esmf-6.3.0rp1/lib/libO/Linux.intel.64.openmpi.default
+
+ -O2
+
+ $ENV{MPIHOME}
+ $ENV{NETCDFROOT}
+ /projects/ccsm/pfunit/3.2.9/mpi-serial
+ lustre
+ $ENV{PNETCDFROOT}
+
+ $SHELL{$NETCDF_PATH/bin/nf-config --flibs} -L/projects/ccsm/BLAS-intel -lblas_LINUX -L$ENV{MKL_LIBS} -lmkl_rt
+ -mkl=cluster
+ -mkl
+
+
+
+
+
+ -xCORE-AVX2
+
+
+ -DLINUX
+ -DHAVE_NANOTIME -DBIT64 -DHAVE_VPRINTF -DHAVE_BACKTRACE -DHAVE_SLASHPROC -DHAVE_COMM_F2C -DHAVE_TIMES -DHAVE_GETTIMEOFDAY
+ -DARCH_MIC_KNL
+
+
+ -convert big_endian -assume byterecl -ftz -traceback -assume realloc_lhs -fp-model consistent -fimf-use-svml
+ -O2 -debug minimal -qno-opt-dynamic-align
+ -xCORE-AVX2
+
+ $ENV{TACC_HDF5_DIR}
+ mpicc
+ mpicxx
+ mpif90
+ impi
+ $ENV{TACC_NETCDF_DIR}
+ $ENV{TACC_NETCDF_DIR}
+ $ENV{PETSC_DIR}
+ $ENV{TACC_PNETCDF_DIR}
+ icc
+ icpc
+ ifort
+
+ -L$NETCDF_PATH -lnetcdff -Wl,--as-needed,-L$NETCDF_PATH/lib -lnetcdff -lnetcdf
+ -L$NETCDF_PATH -lnetcdff -Wl,--as-needed,-L$NETCDF_PATH/lib -lnetcdff -lnetcdf
+ -mkl -lpthread
+
+
+
+
+
+ -O2
+
+
+ -O2
+
+
+ -DHAVE_SLASHPROC
+
+
+ -L$ENV{NETCDF_C_PATH}/lib -lnetcdf -L$ENV{NETCDF_FORTRAN_PATH}/lib -lnetcdff -L$ENV{ESSL_PATH}/lib64 -lessl -L$ENV{OLCF_NETLIB_LAPACK_ROOT}/lib -llapack
+ -L$ENV{PNETCDF_PATH}/lib -lpnetcdf -L$ENV{HDF5_PATH}/lib -lhdf5_hl -lhdf5 -L$ENV{NETCDF_C_PATH}/lib -lnetcdf -L$ENV{NETCDF_FORTRAN_PATH}/lib -lnetcdff -L$ENV{ESSL_PATH}/lib64 -lessl -L$ENV{OLCF_NETLIB_LAPACK_ROOT}/lib -llapack
+
+
+ -lstdc++
+
+ mpicc
+ mpiCC
+ mpif90
+ gcc
+ g++
+ gfortran
+ gpfs
+ $ENV{NETCDF_C_PATH}
+ $ENV{NETCDF_FORTRAN_PATH}
+ TRUE
+
+
+
+
+ -O2
+
+
+ -O2
+
+
+ -DHAVE_SLASHPROC
+
+
+ -L$ENV{NETCDF_C_PATH}/lib -lnetcdf -L$ENV{NETCDF_FORTRAN_PATH}/lib -lnetcdff -L$ENV{ESSL_PATH}/lib64 -lessl -L$ENV{OLCF_NETLIB_LAPACK_ROOT}/lib -llapack
+ -L$ENV{PNETCDF_PATH}/lib -lpnetcdf -L$ENV{HDF5_PATH}/lib -lhdf5_hl -lhdf5 -L$ENV{NETCDF_C_PATH}/lib -lnetcdf -L$ENV{NETCDF_FORTRAN_PATH}/lib -lnetcdff -L$ENV{ESSL_PATH}/lib64 -lessl -L$ENV{OLCF_NETLIB_LAPACK_ROOT}/lib -llapack
+
+
+ -lstdc++
+
+ mpiCC
+
+ gpfs
+ $ENV{NETCDF_C_PATH}
+ $ENV{NETCDF_FORTRAN_PATH}
+ TRUE
+
+ -O3 -arch sm_70 --use_fast_math
+
+ TRUE
+
+
+
+
+ -DLINUX
+ -DHAVE_SLASHPROC
+
+
+ -qzerosize -qfree=f90 -qxlf2003=polymorphic
+ -qspillsize=2500 -qextname=flush
+ -qsmp=omp:noopt
+
+
+ -Wl,--relax -Wl,--allow-multiple-definition
+
+
+ -lxlopt -lxl -lxlsmp -L$ENV{NETCDF_C_PATH}/lib -lnetcdf -L$ENV{NETCDF_FORTRAN_PATH}/lib -lnetcdff -L$ENV{ESSL_PATH}/lib64 -lessl -L$ENV{OLCF_NETLIB_LAPACK_ROOT}/lib -llapack
+ -L$ENV{PNETCDF_PATH}/lib -lpnetcdf -L$ENV{HDF5_PATH}/lib -lhdf5_hl -lhdf5 -lxlopt -lxl -lxlsmp -L$ENV{NETCDF_C_PATH}/lib -lnetcdf -L$ENV{NETCDF_FORTRAN_PATH}/lib -lnetcdff -L$ENV{ESSL_PATH}/lib64 -lessl -L$ENV{OLCF_NETLIB_LAPACK_ROOT}/lib -llapack
+
+
+ -L/sw/summit/gcc/8.1.1/lib64 -lstdc++ -L$ENV{OLCF_XLC_ROOT}/lib -libmc++
+
+ mpicc
+ mpiCC
+ mpif90
+ gpfs
+ xlc_r
+ xlf90_r
+ xlc++_r
+ $ENV{NETCDF_C_PATH}
+ $ENV{NETCDF_FORTRAN_PATH}
+ TRUE
+ --arch=Power9 --with-serial
+
+
+
+
+ -g -qfullpath -qmaxmem=-1 -qphsinfo
+ -O3
+ -qsmp=omp
+ -qsmp=omp:noopt
+
+
+ -g -qfullpath -qmaxmem=-1 -qphsinfo
+ -O2
+ -qsmp=omp
+ -qsmp=omp:noopt
+
+
+ -DFORTRAN_SAME -DCPRIBM
+ -DUSE_CBOOL
+ -DLINUX
+ -DHAVE_SLASHPROC
+
+ -WF,-D
+
+ -qrealsize=8
+
+
+ -g -qfullpath -qmaxmem=-1 -qphsinfo
+ -O2 -qstrict -Q
+ -qsmp=omp
+ -qsmp=omp:noopt
+ -qzerosize -qfree=f90 -qxlf2003=polymorphic
+ -qspillsize=2500 -qextname=flush
+ -qsmp=omp:noopt
+
+
+ -O0
+
+
+ -qsuffix=f=f -qfixed=132
+
+
+ -qsuffix=f=f90:cpp=F90
+
+ TRUE
+
+ -Wl,--relax -Wl,--allow-multiple-definition
+ -qsmp -qoffload -lcudart -L$ENV{CUDA_DIR}/lib64
+
+
+ -lxlopt -lxl -lxlsmp -L$ENV{NETCDF_C_PATH}/lib -lnetcdf -L$ENV{NETCDF_FORTRAN_PATH}/lib -lnetcdff -L$ENV{ESSL_PATH}/lib64 -lessl -L$ENV{OLCF_NETLIB_LAPACK_ROOT}/lib -llapack
+ -L$ENV{PNETCDF_PATH}/lib -lpnetcdf -L$ENV{HDF5_PATH}/lib -lhdf5_hl -lhdf5 -lxlopt -lxl -lxlsmp -L$ENV{NETCDF_C_PATH}/lib -lnetcdf -L$ENV{NETCDF_FORTRAN_PATH}/lib -lnetcdff -L$ENV{ESSL_PATH}/lib64 -lessl -L$ENV{OLCF_NETLIB_LAPACK_ROOT}/lib -llapack
+
+
+ -L/sw/summit/gcc/8.1.1/lib64 -lstdc++ -L$ENV{OLCF_XLC_ROOT}/lib -libmc++
+
+ mpicc
+ mpiCC
+ mpif90
+ gpfs
+ xlc_r
+ xlf90_r
+ xlc++_r
+ $ENV{NETCDF_C_PATH}
+ $ENV{NETCDF_FORTRAN_PATH}
+ TRUE
+ --arch=Power9 --with-serial
+
+
+
+
+ -O2 -Mvect=nosimd
+
+
+ -O2 -DSUMMITDEV_PGI
+
+
+ -DHAVE_SLASHPROC
+
+
+ -L$ENV{PNETCDF_PATH}/lib -lpnetcdf -L$ENV{HDF5_PATH}/lib -lhdf5_hl -lhdf5 -L$ENV{NETCDF_C_PATH}/lib -lnetcdf -L$ENV{NETCDF_FORTRAN_PATH}/lib -lnetcdff -L$ENV{ESSL_PATH}/lib64 -lessl -L$ENV{OLCF_NETLIB_LAPACK_ROOT}/lib -llapack
+
+ FORTRAN
+
+ -lstdc++
+
+ mpicc
+ mpiCC
+ mpif90
+ gpfs
+ pgcc
+ pgc++
+ pgfortran
+ $ENV{NETCDF_C_PATH}
+ $ENV{NETCDF_FORTRAN_PATH}
+ TRUE
+
+
+
+
+ -O2 -Mvect=nosimd
+
+
+ -DGPU
+ -DHAVE_SLASHPROC
+
+
+ -O2 -Mvect=nosimd -DSUMMITDEV_PGI -DHAVE_IEEE_ARITHMETIC -DNO_R16
+
+
+ -DSUMMITDEV_PGI -DHAVE_IEEE_ARITHMETIC -DNO_R16
+
+
+ -Minline -ta=nvidia,cc70,fastmath,loadcache:L1,unroll,fma,managed,ptxinfo -Mcuda -Minfo=accel
+
+
+ -L$ENV{PNETCDF_PATH}/lib -lpnetcdf -L$ENV{HDF5_PATH}/lib -lhdf5_hl -lhdf5 -L$ENV{NETCDF_C_PATH}/lib -lnetcdf -L$ENV{NETCDF_FORTRAN_PATH}/lib -lnetcdff -L$ENV{ESSL_PATH}/lib64 -lessl -L$ENV{OLCF_NETLIB_LAPACK_ROOT}/lib -llapack
+
+ FORTRAN
+
+ -lstdc++
+
+ --arch=Power9,Volta70 --with-cuda=$ENV{CUDA_DIR} --with-cuda-options=enable_lambda
+ mpicc
+ mpiCC
+ mpif90
+ gpfs
+ pgcc
+ pgfortran
+ $ENV{NETCDF_C_PATH}
+ $ENV{NETCDF_FORTRAN_PATH}
+ TRUE
+
+
+
+
+ -O2
+
+
+ -O2
+
+
+ -DHAVE_SLASHPROC
+
+
+ -L$ENV{NETCDF_C_PATH}/lib -lnetcdf -L$ENV{NETCDF_FORTRAN_PATH}/lib -lnetcdff -L$ENV{ESSL_PATH}/lib64 -lessl -L$ENV{OLCF_NETLIB_LAPACK_ROOT}/lib -llapack
+ -L$ENV{PNETCDF_PATH}/lib -lpnetcdf -L$ENV{HDF5_PATH}/lib -lhdf5_hl -lhdf5 -L$ENV{NETCDF_C_PATH}/lib -lnetcdf -L$ENV{NETCDF_FORTRAN_PATH}/lib -lnetcdff -L$ENV{ESSL_PATH}/lib64 -lessl -L$ENV{OLCF_NETLIB_LAPACK_ROOT}/lib -llapack
+
+
+ -lstdc++
+
+ mpicc
+ mpiCC
+ mpif90
+ gcc
+ g++
+ gfortran
+ gpfs
+ $ENV{NETCDF_C_PATH}
+ $ENV{NETCDF_FORTRAN_PATH}
+ TRUE
+
+
+
+
+ -O2
+
+
+ -O2
+
+
+ -DHAVE_SLASHPROC
+
+
+ -L$ENV{NETCDF_C_PATH}/lib -lnetcdf -L$ENV{NETCDF_FORTRAN_PATH}/lib -lnetcdff -L$ENV{ESSL_PATH}/lib64 -lessl -L$ENV{OLCF_NETLIB_LAPACK_ROOT}/lib -llapack
+ -L$ENV{PNETCDF_PATH}/lib -lpnetcdf -L$ENV{HDF5_PATH}/lib -lhdf5_hl -lhdf5 -L$ENV{NETCDF_C_PATH}/lib -lnetcdf -L$ENV{NETCDF_FORTRAN_PATH}/lib -lnetcdff -L$ENV{ESSL_PATH}/lib64 -lessl -L$ENV{OLCF_NETLIB_LAPACK_ROOT}/lib -llapack
+
+
+ -lstdc++
+
+ mpiCC
+ gpfs
+ $ENV{NETCDF_C_PATH}
+ $ENV{NETCDF_FORTRAN_PATH}
+ TRUE
+
+ -O3 -arch sm_70 --use_fast_math
+
+ TRUE
+
+
+
+
+ -DLINUX
+ -DHAVE_SLASHPROC
+
+
+ -qzerosize -qfree=f90 -qxlf2003=polymorphic
+ -qspillsize=2500 -qextname=flush
+ -qsmp=omp:noopt
+
+
+ -Wl,--relax -Wl,--allow-multiple-definition
+
+
+ -lxlopt -lxl -lxlsmp -L$ENV{NETCDF_C_PATH}/lib -lnetcdf -L$ENV{NETCDF_FORTRAN_PATH}/lib -lnetcdff -L$ENV{ESSL_PATH}/lib64 -lessl -L$ENV{OLCF_NETLIB_LAPACK_ROOT}/lib -llapack
+ -L$ENV{PNETCDF_PATH}/lib -lpnetcdf -L$ENV{HDF5_PATH}/lib -lhdf5_hl -lhdf5 -lxlopt -lxl -lxlsmp -L$ENV{NETCDF_C_PATH}/lib -lnetcdf -L$ENV{NETCDF_FORTRAN_PATH}/lib -lnetcdff -L$ENV{ESSL_PATH}/lib64 -lessl -L$ENV{OLCF_NETLIB_LAPACK_ROOT}/lib -llapack
+
+
+ -lstdc++ -L$ENV{OLCF_XLC_ROOT}/lib -libmc++
+
+ mpicc
+ mpiCC
+ mpif90
+ gpfs
+ xlc_r
+ xlf90_r
+ xlc++_r
+ $ENV{NETCDF_C_PATH}
+ $ENV{NETCDF_FORTRAN_PATH}
+ TRUE
+
+
+
+
+ -g -qfullpath -qmaxmem=-1 -qphsinfo
+ -O3
+ -qsmp=omp
+ -qsmp=omp:noopt
+
+
+ -g -qfullpath -qmaxmem=-1 -qphsinfo
+ -O2
+ -qsmp=omp
+ -qsmp=omp:noopt
+
+
+ -DFORTRAN_SAME -DCPRIBM
+ -DUSE_CBOOL
+ -DLINUX
+ -DHAVE_SLASHPROC
+
+ -WF,-D
+
+ -qrealsize=8
+
+
+ -g -qfullpath -qmaxmem=-1 -qphsinfo
+ -O2 -qstrict -Q
+ -qsmp=omp
+ -qsmp=omp:noopt
+ -qinitauto=7FF7FFFF -qflttrap=ov:zero:inv:en
+ -qzerosize -qfree=f90 -qxlf2003=polymorphic
+ -qspillsize=2500 -qextname=flush
+ -qsmp=omp:noopt
+
+
+ -O0
+
+
+ -qsuffix=f=f -qfixed=132
+
+
+ -qsuffix=f=f90:cpp=F90
+
+ TRUE
+
+ -Wl,--relax -Wl,--allow-multiple-definition
+ -qsmp -qoffload -lcudart -L$ENV{CUDA_DIR}/lib64
+
+
+ -lxlopt -lxl -lxlsmp -L$ENV{NETCDF_C_PATH}/lib -lnetcdf -L$ENV{NETCDF_FORTRAN_PATH}/lib -lnetcdff -L$ENV{ESSL_PATH}/lib64 -lessl -L$ENV{OLCF_NETLIB_LAPACK_ROOT}/lib -llapack
+ -L$ENV{PNETCDF_PATH}/lib -lpnetcdf -L$ENV{HDF5_PATH}/lib -lhdf5_hl -lhdf5 -lxlopt -lxl -lxlsmp -L$ENV{NETCDF_C_PATH}/lib -lnetcdf -L$ENV{NETCDF_FORTRAN_PATH}/lib -lnetcdff -L$ENV{ESSL_PATH}/lib64 -lessl -L$ENV{OLCF_NETLIB_LAPACK_ROOT}/lib -llapack
+
+
+ -lstdc++ -L$ENV{OLCF_XLC_ROOT}/lib -libmc++
+
+ mpicc
+ mpiCC
+ mpif90
+ gpfs
+ xlc_r
+ xlf90_r
+ xlc++_r
+ $ENV{NETCDF_C_PATH}
+ $ENV{NETCDF_FORTRAN_PATH}
+ TRUE
+
+
+
+
+ -O2 -Mvect=nosimd
+
+
+ -O2 -DSUMMITDEV_PGI
+
+
+ -DHAVE_SLASHPROC
+
+
+ -L$ENV{PNETCDF_PATH}/lib -lpnetcdf -L$ENV{HDF5_PATH}/lib -lhdf5_hl -lhdf5 -L$ENV{NETCDF_C_PATH}/lib -lnetcdf -L$ENV{NETCDF_FORTRAN_PATH}/lib -lnetcdff -L$ENV{ESSL_PATH}/lib64 -lessl -L$ENV{OLCF_NETLIB_LAPACK_ROOT}/lib -llapack
+
+ FORTRAN
+
+ -lstdc++
+
+ mpicc
+ mpiCC
+ mpif90
+ gpfs
+ pgcc
+ pgc++
+ pgfortran
+ $ENV{NETCDF_C_PATH}
+ $ENV{NETCDF_FORTRAN_PATH}
+ TRUE
+
+
+
+
+ -O2 -Mvect=nosimd
+
+
+ -DGPU
+ -DHAVE_SLASHPROC
+
+
+ -O2 -Mvect=nosimd -DSUMMITDEV_PGI -DHAVE_IEEE_ARITHMETIC -DNO_R16
+
+
+ -DSUMMITDEV_PGI -DHAVE_IEEE_ARITHMETIC -DNO_R16
+
+
+ -Minline -ta=nvidia,cc70,fastmath,loadcache:L1,unroll,fma,managed,ptxinfo -Mcuda -Minfo=accel
+
+
+ -L$ENV{PNETCDF_PATH}/lib -lpnetcdf -L$ENV{HDF5_PATH}/lib -lhdf5_hl -lhdf5 -L$ENV{NETCDF_C_PATH}/lib -lnetcdf -L$ENV{NETCDF_FORTRAN_PATH}/lib -lnetcdff -L$ENV{ESSL_PATH}/lib64 -lessl -L$ENV{OLCF_NETLIB_LAPACK_ROOT}/lib -llapack
+
+ FORTRAN
+
+ -lstdc++
+
+ --arch=Power9,Volta70 --with-cuda=$ENV{CUDA_DIR} --with-cuda-options=enable_lambda
+ mpicc
+ mpiCC
+ mpif90
+ gpfs
+ pgcc
+ pgfortran
+ $ENV{NETCDF_C_PATH}
+ $ENV{NETCDF_FORTRAN_PATH}
+ TRUE
+
+
+
+
+ -O2
+
+
+ -DNO_SHR_VMATH -DCNL
+
+
+ -O2
+ -g -traceback -O0 -fpe0 -check all -check noarg_temp_created -ftrapuv
+
+
+ -llapack -lblas
+
+ mpich
+ /usr/tce/packages/mvapich2/mvapich2-2.2-intel-18.0.1/
+ /usr/tce/packages/netcdf-fortran/netcdf-fortran-4.4.4-intel-18.0.1/
+
+ $SHELL{/usr/tce/packages/netcdf-fortran/netcdf-fortran-4.4.4-intel-18.0.1/bin/nf-config --flibs}
+
+
+
+
+
+ -O2
+
+
+ -DNO_SHR_VMATH -DCNL
+
+
+ -O2
+ -g -traceback -O0 -fpe0 -check all -check noarg_temp_created -ftrapuv
+
+
+ -llapack -lblas
+
+ mpich
+ /usr/tce/packages/mvapich2/mvapich2-2.2-intel-18.0.1/
+ /usr/tce/packages/netcdf-fortran/netcdf-fortran-4.4.4-intel-18.0.1/
+
+ $SHELL{/usr/tce/packages/netcdf-fortran/netcdf-fortran-4.4.4-intel-18.0.1/bin/nf-config --flibs}
+
+
+
+
+
+ -O2
+ -O0
+
+
+ -O2
+ -O0
+
+
+ $SHELL{nf-config --flibs}
+
+
+ -lstdc++
+
+
+
+
+
+ --host=cray
+
+
+ -DARCH_MIC_KNL
+
+
+ -convert big_endian -assume byterecl -ftz -traceback -assume realloc_lhs -fp-model consistent -fimf-use-svml=true
+ -O2 -debug minimal -qno-opt-dynamic-align -fp-speculation=off
+ -DHAVE_ERF_INTRINSICS
+
+ icc
+ icpc
+ ifort
+
+ -L$ENV{NETCDF_DIR}/lib -lnetcdff -L$ENV{NETCDF_DIR}/lib -lnetcdf -Wl,-rpath -Wl,$ENV{NETCDF_DIR}/lib
+ -mkl -lpthread
+
+
+
+
+
+ -DHAVE_SLASHPROC
+
+
+ -lstdc++
+
+
+ $SHELL{$ENV{NETCDF_PATH}/bin/nf-config --flibs} $SHELL{$ENV{NETCDF_PATH}/bin/nc-config --libs} -llapack -lblas
+
+ $ENV{NETCDF_PATH}
+ $ENV{PNETCDF_PATH}
+
+
+
+
+ -O1
+
+
+ -O1
+ -vector0
+
+ CXX
+ --gcc-toolchain=/opt/gcc/9.3.0/snos --with-serial
+
+ -std=c++14
+ -fopenmp
+ -g -Wall
+ -O1
+
+
+
+
+
+
+ -convert big_endian -assume byterecl -traceback -assume realloc_lhs -fp-model consistent
+ -qopenmp
+ -O2
+ -O0 -g -check uninit -check bounds -check pointers -fpe0 -check noarg_temp_created
+
+
+ -fp-model precise -std=gnu99 -traceback
+ -qopenmp
+ -O2
+ -O0 -g
+
+
+ -std=c++14 -fp-model precise -traceback
+ -qopenmp
+ -O2
+ -O0 -g
+
+ TRUE
+ FORTRAN
+
+ -cxxlib
+
+
+ -DFORTRANUNDERSCORE -DNO_R16 -DCPRINTEL -DHAVE_SLASHPROC
+
+
+ -r8
+
+
+ -O0
+
+
+ -fixed -132
+
+
+ -free
+
+ TRUE
+ mpif90
+ mpicc
+ mpicxx
+ icc
+ icpc
+ ifort
+
+ -qopenmp
+
+
+ $SHELL{$ENV{NETCDF_PATH}/bin/nf-config --flibs} -Wl,-rpath -Wl,$ENV{NETCDF_PATH}/lib -mkl
+ $SHELL{$ENV{NETCDF_PATH}/bin/nc-config --libs}
+
+ $ENV{NETCDF_PATH}
+ $ENV{PNETCDF_PATH}
+
+
+
+
+
+
+ -traceback -convert big_endian -assume byterecl -assume realloc_lhs -fp-model precise
+ -qopenmp
+ -O2
+ -O0 -g -check uninit -check bounds -check pointers -fpe0 -check noarg_temp_created
+
+
+ -traceback -fp-model precise -std=gnu99
+ -qopenmp
+ -O2
+ -O0 -g
+
+
+ -traceback -std=c++17 -fp-model precise
+ -qopenmp
+ -O2
+ -O0 -g
+
+ TRUE
+ FORTRAN
+
+ -cxxlib
+
+
+ -DFORTRANUNDERSCORE -DNO_R16 -DCPRINTEL -DHAVE_SLASHPROC
+
+
+ -r8
+
+
+ -O0
+
+
+ -fixed -132
+
+
+ -free
+
+ TRUE
+
+ mpiifx
+ mpiicx
+ mpiicpx
+ icx
+ icpx
+ ifx
+
+
+ -qopenmp
+
+
+ $SHELL{$ENV{NETCDF_PATH}/bin/nf-config --flibs} -Wl,-rpath -Wl,$ENV{NETCDF_PATH}/lib -qmkl
+ $SHELL{$ENV{NETCDF_PATH}/bin/nc-config --libs}
+ -fiopenmp -fopenmp-targets=spir64
+
+ $ENV{NETCDF_PATH}
+ $ENV{PNETCDF_PATH}
+
+
+
+
+
+ -DHAVE_SLASHPROC
+
+
+ -O2
+
+
+ -O2
+
+
+ $SHELL{$ENV{NETCDF_PATH}/bin/nf-config --flibs} -Wl,-rpath -Wl,$ENV{NETCDF_PATH}/lib
+ $SHELL{$ENV{NETCDF_PATH}/bin/nc-config --libs}
+ -L/home/azamat/soft/libs -llapack -lblas
+
+ $ENV{NETCDF_PATH}
+ $ENV{PNETCDF_PATH}
+
+
+
+ lustre
+ mpicc
+ mpif90
+ mpic++
+ gfortran
+ gcc
+ g++
+
+ $SHELL{nc-config --libs}
+ $SHELL{nf-config --flibs} -lnetcdff -llapack -lblas
+ $ENV{MKLROOT}/lib/intel64/libmkl_scalapack_lp64.a -Wl,--start-group $ENV{MKLROOT}/lib/intel64/libmkl_intel_lp64.a $ENV{MKLROOT}/lib/intel64/libmkl_core.a $ENV{MKLROOT}/lib/intel64/libmkl_sequential.a -Wl,--end-group $ENV{MKLROOT}/lib/intel64/libmkl_blacs_intelmpi_lp64.a -lpthread -lm -lz
+
+
+ -lstdc++
+
+ $ENV{NETCDF_PATH}
+ $ENV{NETCDF_PATH}
+ $ENV{PNETCDF_PATH}
+
+
+
+ lustre
+ mpiicc
+ mpiicpc
+ mpiifort
+ mpicc
+ mpic++
+ mpif90
+ ifort
+ icc
+ icpc
+
+ $SHELL{$ENV{NETCDF_ROOT}/bin/nc-config --libs}
+ $SHELL{$ENV{NETCDF_ROOT}/bin/nf-config --flibs} -lnetcdff -llapack -lblas
+ -mkl -lpthread
+
+
+ -lstdc++
+
+ $ENV{NETCDF_ROOT}
+ $ENV{NETCDF_ROOT}
+ $ENV{PNETCDF_ROOT}
+
+
+
+ lustre
+ mpicc
+ mpif90
+ mpic++
+ gfortran
+ gcc
+ g++
+
+ $SHELL{nc-config --libs}
+ $SHELL{nf-config --flibs} -lnetcdff -llapack -lblas
+ $ENV{MKLROOT}/lib/intel64/libmkl_scalapack_lp64.a -Wl,--start-group $ENV{MKLROOT}/lib/intel64/libmkl_intel_lp64.a $ENV{MKLROOT}/lib/intel64/libmkl_core.a $ENV{MKLROOT}/lib/intel64/libmkl_sequential.a -Wl,--end-group $ENV{MKLROOT}/lib/intel64/libmkl_blacs_intelmpi_lp64.a -lpthread -lm -lz
+
+
+ -lstdc++
+
+ $ENV{NETCDF_PATH}
+ $ENV{NETCDF_PATH}
+ $ENV{PNETCDF_PATH}
+
+
+
+ lustre
+ mpiicc
+ mpiicpc
+ mpiifort
+ mpicc
+ mpic++
+ mpif90
+ ifort
+ icc
+ icpc
+
+ $SHELL{$ENV{NETCDF_ROOT}/bin/nc-config --libs}
+ $SHELL{$ENV{NETCDF_ROOT}/bin/nf-config --flibs} -lnetcdff -llapack -lblas
+ -mkl -lpthread
+
+
+ -lstdc++
+
+ $ENV{NETCDF_ROOT}
+ $ENV{NETCDF_ROOT}
+ $ENV{PNETCDF_ROOT}
+
+
+
+ TRUE
+
+ -DHAVE_VPRINTF -DHAVE_GETTIMEOFDAY -DHAVE_BACKTRACE
+
+
+ -fno-unsafe-math-optimizations
+ -g -fbacktrace -fbounds-check -ffpe-trap=invalid,zero,overflow
+
+
+ -L$ENV{HDF5_HOME}/lib -lhdf5_fortran -lhdf5 -lhdf5_hl -lhdf5hl_fortran
+ -L$ENV{NETCDF_PATH}/lib/ -lnetcdff -lnetcdf -lcurl -lblas -llapack
+
+ $ENV{HDF5_HOME}
+ $ENV{NETCDF_PATH}
+
+
+
+
+
+
+
+
+
+
+
+
+
+ USERDEFINED_MUST_EDIT_THIS
+
+
+ # USERDEFINED $SHELL{$NETCDF_PATH/bin/nf-config --flibs}
+
+
+
+
diff --git a/mache/cime_machine_config/config_machines.xml b/mache/cime_machine_config/config_machines.xml
new file mode 100644
index 00000000..fcd6a9ab
--- /dev/null
+++ b/mache/cime_machine_config/config_machines.xml
@@ -0,0 +1,3237 @@
+
+
+
+
+
+
+ Cori. XC40 Cray system at NERSC. Haswell partition. os is CNL, 32 pes/node, batch system is SLURM
+ cori-knl-is-default
+ CNL
+ intel,gnu
+ mpt
+ e3sm
+ /global/cfs/cdirs/e3sm
+ e3sm,m3411,m3412
+ $ENV{SCRATCH}/e3sm_scratch/cori-haswell
+ /global/cfs/cdirs/e3sm/www/$ENV{USER}
+ http://portal.nersc.gov/project/e3sm/$ENV{USER}
+ /global/cfs/cdirs/e3sm/inputdata
+ /global/cfs/cdirs/e3sm/inputdata/atm/datm7
+ $CIME_OUTPUT_ROOT/archive/$CASE
+ /global/cfs/cdirs/e3sm/baselines/$COMPILER
+ /global/cfs/cdirs/e3sm/tools/cprnc.cori/cprnc
+ 8
+ e3sm_developer
+ 4
+ nersc_slurm
+ e3sm
+ 32
+ 32
+ TRUE
+
+ srun
+
+ --label
+ -n {{ total_tasks }} -N {{ num_nodes }}
+ -c $SHELL{echo 64/`./xmlquery --value MAX_MPITASKS_PER_NODE`|bc}
+ $SHELL{if [ 32 -ge `./xmlquery --value MAX_MPITASKS_PER_NODE` ]; then echo "--cpu_bind=cores"; else echo "--cpu_bind=threads";fi;}
+ -m plane=$SHELL{echo `./xmlquery --value MAX_MPITASKS_PER_NODE`}
+
+
+
+ /opt/modules/default/init/perl
+ /opt/modules/default/init/python
+ /opt/modules/default/init/sh
+ /opt/modules/default/init/csh
+ /opt/modules/default/bin/modulecmd perl
+ /opt/modules/default/bin/modulecmd python
+ module
+ module
+
+
+ PrgEnv-intel
+ PrgEnv-cray
+ PrgEnv-gnu
+ intel
+ cce
+ gcc
+ cray-parallel-netcdf
+ cray-hdf5-parallel
+ pmi
+ cray-libsci
+ cray-mpich2
+ cray-mpich
+ cray-netcdf
+ cray-hdf5
+ cray-netcdf-hdf5parallel
+ craype-sandybridge
+ craype-ivybridge
+ craype
+ papi
+ cmake
+ cray-petsc
+ esmf
+ zlib
+ craype-hugepages2M
+ darshan
+
+
+ craype
+ PrgEnv-intel
+ cray-mpich
+ craype-mic-knl
+ craype-haswell
+
+
+
+ cray-mpich cray-mpich/7.7.10
+
+
+
+ PrgEnv-intel/6.0.5
+ intel
+ intel/19.0.3.199
+
+
+
+ PrgEnv-intel PrgEnv-gnu/6.0.5
+ gcc
+ gcc/8.3.0
+ cray-libsci
+ cray-libsci/19.06.1
+
+
+
+ craype craype/2.6.2
+ pmi
+ pmi/5.0.14
+ craype-mic-knl
+ craype-haswell
+
+
+
+ cray-netcdf-hdf5parallel
+ cray-hdf5-parallel
+ cray-parallel-netcdf
+ cray-netcdf/4.6.3.2
+ cray-hdf5/1.10.5.2
+
+
+ cray-netcdf-hdf5parallel
+ cray-netcdf-hdf5parallel/4.6.3.2
+ cray-hdf5-parallel/1.10.5.2
+ cray-parallel-netcdf/1.11.1.1
+
+
+
+ git
+ git
+ cmake
+ cmake/3.20.2
+ perl5-extras
+
+
+
+ $CIME_OUTPUT_ROOT/$CASE/run
+ $CIME_OUTPUT_ROOT/$CASE/bld
+ 0.1
+
+
+ 1
+ 1
+
+
+ 128M
+ spread
+ threads
+ FALSE
+ static
+
+
+ yes
+
+
+
+
+
+ Cori. XC40 Cray system at NERSC. KNL partition. os is CNL, 68 pes/node (for now only use 64), batch system is SLURM
+ cori
+ CNL
+ intel,gnu
+ mpt,impi
+ e3sm
+ /global/cfs/cdirs/e3sm
+ e3sm,m3411,m3412
+ $ENV{SCRATCH}/e3sm_scratch/cori-knl
+ /global/cfs/cdirs/e3sm/www/$ENV{USER}
+ http://portal.nersc.gov/project/e3sm/$ENV{USER}
+ /global/cfs/cdirs/e3sm/inputdata
+ /global/cfs/cdirs/e3sm/inputdata/atm/datm7
+ $CIME_OUTPUT_ROOT/archive/$CASE
+ /global/cfs/cdirs/e3sm/baselines/$COMPILER
+ /global/cfs/cdirs/e3sm/tools/cprnc.cori/cprnc
+ 8
+ e3sm_developer
+ 4
+ nersc_slurm
+ e3sm
+ 128
+ 64
+ TRUE
+
+ srun
+
+ --label
+ -n {{ total_tasks }} -N {{ num_nodes }}
+ -c $SHELL{mpn=`./xmlquery --value MAX_MPITASKS_PER_NODE`; if [ 68 -ge $mpn ]; then c0=`expr 272 / $mpn`; c1=`expr $c0 / 4`; cflag=`expr $c1 \* 4`; echo $cflag|bc ; else echo 272/$mpn|bc;fi;}
+ $SHELL{if [ 68 -ge `./xmlquery --value MAX_MPITASKS_PER_NODE` ]; then echo "--cpu_bind=cores"; else echo "--cpu_bind=threads";fi;}
+ -m plane=$SHELL{echo `./xmlquery --value MAX_MPITASKS_PER_NODE`}
+
+
+
+ /opt/modules/default/init/perl
+ /opt/modules/default/init/python
+ /opt/modules/default/init/sh
+ /opt/modules/default/init/csh
+ /opt/modules/default/bin/modulecmd perl
+ /opt/modules/default/bin/modulecmd python
+ module
+ module
+
+ craype
+ craype-mic-knl
+ craype-haswell
+ PrgEnv-intel
+ PrgEnv-cray
+ PrgEnv-gnu
+ intel
+ cce
+ gcc
+ cray-parallel-netcdf
+ cray-hdf5-parallel
+ pmi
+ cray-mpich2
+ cray-mpich
+ cray-netcdf
+ cray-hdf5
+ cray-netcdf-hdf5parallel
+ cray-libsci
+ papi
+ cmake
+ cray-petsc
+ esmf
+ zlib
+ craype-hugepages2M
+ darshan
+
+
+ craype
+ PrgEnv-intel
+ cray-mpich
+ craype-haswell
+ craype-mic-knl
+
+
+
+ cray-mpich cray-mpich/7.7.10
+
+
+
+ cray-mpich impi/2020
+
+
+
+ PrgEnv-intel/6.0.5
+ intel
+ intel/19.0.3.199
+
+
+
+ PrgEnv-intel PrgEnv-gnu/6.0.5
+ gcc
+ gcc/8.3.0
+ cray-libsci
+ cray-libsci/19.06.1
+
+
+
+ craype craype/2.6.2
+ pmi
+ pmi/5.0.14
+ craype-haswell
+ craype-mic-knl
+
+
+
+ cray-netcdf-hdf5parallel
+ cray-hdf5-parallel
+ cray-parallel-netcdf
+ cray-netcdf/4.6.3.2
+ cray-hdf5/1.10.5.2
+
+
+ cray-netcdf-hdf5parallel
+ cray-netcdf-hdf5parallel/4.6.3.2
+ cray-hdf5-parallel/1.10.5.2
+ cray-parallel-netcdf/1.11.1.1
+
+
+
+ git
+ git
+ cmake
+ cmake/3.20.2
+ perl5-extras
+
+
+
+
+
+
+ $CIME_OUTPUT_ROOT/$CASE/run
+ $CIME_OUTPUT_ROOT/$CASE/bld
+ 0.1
+
+ 1
+ 1
+
+
+ 128M
+ spread
+ threads
+ FALSE
+ static
+
+
+
+ disabled
+
+
+
+ 1
+
+
+
+
+
+ Stampede2. Intel skylake nodes at TACC. 48 cores per node, batch system is SLURM
+ .*stampede2.*
+ LINUX
+ intel,gnu
+ impi
+ $ENV{SCRATCH}
+ acme
+ $ENV{SCRATCH}/acme_scratch/stampede2
+ $ENV{SCRATCH}/inputdata
+ $ENV{SCRATCH}/inputdata/atm/datm7
+ $CIME_OUTPUT_ROOT/archive/$CASE
+ $ENV{SCRATCH}/baselines/$COMPILER
+ $ENV{SCRATCH}/tools/cprnc.cori/cprnc
+ 8
+ e3sm_developer
+ slurm
+ e3sm
+ 96
+ 48
+ FALSE
+
+ ibrun
+
+
+ /opt/apps/lmod/lmod/init/perl
+ /opt/apps/lmod/lmod/init/python
+ /opt/apps/lmod/lmod/init/sh
+ /opt/apps/lmod/lmod/init/csh
+ /opt/apps/lmod/lmod/libexec/lmod perl
+ /opt/apps/lmod/lmod/libexec/lmod python
+ module -q
+ module -q
+
+
+
+
+
+
+ intel/18.0.0
+
+
+
+ gcc/6.3.0
+
+
+
+ impi/18.0.0
+
+
+
+ hdf5/1.8.16
+ netcdf/4.3.3.1
+
+
+ phdf5/1.8.16
+ parallel-netcdf/4.3.3.1
+ pnetcdf/1.8.1
+
+
+ git
+ cmake
+ autotools
+ xalt
+
+
+
+
+
+
+ $CIME_OUTPUT_ROOT/$CASE/run
+ $CIME_OUTPUT_ROOT/$CASE/bld
+ 0.1
+
+ 1
+ 1
+
+ 128M
+ spread
+ threads
+ 1
+ -l
+
+
+
+
+ Mac OS/X workstation or laptop
+
+ Darwin
+ gnu
+ openmpi,mpich
+ $ENV{HOME}/projects/acme/scratch
+ $ENV{HOME}/projects/acme/cesm-inputdata
+ $ENV{HOME}/projects/acme/ptclm-data
+ $ENV{HOME}/projects/acme/scratch/archive/$CASE
+ $ENV{HOME}/projects/acme/baselines/$COMPILER
+ $CCSMROOT/tools/cprnc/build/cprnc
+ 4
+ e3sm_developer
+ none
+ jnjohnson at lbl dot gov
+ 4
+ 2
+
+ mpirun
+
+
+
+ $ENV{HOME}/projects/acme/scratch/$CASE/run
+ $ENV{HOME}/projects/acme/scratch/$CASE/bld
+
+
+
+
+
+ Linux workstation or laptop
+ none
+ LINUX
+ gnu
+ openmpi,mpich
+ $ENV{HOME}/projects/acme/scratch
+ $ENV{HOME}/projects/acme/cesm-inputdata
+ $ENV{HOME}/projects/acme/ptclm-data
+ $ENV{HOME}/projects/acme/scratch/archive/$CASE
+ $ENV{HOME}/projects/acme/baselines/$COMPILER
+ $CCSMROOT/tools/cprnc/build/cprnc
+ 4
+ e3sm_developer
+ none
+ jayesh at mcs dot anl dot gov
+ 4
+ 2
+
+ mpirun
+
+ -np {{ total_tasks }}
+
+
+
+ $ENV{HOME}/projects/acme/scratch/$CASE/run
+ $ENV{HOME}/projects/acme/scratch/$CASE/bld
+
+
+
+
+
+ Singularity container
+ singularity
+ LINUX
+ gnu
+ mpich
+ $ENV{HOME}/projects/e3sm/scratch
+ $ENV{HOME}/projects/e3sm/cesm-inputdata
+ $ENV{HOME}/projects/e3sm/ptclm-data
+ $ENV{HOME}/projects/e3sm/scratch/archive/$CASE
+ $ENV{HOME}/projects/e3sm/baselines/$COMPILER
+ $CCSMROOT/tools/cprnc/build/cprnc
+ make
+ 4
+ e3sm_developer
+ none
+ lukasz at uchicago dot edu
+ 16
+ 16
+
+ mpirun
+
+ -launcher fork -hosts localhost -np {{ total_tasks }}
+
+
+
+ $ENV{HOME}/projects/e3sm/scratch/$CASE/run
+ $ENV{HOME}/projects/e3sm/scratch/$CASE/bld
+
+ $SRCROOT
+
+
+ /usr/local/packages/netcdf-serial
+ /usr/local/packages/cmake/bin:/usr/local/packages/hdf5-serial/bin:/usr/local/packages/netcdf-serial/bin:$ENV{PATH}
+ /usr/local/packages/szip/lib:/usr/local/packages/hdf5-serial/lib:/usr/local/packages/netcdf-serial/lib
+
+
+ /usr/local/packages/netcdf-parallel
+ /usr/local/packages/pnetcdf
+ /usr/local/packages/hdf5-parallel
+ /usr/local/packages/cmake/bin:/usr/local/packages/mpich/bin:/usr/local/packages/hdf5-parallel/bin:/usr/local/packages/netcdf-parallel/bin:/usr/local/packages/pnetcdf/bin:$ENV{PATH}
+ /usr/local/packages/mpich/lib:/usr/local/packages/szip/lib:/usr/local/packages/hdf5-parallel/lib:/usr/local/packages/netcdf-parallel/lib:/usr/local/packages/pnetcdf/lib
+
+
+
+
+ Linux workstation for Jenkins testing
+ (melvin|watson|s999964|climate|penn|sems)
+ LINUX
+ proxy.sandia.gov:80
+ gnu,intel
+ openmpi
+ /sems-data-store/ACME/timings
+ .*
+ $ENV{HOME}/acme/scratch
+ /sems-data-store/ACME/inputdata
+ /sems-data-store/ACME/inputdata/atm/datm7
+ $CIME_OUTPUT_ROOT/archive/$CASE
+ /sems-data-store/ACME/baselines/$COMPILER
+ /sems-data-store/ACME/cprnc/build.new/cprnc
+ 32
+ e3sm_developer
+ none
+ jgfouca at sandia dot gov
+ 48
+ 48
+
+ mpirun
+
+ -np {{ total_tasks }}
+ --map-by ppr:{{ tasks_per_numa }}:socket:PE=$ENV{OMP_NUM_THREADS} --bind-to hwthread:overload-allowed
+
+
+
+ /usr/share/Modules/init/python.py
+ /usr/share/Modules/init/perl.pm
+ /usr/share/Modules/init/sh
+ /usr/share/Modules/init/csh
+ /usr/bin/modulecmd python
+ /usr/bin/modulecmd perl
+ module
+ module
+
+
+ sems-env
+ acme-env
+ sems-git
+ acme-binutils
+ sems-python/3.5.2
+ sems-cmake/3.12.2
+
+
+ sems-gcc/7.3.0
+
+
+ sems-intel/16.0.3
+
+
+ sems-netcdf/4.4.1/exo
+ acme-pfunit/3.2.8/base
+
+
+ acme-openmpi/2.1.5
+ acme-netcdf/4.7.4/acme
+
+
+ $CIME_OUTPUT_ROOT/$CASE/run
+ $CIME_OUTPUT_ROOT/$CASE/bld
+ 0.1
+ 1000
+
+
+ $ENV{SEMS_NETCDF_ROOT}
+ 64M
+ spread
+ threads
+
+
+
+
+ Huge Linux workstation for Sandia climate scientists
+ mappy
+ LINUX
+ proxy.sandia.gov:80
+ gnu,intel
+ openmpi
+ /sems-data-store/ACME/mappy/timings
+ .*
+ $ENV{HOME}/acme/scratch
+ /sems-data-store/ACME/inputdata
+ /sems-data-store/ACME/inputdata/atm/datm7
+ $CIME_OUTPUT_ROOT/archive/$CASE
+ /sems-data-store/ACME/baselines/mappy/$COMPILER
+ /sems-data-store/ACME/mappy/cprnc/cprnc
+ 64
+ e3sm_developer
+ none
+ jgfouca at sandia dot gov
+ 64
+ 64
+
+ mpirun
+
+ -np {{ total_tasks }}
+ --map-by ppr:{{ tasks_per_numa }}:socket:PE=$ENV{OMP_NUM_THREADS} --bind-to hwthread:overload-allowed
+
+
+
+ /usr/share/Modules/init/python.py
+ /usr/share/Modules/init/perl.pm
+ /usr/share/Modules/init/sh
+ /usr/share/Modules/init/csh
+ /usr/bin/modulecmd python
+ /usr/bin/modulecmd perl
+ module
+ module
+
+
+ sems-env
+ acme-env
+ sems-git
+ sems-python/3.5.2
+ sems-cmake/3.19.1
+
+
+ acme-gcc/8.1.0
+
+
+ sems-intel/19.0.5
+
+
+ acme-netcdf/4.4.1/exo_acme
+ acme-pfunit/3.2.8/base
+
+
+ acme-openmpi/2.1.5
+ acme-netcdf/4.7.4/acme
+
+
+ $CIME_OUTPUT_ROOT/$CASE/run
+ $CIME_OUTPUT_ROOT/$CASE/bld
+ 0.1
+ 0
+
+
+ $ENV{SEMS_NETCDF_ROOT}
+ 64M
+ spread
+ threads
+
+
+
+
+ IBM Power 8 Testbed machine
+ white
+ LINUX
+ gnu
+ openmpi
+ $ENV{HOME}/projects/e3sm/scratch
+ $ENV{HOME}/projects/e3sm/cesm-inputdata
+ $ENV{HOME}/projects/e3sm/ptclm-data
+ $ENV{HOME}/projects/e3sm/scratch/archive/$CASE
+ $ENV{HOME}/projects/e3sm/baselines/$COMPILER
+ $CCSMROOT/tools/cprnc/build/cprnc
+ 32
+ e3sm_developer
+ lsf
+ mdeakin at sandia dot gov
+ 4
+ 1
+
+ mpirun
+
+
+
+ /usr/share/Modules/init/sh
+ /usr/share/Modules/init/python.py
+ module
+ /usr/bin/modulecmd python
+
+ devpack/20181011/openmpi/2.1.2/gcc/7.2.0/cuda/9.2.88
+
+
+ $ENV{HOME}/projects/e3sm/scratch/$CASE/run
+ $ENV{HOME}/projects/e3sm/scratch/$CASE/bld
+
+ $ENV{NETCDF_ROOT}
+ /ascldap/users/jgfouca/packages/netcdf-fortran-4.4.4-white
+ $SRCROOT
+
+
+
+
+ Skylake Testbed machine
+ blake
+ LINUX
+ intel18
+ openmpi
+ $ENV{HOME}/projects/e3sm/scratch
+ $ENV{HOME}/projects/e3sm/cesm-inputdata
+ $ENV{HOME}/projects/e3sm/ptclm-data
+ $ENV{HOME}/projects/e3sm/scratch/archive/$CASE
+ $ENV{HOME}/projects/e3sm/baselines/$COMPILER
+ $CCSMROOT/tools/cprnc/build/cprnc
+ 48
+ e3sm_developer
+ slurm
+ mdeakin at sandia dot gov
+ 48
+ 48
+
+ mpirun
+
+
+
+ /usr/share/Modules/init/sh
+ /usr/share/Modules/init/python.py
+ module
+ module
+
+ zlib/1.2.11
+ intel/compilers/18.1.163
+ openmpi/2.1.2/intel/18.1.163
+ hdf5/1.10.1/openmpi/2.1.2/intel/18.1.163
+ netcdf-exo/4.4.1.1/openmpi/2.1.2/intel/18.1.163
+
+
+ $ENV{HOME}/projects/e3sm/scratch/$CASE/run
+ $ENV{HOME}/projects/e3sm/scratch/$CASE/bld
+
+ $ENV{NETCDF_ROOT}
+ $ENV{NETCDFF_ROOT}
+
+
+
+
+ Linux workstation for ANL
+ compute.*mcs.anl.gov
+ LINUX
+ gnu
+ mpich,openmpi
+ $ENV{HOME}/acme/scratch
+ /home/climate1/acme/inputdata
+ /home/climate1/acme/inputdata/atm/datm7
+ $CIME_OUTPUT_ROOT/archive/$CASE
+ /home/climate1/acme/baselines/$COMPILER
+ /home/climate1/acme/cprnc/build/cprnc
+ make
+ 32
+ e3sm_developer
+ none
+ jgfouca at sandia dot gov
+ 32
+ 32
+
+ mpirun
+
+ -l -np {{ total_tasks }}
+
+
+
+ mpirun
+
+ -np {{ total_tasks }}
+
+
+
+ /software/common/adm/packages/softenv-1.6.2/etc/softenv-load.csh
+ /software/common/adm/packages/softenv-1.6.2/etc/softenv-load.sh
+ source /software/common/adm/packages/softenv-1.6.2/etc/softenv-aliases.csh ; soft
+ source /software/common/adm/packages/softenv-1.6.2/etc/softenv-aliases.sh ; soft
+
+ +gcc-8.2.0
+
+
+ $CIME_OUTPUT_ROOT/$CASE/run
+ $CIME_OUTPUT_ROOT/$CASE/bld
+
+ /soft/apps/packages/climate/cmake/3.18.4/bin:/soft/apps/packages/climate/gmake/bin:$ENV{PATH}
+
+ /soft/apps/packages/climate/hdf5/1.8.16-serial/gcc-8.2.0/lib:/soft/apps/packages/climate/szip/2.1/gcc-8.2.0/lib:$ENV{LD_LIBRARY_PATH}
+
+ /soft/apps/packages/climate/netcdf/4.4.1c-4.2cxx-4.4.4f-serial/gcc-8.2.0
+
+
+
+ /soft/apps/packages/climate/hdf5/1.8.16-parallel/mpich-3.3.2/gcc-8.2.0/lib:/soft/apps/packages/climate/szip/2.1/gcc-8.2.0/lib:$ENV{LD_LIBRARY_PATH}
+
+ /soft/apps/packages/climate/mpich/3.3.2/gcc-8.2.0/bin:/soft/apps/packages/climate/cmake/3.18.4/bin:/soft/apps/packages/climate/gmake/bin:$ENV{PATH}
+
+ /soft/apps/packages/climate/hdf5/1.8.16-parallel/mpich-3.3.2/gcc-8.2.0
+
+ /soft/apps/packages/climate/netcdf/4.4.1c-4.2cxx-4.4.4f-parallel/mpich-3.3.2/gcc-8.2.0
+
+ /soft/apps/packages/climate/pnetcdf/1.12.0/mpich-3.3.2/gcc-8.2.0
+
+
+
+ /soft/apps/packages/climate/openmpi/2.1.5/gcc-8.2.0/bin:/soft/apps/packages/climate/cmake/3.18.4/bin:/soft/apps/packages/climate/gmake/bin:$ENV{PATH}
+ /soft/apps/packages/climate/zlib/1.2.11/gcc-8.2.0-static
+ /soft/apps/packages/climate/szip/2.1/gcc-8.2.0-static
+ /soft/apps/packages/climate/hdf5/1.8.12-parallel/openmpi-2.1.5/gcc-8.2.0-static
+ /soft/apps/packages/climate/netcdf/4.7.4c-4.3.1cxx-4.4.4f-parallel/openmpi-2.1.5/gcc-8.2.0-static-hdf5-1.8.12-pnetcdf-1.12.0
+ /soft/apps/packages/climate/pnetcdf/1.12.0/openmpi-2.1.5/gcc-8.2.0
+
+
+ 64M
+
+
+
+
+ SNL clust
+ (skybridge|chama)
+ LINUX
+ proxy.sandia.gov:80
+ intel
+ openmpi
+ fy210162
+ /projects/ccsm/timings
+ .*
+ /gpfs1/$USER/acme_scratch/sandiatoss3
+ /projects/ccsm/inputdata
+ /projects/ccsm/inputdata/atm/datm7
+ $CIME_OUTPUT_ROOT/archive/$CASE
+ /projects/ccsm/ccsm_baselines/$COMPILER
+ /projects/ccsm/cprnc/build.toss3/cprnc
+ 8
+ e3sm_integration
+ slurm
+ jgfouca at sandia dot gov
+ 16
+ 16
+ TRUE
+
+ mpiexec
+
+ --n {{ total_tasks }}
+ --map-by ppr:{{ tasks_per_numa }}:socket:PE=$ENV{OMP_NUM_THREADS} --bind-to core
+
+
+
+
+
+
+ /usr/share/lmod/lmod/init/python.py
+ /usr/share/lmod/lmod/init/perl.pm
+ /usr/share/lmod/lmod/init/sh
+ /usr/share/lmod/lmod/init/csh
+ /usr/share/lmod/lmod/libexec/lmod python
+ /usr/share/lmod/lmod/libexec/lmod perl
+ module
+ module
+
+
+ sems-env
+ acme-env
+ sems-git
+ sems-cmake/3.19.1
+ gnu/6.3.1
+ sems-intel/17.0.0
+
+
+ sems-openmpi/1.10.5
+ acme-netcdf/4.7.4/acme
+
+
+ sems-netcdf/4.4.1/exo
+
+
+ /nscratch/$USER/acme_scratch/sandiatoss3/$CASE/run
+ $CIME_OUTPUT_ROOT/$CASE/bld
+
+
+ 0.1
+
+
+ $ENV{SEMS_NETCDF_ROOT}
+ $ENV{SEMS_NETCDF_ROOT}/include
+ $ENV{SEMS_NETCDF_ROOT}/lib
+ 64M
+
+
+ $ENV{SEMS_NETCDF_ROOT}
+
+
+
+
+ SNL clust
+ ghost-login
+ LINUX
+ proxy.sandia.gov:80
+ intel
+ openmpi
+ fy210162
+
+ /gscratch/$USER/acme_scratch/ghost
+ /projects/ccsm/inputdata
+ /projects/ccsm/inputdata/atm/datm7
+ $CIME_OUTPUT_ROOT/archive/$CASE
+ /projects/ccsm/ccsm_baselines/$COMPILER
+ /projects/ccsm/cprnc/build.toss3/cprnc
+ 8
+ e3sm_integration
+ slurm
+ jgfouca at sandia dot gov
+ 36
+ 36
+ TRUE
+
+ mpiexec
+
+ --n {{ total_tasks }}
+ --map-by ppr:{{ tasks_per_numa }}:socket:PE=$ENV{OMP_NUM_THREADS} --bind-to core
+
+
+
+
+
+
+ /usr/share/lmod/lmod/init/python.py
+ /usr/share/lmod/lmod/init/perl.pm
+ /usr/share/lmod/lmod/init/sh
+ /usr/share/lmod/lmod/init/csh
+ /usr/share/lmod/lmod/libexec/lmod python
+ /usr/share/lmod/lmod/libexec/lmod perl
+ module
+ module
+
+
+ sems-env
+ sems-git
+ sems-python/3.5.2
+ sems-cmake
+ gnu/4.9.2
+ sems-intel/16.0.2
+ mkl/16.0
+ sems-netcdf/4.4.1/exo_parallel
+
+
+ sems-openmpi/1.10.5
+
+
+ $CIME_OUTPUT_ROOT/$CASE/run
+ $CIME_OUTPUT_ROOT/$CASE/bld
+
+
+
+ $ENV{SEMS_NETCDF_ROOT}
+ $ENV{SEMS_NETCDF_ROOT}/include
+ $ENV{SEMS_NETCDF_ROOT}/lib
+ 64M
+
+
+ $ENV{SEMS_NETCDF_ROOT}
+
+
+
+
+ ANL/LCRC Linux Cluster
+ b.*.lcrc.anl.gov
+ LINUX
+ intel,gnu
+ impi,openmpi,mvapich
+ condo
+ /lcrc/group/e3sm
+ .*
+ /lcrc/group/e3sm/$USER/scratch/anvil
+ /lcrc/group/e3sm/public_html/$ENV{USER}
+ https://web.lcrc.anl.gov/public/e3sm/$ENV{USER}
+ /lcrc/group/e3sm/data/inputdata
+ /lcrc/group/e3sm/data/inputdata/atm/datm7
+ /lcrc/group/e3sm/$USER/archive/$CASE
+ /lcrc/group/e3sm/baselines/anvil/$COMPILER
+ /lcrc/group/e3sm/soft/tools/cprnc/cprnc
+ 8
+ e3sm_integration
+ slurm
+ E3SM
+ 36
+ 36
+ FALSE
+
+ srun
+
+ -l -n {{ total_tasks }} -N {{ num_nodes }} --kill-on-bad-exit
+ --cpu_bind=cores
+ -c $ENV{OMP_NUM_THREADS}
+ -m plane={{ tasks_per_node }}
+
+
+
+ /home/software/spack-0.10.1/opt/spack/linux-centos7-x86_64/gcc-4.8.5/lmod-7.4.9-ic63herzfgw5u3na5mdtvp3nwxy6oj2z/lmod/lmod/init/sh;export MODULEPATH=$MODULEPATH:/software/centos7/spack-latest/share/spack/lmod/linux-centos7-x86_64/Core
+ /home/software/spack-0.10.1/opt/spack/linux-centos7-x86_64/gcc-4.8.5/lmod-7.4.9-ic63herzfgw5u3na5mdtvp3nwxy6oj2z/lmod/lmod/init/csh;setenv MODULEPATH $MODULEPATH\:/software/centos7/spack-latest/share/spack/lmod/linux-centos7-x86_64/Core
+ /home/software/spack-0.10.1/opt/spack/linux-centos7-x86_64/gcc-4.8.5/lmod-7.4.9-ic63herzfgw5u3na5mdtvp3nwxy6oj2z/lmod/lmod/init/env_modules_python.py
+ export MODULEPATH=$MODULEPATH:/software/centos7/spack-latest/share/spack/lmod/linux-centos7-x86_64/Core;/home/software/spack-0.10.1/opt/spack/linux-centos7-x86_64/gcc-4.8.5/lmod-7.4.9-ic63herzfgw5u3na5mdtvp3nwxy6oj2z/lmod/lmod/libexec/lmod python
+ module
+ module
+
+
+ cmake/3.20.3-vedypwm
+
+
+ gcc/7.4.0
+ intel/20.0.4-lednsve
+ intel-mkl/2020.4.304-voqlapk
+
+
+ mvapich2/2.3.6-verbs-x4iz7lq
+ netcdf-c/4.4.1-gei7x7w
+ netcdf-cxx/4.2-db2f5or
+ netcdf-fortran/4.4.4-b4ldb3a
+ parallel-netcdf/1.11.0-kj4jsvt
+
+
+ intel-mpi/2019.9.304-i42whlw
+ netcdf-c/4.4.1-blyisdg
+ netcdf-cxx/4.2-gkqc6fq
+ netcdf-fortran/4.4.4-eanrh5t
+ parallel-netcdf/1.11.0-y3nmmej
+
+
+ openmpi/4.1.1-v3b3npd
+ netcdf-c/4.4.1-smyuxme
+ netcdf-cxx/4.2-kfb2aag
+ netcdf-fortran/4.4.4-mablvyc
+ parallel-netcdf/1.11.0-x4n5s7k
+
+
+ gcc/8.2.0-xhxgy33
+ intel-mkl/2020.4.304-d6zw4xa
+
+
+ netcdf/4.4.1-ve2zfkw
+ netcdf-cxx/4.2-2rkopdl
+ netcdf-fortran/4.4.4-thtylny
+ mvapich2/2.2-verbs-ppznoge
+ parallel-netcdf/1.11.0-c22b2bn
+
+
+ intel-mpi/2019.9.304-rxpzd6p
+ netcdf-c/4.4.1-fysjgfx
+ netcdf-cxx/4.2-oaiw2v6
+ netcdf-fortran/4.4.4-kxgkaop
+ parallel-netcdf/1.11.0-fce7akl
+
+
+ openmpi/4.1.1-x5n4m36
+ netcdf-c/4.4.1-mtfptpl
+ netcdf-cxx/4.2-osp27dq
+ netcdf-fortran/4.4.4-5yd6dos
+ parallel-netcdf/1.11.0-a7ohxsg
+
+
+ $CIME_OUTPUT_ROOT/$CASE/run
+ $CIME_OUTPUT_ROOT/$CASE/bld
+ 0.1
+ 1000
+
+ $SHELL{dirname $(dirname $(which nc-config))}
+ $SHELL{dirname $(dirname $(which nf-config))}
+ $SHELL{dirname $(dirname $(which pnetcdf_version))}
+ /lcrc/group/e3sm/soft/perl/5.26.0/bin:$ENV{PATH}
+
+
+ 0
+ 1
+ 1
+
+
+ 1
+ 2
+
+
+ 10
+
+
+ 64M
+
+
+ granularity=core,balanced
+ 1
+
+
+ cores
+
+
+
+
+ ANL LCRC cluster 512-node AMD Epyc 7532 2-sockets 64-cores per node
+ chr.*
+ LINUX
+ intel,gnu
+ openmpi,impi
+ e3sm
+ /lcrc/group/e3sm/PERF_Chrysalis
+ .*
+ /lcrc/group/e3sm/$USER/scratch/chrys
+ /lcrc/group/e3sm/public_html/$ENV{USER}
+ https://web.lcrc.anl.gov/public/e3sm/$ENV{USER}
+ /lcrc/group/e3sm/data/inputdata
+ /lcrc/group/e3sm/data/inputdata/atm/datm7
+ /lcrc/group/e3sm/$USER/scratch/chrys/archive/$CASE
+ /lcrc/group/e3sm/baselines/chrys/$COMPILER
+ /lcrc/group/e3sm/tools/cprnc/cprnc
+ 8
+ e3sm_integration
+ 8
+ slurm
+ E3SM
+ 128
+ 64
+ FALSE
+
+ srun
+
+ --mpi=pmi2 -l -n {{ total_tasks }} -N {{ num_nodes }} --kill-on-bad-exit
+ $SHELL{if [ 64 -ge `./xmlquery --value MAX_MPITASKS_PER_NODE` ]; then echo "--cpu_bind=cores"; else echo "--cpu_bind=threads";fi;}
+ -c $SHELL{echo 128/ {{ tasks_per_node }} |bc}
+ -m plane={{ tasks_per_node }}
+
+
+
+ /gpfs/fs1/soft/chrysalis/spack/opt/spack/linux-centos8-x86_64/gcc-9.3.0/lmod-8.3-5be73rg/lmod/lmod/init/sh
+ /gpfs/fs1/soft/chrysalis/spack/opt/spack/linux-centos8-x86_64/gcc-9.3.0/lmod-8.3-5be73rg/lmod/lmod/init/csh
+ /gpfs/fs1/soft/chrysalis/spack/opt/spack/linux-centos8-x86_64/gcc-9.3.0/lmod-8.3-5be73rg/lmod/lmod/init/env_modules_python.py
+ /gpfs/fs1/soft/chrysalis/spack/opt/spack/linux-centos8-x86_64/gcc-9.3.0/lmod-8.3-5be73rg/lmod/lmod/libexec/lmod python
+ module
+ module
+
+
+ subversion/1.14.0-e4smcy3
+ perl/5.32.0-bsnc6lt
+ cmake/3.19.1-yisciec
+
+
+ intel/20.0.4-kodw73g
+ intel-mkl/2020.4.304-g2qaxzf
+
+
+ openmpi/4.1.1-qiqkjbu
+ hdf5/1.8.16-35xugty
+ netcdf-c/4.4.1-2vngykq
+ netcdf-cxx/4.2-gzago6i
+ netcdf-fortran/4.4.4-2kddbib
+ parallel-netcdf/1.11.0-go65een
+
+
+ intel-mpi/2019.9.304-tkzvizk
+ hdf5/1.8.16-se4xyo7
+ netcdf-c/4.4.1-qvxyzq2
+ netcdf-cxx/4.2-binixgj
+ netcdf-fortran/4.4.4-rdxohvp
+ parallel-netcdf/1.11.0-b74wv4m
+
+
+ gcc/9.2.0-ugetvbp
+ intel-mkl/2020.4.304-n3b5fye
+
+
+ openmpi/4.1.1-73gbwq4
+ hdf5/1.8.16-dqjdy2d
+ netcdf-c/4.4.1-y6dun2a
+ netcdf-cxx/4.2-vwlvgn6
+ netcdf-fortran/4.4.4-4lnfxki
+ parallel-netcdf/1.11.0-3x2favk
+
+
+ intel-mpi/2019.9.304-jdih7h5
+ hdf5/1.8.16-dtbpce3
+ netcdf-c/4.4.1-zcoa44z
+ netcdf-cxx/4.2-ayxg4c7
+ netcdf-fortran/4.4.4-2lfr2lr
+ parallel-netcdf/1.11.0-ifdodru
+
+
+ $CIME_OUTPUT_ROOT/$CASE/run
+ $CIME_OUTPUT_ROOT/$CASE/bld
+ 0.1
+ 1000
+
+ /lcrc/group/e3sm/soft/perl/chrys/lib/perl5
+ $SHELL{dirname $(dirname $(which nc-config))}
+ $SHELL{dirname $(dirname $(which nf-config))}
+ $SHELL{dirname $(dirname $(which pnetcdf_version))}
+
+
+ 128M
+
+
+ granularity=core,balanced
+
+
+ granularity=thread,balanced
+
+
+ cores
+
+
+
+
+ ANL/LCRC Linux Cluster
+ LINUX
+ pgigpu
+ mvapich
+ e3sm
+ /lcrc/group/e3sm
+ .*
+ /lcrc/group/e3sm/$USER/scratch/blues
+ /lcrc/group/e3sm/data/inputdata
+ /lcrc/group/e3sm/data/inputdata/atm/datm7
+ /lcrc/group/e3sm/$USER/archive/$CASE
+ /lcrc/group/e3sm/baselines/blues/$COMPILER
+ /lcrc/group/e3sm/soft/tools/cprnc/cprnc
+ 8
+ e3sm_integration
+ 4
+ slurm
+ E3SM
+ 16
+ 16
+ TRUE
+
+ srun
+
+ -l -n {{ total_tasks }} -N {{ num_nodes }} --kill-on-bad-exit
+ --cpu_bind=cores
+ -c $ENV{OMP_NUM_THREADS}
+ -m plane=$SHELL{echo 16/$OMP_NUM_THREADS|bc}
+
+
+
+ /home/software/spack-0.10.1/opt/spack/linux-centos7-x86_64/gcc-4.8.5/lmod-7.4.9-ic63herzfgw5u3na5mdtvp3nwxy6oj2z/lmod/lmod/init/sh;export MODULEPATH=$MODULEPATH:/software/centos7/spack-latest/share/spack/lmod/linux-centos7-x86_64/Core:/blues/gpfs/home/software/spack-0.10.1/share/spack/lmod/linux-centos7-x86_64/Core
+ /home/software/spack-0.10.1/opt/spack/linux-centos7-x86_64/gcc-4.8.5/lmod-7.4.9-ic63herzfgw5u3na5mdtvp3nwxy6oj2z/lmod/lmod/init/csh;setenv MODULEPATH $MODULEPATH\:/software/centos7/spack-latest/share/spack/lmod/linux-centos7-x86_64/Core\:/blues/gpfs/home/software/spack-0.10.1/share/spack/lmod/linux-centos7-x86_64/Core
+ /home/software/spack-0.10.1/opt/spack/linux-centos7-x86_64/gcc-4.8.5/lmod-7.4.9-ic63herzfgw5u3na5mdtvp3nwxy6oj2z/lmod/lmod/init/env_modules_python.py
+ export MODULEPATH=$MODULEPATH:/software/centos7/spack-latest/share/spack/lmod/linux-centos7-x86_64/Core:/blues/gpfs/home/software/spack-0.10.1/share/spack/lmod/linux-centos7-x86_64/Core;/home/software/spack-0.10.1/opt/spack/linux-centos7-x86_64/gcc-4.8.5/lmod-7.4.9-ic63herzfgw5u3na5mdtvp3nwxy6oj2z/lmod/lmod/libexec/lmod python
+ module
+ module
+
+
+ cmake/3.20.3-vedypwm
+
+
+ nvhpc/20.9-5brtudu
+ cuda/11.1.0-6dvax5z
+ netcdf-c/4.7.4-ltqliri
+ netcdf-cxx/4.2-kf5ox4e
+ netcdf-fortran/4.5.3-6mgyroo
+ mvapich2/2.3.4-blues-5fwicb5
+ parallel-netcdf/1.12.1-nyuvwhn
+
+
+ $CIME_OUTPUT_ROOT/$CASE/run
+ $CIME_OUTPUT_ROOT/$CASE/bld
+ 0.1
+ 1000
+
+ $SHELL{dirname $(dirname $(which nc-config))}
+ $SHELL{dirname $(dirname $(which nf-config))}
+ $SHELL{dirname $(dirname $(which pnetcdf_version))}
+ /lcrc/group/e3sm/soft/perl/5.26.0/bin:$ENV{PATH}
+
+
+ 0
+ 1
+
+
+ 1
+ 2
+
+
+ 64M
+ cores
+
+
+
+
+ ANL/LCRC Linux Cluster: 6x 128c EPYC nodes with 8x A100 GPUs
+ gpulogin.*
+ LINUX
+ pgigpu
+ openmpi
+ e3sm
+ /lcrc/group/e3sm
+ .*
+ /lcrc/group/e3sm/$USER/scratch/swing
+ /lcrc/group/e3sm/data/inputdata
+ /lcrc/group/e3sm/data/inputdata/atm/datm7
+ /lcrc/group/e3sm/$USER/archive/$CASE
+ /lcrc/group/e3sm/baselines/swing/$COMPILER
+ /lcrc/group/e3sm/soft/tools/cprnc/cprnc
+ 8
+ e3sm_gpu
+ 4
+ slurm
+ E3SM
+ 128
+ 128
+ TRUE
+
+ srun
+
+ -l -n {{ total_tasks }} -N {{ num_nodes }} -K
+ $SHELL{if [ 128 -ge `./xmlquery --value MAX_MPITASKS_PER_NODE` ]; then echo "--cpu_bind=cores"; else echo "--cpu_bind=threads";fi;}
+ -c $SHELL{echo 256/ {{ tasks_per_node }} |bc}
+ -m plane={{ tasks_per_node }}
+
+
+
+ /gpfs/fs1/soft/swing/spack/opt/spack/linux-ubuntu20.04-x86_64/gcc-9.3.0/lmod-8.3-5tuyfdb/lmod/lmod/init/sh
+ /gpfs/fs1/soft/swing/spack/opt/spack/linux-ubuntu20.04-x86_64/gcc-9.3.0/lmod-8.3-5tuyfdb/lmod/lmod/init/csh
+ /gpfs/fs1/soft/swing/spack/opt/spack/linux-ubuntu20.04-x86_64/gcc-9.3.0/lmod-8.3-5tuyfdb/lmod/lmod/init/env_modules_python.py
+ /gpfs/fs1/soft/swing/spack/opt/spack/linux-ubuntu20.04-x86_64/gcc-9.3.0/lmod-8.3-5tuyfdb/lmod/lmod/libexec/lmod python
+ module
+ module
+
+
+ cmake/3.21.1-e5i6eks
+
+
+ nvhpc/20.9-37zsymt
+ cuda/11.1.1-nkh7mm7
+ openmpi/4.1.1-r6ebr2e
+ netcdf-c/4.7.4-zppo53l
+ netcdf-cxx/4.2-wjm7fye
+ netcdf-fortran/4.5.3-srsajjs
+ parallel-netcdf/1.12.1-75szceu
+
+
+ $CIME_OUTPUT_ROOT/$CASE/run
+ $CIME_OUTPUT_ROOT/$CASE/bld
+ 0.1
+ 1000
+
+ $SHELL{dirname $(dirname $(which nc-config))}
+ $SHELL{dirname $(dirname $(which nf-config))}
+ $SHELL{dirname $(dirname $(which pnetcdf_version))}
+ /lcrc/group/e3sm/soft/perl/5.26.0/bin:$ENV{PATH}
+
+
+ 64M
+ cores
+
+
+
+
+ ANL/LCRC Cluster, Cray CS400, 352-nodes Xeon Phi 7230 KNLs 64C/1.3GHz + 672-nodes Xeon E5-2695v4 Broadwells 36C/2.10GHz, Intel Omni-Path network, SLURM batch system, Lmod module environment.
+ beboplogin.*
+ LINUX
+ intel,gnu
+ impi,mvapich
+ e3sm
+ /lcrc/group/e3sm/$USER/scratch/bebop
+ /lcrc/group/e3sm/data/inputdata
+ /lcrc/group/e3sm/data/inputdata/atm/datm7
+ /lcrc/group/e3sm/$USER/archive/$CASE
+ /lcrc/group/e3sm/baselines/bebop/$COMPILER
+ /lcrc/group/e3sm/soft/tools/cprnc/cprnc
+ 8
+ e3sm_integration
+ 4
+ slurm
+ E3SM
+ 36
+ 36
+ TRUE
+
+ mpirun
+
+ -l -n {{ total_tasks }}
+
+
+
+ srun
+
+ -l -n {{ total_tasks }} -N {{ num_nodes }} --kill-on-bad-exit
+ --cpu_bind=cores
+ -c $ENV{OMP_NUM_THREADS}
+ -m plane=$SHELL{echo 36/$OMP_NUM_THREADS|bc}
+
+
+
+
+
+
+ /home/software/spack-0.10.1/opt/spack/linux-centos7-x86_64/gcc-4.8.5/lmod-7.4.9-ic63herzfgw5u3na5mdtvp3nwxy6oj2z/lmod/lmod/init/sh
+ /home/software/spack-0.10.1/opt/spack/linux-centos7-x86_64/gcc-4.8.5/lmod-7.4.9-ic63herzfgw5u3na5mdtvp3nwxy6oj2z/lmod/lmod/init/csh
+ /home/software/spack-0.10.1/opt/spack/linux-centos7-x86_64/gcc-4.8.5/lmod-7.4.9-ic63herzfgw5u3na5mdtvp3nwxy6oj2z/lmod/lmod/init/env_modules_python.py
+ /home/software/spack-0.10.1/opt/spack/linux-centos7-x86_64/gcc-4.8.5/lmod-7.4.9-ic63herzfgw5u3na5mdtvp3nwxy6oj2z/lmod/lmod/libexec/lmod python
+ module
+ module
+
+
+ cmake/3.20.3-vedypwm
+
+
+ intel/18.0.4-443hhug
+ intel-mkl/2018.4.274-jwaeshj
+ hdf5/1.10.5-3mk3uik
+ netcdf/4.7.0-krelxcz
+ netcdf-fortran/4.4.5-74lj75q
+
+
+ intel-mpi/2018.4.274-4hmwfl6
+ parallel-netcdf/1.11.0-acswzws
+
+
+ mvapich2/2.3.1-verbs-omjz3ck
+ parallel-netcdf/1.11.2-7fy6qz3
+
+
+ gcc/8.2.0-g7hppkz
+ intel-mkl/2018.4.274-2amycpi
+ hdf5/1.8.16-mz7lmxh
+ netcdf/4.4.1-xkjcghm
+ netcdf-fortran/4.4.4-mpstomu
+
+
+ intel-mpi/2018.4.274-ozfo327
+ parallel-netcdf/1.11.0-filvnis
+
+
+ mvapich2/2.3-bebop-3xi4hiu
+ parallel-netcdf/1.11.2-hfn33fd
+
+
+ $CIME_OUTPUT_ROOT/$CASE/run
+ $CIME_OUTPUT_ROOT/$CASE/bld
+ 0.1
+
+ $SHELL{dirname $(dirname $(which nc-config))}
+ $SHELL{dirname $(dirname $(which nf-config))}
+ /lcrc/group/e3sm/soft/perl/5.26.0/bin:$ENV{PATH}
+
+
+ $SHELL{dirname $(dirname $(which pnetcdf_version))}
+
+
+ 128M
+ spread
+ threads
+
+
+ shm:tmi
+
+
+
+
+ LLNL Linux Cluster, Linux (pgi), 16 pes/node, batch system is Slurm
+ LINUX
+ intel
+ mpich
+ /p/lscratchh/$USER
+ /usr/gdata/climdat/ccsm3data/inputdata
+ /usr/gdata/climdat/ccsm3data/inputdata/atm/datm7
+ /p/lscratchh/$CCSMUSER/archive/$CASE
+ /p/lscratchh/$CCSMUSER/ccsm_baselines/$COMPILER
+ /p/lscratchd/ma21/ccsm3data/tools/cprnc/cprnc
+ 8
+ lc_slurm
+ donahue5 -at- llnl.gov
+ 16
+ 16
+
+
+
+
+ srun
+
+
+ /usr/share/lmod/lmod/init/env_modules_python.py
+ /usr/share/lmod/lmod/init/perl
+ /usr/share/lmod/lmod/init/sh
+ /usr/share/lmod/lmod/init/csh
+ module
+ module
+ /usr/share/lmod/lmod/libexec/lmod python
+ /usr/share/lmod/lmod/libexec/lmod perl
+
+ python
+ git
+ intel/19.0.4
+ mvapich2/2.3
+ cmake/3.14.5
+ netcdf-fortran/4.4.4
+ pnetcdf/1.9.0
+
+
+ /p/lscratchh/$CCSMUSER/ACME/$CASE/run
+ /p/lscratchh/$CCSMUSER/$CASE/bld
+
+ /usr/tce/packages/netcdf-fortran/netcdf-fortran-4.4.4-intel-18.0.1/
+ /usr/tce/packages/netcdf-fortran/netcdf-fortran-4.4.4-intel-18.0.1/
+
+
+ /usr/tce/packages/pnetcdf/pnetcdf-1.9.0-intel-18.0.1-mvapich2-2.2/
+
+
+
+
+ LLNL Linux Cluster, Linux (pgi), 36 pes/node, batch system is Slurm
+ LINUX
+ intel
+ mpich
+ /p/lscratchh/$USER
+ /usr/gdata/climdat/ccsm3data/inputdata
+ /usr/gdata/climdat/ccsm3data/inputdata/atm/datm7
+ /p/lscratchh/$CCSMUSER/archive/$CASE
+ /p/lscratchh/$CCSMUSER/ccsm_baselines/$COMPILER
+ /p/lscratchd/ma21/ccsm3data/tools/cprnc/cprnc
+ 8
+ lc_slurm
+ donahue5 -at- llnl.gov
+ 36
+ 36
+
+
+
+
+ srun
+
+
+ /usr/share/lmod/lmod/init/env_modules_python.py
+ /usr/share/lmod/lmod/init/perl
+ /usr/share/lmod/lmod/init/sh
+ /usr/share/lmod/lmod/init/csh
+ module
+ module
+ /usr/share/lmod/lmod/libexec/lmod python
+ /usr/share/lmod/lmod/libexec/lmod perl
+
+ python
+ git
+ intel/19.0.4
+ mvapich2/2.3
+ cmake/3.14.5
+ netcdf-fortran/4.4.4
+ pnetcdf/1.9.0
+
+
+ /p/lscratchh/$CCSMUSER/ACME/$CASE/run
+ /p/lscratchh/$CCSMUSER/$CASE/bld
+
+ /usr/tce/packages/netcdf-fortran/netcdf-fortran-4.4.4-intel-18.0.1/
+ /usr/tce/packages/netcdf-fortran/netcdf-fortran-4.4.4-intel-18.0.1/
+
+
+ /usr/tce/packages/pnetcdf/pnetcdf-1.9.0-intel-18.0.1-mvapich2-2.2/
+
+
+
+
+ ALCF Cray XC40 KNL, os is CNL, 64 pes/node, batch system is cobalt
+ theta.*
+ CNL
+ intel,gnu,cray
+ mpt
+ /projects/$PROJECT
+ ClimateEnergy_4
+ /projects/$PROJECT/$USER
+ /projects/ccsm/e3sm/inputdata
+ /projects/ccsm/e3sm/inputdata/atm/datm7
+ $CIME_OUTPUT_ROOT/archive/$CASE
+ /projects/$PROJECT/e3sm/baselines/$COMPILER
+ /projects/ccsm/e3sm/tools/cprnc/cprnc
+ 16
+ e3sm_developer
+ 4
+ cobalt_theta
+ E3SM
+ 128
+ 64
+ TRUE
+
+ aprun
+
+ -n {{ total_tasks }}
+ -N $SHELL{if [ `./xmlquery --value MAX_MPITASKS_PER_NODE` -gt `./xmlquery --value TOTAL_TASKS` ];then echo `./xmlquery --value TOTAL_TASKS`;else echo `./xmlquery --value MAX_MPITASKS_PER_NODE`;fi;}
+ --cc depth -d $SHELL{echo `./xmlquery --value MAX_TASKS_PER_NODE`/`./xmlquery --value MAX_MPITASKS_PER_NODE`|bc} -j $SHELL{if [ 64 -ge `./xmlquery --value MAX_TASKS_PER_NODE` ];then echo 1;else echo `./xmlquery --value MAX_TASKS_PER_NODE`/64|bc;fi;}
+ $ENV{SMP_VARS} $ENV{labeling}
+
+
+
+ /opt/modules/default/init/perl.pm
+ /opt/modules/default/init/python.py
+ /opt/modules/default/init/sh
+ /opt/modules/default/init/csh
+ /opt/modules/default/bin/modulecmd perl
+ /opt/modules/default/bin/modulecmd python
+ module
+ module
+
+ cray-mpich
+ cray-parallel-netcdf
+ cray-hdf5-parallel
+ cray-hdf5
+ cray-netcdf
+ cray-netcdf-hdf5parallel
+ craype/2.6.5
+ cmake/3.18.0
+
+
+ PrgEnv-gnu
+ PrgEnv-cray
+ PrgEnv-intel/6.0.7
+ intel/19.1.0.166
+
+
+ PrgEnv-intel
+ PrgEnv-cray
+ PrgEnv-gnu/6.0.7
+ gcc/9.3.0
+
+
+ PrgEnv-intel
+ PrgEnv-gnu
+ gcc/9.3.0
+ PrgEnv-cray/6.0.9
+ cce/10.0.3
+ darshan
+
+
+ cray-libsci/20.09.1
+
+
+ cray-mpich/7.7.14
+ cray-hdf5-parallel/1.10.6.1
+ cray-netcdf-hdf5parallel/4.7.3.3
+ cray-parallel-netcdf/1.12.0.1
+
+
+ $CIME_OUTPUT_ROOT/$CASE/run
+ $CIME_OUTPUT_ROOT/$CASE/bld
+ 0.1
+ 1000
+
+ /projects/ccsm/e3sm/soft/perl/5.26.0/bin:$ENV{PATH}
+ /projects/ccsm/e3sm/tools/mpas
+ 1
+ -e PMI_LABEL_ERROUT=1
+
+
+
+ -e OMP_NUM_THREADS=$ENV{OMP_NUM_THREADS} -e OMP_STACKSIZE=128M -e KMP_AFFINITY=granularity=thread,scatter
+
+
+ -e OMP_NUM_THREADS=$ENV{OMP_NUM_THREADS} -e OMP_STACKSIZE=128M -e OMP_PROC_BIND=spread -e OMP_PLACES=threads
+
+
+
+
+ ANL experimental/evaluation cluster, batch system is cobalt
+ jlse.*
+ LINUX
+ oneapi-ifx,oneapi-ifort,gnu
+ mpich,impi,openmpi
+ /gpfs/jlse-fs0/projects/climate/$USER/scratch
+ /gpfs/jlse-fs0/projects/climate/inputdata
+ /gpfs/jlse-fs0/projects/climate/inputdata/atm/datm7
+ $CIME_OUTPUT_ROOT/archive/$CASE
+ /gpfs/jlse-fs0/projects/climate/baselines/$COMPILER
+ /gpfs/jlse-fs0/projects/climate/tools/cprnc/cprnc
+ 16
+ e3sm_developer
+ 4
+ cobalt_theta
+ e3sm
+ 112
+ 112
+ FALSE
+
+ mpirun
+
+ -l -n {{ total_tasks }}
+
+
+
+ mpirun
+
+ --tag-output -n {{ total_tasks }}
+ --map-by ppr:{{ tasks_per_numa }}:socket:PE=$ENV{OMP_NUM_THREADS} --bind-to hwthread
+
+
+
+ /usr/share/Modules/init/sh
+ /usr/share/Modules/init/csh
+ /usr/share/Modules/init/python.py
+ module
+ module
+ /usr/bin/modulecmd python
+
+
+ /soft/modulefiles
+ /soft/packaging/spack-builds/modules/linux-rhel7-x86_64
+ /soft/restricted/CNDA/modulefiles
+ /home/azamat/soft/modulefiles
+ cmake/3.17.0-gcc-9.3.0-5dgh2gv
+
+
+ e3sm-env-vars/2021.06.10
+ oneapi/2021.04.30.003
+
+
+ cmake
+ gcc/8.2.0
+
+
+ $CIME_OUTPUT_ROOT/$CASE/run
+ $CIME_OUTPUT_ROOT/$CASE/bld
+
+ /home/azamat/soft/perl/5.32.0/bin:$ENV{PATH}
+ /home/azamat/soft/netcdf/4.4.1c-4.2cxx-4.4.4f/oneapi-2020.12.15.004-intel_mpi-2019.4.243
+ /home/azamat/soft/pnetcdf/1.12.1/oneapi-2020.12.15.004-intel_mpi-2019.4.243
+
+
+ 10
+ omp
+ spread
+ unit
+
+
+ icc
+ icpc
+ ifort
+ /home/azamat/soft/openmpi/2.1.6/intel19/bin:$ENV{PATH}
+ /home/azamat/soft/openmpi/2.1.6/intel19/lib:$ENV{LD_LIBRARY_PATH}
+ /home/azamat/soft/netcdf/4.4.1c-4.2cxx-4.4.4f/intel19-openmpi2.1.6
+ /home/azamat/soft/pnetcdf/1.12.1/intel19-openmpi2.1.6
+
+
+ gcc
+ g++
+ gfortran
+ /home/azamat/soft/openmpi/2.1.6/gcc8.2.0/lib:/home/azamat/soft/libs:$ENV{LD_LIBRARY_PATH}
+ /home/azamat/soft/openmpi/2.1.6/gcc8.2.0/bin:/home/azamat/soft/cmake/3.18.5/bin:$ENV{PATH}
+ /home/azamat/soft/cmake/3.18.5
+ /home/azamat/soft/cmake/3.18.5/share/aclocal
+ /home/azamat/soft/cmake/3.18.5
+ /home/azamat/soft/netcdf/4.4.1c-4.2cxx-4.4.4f/gcc8.2.0-openmpi2.1.6
+ /home/azamat/soft/pnetcdf/1.12.1/gcc8.2.0-openmpi2.1.6
+
+
+ 0
+
+
+
+ verbose,granularity=thread,balanced
+ 128M
+
+
+ threads
+ 128M
+
+
+ -1
+
+
+
+
+ PNL cluster, OS is Linux, batch system is SLURM
+ sooty
+ LINUX
+ intel,pgi
+ mvapich2
+ /lustre/$USER/cime_output_root
+ /lustre/climate/csmdata/
+ /lustre/climate/csmdata/atm/datm7
+ /lustre/$USER/archive/$CASE
+ /lustre/climate/acme_baselines/$COMPILER
+ /lustre/climate/acme_baselines/cprnc/cprnc
+ 8
+ slurm
+ balwinder.singh -at- pnnl.gov
+ 8
+ 8
+ FALSE
+
+
+
+
+ srun
+
+ --mpi=none
+ --ntasks={{ total_tasks }}
+ --cpu_bind=sockets --cpu_bind=verbose
+ --kill-on-bad-exit
+
+
+
+ /share/apps/modules/Modules/3.2.10/init/perl.pm
+ /share/apps/modules/Modules/3.2.10/init/python.py
+ /etc/profile.d/modules.csh
+ /etc/profile.d/modules.sh
+ /share/apps/modules/Modules/3.2.10/bin/modulecmd perl
+ /share/apps/modules/Modules/3.2.10/bin/modulecmd python
+ module
+ module
+
+
+
+
+ perl/5.20.0
+ cmake/3.17.1
+ python/2.7.8
+ svn/1.8.13
+
+
+ intel/15.0.1
+ mkl/15.0.1
+
+
+ pgi/14.10
+
+
+ mvapich2/2.1
+
+
+ netcdf/4.3.2
+
+
+ /lustre/$USER/csmruns/$CASE/run
+ /lustre/$USER/csmruns/$CASE/bld
+
+ $ENV{MKLROOT}
+ $ENV{NETCDF_LIB}/../
+ 64M
+
+
+
+
+ PNNL Intel KNC cluster, OS is Linux, batch system is SLURM
+ glogin
+ LINUX
+ intel
+ impi,mvapich2
+ /dtemp/$PROJECT/$USER
+ /dtemp/st49401/sing201/acme/inputdata/
+ /dtemp/st49401/sing201/acme/inputdata/atm/datm7
+ $CIME_OUTPUT_ROOT/archive/$CASE
+ $CIME_OUTPUT_ROOT/acme/acme_baselines
+ $CIME_OUTPUT_ROOT/acme/acme_baselines/cprnc/cprnc
+ 8
+ slurm
+ balwinder.singh -at- pnnl.gov
+ 16
+ 16
+ TRUE
+
+
+
+
+ mpirun
+
+ -np {{ total_tasks }}
+
+
+
+ srun
+
+ --mpi=none
+ --ntasks={{ total_tasks }}
+ --cpu_bind=sockets --cpu_bind=verbose
+ --kill-on-bad-exit
+
+
+
+ /opt/lmod/7.8.4/init/env_modules_python.py
+ /etc/profile.d/modules.csh
+ /etc/profile.d/modules.sh
+ /opt/lmod/7.8.4/libexec/lmod python
+ module
+ module
+
+
+
+
+ python/2.7.9
+
+
+ intel/ips_18
+ mkl/14.0
+
+
+ impi/4.1.2.040
+
+
+ mvapich2/1.9
+
+
+ netcdf/4.3.0
+
+
+ $CIME_OUTPUT_ROOT/csmruns/$CASE/run
+ $CIME_OUTPUT_ROOT/csmruns/$CASE/bld
+
+ 64M
+ $ENV{NETCDF_ROOT}
+
+
+ $ENV{MLIBHOME}
+ intel
+
+
+
+
+ PNL Haswell cluster, OS is Linux, batch system is SLURM
+ constance
+ LINUX
+ intel,pgi,nag
+ mvapich2,openmpi,intelmpi,mvapich
+ /pic/scratch/$USER
+ /pic/projects/climate/csmdata/
+ /pic/projects/climate/csmdata/atm/datm7
+ /pic/scratch/$USER/archive/$CASE
+ /pic/projects/climate/acme_baselines/$COMPILER
+ /pic/projects/climate/acme_baselines/cprnc/cprnc
+ 8
+ slurm
+ balwinder.singh -at- pnnl.gov
+ 24
+ 24
+ FALSE
+
+
+
+
+ srun
+
+ --mpi=none
+ --ntasks={{ total_tasks }}
+ --cpu_bind=sockets --cpu_bind=verbose
+ --kill-on-bad-exit
+
+
+
+ srun
+
+ --ntasks={{ total_tasks }}
+ --cpu_bind=sockets --cpu_bind=verbose
+ --kill-on-bad-exit
+
+
+
+ mpirun
+
+ -n {{ total_tasks }}
+
+
+
+ mpirun
+
+ -n {{ total_tasks }}
+
+
+
+ /share/apps/modules/Modules/3.2.10/init/perl.pm
+ /share/apps/modules/Modules/3.2.10/init/python.py
+ /etc/profile.d/modules.csh
+ /etc/profile.d/modules.sh
+ /share/apps/modules/Modules/3.2.10/bin/modulecmd perl
+ /share/apps/modules/Modules/3.2.10/bin/modulecmd python
+ module
+ module
+
+
+
+
+ perl/5.20.0
+
+ cmake/3.12.3
+ python/2.7.8
+
+
+ intel/15.0.1
+ mkl/15.0.1
+
+
+ pgi/14.10
+
+
+ nag/6.0
+ mkl/15.0.1
+
+
+ mvapich2/2.1
+
+
+ mvapich2/2.1
+
+
+ mvapich2/2.1
+
+
+ mvapich2/2.3b
+
+
+ intelmpi/5.0.1.035
+
+
+ openmpi/1.8.3
+
+
+ netcdf/4.3.2
+
+
+ netcdf/4.3.2
+
+
+ netcdf/4.4.1.1
+
+
+ /pic/scratch/$USER/csmruns/$CASE/run
+ /pic/scratch/$USER/csmruns/$CASE/bld
+
+ 64M
+ $ENV{NETCDF_LIB}/../
+
+
+ $ENV{MLIB_LIB}
+
+
+ $ENV{MLIB_LIB}
+
+
+
+
+ PNL E3SM Intel Xeon Gold 6148(Skylake) nodes, OS is Linux, SLURM
+ compy
+ LINUX
+ intel,pgi
+ impi,mvapich2
+ /compyfs
+ .*
+ /compyfs/$USER/e3sm_scratch
+ /compyfs/inputdata
+ /compyfs/inputdata/atm/datm7
+ /compyfs/$USER/e3sm_scratch/archive/$CASE
+ /compyfs/e3sm_baselines/$COMPILER
+ /compyfs/e3sm_baselines/cprnc/cprnc
+ 8
+ e3sm_integration
+ 4
+ slurm
+ bibi.mathew -at- pnnl.gov
+ 40
+ 40
+ TRUE
+
+
+
+
+ srun
+
+ --mpi=none
+ --ntasks={{ total_tasks }} --nodes={{ num_nodes }}
+ --kill-on-bad-exit
+ -l --cpu_bind=cores -c $ENV{OMP_NUM_THREADS} -m plane=$SHELL{echo 40/$OMP_NUM_THREADS|bc}
+
+
+
+ srun
+
+ --mpi=pmi2
+ --ntasks={{ total_tasks }} --nodes={{ num_nodes }}
+ --kill-on-bad-exit
+ -l --cpu_bind=cores -c $ENV{OMP_NUM_THREADS} -m plane=$SHELL{echo 40/$OMP_NUM_THREADS|bc}
+
+
+
+ /share/apps/modules/init/perl.pm
+ /share/apps/modules/init/python.py
+ /etc/profile.d/modules.csh
+ /etc/profile.d/modules.sh
+ /share/apps/modules/bin/modulecmd perl
+ /share/apps/modules/bin/modulecmd python
+ module
+ module
+
+
+
+
+ cmake/3.19.6
+
+
+ gcc/8.1.0
+ intel/19.0.5
+
+
+ pgi/19.10
+
+
+ mvapich2/2.3.1
+
+
+ intelmpi/2019u4
+
+
+ intelmpi/2019u3
+
+
+ netcdf/4.6.3
+ pnetcdf/1.9.0
+ mkl/2019u5
+
+
+ $CIME_OUTPUT_ROOT/$CASE/run
+ $CIME_OUTPUT_ROOT/$CASE/bld
+ 0.05
+ 0
+
+ $ENV{NETCDF_ROOT}/
+ $ENV{MKLROOT}
+
+
+ /share/apps/gcc/8.1.0/lib:/share/apps/gcc/8.1.0/lib64:$ENV{LD_LIBRARY_PATH}
+
+
+ 0
+ 1
+
+
+ 1
+
+
+ 10
+
+
+ 64M
+ cores
+
+
+
+
+ ORNL XK6, os is Linux, 32 pes/node, batch system is PBS
+ oic5
+ LINUX
+ gnu
+ mpich,openmpi
+ /home/$USER/models/ACME
+ /home/zdr/models/ccsm_inputdata
+ /home/zdr/models/ccsm_inputdata/atm/datm7
+ /home/$USER/models/ACME/run/archive/$CASE
+ 32
+ e3sm_developer
+ pbs
+ dmricciuto
+ 32
+ 32
+
+ /projects/cesm/devtools/mpich-3.0.4-gcc4.8.1/bin/mpirun
+
+ -np {{ total_tasks }}
+ --hostfile $ENV{PBS_NODEFILE}
+
+
+
+
+
+
+ /home/$USER/models/ACME/run/$CASE/run
+ /home/$USER/models/ACME/run/$CASE/bld
+
+
+
+ OR-CONDO, CADES-CCSI, os is Linux, 16 pes/nodes, batch system is PBS
+ or-condo
+ LINUX
+ gnu,intel
+ openmpi
+ /lustre/or-hydra/cades-ccsi/scratch/$USER
+ /lustre/or-hydra/cades-ccsi/proj-shared/project_acme/ACME_inputdata
+ /lustre/or-hydra/cades-ccsi/proj-shared/project_acme/ACME_inputdata/atm/datm7
+ $CIME_OUTPUT_ROOT/archive/$CASE
+ /lustre/or-hydra/cades-ccsi/proj-shared/project_acme/baselines/$COMPILER
+ /lustre/or-hydra/cades-ccsi/proj-shared/tools/cprnc.orcondo
+ 4
+ e3sm_developer
+ slurm
+ yinj -at- ornl.gov
+ 32
+ 32
+ FALSE
+
+ mpirun
+
+ -np {{ total_tasks }}
+
+
+
+
+
+
+ /usr/share/Modules/init/sh
+ /usr/share/Modules/init/csh
+ /usr/share/Modules/init/perl.pm
+ /usr/share/Modules/init/python.py
+ module
+ module
+ /usr/bin/modulecmd perl
+ /usr/bin/modulecmd python
+
+
+
+
+ PE-gnu
+
+
+ mkl/2017
+ cmake/3.12.0
+ python/2.7.12
+ nco/4.6.9
+ hdf5-parallel/1.8.17
+ netcdf-hdf5parallel/4.3.3.1
+ pnetcdf/1.9.0
+
+
+
+
+ $CIME_OUTPUT_ROOT/$CASE/run
+ $CIME_OUTPUT_ROOT/$CASE/bld
+
+
+ /software/user_tools/current/cades-ccsi/petsc4pf/openmpi-1.10-gcc-5.3
+
+
+ /software/user_tools/current/cades-ccsi/perl5/lib/perl5/
+
+
+
+
+
+ ORNL XC30, os is CNL, 16 pes/node, batch system is PBS
+ eos
+ CNL
+ intel
+ mpich
+ $ENV{PROJWORK}/$PROJECT
+ cli115,cli127,cli106,csc190
+ $ENV{HOME}/acme_scratch/$PROJECT
+ /lustre/atlas1/cli900/world-shared/cesm/inputdata
+ /lustre/atlas1/cli900/world-shared/cesm/inputdata/atm/datm7
+ $ENV{MEMBERWORK}/$PROJECT/archive/$CASE
+ /lustre/atlas1/cli900/world-shared/cesm/baselines/$COMPILER
+ /lustre/atlas1/cli900/world-shared/cesm/tools/cprnc/cprnc.eos
+ 8
+ e3sm_developer
+ pbs
+ E3SM
+ 32
+ 16
+ TRUE
+
+ aprun
+
+ -j {{ hyperthreading }}
+ -S {{ tasks_per_numa }}
+ -n {{ total_tasks }}
+ -N $MAX_MPITASKS_PER_NODE
+ -d $ENV{OMP_NUM_THREADS}
+ -cc numa_node
+
+
+
+
+
+
+ $MODULESHOME/init/sh
+ $MODULESHOME/init/csh
+ $MODULESHOME/init/perl.pm
+ $MODULESHOME/init/python.py
+ module
+ module
+ $MODULESHOME/bin/modulecmd perl
+ $MODULESHOME/bin/modulecmd python
+
+ intel
+ cray
+ cray-parallel-netcdf
+ cray-libsci
+ cray-netcdf
+ cray-netcdf-hdf5parallel
+ netcdf
+
+
+ intel/18.0.1.163
+ papi
+
+
+ PrgEnv-cray
+ cce cce/8.1.9
+ cray-libsci/12.1.00
+
+
+ PrgEnv-gnu
+ gcc gcc/4.8.0
+ cray-libsci/12.1.00
+
+
+ cray-netcdf/4.3.2
+
+
+ cray-netcdf-hdf5parallel/4.3.3.1
+ cray-parallel-netcdf/1.6.1
+
+
+ cmake3/3.2.3
+ python/2.7.9
+
+
+ $ENV{MEMBERWORK}/$PROJECT/$CASE/run
+ $CIME_OUTPUT_ROOT/$CASE/bld
+
+ 1
+ 1
+
+ 64M
+
+
+
+
+
+ LANL Linux Cluster, 36 pes/node, batch system slurm
+ gr-fe.*.lanl.gov
+ LINUX
+ intel,gnu
+ openmpi,impi,mvapich
+ climateacme
+ /lustre/scratch4/turquoise/$ENV{USER}/E3SM/scratch
+ /lustre/scratch3/turquoise/$ENV{USER}/E3SM/input_data
+ /lustre/scratch3/turquoise/$ENV{USER}/E3SM/input_data/atm/datm7
+ /lustre/scratch3/turquoise/$ENV{USER}/E3SM/archive/$CASE
+ /lustre/scratch3/turquoise/$ENV{USER}/E3SM/input_data/ccsm_baselines/$COMPILER
+ /turquoise/usr/projects/climate/SHARED_CLIMATE/software/wolf/cprnc/v0.40/cprnc
+ 4
+ e3sm_developer
+ slurm
+ luke.vanroekel @ gmail.com
+ 36
+ 32
+ TRUE
+
+ srun
+
+ -n {{ total_tasks }}
+
+
+
+
+
+
+ /usr/share/Modules/init/perl.pm
+ /usr/share/Modules/init/python.py
+ /etc/profile.d/z00_lmod.sh
+ /etc/profile.d/z00_lmod.csh
+ /usr/share/lmod/lmod/libexec/lmod perl
+ /usr/share/lmod/lmod/libexec/lmod python
+ module
+ module
+
+
+ cmake/3.16.2
+
+
+ gcc/6.4.0
+ openmpi/2.1.2
+
+
+ gcc/6.4.0
+ mvapich2/2.3
+
+
+ intel/19.0.4
+ intel-mpi/2019.4
+
+
+ intel/18.0.2
+ mvapich2/2.2
+
+
+ intel/19.0.4
+ openmpi/2.1.2
+
+
+ friendly-testing
+ hdf5-parallel/1.8.16
+ pnetcdf/1.11.2
+ netcdf-h5parallel/4.7.3
+ mkl/2019.0.4
+
+
+ $CIME_OUTPUT_ROOT/$CASE/run
+ $CIME_OUTPUT_ROOT/$CASE/bld
+
+ $ENV{PNETCDF_PATH}
+ $ENV{NETCDF_PATH}
+ $ENV{MKLROOT}
+ romio_ds_write=disable;romio_ds_read=disable;romio_cb_write=enable;romio_cb_read=enable
+
+
+
+
+ LANL Linux Cluster, 36 pes/node, batch system slurm
+ ba-fe.*.lanl.gov
+ LINUX
+ intel,gnu
+ openmpi,impi,mvapich
+ climateacme
+ /lustre/scratch4/turquoise/$ENV{USER}/E3SM/scratch
+ /lustre/scratch3/turquoise/$ENV{USER}/E3SM/input_data
+ /lustre/scratch3/turquoise/$ENV{USER}/E3SM/input_data/atm/datm7
+ /lustre/scratch3/turquoise/$ENV{USER}/E3SM/archive/$CASE
+ /lustre/scratch3/turquoise/$ENV{USER}/E3SM/input_data/ccsm_baselines/$COMPILER
+ /turquoise/usr/projects/climate/SHARED_CLIMATE/software/wolf/cprnc/v0.40/cprnc
+ 4
+ e3sm_developer
+ slurm
+ e3sm
+ 36
+ 32
+ TRUE
+
+ srun
+
+ -n {{ total_tasks }}
+
+
+
+
+
+
+ /usr/share/Modules/init/perl.pm
+ /usr/share/Modules/init/python.py
+ /etc/profile.d/z00_lmod.sh
+ /etc/profile.d/z00_lmod.csh
+ /usr/share/lmod/lmod/libexec/lmod perl
+ /usr/share/lmod/lmod/libexec/lmod python
+ module
+ module
+
+
+ cmake/3.16.2
+
+
+ gcc/6.4.0
+ openmpi/2.1.2
+
+
+ gcc/6.4.0
+ mvapich2/2.3
+
+
+ intel/19.0.4
+ intel-mpi/2019.4
+
+
+ intel/18.0.2
+ mvapich2/2.2
+
+
+ intel/19.0.4
+ openmpi/2.1.2
+
+
+ friendly-testing
+ hdf5-parallel/1.8.16
+ pnetcdf/1.11.2
+ netcdf-h5parallel/4.7.3
+ mkl/2019.0.4
+
+
+ $CIME_OUTPUT_ROOT/$CASE/run
+ $CIME_OUTPUT_ROOT/$CASE/bld
+
+ $ENV{PNETCDF_PATH}
+ $ENV{NETCDF_PATH}
+ $ENV{MKLROOT}
+ romio_ds_write=disable;romio_ds_read=disable;romio_cb_write=enable;romio_cb_read=enable
+
+
+
+
+ Mesabi batch queue
+ LINUX
+ intel
+ openmpi
+ /home/reichpb/scratch
+ /home/reichpb/shared/cesm_inputdata
+ /home/reichpb/shared/cesm_inputdata/atm/datm7
+ USERDEFINED_optional_run
+ USERDEFINED_optional_run/$COMPILER
+ USERDEFINED_optional_test
+ 2
+ pbs
+ chen1718 at umn dot edu
+ 24
+ 24
+ TRUE
+
+ aprun
+
+ -n {{ total_tasks }}
+ -S {{ tasks_per_numa }}
+ -N $MAX_MPITASKS_PER_NODE
+ -d $ENV{OMP_NUM_THREADS}
+
+
+
+ $CASEROOT/run
+
+ $CASEROOT/exedir
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Itasca batch queue
+ LINUX
+ intel
+ openmpi
+ /home/reichpb/scratch
+ /home/reichpb/shared/cesm_inputdata
+ /home/reichpb/shared/cesm_inputdata/atm/datm7
+ USERDEFINED_optional_run
+ USERDEFINED_optional_run/$COMPILER
+ USERDEFINED_optional_test
+ 2
+ pbs
+ chen1718 at umn dot edu
+ 8
+ 8
+
+ aprun
+
+ -n {{ total_tasks }}
+ -S {{ tasks_per_numa }}
+ -N $MAX_MPITASKS_PER_NODE
+ -d $ENV{OMP_NUM_THREADS}
+
+
+
+ $CASEROOT/run
+
+ $CASEROOT/exedir
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Lawrencium LR3 cluster at LBL, OS is Linux (intel), batch system is SLURM
+ n000*
+ LINUX
+ intel,gnu
+ openmpi
+ ac_acme
+ /global/scratch/$ENV{USER}
+ /global/scratch/$ENV{USER}/cesm_input_datasets/
+ /global/scratch/$ENV{USER}/cesm_input_datasets/atm/datm7
+ $CIME_OUTPUT_ROOT/cesm_archive/$CASE
+ $CIME_OUTPUT_ROOT/cesm_baselines/$COMPILER
+ /$CIME_OUTPUT_ROOT/cesm_tools/cprnc/cprnc
+ 4
+ slurm
+ rgknox and glemieux at lbl dot gov
+ 8
+ 8
+ TRUE
+
+ mpirun
+
+ -np {{ total_tasks }}
+ -npernode $MAX_MPITASKS_PER_NODE
+
+
+
+ mpirun
+
+ -np {{ total_tasks }}
+ -npernode $MAX_MPITASKS_PER_NODE
+
+
+
+ /etc/profile.d/modules.sh
+ /etc/profile.d/modules.csh
+ /usr/Modules/init/perl.pm
+ /usr/Modules/python.py
+ module
+ module
+ /usr/Modules/bin/modulecmd perl
+ /usr/Modules/bin/modulecmd python
+
+
+ cmake/3.15.0
+ perl
+ xml-libxml
+ python/2.7
+
+
+ intel/2016.4.072
+ mkl
+
+
+ netcdf/4.4.1.1-intel-s
+
+
+ openmpi
+ netcdf/4.4.1.1-intel-p
+
+
+ gcc/6.3.0
+ lapack/3.8.0-gcc
+
+
+ netcdf/5.4.1.1-gcc-s
+ openmpi/2.0.2-gcc
+
+
+ openmpi/3.0.1-gcc
+ netcdf/4.4.1.1-gcc-p
+ openmpi/2.0.2-gcc
+
+
+
+ $CIME_OUTPUT_ROOT/$CASE/run
+ $CIME_OUTPUT_ROOT/$CASE/bld
+
+
+
+ Lawrencium LR6 cluster at LBL, OS is Linux (intel), batch system is SLURM
+ n000*
+ LINUX
+ intel,gnu
+ openmpi
+ ac_acme
+ /global/scratch/$ENV{USER}
+ /global/scratch/$ENV{USER}/cesm_input_datasets/
+ /global/scratch/$ENV{USER}/cesm_input_datasets/atm/datm7
+ $CIME_OUTPUT_ROOT/cesm_archive/$CASE
+ $CIME_OUTPUT_ROOT/cesm_baselines/$COMPILER
+ /$CIME_OUTPUT_ROOT/cesm_tools/cprnc/cprnc
+ 4
+ slurm
+ rgknox and glemieux at lbl dot gov
+ 32
+ 32
+ TRUE
+
+ mpirun
+
+ -np {{ total_tasks }}
+
+
+
+
+ mpirun
+
+ -np {{ total_tasks }}
+
+
+
+
+ /etc/profile.d/modules.sh
+ /etc/profile.d/modules.csh
+ /usr/Modules/init/perl.pm
+ /usr/Modules/python.py
+ module
+ module
+ /usr/Modules/bin/modulecmd perl
+ /usr/Modules/bin/modulecmd python
+
+
+ cmake/3.15.0
+ perl
+ xml-libxml
+ python/2.7
+
+
+ intel/2016.4.072
+ mkl
+
+
+ netcdf/4.4.1.1-intel-s
+
+
+ openmpi
+ netcdf/4.4.1.1-intel-p
+
+
+ gcc/6.3.0
+ lapack/3.8.0-gcc
+
+
+ netcdf/5.4.1.1-gcc-s
+ openmpi/2.0.2-gcc
+
+
+ openmpi/3.0.1-gcc
+ netcdf/4.4.1.1-gcc-p
+ openmpi/2.0.2-gcc
+
+
+
+ $CIME_OUTPUT_ROOT/$CASE/run
+ $CIME_OUTPUT_ROOT/$CASE/bld
+
+
+
+ small developer workhorse at lbl climate sciences
+ LINUX
+ gnu
+ mpi-serial
+ ngeet
+ /raid1/lbleco/e3sm/
+ /raid1/lbleco/cesm/cesm_input_datasets/
+ /raid1/lbleco/cesm/cesm_input_datasets/atm/datm7/
+ /raid1/lbleco/acme/cesm_archive/$CASE
+ /raid1/lbleco/acme/cesm_baselines/$COMPILER
+ /raid1/lbleco/cesm/cesm_tools/cprnc/cprnc
+ 1
+ none
+ rgknox at lbl gov
+ 4
+ 4
+ FALSE
+
+
+
+
+ mpirun
+
+ -np {{ total_tasks }}
+ -npernode $MAX_MPITASKS_PER_NODE
+
+
+
+
+
+
+ ORNL Summit. Node: 2x POWER9 + 6x Volta V100, 22 cores/socket, 4 HW threads/core.
+ .*summit.*
+ LINUX
+ ibm,ibmgpu,pgi,pgigpu,gnu,gnugpu
+ spectrum-mpi,mpi-serial
+ cli115
+ cli115
+ /gpfs/alpine/proj-shared/$PROJECT
+ cli115,cli127
+ /gpfs/alpine/$PROJECT/proj-shared/$ENV{USER}/e3sm_scratch
+ /gpfs/alpine/cli115/world-shared/e3sm/inputdata
+ /gpfs/alpine/cli115/world-shared/e3sm/inputdata/atm/datm7
+ /gpfs/alpine/$PROJECT/proj-shared/$ENV{USER}/archive/$CASE
+ /gpfs/alpine/cli115/world-shared/e3sm/baselines/$COMPILER
+ /gpfs/alpine/cli115/world-shared/e3sm/tools/cprnc.summit/cprnc
+ 8
+ e3sm_developer
+ 4
+ lsf
+ e3sm
+ 84
+ 18
+ 42
+ 84
+ 18
+ 42
+ TRUE
+
+ jsrun
+
+ -X 1
+ --nrs $ENV{NUM_RS}
+ --rs_per_host $ENV{RS_PER_NODE}
+ --tasks_per_rs $SHELL{echo "({{ tasks_per_node }} + $RS_PER_NODE - 1)/$RS_PER_NODE"|bc}
+ -d plane:$SHELL{echo "({{ tasks_per_node }} + $RS_PER_NODE - 1)/$RS_PER_NODE"|bc}
+ --cpu_per_rs $ENV{CPU_PER_RS}
+ --gpu_per_rs $ENV{GPU_PER_RS}
+ --bind packed:smt:$ENV{OMP_NUM_THREADS}
+ -E OMP_NUM_THREADS=$ENV{OMP_NUM_THREADS}
+ -E OMP_PROC_BIND=spread -E OMP_PLACES=threads -E OMP_STACKSIZE=256M
+ --latency_priority $ENV{LTC_PRT}
+ --stdio_mode prepended
+
+
+
+ /sw/summit/lmod/7.7.10/rhel7.3_gnu4.8.5/lmod/lmod/init/sh
+ /sw/summit/lmod/7.7.10/rhel7.3_gnu4.8.5/lmod/lmod/init/csh
+ /sw/summit/lmod/7.7.10/rhel7.3_gnu4.8.5/lmod/lmod/init/env_modules_python.py
+ /sw/summit/lmod/7.7.10/rhel7.3_gnu4.8.5/lmod/lmod/init/perl
+ module
+ /sw/summit/lmod/7.7.10/rhel7.3_gnu4.8.5/lmod/7.7.10/libexec/lmod python
+ module
+ module
+
+
+
+ DefApps
+ python/3.7.0
+ subversion/1.9.3
+ git/2.13.0
+ cmake/3.20.2
+ essl/6.1.0-2
+ netlib-lapack/3.8.0
+
+
+ pgi/19.9
+ pgi-cxx14/default
+
+
+ cuda/10.1.243
+
+
+ cuda/10.1.105
+
+
+ xl/16.1.1-9
+
+
+ cuda/10.1.243
+
+
+ gcc/8.1.1
+
+
+ gcc/8.1.1
+
+
+ netcdf/4.6.1
+ netcdf-fortran/4.4.4
+
+
+ spectrum-mpi/10.3.1.2-20200121
+
+
+ spectrum-mpi/10.3.1.2-20200121
+
+
+ spectrum-mpi/10.3.1.2-20200121
+
+
+ spectrum-mpi/10.3.1.2-20200121
+
+
+ parallel-netcdf/1.8.1
+ hdf5/1.10.4
+
+
+ $CIME_OUTPUT_ROOT/$CASE/run
+ $CIME_OUTPUT_ROOT/$CASE/bld
+ 0.1
+
+ $ENV{OLCF_NETCDF_ROOT}
+ $ENV{OLCF_NETCDF_FORTRAN_ROOT}
+ $ENV{OLCF_ESSL_ROOT}
+ 0
+
+
+ /gpfs/alpine/cli115/world-shared/e3sm/tools/xlc/xlc.cfg.rhel.7.6.gcc.8.1.1.cuda.10.1
+ /sw/summit/gcc/8.1.1/lib64:$ENV{LD_LIBRARY_PATH}
+
+
+ $ENV{OLCF_HDF5_ROOT}
+ $ENV{OLCF_PARALLEL_NETCDF_ROOT}
+
+
+ 2
+ 21
+ 0
+ cpu-cpu
+ $SHELL{echo "2*((`./xmlquery --value TOTAL_TASKS` + `./xmlquery --value TASKS_PER_NODE` - 1)/`./xmlquery --value TASKS_PER_NODE`)"|bc}
+ $SHELL{echo "(`./xmlquery --value MAX_TASKS_PER_NODE`+41)/42"|bc}
+
+
+ 6
+ 7
+ 1
+ gpu-cpu
+ $SHELL{echo "6*((`./xmlquery --value TOTAL_TASKS` + `./xmlquery --value TASKS_PER_NODE` - 1)/`./xmlquery --value TASKS_PER_NODE`)"|bc}
+
+
+ 6
+ 3
+ 1
+ gpu-cpu
+ $SHELL{echo "6*((`./xmlquery --value TOTAL_TASKS` + `./xmlquery --value TASKS_PER_NODE` - 1)/`./xmlquery --value TASKS_PER_NODE`)"|bc}
+
+
+ 6
+ 7
+ 1
+ gpu-cpu
+ $SHELL{echo "6*((`./xmlquery --value TOTAL_TASKS` + `./xmlquery --value TASKS_PER_NODE` - 1)/`./xmlquery --value TASKS_PER_NODE`)"|bc}
+
+
+
+
+ ORNL Ascent. Node: 2x POWER9 + 6x Volta V100, 22 cores/socket, 4 HW threads/core.
+ .*ascent.*
+ LINUX
+ ibm,ibmgpu,pgi,pgigpu,gnu,gnugpu
+ spectrum-mpi
+ cli115
+ cli115
+ /gpfs/wolf/proj-shared/$PROJECT
+ cli115
+ /gpfs/wolf/$PROJECT/proj-shared/$ENV{USER}/e3sm_scratch
+ /gpfs/wolf/cli115/world-shared/e3sm/inputdata
+ /gpfs/wolf/cli115/world-shared/e3sm/inputdata/atm/datm7
+ /gpfs/wolf/$PROJECT/proj-shared/$ENV{USER}/archive/$CASE
+ /gpfs/wolf/cli115/world-shared/e3sm/baselines/$COMPILER
+ /gpfs/wolf/cli115/world-shared/e3sm/tools/cprnc/cprnc
+ 8
+ e3sm_integration
+ 4
+ lsf
+ e3sm
+ 84
+ 18
+ 42
+ 84
+ 18
+ 42
+ TRUE
+
+ jsrun
+
+ -X 1
+ --nrs $ENV{NUM_RS}
+ --rs_per_host $ENV{RS_PER_NODE}
+ --tasks_per_rs $SHELL{echo "({{ tasks_per_node }} + $RS_PER_NODE - 1)/$RS_PER_NODE"|bc}
+ -d plane:$SHELL{echo "({{ tasks_per_node }} + $RS_PER_NODE - 1)/$RS_PER_NODE"|bc}
+ --cpu_per_rs $ENV{CPU_PER_RS}
+ --gpu_per_rs $ENV{GPU_PER_RS}
+ --bind packed:smt:$ENV{OMP_NUM_THREADS}
+ -E OMP_NUM_THREADS=$ENV{OMP_NUM_THREADS}
+ -E OMP_PROC_BIND=spread -E OMP_PLACES=threads -E OMP_STACKSIZE=256M
+ --latency_priority $ENV{LTC_PRT}
+ --stdio_mode prepended
+
+
+
+ /sw/ascent/lmod/7.8.2/rhel7.5_4.8.5/lmod/lmod/init/sh
+ /sw/ascent/lmod/7.8.2/rhel7.5_4.8.5/lmod/lmod/init/csh
+ /sw/ascent/lmod/7.8.2/rhel7.5_4.8.5/lmod/lmod/init/env_modules_python.py
+ /sw/ascent/lmod/7.8.2/rhel7.5_4.8.5/lmod/lmod/libexec/lmod python
+ module
+ module
+
+
+
+ DefApps
+ python/3.7.0
+ subversion/1.9.3
+ git/2.13.0
+ cmake/3.18.2
+ essl/6.1.0-2
+ netlib-lapack/3.8.0
+
+
+ pgi/19.9
+ pgi-cxx14/default
+
+
+ cuda/10.1.243
+
+
+ cuda/10.1.105
+
+
+ xl/16.1.1-7
+
+
+ cuda/10.1.243
+
+
+ gcc/8.1.1
+
+
+ gcc/8.1.1
+
+
+ spectrum-mpi/10.3.1.2-20200121
+ netcdf/4.6.1
+ netcdf-fortran/4.4.4
+ parallel-netcdf/1.8.1
+ hdf5/1.10.4
+
+
+ $CIME_OUTPUT_ROOT/$CASE/run
+ $CIME_OUTPUT_ROOT/$CASE/bld
+ 0.1
+
+ /gpfs/wolf/cli115/world-shared/e3sm/soft/perl/5.26.0/bin:$ENV{PATH}
+ $ENV{OLCF_NETCDF_ROOT}
+ $ENV{OLCF_NETCDF_FORTRAN_ROOT}
+ $ENV{OLCF_ESSL_ROOT}
+ $ENV{OLCF_HDF5_ROOT}
+ $ENV{OLCF_PARALLEL_NETCDF_ROOT}
+ 0
+
+
+ 2
+ 21
+ 0
+ cpu-cpu
+ $SHELL{echo "2*((`./xmlquery --value TOTAL_TASKS` + `./xmlquery --value TASKS_PER_NODE` - 1)/`./xmlquery --value TASKS_PER_NODE`)"|bc}
+ $SHELL{echo "(`./xmlquery --value MAX_TASKS_PER_NODE`+41)/42"|bc}
+
+
+ 6
+ 7
+ 1
+ gpu-cpu
+ $SHELL{echo "6*((`./xmlquery --value TOTAL_TASKS` + `./xmlquery --value TASKS_PER_NODE` - 1)/`./xmlquery --value TASKS_PER_NODE`)"|bc}
+
+
+ 6
+ 3
+ 1
+ gpu-cpu
+ $SHELL{echo "6*((`./xmlquery --value TOTAL_TASKS` + `./xmlquery --value TASKS_PER_NODE` - 1)/`./xmlquery --value TASKS_PER_NODE`)"|bc}
+
+
+ 6
+ 7
+ 1
+ gpu-cpu
+ $SHELL{echo "6*((`./xmlquery --value TOTAL_TASKS` + `./xmlquery --value TASKS_PER_NODE` - 1)/`./xmlquery --value TASKS_PER_NODE`)"|bc}
+
+
+
+
+ Medium sized linux cluster at BNL, torque scheduler.
+ LINUX
+ gnu
+ openmpi,mpi-serial
+ /data/$ENV{USER}
+ /data/Model_Data/cesm_input_datasets/
+ /data/Model_Data/cesm_input_datasets/atm/datm7
+ $CIME_OUTPUT_ROOT/cesm_archive/$CASE
+ $CIME_OUTPUT_ROOT/cesm_baselines
+ /data/software/cesm_tools/cprnc/cprnc
+ 4
+ pbs
+ sserbin@bnl.gov
+ 12
+ 12
+ 12
+ FALSE
+
+
+
+
+ mpirun
+
+ -np {{ total_tasks }}
+ -npernode $MAX_TASKS_PER_NODE
+
+
+
+ /etc/profile.d/modules.sh
+ /etc/profile.d/modules.csh
+ /usr/share/Modules/init/perl.pm
+ /usr/share/Modules/init/python.py
+ module
+ module
+ /usr/bin/modulecmd perl
+ /usr/bin/modulecmd python
+
+
+ perl/5.22.1
+ libxml2/2.9.2
+ maui/3.3.1
+ python/2.7.15
+ python/3.6.2
+
+
+ gcc/5.4.0
+ gfortran/5.4.0
+ hdf5/1.8.19fates
+ netcdf/4.4.1.1-gnu540-fates
+ openmpi/2.1.1-gnu540
+
+
+ openmpi/2.1.1-gnu540
+
+
+
+ /data/software/hdf5/1.8.19fates
+ /data/software/netcdf/4.4.1.1-gnu540-fates
+
+
+
+
+ ORNL experimental/evaluation cluster
+ tulip.*
+ LINUX
+ gnu
+ openmpi
+ /home/groups/coegroup/e3sm/scratch/$USER
+ /home/groups/coegroup/e3sm/inputdata2
+ /home/groups/coegroup/e3sm/inputdata2/atm/datm7
+ $CIME_OUTPUT_ROOT/archive/$CASE
+ /home/groups/coegroup/e3sm/baselines/$COMPILER
+ /home/groups/coegroup/e3sm/tools/cprnc/cprnc
+ 16
+ e3sm_developer
+ 4
+ slurm
+ e3sm
+ 64
+ 32
+ FALSE
+
+ mpirun
+
+ --tag-output -n {{ total_tasks }}
+ --map-by ppr:1:core:PE=$ENV{OMP_NUM_THREADS} --bind-to core
+
+
+
+ /cm/local/apps/environment-modules/current/init/python
+ /cm/local/apps/environment-modules/current/init/sh
+ /cm/local/apps/environment-modules/current/init/csh
+ /cm/local/apps/environment-modules/current/bin/modulecmd python
+ module
+ module
+
+ gcc
+ cce
+ PrgEnv-cray
+ cray-mvapich2
+ cmake/3.17.0
+ /home/users/twhite/share/modulefiles
+ svn/1.10.6
+
+
+ gcc/8.1.0
+ blas/gcc/64/3.8.0
+ lapack/gcc/64/3.8.0
+
+
+ $CIME_OUTPUT_ROOT/$CASE/run
+ $CIME_OUTPUT_ROOT/$CASE/bld
+
+ /home/groups/coegroup/e3sm/soft/perl5/lib/perl5
+
+
+ /home/groups/coegroup/e3sm/soft/netcdf/4.4.1c-4.2cxx-4.4.4f/gcc/8.2.0
+
+
+ gcc
+ g++
+ gfortran
+ /home/groups/coegroup/e3sm/soft/openmpi/2.1.6/gcc/8.2.0/bin:$ENV{PATH}
+ /home/groups/coegroup/e3sm/soft/openmpi/2.1.6/gcc/8.2.0/lib:/home/groups/coegroup/e3sm/soft/netcdf/4.4.1c-4.2cxx-4.4.4f/gcc/8.2.0/lib:$ENV{LD_LIBRARY_PATH}
+ /home/groups/coegroup/e3sm/soft/pnetcdf/1.12.1/gcc/8.2.0/openmpi/2.1.6
+
+
+ 128M
+ threads
+
+
+
+
+ ${EXEROOT}/e3sm.exe
+ >> e3sm.log.$LID 2>&1
+
+
+
diff --git a/mache/machine_info.py b/mache/machine_info.py
new file mode 100644
index 00000000..f663e154
--- /dev/null
+++ b/mache/machine_info.py
@@ -0,0 +1,368 @@
+import socket
+import warnings
+from lxml import etree
+from importlib.resources import path
+import configparser
+import os
+
+
class MachineInfo:
    """
    An object containing information about an E3SM supported machine

    Attributes
    ----------
    machine : str
        The name of an E3SM supported machine

    config : configparser.ConfigParser
        Config options for this machine

    e3sm_supported : bool
        Whether this machine supports running E3SM itself, and therefore has
        a list of compilers, MPI libraries, and the modules needed to load them

    compilers : list
        A list of compilers for this machine if ``e3sm_supported == True``

    mpilibs : list
        A list of MPI libraries for this machine if ``e3sm_supported == True``

    os : str
        The machine's operating system if ``e3sm_supported == True``

    e3sm_unified_mpi : {'nompi', 'system', None}
        Which MPI type is included in the E3SM-Unified environment (if one is
        loaded), detected from the ``E3SMU_MPI`` environment variable

    e3sm_unified_base : str
        The base path where E3SM-Unified and its activation scripts are
        installed, read from the machine's config file when available
        (``None`` otherwise)

    e3sm_unified_activation : str
        The activation script used to activate E3SM-Unified if an
        E3SM-Unified environment is currently loaded (detected from the
        ``E3SMU_SCRIPT`` environment variable), ``None`` otherwise

    diagnostics_base : str
        The base directory for diagnostics data, read from the machine's
        config file when available (``None`` otherwise)
    """
+
+ def __init__(self, machine=None):
+ """
+ Create an object with information about the E3SM supported machine
+
+ Parameters
+ ----------
+ machine : str, optional
+ The name of an E3SM supported machine. By default, the machine
+ will be inferred from the host name
+ """
+ self.machine = machine
+ if self.machine is None:
+ self._discover_machine()
+
+ self.config = self._get_config()
+
+ self.e3sm_supported = False
+ self.compilers = None
+ self.mpilibs = None
+ self.os = None
+ self._parse_compilers_and_mpi()
+
+ self.e3sm_unified_mpi = None
+ self.e3sm_unified_base = None
+ self.e3sm_unified_activation = None
+ self._detect_e3sm_unified()
+
+ self.diagnostics_base = None
+ self._get_diagnostics_info()
+
+ def __str__(self):
+ """
+ Convert the info to a format that is good for printing
+
+ Returns
+ -------
+ info : str
+ The contents as a string for printing to the terminal
+ """
+
+ info = f'Machine: {self.machine}\n' \
+ f'E3SM Supported Machine? {self.e3sm_supported}'
+
+ if self.e3sm_supported:
+ info = f'{info}\n' \
+ f' Compilers: {", ".join(self.compilers)}\n' \
+ f' MPI libraries: {", ".join(self.mpilibs)}\n' \
+ f' OS: {self.os}'
+
+ print_unified = (self.e3sm_unified_activation is not None or
+ self.e3sm_unified_base is not None or
+ self.e3sm_unified_mpi is not None)
+ if print_unified:
+ info = f'{info}\n' \
+ f'E3SM-Unified:'
+
+ if self.e3sm_unified_activation is None:
+ info = f'{info}\n' \
+ f' E3SM-Unified is not currently loaded'
+ else:
+ info = f'{info}\n' \
+ f' Activation: {self.e3sm_unified_activation}'
+ if self.e3sm_unified_base is not None:
+ info = f'{info}\n' \
+ f' Base path: {self.e3sm_unified_base}'
+ if self.e3sm_unified_mpi is not None:
+ info = f'{info}\n' \
+ f' MPI type: {self.e3sm_unified_mpi}'
+
+ print_diags = self.diagnostics_base is not None
+ if print_diags:
+ info = f'{info}\n' \
+ f'Diagnostics:'
+
+ if self.diagnostics_base is not None:
+ info = f'{info}\n' \
+ f' Base path: {self.diagnostics_base}'
+
+ return info
+
+ def get_modules_and_mpi_compilers(self, compiler, mpilib):
+ """
+ Get the the modules and MPI compiler commands for a given compiler and
+ MPI library
+
+ Parameters
+ ----------
+ compiler : str
+ One of the compilers for this machine, given in the ``compilers``
+ attribute
+
+ mpilib : str
+ One of the MPI libraries for this machine, , given in the
+ ``mpilibs`` attribute
+
+ Returns
+ -------
+ mpicc : str
+ The MPI c compiler for this machine
+
+ mpicxx : str
+ The MPI c++ compiler for this machine
+
+ mpifc : str
+ The MPI Fortran compiler for this machine
+
+ mod_commands : str
+ Modules to load to set up the compilers, MPI libraries and other
+ dependencies like NetCDF and PNetCDF
+ """
+
+ machine = self.machine
+ if not self.e3sm_supported:
+ raise ValueError(f'{machine} does not appear to be an E3SM '
+ f'supported machine')
+
+ if compiler not in self.compilers:
+ raise ValueError(f'{compiler} does not appear to be one of the '
+ f'compilers for this machine: {self.compilers}')
+
+ if mpilib not in self.mpilibs:
+ raise ValueError(f'{mpilib} does not appear to be one of the MPI'
+ f'libraries for this machine: {self.mpilibs}')
+
+ with path('mache.cime_machine_config',
+ 'config_machines.xml') as xml_path:
+ root = etree.parse(str(xml_path))
+
+ machines = next(root.iter('config_machines'))
+
+ mach = None
+ for mach in machines:
+ if mach.tag == 'machine' and mach.attrib['MACH'] == machine:
+ break
+
+ if mach is None:
+ raise ValueError(f'{machine} does not appear to be an E3SM '
+ f'supported machine')
+
+ mod_commands = []
+ modules = next(mach.iter('module_system'))
+ for module in modules:
+ if module.tag == 'modules':
+ include = True
+ if 'compiler' in module.attrib and \
+ module.attrib['compiler'] != compiler:
+ include = False
+ if 'mpilib' in module.attrib and \
+ module.attrib['mpilib'] != mpilib and \
+ module.attrib['mpilib'] != '!mpi-serial':
+ include = False
+ if include:
+ for command in module:
+ if command.tag == 'command':
+ cmd = command.attrib['name']
+ text = f'module {cmd}'
+ if command.text is not None:
+ text = f'{text} {command.text}'
+ mod_commands.append(text)
+
+ with path('mache.cime_machine_config',
+ 'config_compilers.xml') as xml_path:
+ root = etree.parse(str(xml_path))
+
+ compilers = next(root.iter('config_compilers'))
+
+ mpicc = None
+ mpifc = None
+ mpicxx = None
+ for comp in compilers:
+ if comp.tag != 'compiler':
+ continue
+ if 'COMPILER' in comp.attrib and \
+ comp.attrib['COMPILER'] != compiler:
+ continue
+ if 'OS' in comp.attrib and \
+ comp.attrib['OS'] != self.os:
+ continue
+ if 'MACH' in comp.attrib and comp.attrib['MACH'] != machine:
+ continue
+
+ # okay, this is either a "generic" compiler section or one for
+ # this machine
+
+ for child in comp:
+ if 'MPILIB' in child.attrib:
+ mpi = child.attrib['MPILIB']
+ if mpi[0] == '!':
+ mpi_match = mpi[1:] != mpilib
+ else:
+ mpi_match = mpi == mpilib
+ else:
+ mpi_match = True
+
+ if not mpi_match:
+ continue
+
+ if child.tag == 'MPICC':
+ mpicc = child.text.strip()
+ elif child.tag == 'MPICXX':
+ mpicxx = child.text.strip()
+ elif child.tag == 'MPIFC':
+ mpifc = child.text.strip()
+
+ return mpicc, mpicxx, mpifc, mod_commands
+
+ def _discover_machine(self):
+ """ Figure out the machine from the host name """
+ if self.machine is not None:
+ return
+ hostname = socket.gethostname()
+ if hostname.startswith('acme1'):
+ machine = 'acme1'
+ elif hostname.startswith('andes'):
+ machine = 'andes'
+ elif hostname.startswith('blueslogin'):
+ machine = 'anvil'
+ elif hostname.startswith('ba-fe'):
+ machine = 'badger'
+ elif hostname.startswith('chrlogin'):
+ machine = 'chrysalis'
+ elif hostname.startswith('compy'):
+ machine = 'compy'
+ elif hostname.startswith('cooley'):
+ machine = 'cooley'
+ elif hostname.startswith('cori'):
+ warnings.warn('defaulting to cori-haswell. Use -m cori-knl if you'
+ ' wish to run on KNL.')
+ machine = 'cori-haswell'
+ elif hostname.startswith('gr-fe'):
+ machine = 'grizzly'
+ else:
+ raise ValueError('Unable to discover machine form host name')
+ self.machine = machine
+
+ def _get_config(self):
+ """ get a parser for config options """
+
+ config = configparser.ConfigParser(
+ interpolation=configparser.ExtendedInterpolation())
+
+ machine = self.machine
+ try:
+ with path('mache.machines', f'{machine}.cfg') as cfg_path:
+ config.read(cfg_path)
+ except FileNotFoundError:
+ # this isn't a known machine so use the default
+ with path('mache.machines', 'default.cfg') as cfg_path:
+ config.read(cfg_path)
+
+ return config
+
+ def _parse_compilers_and_mpi(self):
+ """ Parse the compilers and mpi modules from XML config files """
+ machine = self.machine
+
+ with path('mache.cime_machine_config',
+ 'config_machines.xml') as xml_path:
+ root = etree.parse(str(xml_path))
+
+ machines = next(root.iter('config_machines'))
+
+ mach = None
+ for mach in machines:
+ if mach.tag == 'machine' and mach.attrib['MACH'] == machine:
+ break
+
+ if mach is None:
+ # this is not an E3SM supported machine, so we're done
+ self.e3sm_supported = False
+ return
+
+ self.e3sm_supported = True
+ compilers = None
+ for child in mach:
+ if child.tag == 'COMPILERS':
+ compilers = child.text.split(',')
+ break
+
+ self.compilers = compilers
+
+ mpilibs = None
+ for child in mach:
+ if child.tag == 'MPILIBS':
+ mpilibs = child.text.split(',')
+ break
+
+ self.mpilibs = mpilibs
+
+ machine_os = None
+ for child in mach:
+ if child.tag == 'OS':
+ machine_os = child.text
+ break
+
+ self.os = machine_os
+
+ def _detect_e3sm_unified(self):
+ """ Read E3SM-Unified base path and detect whether it is running """
+ config = self.config
+
+ if config is not None and \
+ config.has_option('e3sm_unified', 'base_path'):
+ self.e3sm_unified_base = config.get('e3sm_unified', 'base_path')
+
+ if 'E3SMU_SCRIPT' in os.environ:
+ self.e3sm_unified_activation = os.environ['E3SMU_SCRIPT']
+
+ if 'E3SMU_MPI' in os.environ:
+ self.e3sm_unified_mpi = os.environ['E3SMU_MPI'].lower()
+
+ def _get_diagnostics_info(self):
+ """ Get config options related to diagnostics data """
+
+ config = self.config
+
+ if config is not None and \
+ config.has_option('diagnostics', 'base_path'):
+ self.diagnostics_base = config.get('diagnostics', 'base_path')
diff --git a/mache/machines/__init__.py b/mache/machines/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/mache/machines/acme1.cfg b/mache/machines/acme1.cfg
new file mode 100644
index 00000000..a1c4b28a
--- /dev/null
+++ b/mache/machines/acme1.cfg
@@ -0,0 +1,18 @@
+# Options related to deploying an e3sm-unified conda environment on supported
+# machines
+[e3sm_unified]
+
+# the unix group for permissions for the e3sm-unified conda environment
+group = climate
+
+# the path to the directory where activation scripts, the base environment, and
+# system libraries will be deployed
+base_path = /usr/local/e3sm_unified/envs
+
+
+# config options related to data needed by diagnostics software such as
+# e3sm_diags and MPAS-Analysis
+[diagnostics]
+
+# The base path to the diagnostics directory
+base_path = /space2/diagnostics
diff --git a/mache/machines/andes.cfg b/mache/machines/andes.cfg
new file mode 100644
index 00000000..266423cf
--- /dev/null
+++ b/mache/machines/andes.cfg
@@ -0,0 +1,18 @@
+# Options related to deploying an e3sm-unified conda environment on supported
+# machines
+[e3sm_unified]
+
+# the unix group for permissions for the e3sm-unified conda environment
+group = cli900
+
+# the path to the directory where activation scripts, the base environment, and
+# system libraries will be deployed
+base_path = /ccs/proj/cli900/sw/rhea/e3sm-unified
+
+
+# config options related to data needed by diagnostics software such as
+# e3sm_diags and MPAS-Analysis
+[diagnostics]
+
+# The base path to the diagnostics directory
+base_path = /gpfs/alpine/proj-shared/cli115/diagnostics/
diff --git a/mache/machines/anvil.cfg b/mache/machines/anvil.cfg
new file mode 100644
index 00000000..2510bc46
--- /dev/null
+++ b/mache/machines/anvil.cfg
@@ -0,0 +1,24 @@
+# Options related to deploying an e3sm-unified conda environment on supported
+# machines
+[e3sm_unified]
+
+# the unix group for permissions for the e3sm-unified conda environment
+group = cels
+
+# the compiler set to use for system libraries and MPAS builds
+compiler = intel
+
+# the system MPI library to use for intel18 compiler
+mpi = impi
+
+# the path to the directory where activation scripts, the base environment, and
+# system libraries will be deployed
+base_path = /lcrc/soft/climate/e3sm-unified
+
+
+# config options related to data needed by diagnostics software such as
+# e3sm_diags and MPAS-Analysis
+[diagnostics]
+
+# The base path to the diagnostics directory
+base_path = /lcrc/group/e3sm/diagnostics
\ No newline at end of file
diff --git a/mache/machines/badger.cfg b/mache/machines/badger.cfg
new file mode 100644
index 00000000..b9cb0319
--- /dev/null
+++ b/mache/machines/badger.cfg
@@ -0,0 +1,24 @@
+# Options related to deploying an e3sm-unified conda environment on supported
+# machines
+[e3sm_unified]
+
+# the unix group for permissions for the e3sm-unified conda environment
+group = climate
+
+# the compiler set to use for system libraries and MPAS builds
+compiler = intel
+
+# the system MPI library to use for intel18 compiler
+mpi = impi
+
+# the path to the directory where activation scripts, the base environment, and
+# system libraries will be deployed
+base_path = /turquoise/usr/projects/climate/SHARED_CLIMATE/anaconda_envs
+
+
+# config options related to data needed by diagnostics software such as
+# e3sm_diags and MPAS-Analysis
+[diagnostics]
+
+# The base path to the diagnostics directory
+base_path = /turquoise/usr/projects/climate/SHARED_CLIMATE/diagnostic
diff --git a/mache/machines/chrysalis.cfg b/mache/machines/chrysalis.cfg
new file mode 100644
index 00000000..97256329
--- /dev/null
+++ b/mache/machines/chrysalis.cfg
@@ -0,0 +1,24 @@
+# Options related to deploying an e3sm-unified conda environment on supported
+# machines
+[e3sm_unified]
+
+# the unix group for permissions for the e3sm-unified conda environment
+group = cels
+
+# the compiler set to use for system libraries and MPAS builds
+compiler = intel
+
+# the system MPI library to use with the compiler chosen above
+mpi = openmpi
+
+# the path to the directory where activation scripts, the base environment, and
+# system libraries will be deployed
+base_path = /lcrc/soft/climate/e3sm-unified
+
+
+# config options related to data needed by diagnostics software such as
+# e3sm_diags and MPAS-Analysis
+[diagnostics]
+
+# The base path to the diagnostics directory
+base_path = /lcrc/group/e3sm/diagnostics
\ No newline at end of file
diff --git a/mache/machines/compy.cfg b/mache/machines/compy.cfg
new file mode 100644
index 00000000..b8cd3372
--- /dev/null
+++ b/mache/machines/compy.cfg
@@ -0,0 +1,24 @@
+# Options related to deploying an e3sm-unified conda environment on supported
+# machines
+[e3sm_unified]
+
+# the unix group for permissions for the e3sm-unified conda environment
+group = users
+
+# the compiler set to use for system libraries and MPAS builds
+compiler = intel
+
+# the system MPI library to use for intel18 compiler
+mpi = impi
+
+# the path to the directory where activation scripts, the base environment, and
+# system libraries will be deployed
+base_path = /share/apps/E3SM/conda_envs
+
+
+# config options related to data needed by diagnostics software such as
+# e3sm_diags and MPAS-Analysis
+[diagnostics]
+
+# The base path to the diagnostics directory
+base_path = /compyfs/diagnostics
diff --git a/mache/machines/cooley.cfg b/mache/machines/cooley.cfg
new file mode 100644
index 00000000..f1b90551
--- /dev/null
+++ b/mache/machines/cooley.cfg
@@ -0,0 +1,18 @@
+# Options related to deploying an e3sm-unified conda environment on supported
+# machines
+[e3sm_unified]
+
+# the unix group for permissions for the e3sm-unified conda environment
+group = ccsm
+
+# the path to the directory where activation scripts, the base environment, and
+# system libraries will be deployed
+base_path = /lus/theta-fs0/projects/ccsm/acme/tools/e3sm-unified
+
+
+# config options related to data needed by diagnostics software such as
+# e3sm_diags and MPAS-Analysis
+[diagnostics]
+
+# The base path to the diagnostics directory
+base_path = /lus/theta-fs0/projects/ClimateEnergy_4/diagnostics
diff --git a/mache/machines/cori-haswell.cfg b/mache/machines/cori-haswell.cfg
new file mode 100644
index 00000000..ef344cb8
--- /dev/null
+++ b/mache/machines/cori-haswell.cfg
@@ -0,0 +1,24 @@
+# Options related to deploying an e3sm-unified conda environment on supported
+# machines
+[e3sm_unified]
+
+# the unix group for permissions for the e3sm-unified conda environment
+group = e3sm
+
+# the compiler set to use for system libraries and MPAS builds
+compiler = intel
+
+# the system MPI library to use with the compiler chosen above
+mpi = mpt
+
+# the path to the directory where activation scripts, the base environment, and
+# system libraries will be deployed
+base_path = /global/common/software/e3sm/anaconda_envs
+
+
+# config options related to data needed by diagnostics software such as
+# e3sm_diags and MPAS-Analysis
+[diagnostics]
+
+# The base path to the diagnostics directory
+base_path = /global/cfs/cdirs/e3sm/diagnostics
diff --git a/mache/machines/cori-knl.cfg b/mache/machines/cori-knl.cfg
new file mode 100644
index 00000000..876192f7
--- /dev/null
+++ b/mache/machines/cori-knl.cfg
@@ -0,0 +1,24 @@
+# Options related to deploying an e3sm-unified conda environment on supported
+# machines
+[e3sm_unified]
+
+# the unix group for permissions for the e3sm-unified conda environment
+group = e3sm
+
+# the compiler set to use for system libraries and MPAS builds
+compiler = intel
+
+# the system MPI library to use for intel18 compiler
+mpi = impi
+
+# the path to the directory where activation scripts, the base environment, and
+# system libraries will be deployed
+base_path = /global/common/software/e3sm/anaconda_envs
+
+
+# config options related to data needed by diagnostics software such as
+# e3sm_diags and MPAS-Analysis
+[diagnostics]
+
+# The base path to the diagnostics directory
+base_path = /global/cfs/cdirs/e3sm/diagnostics
diff --git a/mache/machines/default.cfg b/mache/machines/default.cfg
new file mode 100644
index 00000000..e69de29b
diff --git a/mache/machines/grizzly.cfg b/mache/machines/grizzly.cfg
new file mode 100644
index 00000000..b9cb0319
--- /dev/null
+++ b/mache/machines/grizzly.cfg
@@ -0,0 +1,24 @@
+# Options related to deploying an e3sm-unified conda environment on supported
+# machines
+[e3sm_unified]
+
+# the unix group for permissions for the e3sm-unified conda environment
+group = climate
+
+# the compiler set to use for system libraries and MPAS builds
+compiler = intel
+
+# the system MPI library to use for intel18 compiler
+mpi = impi
+
+# the path to the directory where activation scripts, the base environment, and
+# system libraries will be deployed
+base_path = /turquoise/usr/projects/climate/SHARED_CLIMATE/anaconda_envs
+
+
+# config options related to data needed by diagnostics software such as
+# e3sm_diags and MPAS-Analysis
+[diagnostics]
+
+# The base path to the diagnostics directory
+base_path = /turquoise/usr/projects/climate/SHARED_CLIMATE/diagnostic
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 00000000..c1777c94
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,27 @@
+[metadata]
+name = mache
+version = 1.0.0
+author = Xylar Asay-Davis
+author_email = xylar@lanl.gov
+description = A package for providing configuration data related to E3SM supported machines
+long_description = file: README.rst
+long_description_content_type = text/x-rst
+url = https://github.com/E3SM-Project/mache
+project_urls =
+ Bug Tracker = https://github.com/E3SM-Project/mache/issues
+classifiers =
+ Programming Language :: Python :: 3
+ License :: OSI Approved :: BSD License
+ Operating System :: OS Independent
+
+[options]
+packages = find:
+python_requires = >=3.7
+install_requires =
+ lxml
+
+[options.data_files]
+data =
+ mache/cime_machine_config/config_compilers.xml
+ mache/cime_machine_config/config_machines.xml
+ mache/machines/*.cfg
diff --git a/setup.py b/setup.py
new file mode 100644
index 00000000..7f1a1763
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,4 @@
+from setuptools import setup
+
+if __name__ == "__main__":
+ setup()
diff --git a/spec-file.txt b/spec-file.txt
new file mode 100644
index 00000000..525e6a88
--- /dev/null
+++ b/spec-file.txt
@@ -0,0 +1,2 @@
+python >=3.7
+lxml