Merge pull request #102 from xylar/fix_chrysalis_os
Fix Chrysalis OS in spack and update cime machine config file
xylar authored Feb 15, 2023
2 parents 74b9a59 + 3b68b55 commit 2bd039f
Showing 8 changed files with 134 additions and 55 deletions.
4 changes: 2 additions & 2 deletions conda/meta.yaml
@@ -1,5 +1,5 @@
{% set name = "mache" %}
{% set version = "1.11.0" %}
{% set version = "1.12.0rc1" %}

package:
name: {{ name|lower }}
@@ -45,4 +45,4 @@ about:

extra:
recipe-maintainers:
- xylar
- xylar
169 changes: 124 additions & 45 deletions mache/cime_machine_config/config_machines.xml
@@ -104,34 +104,25 @@
<command name="unload">cray-hdf5</command>
<command name="unload">cray-netcdf</command>
<command name="unload">cray-netcdf-hdf5parallel</command>
<command name="load">craype/2.7.12</command>
</modules>
<modules compiler="intel">
<command name="rm">PrgEnv-gnu</command>
<command name="rm">PrgEnv-cray</command>
<command name="load">PrgEnv-intel/8.1.0</command>
<command name="swap">intel/19.1.0.166</command>
<command name="load">craype/2.7.15</command>
</modules>
<modules compiler="gnu">
<command name="unload">PrgEnv-cray</command>
<command name="load">PrgEnv-gnu/8.1.0</command>
<command name="swap">gcc/9.3.0</command>
<command name="unload">PrgEnv-gnu</command>
<command name="load">PrgEnv-gnu/8.3.3</command>
<command name="swap">gcc/12.1.0</command>
</modules>
<modules compiler="cray">
<command name="unload">PrgEnv-intel</command>
<command name="unload">PrgEnv-gnu</command>
<command name="load">gcc/9.3.0</command>
<command name="load">PrgEnv-cray/8.1.0</command>
<command name="load">PrgEnv-cray/8.3.3</command>
<command name="rm">darshan</command>
</modules>
<modules compiler="!intel">
<command name="swap">cray-libsci/21.06.1.1</command>
</modules>
<modules>
<command name="load">cray-mpich/8.1.11</command>
<command name="load">cray-hdf5-parallel/1.12.0.7</command>
<command name="load">cray-netcdf-hdf5parallel/4.7.4.7</command>
<command name="load">cray-parallel-netcdf/1.12.1.7</command>
<command name="load">cray-mpich/8.1.16</command>
<command name="load">cray-hdf5-parallel/1.12.1.3</command>
<command name="load">cray-netcdf-hdf5parallel/4.8.1.3</command>
<command name="load">cray-parallel-netcdf/1.12.2.3</command>
</modules>
</module_system>

@@ -140,20 +131,14 @@
<TEST_TPUT_TOLERANCE>0.1</TEST_TPUT_TOLERANCE>
<MAX_GB_OLD_TEST_DATA>1000</MAX_GB_OLD_TEST_DATA>
<environment_variables>
<env name="PERL5LIB">/usr/lib/perl5/5.26.1</env>
<env name="NETCDF_C_PATH">opt/cray/pe/netcdf-hdf5parallel/4.7.4.4/gnu/9.1/</env>
<env name="NETCDF_FORTRAN_PATH">opt/cray/pe/netcdf-hdf5parallel/4.7.4.4/gnu/9.1/</env>
<env name="PERL5LIB">/usr/lib/perl5/5.26.2</env>
<env name="NETCDF_C_PATH">/opt/cray/pe/netcdf-hdf5parallel/4.8.1.3/gnu/9.1/</env>
<env name="NETCDF_FORTRAN_PATH">/opt/cray/pe/netcdf-hdf5parallel/4.8.1.3/gnu/9.1/</env>
<env name="PNETCDF_PATH">$SHELL{dirname $(dirname $(which pnetcdf_version))}</env>
</environment_variables>
<environment_variables SMP_PRESENT="TRUE">
<env name="OMP_STACKSIZE">128M</env>
</environment_variables>
<environment_variables SMP_PRESENT="TRUE" compiler="intel" MAX_TASKS_PER_NODE="!128">
<env name="KMP_AFFINITY">granularity=core,balanced</env>
</environment_variables>
<environment_variables SMP_PRESENT="TRUE" compiler="intel" MAX_TASKS_PER_NODE="128">
<env name="KMP_AFFINITY">granularity=thread,balanced</env>
</environment_variables>
<environment_variables SMP_PRESENT="TRUE" compiler="gnu">
<env name="OMP_PLACES">cores</env>
</environment_variables>
@@ -596,19 +581,16 @@
<NTEST_PARALLEL_JOBS>1</NTEST_PARALLEL_JOBS>
<BATCH_SYSTEM>slurm</BATCH_SYSTEM>
<SUPPORTED_BY>e3sm</SUPPORTED_BY>
<MAX_TASKS_PER_NODE>64</MAX_TASKS_PER_NODE>
<MAX_MPITASKS_PER_NODE>64</MAX_MPITASKS_PER_NODE>
<MAX_TASKS_PER_NODE>56</MAX_TASKS_PER_NODE>
<MAX_MPITASKS_PER_NODE>56</MAX_MPITASKS_PER_NODE>
<PROJECT_REQUIRED>TRUE</PROJECT_REQUIRED>
<mpirun mpilib="default">
<executable>srun</executable>
<arguments>
<arg name="num_tasks"> -l -K -n {{ total_tasks }} -N {{ num_nodes }} </arg>
<!-- <arg name="binding">cpu_bind=cores</arg> -->
<!-- <arg name="binding">threads-per-core=1 -c 2</arg> -->
<arg name="binding">--threads-per-core=1</arg>
<arg name="thread_count">-c $ENV{OMP_NUM_THREADS}</arg>
<arg name="placement">-m *:block</arg>
<!--<arg name="placement">-m plane={{ tasks_per_node }}</arg>-->
</arguments>
</mpirun>
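
For reference, a minimal sketch of how the argument template above assembles into a single srun call, assuming a hypothetical 2-node, 112-task job with OMP_NUM_THREADS=1 and an executable named e3sm.exe (all of these values are illustrative, not taken from the config):

# Sketch: assemble the srun command from the argument template above.
# Illustrative values only: 2 nodes, 112 tasks, 1 OpenMP thread, executable e3sm.exe.
total_tasks=112
num_nodes=2
export OMP_NUM_THREADS=1
echo srun -l -K -n "${total_tasks}" -N "${num_nodes}" \
    --threads-per-core=1 -c "${OMP_NUM_THREADS}" -m '*:block' ./e3sm.exe
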
<module_system type="module" allow_error="true">
@@ -628,7 +610,7 @@
<modules compiler="amdclang">
<command name="reset"></command>
<command name="switch">PrgEnv-cray PrgEnv-amd/8.3.3</command>
<command name="switch">amd amd/5.1.0</command>
<command name="switch">amd amd/5.4.0</command>
</modules>
<modules compiler="gnu">
<command name="reset"></command>
@@ -1945,7 +1927,7 @@
<executable>srun</executable>
<arguments>
<arg name="num_tasks">--mpi=pmi2 -l -n {{ total_tasks }} -N {{ num_nodes }} --kill-on-bad-exit </arg>
<arg name="binding"> $SHELL{if [ 64 -ge `./xmlquery --value MAX_MPITASKS_PER_NODE` ]; then echo "--cpu_bind=cores"; else echo "--cpu_bind=threads";fi;} </arg>
<arg name="binding"> $SHELL{if [ 64 -ge `./xmlquery --value MAX_MPITASKS_PER_NODE` ]; then echo "--cpu_bind=cores"; else echo "--cpu_bind=threads";fi;} </arg>
<arg name="thread_count">-c $SHELL{echo 128/ {{ tasks_per_node }} |bc}</arg>
<arg name="placement">-m plane={{ tasks_per_node }}</arg>
</arguments>
@@ -4214,9 +4196,106 @@
</environment_variables>
</machine>

<machine MACH="gcp">
<DESC>Google Cloud cluster with c2-compute-60's</DESC>
<NODENAME_REGEX>gcp*</NODENAME_REGEX>
<machine MACH="gcp12">
<DESC>Google Cloud cluster using compute nodes c2d-compute-112's gcpe3sm12</DESC>
<NODENAME_REGEX>gcpe3sm12*</NODENAME_REGEX>
<OS>LINUX</OS>
<COMPILERS>gnu</COMPILERS>
<MPILIBS>openmpi</MPILIBS>
<CIME_OUTPUT_ROOT>/home/$USER/e3sm/scratch</CIME_OUTPUT_ROOT>
<DIN_LOC_ROOT>/home/inputdata</DIN_LOC_ROOT>
<DIN_LOC_ROOT_CLMFORC>/home/inputdata/atm/datm7</DIN_LOC_ROOT_CLMFORC>
<DOUT_S_ROOT>$CIME_OUTPUT_ROOT/archive/$CASE</DOUT_S_ROOT>
<BASELINE_ROOT>/home/baselines/$COMPILER</BASELINE_ROOT>
<CCSM_CPRNC>/home/tools/cprnc/cprnc</CCSM_CPRNC>
<GMAKE_J>24</GMAKE_J>
<TESTS>e3sm_developer</TESTS>
<NTEST_PARALLEL_JOBS>8</NTEST_PARALLEL_JOBS>
<BATCH_SYSTEM>slurm</BATCH_SYSTEM>
<SUPPORTED_BY>e3sm</SUPPORTED_BY>
<MAX_TASKS_PER_NODE>112</MAX_TASKS_PER_NODE>
<MAX_MPITASKS_PER_NODE>56</MAX_MPITASKS_PER_NODE>
<PROJECT_REQUIRED>FALSE</PROJECT_REQUIRED>
<mpirun mpilib="openmpi">
<executable>srun</executable>
<arguments>
<arg name="pmi_layer"> --mpi=pmi2</arg>
<arg name="label"> --label</arg>
<arg name="num_tasks"> -n {{ total_tasks }} -N {{ num_nodes }} --kill-on-bad-exit</arg>
<arg name="thread_count">-c $SHELL{echo `./xmlquery --value MAX_TASKS_PER_NODE`/ {{ tasks_per_node }} |bc}</arg>
<arg name="binding"> $SHELL{if [ `./xmlquery --value MAX_TASKS_PER_NODE` -ge `./xmlquery --value MAX_MPITASKS_PER_NODE` ]; then echo "--cpu_bind=cores"; else echo "--cpu_bind=threads";fi;} </arg>
<arg name="placement">-m plane={{ tasks_per_node }}</arg>
</arguments>
</mpirun>

<module_system type="module" allow_error="true">
<init_path lang="python">/usr/share/lmod/lmod/init/env_modules_python.py</init_path>
<init_path lang="sh">/usr/share/lmod/lmod/init/sh</init_path>
<init_path lang="csh">/usr/share/lmod/lmod/init/csh</init_path>

<cmd_path lang="python">/usr/share/lmod/lmod/libexec/lmod python</cmd_path>
<cmd_path lang="sh">module</cmd_path>
<cmd_path lang="csh">module</cmd_path>

<modules>
<command name="use">/opt/apps/spack/share/spack/modules/linux-centos7-zen2</command>
<command name="use">/opt/apps/spack/share/spack/modules/linux-centos7-x86_64_v3</command>
<command name="unload">gcc</command>
<command name="unload">openmpi</command>
<command name="unload">binutils</command>
<command name="unload">netlib-lapack</command>
<command name="unload">openblas</command>
<command name="unload">hdf5</command>
<command name="unload">netcdf-c</command>
<command name="unload">parallel-netcdf</command>
</modules>

<modules compiler="gnu">
<command name="load">gcc/12.2.0</command>
</modules>

<modules mpilib="openmpi">
<command name="load">openmpi/4.1.4</command>
</modules>

<modules compiler="gnu">
<command name="load">cmake</command>
<command name="load">perl</command>
<command name="load">perl-xml-libxml</command>
<command name="load">netcdf-c</command>
<command name="load">netcdf-cxx</command>
<command name="load">netcdf-fortran</command>
<command name="load">parallel-netcdf</command>
<command name="load">hdf5</command>
<command name="load">netlib-lapack</command>
<command name="load">openblas</command>
</modules>

</module_system>
<RUNDIR>$CIME_OUTPUT_ROOT/$CASE/run</RUNDIR>
<EXEROOT>$CIME_OUTPUT_ROOT/$CASE/bld</EXEROOT>
<TEST_TPUT_TOLERANCE>0.2</TEST_TPUT_TOLERANCE>
<TEST_MEMLEAK_TOLERANCE>0.20</TEST_MEMLEAK_TOLERANCE>
<environment_variables compiler="gnu">
<env name="HDF5_PATH">$SHELL{dirname $(dirname $(which h5diff))}</env>
<env name="NETCDF_C_PATH">$SHELL{dirname $(dirname $(which nc-config))}</env>
<env name="NETCDF_FORTRAN_PATH">$SHELL{dirname $(dirname $(which nf-config))}</env>
<env name="PNETCDF_PATH">$SHELL{dirname $(dirname $(which pnetcdf-config))}</env>
<env name="OPENBLAS_PATH">/opt/apps/spack/opt/spack/linux-centos7-zen2/gcc-12.2.0/openblas-0.3.21-z66r7lyxwkhsshgreexm4cedffp73scp</env>
<env name="LAPACK_PATH">/opt/apps/spack/opt/spack/linux-centos7-zen2/gcc-12.2.0/netlib-lapack-3.10.1-lkhddpuidlw2z74g5ui6eq5iattsfjxp</env>
<env name="PERL5LIB">$ENV{PERL5LIB}:/opt/apps/spack/opt/spack/linux-centos7-zen2/gcc-12.2.0/perl-5.36.0-sly2pft2edg2p3iyijfyy6dzntusokno/lib/site_perl/5.36.0</env>
<env name="HDF5_USE_FILE_LOCKING">FALSE</env>
</environment_variables>

<environment_variables SMP_PRESENT="TRUE">
<env name="OMP_STACKSIZE">128M</env>
<env name="OMP_PLACES">threads</env>
</environment_variables>
</machine>
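
The gcp12 entry above leans on CIME's $SHELL{...} expansion for the srun thread count, the CPU-binding choice, and the NetCDF path discovery. Below is a minimal sketch of those expressions evaluated by hand, assuming tasks_per_node renders to 56 and the loaded spack modules put nc-config on PATH; the shell variable names are illustrative, not part of CIME:

# Hand evaluation of the $SHELL{...} expressions for gcp12 (illustrative values).
max_tasks=112        # MAX_TASKS_PER_NODE
max_mpitasks=56      # MAX_MPITASKS_PER_NODE
tasks_per_node=56

# thread_count: -c MAX_TASKS_PER_NODE / tasks_per_node
echo "-c $(echo "${max_tasks} / ${tasks_per_node}" | bc)"     # prints: -c 2

# binding: cores when MAX_TASKS_PER_NODE >= MAX_MPITASKS_PER_NODE, else threads
if [ "${max_tasks}" -ge "${max_mpitasks}" ]; then
    echo "--cpu_bind=cores"      # the branch taken here, since 112 >= 56
else
    echo "--cpu_bind=threads"
fi

# NETCDF_C_PATH: the install prefix two directories above the nc-config on PATH
dirname "$(dirname "$(which nc-config)")"
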

<machine MACH="gcp10">
<DESC>Google Cloud cluster with c2-compute-60's gcp-e3sm10</DESC>
<NODENAME_REGEX>gcp-e3sm10*</NODENAME_REGEX>
<OS>LINUX</OS>
<COMPILERS>gnu</COMPILERS>
<MPILIBS>openmpi</MPILIBS>
@@ -4239,7 +4318,7 @@
<arguments>
<arg name="pmi_layer"> --mpi=pmi2</arg>
<arg name="label"> --label</arg>
<arg name="num_tasks"> -n {{ total_tasks }} -N {{ num_nodes }}</arg>
<arg name="num_tasks"> -n {{ total_tasks }} -N {{ num_nodes }} --kill-on-bad-exit</arg>
<arg name="thread_count">-c $SHELL{echo `./xmlquery --value MAX_TASKS_PER_NODE`/ {{ tasks_per_node }} |bc}</arg>
<arg name="binding"> $SHELL{if [ `./xmlquery --value MAX_TASKS_PER_NODE` -ge `./xmlquery --value MAX_MPITASKS_PER_NODE` ]; then echo "--cpu_bind=cores"; else echo "--cpu_bind=threads";fi;} </arg>
<arg name="placement">-m plane={{ tasks_per_node }}</arg>
@@ -4273,13 +4352,13 @@
<command name="load">cmake</command>
<command name="load">perl</command>
<command name="load">perl-xml-libxml</command>
<command name="load">[email protected]</command>
<command name="load">[email protected]</command>
<command name="load">[email protected]</command>
<command name="load">[email protected]</command>
<command name="load">[email protected]</command>
<command name="load">[email protected]</command>
<command name="load">[email protected]</command>
<command name="load">[email protected]</command>
<command name="load">[email protected]</command>
<command name="load">[email protected]</command>
<command name="load">[email protected]</command>
<command name="load">[email protected]</command>
<command name="load">[email protected]</command>
<command name="load">[email protected]</command>
</modules>

</module_system>
2 changes: 1 addition & 1 deletion mache/spack/build_spack_env.template
@@ -19,7 +19,7 @@ source share/spack/setup-env.sh
spack mirror add spack_mirror file://{{ spack_mirror }}
{% endif %}

spack env remove -y {{ env_name }} && \
spack env remove -y {{ env_name }} >& /dev/null && \
echo "recreating environment: {{ env_name }}" || \
echo "creating new environment: {{ env_name }}"
spack env create {{ env_name }} {{ yaml_filename }}
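
The new >& /dev/null redirection silences the error that spack env remove prints when the environment does not exist yet, so only the matching echo branch reaches the log. A rendered sketch of the template lines above, assuming the variables expand to env_name=e3sm_test and yaml_filename=spack.yaml (both names are illustrative):

# Rendered form of the template (illustrative values).
# If e3sm_test exists, it is removed and the && branch reports a recreate;
# if not, the failure is discarded by >& /dev/null and the || branch runs.
spack env remove -y e3sm_test >& /dev/null && \
    echo "recreating environment: e3sm_test" || \
    echo "creating new environment: e3sm_test"
spack env create e3sm_test spack.yaml
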
2 changes: 1 addition & 1 deletion mache/spack/chrysalis_gnu_openmpi.yaml
@@ -139,7 +139,7 @@ spack:
f77: /gpfs/fs1/soft/chrysalis/spack/opt/spack/linux-centos8-x86_64/gcc-9.3.0/gcc-9.2.0-ugetvbp/bin/gfortran
fc: /gpfs/fs1/soft/chrysalis/spack/opt/spack/linux-centos8-x86_64/gcc-9.3.0/gcc-9.2.0-ugetvbp/bin/gfortran
flags: {}
operating_system: rhel8
operating_system: centos8
target: x86_64
modules: []
environment: {}
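
The operating_system field in these compiler entries has to match what spack detects on the Chrysalis nodes. A quick, hedged check (assuming spack has already been sourced via its setup-env.sh):

# Sketch: confirm that Chrysalis reports CentOS 8, matching operating_system: centos8
grep -E '^(ID|VERSION_ID)=' /etc/os-release    # expected: ID="centos", VERSION_ID="8"
spack arch --operating-system                  # expected to print centos8
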
2 changes: 1 addition & 1 deletion mache/spack/chrysalis_intel_impi.yaml
@@ -139,7 +139,7 @@ spack:
f77: /gpfs/fs1/soft/chrysalis/spack/opt/spack/linux-centos8-x86_64/gcc-9.3.0/intel-20.0.4-kodw73g/compilers_and_libraries_2020.4.304/linux/bin/intel64/ifort
fc: /gpfs/fs1/soft/chrysalis/spack/opt/spack/linux-centos8-x86_64/gcc-9.3.0/intel-20.0.4-kodw73g/compilers_and_libraries_2020.4.304/linux/bin/intel64/ifort
flags: {}
operating_system: rhel8
operating_system: centos8
target: x86_64
modules: []
environment: {}
2 changes: 1 addition & 1 deletion mache/spack/chrysalis_intel_openmpi.yaml
@@ -139,7 +139,7 @@ spack:
f77: /gpfs/fs1/soft/chrysalis/spack/opt/spack/linux-centos8-x86_64/gcc-9.3.0/intel-20.0.4-kodw73g/compilers_and_libraries_2020.4.304/linux/bin/intel64/ifort
fc: /gpfs/fs1/soft/chrysalis/spack/opt/spack/linux-centos8-x86_64/gcc-9.3.0/intel-20.0.4-kodw73g/compilers_and_libraries_2020.4.304/linux/bin/intel64/ifort
flags: {}
operating_system: rhel8
operating_system: centos8
target: x86_64
modules: []
environment: {}
4 changes: 2 additions & 2 deletions mache/version.py
@@ -1,2 +1,2 @@
__version_info__ = (1, 11, 0)
__version__ = '.'.join(str(vi) for vi in __version_info__)
__version_info__ = (1, 12, 0)
__version__ = '.'.join(str(vi) for vi in __version_info__)
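
The version tuple is joined with dots into the plain string the package reports at runtime. A small check, run from a checkout at this commit (assumes the repository root is the working directory):

# Sketch: print the version string produced by mache/version.py
python -c "from mache.version import __version__; print(__version__)"    # prints: 1.12.0
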
4 changes: 2 additions & 2 deletions setup.cfg
@@ -1,6 +1,6 @@
[metadata]
name = mache
version = 1.11.0
version = 1.12.0rc1
author = Xylar Asay-Davis
author_email = [email protected]
description = A package for providing configuration data relate to E3SM supported machines
@@ -27,4 +27,4 @@ install_requires =

[options.entry_points]
console_scripts =
mache = mache.__main__:main
mache = mache.__main__:main
