diff --git a/.github/workflows/e3sm-gh-pages.yml b/.github/workflows/e3sm-gh-pages.yml
index ab94ebf34b04..1025e4ed064f 100644
--- a/.github/workflows/e3sm-gh-pages.yml
+++ b/.github/workflows/e3sm-gh-pages.yml
@@ -15,15 +15,20 @@ concurrency:
jobs:
Build-and-Deploy-docs:
+ if: ${{ github.event.repository.name != 'scream' }}
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
show-progress: false
fetch-depth: 0 # Needed, or else gh-pages won't be fetched, and push rejected
- submodules: false # speeds up clone and not building anything in submodules
+ # TODO: get rid of dependency on CIME
+ # TODO: another option to investigate is a sparse checkout.
+ # In the scream repo, all other components do not need to be checked out.
+ # And even in the upstream, we mainly need only components/xyz/docs (and a few more places).
+ submodules: true
- name: Show action trigger
- run: echo "= The job was automatically triggered by a ${{github.event_name}} event."
+ run: echo "= The job was automatically triggered by a ${{github.event_name}} event on repo ${{github.event.repository.name}}."
- name: Set up Python 3.10
uses: actions/setup-python@v4.7.0
with:
@@ -31,6 +36,10 @@ jobs:
- name: Install python deps
run: python3 -m pip install mkdocs-material pymdown-extensions mkdocs-monorepo-plugin mdutils mkdocs-bibtex
# build every time (PR or push to master)
+ - name: Generate EAMxx params docs
+ working-directory: components/eamxx/scripts
+ run: |
+ ./eamxx-params-docs-autogen
- name: Build
run: mkdocs build --strict --verbose
# Only deploy to the main github page when there is a push to master
diff --git a/.github/workflows/eamxx-gh-pages.yml b/.github/workflows/eamxx-gh-pages.yml
new file mode 100644
index 000000000000..1c10db1f2671
--- /dev/null
+++ b/.github/workflows/eamxx-gh-pages.yml
@@ -0,0 +1,88 @@
+# This workflow aims to automatically rebuild eamxx documentation
+# every time the master branch is updated on github and within every PR
+
+name: EAMxx Docs
+
+on:
+ # Runs every time master branch is updated
+ push:
+ branches: [ master ]
+ # Only if docs-related files are touched
+ paths:
+ - components/eamxx/mkdocs.yml
+ - components/eamxx/docs/**
+ - components/eamxx/cime_config/namelist_defaults_scream.xml
+ # Runs every time a PR is opened against master
+ pull_request:
+ branches: [ master ]
+ # Only if docs-related files are touched
+ paths:
+ - components/eamxx/mkdocs.yml
+ - components/eamxx/docs/**
+ - components/eamxx/cime_config/namelist_defaults_scream.xml
+
+ label:
+ types:
+ - created
+
+ workflow_dispatch:
+
+concurrency:
+ # Prevent 2+ copies of this workflow from running concurrently
+ group: eamxx-docs-action
+
+jobs:
+
+ eamxx-docs:
+ if: ${{ github.event.repository.name == 'scream' }}
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Check out the repository
+ uses: actions/checkout@v4
+ with:
+ show-progress: false
+ # TODO: get rid of dependency on CIME
+ # TODO: another option to investigate is a sparse checkout.
+ # In the scream repo, all other components do not need to be checked out.
+ # And even in the upstream, we mainly need only components/xyz/docs (and a few more places).
+ submodules: true
+
+ - name: Show action trigger
+ run: |
+ echo "= The job was automatically triggered by a ${{github.event_name}} event."
+
+ - name: Set up Python 3.10
+ uses: actions/setup-python@v4.7.0
+ with:
+ python-version: "3.10"
+
+ - name: Install Python deps
+ run: |
+ pip install mkdocs pymdown-extensions mkdocs-material mdutils
+
+ - name: Generate EAMxx params docs
+ working-directory: components/eamxx/scripts
+ run: |
+ ./eamxx-params-docs-autogen
+
+ - name: Build docs
+ working-directory: components/eamxx
+ run: |
+ mkdocs build --strict --verbose
+
+ # only deploy to the main github page when there is a push to master
+ - if: ${{ github.event_name == 'push' }}
+ name: GitHub Pages action
+ uses: JamesIves/github-pages-deploy-action@v4
+ with:
+ # Do not remove existing pr-preview pages
+ clean-exclude: pr-preview
+ folder: ./components/eamxx/site
+
+ # If it's a PR from within the same repo, deploy to a preview page
+ - if: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name == github.repository }}
+ name: Preview docs
+ uses: rossjrw/pr-preview-action@v1
+ with:
+ source-dir: components/eamxx/site/
diff --git a/.gitignore b/.gitignore
index 69f9021c5cdb..c4d2a64bc994 100644
--- a/.gitignore
+++ b/.gitignore
@@ -29,3 +29,8 @@ site
# Ignore emacs backup files
*~
+
+# Ignore mkdocs site-generated files in eamxx
+components/eamxx/site/*
+# Ignore auto-generated eamxx_params.md file
+components/eamxx/docs/common/eamxx_params.md
diff --git a/cime_config/allactive/config_compsets.xml b/cime_config/allactive/config_compsets.xml
index b0f8b3139e3b..cb618cf6806e 100755
--- a/cime_config/allactive/config_compsets.xml
+++ b/cime_config/allactive/config_compsets.xml
@@ -14,7 +14,7 @@
TIME_ATM[%phys]_LND[%phys]_ICE[%phys]_OCN[%phys]_ROF[%phys]_GLC[%phys]_WAV[%phys][_ESP%phys][_BGC%phys]
Where for the EAM specific compsets below the following is supported
TIME = Time period (e.g. 2000, HIST, RCP8...)
- ATM = [EAM, SATM, SCREAM]
+ ATM = [EAM, EAMXX, SATM, SCREAM]
LND = [ELM, SLND]
ICE = [MPASSI, CICE, DICE, SICE]
OCN = [MPASO, DOCN, SOCN]
@@ -396,6 +396,14 @@
1850_EAM%CMIP6_ELM%SPBC_MPASSI_MPASO_MOSART_MALI%STATIC_SWAV
+
+
+
+ WCYCLXX2010
+ 2010_EAMXX_ELM%SPBC_MPASSI_MPASO_MOSART_SGLC_SWAV
+
+
+
MPAS_LISIO_TEST
diff --git a/cime_config/allactive/config_pesall.xml b/cime_config/allactive/config_pesall.xml
index 565b206ae912..8b0d07282696 100644
--- a/cime_config/allactive/config_pesall.xml
+++ b/cime_config/allactive/config_pesall.xml
@@ -1853,6 +1853,29 @@
+
+
+ "crusher-gpu-scream ne30np4 and ne30np4.pg2"
+
+ -2
+ -2
+ -2
+ -2
+ -2
+ -2
+ -2
+ -2
+
+
+ 1
+ 7
+ 1
+ 1
+ 1
+ 1
+
+
+
@@ -2336,6 +2359,30 @@
+
+
+ 8
+ 56
+
+ -6
+ -6
+ -6
+ -6
+ -6
+ -6
+ -6
+ -6
+
+
+ 1
+ 7
+ 1
+ 1
+ 1
+ 1
+
+
+
4
diff --git a/cime_config/config_files.xml b/cime_config/config_files.xml
index addb83883ca6..97d73759ac4d 100644
--- a/cime_config/config_files.xml
+++ b/cime_config/config_files.xml
@@ -127,6 +127,7 @@
$SRCROOT/components/stub_comps/satm
$SRCROOT/components/xcpl_comps/xatm
$SRCROOT/components/eam/
+ $SRCROOT/components/eamxx/
$SRCROOT/components/eamxx/
case_comps
diff --git a/cime_config/config_grids.xml b/cime_config/config_grids.xml
index 7cdd7f0a0557..1894ef66fa5d 100755
--- a/cime_config/config_grids.xml
+++ b/cime_config/config_grids.xml
@@ -1446,6 +1446,16 @@
oEC60to30v3
+
+ ne120np4.pg2
+ ne120np4.pg2
+ EC30to60E2r2
+ null
+ null
+ null
+ EC30to60E2r2
+
+
ne120np4.pg2
r05
@@ -1486,6 +1496,16 @@
oRRS18to6v3
+
+ ne256np4.pg2
+ ne256np4.pg2
+ oRRS18to6v3
+ null
+ null
+ null
+ oRRS18to6v3
+
+
ne256np4.pg2
r0125
diff --git a/cime_config/machines/Depends.frontier-gpu.crayclang.cmake b/cime_config/machines/Depends.frontier-gpu.crayclang.cmake
new file mode 100644
index 000000000000..e41d959b52b4
--- /dev/null
+++ b/cime_config/machines/Depends.frontier-gpu.crayclang.cmake
@@ -0,0 +1,43 @@
+set(CICE_F90
+ ice_FY.F90
+ ice_aerosol.F90
+ ice_age.F90
+ ice_atmo.F90
+ ice_blocks.F90
+ ice_calendar.F90
+ ice_diagnostics.F90
+ ice_distribution.F90
+ ice_domain.F90
+ ice_domain_size.F90
+ ice_dyn_evp.F90
+ ice_fileunits.F90
+ ice_flux.F90
+ ice_forcing.F90
+ ice_grid.F90
+ ice_history.F90
+ ice_history_fields.F90
+ ice_init.F90
+ ice_itd.F90
+ ice_kinds_mod.F90
+ ice_lvl.F90
+ ice_mechred.F90
+ ice_meltpond.F90
+ ice_ocean.F90
+ ice_orbital.F90
+ ice_probability.F90
+ ice_probability_tools.F90
+ ice_read_write.F90
+ ice_restoring.F90
+ ice_shortwave.F90
+ ice_spacecurve.F90
+ ice_state.F90
+ ice_step_mod.F90
+ ice_therm_itd.F90
+ ice_therm_vertical.F90
+ ice_transport_driver.F90
+ ice_transport_remap.F90
+ ice_work.F90)
+
+foreach(ITEM IN LISTS CICE_F90)
+ e3sm_add_flags("cice/src/source/${ITEM}" "-O0")
+endforeach()
diff --git a/cime_config/machines/Depends.frontier-scream-gpu.crayclang-scream.cmake b/cime_config/machines/Depends.frontier-scream-gpu.crayclang-scream.cmake
new file mode 100644
index 000000000000..be5ca2a3e2e9
--- /dev/null
+++ b/cime_config/machines/Depends.frontier-scream-gpu.crayclang-scream.cmake
@@ -0,0 +1,53 @@
+set(REDOPT
+ ../driver-mct/main/seq_io_mod.F90
+ elm/src/biogeophys/BandDiagonalMod.F90)
+
+if (NOT DEBUG)
+ foreach(ITEM IN LISTS REDOPT)
+ e3sm_add_flags("${ITEM}" "-O1 -g")
+ endforeach()
+endif()
+
+set(CICE_F90
+ ice_FY.F90
+ ice_aerosol.F90
+ ice_age.F90
+ ice_atmo.F90
+ ice_blocks.F90
+ ice_calendar.F90
+ ice_diagnostics.F90
+ ice_distribution.F90
+ ice_domain.F90
+ ice_domain_size.F90
+ ice_dyn_evp.F90
+ ice_fileunits.F90
+ ice_flux.F90
+ ice_forcing.F90
+ ice_grid.F90
+ ice_history.F90
+ ice_history_fields.F90
+ ice_init.F90
+ ice_itd.F90
+ ice_kinds_mod.F90
+ ice_lvl.F90
+ ice_mechred.F90
+ ice_meltpond.F90
+ ice_ocean.F90
+ ice_orbital.F90
+ ice_probability.F90
+ ice_probability_tools.F90
+ ice_read_write.F90
+ ice_restoring.F90
+ ice_shortwave.F90
+ ice_spacecurve.F90
+ ice_state.F90
+ ice_step_mod.F90
+ ice_therm_itd.F90
+ ice_therm_vertical.F90
+ ice_transport_driver.F90
+ ice_transport_remap.F90
+ ice_work.F90)
+
+foreach(ITEM IN LISTS CICE_F90)
+ e3sm_add_flags("cice/src/source/${ITEM}" "-O0")
+endforeach()
diff --git a/cime_config/machines/cmake_macros/crayclang-scream.cmake b/cime_config/machines/cmake_macros/crayclang-scream.cmake
index ab352a7e6c58..0346104eda30 100644
--- a/cime_config/machines/cmake_macros/crayclang-scream.cmake
+++ b/cime_config/machines/cmake_macros/crayclang-scream.cmake
@@ -9,8 +9,8 @@ string(APPEND CMAKE_Fortran_FLAGS_DEBUG " -O0 -g")
string(APPEND CMAKE_CXX_FLAGS_DEBUG " -O0 -g")
string(APPEND CPPDEFS_DEBUG " -DYAKL_DEBUG")
string(APPEND CPPDEFS " -DFORTRANUNDERSCORE -DNO_R16 -DCPRCRAY")
-# -em (default) generates MODULENAME.mod files
-string(APPEND CMAKE_Fortran_FLAGS " -f free -N 255 -h byteswapio -em")
+# -em -ef generates modulename.mod files (lowercase), which we must have
+string(APPEND CMAKE_Fortran_FLAGS " -f free -em -ef")
if (NOT compile_threaded)
# -M1077 flag used to suppress message about OpenMP directives
# that are ignored for non-threaded builds. (-h omp inactive)
@@ -18,7 +18,8 @@ if (NOT compile_threaded)
string(APPEND CMAKE_Fortran_FLAGS " -M1077")
endif()
set(HAS_F2008_CONTIGUOUS "TRUE")
-string(APPEND CMAKE_EXE_LINKER_FLAGS " -Wl,--allow-multiple-definition -h byteswapio")
+string(APPEND CMAKE_EXE_LINKER_FLAGS " -Wl,--allow-multiple-definition -ldl")
+set(E3SM_LINK_WITH_FORTRAN "TRUE")
set(MPICC "cc")
set(MPICXX "CC")
set(MPIFC "ftn")
diff --git a/cime_config/machines/cmake_macros/crayclang-scream_crusher-scream.cmake b/cime_config/machines/cmake_macros/crayclang-scream_crusher-scream.cmake
index ef0d3303004b..2a0bfd6217c8 100644
--- a/cime_config/machines/cmake_macros/crayclang-scream_crusher-scream.cmake
+++ b/cime_config/machines/cmake_macros/crayclang-scream_crusher-scream.cmake
@@ -8,6 +8,5 @@ if (COMP_NAME STREQUAL elm)
string(APPEND CMAKE_Fortran_FLAGS " -hfp0")
endif()
string(APPEND CMAKE_Fortran_FLAGS " -hipa0 -hzero")
-string(APPEND CMAKE_Fortran_FLAGS " -em -ef")
set(PIO_FILESYSTEM_HINTS "gpfs")
diff --git a/cime_config/machines/cmake_macros/crayclang-scream_frontier-scream-gpu.cmake b/cime_config/machines/cmake_macros/crayclang-scream_frontier-scream-gpu.cmake
new file mode 100644
index 000000000000..8cc85b92cc6a
--- /dev/null
+++ b/cime_config/machines/cmake_macros/crayclang-scream_frontier-scream-gpu.cmake
@@ -0,0 +1,38 @@
+set(MPICC "mpicc")
+set(MPICXX "mpicxx") # Needs MPICH_CXX to use hipcc
+set(MPIFC "ftn") # Linker needs to be the Cray wrapper ftn, not mpif90
+set(SCC "cc")
+set(SCXX "hipcc")
+set(SFC "ftn")
+
+string(APPEND CPPDEFS " -DLINUX")
+if (COMP_NAME STREQUAL gptl)
+ string(APPEND CPPDEFS " -DHAVE_NANOTIME -DBIT64 -DHAVE_SLASHPROC -DHAVE_COMM_F2C -DHAVE_TIMES -DHAVE_GETTIMEOFDAY")
+endif()
+
+if (compile_threaded)
+ string(APPEND CMAKE_C_FLAGS " -fopenmp")
+ string(APPEND CMAKE_Fortran_FLAGS " -fopenmp")
+ string(APPEND CMAKE_CXX_FLAGS " -fopenmp")
+ string(APPEND CMAKE_EXE_LINKER_FLAGS " -fopenmp")
+endif()
+
+string(APPEND CMAKE_Fortran_FLAGS " -hipa0 -hzero -f free")
+
+string(APPEND CMAKE_EXE_LINKER_FLAGS " -L$ENV{ROCM_PATH}/lib -lamdhip64")
+string(APPEND CMAKE_CXX_FLAGS " -I$ENV{ROCM_PATH}/include")
+
+# Crusher: this resolves a crash in mct in docn init
+string(APPEND CMAKE_C_FLAGS_RELEASE " -O2 -hnoacc -hfp0 -hipa0")
+string(APPEND CMAKE_Fortran_FLAGS_RELEASE " -O2 -hnoacc -hfp0 -hipa0")
+string(APPEND CMAKE_CXX_FLAGS_RELEASE " -O2 ")
+
+string(APPEND CPPDEFS " -DCPRCRAY")
+
+if (COMP_NAME STREQUAL gptl)
+ string(APPEND CPPDEFS " -DHAVE_NANOTIME -DBIT64 -DHAVE_VPRINTF -DHAVE_BACKTRACE -DHAVE_SLASHPROC -DHAVE_COMM_F2C -DHAVE_TIMES -DHAVE_GETTIMEOFDAY")
+endif()
+set(PIO_FILESYSTEM_HINTS "lustre")
+
+string(APPEND KOKKOS_OPTIONS " -DKokkos_ENABLE_HIP=On -DKokkos_ARCH_VEGA90A=On -DCMAKE_CXX_FLAGS='-std=gnu++14'")
+set(USE_HIP "TRUE")
diff --git a/cime_config/machines/cmake_macros/crayclang-scream_frontier-scream.cmake b/cime_config/machines/cmake_macros/crayclang-scream_frontier-scream.cmake
new file mode 100644
index 000000000000..b486efd712cc
--- /dev/null
+++ b/cime_config/machines/cmake_macros/crayclang-scream_frontier-scream.cmake
@@ -0,0 +1,14 @@
+if (compile_threaded)
+ #string(APPEND CFLAGS " -fopenmp")
+ string(APPEND CMAKE_Fortran_FLAGS " -fopenmp")
+ string(APPEND CMAKE_CXX_FLAGS " -fopenmp")
+ string(APPEND CMAKE_EXE_LINKER_FLAGS " -fopenmp")
+endif()
+if (COMP_NAME STREQUAL elm)
+ string(APPEND CMAKE_Fortran_FLAGS " -hfp0")
+endif()
+string(APPEND CMAKE_Fortran_FLAGS " -hipa0 -hzero -hsystem_alloc -f free -N 255 -h byteswapio")
+
+string(APPEND CMAKE_EXE_LINKER_FLAGS " -L$ENV{ROCM_PATH}/lib -lamdhip64 $ENV{OLCF_LIBUNWIND_ROOT}/lib/libunwind.a /sw/frontier/spack-envs/base/opt/cray-sles15-zen3/clang-14.0.0-rocm5.2.0/gperftools-2.10-6g5acp4pcilrl62tddbsbxlut67pp7qn/lib/libtcmalloc.a")
+
+set(PIO_FILESYSTEM_HINTS "gpfs")
diff --git a/cime_config/machines/cmake_macros/crayclang_frontier.cmake b/cime_config/machines/cmake_macros/crayclang_frontier.cmake
index 7a5fb412cbb9..6bda90a4187c 100644
--- a/cime_config/machines/cmake_macros/crayclang_frontier.cmake
+++ b/cime_config/machines/cmake_macros/crayclang_frontier.cmake
@@ -1,3 +1,10 @@
+if (compile_threaded)
+ #string(APPEND CFLAGS " -fopenmp")
+ string(APPEND CMAKE_Fortran_FLAGS " -fopenmp")
+ string(APPEND CMAKE_CXX_FLAGS " -fopenmp")
+ string(APPEND CMAKE_EXE_LINKER_FLAGS " -fopenmp")
+endif()
+
if (COMP_NAME STREQUAL elm)
# See Land NaNs in conditionals: https://github.com/E3SM-Project/E3SM/issues/4996
string(APPEND CMAKE_Fortran_FLAGS " -hfp0")
diff --git a/cime_config/machines/cmake_macros/gnugpu_ascent.cmake b/cime_config/machines/cmake_macros/gnugpu_ascent.cmake
index 330853a6d8a1..05805cdef74d 100644
--- a/cime_config/machines/cmake_macros/gnugpu_ascent.cmake
+++ b/cime_config/machines/cmake_macros/gnugpu_ascent.cmake
@@ -1,11 +1,14 @@
string(APPEND CMAKE_C_FLAGS_RELEASE " -O2")
string(APPEND CMAKE_Fortran_FLAGS_RELEASE " -O2")
+string(APPEND CMAKE_CUDA_FLAGS " -forward-unknown-to-host-compiler")
string(APPEND CMAKE_CUDA_FLAGS_RELEASE " -O3 -arch sm_70 --use_fast_math")
string(APPEND CMAKE_CUDA_FLAGS_DEBUG " -O0 -g -arch sm_70")
if (COMP_NAME STREQUAL gptl)
string(APPEND CPPDEFS " -DHAVE_SLASHPROC")
endif()
+string(APPEND KOKKOS_OPTIONS " -DKokkos_ARCH_VOLTA70=On -DKokkos_ENABLE_CUDA=On -DKokkos_ENABLE_CUDA_LAMBDA=On -DKokkos_ENABLE_SERIAL=ON -DKokkos_ENABLE_OPENMP=Off")
string(APPEND CMAKE_EXE_LINKER_FLAGS " -L$ENV{ESSL_PATH}/lib64 -lessl")
set(MPICXX "mpiCC")
set(PIO_FILESYSTEM_HINTS "gpfs")
set(USE_CUDA "TRUE")
+set(CMAKE_CUDA_ARCHITECTURES "70")
diff --git a/cime_config/machines/cmake_macros/gnugpu_lassen.cmake b/cime_config/machines/cmake_macros/gnugpu_lassen.cmake
new file mode 100644
index 000000000000..1b87f821b73a
--- /dev/null
+++ b/cime_config/machines/cmake_macros/gnugpu_lassen.cmake
@@ -0,0 +1,18 @@
+string(APPEND CMAKE_C_FLAGS_RELEASE " -O2")
+string(APPEND CMAKE_Fortran_FLAGS_RELEASE " -O2")
+string(APPEND CMAKE_CUDA_FLAGS_RELEASE " -O3 -arch sm_70 --use_fast_math")
+string(APPEND CMAKE_CUDA_FLAGS_DEBUG " -O0 -g -arch sm_70")
+
+if (COMP_NAME STREQUAL gptl)
+ string(APPEND CPPDEFS " -DHAVE_SLASHPROC")
+endif()
+
+string(APPEND CPPDEFS " -DTHRUST_IGNORE_CUB_VERSION_CHECK")
+
+set(PIO_FILESYSTEM_HINTS "gpfs")
+
+set(USE_CUDA "TRUE")
+
+# This may not be needed once we figure out why MPI calls are segfaulting
+# on lassen when this is ON.
+set(SCREAM_MPI_ON_DEVICE OFF CACHE STRING "")
diff --git a/cime_config/machines/cmake_macros/gnugpu_pm-gpu.cmake b/cime_config/machines/cmake_macros/gnugpu_pm-gpu.cmake
index e61e893d001b..fd52a2046502 100644
--- a/cime_config/machines/cmake_macros/gnugpu_pm-gpu.cmake
+++ b/cime_config/machines/cmake_macros/gnugpu_pm-gpu.cmake
@@ -7,6 +7,7 @@ endif()
string(APPEND CPPDEFS " -DTHRUST_IGNORE_CUB_VERSION_CHECK")
string(APPEND CMAKE_CUDA_FLAGS " -ccbin CC -O2 -arch sm_80 --use_fast_math")
string(APPEND KOKKOS_OPTIONS " -DKokkos_ARCH_AMPERE80=On -DKokkos_ENABLE_CUDA=On -DKokkos_ENABLE_CUDA_LAMBDA=On -DKokkos_ENABLE_SERIAL=ON -DKokkos_ENABLE_OPENMP=Off")
+set(CMAKE_CUDA_ARCHITECTURES "80")
string(APPEND CMAKE_C_FLAGS_RELEASE " -O2")
string(APPEND CMAKE_Fortran_FLAGS_RELEASE " -O2")
set(MPICC "cc")
diff --git a/cime_config/machines/cmake_macros/intel.cmake b/cime_config/machines/cmake_macros/intel.cmake
index e9a808135fb2..a5cf3fea905c 100644
--- a/cime_config/machines/cmake_macros/intel.cmake
+++ b/cime_config/machines/cmake_macros/intel.cmake
@@ -12,6 +12,9 @@ string(APPEND CMAKE_C_FLAGS_DEBUG " -O0 -g")
string(APPEND CMAKE_CXX_FLAGS_DEBUG " -O0 -g")
string(APPEND CMAKE_Fortran_FLAGS_DEBUG " -O0 -g -check uninit -check bounds -check pointers -fpe0 -check noarg_temp_created -init=snan,arrays")
string(APPEND CMAKE_CXX_FLAGS " -fp-model source")
+if (COMP_NAME STREQUAL cice)
+ string(APPEND CMAKE_Fortran_FLAGS_DEBUG " -init=nosnan,arrays")
+endif()
string(APPEND CPPDEFS " -DFORTRANUNDERSCORE -DNO_R16 -DCPRINTEL")
string(APPEND CMAKE_Fortran_FLAGS " -convert big_endian -assume byterecl -ftz -traceback -assume realloc_lhs -fp-model source")
string(APPEND CMAKE_Fortran_FORMAT_FIXED_FLAG " -fixed -132")
diff --git a/cime_config/machines/cmake_macros/intel_chrysalis.cmake b/cime_config/machines/cmake_macros/intel_chrysalis.cmake
index a7e8384b01d2..a38fe5226cdc 100644
--- a/cime_config/machines/cmake_macros/intel_chrysalis.cmake
+++ b/cime_config/machines/cmake_macros/intel_chrysalis.cmake
@@ -18,3 +18,7 @@ if (MPILIB STREQUAL impi)
set(MPICXX "mpiicpc")
set(MPIFC "mpiifort")
endif()
+string(APPEND KOKKOS_OPTIONS " -DKokkos_ARCH_ZEN2=On")
+if (compile_threaded)
+ string(APPEND KOKKOS_OPTIONS " -DKokkos_ENABLE_AGGRESSIVE_VECTORIZATION=On")
+endif()
diff --git a/cime_config/machines/config_batch.xml b/cime_config/machines/config_batch.xml
index 6401d43704b2..9441a193ecec 100644
--- a/cime_config/machines/config_batch.xml
+++ b/cime_config/machines/config_batch.xml
@@ -371,6 +371,7 @@
--job-name={{ job_id }}
--nodes=1
--ntasks={{ total_tasks }}
+ --cpus-per-task={{ thread_count }}
--output={{ job_id }}.%j
@@ -706,6 +707,13 @@
+
+
+ pdebug
+ pbatch
+
+
+
/gpfs/wolf/cli115/world-shared/e3sm/tools/bsub/throttle
@@ -743,6 +751,15 @@
+
+ /lustre/orion/cli115/world-shared/e3sm/tools/sbatch/throttle
+
+ batch
+ batch
+ batch
+
+
+
rhel7G
diff --git a/cime_config/machines/config_machines.xml b/cime_config/machines/config_machines.xml
index 2aaed4a761b4..faec17a7d8a9 100644
--- a/cime_config/machines/config_machines.xml
+++ b/cime_config/machines/config_machines.xml
@@ -244,6 +244,7 @@
cray-netcdf-hdf5parallel/4.9.0.3
cray-parallel-netcdf/1.12.3.3
cmake/3.24.3
+ evp-patch
@@ -266,6 +267,7 @@
$SHELL{if [ -z "$Trilinos_ROOT" ]; then echo /global/common/software/e3sm/mali_tpls/trilinos-e3sm-serial-release-gcc; else echo "$Trilinos_ROOT"; fi}
$ENV{CRAY_NETCDF_HDF5PARALLEL_PREFIX}
$ENV{CRAY_PARALLEL_NETCDF_PREFIX}
+ 4000MB
$SHELL{if [ -z "$ADIOS2_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/adios2/2.9.1/cray-mpich-8.1.25/intel-2023.1.0; else echo "$ADIOS2_ROOT"; fi}
@@ -547,6 +549,7 @@
cray-netcdf-hdf5parallel/4.9.0.3
cray-parallel-netcdf/1.12.3.3
cmake/3.24.3
+ evp-patch
@@ -1034,11 +1037,12 @@
Linux
crayclang-scream
mpich
- CLI133_crusher
+ CLI115
/lustre/orion/cli133/proj-shared/$ENV{USER}/e3sm_scratch/crusher
/lustre/orion/cli115/world-shared/e3sm/inputdata
/lustre/orion/cli115/world-shared/e3sm/inputdata/atm/datm7
$CIME_OUTPUT_ROOT/archive/$CASE
+ /lustre/orion/cli133/world-shared/e3sm/baselines/$COMPILER
/lustre/orion/cli115/world-shared/e3sm/tools/cprnc/cprnc
8
1
@@ -1130,11 +1134,12 @@
Linux
crayclang-scream
mpich
- CLI133_crusher
+ CLI115
/lustre/orion/cli133/proj-shared/$ENV{USER}/e3sm_scratch/crusher
/lustre/orion/cli115/world-shared/e3sm/inputdata
/lustre/orion/cli115/world-shared/e3sm/inputdata/atm/datm7
$CIME_OUTPUT_ROOT/archive/$CASE
+ /lustre/orion/cli133/world-shared/e3sm/baselines/$COMPILER
/lustre/orion/cli115/world-shared/e3sm/tools/cprnc/cprnc
8
1
@@ -1207,6 +1212,92 @@
+
+ Frontier. AMD EPYC 7A53 64C nodes, 128 hwthreads, 512GB DDR4, 4 MI250X GPUs.
+ .*frontier.*
+ CNL
+ crayclang-scream
+ mpich
+ cli115
+ /lustre/orion/proj-shared/cli115
+ .*
+ /lustre/orion/cli115/proj-shared/$ENV{USER}/e3sm_scratch
+ /lustre/orion/cli115/world-shared/e3sm/inputdata
+ /lustre/orion/cli115/world-shared/e3sm/inputdata/atm/datm7
+ $CIME_OUTPUT_ROOT/archive/$CASE
+ /lustre/orion/cli115/world-shared/e3sm/baselines/frontier/$COMPILER
+ /lustre/orion/cli115/world-shared/e3sm/tools/cprnc/cprnc
+ 8
+ 1
+ slurm
+ e3sm
+ 56
+ 8
+ TRUE
+
+
+ srun
+
+ -l -K -n {{ total_tasks }} -N {{ num_nodes }}
+ --gpus-per-node=8 --gpu-bind=closest
+ -c $ENV{OMP_NUM_THREADS}
+
+
+
+
+ /usr/share/lmod/lmod/init/sh
+ /usr/share/lmod/lmod/init/csh
+ /usr/share/lmod/lmod/init/perl
+ /usr/share/lmod/lmod/init/env_modules_python.py
+ /usr/share/lmod/lmod/libexec/lmod perl
+ module
+ module
+ /usr/share/lmod/lmod/libexec/lmod python
+
+
+ PrgEnv-cray
+ craype-accel-amd-gfx90a
+ rocm/5.1.0
+ libunwind/1.6.2
+
+
+ cce/15.0.1
+ craype craype/2.7.20
+ cray-mpich cray-mpich/8.1.26
+ cray-python/3.9.13.1
+ subversion/1.14.1
+ git/2.36.1
+ cmake/3.21.3
+ cray-hdf5-parallel/1.12.2.1
+ cray-netcdf-hdf5parallel/4.9.0.1
+ cray-parallel-netcdf/1.12.3.1
+ darshan-runtime
+
+
+
+ $CIME_OUTPUT_ROOT/$CASE/run
+ $CIME_OUTPUT_ROOT/$CASE/bld
+ 0.1
+ 0
+
+ $ENV{NETCDF_DIR}
+ $ENV{PNETCDF_DIR}
+
+ 1
+ 1
+ 2
+ $SHELL{which hipcc}
+ $ENV{CRAY_LD_LIBRARY_PATH}:$ENV{LD_LIBRARY_PATH}
+ True
+
+
+
+ 128M
+ spread
+ threads
+
+
+
Stampede2. Intel skylake nodes at TACC. 48 cores per node, batch system is SLURM
@@ -1548,7 +1639,7 @@
sems-archive-env
acme-env
sems-archive-git
- sems-archive-cmake/3.19.1
+ acme-cmake/3.26.3
acme-gcc/8.1.0
@@ -2636,6 +2727,83 @@
+
+ LLNL Linux Cluster, Linux, 4 V100 GPUs/node, 44 IBM P9 cpu cores/node
+ lassen.*
+ LINUX
+ gnugpu
+ spectrum-mpi
+ cbronze
+ /usr/workspace/$USER/e3sm_scratch
+ /usr/gdata/climdat/ccsm3data/inputdata
+ /usr/gdata/climdat/ccsm3data/inputdata/atm/datm7
+ /usr/workspace/$USER/archive/$CASE
+ /usr/gdata/climdat/baselines/$COMPILER
+ 16
+ lsf
+ donahue5 -at- llnl.gov
+ 40
+ 40
+
+
+
+
+ jsrun
+
+ -X 1
+ $SHELL{if [ {{ total_tasks }} -eq 1 ];then echo --nrs 1 --rs_per_host 1;else echo --nrs $NUM_RS --rs_per_host $RS_PER_NODE;fi}
+ --tasks_per_rs $SHELL{echo "({{ tasks_per_node }} + $RS_PER_NODE - 1)/$RS_PER_NODE"|bc}
+ -d plane:$SHELL{echo "({{ tasks_per_node }} + $RS_PER_NODE - 1)/$RS_PER_NODE"|bc}
+ --cpu_per_rs $ENV{CPU_PER_RS}
+ --gpu_per_rs $ENV{GPU_PER_RS}
+ --bind packed:smt:$ENV{OMP_NUM_THREADS}
+ --latency_priority $ENV{LTC_PRT}
+ --stdio_mode prepended
+ $ENV{JSRUN_THREAD_VARS}
+ $ENV{SMPIARGS}
+
+
+
+ /usr/share/lmod/lmod/init/env_modules_python.py
+ /usr/share/lmod/lmod/init/perl
+ /usr/share/lmod/lmod/init/sh
+ /usr/share/lmod/lmod/init/csh
+ module
+ module
+ /usr/share/lmod/lmod/libexec/lmod python
+ /usr/share/lmod/lmod/libexec/lmod perl
+
+
+ git
+ gcc/8.3.1
+ cuda/11.8.0
+ cmake/3.16.8
+ spectrum-mpi
+ python/3.7.2
+
+
+ /p/gpfs1/$USER/e3sm_scratch/$CASE/run
+ $CIME_OUTPUT_ROOT/$CASE/bld
+
+
+
+
+ -E OMP_NUM_THREADS=$ENV{OMP_NUM_THREADS} -E OMP_PROC_BIND=spread -E OMP_PLACES=threads -E OMP_STACKSIZE=256M
+
+
+ y
+ /usr/gdata/climdat/netcdf/bin:$ENV{PATH}
+ /usr/gdata/climdat/netcdf/lib:$ENV{LD_LIBRARY_PATH}
+ /usr/gdata/climdat/netcdf
+ 2
+ 20
+ 2
+ gpu-cpu
+ $SHELL{echo "2*((`./xmlquery --value TOTAL_TASKS` + `./xmlquery --value TASKS_PER_NODE` - 1)/`./xmlquery --value TASKS_PER_NODE`)"|bc}
+ --smpiargs="-gpu"
+
+
+
LLNL Linux Cluster, Linux (pgi), 56 pes/node, batch system is Slurm
LINUX
@@ -2675,17 +2843,20 @@
intel-classic/2021.6.0-magic
mvapich2/2.3.7
cmake/3.19.2
- netcdf-fortran-parallel/4.6.0
- netcdf-c-parallel/4.9.0
+ /usr/gdata/climdat/install/quartz/modulefiles
+ hdf5/1.12.2
+ netcdf-c/4.9.0
+ netcdf-fortran/4.6.0
parallel-netcdf/1.12.3
+ screamML-venv/0.0.1
$CIME_OUTPUT_ROOT/$CASE/run
$CIME_OUTPUT_ROOT/$CASE/bld
- /usr/tce/packages/netcdf-fortran/netcdf-fortran-4.6.0-mvapich2-2.3.7-intel-classic-2021.6.0/
- /usr/tce/packages/parallel-netcdf/parallel-netcdf-1.12.3-mvapich2-2.3.7-intel-classic-2021.6.0/
-
+ /usr/gdata/climdat/install/quartz/netcdf-fortran/
+ /usr/tce/packages/parallel-netcdf/parallel-netcdf-1.12.3-mvapich2-2.3.7-intel-classic-2021.6.0
+
diff --git a/cime_config/tests.py b/cime_config/tests.py
index 968ea47b2d94..751afa1cf002 100644
--- a/cime_config/tests.py
+++ b/cime_config/tests.py
@@ -562,7 +562,7 @@
"SMS_D.ne4pg2_ne4pg2.F2010-SCREAM-LR",
"ERP.ne4pg2_ne4pg2.F2010-SCREAM-HR.eam-double_memleak_tol",
"ERP.ne4pg2_ne4pg2.F2010-SCREAM-LR.eam-double_memleak_tol",
- "ERP_R_Ln10.ne4_ne4.FDPSCREAM-ARM97",
+ "ERS_R_Ln10.ne4_ne4.FDPSCREAM-ARM97",
)
},
@@ -579,15 +579,23 @@
"ERS_Ln9.ne4_ne4.F2000-SCREAMv1-AQP1",
"SMS_D_Ln9.ne4_ne4.F2010-SCREAMv1-noAero",
"ERP_Ln22.ne4pg2_ne4pg2.F2010-SCREAMv1",
- "ERS_D_Ln21.ne4pg2_ne4pg2.F2010-SCREAMv1.scream-rad_frequency_2",
+ "ERS_D_Ln22.ne4pg2_ne4pg2.F2010-SCREAMv1.scream-rad_frequency_2",
)
},
+ # Tests run exclusively on mappy for scream AT testing. These tests
+ # should be fast, so we limit it to low res and add some thread tests
+ # specifically for mappy.
+ "e3sm_scream_v1_at" : {
+ "inherit" : ("e3sm_scream_v1_lowres"),
+ "tests" : ("PET_Ln9_P32x2.ne4pg2_ne4pg2.F2010-SCREAMv1")
+ },
+
"e3sm_scream_v1_medres" : {
"time" : "02:00:00",
"tests" : (
# "SMS_D_Ln2.ne30_ne30.F2000-SCREAMv1-AQP1", # Uncomment once IC file for ne30 is ready
- "ERS_Ln22.ne30_ne30.F2010-SCREAMv1",
+ "ERS_Ln22.ne30_ne30.F2010-SCREAMv1.scream-internal_diagnostics_level",
"PEM_Ln90.ne30pg2_ne30pg2.F2010-SCREAMv1",
"ERS_Ln90.ne30pg2_ne30pg2.F2010-SCREAMv1.scream-small_kernels",
"ERP_Ln22.conusx4v1pg2_r05_oECv3.F2010-SCREAMv1-noAero.scream-bfbhash",
diff --git a/components/CMakeLists.txt b/components/CMakeLists.txt
index c3a8d9de607f..7dcb3ae171e5 100644
--- a/components/CMakeLists.txt
+++ b/components/CMakeLists.txt
@@ -87,9 +87,11 @@ if (COMP_INTERFACE STREQUAL "moab")
set(CPPDEFS "${CPPDEFS} -DHAVE_MOAB")
endif()
-if(USE_CUDA)
+if (USE_CUDA)
+ set(CMAKE_CUDA_COMPILER_FORCED True)
enable_language(CUDA)
-elseif(USE_HIP)
+elseif (USE_HIP)
+ set(CMAKE_HIP_COMPILER_FORCED True)
enable_language(HIP)
endif()
diff --git a/components/cmake/build_eamxx.cmake b/components/cmake/build_eamxx.cmake
index 135cd640b36f..2d3b2602173c 100644
--- a/components/cmake/build_eamxx.cmake
+++ b/components/cmake/build_eamxx.cmake
@@ -33,6 +33,12 @@ function(build_eamxx)
else()
include(${SCREAM_MACH_FILE_ROOT}/${MACH}.cmake)
endif()
+
+ # The machine files may enable kokkos stuff we don't want
+ if (NOT compile_threaded)
+ set(Kokkos_ENABLE_OPENMP FALSE)
+ endif()
+
add_subdirectory("eamxx")
endif()
diff --git a/components/cmake/build_model.cmake b/components/cmake/build_model.cmake
index 2769c94f2caa..16f1f039ebde 100644
--- a/components/cmake/build_model.cmake
+++ b/components/cmake/build_model.cmake
@@ -250,7 +250,9 @@ macro(build_model COMP_CLASS COMP_NAME)
endforeach()
# Make sure we link blas/lapack
- target_link_libraries(${TARGET_NAME} BLAS::BLAS LAPACK::LAPACK)
+ if (NOT DEFINED ENV{SKIP_BLAS})
+ target_link_libraries(${TARGET_NAME} BLAS::BLAS LAPACK::LAPACK)
+ endif()
if (E3SM_LINK_WITH_FORTRAN)
set_target_properties(${TARGET_NAME} PROPERTIES LINKER_LANGUAGE Fortran)
@@ -284,7 +286,7 @@ macro(build_model COMP_CLASS COMP_NAME)
target_link_libraries(${TARGET_NAME} PRIVATE csm_share)
if (COMP_NAME STREQUAL "eam")
if (USE_YAKL)
- target_link_libraries(${TARGET_NAME} PRIVATE yakl)
+ target_link_libraries(${TARGET_NAME} PRIVATE yakl yakl_fortran_interface)
endif()
if (USE_SAMXX)
target_link_libraries(${TARGET_NAME} PRIVATE samxx)
diff --git a/components/cmake/find_dep_packages.cmake b/components/cmake/find_dep_packages.cmake
index eee4ea1426dd..9d5628bb0bd4 100644
--- a/components/cmake/find_dep_packages.cmake
+++ b/components/cmake/find_dep_packages.cmake
@@ -45,5 +45,9 @@ endif()
find_package(PIO REQUIRED)
find_package(MCT REQUIRED)
find_package(CsmShare REQUIRED)
-find_package(BLAS REQUIRED)
-find_package(LAPACK REQUIRED)
+
+# Hack for unsupported blas vendors
+if (NOT DEFINED ENV{SKIP_BLAS})
+ find_package(BLAS REQUIRED)
+ find_package(LAPACK REQUIRED)
+endif()
diff --git a/components/eam/bld/build-namelist b/components/eam/bld/build-namelist
index be37559d1e31..8dc532b6a3df 100755
--- a/components/eam/bld/build-namelist
+++ b/components/eam/bld/build-namelist
@@ -3814,10 +3814,8 @@ if ($shoc_sgs =~ /$TRUE/io) {
add_default($nl, 'shoc_lambda_thresh');
add_default($nl, 'shoc_Ckh');
add_default($nl, 'shoc_Ckm');
- add_default($nl, 'shoc_Ckh_s_min');
- add_default($nl, 'shoc_Ckm_s_min');
- add_default($nl, 'shoc_Ckh_s_max');
- add_default($nl, 'shoc_Ckm_s_max');
+ add_default($nl, 'shoc_Ckh_s');
+ add_default($nl, 'shoc_Ckm_s');
}
diff --git a/components/eam/bld/namelist_files/namelist_defaults_eam.xml b/components/eam/bld/namelist_files/namelist_defaults_eam.xml
index 60e3199c4b22..56c816b1753c 100755
--- a/components/eam/bld/namelist_files/namelist_defaults_eam.xml
+++ b/components/eam/bld/namelist_files/namelist_defaults_eam.xml
@@ -829,10 +829,8 @@
0.02D0
0.1D0
0.1D0
- 0.1D0
- 0.1D0
- 0.1D0
- 0.1D0
+ 0.1D0
+ 0.1D0
.false.
diff --git a/components/eam/bld/namelist_files/namelist_definition.xml b/components/eam/bld/namelist_files/namelist_definition.xml
index a79526cb7748..bc9977f67b58 100644
--- a/components/eam/bld/namelist_files/namelist_definition.xml
+++ b/components/eam/bld/namelist_files/namelist_definition.xml
@@ -3403,27 +3403,15 @@ Coefficient for eddy diffusivity of momentum.
Default: set by build-namelist
-
-Minimum allowable value for coefficient for eddy diffusivity for heat for stable boundary layers.
+Coefficient for eddy diffusivity for heat for stable boundary layers.
Default: set by build-namelist
-
-Maximum allowable value for coefficient for eddy diffusivity for heat for stable boundary layers.
-Default: set by build-namelist
-
-
-
-Minimum allowable value for coefficient for eddy diffusivity for momentum for stable boundary layers.
-Default: set by build-namelist
-
-
-
-Maximum allowable value for coefficient for eddy diffusivity for momentum for stable boundary layers.
+Coefficient for eddy diffusivity for momentum for stable boundary layers.
Default: set by build-namelist
@@ -4666,6 +4654,12 @@ Compute LS vertical transport using omega prescribed from IOP file.
Default: FALSE
+
+Use geostrophic winds specified in IOP file to apply the Coriolis force.
+Default: FALSE
+
+
Use relaxation for temperature and moisture.
diff --git a/components/eam/src/control/iop_data_mod.F90 b/components/eam/src/control/iop_data_mod.F90
index 45504d5d7b63..f7b05dee5e09 100644
--- a/components/eam/src/control/iop_data_mod.F90
+++ b/components/eam/src/control/iop_data_mod.F90
@@ -98,8 +98,10 @@ module iop_data_mod
real(r8), public :: tsair(1) ! air temperature at the surface
real(r8), public :: udiff(plev) ! model minus observed uwind
real(r8), public :: uobs(plev) ! actual u wind
+ real(r8), public :: uls(plev) ! large scale / geostrophic u wind
real(r8), public :: vdiff(plev) ! model minus observed vwind
real(r8), public :: vobs(plev) ! actual v wind
+ real(r8), public :: vls(plev) ! large scale / geostrophic v wind
real(r8), public :: cldobs(plev) ! observed cld
real(r8), public :: clwpobs(plev) ! observed clwp
real(r8), public :: aldirobs(1) ! observed aldir
@@ -154,6 +156,8 @@ module iop_data_mod
logical*4, public :: have_tsair ! dataset contains tsair
logical*4, public :: have_u ! dataset contains u
logical*4, public :: have_v ! dataset contains v
+ logical*4, public :: have_uls ! dataset contains large scale u
+ logical*4, public :: have_vls ! dataset contains large scale v
logical*4, public :: have_cld ! dataset contains cld
logical*4, public :: have_cldliq ! dataset contains cldliq
logical*4, public :: have_cldice ! dataset contains cldice
@@ -166,8 +170,9 @@ module iop_data_mod
logical*4, public :: have_asdif ! dataset contains asdif
logical*4, public :: scm_iop_srf_prop ! use the specified surface properties
logical*4, public :: iop_dosubsidence ! compute Eulerian LS vertical advection
- logical*4, public :: iop_nudge_tq ! use relaxation for t and q
- logical*4, public :: iop_nudge_uv ! use relaxation for u and v
+ logical*4, public :: iop_coriolis ! use geostrophic winds to apply Coriolis forcing
+ logical*4, public :: iop_nudge_tq ! use relaxation for t and q
+ logical*4, public :: iop_nudge_uv ! use relaxation for u and v
logical*4, public :: scm_observed_aero ! use observed aerosols in SCM file
logical*4, public :: precip_off ! turn off precipitation processes
logical*4, public :: scm_zero_non_iop_tracers ! initialize non-IOP-specified tracers to zero
@@ -184,7 +189,7 @@ module iop_data_mod
subroutine iop_default_opts( scmlat_out,scmlon_out,iopfile_out, &
single_column_out,scm_iop_srf_prop_out, iop_nudge_tq_out, iop_nudge_uv_out, &
iop_nudge_tq_low_out, iop_nudge_tq_high_out, iop_nudge_tscale_out, &
- scm_observed_aero_out, iop_dosubsidence_out, &
+ scm_observed_aero_out, iop_dosubsidence_out, iop_coriolis_out, &
scm_multcols_out, dp_crm_out, iop_perturb_high_out, &
precip_off_out, scm_zero_non_iop_tracers_out)
!-----------------------------------------------------------------------
@@ -193,6 +198,7 @@ subroutine iop_default_opts( scmlat_out,scmlon_out,iopfile_out, &
logical, intent(out), optional :: single_column_out
logical, intent(out), optional :: scm_iop_srf_prop_out
logical, intent(out), optional :: iop_dosubsidence_out
+ logical, intent(out), optional :: iop_coriolis_out
logical, intent(out), optional :: iop_nudge_tq_out
logical, intent(out), optional :: iop_nudge_uv_out
logical, intent(out), optional :: scm_observed_aero_out
@@ -211,6 +217,7 @@ subroutine iop_default_opts( scmlat_out,scmlon_out,iopfile_out, &
if ( present(single_column_out) ) single_column_out = .false.
if ( present(scm_iop_srf_prop_out) )scm_iop_srf_prop_out = .false.
if ( present(iop_dosubsidence_out) )iop_dosubsidence_out = .false.
+ if ( present(iop_coriolis_out) ) iop_coriolis_out = .false.
if ( present(iop_nudge_tq_out) ) iop_nudge_tq_out = .false.
if ( present(iop_nudge_uv_out) ) iop_nudge_uv_out = .false.
if ( present(iop_nudge_tq_low_out) ) iop_nudge_tq_low_out = 1050.0_r8
@@ -229,7 +236,7 @@ end subroutine iop_default_opts
subroutine iop_setopts( scmlat_in, scmlon_in,iopfile_in,single_column_in, &
scm_iop_srf_prop_in, iop_nudge_tq_in, iop_nudge_uv_in, &
iop_nudge_tq_low_in, iop_nudge_tq_high_in, iop_nudge_tscale_in, &
- scm_observed_aero_in, iop_dosubsidence_in, &
+ scm_observed_aero_in, iop_dosubsidence_in, iop_coriolis_in, &
scm_multcols_in, dp_crm_in, iop_perturb_high_in, &
precip_off_in, scm_zero_non_iop_tracers_in)
!-----------------------------------------------------------------------
@@ -238,6 +245,7 @@ subroutine iop_setopts( scmlat_in, scmlon_in,iopfile_in,single_column_in, &
logical, intent(in), optional :: single_column_in
logical, intent(in), optional :: scm_iop_srf_prop_in
logical, intent(in), optional :: iop_dosubsidence_in
+ logical, intent(in), optional :: iop_coriolis_in
logical, intent(in), optional :: iop_nudge_tq_in
logical, intent(in), optional :: iop_nudge_uv_in
logical, intent(in), optional :: scm_observed_aero_in
@@ -272,7 +280,11 @@ subroutine iop_setopts( scmlat_in, scmlon_in,iopfile_in,single_column_in, &
if (present (iop_dosubsidence_in)) then
iop_dosubsidence=iop_dosubsidence_in
endif
-
+
+ if (present (iop_coriolis_in)) then
+ iop_coriolis=iop_coriolis_in
+ endif
+
if (present (iop_nudge_tq_in)) then
iop_nudge_tq=iop_nudge_tq_in
endif
@@ -317,6 +329,7 @@ subroutine iop_setopts( scmlat_in, scmlon_in,iopfile_in,single_column_in, &
call mpibcast(scm_iop_srf_prop,1,mpilog,0,mpicom)
call mpibcast(dp_crm,1,mpilog,0,mpicom)
call mpibcast(iop_dosubsidence,1,mpilog,0,mpicom)
+ call mpibcast(iop_coriolis,1,mpilog,0,mpicom)
call mpibcast(iop_nudge_tq,1,mpilog,0,mpicom)
call mpibcast(iop_nudge_uv,1,mpilog,0,mpicom)
call mpibcast(iop_nudge_tq_high,1,mpir8,0,mpicom)
@@ -1300,6 +1313,21 @@ subroutine readiopdata(iop_update_phase1,hyam,hybm)
have_u = .true.
endif
+ ! large scale / geostrophic horizontal wind (for nudging)
+ call getinterpncdata( ncid, scmlat, scmlon, ioptimeidx, &
+ 'u_ls', have_srf, srf(1), .true. , dplevs, nlev,psobs, hyam, hybm, uls, status )
+ if ( status .ne. nf90_noerr ) then
+ have_uls = .false.
+ if (iop_coriolis) then
+ write(iulog,*) 'Large scale / geostrophic winds required for Coriolis forcing'
+ write(iulog,*) 'Missing variable u_ls in the IOP file'
+ write(iulog,*) 'Aborting run'
+ call endrun
+ endif
+ else
+ have_uls = .true.
+ endif
+
status = nf90_inq_varid( ncid, 'vsrf', varid )
if ( status .ne. nf90_noerr ) then
have_srf = .false.
@@ -1318,6 +1346,21 @@ subroutine readiopdata(iop_update_phase1,hyam,hybm)
endif
call shr_sys_flush( iulog )
+ ! large scale / geostrophic meridional wind (for nudging)
+ call getinterpncdata( ncid, scmlat, scmlon, ioptimeidx, &
+ 'v_ls', have_srf, srf(1), .true. , dplevs, nlev,psobs, hyam, hybm, vls, status )
+ if ( status .ne. nf90_noerr ) then
+ have_vls = .false.
+ if (iop_coriolis) then
+ write(iulog,*) 'Large scale / geostrophic winds required for Coriolis forcing'
+ write(iulog,*) 'Missing variable v_ls in the IOP file'
+ write(iulog,*) 'Aborting run'
+ call endrun
+ endif
+ else
+ have_vls = .true.
+ endif
+
status = nf90_inq_varid( ncid, 'Prec', varid )
if ( status .ne. nf90_noerr ) then
have_prec = .false.
diff --git a/components/eam/src/control/runtime_opts.F90 b/components/eam/src/control/runtime_opts.F90
index 3aa26e2ab9a1..27dd2931a097 100644
--- a/components/eam/src/control/runtime_opts.F90
+++ b/components/eam/src/control/runtime_opts.F90
@@ -172,6 +172,7 @@ module runtime_opts
character(len=max_chars) iopfile
logical :: scm_iop_srf_prop
logical :: iop_dosubsidence
+logical :: iop_coriolis
logical :: iop_nudge_tq
logical :: iop_nudge_uv
logical :: scm_diurnal_avg
@@ -337,7 +338,7 @@ subroutine read_namelist(single_column_in, scmlon_in, scmlat_in, scm_multcols_in
! IOP
namelist /cam_inparm/ iopfile, scm_iop_srf_prop, iop_nudge_tq, iop_nudge_uv, &
iop_nudge_tq_low, iop_nudge_tq_high, iop_nudge_tscale, &
- scm_observed_aero, precip_off, &
+ scm_observed_aero, precip_off, iop_coriolis, &
scm_zero_non_iop_tracers, iop_perturb_high, dp_crm, &
iop_dosubsidence, scm_zero_non_iop_tracers
@@ -378,6 +379,8 @@ subroutine read_namelist(single_column_in, scmlon_in, scmlat_in, scm_multcols_in
call iop_default_opts(scmlat_out=scmlat,scmlon_out=scmlon, &
single_column_out=single_column, &
scm_iop_srf_prop_out=scm_iop_srf_prop,&
+ iop_dosubsidence_out=iop_dosubsidence, &
+ iop_coriolis_out=iop_coriolis, &
iop_nudge_tq_out=iop_nudge_tq, &
iop_nudge_uv_out=iop_nudge_uv, &
iop_nudge_tq_low_out=iop_nudge_tq_low, &
@@ -385,7 +388,6 @@ subroutine read_namelist(single_column_in, scmlon_in, scmlat_in, scm_multcols_in
iop_nudge_tscale_out=iop_nudge_tscale, &
scm_observed_aero_out=scm_observed_aero, &
precip_off_out=precip_off, &
- iop_dosubsidence_out=iop_dosubsidence, &
iop_perturb_high_out=iop_perturb_high, &
scm_multcols_out=scm_multcols, &
dp_crm_out=dp_crm, &
@@ -463,7 +465,8 @@ subroutine read_namelist(single_column_in, scmlon_in, scmlat_in, scm_multcols_in
call iop_setopts( scmlat_in=scmlat,scmlon_in=scmlon, &
iopfile_in=iopfile,single_column_in=single_column,&
scm_iop_srf_prop_in=scm_iop_srf_prop,&
- iop_dosubsidence_in=iop_dosubsidence,&
+ iop_dosubsidence_in=iop_dosubsidence,&
+ iop_coriolis_in=iop_coriolis,&
iop_nudge_tq_in=iop_nudge_tq, &
iop_nudge_uv_in=iop_nudge_uv, &
iop_nudge_tq_low_in=iop_nudge_tq_low, &
diff --git a/components/eam/src/dynamics/se/se_iop_intr_mod.F90 b/components/eam/src/dynamics/se/se_iop_intr_mod.F90
index 09d6acac46db..02862980053c 100644
--- a/components/eam/src/dynamics/se/se_iop_intr_mod.F90
+++ b/components/eam/src/dynamics/se/se_iop_intr_mod.F90
@@ -165,6 +165,8 @@ subroutine iop_broadcast()
call mpibcast(have_q,1,mpilog,0,mpicom)
call mpibcast(have_u,1,mpilog,0,mpicom)
call mpibcast(have_v,1,mpilog,0,mpicom)
+ call mpibcast(have_uls,1,mpilog,0,mpicom)
+ call mpibcast(have_vls,1,mpilog,0,mpicom)
call mpibcast(have_omega,1,mpilog,0,mpicom)
call mpibcast(have_cldliq,1,mpilog,0,mpicom)
call mpibcast(have_divt,1,mpilog,0,mpicom)
@@ -182,6 +184,8 @@ subroutine iop_broadcast()
call mpibcast(qobs,plev,mpir8,0,mpicom)
call mpibcast(uobs,plev,mpir8,0,mpicom)
call mpibcast(vobs,plev,mpir8,0,mpicom)
+ call mpibcast(uls,plev,mpir8,0,mpicom)
+ call mpibcast(vls,plev,mpir8,0,mpicom)
call mpibcast(cldliqobs,plev,mpir8,0,mpicom)
call mpibcast(wfld,plev,mpir8,0,mpicom)
@@ -189,6 +193,7 @@ subroutine iop_broadcast()
call mpibcast(divq,plev,mpir8,0,mpicom)
call mpibcast(divt3d,plev,mpir8,0,mpicom)
call mpibcast(divq3d,plev,mpir8,0,mpicom)
+ call mpibcast(scmlat,1,mpir8,0,mpicom)
#endif
@@ -418,6 +423,10 @@ subroutine apply_iop_forcing(elem,hvcoord,hybrid,tl,n,t_before_advance,nets,nete
call outfld('QDIFF',qdiff_dyn,plon,begchunk)
endif
+ if (iop_coriolis) then
+ call iop_apply_coriolis(elem,t1,nelemd_todo,np_todo,dt)
+ endif
+
call outfld('TOBS',tobs,plon,begchunk)
call outfld('QOBS',qobs,plon,begchunk)
call outfld('DIVQ',divq,plon,begchunk)
@@ -472,6 +481,7 @@ subroutine iop_domain_relaxation(elem,hvcoord,hybrid,t1,dp,nelemd_todo,np_todo,d
real (kind=real_kind), dimension(nlev) :: domain_q, domain_t, domain_u, domain_v, rtau
real (kind=real_kind), dimension(nlev) :: relax_t, relax_q, relax_u, relax_v, iop_pres
real (kind=real_kind), dimension(np,np,nlev) :: temperature, Rstar, pnh, exner, dp
+ real (kind=real_kind) :: uref, vref
integer :: ie, i, j, k
! Compute pressure for IOP observations
@@ -522,9 +532,18 @@ subroutine iop_domain_relaxation(elem,hvcoord,hybrid,t1,dp,nelemd_todo,np_todo,d
rtau(k) = iop_nudge_tscale
rtau(k) = max(dt,rtau(k))
+ ! If LS/geostrophic winds are available then nudge to those
+ if (have_uls .and. have_vls) then
+ uref = uls(k)
+ vref = vls(k)
+ else
+ uref = uobs(k)
+ vref = vobs(k)
+ endif
+
! Compute relaxation for winds
- relax_u(k) = -(domain_u(k) - uobs(k))/rtau(k)
- relax_v(k) = -(domain_v(k) - vobs(k))/rtau(k)
+ relax_u(k) = -(domain_u(k) - uref)/rtau(k)
+ relax_v(k) = -(domain_v(k) - vref)/rtau(k)
! Restrict nudging of T and Q to certain levels if requested by user
! pmidm1 variable is in unitis of [Pa], while iop_nudge_tq_low/high
@@ -584,7 +603,51 @@ subroutine iop_domain_relaxation(elem,hvcoord,hybrid,t1,dp,nelemd_todo,np_todo,d
end subroutine iop_domain_relaxation
-!=========================================================================
+subroutine iop_apply_coriolis(elem,t1,nelemd_todo,np_todo,dt)
+
+ ! Subroutine to provide Coriolis forcing to u and v winds, using geostrophic
+ ! winds specified in IOP forcing file.
+
+ use kinds, only : real_kind
+ use iop_data_mod
+ use dimensions_mod, only : np, nlev, npsq, nelem
+ use parallel_mod, only: global_shared_buf, global_shared_sum
+ use global_norms_mod, only: wrap_repro_sum
+ use hybvcoord_mod, only : hvcoord_t
+ use hybrid_mod, only : hybrid_t
+ use element_mod, only : element_t
+ use physical_constants, only : Cp, Rgas, DD_PI
+ use shr_const_mod, only: shr_const_omega
+
+ ! Input/Output variables
+ type (element_t) , intent(inout), target :: elem(:)
+ integer, intent(in) :: nelemd_todo, np_todo, t1
+ real (kind=real_kind), intent(in):: dt
+
+ ! local variables
+ integer :: i,j,k, ie
+
+ real(kind=real_kind) :: fcor, u_cor, v_cor
+
+ ! compute Coriolis parameter from the SCM latitude
+ fcor = 2._real_kind*shr_const_omega*sin(scmlat*DD_PI/180._real_kind)
+
+ do ie=1,nelemd_todo
+ do j=1,np_todo
+ do i=1,np_todo
+ do k=1,nlev
+
+ u_cor = fcor * (elem(ie)%state%v(i,j,2,k,t1) - vls(k))
+ v_cor = fcor * (elem(ie)%state%v(i,j,1,k,t1) - uls(k))
+
+ elem(ie)%state%v(i,j,1,k,t1) = elem(ie)%state%v(i,j,1,k,t1) + u_cor * dt
+ elem(ie)%state%v(i,j,2,k,t1) = elem(ie)%state%v(i,j,2,k,t1) - v_cor * dt
+ enddo
+ enddo
+ enddo
+ enddo
+
+end subroutine iop_apply_coriolis
#ifdef MODEL_THETA_L
subroutine crm_resolved_turb(elem,hvcoord,hybrid,t1,&
diff --git a/components/eam/src/dynamics/se/stepon.F90 b/components/eam/src/dynamics/se/stepon.F90
index 2e7630a47539..831fd6d87603 100644
--- a/components/eam/src/dynamics/se/stepon.F90
+++ b/components/eam/src/dynamics/se/stepon.F90
@@ -213,7 +213,7 @@ subroutine stepon_run1( dtime_out, phys_state, phys_tend, &
! doiopupdate set to true if model time step > next available IOP
if (use_iop .and. masterproc) then
- if (is_first_step()) then
+ if (is_first_step() .or. is_first_restart_step()) then
call setiopupdate_init()
else
call setiopupdate
diff --git a/components/eam/src/physics/cam/bfb_math.inc b/components/eam/src/physics/cam/bfb_math.inc
index c95cb5bc530a..c4d49103828e 100644
--- a/components/eam/src/physics/cam/bfb_math.inc
+++ b/components/eam/src/physics/cam/bfb_math.inc
@@ -6,8 +6,8 @@
! Make sure to place the following lines at the top of any modules
! that use these macros:
!
-! use physics_share_f2c, only: cxx_pow, cxx_sqrt, cxx_cbrt, cxx_gamma, cxx_log, &
-! cxx_log10, cxx_exp, cxx_tanh, cxx_erf
+! use physics_share_f2c, only: scream_pow, scream_sqrt, scream_cbrt, scream_gamma, scream_log, &
+! scream_log10, scream_exp, scream_tanh, scream_erf
#ifndef SCREAM_BFB_MATH_INC
#define SCREAM_BFB_MATH_INC
@@ -30,16 +30,16 @@
# define bfb_tanh(val) tanh(val)
# define bfb_erf(val) erf(val)
#else
-# define bfb_pow(base, exp) cxx_pow(base, exp)
-# define bfb_sqrt(base) cxx_sqrt(base)
-# define bfb_cbrt(base) cxx_cbrt(base)
-# define bfb_gamma(val) cxx_gamma(val)
-# define bfb_log(val) cxx_log(val)
-# define bfb_log10(val) cxx_log10(val)
-# define bfb_exp(val) cxx_exp(val)
-# define bfb_expm1(val) cxx_expm1(val)
-# define bfb_tanh(val) cxx_tanh(val)
-# define bfb_erf(val) cxx_erf(val)
+# define bfb_pow(base, exp) scream_pow(base, exp)
+# define bfb_sqrt(base) scream_sqrt(base)
+# define bfb_cbrt(base) scream_cbrt(base)
+# define bfb_gamma(val) scream_gamma(val)
+# define bfb_log(val) scream_log(val)
+# define bfb_log10(val) scream_log10(val)
+# define bfb_exp(val) scream_exp(val)
+# define bfb_expm1(val) scream_expm1(val)
+# define bfb_tanh(val) scream_tanh(val)
+# define bfb_erf(val) scream_erf(val)
#endif
#endif
diff --git a/components/eam/src/physics/cam/physpkg.F90 b/components/eam/src/physics/cam/physpkg.F90
index 67262ff3215e..5f942152a20f 100644
--- a/components/eam/src/physics/cam/physpkg.F90
+++ b/components/eam/src/physics/cam/physpkg.F90
@@ -2867,8 +2867,8 @@ subroutine tphysbc (ztodt, &
call check_energy_chng(state, tend, "clubb_tend", nstep, ztodt, &
cam_in%cflx(:,1)/cld_macmic_num_steps, flx_cnd/cld_macmic_num_steps, &
det_ice/cld_macmic_num_steps, flx_heat/cld_macmic_num_steps)
-
-
+
+
endif
diff --git a/components/eam/src/physics/cam/shoc.F90 b/components/eam/src/physics/cam/shoc.F90
index a245516eeeeb..f354ac8c71b7 100644
--- a/components/eam/src/physics/cam/shoc.F90
+++ b/components/eam/src/physics/cam/shoc.F90
@@ -17,8 +17,8 @@ module shoc
! Bit-for-bit math functions.
#ifdef SCREAM_CONFIG_IS_CMAKE
- use physics_share_f2c, only: cxx_pow, cxx_sqrt, cxx_cbrt, cxx_gamma, cxx_log, &
- cxx_log10, cxx_exp, cxx_erf
+ use physics_share_f2c, only: scream_pow, scream_sqrt, scream_cbrt, scream_gamma, scream_log, &
+ scream_log10, scream_exp, scream_erf
#endif
implicit none
@@ -46,6 +46,7 @@ module shoc
real(rtype) :: lice ! latent heat of fusion [J/kg]
real(rtype) :: eps ! rh2o/rair - 1 [-]
real(rtype) :: vk ! von karmann constant [-]
+real(rtype) :: p0 ! Reference pressure, Pa
!=========================================================
! Tunable parameters used in SHOC
@@ -65,10 +66,8 @@ module shoc
real(rtype) :: lambda_thresh = 0.02_rtype ! value to apply stability correction
real(rtype) :: Ckh = 0.1_rtype ! Eddy diffusivity coefficient for heat
real(rtype) :: Ckm = 0.1_rtype ! Eddy diffusivity coefficient for momentum
-real(rtype) :: Ckh_s_min = 0.1_rtype ! Stable PBL diffusivity minimum for heat
-real(rtype) :: Ckm_s_min = 0.1_rtype ! Stable PBL diffusivity minimum for momentum
-real(rtype) :: Ckh_s_max = 0.1_rtype ! Stable PBL diffusivity maximum for heat
-real(rtype) :: Ckm_s_max = 0.1_rtype ! Stable PBL diffusivity maximum for momentum
+real(rtype) :: Ckh_s = 0.1_rtype ! Stable PBL diffusivity for heat
+real(rtype) :: Ckm_s = 0.1_rtype ! Stable PBL diffusivity for momentum
!=========================================================
! Private module parameters
@@ -127,13 +126,12 @@ module shoc
subroutine shoc_init( &
nlev, gravit, rair, rh2o, cpair, &
- zvir, latvap, latice, karman, &
+ zvir, latvap, latice, karman, p0_shoc, &
pref_mid, nbot_shoc, ntop_shoc, &
thl2tune_in, qw2tune_in, qwthl2tune_in, &
w2tune_in, length_fac_in, c_diag_3rd_mom_in, &
lambda_low_in, lambda_high_in, lambda_slope_in, &
- lambda_thresh_in, Ckh_in, Ckm_in, Ckh_s_min_in, &
- Ckm_s_min_in, Ckh_s_max_in, Ckm_s_max_in)
+ lambda_thresh_in, Ckh_in, Ckm_in, Ckh_s_in, Ckm_s_in)
implicit none
@@ -151,6 +149,7 @@ subroutine shoc_init( &
real(rtype), intent(in) :: latvap ! latent heat of vaporization
real(rtype), intent(in) :: latice ! latent heat of fusion
real(rtype), intent(in) :: karman ! Von Karman's constant
+ real(rtype), intent(in) :: p0_shoc ! Reference pressure, Pa
real(rtype), intent(in) :: pref_mid(nlev) ! reference pressures at midpoints
@@ -170,10 +169,8 @@ subroutine shoc_init( &
real(rtype), intent(in), optional :: lambda_thresh_in ! value to apply stability correction
real(rtype), intent(in), optional :: Ckh_in ! eddy diffusivity coefficient for heat
real(rtype), intent(in), optional :: Ckm_in ! eddy diffusivity coefficient for momentum
- real(rtype), intent(in), optional :: Ckh_s_min_in ! Stable PBL diffusivity minimum for heat
- real(rtype), intent(in), optional :: Ckm_s_min_in ! Stable PBL diffusivity minimum for momentum
- real(rtype), intent(in), optional :: Ckh_s_max_in ! Stable PBL diffusivity maximum for heat
- real(rtype), intent(in), optional :: Ckm_s_max_in ! Stable PBL diffusivity maximum for momentum
+ real(rtype), intent(in), optional :: Ckh_s_in ! Stable PBL diffusivity for heat
+ real(rtype), intent(in), optional :: Ckm_s_in ! Stable PBL diffusivity for momentum
integer :: k
@@ -185,6 +182,7 @@ subroutine shoc_init( &
lcond = latvap ! [J/kg]
lice = latice ! [J/kg]
vk = karman ! [-]
+ p0 = p0_shoc ! [Pa]
! Tunable parameters, all unitless
! override default values if value is present
@@ -200,10 +198,8 @@ subroutine shoc_init( &
if (present(lambda_thresh_in)) lambda_thresh=lambda_thresh_in
if (present(Ckh_in)) Ckh=Ckh_in
if (present(Ckm_in)) Ckm=Ckm_in
- if (present(Ckh_s_min_in)) Ckh_s_min=Ckh_s_min_in
- if (present(Ckm_s_min_in)) Ckm_s_min=Ckm_s_min_in
- if (present(Ckh_s_max_in)) Ckh_s_max=Ckh_s_max_in
- if (present(Ckm_s_max_in)) Ckm_s_max=Ckm_s_max_in
+ if (present(Ckh_s_in)) Ckh_s=Ckh_s_in
+ if (present(Ckm_s_in)) Ckm_s=Ckm_s_in
! Limit pbl height to regions below 400 mb
! npbl = max number of levels (from bottom) in pbl
@@ -381,6 +377,8 @@ subroutine shoc_main ( &
real(rtype) :: rho_zt(shcol,nlev)
! SHOC water vapor [kg/kg]
real(rtype) :: shoc_qv(shcol,nlev)
+ ! SHOC temperature [K]
+ real(rtype) :: shoc_tabs(shcol,nlev)
! Grid difference centereted on thermo grid [m]
real(rtype) :: dz_zt(shcol,nlev)
@@ -462,6 +460,11 @@ subroutine shoc_main ( &
shcol,nlev,qw,shoc_ql,& ! Input
shoc_qv) ! Output
+ ! Diagnose absolute temperature
+ call compute_shoc_temperature(&
+ shcol,nlev,thetal,shoc_ql,inv_exner,& ! Input
+ shoc_tabs) ! Output
+
call shoc_diag_obklen(&
shcol,uw_sfc,vw_sfc,& ! Input
wthl_sfc,wqw_sfc,thetal(:shcol,nlev),& ! Input
@@ -487,8 +490,8 @@ subroutine shoc_main ( &
call shoc_tke(&
shcol,nlev,nlevi,dtime,& ! Input
wthv_sec,shoc_mix,& ! Input
- dz_zi,dz_zt,pres,& ! Input
- u_wind,v_wind,brunt,obklen,& ! Input
+ dz_zi,dz_zt,pres,shoc_tabs,& ! Input
+ u_wind,v_wind,brunt,& ! Input
zt_grid,zi_grid,pblh,& ! Input
tke,tk,tkh,& ! Input/Output
isotropy) ! Output
@@ -730,6 +733,59 @@ subroutine compute_shoc_vapor( &
end subroutine compute_shoc_vapor
+!==============================================================
+! Compute temperature from SHOC prognostic/diagnostic variables
+
+subroutine compute_shoc_temperature( &
+ shcol,nlev,thetal,ql,inv_exner,& ! Input
+ tabs) ! Output
+
+ ! Purpose of this subroutine is to compute temperature
+ ! based on SHOC's prognostic liquid water potential
+ ! temperature.
+
+#ifdef SCREAM_CONFIG_IS_CMAKE
+ use shoc_iso_f, only: compute_shoc_temperature_f
+#endif
+
+ implicit none
+
+! INPUT VARIABLES
+ ! number of columns [-]
+ integer, intent(in) :: shcol
+ ! number of mid-point levels [-]
+ integer, intent(in) :: nlev
+ ! liquid water potential temperature [K]
+ real(rtype), intent(in) :: thetal(shcol,nlev)
+ ! cloud water mixing ratio [kg/kg]
+ real(rtype), intent(in) :: ql(shcol,nlev)
+ ! inverse exner function [-]
+ real(rtype), intent(in) :: inv_exner(shcol,nlev)
+
+! OUTPUT VARIABLES
+ ! absolute temperature [K]
+ real(rtype), intent(out) :: tabs(shcol,nlev)
+
+! LOCAL VARIABLES
+ integer :: i, k
+
+#ifdef SCREAM_CONFIG_IS_CMAKE
+ if (use_cxx) then
+ call compute_shoc_temperature_f(shcol,nlev,thetal,ql,inv_exner,tabs)
+ return
+ endif
+#endif
+
+ do k = 1, nlev
+ do i = 1, shcol
+ tabs(i,k) = thetal(i,k)/inv_exner(i,k)+(lcond/cp)*ql(i,k)
+ enddo
+ enddo
+
+ return
+
+end subroutine compute_shoc_temperature
+
!==============================================================
! Update T, q, tracers, tke, u, and v based on implicit diffusion
! Here we use a backward Euler scheme.
@@ -2864,8 +2920,8 @@ subroutine shoc_assumed_pdf_compute_s(&
qn=s
endif
endif
-
- ! Prevent possibility of empty clouds or rare occurence of
+
+ ! Prevent possibility of empty clouds or rare occurrence of
! cloud liquid less than zero
if (qn .le. 0._rtype) then
C=0._rtype
@@ -2969,8 +3025,8 @@ end subroutine shoc_assumed_pdf_compute_buoyancy_flux
subroutine shoc_tke(&
shcol,nlev,nlevi,dtime,& ! Input
wthv_sec,shoc_mix,& ! Input
- dz_zi,dz_zt,pres,& ! Input
- u_wind,v_wind,brunt,obklen,&! Input
+ dz_zi,dz_zt,pres,tabs,& ! Input
+ u_wind,v_wind,brunt,& ! Input
zt_grid,zi_grid,pblh,& ! Input
tke,tk,tkh, & ! Input/Output
isotropy) ! Output
@@ -2997,14 +3053,14 @@ subroutine shoc_tke(&
real(rtype), intent(in) :: u_wind(shcol,nlev)
! Zonal wind [m/s]
real(rtype), intent(in) :: v_wind(shcol,nlev)
- ! Obukov length
- real(rtype), intent(in) :: obklen(shcol)
! thickness on interface grid [m]
real(rtype), intent(in) :: dz_zi(shcol,nlevi)
! thickness on thermodynamic grid [m]
real(rtype), intent(in) :: dz_zt(shcol,nlev)
! pressure [Pa]
real(rtype), intent(in) :: pres(shcol,nlev)
+ ! absolute temperature [K]
+ real(rtype), intent(in) :: tabs(shcol,nlev)
! Brunt Vaisalla frequncy [/s]
real(rtype), intent(in) :: brunt(shcol,nlev)
! heights on midpoint grid [m]
@@ -3052,7 +3108,7 @@ subroutine shoc_tke(&
call isotropic_ts(nlev, shcol, brunt_int, tke, a_diss, brunt, isotropy)
!Compute eddy diffusivity for heat and momentum
- call eddy_diffusivities(nlev, shcol, obklen, pblh, zt_grid, &
+ call eddy_diffusivities(nlev, shcol, pblh, zt_grid, tabs, &
shoc_mix, sterm_zt, isotropy, tke, tkh, tk)
return
@@ -3316,7 +3372,7 @@ subroutine isotropic_ts(nlev, shcol, brunt_int, tke, a_diss, brunt, isotropy)
end subroutine isotropic_ts
-subroutine eddy_diffusivities(nlev, shcol, obklen, pblh, zt_grid, &
+subroutine eddy_diffusivities(nlev, shcol, pblh, zt_grid, tabs, &
shoc_mix, sterm_zt, isotropy, tke, tkh, tk)
!------------------------------------------------------------
@@ -3332,12 +3388,12 @@ subroutine eddy_diffusivities(nlev, shcol, obklen, pblh, zt_grid, &
!intent-ins
integer, intent(in) :: nlev, shcol
- ! Monin-Okbukov length [m]
- real(rtype), intent(in) :: obklen(shcol)
! PBL height [m]
real(rtype), intent(in) :: pblh(shcol)
! Heights on the mid-point grid [m]
real(rtype), intent(in) :: zt_grid(shcol,nlev)
+ ! Absolute temperature [K]
+ real(rtype), intent(in) :: tabs(shcol,nlev)
! Mixing length [m]
real(rtype), intent(in) :: shoc_mix(shcol,nlev)
! Interpolate shear production to thermo grid
@@ -3355,48 +3411,30 @@ subroutine eddy_diffusivities(nlev, shcol, obklen, pblh, zt_grid, &
!local vars
integer :: i, k
- real(rtype) :: z_over_L, zt_grid_1d(shcol)
- real(rtype) :: Ckh_s, Ckm_s
!parameters
- ! Critical value of dimensionless Monin-Obukhov length,
- ! for which diffusivities are no longer damped
- real(rtype), parameter :: zL_crit_val = 100.0_rtype
+ ! Minimum absolute temperature threshold for which to apply extra mixing [K]
+ real(rtype), parameter :: temp_crit = 182.0_rtype
! Transition depth [m] above PBL top to allow
! stability diffusivities
real(rtype), parameter :: pbl_trans = 200.0_rtype
#ifdef SCREAM_CONFIG_IS_CMAKE
if (use_cxx) then
- call eddy_diffusivities_f(nlev, shcol, obklen, pblh, zt_grid, &
+ call eddy_diffusivities_f(nlev, shcol, pblh, zt_grid, tabs, &
shoc_mix, sterm_zt, isotropy, tke, tkh, tk)
return
endif
#endif
- !store zt_grid at nlev in 1d array
- zt_grid_1d(1:shcol) = zt_grid(1:shcol,nlev)
-
do k = 1, nlev
do i = 1, shcol
- ! Dimensionless Okukhov length considering only
- ! the lowest model grid layer height to scale
- z_over_L = zt_grid_1d(i)/obklen(i)
+ if (tabs(i,nlev) .lt. temp_crit .and. (zt_grid(i,k) .lt. pblh(i)+pbl_trans)) then
+ ! If surface layer temperature is running away, apply extra mixing
+ ! based on traditional stable PBL diffusivities that are not damped
+ ! by stability functions.
- if (z_over_L .gt. 0._rtype .and. (zt_grid(i,k) .lt. pblh(i)+pbl_trans)) then
- ! If surface layer is stable, based on near surface
- ! dimensionless Monin-Obukov use modified coefficients of
- ! tkh and tk that are primarily based on shear production
- ! and SHOC length scale, to promote mixing within the PBL
- ! and to a height slighty above to ensure smooth transition.
-
- ! Compute diffusivity coefficient as function of dimensionless
- ! Obukhov, given a critical value
- Ckh_s = max(Ckh_s_min,min(Ckh_s_max,z_over_L/zL_crit_val))
- Ckm_s = max(Ckm_s_min,min(Ckm_s_max,z_over_L/zL_crit_val))
-
- ! Compute stable PBL diffusivities
tkh(i,k) = Ckh_s*bfb_square(shoc_mix(i,k))*bfb_sqrt(sterm_zt(i,k))
tk(i,k) = Ckm_s*bfb_square(shoc_mix(i,k))*bfb_sqrt(sterm_zt(i,k))
else
@@ -3740,6 +3778,9 @@ subroutine shoc_energy_integrals(&
do k=1,nlev
do i=1,shcol
rvm = rtm(i,k) - rcm(i,k) ! compute water vapor
+
+! Technically wrong: gz should be removed from the geopotential here,
+! but SHOC does not change the gz term.
se_int(i) = se_int(i) + host_dse(i,k)*pdel(i,k)/ggr
ke_int(i) = ke_int(i) + 0.5_rtype*(bfb_square(u_wind(i,k))+bfb_square(v_wind(i,k)))*pdel(i,k)/ggr
wv_int(i) = wv_int(i) + rvm*pdel(i,k)/ggr
@@ -3896,7 +3937,7 @@ subroutine shoc_energy_fixer(&
zt_grid,zi_grid,& ! Input
se_b,ke_b,wv_b,wl_b,& ! Input
se_a,ke_a,wv_a,wl_a,& ! Input
- wthl_sfc,wqw_sfc,rho_zt,& ! Input
+ wthl_sfc,wqw_sfc,rho_zt,pint,& ! Input
te_a, te_b) ! Output
call shoc_energy_threshold_fixer(&
@@ -3921,7 +3962,7 @@ subroutine shoc_energy_total_fixer(&
zt_grid,zi_grid,& ! Input
se_b,ke_b,wv_b,wl_b,& ! Input
se_a,ke_a,wv_a,wl_a,& ! Input
- wthl_sfc,wqw_sfc,rho_zt,& ! Input
+ wthl_sfc,wqw_sfc,rho_zt,pint,& ! Input
te_a, te_b) ! Output
implicit none
@@ -3963,6 +4004,8 @@ subroutine shoc_energy_total_fixer(&
real(rtype), intent(in) :: zi_grid(shcol,nlevi)
! density on midpoint grid [kg/m^3]
real(rtype), intent(in) :: rho_zt(shcol,nlev)
+ ! pressure on interface grid [Pa]
+ real(rtype), intent(in) :: pint(shcol,nlevi)
! OUTPUT VARIABLES
real(rtype), intent(out) :: te_a(shcol)
@@ -3972,7 +4015,7 @@ subroutine shoc_energy_total_fixer(&
! density on interface grid [kg/m^3]
real(rtype) :: rho_zi(shcol,nlevi)
! sensible and latent heat fluxes [W/m^2]
- real(rtype) :: shf, lhf, hdtime
+ real(rtype) :: shf, lhf, hdtime, exner_surf
integer :: i
! compute the host timestep
@@ -3983,8 +4026,10 @@ subroutine shoc_energy_total_fixer(&
! Based on these integrals, compute the total energy before and after SHOC
! call
do i=1,shcol
- ! convert shf and lhf to W/m^2
- shf=wthl_sfc(i)*cp*rho_zi(i,nlevi)
+ ! convert shf and lhf
+ exner_surf = bfb_pow(pint(i,nlevi)/p0, rgas/cp)
+ shf=wthl_sfc(i)*cp*rho_zi(i,nlevi)*exner_surf
+
lhf=wqw_sfc(i)*rho_zi(i,nlevi)
te_a(i) = se_a(i) + ke_a(i) + (lcond+lice)*wv_a(i)+lice*wl_a(i)
te_b(i) = se_b(i) + ke_b(i) + (lcond+lice)*wv_b(i)+lice*wl_b(i)
diff --git a/components/eam/src/physics/cam/shoc_intr.F90 b/components/eam/src/physics/cam/shoc_intr.F90
index a1d4db53181d..f008ffec3e0d 100644
--- a/components/eam/src/physics/cam/shoc_intr.F90
+++ b/components/eam/src/physics/cam/shoc_intr.F90
@@ -6,12 +6,12 @@ module shoc_intr
! by Peter Bogenschutz (Bogenschutz and Krueger 2013). !
! !
! SHOC replaces the exisiting turbulence, shallow convection, and !
- ! macrophysics in E3SM !
- ! !
+ ! macrophysics in E3SM !
+ ! !
! !
!---------------------------Code history---------------------------- !
- ! Authors: P. Bogenschutz !
- ! !
+ ! Authors: P. Bogenschutz !
+ ! !
!------------------------------------------------------------------- !
use shr_kind_mod, only: r8=>shr_kind_r8
@@ -19,18 +19,18 @@ module shoc_intr
use ppgrid, only: pver, pverp
use phys_control, only: phys_getopts
use physconst, only: rair, cpair, gravit, latvap, latice, zvir, &
- rh2o, karman, tms_orocnst, tms_z0fac
+ rh2o, karman, tms_orocnst, tms_z0fac
use constituents, only: pcnst, cnst_add, stateq_names=>cnst_name
use pbl_utils, only: calc_ustar, calc_obklen
use perf_mod, only: t_startf, t_stopf
- use cam_logfile, only: iulog
- use shoc, only: linear_interp, largeneg
+ use cam_logfile, only: iulog
+ use shoc, only: linear_interp, largeneg
use spmd_utils, only: masterproc
use cam_abortutils, only: endrun
-
- implicit none
- public :: shoc_init_cnst, shoc_implements_cnst
+ implicit none
+
+ public :: shoc_init_cnst, shoc_implements_cnst
! define physics buffer indicies here
integer :: tke_idx, & ! turbulent kinetic energy
@@ -38,7 +38,7 @@ module shoc_intr
tk_idx, &
wthv_idx, & ! buoyancy flux
cld_idx, & ! Cloud fraction
- tot_cloud_frac_idx, & ! Cloud fraction with higher ice threshold
+ tot_cloud_frac_idx, & ! Cloud fraction with higher ice threshold
concld_idx, & ! Convective cloud fraction
ast_idx, & ! Stratiform cloud fraction
alst_idx, & ! Liquid stratiform cloud fraction
@@ -62,11 +62,11 @@ module shoc_intr
fice_idx, &
vmag_gust_idx, &
ixq ! water vapor index in state%q array
-
+
integer :: ixtke ! SHOC_TKE index in state%q array
integer :: cmfmc_sh_idx = 0
-
+
real(r8), parameter :: tke_tol = 0.0004_r8
real(r8), parameter :: &
@@ -78,31 +78,22 @@ module shoc_intr
shoc_liq_deep = 8.e-6, &
shoc_liq_sh = 10.e-6, &
shoc_ice_deep = 25.e-6, &
- shoc_ice_sh = 50.e-6
-
+ shoc_ice_sh = 50.e-6
+
logical :: lq(pcnst)
- !lq_dry_wet_cnvr is true for all the water based scalars used by SHOC.
- !These water based scalars will participate in dry/wet mmr conversion
- logical :: lq_dry_wet_cnvr(pcnst)
-
logical :: history_budget
- integer :: history_budget_histfile_num
+ integer :: history_budget_histfile_num
logical :: micro_do_icesupersat
- !Store names of the state%q array scalars (as they appear in the state%q array)
- !which should be "excluded" from wet<->dry mmr conversion
- !NOTE: Scalar name should be exactly same as it appear in the state%q array
- character(len=8), parameter :: dry_wet_exclude_scalars(1) = ['SHOC_TKE']
-
character(len=16) :: eddy_scheme ! Default set in phys_control.F90
- character(len=16) :: deep_scheme ! Default set in phys_control.F90
-
+ character(len=16) :: deep_scheme ! Default set in phys_control.F90
+
real(r8), parameter :: unset_r8 = huge(1.0_r8)
-
+
real(r8) :: shoc_timestep = unset_r8 ! Default SHOC timestep set in namelist
real(r8) :: dp1
-
+
real(r8) :: shoc_thl2tune = unset_r8
real(r8) :: shoc_qw2tune = unset_r8
real(r8) :: shoc_qwthl2tune = unset_r8
@@ -115,52 +106,50 @@ module shoc_intr
real(r8) :: shoc_lambda_thresh = unset_r8
real(r8) :: shoc_Ckh = unset_r8
real(r8) :: shoc_Ckm = unset_r8
- real(r8) :: shoc_Ckh_s_min = unset_r8
- real(r8) :: shoc_Ckm_s_min = unset_r8
- real(r8) :: shoc_Ckh_s_max = unset_r8
- real(r8) :: shoc_Ckm_s_max = unset_r8
+ real(r8) :: shoc_Ckh_s = unset_r8
+ real(r8) :: shoc_Ckm_s = unset_r8
integer :: edsclr_dim
-
+
logical :: prog_modal_aero
real(r8) :: micro_mg_accre_enhan_fac = huge(1.0_r8) !Accretion enhancement factor from namelist
-
+
integer, parameter :: ncnst=1
character(len=8) :: cnst_names(ncnst)
logical :: do_cnst=.true.
-
+
logical :: liqcf_fix = .FALSE. ! HW for liquid cloud fraction fix
- logical :: relvar_fix = .FALSE. !PMA for relvar fix
-
+ logical :: relvar_fix = .FALSE. !PMA for relvar fix
+
contains
-
+
! =============================================================================== !
! !
! =============================================================================== !
-
+
subroutine shoc_register_e3sm()
#ifdef SHOC_SGS
! Add SHOC fields to pbuf
use physics_buffer, only: pbuf_add_field, dtype_r8, dyn_time_lvls
- use ppgrid, only: pver, pverp, pcols
-
+ use ppgrid, only: pver, pverp, pcols
+
call phys_getopts( eddy_scheme_out = eddy_scheme, &
- deep_scheme_out = deep_scheme, &
+ deep_scheme_out = deep_scheme, &
history_budget_out = history_budget, &
history_budget_histfile_num_out = history_budget_histfile_num, &
micro_do_icesupersat_out = micro_do_icesupersat, &
- micro_mg_accre_enhan_fac_out = micro_mg_accre_enhan_fac)
-
- cnst_names=(/'TKE '/)
-
+ micro_mg_accre_enhan_fac_out = micro_mg_accre_enhan_fac)
+
+ cnst_names=(/'TKE '/)
+
! TKE is prognostic in SHOC and should be advected by dynamics
call cnst_add('SHOC_TKE',0._r8,0._r8,0._r8,ixtke,longname='turbulent kinetic energy',cam_outfld=.false.)
-
+
! Fields that are not prognostic should be added to PBUF
call pbuf_add_field('WTHV', 'global', dtype_r8, (/pcols,pver,dyn_time_lvls/), wthv_idx)
- call pbuf_add_field('TKH', 'global', dtype_r8, (/pcols,pver,dyn_time_lvls/), tkh_idx)
- call pbuf_add_field('TK', 'global', dtype_r8, (/pcols,pver,dyn_time_lvls/), tk_idx)
+ call pbuf_add_field('TKH', 'global', dtype_r8, (/pcols,pver,dyn_time_lvls/), tkh_idx)
+ call pbuf_add_field('TK', 'global', dtype_r8, (/pcols,pver,dyn_time_lvls/), tk_idx)
call pbuf_add_field('pblh', 'global', dtype_r8, (/pcols/), pblh_idx)
call pbuf_add_field('tke', 'global', dtype_r8, (/pcols, pverp/), tke_idx)
@@ -178,70 +167,70 @@ subroutine shoc_register_e3sm()
call pbuf_add_field('FICE', 'physpkg',dtype_r8, (/pcols,pver/), fice_idx)
call pbuf_add_field('RAD_CLUBB', 'global', dtype_r8, (/pcols,pver/), radf_idx)
call pbuf_add_field('CMELIQ', 'physpkg',dtype_r8, (/pcols,pver/), cmeliq_idx)
-
+
call pbuf_add_field('vmag_gust', 'global', dtype_r8, (/pcols/), vmag_gust_idx)
-
+
#endif
-
+
end subroutine shoc_register_e3sm
! =============================================================================== !
! !
! =============================================================================== !
-
+
function shoc_implements_cnst(name)
!--------------------------------------------------------------------
! Return true if specified constituent is implemented by this package
!--------------------------------------------------------------------
- character(len=*), intent(in) :: name
+ character(len=*), intent(in) :: name
logical :: shoc_implements_cnst
shoc_implements_cnst = (do_cnst .and. any(name == cnst_names))
end function shoc_implements_cnst
-
+
subroutine shoc_init_cnst(name, q, gcid)
-
+
!------------------------------------------------------------------- !
! Initialize the state for SHOC's prognostic variable !
!------------------------------------------------------------------- !
-
+
character(len=*), intent(in) :: name ! constituent name
real(r8), intent(out) :: q(:,:) ! mass mixing ratio (gcol, plev)
integer, intent(in) :: gcid(:) ! global column id
-
+
#ifdef SHOC_SGS
if (trim(name) == trim('SHOC_TKE')) q = tke_tol
-#endif
+#endif
end subroutine shoc_init_cnst
-
+
! =============================================================================== !
! !
! =============================================================================== !
-
+
subroutine shoc_readnl(nlfile)
-
+
!------------------------------------------------------------------- !
! Read in any namelist parameters here !
! (currently none) !
- !------------------------------------------------------------------- !
+ !------------------------------------------------------------------- !
use units, only: getunit, freeunit
use namelist_utils, only: find_group_name
use mpishorthand
character(len=*), intent(in) :: nlfile ! filepath for file containing namelist input
-
+
integer :: iunit, read_status
-
+
namelist /shocpbl_diff_nl/ shoc_timestep, shoc_thl2tune, shoc_qw2tune, shoc_qwthl2tune, &
shoc_w2tune, shoc_length_fac, shoc_c_diag_3rd_mom, &
shoc_lambda_low, shoc_lambda_high, shoc_lambda_slope, &
- shoc_lambda_thresh, shoc_Ckh, shoc_Ckm, shoc_Ckh_s_min, &
- shoc_Ckm_s_min, shoc_Ckh_s_max, shoc_Ckm_s_max
-
+ shoc_lambda_thresh, shoc_Ckh, shoc_Ckm, shoc_Ckh_s, &
+ shoc_Ckm_s
+
! Read namelist to determine if SHOC history should be called
if (masterproc) then
iunit = getunit()
@@ -257,8 +246,8 @@ subroutine shoc_readnl(nlfile)
close(unit=iunit)
call freeunit(iunit)
- end if
-
+ end if
+
#ifdef SPMD
! Broadcast namelist variables
call mpibcast(shoc_timestep, 1, mpir8, 0, mpicom)
@@ -274,23 +263,21 @@ subroutine shoc_readnl(nlfile)
call mpibcast(shoc_lambda_thresh, 1, mpir8, 0, mpicom)
call mpibcast(shoc_Ckh, 1, mpir8, 0, mpicom)
call mpibcast(shoc_Ckm, 1, mpir8, 0, mpicom)
- call mpibcast(shoc_Ckh_s_min, 1, mpir8, 0, mpicom)
- call mpibcast(shoc_Ckm_s_min, 1, mpir8, 0, mpicom)
- call mpibcast(shoc_Ckh_s_max, 1, mpir8, 0, mpicom)
- call mpibcast(shoc_Ckm_s_max, 1, mpir8, 0, mpicom)
+ call mpibcast(shoc_Ckh_s, 1, mpir8, 0, mpicom)
+ call mpibcast(shoc_Ckm_s, 1, mpir8, 0, mpicom)
#endif
-
+
end subroutine shoc_readnl
-
+
! =============================================================================== !
! !
! =============================================================================== !
-
+
subroutine shoc_init_e3sm(pbuf2d, dp1_in)
!------------------------------------------------------------------- !
! Initialize SHOC for E3SM !
- !------------------------------------------------------------------- !
+ !------------------------------------------------------------------- !
use physics_types, only: physics_state, physics_ptend
use ppgrid, only: pver, pverp, pcols
@@ -300,46 +287,45 @@ subroutine shoc_init_e3sm(pbuf2d, dp1_in)
use physics_buffer, only: pbuf_get_index, pbuf_set_field, &
physics_buffer_desc
use rad_constituents, only: rad_cnst_get_info, rad_cnst_get_mode_num_idx, &
- rad_cnst_get_mam_mmr_idx
- use constituents, only: cnst_get_ind
- use shoc, only: shoc_init
+ rad_cnst_get_mam_mmr_idx
+ use constituents, only: cnst_get_ind
+ use shoc, only: shoc_init
use cam_history, only: horiz_only, addfld, add_default
use error_messages, only: handle_errmsg
- use trb_mtn_stress, only: init_tms
-
+ use trb_mtn_stress, only: init_tms
+
implicit none
! Input Variables
type(physics_buffer_desc), pointer :: pbuf2d(:,:)
-
+
real(r8) :: dp1_in
-
+
integer :: lptr
integer :: nmodes, nspec, m, l, icnst, idw
integer :: ixnumliq
integer :: ntop_shoc
integer :: nbot_shoc
- integer :: sz_dw_sclr !size of dry<->wet conversion excluded scalar array
- character(len=128) :: errstring
+ character(len=128) :: errstring
logical :: history_amwg
-
+
lq(1:pcnst) = .true.
edsclr_dim = pcnst
-
+
!----- Begin Code -----
call cnst_get_ind('Q',ixq) ! get water vapor index from the state%q array
! ----------------------------------------------------------------- !
- ! Determine how many constituents SHOC will transport. Note that
- ! SHOC does not transport aerosol consituents. Therefore, need to
+ ! Determine how many constituents SHOC will transport. Note that
+    ! SHOC does not transport aerosol constituents. Therefore, need to
! determine how many aerosols constituents there are and subtract that
- ! off of pcnst (the total consituents)
+    ! off of pcnst (the total constituents)
! ----------------------------------------------------------------- !
call phys_getopts(prog_modal_aero_out=prog_modal_aero, &
history_amwg_out = history_amwg, &
- liqcf_fix_out = liqcf_fix)
-
+ liqcf_fix_out = liqcf_fix)
+
! Define physics buffers indexes
cld_idx = pbuf_get_index('CLD') ! Cloud fraction
tot_cloud_frac_idx = pbuf_get_index('TOT_CLOUD_FRAC') ! Cloud fraction
@@ -347,7 +333,7 @@ subroutine shoc_init_e3sm(pbuf2d, dp1_in)
ast_idx = pbuf_get_index('AST') ! Stratiform cloud fraction
alst_idx = pbuf_get_index('ALST') ! Liquid stratiform cloud fraction
aist_idx = pbuf_get_index('AIST') ! Ice stratiform cloud fraction
- qlst_idx = pbuf_get_index('QLST') ! Physical in-stratus LWC
+ qlst_idx = pbuf_get_index('QLST') ! Physical in-stratus LWC
qist_idx = pbuf_get_index('QIST') ! Physical in-stratus IWC
dp_frac_idx = pbuf_get_index('DP_FRAC') ! Deep convection cloud fraction
icwmrdp_idx = pbuf_get_index('ICWMRDP') ! In-cloud deep convective mixing ratio
@@ -357,31 +343,31 @@ subroutine shoc_init_e3sm(pbuf2d, dp1_in)
prer_evap_idx = pbuf_get_index('PRER_EVAP')
qrl_idx = pbuf_get_index('QRL')
cmfmc_sh_idx = pbuf_get_index('CMFMC_SH')
- tke_idx = pbuf_get_index('tke')
+ tke_idx = pbuf_get_index('tke')
vmag_gust_idx = pbuf_get_index('vmag_gust')
-
+
if (is_first_step()) then
- call pbuf_set_field(pbuf2d, wthv_idx, 0.0_r8)
- call pbuf_set_field(pbuf2d, tkh_idx, 0.0_r8)
- call pbuf_set_field(pbuf2d, tk_idx, 0.0_r8)
+ call pbuf_set_field(pbuf2d, wthv_idx, 0.0_r8)
+ call pbuf_set_field(pbuf2d, tkh_idx, 0.0_r8)
+ call pbuf_set_field(pbuf2d, tk_idx, 0.0_r8)
call pbuf_set_field(pbuf2d, fice_idx, 0.0_r8)
call pbuf_set_field(pbuf2d, tke_idx, tke_tol)
call pbuf_set_field(pbuf2d, alst_idx, 0.0_r8)
call pbuf_set_field(pbuf2d, aist_idx, 0.0_r8)
-
+
call pbuf_set_field(pbuf2d, vmag_gust_idx, 1.0_r8)
-
+
endif
-
+
if (prog_modal_aero) then
! Turn off modal aerosols and decrement edsclr_dim accordingly
call rad_cnst_get_info(0, nmodes=nmodes)
-
+
do m = 1, nmodes
call rad_cnst_get_mode_num_idx(m, lptr)
lq(lptr)=.false.
edsclr_dim = edsclr_dim-1
-
+
call rad_cnst_get_info(0, m, nspec=nspec)
do l = 1, nspec
call rad_cnst_get_mam_mmr_idx(m, l, lptr)
@@ -389,31 +375,14 @@ subroutine shoc_init_e3sm(pbuf2d, dp1_in)
edsclr_dim = edsclr_dim-1
end do
end do
-
+
! In addition, if running with MAM, droplet number is transported
! in dropmixnuc, therefore we do NOT want SHOC to apply transport
! tendencies to avoid double counted. Else, we apply tendencies.
call cnst_get_ind('NUMLIQ',ixnumliq)
lq(ixnumliq) = .false.
edsclr_dim = edsclr_dim-1
- endif
-
- !SHOC needs all its water based scalars in terms of "dry" mmr
- !but the state vector has all its scalars in terms of "wet" mmr
- !By default, we will include all scalars for dry<->wet conversion
- !Identify scalars which should be "excluded" from the dry<->wet conversions
-
- lq_dry_wet_cnvr(:) = .true. ! lets assume .true. (i.e., all scalars will participate in the conversion process) by default
- sz_dw_sclr = size(dry_wet_exclude_scalars) !size of dry-wet excluded scalar array
- do idw = 1, sz_dw_sclr
- do icnst = 1, pcnst
- if(trim(adjustl(stateq_names(icnst))) == trim(adjustl(dry_wet_exclude_scalars(idw))) )then
- !This "icnst" scalar will NOT participate in dry<->wet conversion
- lq_dry_wet_cnvr(icnst) = .false.
- exit ! exit the loop if we found it!
- endif
- enddo
- enddo
+ endif
! Add SHOC fields
call addfld('SHOC_TKE', (/'lev'/), 'A', 'm2/s2', 'TKE')
@@ -441,6 +410,7 @@ subroutine shoc_init_e3sm(pbuf2d, dp1_in)
call addfld('PRECIPITATING_ICE_FRAC',(/'lev'/), 'A', 'fraction', 'Precipitating ice fraction')
call addfld('LIQ_CLOUD_FRAC',(/'lev'/), 'A', 'fraction', 'Liquid cloud fraction')
call addfld('TOT_CLOUD_FRAC',(/'lev'/), 'A', 'fraction', 'total cloud fraction')
+ call addfld('PBLH',horiz_only,'A','m','PBL height')
call add_default('SHOC_TKE', 1, ' ')
call add_default('WTHV_SEC', 1, ' ')
@@ -471,69 +441,68 @@ subroutine shoc_init_e3sm(pbuf2d, dp1_in)
! ---------------------------------------------------------------!
! Initialize SHOC !
! ---------------------------------------------------------------!
-
+
ntop_shoc = 1 ! if >1, must be <= nbot_molec
- nbot_shoc = pver ! currently always pver
-
+ nbot_shoc = pver ! currently always pver
+
call shoc_init( &
pver, gravit, rair, rh2o, cpair, &
- zvir, latvap, latice, karman, &
+ zvir, latvap, latice, karman, p0_shoc, &
pref_mid, nbot_shoc, ntop_shoc, &
shoc_thl2tune, shoc_qw2tune, shoc_qwthl2tune, &
shoc_w2tune, shoc_length_fac, shoc_c_diag_3rd_mom, &
shoc_lambda_low, shoc_lambda_high, shoc_lambda_slope, &
- shoc_lambda_thresh, shoc_Ckh, shoc_Ckm, shoc_Ckh_s_min, &
- shoc_Ckm_s_min, shoc_Ckh_s_max, shoc_Ckm_s_max )
-
+ shoc_lambda_thresh, shoc_Ckh, shoc_Ckm, shoc_Ckh_s, &
+ shoc_Ckm_s )
+
! --------------- !
! End !
! Initialization !
- ! --------------- !
-
- dp1 = dp1_in
-
- end subroutine shoc_init_e3sm
-
+ ! --------------- !
+
+ dp1 = dp1_in
+
+ end subroutine shoc_init_e3sm
+
! =============================================================================== !
! !
! =============================================================================== !
-
+
subroutine shoc_tend_e3sm( &
state, ptend_all, pbuf, hdtime, &
cmfmc, cam_in, sgh30, &
macmic_it, cld_macmic_num_steps, &
dlf, det_s, det_ice, alst_o)
-
+
!------------------------------------------------------------------- !
! Provide tendencies of shallow convection , turbulence, and !
! macrophysics from SHOC to E3SM !
- !------------------------------------------------------------------- !
-
+ !------------------------------------------------------------------- !
+
use physics_types, only: physics_state, physics_ptend, &
physics_state_copy, physics_ptend_init, &
- physics_ptend_sum
-
+ physics_ptend_sum
+
use physics_update_mod, only: physics_update
use physics_buffer, only: pbuf_get_index, pbuf_old_tim_idx, pbuf_get_field, &
- pbuf_set_field, physics_buffer_desc
-
+ pbuf_set_field, physics_buffer_desc
+
use ppgrid, only: pver, pverp, pcols
use constituents, only: cnst_get_ind
use camsrfexch, only: cam_in_t
- use ref_pres, only: top_lev => trop_cloud_top_lev
- use time_manager, only: is_first_step
+ use ref_pres, only: top_lev => trop_cloud_top_lev
+ use time_manager, only: is_first_step
use wv_saturation, only: qsat
- use micro_mg_cam, only: micro_mg_version
- use cldfrc2m, only: aist_vector
+ use micro_mg_cam, only: micro_mg_version
+ use cldfrc2m, only: aist_vector
use trb_mtn_stress, only: compute_tms
use shoc, only: shoc_main
use cam_history, only: outfld
use iop_data_mod, only: single_column, dp_crm
- use physics_utils, only: calculate_drymmr_from_wetmmr, calculate_wetmmr_from_drymmr
-
+
implicit none
-
+
! --------------- !
! Input Auguments !
! --------------- !
@@ -545,11 +514,11 @@ subroutine shoc_tend_e3sm( &
real(r8), intent(in) :: cmfmc(pcols,pverp) ! convective mass flux--m sub c [kg/m2/s]
real(r8), intent(in) :: sgh30(pcols) ! std deviation of orography [m]
integer, intent(in) :: cld_macmic_num_steps ! number of mac-mic iterations
- integer, intent(in) :: macmic_it ! number of mac-mic iterations
+ integer, intent(in) :: macmic_it ! number of mac-mic iterations
! ---------------------- !
! Input-Output Auguments !
! ---------------------- !
-
+
type(physics_buffer_desc), pointer :: pbuf(:)
! ---------------------- !
@@ -558,19 +527,18 @@ subroutine shoc_tend_e3sm( &
type(physics_ptend), intent(out) :: ptend_all ! package tendencies
- ! These two variables are needed for energy check
+ ! These two variables are needed for energy check
real(r8), intent(out) :: det_s(pcols) ! Integral of detrained static energy from ice
real(r8), intent(out) :: det_ice(pcols) ! Integral of detrained ice for energy check
- real(r8), intent(out) :: alst_o(pcols,pver) ! H. Wang: for old liquid status fraction
-
+ real(r8), intent(out) :: alst_o(pcols,pver) ! H. Wang: for old liquid status fraction
+
! --------------- !
! Local Variables !
! --------------- !
- logical:: convert_back_to_wet(edsclr_dim)! To track scalars which needs a conversion back to wet mmr
integer :: shoctop(pcols)
-
+
#ifdef SHOC_SGS
type(physics_state) :: state1 ! Local copy of state variable
@@ -583,12 +551,11 @@ subroutine shoc_tend_e3sm( &
integer :: err_code ! Diagnostic, for if some calculation goes amiss.
integer :: begin_height, end_height
integer :: icnt
-
- real(r8) :: dtime ! SHOC time step [s]
- real(r8) :: edsclr_in(pcols,pver,edsclr_dim) ! Scalars to be diffused through SHOC [units vary]
+
+ real(r8) :: dtime ! SHOC time step [s]
+ real(r8) :: edsclr_in(pcols,pver,edsclr_dim) ! Scalars to be diffused through SHOC [units vary]
real(r8) :: edsclr_out(pcols,pver,edsclr_dim)
real(r8) :: rcm_in(pcols,pver)
- real(r8) :: qv_wet(pcols,pver), qv_dry(pcols,pver) ! wet [kg/kg-of-wet-air] and dry [kg/kg-of-dry-air] water vapor mmr
real(r8) :: cloudfrac_shoc(pcols,pver)
real(r8) :: newfice(pcols,pver) ! fraction of ice in cloud at CLUBB start [-]
real(r8) :: inv_exner(pcols,pver)
@@ -609,12 +576,12 @@ subroutine shoc_tend_e3sm( &
real(r8) :: cloud_frac(pcols,pver) ! CLUBB cloud fraction [fraction]
real(r8) :: ice_cloud_frac(pcols,pver) ! ice number aware cloud fraction, 0 or 1
real(r8) :: precipitating_ice_frac(pcols,pver) ! precipitating ice fraction, 0 or 1
- real(r8) :: liq_cloud_frac(pcols,pver)
+ real(r8) :: liq_cloud_frac(pcols,pver)
real(r8) :: dlf2(pcols,pver)
real(r8) :: isotropy(pcols,pver)
real(r8) :: host_dx, host_dy
real(r8) :: host_temp(pcols,pver)
- real(r8) :: host_dx_in(pcols), host_dy_in(pcols)
+ real(r8) :: host_dx_in(pcols), host_dy_in(pcols)
real(r8) :: shoc_mix_out(pcols,pver), tk_in(pcols,pver), tkh_in(pcols,pver)
real(r8) :: isotropy_out(pcols,pver), tke_zt(pcols,pver)
real(r8) :: w_sec_out(pcols,pver), thl_sec_out(pcols,pverp)
@@ -631,115 +598,89 @@ subroutine shoc_tend_e3sm( &
real(r8) :: obklen(pcols), ustar2(pcols), kinheat(pcols), kinwat(pcols)
real(r8) :: dummy2(pcols), dummy3(pcols), kbfs(pcols), th(pcols,pver), thv(pcols,pver)
- real(r8) :: thv2(pcols,pver)
-
+ real(r8) :: thv2(pcols,pver)
+
real(r8) :: minqn, rrho(pcols,pver), rrho_i(pcols,pverp) ! minimum total cloud liquid + ice threshold [kg/kg]
real(r8) :: cldthresh, frac_limit
real(r8) :: ic_limit, dum1
real(r8) :: inv_exner_surf, pot_temp
-
+
real(r8) :: wpthlp_sfc(pcols), wprtp_sfc(pcols), upwp_sfc(pcols), vpwp_sfc(pcols)
real(r8) :: wtracer_sfc(pcols,edsclr_dim)
-
+
! Variables below are needed to compute energy integrals for conservation
real(r8) :: ke_a(pcols), ke_b(pcols), te_a(pcols), te_b(pcols)
real(r8) :: wv_a(pcols), wv_b(pcols), wl_b(pcols), wl_a(pcols)
real(r8) :: se_dis(pcols), se_a(pcols), se_b(pcols), shoc_s(pcols,pver)
real(r8) :: shoc_t(pcols,pver)
-
+
! --------------- !
! Pointers !
! --------------- !
-
+
real(r8), pointer, dimension(:,:) :: tke_zi ! turbulent kinetic energy, interface
real(r8), pointer, dimension(:,:) :: wthv ! buoyancy flux
- real(r8), pointer, dimension(:,:) :: tkh
+ real(r8), pointer, dimension(:,:) :: tkh
real(r8), pointer, dimension(:,:) :: tk
real(r8), pointer, dimension(:,:) :: cld ! cloud fraction [fraction]
real(r8), pointer, dimension(:,:) :: tot_cloud_frac ! cloud fraction [fraction]
real(r8), pointer, dimension(:,:) :: concld ! convective cloud fraction [fraction]
real(r8), pointer, dimension(:,:) :: ast ! stratiform cloud fraction [fraction]
real(r8), pointer, dimension(:,:) :: alst ! liquid stratiform cloud fraction [fraction]
- real(r8), pointer, dimension(:,:) :: aist ! ice stratiform cloud fraction [fraction]
- real(r8), pointer, dimension(:,:) :: cmeliq
-
+ real(r8), pointer, dimension(:,:) :: aist ! ice stratiform cloud fraction [fraction]
+ real(r8), pointer, dimension(:,:) :: cmeliq
+
real(r8), pointer, dimension(:,:) :: qlst ! Physical in-stratus LWC [kg/kg]
real(r8), pointer, dimension(:,:) :: qist ! Physical in-stratus IWC [kg/kg]
real(r8), pointer, dimension(:,:) :: deepcu ! deep convection cloud fraction [fraction]
- real(r8), pointer, dimension(:,:) :: shalcu ! shallow convection cloud fraction [fraction]
+ real(r8), pointer, dimension(:,:) :: shalcu ! shallow convection cloud fraction [fraction]
real(r8), pointer, dimension(:,:) :: khzt ! eddy diffusivity on thermo levels [m^2/s]
real(r8), pointer, dimension(:,:) :: khzm ! eddy diffusivity on momentum levels [m^2/s]
real(r8), pointer, dimension(:) :: pblh ! planetary boundary layer height [m]
- real(r8), pointer, dimension(:,:) :: dp_icwmr ! deep convection in cloud mixing ratio [kg/kg]
- real(r8), pointer, dimension(:,:) :: cmfmc_sh ! Shallow convective mass flux--m subc (pcols,pverp) [kg/m2/s/]
+ real(r8), pointer, dimension(:,:) :: dp_icwmr ! deep convection in cloud mixing ratio [kg/kg]
+ real(r8), pointer, dimension(:,:) :: cmfmc_sh ! Shallow convective mass flux--m subc (pcols,pverp) [kg/m2/s/]
- real(r8), pointer, dimension(:,:) :: prer_evap
+ real(r8), pointer, dimension(:,:) :: prer_evap
real(r8), pointer, dimension(:,:) :: accre_enhan
real(r8), pointer, dimension(:,:) :: relvar
-
+
logical :: lqice(pcnst)
real(r8) :: relvarmax
-
+
!------------------------------------------------------------------!
!------------------------------------------------------------------!
!------------------------------------------------------------------!
! MAIN COMPUTATION BEGINS HERE !
!------------------------------------------------------------------!
!------------------------------------------------------------------!
- !------------------------------------------------------------------!
-
+ !------------------------------------------------------------------!
+
! Get indicees for cloud and ice mass and cloud and ice number
ic_limit = 1.e-12_r8
frac_limit = 0.01_r8
-
+
call cnst_get_ind('CLDLIQ',ixcldliq)
call cnst_get_ind('CLDICE',ixcldice)
call cnst_get_ind('NUMLIQ',ixnumliq)
call cnst_get_ind('NUMICE',ixnumice)
-
+
call physics_ptend_init(ptend_loc,state%psetcols, 'shoc', ls=.true., lu=.true., lv=.true., lq=lq)
-
+
call physics_state_copy(state,state1)
-
+
! Determine number of columns and which chunk computation is to be performed on
ncol = state%ncol
- lchnk = state%lchnk
-
- !obtain wet mmr from the state vector
- qv_wet (:,:) = state1%q(:,:,ixq)
- icnt = 0
- do ixind = 1, pcnst
- if (lq(ixind)) then
- icnt = icnt + 1
-
- !Track which scalars need a conversion to wetmmr after SHOC main call
- convert_back_to_wet(icnt) = .false.
-
- if(lq_dry_wet_cnvr(ixind)) then !convert from wet to dry mmr if true
- convert_back_to_wet(icnt) = .true.
- !---------------------------------------------------------------------------------------
- !Wet to dry mixing ratios:
- !-------------------------
- !Since state scalars from the host model are wet mixing ratios and SHOC needs these
- !scalars in dry mixing ratios, we convert the wet mixing ratios to dry mixing ratio
- !if lq_dry_wet_cnvr is .true. for that scalar
- !NOTE:Function calculate_drymmr_from_wetmmr takes 2 arguments: (wet mmr and "wet" water
- !vapor mixing ratio)
- !---------------------------------------------------------------------------------------
- state1%q(:,:,ixind) = calculate_drymmr_from_wetmmr(ncol, pver,state1%q(:,:,ixind), qv_wet)
- endif
- endif
- enddo
+ lchnk = state%lchnk
+
+ ! Determine time step of physics buffer
+ itim_old = pbuf_old_tim_idx()
- ! Determine time step of physics buffer
- itim_old = pbuf_old_tim_idx()
-
- ! Establish associations between pointers and physics buffer fields
+ ! Establish associations between pointers and physics buffer fields
call pbuf_get_field(pbuf, tke_idx, tke_zi)
- call pbuf_get_field(pbuf, wthv_idx, wthv, start=(/1,1,itim_old/), kount=(/pcols,pver,1/))
- call pbuf_get_field(pbuf, tkh_idx, tkh, start=(/1,1,itim_old/), kount=(/pcols,pver,1/))
- call pbuf_get_field(pbuf, tk_idx, tk, start=(/1,1,itim_old/), kount=(/pcols,pver,1/))
+ call pbuf_get_field(pbuf, wthv_idx, wthv, start=(/1,1,itim_old/), kount=(/pcols,pver,1/))
+ call pbuf_get_field(pbuf, tkh_idx, tkh, start=(/1,1,itim_old/), kount=(/pcols,pver,1/))
+ call pbuf_get_field(pbuf, tk_idx, tk, start=(/1,1,itim_old/), kount=(/pcols,pver,1/))
call pbuf_get_field(pbuf, cld_idx, cld, start=(/1,1,itim_old/), kount=(/pcols,pver,1/))
call pbuf_get_field(pbuf, tot_cloud_frac_idx, tot_cloud_frac, start=(/1,1,itim_old/), kount=(/pcols,pver,1/))
call pbuf_get_field(pbuf, concld_idx, concld, start=(/1,1,itim_old/), kount=(/pcols,pver,1/))
@@ -748,7 +689,7 @@ subroutine shoc_tend_e3sm( &
call pbuf_get_field(pbuf, aist_idx, aist, start=(/1,1,itim_old/), kount=(/pcols,pver,1/))
call pbuf_get_field(pbuf, qlst_idx, qlst, start=(/1,1,itim_old/), kount=(/pcols,pver,1/))
call pbuf_get_field(pbuf, qist_idx, qist, start=(/1,1,itim_old/), kount=(/pcols,pver,1/))
-
+
call pbuf_get_field(pbuf, prer_evap_idx, prer_evap)
call pbuf_get_field(pbuf, accre_enhan_idx, accre_enhan)
call pbuf_get_field(pbuf, cmeliq_idx, cmeliq)
@@ -759,40 +700,40 @@ subroutine shoc_tend_e3sm( &
call pbuf_get_field(pbuf, kvh_idx, khzt)
call pbuf_get_field(pbuf, pblh_idx, pblh)
call pbuf_get_field(pbuf, icwmrdp_idx, dp_icwmr)
- call pbuf_get_field(pbuf, cmfmc_sh_idx, cmfmc_sh)
-
+ call pbuf_get_field(pbuf, cmfmc_sh_idx, cmfmc_sh)
+
! Determine SHOC time step.
-
+
dtime = shoc_timestep
-
+
! If requested SHOC timestep is < 0 then set the SHOC time step
! equal to hdtime (the macrophysics/microphysics timestep).
-
+
if (dtime < 0._r8) then
dtime = hdtime
endif
-
+
! Now perform checks to determine if the requested SHOC timestep
! is reasonable based on the host model time step.
-
+
! Is SHOC timestep greater than the macrophysics/microphysics timestep?
if (dtime .gt. hdtime) then
call endrun('shoc_tend_e3sm: Requested SHOC time step is greater than the macrophysics/microphysics timestep')
endif
-
+
! Does SHOC timestep divide evenly into the macrophysics/microphyscs timestep?
if (mod(hdtime,dtime) .ne. 0) then
call endrun('shoc_tend_e3sm: SHOC time step and HOST time step NOT compatible')
endif
! If we survived this far, then the SHOC timestep is valid.
-
- ! determine number of timesteps SHOC core should be advanced,
- ! host time step divided by SHOC time step
+
+ ! determine number of timesteps SHOC core should be advanced,
+ ! host time step divided by SHOC time step
nadv = max(hdtime/dtime,1._r8)
! Set grid space, in meters. If SCM, set to a grid size representative
- ! of a typical GCM. Otherwise, compute locally.
+ ! of a typical GCM. Otherwise, compute locally.
if (single_column .and. .not. dp_crm) then
host_dx_in(:) = 100000._r8
host_dy_in(:) = 100000._r8
@@ -803,55 +744,55 @@ subroutine shoc_tend_e3sm( &
else
call grid_size(state1, host_dx_in, host_dy_in)
endif
-
+
minqn = 0._r8
newfice(:,:) = 0._r8
where(state1%q(:ncol,:pver,ixcldice) .gt. minqn) &
- newfice(:ncol,:pver) = state1%q(:ncol,:pver,ixcldice)/(state1%q(:ncol,:pver,ixcldliq)+state1%q(:ncol,:pver,ixcldice))
-
+ newfice(:ncol,:pver) = state1%q(:ncol,:pver,ixcldice)/(state1%q(:ncol,:pver,ixcldliq)+state1%q(:ncol,:pver,ixcldice))
+
! TODO: Create a general function to calculate Exner's formula - see full
! comment in micro_p3_interface.F90
do k=1,pver
do i=1,ncol
inv_exner(i,k) = 1._r8/((state1%pmid(i,k)/p0_shoc)**(rair/cpair))
enddo
- enddo
-
- ! At each SHOC call, initialize mean momentum and thermo SHOC state
+ enddo
+
+ ! At each SHOC call, initialize mean momentum and thermo SHOC state
! from the E3SM state
-
+
do k=1,pver ! loop over levels
do i=1,ncol ! loop over columns
-
+
rvm(i,k) = state1%q(i,k,ixq)
rcm(i,k) = state1%q(i,k,ixcldliq)
rtm(i,k) = rvm(i,k) + rcm(i,k)
um(i,k) = state1%u(i,k)
vm(i,k) = state1%v(i,k)
-
+
pot_temp = state1%t(i,k)*inv_exner(i,k)
thlm(i,k) = pot_temp-(pot_temp/state1%t(i,k))*(latvap/cpair)*state1%q(i,k,ixcldliq)
- thv(i,k) = state1%t(i,k)*inv_exner(i,k)*(1.0_r8+zvir*state1%q(i,k,ixq)-state1%q(i,k,ixcldliq))
-
+ thv(i,k) = state1%t(i,k)*inv_exner(i,k)*(1.0_r8+zvir*state1%q(i,k,ixq)-state1%q(i,k,ixcldliq))
+
tke_zt(i,k) = max(tke_tol,state1%q(i,k,ixtke))
-
- ! Cloud fraction needs to be initialized for first
+
+ ! Cloud fraction needs to be initialized for first
! PBL height calculation call
- cloud_frac(i,k) = alst(i,k)
-
+ cloud_frac(i,k) = alst(i,k)
+
enddo
- enddo
-
+ enddo
+
! ------------------------------------------------- !
! Prepare inputs for SHOC call !
- ! ------------------------------------------------- !
-
+ ! ------------------------------------------------- !
+
do k=1,pver
do i=1,ncol
dz_g(i,k) = state1%zi(i,k)-state1%zi(i,k+1) ! compute thickness
enddo
enddo
-
+
! Define the SHOC thermodynamic grid (in units of m)
wm_zt(:,pver) = 0._r8
do k=1,pver
@@ -862,7 +803,7 @@ subroutine shoc_tend_e3sm( &
shoc_s(i,k) = state1%s(i,k)
enddo
enddo
-
+
do k=1,pverp
do i=1,ncol
zi_g(i,k) = state1%zi(i,k)-state1%zi(i,pver+1)
@@ -883,14 +824,14 @@ subroutine shoc_tend_e3sm( &
wprtp_sfc(i) = cam_in%cflx(i,1)/(rrho_i(i,pverp)) ! Latent heat flux
upwp_sfc(i) = cam_in%wsx(i)/rrho_i(i,pverp) ! Surface meridional momentum flux
- vpwp_sfc(i) = cam_in%wsy(i)/rrho_i(i,pverp) ! Surface zonal momentum flux
+ vpwp_sfc(i) = cam_in%wsy(i)/rrho_i(i,pverp) ! Surface zonal momentum flux
wtracer_sfc(i,:) = 0._r8 ! in E3SM tracer fluxes are done elsewhere
- enddo
-
- ! Do the same for tracers
+ enddo
+
+ ! Do the same for tracers
icnt=0
do ixind=1,pcnst
- if (lq(ixind)) then
+ if (lq(ixind)) then
icnt=icnt+1
do k=1,pver
do i=1,ncol
@@ -898,67 +839,43 @@ subroutine shoc_tend_e3sm( &
enddo
enddo
end if
- enddo
-
+ enddo
+
! ------------------------------------------------- !
! Actually call SHOC !
- ! ------------------------------------------------- !
+ ! ------------------------------------------------- !
call shoc_main( &
ncol, pver, pverp, dtime, nadv, & ! Input
- host_dx_in(:ncol), host_dy_in(:ncol), thv(:ncol,:),& ! Input
+ host_dx_in(:ncol), host_dy_in(:ncol), thv(:ncol,:),& ! Input
zt_g(:ncol,:), zi_g(:ncol,:), state%pmid(:ncol,:pver), state%pint(:ncol,:pverp), state1%pdel(:ncol,:pver),& ! Input
- wpthlp_sfc(:ncol), wprtp_sfc(:ncol), upwp_sfc(:ncol), vpwp_sfc(:ncol), & ! Input
- wtracer_sfc(:ncol,:), edsclr_dim, wm_zt(:ncol,:), & ! Input
- inv_exner(:ncol,:),state1%phis(:ncol), & ! Input
- shoc_s(:ncol,:), tke_zt(:ncol,:), thlm(:ncol,:), rtm(:ncol,:), & ! Input/Ouput
- um(:ncol,:), vm(:ncol,:), edsclr_in(:ncol,:,:), & ! Input/Output
- wthv(:ncol,:),tkh(:ncol,:),tk(:ncol,:), & ! Input/Output
- rcm(:ncol,:),cloud_frac(:ncol,:), & ! Input/Output
+ wpthlp_sfc(:ncol), wprtp_sfc(:ncol), upwp_sfc(:ncol), vpwp_sfc(:ncol), & ! Input
+ wtracer_sfc(:ncol,:), edsclr_dim, wm_zt(:ncol,:), & ! Input
+ inv_exner(:ncol,:),state1%phis(:ncol), & ! Input
+ shoc_s(:ncol,:), tke_zt(:ncol,:), thlm(:ncol,:), rtm(:ncol,:), & ! Input/Output
+ um(:ncol,:), vm(:ncol,:), edsclr_in(:ncol,:,:), & ! Input/Output
+ wthv(:ncol,:),tkh(:ncol,:),tk(:ncol,:), & ! Input/Output
+ rcm(:ncol,:),cloud_frac(:ncol,:), & ! Input/Output
pblh(:ncol), & ! Output
shoc_mix_out(:ncol,:), isotropy_out(:ncol,:), & ! Output (diagnostic)
- w_sec_out(:ncol,:), thl_sec_out(:ncol,:), qw_sec_out(:ncol,:), qwthl_sec_out(:ncol,:), & ! Output (diagnostic)
+ w_sec_out(:ncol,:), thl_sec_out(:ncol,:), qw_sec_out(:ncol,:), qwthl_sec_out(:ncol,:), & ! Output (diagnostic)
wthl_sec_out(:ncol,:), wqw_sec_out(:ncol,:), wtke_sec_out(:ncol,:), & ! Output (diagnostic)
uw_sec_out(:ncol,:), vw_sec_out(:ncol,:), w3_out(:ncol,:), & ! Output (diagnostic)
wqls_out(:ncol,:),brunt_out(:ncol,:),rcm2(:ncol,:)) ! Output (diagnostic)
-
+
! Transfer back to pbuf variables
-
+
do k=1,pver
- do i=1,ncol
+ do i=1,ncol
cloud_frac(i,k) = min(cloud_frac(i,k),1._r8)
enddo
enddo
-
- !obtain water vapor mmr which is a "dry" mmr at this point from the SHOC output
- qv_dry(:,:) = edsclr_in(:,:,ixq)
- !----------------
- !DRY-TO-WET MMRs:
- !----------------
- !Since the host model needs wet mixing ratio tendencies(state vector has wet mixing ratios),
- !we need to convert dry mixing ratios from SHOC to wet mixing ratios before extracting tendencies
- !NOTE:Function calculate_wetmmr_from_drymmr takes 2 arguments: (wet mmr and "dry" water vapor
- !mixing ratio)
- do ixind=1,edsclr_dim
- if(convert_back_to_wet(ixind)) then
- edsclr_out(:,:,ixind) = calculate_wetmmr_from_drymmr(ncol, pver, edsclr_in(:,:,ixind), qv_dry)
- else
- edsclr_out(:,:,ixind) = edsclr_in(:,:,ixind)
- endif
- enddo
- !convert state1%q to wet mixing ratios
- qv_dry(:,:) = state1%q(:,:,ixq)
- icnt = 0
- do ixind = 1, pcnst
- if (lq(ixind)) then
- icnt = icnt + 1
- if(convert_back_to_wet(icnt)) then !convert from wet to dry mmr if true
- state1%q(:,:,ixind) = calculate_wetmmr_from_drymmr(ncol, pver, state1%q(:,:,ixind), qv_dry)
- endif
- endif
- enddo
- rcm(:,:) = calculate_wetmmr_from_drymmr(ncol, pver, rcm, qv_dry)
- rtm(:,:) = calculate_wetmmr_from_drymmr(ncol, pver, rtm, qv_dry)
+
+ ! Copy SHOC-updated tracers straight from edsclr_in to edsclr_out
+ ! (no dry-to-wet mixing-ratio conversion is performed here)
+ do ixind=1,edsclr_dim
+ edsclr_out(:,:,ixind) = edsclr_in(:,:,ixind)
+ enddo
+
! Eddy diffusivities and TKE are needed for aerosol activation code.
! Linearly interpolate from midpoint grid and onto the interface grid.
@@ -974,17 +891,17 @@ subroutine shoc_tend_e3sm( &
! Now compute the tendencies of SHOC to E3SM
do k=1,pver
do i=1,ncol
-
+
ptend_loc%u(i,k) = (um(i,k)-state1%u(i,k))/hdtime
- ptend_loc%v(i,k) = (vm(i,k)-state1%v(i,k))/hdtime
+ ptend_loc%v(i,k) = (vm(i,k)-state1%v(i,k))/hdtime
ptend_loc%q(i,k,ixq) = (rtm(i,k)-rcm(i,k)-state1%q(i,k,ixq))/hdtime ! water vapor
ptend_loc%q(i,k,ixcldliq) = (rcm(i,k)-state1%q(i,k,ixcldliq))/hdtime ! Tendency of liquid water
ptend_loc%s(i,k) = (shoc_s(i,k)-state1%s(i,k))/hdtime
-
+
ptend_loc%q(i,k,ixtke)=(tke_zt(i,k)-state1%q(i,k,ixtke))/hdtime ! TKE
-
+
! Apply tendencies to ice mixing ratio, liquid and ice number, and aerosol constituents.
- ! Loading up this array doesn't mean the tendencies are applied.
+ ! Loading up this array doesn't mean the tendencies are applied.
! edsclr_out is compressed with just the constituents being used, ptend and state are not compressed
icnt=0
@@ -992,45 +909,48 @@ subroutine shoc_tend_e3sm( &
if (lq(ixind)) then
icnt=icnt+1
if ((ixind /= ixq) .and. (ixind /= ixcldliq) .and. (ixind /= ixtke)) then
- ptend_loc%q(i,k,ixind) = (edsclr_out(i,k,icnt)-state1%q(i,k,ixind))/hdtime ! transported constituents
+ ptend_loc%q(i,k,ixind) = (edsclr_out(i,k,icnt)-state1%q(i,k,ixind))/hdtime ! transported constituents
end if
end if
enddo
enddo
- enddo
-
+ enddo
+
cmeliq(:,:) = ptend_loc%q(:,:,ixcldliq)
-
+
! Update physics tendencies
call physics_ptend_init(ptend_all, state%psetcols, 'shoc')
call physics_ptend_sum(ptend_loc,ptend_all,ncol)
call physics_update(state1,ptend_loc,hdtime)
-
+
+ ! ------------------------------------------------------------ !
! ------------------------------------------------------------ !
- ! ------------------------------------------------------------ !
! ------------------------------------------------------------ !
! The rest of the code deals with diagnosing variables !
! for microphysics/radiation computation and macrophysics !
! ------------------------------------------------------------ !
! ------------------------------------------------------------ !
- ! ------------------------------------------------------------ !
-
- ! --------------------------------------------------------------------------------- !
+ ! ------------------------------------------------------------ !
+
+ ! --------------------------------------------------------------------------------- !
! COMPUTE THE ICE CLOUD DETRAINMENT !
! Detrainment of convective condensate into the environment or stratiform cloud !
! --------------------------------------------------------------------------------- !
-
+
! Initialize the shallow convective detrainment rate, will always be zero
dlf2(:,:) = 0.0_r8
+ det_ice(:)=0.0_r8
+ det_s(:)=0.0_r8
+
lqice(:) = .false.
lqice(ixcldliq) = .true.
lqice(ixcldice) = .true.
lqice(ixnumliq) = .true.
- lqice(ixnumice) = .true.
-
- call physics_ptend_init(ptend_loc,state%psetcols, 'clubb_det', ls=.true., lq=lqice)
+ lqice(ixnumice) = .true.
+
+ call physics_ptend_init(ptend_loc,state%psetcols, 'clubb_det', ls=.true., lq=lqice)
do k=1,pver
do i=1,ncol
if( state1%t(i,k) > shoc_tk1 ) then
@@ -1042,62 +962,62 @@ subroutine shoc_tend_e3sm( &
!(clubb_tk1 - clubb_tk2) is also 30.0 but it introduced a non-bfb change
dum1 = ( shoc_tk1 - state1%t(i,k) ) /(shoc_tk1 - shoc_tk2)
endif
-
+
ptend_loc%q(i,k,ixcldliq) = dlf(i,k) * ( 1._r8 - dum1 )
ptend_loc%q(i,k,ixcldice) = dlf(i,k) * dum1
ptend_loc%q(i,k,ixnumliq) = 3._r8 * ( max(0._r8, ( dlf(i,k) - dlf2(i,k) )) * ( 1._r8 - dum1 ) ) &
/ (4._r8*3.14_r8* shoc_liq_deep**3*997._r8) + & ! Deep Convection
3._r8 * ( dlf2(i,k) * ( 1._r8 - dum1 ) ) &
- / (4._r8*3.14_r8*shoc_liq_sh**3*997._r8) ! Shallow Convection
+ / (4._r8*3.14_r8*shoc_liq_sh**3*997._r8) ! Shallow Convection
ptend_loc%q(i,k,ixnumice) = 3._r8 * ( max(0._r8, ( dlf(i,k) - dlf2(i,k) )) * dum1 ) &
/ (4._r8*3.14_r8*shoc_ice_deep**3*500._r8) + & ! Deep Convection
3._r8 * ( dlf2(i,k) * dum1 ) &
/ (4._r8*3.14_r8*shoc_ice_sh**3*500._r8) ! Shallow Convection
ptend_loc%s(i,k) = dlf(i,k) * dum1 * latice
-
+
! Only rliq is saved from deep convection, which is the reserved liquid. We need to keep
! track of the integrals of ice and static energy that is effected from conversion to ice
! so that the energy checker doesn't complain.
det_s(i) = det_s(i) + ptend_loc%s(i,k)*state1%pdel(i,k)/gravit
det_ice(i) = det_ice(i) - ptend_loc%q(i,k,ixcldice)*state1%pdel(i,k)/gravit
-
+
enddo
enddo
det_ice(:ncol) = det_ice(:ncol)/1000._r8 ! divide by density of water
-
+
call physics_ptend_sum(ptend_loc,ptend_all,ncol)
call physics_update(state1,ptend_loc,hdtime)
-
+
! For purposes of this implementation, just set relvar and accre_enhan to 1
relvar(:,:) = 1.0_r8
- accre_enhan(:,:) = 1._r8
-
+ accre_enhan(:,:) = 1._r8
+
! +++ JShpund: add relative cloud liquid variance (a vectorized version based on CLUBB)
! TODO: double check the hardcoded values ('relvarmax', '0.001_r8')
relvarmax = 10.0_r8
where (rcm(:ncol,:pver) /= 0.0 .and. rcm2(:ncol,:pver) /= 0.0) &
relvar(:ncol,:pver) = min(relvarmax,max(0.001_r8,rcm(:ncol,:pver)**2.0/rcm2(:ncol,:pver)))
- ! --------------------------------------------------------------------------------- !
+ ! --------------------------------------------------------------------------------- !
! Diagnose some quantities that are computed in macrop_tend here. !
! These are inputs required for the microphysics calculation. !
! !
! FIRST PART COMPUTES THE STRATIFORM CLOUD FRACTION FROM SHOC CLOUD FRACTION !
- ! --------------------------------------------------------------------------------- !
-
+ ! --------------------------------------------------------------------------------- !
+
! HW: set alst to alst_o before getting updated
if(liqcf_fix) then
if(.not.is_first_step()) alst_o(:ncol,:pver) = alst(:ncol,:pver)
endif
- ! initialize variables
+ ! initialize variables
alst(:,:) = 0.0_r8
- qlst(:,:) = 0.0_r8
-
+ qlst(:,:) = 0.0_r8
+
do k=1,pver
do i=1,ncol
- alst(i,k) = cloud_frac(i,k)
+ alst(i,k) = cloud_frac(i,k)
qlst(i,k) = rcm(i,k)/max(0.01_r8,alst(i,k)) ! Incloud stratus condensate mixing ratio
enddo
enddo
@@ -1105,56 +1025,56 @@ subroutine shoc_tend_e3sm( &
! HW
if(liqcf_fix) then
if(is_first_step()) alst_o(:ncol,:pver) = alst(:ncol,:pver)
- endif
-
- ! --------------------------------------------------------------------------------- !
+ endif
+
+ ! --------------------------------------------------------------------------------- !
! THIS PART COMPUTES CONVECTIVE AND DEEP CONVECTIVE CLOUD FRACTION !
- ! --------------------------------------------------------------------------------- !
-
+ ! --------------------------------------------------------------------------------- !
+
deepcu(:,pver) = 0.0_r8
shalcu(:,pver) = 0.0_r8
-
+
do k=1,pver-1
do i=1,ncol
- ! diagnose the deep convective cloud fraction, as done in macrophysics based on the
- ! deep convective mass flux, read in from pbuf. Since shallow convection is never
+ ! diagnose the deep convective cloud fraction, as done in macrophysics based on the
+ ! deep convective mass flux, read in from pbuf. Since shallow convection is never
! called, the shallow convective mass flux will ALWAYS be zero, ensuring that this cloud
- ! fraction is purely from deep convection scheme.
+ ! fraction is purely from deep convection scheme.
deepcu(i,k) = max(0.0_r8,min(dp1*log(1.0_r8+500.0_r8*(cmfmc(i,k+1)-cmfmc_sh(i,k+1))),0.6_r8))
shalcu(i,k) = 0._r8
-
+
if (deepcu(i,k) <= frac_limit .or. dp_icwmr(i,k) < ic_limit) then
deepcu(i,k) = 0._r8
endif
-
- ! using the deep convective cloud fraction, and SHOC cloud fraction (variable
+
+ ! using the deep convective cloud fraction, and SHOC cloud fraction (variable
! "cloud_frac"), compute the convective cloud fraction. This follows the formulation
- ! found in macrophysics code. Assumes that convective cloud is all nonstratiform cloud
+ ! found in macrophysics code. Assumes that convective cloud is all nonstratiform cloud
! from SHOC plus the deep convective cloud fraction
concld(i,k) = min(cloud_frac(i,k)-alst(i,k)+deepcu(i,k),0.80_r8)
enddo
- enddo
-
- ! --------------------------------------------------------------------------------- !
+ enddo
+
+ ! --------------------------------------------------------------------------------- !
! COMPUTE THE ICE CLOUD FRACTION PORTION !
! use the aist_vector function to compute the ice cloud fraction !
! --------------------------------------------------------------------------------- !
-
+
do k=1,pver
call aist_vector(state1%q(:,k,ixq),state1%t(:,k),state1%pmid(:,k),state1%q(:,k,ixcldice), &
state1%q(:,k,ixnumice),cam_in%landfrac(:),cam_in%snowhland(:),aist(:,k),ncol)
enddo
-
- ! --------------------------------------------------------------------------------- !
+
+ ! --------------------------------------------------------------------------------- !
! THIS PART COMPUTES THE LIQUID STRATUS FRACTION !
! !
! For now leave the computation of ice stratus fraction from macrop_driver intact !
- ! because SHOC does nothing with ice. Here I simply overwrite the liquid stratus !
+ ! because SHOC does nothing with ice. Here I simply overwrite the liquid stratus !
! fraction that was coded in macrop_driver !
- ! --------------------------------------------------------------------------------- !
-
+ ! --------------------------------------------------------------------------------- !
+
! Recompute net stratus fraction using maximum over-lapping assumption, as done
- ! in macrophysics code, using alst computed above and aist read in from physics buffer
+ ! in macrophysics code, using alst computed above and aist read in from physics buffer
cldthresh=1.e-18_r8
@@ -1163,27 +1083,27 @@ subroutine shoc_tend_e3sm( &
ast(i,k) = max(alst(i,k),aist(i,k))
- qist(i,k) = state1%q(i,k,ixcldice)/max(0.01_r8,aist(i,k))
+ qist(i,k) = state1%q(i,k,ixcldice)/max(0.01_r8,aist(i,k))
enddo
enddo
-
- ! Probably need to add deepcu cloud fraction to the cloud fraction array, else would just
+
+ ! Probably need to add deepcu cloud fraction to the cloud fraction array, else would just
! be outputting the shallow convective cloud fraction
-
+
! Add liq, ice, and precipitating ice fractions here. These are purely
! diagnostic outputs and do not impact the rest of the code. The qi threshold for
- ! setting ice_cloud_fraction and the qi dependent ni_threshold are tunable.
+ ! setting ice_cloud_fraction and the qi dependent ni_threshold are tunable.
liq_cloud_frac = 0.0_r8
ice_cloud_frac = 0.0_r8
precipitating_ice_frac = 0.0_r8
tot_cloud_frac = 0.0_r8
-
+
do k=1,pver
do i=1,ncol
cloud_frac(i,k) = min(ast(i,k)+deepcu(i,k),1.0_r8)
liq_cloud_frac(i,k) = alst(i,k)
- if (state1%q(i,k,ixcldice) .ge. 1.0e-5_r8) then
+ if (state1%q(i,k,ixcldice) .ge. 1.0e-5_r8) then
if (state1%q(i,k,ixnumice) .ge. state1%q(i,k,ixcldice)*5.0e7_r8) then
ice_cloud_frac(i,k) = 1.0_r8
else
@@ -1193,8 +1113,8 @@ subroutine shoc_tend_e3sm( &
tot_cloud_frac(i,k) = min(1.0_r8, max(ice_cloud_frac(i,k),liq_cloud_frac(i,k))+deepcu(i,k))
enddo
enddo
-
- cld(:,1:pver) = cloud_frac(:,1:pver)
+
+ cld(:,1:pver) = cloud_frac(:,1:pver)
! --------------------------------------------------------!
! Output fields
@@ -1203,14 +1123,14 @@ subroutine shoc_tend_e3sm( &
do k=1,pverp
do i=1,ncol
wthl_output(i,k) = wthl_sec_out(i,k) * rrho_i(i,k) * cpair
- wqw_output(i,k) = wqw_sec_out(i,k) * rrho_i(i,k) * latvap
+ wqw_output(i,k) = wqw_sec_out(i,k) * rrho_i(i,k) * latvap
enddo
enddo
do k=1,pver
do i=1,ncol
wthv_output(i,k) = wthv(i,k) * rrho(i,k) * cpair
- wql_output(i,k) = wqls_out(i,k) * rrho(i,k) * latvap
+ wql_output(i,k) = wqls_out(i,k) * rrho(i,k) * latvap
enddo
enddo
@@ -1239,11 +1159,12 @@ subroutine shoc_tend_e3sm( &
call outfld('PRECIPITATING_ICE_FRAC',precipitating_ice_frac,pcols,lchnk)
call outfld('LIQ_CLOUD_FRAC',liq_cloud_frac,pcols,lchnk)
call outfld('TOT_CLOUD_FRAC',tot_cloud_frac,pcols,lchnk)
+ call outfld('PBLH',pblh,pcols,lchnk)
+
+#endif
+ return
+ end subroutine shoc_tend_e3sm
-#endif
- return
- end subroutine shoc_tend_e3sm
-
subroutine grid_size(state, grid_dx, grid_dy)
! Determine the size of the grid for each of the columns in state
@@ -1251,11 +1172,11 @@ subroutine grid_size(state, grid_dx, grid_dy)
use shr_const_mod, only: shr_const_pi
use physics_types, only: physics_state
use ppgrid, only: pver, pverp, pcols
-
+
type(physics_state), intent(in) :: state
real(r8), intent(out) :: grid_dx(pcols), grid_dy(pcols) ! E3SM grid [m]
- real(r8), parameter :: earth_ellipsoid1 = 111132.92_r8 ! World Geodetic System 1984 (WGS84)
+ real(r8), parameter :: earth_ellipsoid1 = 111132.92_r8 ! World Geodetic System 1984 (WGS84)
! first coefficient, meters per degree longitude at equator
real(r8), parameter :: earth_ellipsoid2 = 559.82_r8 ! second expansion coefficient for WGS84 ellipsoid
real(r8), parameter :: earth_ellipsoid3 = 1.175_r8 ! third expansion coefficient for WGS84 ellipsoid
@@ -1271,29 +1192,29 @@ subroutine grid_size(state, grid_dx, grid_dy)
! convert latitude to radians
lat_in_rad = state%lat(i)*(shr_const_pi/180._r8)
-
+
! Now find meters per degree latitude
! Below equation finds distance between two points on an ellipsoid, derived from expansion
- ! taking into account ellipsoid using World Geodetic System (WGS84) reference
+ ! taking into account ellipsoid using World Geodetic System (WGS84) reference
mpdeglat = earth_ellipsoid1 - earth_ellipsoid2 * cos(2._r8*lat_in_rad) + earth_ellipsoid3 * cos(4._r8*lat_in_rad)
grid_dx(i) = mpdeglat * degree
grid_dy(i) = grid_dx(i) ! Assume these are the same
- enddo
+ enddo
+
+ end subroutine grid_size
- end subroutine grid_size
-
subroutine grid_size_planar_uniform(grid_dx, grid_dy)
-
+
! Get size of grid box if in doubly period planar mode
! At time of implementation planar dycore only supports uniform grids.
-
+
use iop_data_mod, only: dyn_dx_size
-
+
real(r8), intent(out) :: grid_dx, grid_dy
grid_dx = dyn_dx_size
grid_dy = grid_dx
-
+
end subroutine grid_size_planar_uniform
end module shoc_intr
diff --git a/components/eam/src/physics/cam/trb_mtn_stress.F90 b/components/eam/src/physics/cam/trb_mtn_stress.F90
index ff730872e4a8..3698ddcfeb3a 100644
--- a/components/eam/src/physics/cam/trb_mtn_stress.F90
+++ b/components/eam/src/physics/cam/trb_mtn_stress.F90
@@ -1,7 +1,16 @@
+! Include bit-for-bit math macros.
+#include "bfb_math.inc"
+
module trb_mtn_stress
+ ! Bit-for-bit math functions.
+#ifdef SCREAM_CONFIG_IS_CMAKE
+ use physics_share_f2c, only: scream_pow, scream_sqrt, scream_cbrt, scream_gamma, scream_log, &
+ scream_log10, scream_exp, scream_erf
+#endif
+
implicit none
- private
+ private
save
public init_tms ! Initialization
@@ -17,7 +26,7 @@ module trb_mtn_stress
real(r8), parameter :: z0max = 100._r8 ! Maximum value of z_0 for orography [ m ]
real(r8), parameter :: dv2min = 0.01_r8 ! Minimum shear squared [ m2/s2 ]
real(r8) :: orocnst ! Converts from standard deviation to height [ no unit ]
- real(r8) :: z0fac ! Factor determining z_0 from orographic standard deviation [ no unit ]
+ real(r8) :: z0fac ! Factor determining z_0 from orographic standard deviation [ no unit ]
real(r8) :: karman ! von Karman constant
real(r8) :: gravit ! Acceleration due to gravity
real(r8) :: rair ! Gas constant for dry air
@@ -49,7 +58,7 @@ subroutine init_tms( kind, oro_in, z0fac_in, karman_in, gravit_in, rair_in, &
karman = karman_in
gravit = gravit_in
rair = rair_in
-
+
end subroutine init_tms
!============================================================================ !
@@ -58,11 +67,11 @@ end subroutine init_tms
subroutine compute_tms( pcols , pver , ncol , &
u , v , t , pmid , exner , &
- zm , sgh , ksrf , taux , tauy , &
+ zm , sgh , ksrf , taux , tauy , &
landfrac )
!------------------------------------------------------------------------------ !
- ! Turbulent mountain stress parameterization !
+ ! Turbulent mountain stress parameterization !
! !
! Returns surface drag coefficient and stress associated with subgrid mountains !
! For points where the orographic variance is small ( including ocean ), !
@@ -72,7 +81,7 @@ subroutine compute_tms( pcols , pver , ncol , &
!------------------------------------------------------------------------------ !
! ---------------------- !
- ! Input-Output Arguments !
+ ! Input-Output Arguments !
! ---------------------- !
integer, intent(in) :: pcols ! Number of columns dimensioned
@@ -87,7 +96,7 @@ subroutine compute_tms( pcols , pver , ncol , &
real(r8), intent(in) :: zm(pcols,pver) ! Layer mid-point height [ m ]
real(r8), intent(in) :: sgh(pcols) ! Standard deviation of orography [ m ]
real(r8), intent(in) :: landfrac(pcols) ! Land fraction [ fraction ]
-
+
real(r8), intent(out) :: ksrf(pcols) ! Surface drag coefficient [ kg/s/m2 ]
real(r8), intent(out) :: taux(pcols) ! Surface zonal wind stress [ N/m2 ]
real(r8), intent(out) :: tauy(pcols) ! Surface meridional wind stress [ N/m2 ]
@@ -98,7 +107,7 @@ subroutine compute_tms( pcols , pver , ncol , &
integer :: i ! Loop index
integer :: kb, kt ! Bottom and top of source region
-
+
real(r8) :: horo ! Orographic height [ m ]
real(r8) :: z0oro ! Orographic z0 for momentum [ m ]
real(r8) :: dv2 ! (delta v)**2 [ m2/s2 ]
@@ -111,7 +120,7 @@ subroutine compute_tms( pcols , pver , ncol , &
! ----------------------- !
! Main Computation Begins !
! ----------------------- !
-
+
do i = 1, ncol
! determine subgrid orgraphic height ( mean to peak )
@@ -134,19 +143,19 @@ subroutine compute_tms( pcols , pver , ncol , &
! Calculate neutral drag coefficient
- cd = ( karman / log( ( zm(i,pver) + z0oro ) / z0oro) )**2
+ cd = bfb_square( karman / bfb_log( ( zm(i,pver) + z0oro ) / z0oro) )
! Calculate the Richardson number over the lowest 2 layers
kt = pver - 1
kb = pver
- dv2 = max( ( u(i,kt) - u(i,kb) )**2 + ( v(i,kt) - v(i,kb) )**2, dv2min )
+ dv2 = max( bfb_square( u(i,kt) - u(i,kb) ) + bfb_square( v(i,kt) - v(i,kb) ), dv2min )
! Modification : Below computation of Ri is wrong. Note that 'Exner' function here is
! inverse exner function. Here, exner function is not multiplied in
! the denominator. Also, we should use moist Ri not dry Ri.
! Also, this approach using the two lowest model layers can be potentially
- ! sensitive to the vertical resolution.
+ ! sensitive to the vertical resolution.
! OK. I only modified the part associated with exner function.
ri = 2._r8 * gravit * ( t(i,kt) * exner(i,kt) - t(i,kb) * exner(i,kb) ) * ( zm(i,kt) - zm(i,kb) ) &
@@ -156,7 +165,7 @@ subroutine compute_tms( pcols , pver , ncol , &
! / ( ( t(i,kt) + t(i,kb) ) * dv2 )
! Calculate the instability function and modify the neutral drag cofficient.
- ! We should probably follow more elegant approach like Louis et al (1982) or Bretherton and Park (2009)
+ ! We should probably follow more elegant approach like Louis et al (1982) or Bretherton and Park (2009)
! but for now we use very crude approach : just 1 for ri < 0, 0 for ri > 1, and linear ramping.
stabfri = max( 0._r8, min( 1._r8, 1._r8 - ri ) )
@@ -164,8 +173,8 @@ subroutine compute_tms( pcols , pver , ncol , &
! Compute density, velocity magnitude and stress using bottom level properties
- rho = pmid(i,pver) / ( rair * t(i,pver) )
- vmag = sqrt( u(i,pver)**2 + v(i,pver)**2 )
+ rho = pmid(i,pver) / ( rair * t(i,pver) )
+ vmag = bfb_sqrt( bfb_square(u(i,pver)) + bfb_square(v(i,pver)) )
ksrf(i) = rho * cd * vmag * landfrac(i)
taux(i) = -ksrf(i) * u(i,pver)
tauy(i) = -ksrf(i) * v(i,pver)
@@ -173,7 +182,7 @@ subroutine compute_tms( pcols , pver , ncol , &
end if
end do
-
+
return
end subroutine compute_tms
diff --git a/components/eam/src/physics/cam/wv_sat_scream.F90 b/components/eam/src/physics/cam/wv_sat_scream.F90
index d52e7e5cc30b..b43ae6bfcc3c 100644
--- a/components/eam/src/physics/cam/wv_sat_scream.F90
+++ b/components/eam/src/physics/cam/wv_sat_scream.F90
@@ -14,25 +14,25 @@ module wv_sat_scream
use physics_utils, only: rtype
use micro_p3_utils, only: T_zerodegc
#ifdef SCREAM_CONFIG_IS_CMAKE
- use physics_share_f2c, only: cxx_pow, cxx_sqrt, cxx_cbrt, cxx_gamma, cxx_log, &
- cxx_log10, cxx_exp, cxx_tanh
+ use physics_share_f2c, only: scream_pow, scream_sqrt, scream_cbrt, scream_gamma, scream_log, &
+ scream_log10, scream_exp, scream_tanh
#endif
implicit none
private
- public:: qv_sat, MurphyKoop_svp
+ public:: qv_sat_dry, qv_sat_wet, qv_sat, MurphyKoop_svp
contains
!===========================================================================================
- real(rtype) function qv_sat(t_atm,p_atm,i_wrt)
+ real(rtype) function qv_sat_dry(t_atm,p_atm_dry,i_wrt)
!------------------------------------------------------------------------------------
- ! Calls polysvp1 to obtain the saturation vapor pressure, and then computes
- ! and returns the saturation mixing ratio, with respect to either liquid or ice,
+ ! Calls MurphyKoop to obtain the saturation vapor pressure, and then computes
+ ! and returns the dry saturation mixing ratio, with respect to either liquid or ice,
! depending on value of 'i_wrt'
!------------------------------------------------------------------------------------
@@ -40,23 +40,79 @@ real(rtype) function qv_sat(t_atm,p_atm,i_wrt)
implicit none
!Calling parameters:
- real(rtype), intent(in) :: t_atm !temperature [K]
- real(rtype), intent(in) :: p_atm !pressure [Pa]
+ real(rtype), intent(in) :: t_atm !temperature [K]
+ real(rtype), intent(in) :: p_atm_dry !pressure [Pa]
+ integer, intent(in) :: i_wrt !index, 0 = w.r.t. liquid, 1 = w.r.t. ice
+
+ !Local variables:
+ real(rtype) :: e_pres !saturation vapor pressure [Pa]
+
+ !e_pres = polysvp1(t_atm,i_wrt)
+ e_pres = MurphyKoop_svp(t_atm,i_wrt)
+ qv_sat_dry = ep_2*e_pres/max(1.e-3_rtype,p_atm_dry)
+
+ return
+
+ end function qv_sat_dry
+
+ !===========================================================================================
+ real(rtype) function qv_sat(t_atm,p_atm,i_wrt)
+
+ !------------------------------------------------------------------------------------
+ ! Legacy for backwards compatibility with eam. Prefer the dry/wet versions going forward.
+ ! eamxx will use the dry/wet versions.
+ !------------------------------------------------------------------------------------
+
+ use micro_p3_utils, only: ep_2
+ implicit none
+
+ !Calling parameters:
+ real(rtype), intent(in) :: t_atm !temperature [K]
+ real(rtype), intent(in) :: p_atm !pressure [Pa]
integer, intent(in) :: i_wrt !index, 0 = w.r.t. liquid, 1 = w.r.t. ice
!Local variables:
real(rtype) :: e_pres !saturation vapor pressure [Pa]
- !e_pres = polysvp1(t_atm,i_wrt)
+ !e_pres = polysvp1(t_atm,i_wrt)
e_pres = MurphyKoop_svp(t_atm,i_wrt)
- qv_sat = ep_2*e_pres/max(1.e-3_rtype,(p_atm-e_pres))
+ qv_sat = ep_2*e_pres/max(1.e-3_rtype,p_atm-e_pres)
return
end function qv_sat
+
!===========================================================================================
- !==========================================================================================!
+ real(rtype) function qv_sat_wet(t_atm,p_atm_dry,i_wrt,dp_wet,dp_dry)
+
+ !------------------------------------------------------------------------------------
+ ! Calls qv_sat_dry to obtain the dry saturation mixing ratio,
+ ! with respect to either liquid or ice, depending on value of 'i_wrt',
+ ! and converts it to wet
+ !------------------------------------------------------------------------------------
+
+ implicit none
+
+ !Calling parameters:
+ real(rtype), intent(in) :: t_atm !temperature [K]
+ real(rtype), intent(in) :: p_atm_dry !pressure [Pa]
+ real(rtype), intent(in) :: dp_wet !pseudodensity [Pa]
+ real(rtype), intent(in) :: dp_dry !pseudodensity_dry [Pa]
+ integer, intent(in) :: i_wrt !index, 0 = w.r.t. liquid, 1 = w.r.t. ice
+
+ !Local variables:
+ real(rtype) :: qsatdry
+
+ qsatdry = qv_sat_dry(t_atm,p_atm_dry,i_wrt)
+ qv_sat_wet = qsatdry * dp_dry / dp_wet
+
+ return
+
+ end function qv_sat_wet
+ !===========================================================================================
+
+
real(rtype) function MurphyKoop_svp(t, i_type)
diff --git a/components/eam/src/physics/crm/pam/external b/components/eam/src/physics/crm/pam/external
index ce614fcd8d1b..87731d56aeee 160000
--- a/components/eam/src/physics/crm/pam/external
+++ b/components/eam/src/physics/crm/pam/external
@@ -1 +1 @@
-Subproject commit ce614fcd8d1b38e7e638e55e5fcb220531aca178
+Subproject commit 87731d56aeee4bfae4750e17732828ba186367a7
diff --git a/components/eam/src/physics/p3/eam/micro_p3.F90 b/components/eam/src/physics/p3/eam/micro_p3.F90
index 33bc9c32ab94..730736a2dbf2 100644
--- a/components/eam/src/physics/p3/eam/micro_p3.F90
+++ b/components/eam/src/physics/p3/eam/micro_p3.F90
@@ -72,8 +72,8 @@ module micro_p3
! Bit-for-bit math functions.
#ifdef SCREAM_CONFIG_IS_CMAKE
- use physics_share_f2c, only: cxx_pow, cxx_sqrt, cxx_cbrt, cxx_gamma, cxx_log, &
- cxx_log10, cxx_exp, cxx_expm1, cxx_tanh
+ use physics_share_f2c, only: scream_pow, scream_sqrt, scream_cbrt, scream_gamma, scream_log, &
+ scream_log10, scream_exp, scream_expm1, scream_tanh
#endif
implicit none
diff --git a/components/eam/src/physics/p3/scream/micro_p3.F90 b/components/eam/src/physics/p3/scream/micro_p3.F90
index 5723fd172b68..e59bcc08239c 100644
--- a/components/eam/src/physics/p3/scream/micro_p3.F90
+++ b/components/eam/src/physics/p3/scream/micro_p3.F90
@@ -56,12 +56,12 @@ module micro_p3
lookup_table_1a_dum1_c, &
p3_qc_autocon_expon, p3_qc_accret_expon
- use wv_sat_scream, only:qv_sat
+ use wv_sat_scream, only:qv_sat_dry
! Bit-for-bit math functions.
#ifdef SCREAM_CONFIG_IS_CMAKE
- use physics_share_f2c, only: cxx_pow, cxx_sqrt, cxx_cbrt, cxx_gamma, cxx_log, &
- cxx_log10, cxx_exp, cxx_expm1, cxx_tanh
+ use physics_share_f2c, only: scream_pow, scream_sqrt, scream_cbrt, scream_gamma, scream_log, &
+ scream_log10, scream_exp, scream_expm1, scream_tanh
#endif
implicit none
@@ -384,8 +384,8 @@ SUBROUTINE p3_main_part1(kts, kte, kbot, ktop, kdir, do_predict_nc, do_prescribe
!can be made consistent with E3SM definition of latent heat
rho(k) = dpres(k)/dz(k)/g ! pres(k)/(rd*t(k))
inv_rho(k) = 1._rtype/rho(k)
- qv_sat_l(k) = qv_sat(t_atm(k),pres(k),0)
- qv_sat_i(k) = qv_sat(t_atm(k),pres(k),1)
+ qv_sat_l(k) = qv_sat_dry(t_atm(k),pres(k),0)
+ qv_sat_i(k) = qv_sat_dry(t_atm(k),pres(k),1)
qv_supersat_i(k) = qv(k)/qv_sat_i(k)-1._rtype
@@ -960,7 +960,8 @@ subroutine p3_main_part3(kts, kte, kbot, ktop, kdir, &
inv_exner, cld_frac_l, cld_frac_r, cld_frac_i, &
rho, inv_rho, rhofaci, qv, th_atm, qc, nc, qr, nr, qi, ni, qm, bm, latent_heat_vapor, latent_heat_sublim, &
mu_c, nu, lamc, mu_r, lamr, vap_liq_exchange, &
- ze_rain, ze_ice, diag_vm_qi, diag_eff_radius_qi, diag_diam_qi, rho_qi, diag_equiv_reflectivity, diag_eff_radius_qc)
+ ze_rain, ze_ice, diag_vm_qi, diag_eff_radius_qi, diag_diam_qi, rho_qi, diag_equiv_reflectivity, &
+ diag_eff_radius_qc, diag_eff_radius_qr)
implicit none
@@ -974,7 +975,8 @@ subroutine p3_main_part3(kts, kte, kbot, ktop, kdir, &
qv, th_atm, qc, nc, qr, nr, qi, ni, qm, bm, latent_heat_vapor, latent_heat_sublim, &
mu_c, nu, lamc, mu_r, &
lamr, vap_liq_exchange, &
- ze_rain, ze_ice, diag_vm_qi, diag_eff_radius_qi, diag_diam_qi, rho_qi, diag_equiv_reflectivity, diag_eff_radius_qc
+ ze_rain, ze_ice, diag_vm_qi, diag_eff_radius_qi, diag_diam_qi, rho_qi, diag_equiv_reflectivity, &
+ diag_eff_radius_qc, diag_eff_radius_qr
! locals
integer :: k, dumi, dumii, dumjj, dumzz
@@ -1028,6 +1030,7 @@ subroutine p3_main_part3(kts, kte, kbot, ktop, kdir, &
ze_rain(k) = nr(k)*(mu_r(k)+6._rtype)*(mu_r(k)+5._rtype)*(mu_r(k)+4._rtype)* &
(mu_r(k)+3._rtype)*(mu_r(k)+2._rtype)*(mu_r(k)+1._rtype)/bfb_pow(lamr(k), 6._rtype)
ze_rain(k) = max(ze_rain(k),1.e-22_rtype)
+ diag_eff_radius_qr(k) = 1.5_rtype/lamr(k)
else
qv(k) = qv(k)+qr(k)
th_atm(k) = th_atm(k)-inv_exner(k)*qr(k)*latent_heat_vapor(k)*inv_cp
@@ -1122,7 +1125,7 @@ end subroutine p3_main_part3
SUBROUTINE p3_main(qc,nc,qr,nr,th_atm,qv,dt,qi,qm,ni,bm, &
pres,dz,nc_nuceat_tend,nccn_prescribed,ni_activated,inv_qc_relvar,it,precip_liq_surf,precip_ice_surf,its,ite,kts,kte,diag_eff_radius_qc, &
- diag_eff_radius_qi,rho_qi,do_predict_nc, do_prescribed_CCN, &
+ diag_eff_radius_qi,diag_eff_radius_qr,rho_qi,do_predict_nc, do_prescribed_CCN, &
dpres,inv_exner,qv2qi_depos_tend,precip_total_tend,nevapr,qr_evap_tend,precip_liq_flux,precip_ice_flux,cld_frac_r,cld_frac_l,cld_frac_i, &
p3_tend_out,mu_c,lamc,liq_ice_exchange,vap_liq_exchange, &
vap_ice_exchange,qv_prev,t_prev,col_location &
@@ -1173,6 +1176,7 @@ SUBROUTINE p3_main(qc,nc,qr,nr,th_atm,qv,dt,qi,qm,ni,bm, &
real(rtype), intent(out), dimension(its:ite) :: precip_ice_surf ! precipitation rate, solid m s-1
real(rtype), intent(out), dimension(its:ite,kts:kte) :: diag_eff_radius_qc ! effective radius, cloud m
real(rtype), intent(out), dimension(its:ite,kts:kte) :: diag_eff_radius_qi ! effective radius, ice m
+ real(rtype), intent(out), dimension(its:ite,kts:kte) :: diag_eff_radius_qr ! effective radius, rain m
real(rtype), intent(out), dimension(its:ite,kts:kte) :: rho_qi ! bulk density of ice kg m-3
real(rtype), intent(out), dimension(its:ite,kts:kte) :: mu_c ! Size distribution shape parameter for radiation
real(rtype), intent(out), dimension(its:ite,kts:kte) :: lamc ! Size distribution slope parameter for radiation
@@ -1297,6 +1301,7 @@ SUBROUTINE p3_main(qc,nc,qr,nr,th_atm,qv,dt,qi,qm,ni,bm, &
ze_rain = 1.e-22_rtype
diag_eff_radius_qc = 10.e-6_rtype ! default value
diag_eff_radius_qi = 25.e-6_rtype ! default value
+ diag_eff_radius_qr = 500.e-6_rtype ! default value
diag_vm_qi = 0._rtype
diag_diam_qi = 0._rtype
rho_qi = 0._rtype
@@ -1452,7 +1457,8 @@ SUBROUTINE p3_main(qc,nc,qr,nr,th_atm,qv,dt,qi,qm,ni,bm, &
rho(i,:), inv_rho(i,:), rhofaci(i,:), qv(i,:), th_atm(i,:), qc(i,:), nc(i,:), qr(i,:), nr(i,:), qi(i,:), ni(i,:), &
qm(i,:), bm(i,:), latent_heat_vapor(i,:), latent_heat_sublim(i,:), &
mu_c(i,:), nu(i,:), lamc(i,:), mu_r(i,:), lamr(i,:), vap_liq_exchange(i,:), &
- ze_rain(i,:), ze_ice(i,:), diag_vm_qi(i,:), diag_eff_radius_qi(i,:), diag_diam_qi(i,:), rho_qi(i,:), diag_equiv_reflectivity(i,:), diag_eff_radius_qc(i,:))
+ ze_rain(i,:), ze_ice(i,:), diag_vm_qi(i,:), diag_eff_radius_qi(i,:), diag_diam_qi(i,:), rho_qi(i,:), &
+ diag_equiv_reflectivity(i,:), diag_eff_radius_qc(i,:), diag_eff_radius_qr(i,:))
! if (debug_ON) call check_values(qv,Ti,it,debug_ABORT,800,col_location)
!..............................................
@@ -2233,7 +2239,7 @@ subroutine ice_melting(rho,t_atm,pres,rhofaci, &
real(rtype) :: qsat0
if (qi_incld .ge.qsmall .and. t_atm.gt.T_zerodegc) then
- qsat0 = qv_sat( T_zerodegc,pres,0 )
+ qsat0 = qv_sat_dry( T_zerodegc,pres,0 )
qi2qr_melt_tend = ((table_val_qi2qr_melting+table_val_qi2qr_vent_melt*bfb_cbrt(sc)*bfb_sqrt(rhofaci*rho/mu))*((t_atm- &
T_zerodegc)*kap-rho*latent_heat_vapor*dv*(qsat0-qv))*2._rtype*pi/latent_heat_fusion)*ni_incld
@@ -2283,7 +2289,7 @@ subroutine ice_cldliq_wet_growth(rho,t_atm,pres,rhofaci, &
real(rtype) :: qsat0, dum, dum1
if (qi_incld.ge.qsmall .and. qc_incld+qr_incld.ge.1.e-6_rtype .and. t_atm.lt.T_zerodegc) then
- qsat0=qv_sat( T_zerodegc,pres,0 )
+ qsat0=qv_sat_dry( T_zerodegc,pres,0 )
qwgrth = ((table_val_qi2qr_melting + table_val_qi2qr_vent_melt*bfb_cbrt(sc)*bfb_sqrt(rhofaci*rho/mu))* &
2._rtype*pi*(rho*latent_heat_vapor*dv*(qsat0-qv)-(t_atm-T_zerodegc)* &
@@ -2908,7 +2914,7 @@ subroutine prevent_liq_supersaturation(pres,t_atm,qv,latent_heat_vapor,latent_he
- qr2qv_evap_tend*latent_heat_vapor*inv_cp )*dt
!qv we would have at end of step if we were saturated with respect to liquid
- qsl = qv_sat(T_endstep,pres,0)
+ qsl = qv_sat_dry(T_endstep,pres,0)
! The balance we seek is:
! qv-qv_sinks*dt+qv_sources*frac*dt=qsl+dqsl_dT*(T correction due to conservation)
diff --git a/components/eam/src/physics/p3/scream/micro_p3_interface.F90 b/components/eam/src/physics/p3/scream/micro_p3_interface.F90
index 8e181c21a646..edea27959d25 100644
--- a/components/eam/src/physics/p3/scream/micro_p3_interface.F90
+++ b/components/eam/src/physics/p3/scream/micro_p3_interface.F90
@@ -745,7 +745,6 @@ subroutine micro_p3_tend(state, ptend, dtime, pbuf)
qsmall, &
mincld, &
inv_cp
- use physics_utils, only: calculate_drymmr_from_wetmmr, calculate_wetmmr_from_drymmr
!INPUT/OUTPUT VARIABLES
type(physics_state), intent(in) :: state
@@ -834,7 +833,6 @@ subroutine micro_p3_tend(state, ptend, dtime, pbuf)
real(rtype) :: icimrst(pcols,pver) ! stratus ice mixing ratio - on grid
real(rtype) :: icwmrst(pcols,pver) ! stratus water mixing ratio - on grid
real(rtype) :: rho(pcols,pver)
- real(rtype) :: drout2(pcols,pver)
real(rtype) :: reff_rain(pcols,pver)
real(rtype) :: col_location(pcols,3),tmp_loc(pcols) ! Array of column lon (index 1) and lat (index 2)
integer :: tmpi_loc(pcols) ! Global column index temp array
@@ -854,6 +852,9 @@ subroutine micro_p3_tend(state, ptend, dtime, pbuf)
real(rtype) :: icinc(pcols,pver)
real(rtype) :: icwnc(pcols,pver)
+ real(rtype) :: ratio_local(pcols,pver)
+ real(rtype) :: dtemp(pcols,pver)
+
integer :: it !timestep counter -
integer :: its, ite !horizontal bounds (column start,finish)
@@ -865,7 +866,6 @@ subroutine micro_p3_tend(state, ptend, dtime, pbuf)
integer :: icol, ncol, k
integer :: psetcols, lchnk
integer :: itim_old
- real(rtype) :: T_virtual
! For rrtmg optics. specified distribution.
real(rtype), parameter :: dcon = 25.e-6_rtype ! Convective size distribution effective radius (um)
@@ -984,22 +984,24 @@ subroutine micro_p3_tend(state, ptend, dtime, pbuf)
!-------------------------
!Since state constituents from the host model are wet mixing ratios and P3 needs these
!constituents in dry mixing ratios, we convert the wet mixing ratios to dry mixing ratio
- !while assigning state constituents to the local variables
- !NOTE:Function calculate_drymmr_from_wetmmr takes 3 arguments: (number of columns, wet mmr and
- ! "wet" water vapor mixing ratio)
!---------------------------------------------------------------------------------------
- qv_wet_in = state%q(:,:,1) ! Get "wet" water vapor mixing ratio from state
+
!Compute dry mixing ratios for all the constituents
- qv_dry(:ncol,:pver) = calculate_drymmr_from_wetmmr(ncol, pver, qv_wet_in, qv_wet_in)
- qv_prev_dry(:ncol,:pver) = calculate_drymmr_from_wetmmr(ncol, pver, qv_prev_wet, qv_wet_in)
- cldliq(:ncol,:pver) = calculate_drymmr_from_wetmmr(ncol, pver, state%q(:,:,ixcldliq), qv_wet_in)
- numliq(:ncol,:pver) = calculate_drymmr_from_wetmmr(ncol, pver, state%q(:,:,ixnumliq), qv_wet_in)
- rain(:ncol,:pver) = calculate_drymmr_from_wetmmr(ncol, pver, state%q(:,:,ixrain), qv_wet_in)
- numrain(:ncol,:pver) = calculate_drymmr_from_wetmmr(ncol, pver, state%q(:,:,ixnumrain), qv_wet_in)
- ice(:ncol,:pver) = calculate_drymmr_from_wetmmr(ncol, pver, state%q(:,:,ixcldice), qv_wet_in)
- qm(:ncol,:pver) = calculate_drymmr_from_wetmmr(ncol, pver, state%q(:,:,ixcldrim), qv_wet_in) !Aaron, changed ixqm to ixcldrim to match Kai's code
- numice(:ncol,:pver) = calculate_drymmr_from_wetmmr(ncol, pver, state%q(:,:,ixnumice), qv_wet_in)
- rimvol(:ncol,:pver) = calculate_drymmr_from_wetmmr(ncol, pver, state%q(:,:,ixrimvol), qv_wet_in)
+ !The conversion is done via calculation drymmr = (wetmmr * wetdp) / drydp
+ ratio_local(:ncol,:pver) = state%pdel(:ncol,:pver)/state%pdeldry(:ncol,:pver)
+
+ qv_dry (:ncol,:pver) = state%q(:ncol,:pver, 1) *ratio_local(:ncol,:pver)
+ cldliq (:ncol,:pver) = state%q(:ncol,:pver, ixcldliq) *ratio_local(:ncol,:pver)
+ numliq (:ncol,:pver) = state%q(:ncol,:pver, ixnumliq) *ratio_local(:ncol,:pver)
+ rain (:ncol,:pver) = state%q(:ncol,:pver, ixrain) *ratio_local(:ncol,:pver)
+ numrain (:ncol,:pver) = state%q(:ncol,:pver, ixnumrain) *ratio_local(:ncol,:pver)
+ ice (:ncol,:pver) = state%q(:ncol,:pver, ixcldice) *ratio_local(:ncol,:pver)
+ !Aaron, changed ixqm to ixcldrim to match Kai's code
+ qm (:ncol,:pver) = state%q(:ncol,:pver, ixcldrim) *ratio_local(:ncol,:pver)
+ numice (:ncol,:pver) = state%q(:ncol,:pver, ixnumice) *ratio_local(:ncol,:pver)
+ rimvol (:ncol,:pver) = state%q(:ncol,:pver, ixrimvol) *ratio_local(:ncol,:pver)
+
+ qv_prev_dry(:ncol,:pver) = qv_prev_wet(:ncol,:pver) *ratio_local(:ncol,:pver)
! COMPUTE GEOMETRIC THICKNESS OF GRID & CONVERT T TO POTENTIAL TEMPERATURE
!==============
@@ -1007,15 +1009,16 @@ subroutine micro_p3_tend(state, ptend, dtime, pbuf)
! used by all parameterizations, such as P3 and SHOC.
! This would take a bit more work, so we have decided to delay this task
! until a later stage of code cleanup.
- inv_exner(:ncol,:pver) = 1._rtype/((state%pmiddry(:ncol,:pver)*1.e-5_rtype)**(rair*inv_cp))
+
+ inv_exner(:ncol,:pver) = 1._rtype/((state%pmid(:ncol,:pver)*1.e-5_rtype)**(rair*inv_cp))
do icol = 1,ncol
do k = 1,pver
! Note, there is a state%zi variable that could be used to calculate
! dz, but that is in a wet coordinate frame rather than dry. Now that
! P3 is using dry MMR we instead calculated dz using virtual
! temperature and pressure.
- T_virtual = state%t(icol,k) * (1.0 + qv_dry(icol,k)*(1.0*mwdry/mwh2o - 1.0))
- dz(icol,k) = (rair/gravit) * state%pdeldry(icol,k) * T_virtual / state%pmiddry(icol,k)
+
+ dz(icol,k) = state%zi(icol,k) - state%zi(icol,k+1)
th(icol,k) = state%t(icol,k)*inv_exner(icol,k)
end do
end do
@@ -1024,8 +1027,10 @@ subroutine micro_p3_tend(state, ptend, dtime, pbuf)
ite = state%ncol
kts = 1
kte = pver
+
+!OG do we want dry or wet pressure here?
pres = state%pmiddry(:,:)
- ! Initialize the raidation dependent variables.
+ ! Initialize the radiation dependent variables.
mu = 0.0_rtype !mucon
lambdac = 0.0_rtype !(mucon + 1._rtype)/dcon
dei = 50.0_rtype !deicon
@@ -1107,6 +1112,7 @@ subroutine micro_p3_tend(state, ptend, dtime, pbuf)
kte, & ! IN vertical index upper bound -
rel(its:ite,kts:kte), & ! OUT effective radius, cloud m
rei(its:ite,kts:kte), & ! OUT effective radius, ice m
+ reff_rain(its:ite,kts:kte), & ! OUT effective radius, rain m
rho_qi(its:ite,kts:kte), & ! OUT bulk density of ice kg m-3
do_predict_nc, & ! IN .true.=prognostic Nc, .false.=specified Nc
do_prescribed_CCN, & ! IN
@@ -1184,19 +1190,27 @@ subroutine micro_p3_tend(state, ptend, dtime, pbuf)
!================
!Since the host model needs wet mixing ratio tendencies(state vector has wet mixing ratios),
!we need to convert dry mixing ratios from P3 to wet mixing ratios before extracting tendencies
- !NOTE: water vapor mixing ratio argument in calculate_wetmmr_from_drymmr function has to be dry water vapor mixing ratio
-
- qv_wet_out(:ncol,:pver) = calculate_wetmmr_from_drymmr(ncol, pver, qv_dry, qv_dry)
- cldliq(:ncol,:pver) = calculate_wetmmr_from_drymmr(ncol, pver, cldliq, qv_dry)
- numliq(:ncol,:pver) = calculate_wetmmr_from_drymmr(ncol, pver, numliq, qv_dry)
- rain(:ncol,:pver) = calculate_wetmmr_from_drymmr(ncol, pver, rain, qv_dry)
- numrain(:ncol,:pver) = calculate_wetmmr_from_drymmr(ncol, pver, numrain, qv_dry)
- ice(:ncol,:pver) = calculate_wetmmr_from_drymmr(ncol, pver, ice, qv_dry)
- numice(:ncol,:pver) = calculate_wetmmr_from_drymmr(ncol, pver, numice, qv_dry)
- qm(:ncol,:pver) = calculate_wetmmr_from_drymmr(ncol, pver, qm, qv_dry)
- rimvol(:ncol,:pver) = calculate_wetmmr_from_drymmr(ncol, pver, rimvol, qv_dry)
-
- temp(:ncol,:pver) = th(:ncol,:pver)/inv_exner(:ncol,:pver)
+
+ ratio_local(:ncol,:pver) = state%pdeldry(:ncol,:pver)/state%pdel(:ncol,:pver)
+
+ qv_wet_out (:ncol,:pver) = qv_dry (:ncol,:pver) *ratio_local(:ncol,:pver)
+ cldliq (:ncol,:pver) = cldliq (:ncol,:pver) *ratio_local(:ncol,:pver)
+ numliq (:ncol,:pver) = numliq (:ncol,:pver) *ratio_local(:ncol,:pver)
+ rain (:ncol,:pver) = rain (:ncol,:pver) *ratio_local(:ncol,:pver)
+ numrain (:ncol,:pver) = numrain(:ncol,:pver) *ratio_local(:ncol,:pver)
+ ice (:ncol,:pver) = ice (:ncol,:pver) *ratio_local(:ncol,:pver)
+ qm (:ncol,:pver) = qm (:ncol,:pver) *ratio_local(:ncol,:pver)
+ numice (:ncol,:pver) = numice (:ncol,:pver) *ratio_local(:ncol,:pver)
+ rimvol (:ncol,:pver) = rimvol (:ncol,:pver) *ratio_local(:ncol,:pver)
+
+ !compute temperature tendency as calculated by P3
+ dtemp(:ncol,:pver) = th(:ncol,:pver)/inv_exner(:ncol,:pver) - state%t(:ncol,:pver)
+ !rescale temperature tendency to conserve enthalpy:
+ !physics is supposed to conserve quantity dp*(cpdry*T+Lv*qv+Ll*ql) with dp=wetdp, but
+ !since P3 is dry, it conserves it for drydp. Scaling of temperature tendencies is required to fix it.
+ dtemp(:ncol,:pver) = dtemp(:ncol,:pver) *ratio_local(:ncol,:pver)
+
+ temp(:ncol,:pver) = dtemp(:ncol,:pver) + state%t(:ncol,:pver)
ptend%s(:ncol,:pver) = cpair*( temp(:ncol,:pver) - state%t(:ncol,:pver) )/dtime
ptend%q(:ncol,:pver,1) = ( max(0._rtype,qv_wet_out(:ncol,:pver) ) - state%q(:ncol,:pver,1) )/dtime
ptend%q(:ncol,:pver,ixcldliq) = ( max(0._rtype,cldliq(:ncol,:pver) ) - state%q(:ncol,:pver,ixcldliq) )/dtime
@@ -1341,23 +1355,15 @@ subroutine micro_p3_tend(state, ptend, dtime, pbuf)
!!
!! Rain/Snow effective diameter
!!
- drout2 = 0._rtype
- reff_rain = 0._rtype
aqrain = 0._rtype
anrain = 0._rtype
freqr = 0._rtype
! Prognostic precipitation
where (rain(:ncol,top_lev:) >= 1.e-7_rtype)
- drout2(:ncol,top_lev:) = avg_diameter( &
- rain(:ncol,top_lev:), &
- numrain(:ncol,top_lev:) * rho(:ncol,top_lev:), &
- rho(:ncol,top_lev:), rho_h2o)
-
aqrain(:ncol,top_lev:) = rain(:ncol,top_lev:) * cld_frac_r(:ncol,top_lev:)
anrain(:ncol,top_lev:) = numrain(:ncol,top_lev:) * cld_frac_r(:ncol,top_lev:)
freqr(:ncol,top_lev:) = cld_frac_r(:ncol,top_lev:)
- reff_rain(:ncol,top_lev:) = drout2(:ncol,top_lev:) * &
- 1.5_rtype * 1.e6_rtype
+ reff_rain(:ncol,top_lev:) = reff_rain(:ncol,top_lev:) * 1.e6_rtype
end where
!====================== COSP Specific Outputs START ======================!
diff --git a/components/eam/src/physics/p3/scream/micro_p3_utils.F90 b/components/eam/src/physics/p3/scream/micro_p3_utils.F90
index da390e48d324..979cfde9ac8f 100644
--- a/components/eam/src/physics/p3/scream/micro_p3_utils.F90
+++ b/components/eam/src/physics/p3/scream/micro_p3_utils.F90
@@ -109,7 +109,7 @@ subroutine micro_p3_utils_init(cpair,rair,rh2o,rhoh2o,mwh2o,mwdry,gravit,latvap,
piov6 = pi*sxth
! maximum total ice concentration (sum of all categories)
- max_total_ni = 500.e+3_rtype !(m)
+ max_total_ni = 740.e+3_rtype !(m)
! droplet concentration (m-3)
nccnst = 200.e+6_rtype
diff --git a/components/eamxx/CMakeLists.txt b/components/eamxx/CMakeLists.txt
index 7839e3a4e033..e2aae3e97b32 100644
--- a/components/eamxx/CMakeLists.txt
+++ b/components/eamxx/CMakeLists.txt
@@ -12,6 +12,10 @@ if (${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.12.0")
cmake_policy(SET CMP0074 NEW)
endif()
+set (EAMXX_VERSION_MAJOR 1)
+set (EAMXX_VERSION_MINOR 0)
+set (EAMXX_VERSION_PATCH 0)
+
if ($ENV{SCREAM_FORCE_CONFIG_FAIL})
message(FATAL_ERROR "Failed, as instructed by environment")
endif()
@@ -20,12 +24,13 @@ endif()
set (EKAT_CMAKE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/../../externals/ekat/cmake)
list(APPEND CMAKE_MODULE_PATH
${CMAKE_CURRENT_SOURCE_DIR}/cmake
+ ${CMAKE_CURRENT_SOURCE_DIR}/../cmake/modules
${EKAT_CMAKE_PATH}
- ${EKAT_CMAKE_PATH}/pkg_build
+ ${EKAT_CMAKE_PATH}/tpls
)
if (SCREAM_CIME_BUILD)
list(APPEND CMAKE_MODULE_PATH
- ${CMAKE_CURRENT_SOURCE_DIR}/cmake/cime)
+ ${CMAKE_CURRENT_SOURCE_DIR}/cmake/cime)
endif ()
if (Kokkos_ENABLE_CUDA)
@@ -51,12 +56,8 @@ endif()
# to be on. For now, simply ensure Kokkos Serial is enabled
option (Kokkos_ENABLE_SERIAL "" ON)
-# MAM support requires C++17 -- hopefully SCREAM itself will get there soon
-if (SCREAM_ENABLE_MAM)
- set(CMAKE_CXX_STANDARD 17)
-else()
- set(CMAKE_CXX_STANDARD 14)
-endif()
+# We want to use C++17 in EAMxx
+set(CMAKE_CXX_STANDARD 17)
if (NOT SCREAM_CIME_BUILD)
project(SCREAM CXX C Fortran)
@@ -66,18 +67,19 @@ if (NOT SCREAM_CIME_BUILD)
list(REMOVE_ITEM CMAKE_CXX_IMPLICIT_LINK_LIBRARIES "ifport")
endif()
- # Print the sha of the last commit (useful to double check which version was tested on CDash)
- execute_process (COMMAND git rev-parse HEAD
- WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
- OUTPUT_VARIABLE LAST_GIT_COMMIT_SHA
- OUTPUT_STRIP_TRAILING_WHITESPACE)
- set(LAST_GIT_COMMIT_SHA ${LAST_GIT_COMMIT_SHA} CACHE STRING "The sha of the last git commit.")
- message(STATUS "The sha of the last commit is ${LAST_GIT_COMMIT_SHA}")
else()
# Ensure our languages are all enabled
enable_language(C CXX Fortran)
endif()
+# Print the sha of the last commit (useful to double check which version was tested on CDash)
+execute_process (COMMAND git rev-parse HEAD
+ WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
+ OUTPUT_VARIABLE LAST_GIT_COMMIT_SHA
+ OUTPUT_STRIP_TRAILING_WHITESPACE)
+set(EAMXX_GIT_VERSION ${LAST_GIT_COMMIT_SHA} CACHE STRING "The sha of the last git commit.")
+message(STATUS "The sha of the last commit is ${EAMXX_GIT_VERSION}")
+
set(SCREAM_DOUBLE_PRECISION TRUE CACHE BOOL "Set to double precision (default True)")
# Set the scream base and src directory, to be used across subfolders
@@ -216,10 +218,8 @@ if (NOT SCREAM_SMALL_KERNELS)
set(EKAT_DISABLE_WORKSPACE_SHARING TRUE CACHE STRING "")
endif()
-### The following test only runs on quartz or docker container
-if (NOT DEFINED RUN_ML_CORRECTION_TEST)
- set(RUN_ML_CORRECTION_TEST FALSE)
-endif()
+# For now, only used in share/grid/remap/refining_remapper_rma.*pp
+option (EAMXX_ENABLE_EXPERIMENTAL_CODE "Compile one-sided MPI for refining remappers" OFF)
# Handle input root
if (SCREAM_MACHINE AND NOT SCREAM_INPUT_ROOT)
@@ -272,6 +272,7 @@ endif()
set (SCREAM_DATA_DIR ${SCREAM_INPUT_ROOT}/atm/scream CACHE PATH "" FORCE)
set (TOPO_DATA_DIR ${SCREAM_INPUT_ROOT}/atm/cam/topo CACHE PATH "" FORCE)
+set (IOP_DATA_DIR ${SCREAM_INPUT_ROOT}/atm/cam/scam/iop CACHE PATH "" FORCE)
#
# Handle test level
@@ -472,6 +473,7 @@ if (SCREAM_CIME_BUILD AND SCREAM_DYN_TARGET STREQUAL "theta-l_kokkos")
set (DEFAULT_SCREAM_DYNAMICS_DYCORE "Homme")
endif()
+option (SCREAM_ENABLE_ML_CORRECTION "Whether to enable ML correction parametrization" OFF)
set(SCREAM_DYNAMICS_DYCORE ${DEFAULT_SCREAM_DYNAMICS_DYCORE} CACHE STRING
"The name of the dycore to be used for dynamics. If NONE, then any code/test requiring dynamics is disabled.")
@@ -503,8 +505,6 @@ if (NOT DEFINED ENV{SCREAM_FAKE_ONLY})
if (NOT SCREAM_LIB_ONLY)
add_subdirectory(tests)
- include(BuildCprnc)
- BuildCprnc()
endif()
# Generate scream_config.h and scream_config.f
diff --git a/components/eamxx/cime_config/buildlib_cmake b/components/eamxx/cime_config/buildlib_cmake
index 155e44efc3c7..bfe56901837c 100755
--- a/components/eamxx/cime_config/buildlib_cmake
+++ b/components/eamxx/cime_config/buildlib_cmake
@@ -25,11 +25,18 @@ def buildlib(bldroot, installpath, case):
expect (len(tokens) % 2 == 0, "Error! SCREAM_CMAKE_OPTIONS should contain a string of the form 'option1 value1 option2 value2 ...'\n")
it = iter(tokens)
- cmake_args = ""
+ # Parse all options and put them in a dict first. This allows overwriting options via
+ # ./xmlchange --append SCREAM_CMAKE_OPTIONS="NAME VALUE"
+ # rather than having to reset them all, running ./xmlquery first to see what the others are
+ cmake_args_dict = {}
for item in it:
- cmake_args += " -D{}={}".format(item,next(it))
+ cmake_args_dict[item] = next(it)
+
+ cmake_args = ""
+ for k,v in cmake_args_dict.items():
+ cmake_args += f" -D{k}={v}"
- atm_dyn_tgt = case.get_value("ATM_DYN_TARGET")
+ atm_dyn_tgt = case.get_value("CAM_TARGET")
cmake_args += " -DSCREAM_DYN_TARGET={}".format(atm_dyn_tgt)
cmake_args += " -DSCREAM_CIME_BUILD=ON"
diff --git a/components/eamxx/cime_config/buildnml b/components/eamxx/cime_config/buildnml
index 0f4dceaf0308..9fa6fbbb933c 100755
--- a/components/eamxx/cime_config/buildnml
+++ b/components/eamxx/cime_config/buildnml
@@ -26,7 +26,7 @@ import os, sys
from CIME.case import Case
from CIME.utils import expect, safe_copy, SharedArea, run_cmd_no_fail
-from CIME.buildlib import parse_input
+from CIME.buildnml import parse_input
from eamxx_buildnml import create_raw_xml_file, create_input_files, create_input_data_list_file, \
do_cime_vars_on_yaml_output_files
diff --git a/components/eamxx/cime_config/config_component.xml b/components/eamxx/cime_config/config_component.xml
index 2106d7ca2e5c..0aeccd1e6d24 100644
--- a/components/eamxx/cime_config/config_component.xml
+++ b/components/eamxx/cime_config/config_component.xml
@@ -13,7 +13,7 @@
Name of atmospheric component
-
+
char
theta-l_kokkos
theta-l_kokkos
@@ -29,6 +29,8 @@
SCREAM_NP 4 SCREAM_NUM_VERTICAL_LEV 72 SCREAM_NUM_TRACERS 10
SCREAM_NP 4 SCREAM_NUM_VERTICAL_LEV 128 SCREAM_NUM_TRACERS 10
+ SCREAM_NP 4 SCREAM_NUM_VERTICAL_LEV 72 SCREAM_NUM_TRACERS 10
+ SCREAM_NP 4 SCREAM_NUM_VERTICAL_LEV 128 SCREAM_NUM_TRACERS 10
build_component_scream
env_build.xml
diff --git a/components/eamxx/cime_config/eamxx_buildnml.py b/components/eamxx/cime_config/eamxx_buildnml.py
index 1e7626d46f47..db05bd2e20e1 100644
--- a/components/eamxx/cime_config/eamxx_buildnml.py
+++ b/components/eamxx/cime_config/eamxx_buildnml.py
@@ -8,24 +8,27 @@
from collections import OrderedDict
import xml.etree.ElementTree as ET
-
-_CIMEROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..","..","..","cime")
-sys.path.append(os.path.join(_CIMEROOT, "CIME", "Tools"))
+import xml.dom.minidom as md
# Add path to scream libs
sys.path.append(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "scripts"))
-# Cime imports
-from standard_script_setup import * # pylint: disable=wildcard-import
-from CIME.utils import expect, safe_copy, SharedArea, run_cmd_no_fail
-
# SCREAM imports
from eamxx_buildnml_impl import get_valid_selectors, get_child, refine_type, \
- resolve_all_inheritances, gen_atm_proc_group, check_all_values
+ resolve_all_inheritances, gen_atm_proc_group, check_all_values, find_node
+from atm_manip import apply_atm_procs_list_changes_from_buffer, apply_non_atm_procs_list_changes_from_buffer
-from utils import ensure_yaml
+from utils import ensure_yaml # pylint: disable=no-name-in-module
ensure_yaml()
import yaml
+from yaml_utils import Bools,Ints,Floats,Strings,array_representer
+
+_CIMEROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..","..","..","cime")
+sys.path.append(os.path.join(_CIMEROOT, "CIME", "Tools"))
+
+# Cime imports
+from standard_script_setup import * # pylint: disable=wildcard-import
+from CIME.utils import expect, safe_copy, SharedArea
logger = logging.getLogger(__name__) # pylint: disable=undefined-variable
@@ -41,7 +44,7 @@
# Examples:
# - constraints="ge 0; lt 4" means the value V must satisfy V>=0 && V<4.
# - constraints="mod 2 eq 0" means the value V must be a multiple of 2.
-METADATA_ATTRIBS = ("type", "valid_values", "locked", "constraints", "inherit")
+METADATA_ATTRIBS = ("type", "valid_values", "locked", "constraints", "inherit", "doc", "append")
###############################################################################
def do_cime_vars(entry, case, refine=False, extra=None):
@@ -101,6 +104,107 @@ def do_cime_vars(entry, case, refine=False, extra=None):
return entry
+###############################################################################
+def perform_consistency_checks(case, xml):
+###############################################################################
+ """
+ There may be separate parts of the xml that must satisfy some consistency
+ Here, we run any such check, so we can catch errors before submit time
+
+ >>> from eamxx_buildnml_impl import MockCase
+ >>> xml_str = '''
+ ...
+ ...
+ ... 3
+ ...
+ ...
+ ... '''
+ >>> import xml.etree.ElementTree as ET
+ >>> xml = ET.fromstring(xml_str)
+ >>> case = MockCase({'ATM_NCPL':'24', 'REST_N':24, 'REST_OPTION':'nsteps'})
+ >>> perform_consistency_checks(case,xml)
+ >>> case = MockCase({'ATM_NCPL':'24', 'REST_N':2, 'REST_OPTION':'nsteps'})
+ >>> perform_consistency_checks(case,xml)
+ Traceback (most recent call last):
+ CIME.utils.CIMEError: ERROR: rrtmgp::rad_frequency incompatible with restart frequency.
+ Please, ensure restart happens on a step when rad is ON
+ >>> case = MockCase({'ATM_NCPL':'24', 'REST_N':10800, 'REST_OPTION':'nseconds'})
+ >>> perform_consistency_checks(case,xml)
+ >>> case = MockCase({'ATM_NCPL':'24', 'REST_N':7200, 'REST_OPTION':'nseconds'})
+ >>> perform_consistency_checks(case,xml)
+ Traceback (most recent call last):
+ CIME.utils.CIMEError: ERROR: rrtmgp::rad_frequency incompatible with restart frequency.
+ Please, ensure restart happens on a step when rad is ON
+ rest_tstep: 7200
+ rad_testep: 10800.0
+ >>> case = MockCase({'ATM_NCPL':'24', 'REST_N':180, 'REST_OPTION':'nminutes'})
+ >>> perform_consistency_checks(case,xml)
+ >>> case = MockCase({'ATM_NCPL':'24', 'REST_N':120, 'REST_OPTION':'nminutes'})
+ >>> perform_consistency_checks(case,xml)
+ Traceback (most recent call last):
+ CIME.utils.CIMEError: ERROR: rrtmgp::rad_frequency incompatible with restart frequency.
+ Please, ensure restart happens on a step when rad is ON
+ rest_tstep: 7200
+ rad_testep: 10800.0
+ >>> case = MockCase({'ATM_NCPL':'24', 'REST_N':6, 'REST_OPTION':'nhours'})
+ >>> perform_consistency_checks(case,xml)
+ >>> case = MockCase({'ATM_NCPL':'24', 'REST_N':8, 'REST_OPTION':'nhours'})
+ >>> perform_consistency_checks(case,xml)
+ Traceback (most recent call last):
+ CIME.utils.CIMEError: ERROR: rrtmgp::rad_frequency incompatible with restart frequency.
+ Please, ensure restart happens on a step when rad is ON
+ rest_tstep: 28800
+ rad_testep: 10800.0
+ >>> case = MockCase({'ATM_NCPL':'12', 'REST_N':2, 'REST_OPTION':'ndays'})
+ >>> perform_consistency_checks(case,xml)
+ >>> case = MockCase({'ATM_NCPL':'10', 'REST_N':2, 'REST_OPTION':'ndays'})
+ >>> perform_consistency_checks(case,xml)
+ Traceback (most recent call last):
+ CIME.utils.CIMEError: ERROR: rrtmgp::rad_frequency incompatible with restart frequency.
+ Please, ensure restart happens on a step when rad is ON
+ For daily (or less frequent) restart, rad_frequency must divide ATM_NCPL
+ """
+
+ # RRTMGP can be supercycled. Restarts cannot fall in the middle
+ # of a rad superstep
+ rrtmgp = find_node(xml,"rrtmgp")
+ rest_opt = case.get_value("REST_OPTION")
+ if rrtmgp is not None and rest_opt is not None and rest_opt not in ["never","none"]:
+ rest_n = int(case.get_value("REST_N"))
+ rad_freq = int(find_node(rrtmgp,"rad_frequency").text)
+ atm_ncpl = int(case.get_value("ATM_NCPL"))
+ atm_tstep = 86400 / atm_ncpl
+ rad_tstep = atm_tstep * rad_freq
+
+
+ if rad_freq==1:
+ pass
+ elif rest_opt in ["nsteps", "nstep"]:
+ expect (rest_n % rad_freq == 0,
+ "rrtmgp::rad_frequency incompatible with restart frequency.\n"
+ " Please, ensure restart happens on a step when rad is ON")
+ elif rest_opt in ["nseconds", "nsecond", "nminutes", "nminute", "nhours", "nhour"]:
+ if rest_opt in ["nseconds", "nsecond"]:
+ factor = 1
+ elif rest_opt in ["nminutes", "nminute"]:
+ factor = 60
+ else:
+ factor = 3600
+
+ rest_tstep = factor*rest_n
+ expect (rest_tstep % rad_tstep == 0,
+ "rrtmgp::rad_frequency incompatible with restart frequency.\n"
+ " Please, ensure restart happens on a step when rad is ON\n"
+ f" rest_tstep: {rest_tstep}\n"
+ f" rad_testep: {rad_tstep}")
+
+ else:
+ # for "very infrequent" restarts, we request rad_freq to divide atm_ncpl
+ expect (atm_ncpl % rad_freq ==0,
+ "rrtmgp::rad_frequency incompatible with restart frequency.\n"
+ " Please, ensure restart happens on a step when rad is ON\n"
+ " For daily (or less frequent) restart, rad_frequency must divide ATM_NCPL")
+
###############################################################################
def ordered_dump(data, item, Dumper=yaml.SafeDumper, **kwds):
###############################################################################
@@ -108,6 +212,7 @@ def ordered_dump(data, item, Dumper=yaml.SafeDumper, **kwds):
Copied from: https://stackoverflow.com/a/21912744
Added ability to pass filename
"""
+
class OrderedDumper(Dumper):
pass
def _dict_representer(dumper, data):
@@ -116,6 +221,12 @@ def _dict_representer(dumper, data):
data.items())
OrderedDumper.add_representer(OrderedDict, _dict_representer)
+ # These allow to dump arrays with a tag specifying the type
+ OrderedDumper.add_representer(Bools, array_representer)
+ OrderedDumper.add_representer(Ints, array_representer)
+ OrderedDumper.add_representer(Floats, array_representer)
+ OrderedDumper.add_representer(Strings, array_representer)
+
if isinstance(item, str) and item.endswith(".yaml"):
# Item is a filepath
with open(item, "w") as fd:
@@ -233,27 +344,50 @@ def evaluate_selectors(element, case, ez_selectors):
CIME.utils.CIMEError: ERROR: child 'var1' element without selectors occurred after other parameter elements for this parameter
"""
- child_values = {} # elem_name -> evaluated XML element
+ selected_child = {} # elem_name -> evaluated XML element
children_to_remove = []
+ child_base_value = {} # map elem name to values to be appended to if append=="base"
+ child_type = {} # map elem name to its type (since only first entry may have type specified)
for child in element:
- child_name = child.tag
- child_val = child.text
-
# Note: in our system, an XML element is either a "node" (has children)
# or a "leaf" (has a value).
has_children = len(child) > 0
if has_children:
evaluate_selectors(child, case, ez_selectors)
else:
+ child_name = child.tag
+ child.text = None if child.text is None else child.text.strip(' \n')
+ child_val = child.text
selectors = child.attrib
+
+ if child_name not in child_type:
+ child_type[child_name] = selectors["type"] if "type" in selectors.keys() else "unset"
+
+ is_array = child_type[child_name].startswith("array")
+ expect (is_array or "append" not in selectors.keys(),
+ "The 'append' metadata attribute is only supported for entries of array type\n"
+ f" param name: {child_name}\n"
+ f" param type: {child_type[child_name]}")
+
+ append = selectors["append"] if "append" in selectors.keys() else "no"
+ expect (append in ["no","base","last"],
+ "Unrecognized value for 'append' attribute\n" +
+ f" param name : {child_name}\n" +
+ f" append value: {append}\n" +
+ " valid values: base, last\n")
if selectors:
all_match = True
- is_first = False
+ had_case_selectors = False
for k, v in selectors.items():
# Metadata attributes are used only when it's time to generate the input files
if k in METADATA_ATTRIBS:
+ if k=="type" and child_name in selected_child.keys():
+ if "type" in selected_child[child_name].attrib:
+ expect (v==selected_child[child_name].attrib["type"],
+ f"The 'type' attribute of {child_name} is not consistent across different selectors")
continue
+ had_case_selectors = True
val_re = re.compile(v)
if k in ez_selectors:
@@ -279,39 +413,57 @@ def evaluate_selectors(element, case, ez_selectors):
expect(val is not None,
"Bad selector '{0}' for child '{1}'. '{0}' is not a valid case value or easy selector".format(k, child_name))
-
if val is None or val_re.match(val) is None:
all_match = False
+ children_to_remove.append(child)
break
if all_match:
- if child_name in child_values:
- orig_child = child_values[child_name]
- orig_child.text = do_cime_vars(child_val, case)
+ if child_name in selected_child.keys():
+ orig_child = selected_child[child_name]
+ if append=="base":
+ orig_child.text = child_base_value[child_name] + "," + child.text
+ elif append=="last":
+ orig_child.text = orig_child.text + "," + child.text
+ else:
+ orig_child.text = child.text
+ children_to_remove.append(child)
else:
- is_first = True
- child_values[child_name] = child
- child.text = do_cime_vars(child_val, case)
+ # If all selectors were the METADATA_ATTRIB ones, then this is the "base" value
+ if not had_case_selectors:
+ child_base_value[child_name] = child.text
+ selected_child[child_name] = child
# Make a copy of selectors.keys(), since selectors=child.attrib,
# and we might delete an entry, causing the error
# RuntimeError: dictionary changed size during iteration
- for k in list(selectors.keys()):
- if k not in METADATA_ATTRIBS:
- del child.attrib[k]
-
- if not is_first:
- children_to_remove.append(child)
else:
- expect(child_name not in child_values,
+ expect(child_name not in selected_child,
"child '{}' element without selectors occurred after other parameter elements for this parameter".format(child_name))
- child_values[child_name] = child
+ child_base_value[child_name] = child.text
+ selected_child[child_name] = child
child.text = do_cime_vars(child_val, case)
for child_to_remove in children_to_remove:
element.remove(child_to_remove)
+###############################################################################
+def expand_cime_vars(element, case):
+###############################################################################
+ """
+ Expand all CIME variables inside an XML node text
+ """
+
+ for child in element:
+ # Note: in our system, an XML element is either a "node" (has children)
+ # or a "leaf" (has a value).
+ has_children = len(child) > 0
+ if has_children:
+ expand_cime_vars(child, case)
+ else:
+ child.text = do_cime_vars(child.text, case)
+
###############################################################################
def _create_raw_xml_file_impl(case, xml):
###############################################################################
@@ -327,7 +479,7 @@ def _create_raw_xml_file_impl(case, xml):
...
...
...
- ... (P1,P2)
+ ... P1,P2
...
... zero
...
@@ -350,7 +502,7 @@ def _create_raw_xml_file_impl(case, xml):
>>> import pprint
>>> pp = pprint.PrettyPrinter(indent=4)
>>> pp.pprint(d)
- OrderedDict([ ('atm_procs_list', '(P1,P2)'),
+ OrderedDict([ ('atm_procs_list', 'P1,P2'),
('prop2', 'one'),
('prop1', 'zero'),
('P1', OrderedDict([('prop1', 'two')])),
@@ -364,7 +516,7 @@ def _create_raw_xml_file_impl(case, xml):
...
...
...
- ... (P1,P2)
+ ... P1,P2
...
... zero
...
@@ -388,7 +540,7 @@ def _create_raw_xml_file_impl(case, xml):
>>> import pprint
>>> pp = pprint.PrettyPrinter(indent=4)
>>> pp.pprint(d)
- OrderedDict([ ('atm_procs_list', '(P1,P2)'),
+ OrderedDict([ ('atm_procs_list', 'P1,P2'),
('prop2', 'one'),
('prop1', 'zero'),
('P1', OrderedDict([('prop1', 'two_selected')])),
@@ -402,7 +554,7 @@ def _create_raw_xml_file_impl(case, xml):
...
...
...
- ... (P1,P2)
+ ... P1,P2
...
... 1
... true
@@ -432,7 +584,7 @@ def _create_raw_xml_file_impl(case, xml):
>>> import pprint
>>> pp = pprint.PrettyPrinter(indent=4)
>>> pp.pprint(d)
- OrderedDict([ ('atm_procs_list', '(P1,P2)'),
+ OrderedDict([ ('atm_procs_list', 'P1,P2'),
('prop2', 'one'),
('number_of_subcycles', 1),
('enable_precondition_checks', True),
@@ -457,55 +609,74 @@ def _create_raw_xml_file_impl(case, xml):
get_child(xml,"generated_files",remove=True)
selectors = get_valid_selectors(xml)
- # 1. Resolve all inheritances, and evaluate all selectors
+ # 1. Evaluate all selectors
evaluate_selectors(xml, case, selectors)
+
+ # 2. Apply all changes in the SCREAM_ATMCHANGE_BUFFER that may alter
+ # which atm processes are used
+ apply_atm_procs_list_changes_from_buffer (case,xml)
+
+ # 3. Resolve all inheritances
resolve_all_inheritances(xml)
- # 2. Grab the atmosphere_processes macro list, with all the defaults
+ # 4. Expand any CIME var that appears inside XML nodes text
+ expand_cime_vars(xml,case)
+
+ # 5. Grab the atmosphere_processes macro list, with all the defaults
atm_procs_defaults = get_child(xml,"atmosphere_processes_defaults",remove=True)
- # 3. Get atm procs list
+ # 6. Get atm procs list
atm_procs_list = get_child(atm_procs_defaults,"atm_procs_list",remove=True)
- # 4. Form the nested list of atm procs needed, append to atmosphere_driver section
- atm_procs = gen_atm_proc_group (atm_procs_list.text, atm_procs_defaults)
+ # 7. Form the nested list of atm procs needed, append to atmosphere_driver section
+ atm_procs = gen_atm_proc_group(atm_procs_list.text, atm_procs_defaults)
atm_procs.tag = "atmosphere_processes"
xml.append(atm_procs)
+ # 8. Apply all changes in the SCREAM_ATMCHANGE_BUFFER that do not alter
+ # which atm processes are used
+ apply_non_atm_procs_list_changes_from_buffer (case,xml)
+
+ perform_consistency_checks (case, xml)
+
return xml
###############################################################################
def create_raw_xml_file(case, caseroot):
###############################################################################
"""
- Create the raw $case/namelist_scream.xml file. This file is intended to be
- modified by users via the atmchange script if they want
+ Create the $case/namelist_scream.xml file. This file is intended to be
+ modified by users via the atmchange script if they want,
to make tweaks to input files (yaml and/or nml).
+ Note: user calls to atmchange do two things: 1) they add the change
+ to the SCREAM_ATMCHANGE_BUFFER case variable, and 2) they
+ call this function, which regenerates the scream xml file from
+ the defaults, applying all buffered changes.
"""
- src = os.path.join(case.get_value("SRCROOT"), "components/eamxx/cime_config/namelist_defaults_scream.xml")
-
raw_xml_file = os.path.join(caseroot, "namelist_scream.xml")
- with open(src, "r") as fd:
- defaults = ET.parse(fd)
- raw_xml = _create_raw_xml_file_impl(case, defaults.getroot())
-
if os.path.exists(raw_xml_file) and case.get_value("SCREAM_HACK_XML"):
print("{} already exists and SCREAM_HACK_XML is on, will not overwrite. Remove to regenerate".format(raw_xml_file))
else:
- if os.path.exists(raw_xml_file):
- print("Regenerating {}. Manual edits will be lost.".format(raw_xml_file))
+ print("Regenerating {}. Manual edits will be lost.".format(raw_xml_file))
+
+ src = os.path.join(case.get_value("SRCROOT"), "components/eamxx/cime_config/namelist_defaults_scream.xml")
+
+ # Some atmchanges will require structural changes to the XML file and must
+ # be processed early by treating them as if they were made to the defaults file.
+ with open(src, "r") as fd:
+ defaults = ET.parse(fd).getroot()
+ raw_xml = _create_raw_xml_file_impl(case, defaults)
check_all_values(raw_xml)
with open(raw_xml_file, "w") as fd:
- ET.ElementTree(raw_xml).write(fd, method='xml', encoding="unicode")
-
- # Now that we have our namelist_scream.xml file, we can apply buffered
- # atmchange requests.
- atmchg_buffer = case.get_value("SCREAM_ATMCHANGE_BUFFER")
- if atmchg_buffer:
- run_cmd_no_fail("{}/atmchange {} --no-buffer".format(caseroot, atmchg_buffer))
+ # dom has better pretty printing than ET in older python versions < 3.9
+ dom = md.parseString(ET.tostring(raw_xml, encoding="unicode"))
+ pretty_xml = dom.toprettyxml(indent=" ")
+ pretty_xml = os.linesep.join([s for s in pretty_xml.splitlines()
+ if s.strip()])
+ fd.write(pretty_xml)
###############################################################################
def convert_to_dict(element):
@@ -516,8 +687,8 @@ def convert_to_dict(element):
...
... 1
...
- ... 2,3
- ... two,three
+ ... 2,3
+ ... two,three
...
...
... '''
@@ -535,14 +706,14 @@ def convert_to_dict(element):
result = OrderedDict()
for child in element:
child_name = child.tag.replace("__", " ")
- child_val = child.text
has_children = len(child) > 0
- if not has_children:
+ if has_children:
+ result[child_name] = convert_to_dict(child)
+ else:
+ child_val = child.text
force_type = None if "type" not in child.attrib.keys() else child.attrib["type"]
result[child_name] = refine_type(child_val,force_type=force_type)
- else:
- result[child_name] = convert_to_dict(child)
return result
@@ -732,8 +903,10 @@ def create_input_data_list_file(caseroot):
fd.write("scream_dl_input_{} = {}\n".format(idx, file_path))
###############################################################################
-def do_cime_vars_on_yaml_output_files(case,caseroot):
+def do_cime_vars_on_yaml_output_files(case, caseroot):
###############################################################################
+ from yaml_utils import array_constructor
+
rundir = case.get_value("RUNDIR")
eamxx_xml_file = os.path.join(caseroot, "namelist_scream.xml")
@@ -744,11 +917,18 @@ def do_cime_vars_on_yaml_output_files(case,caseroot):
out_files_xml = get_child(scorpio,"output_yaml_files",must_exist=False)
out_files = out_files_xml.text.split(",") if (out_files_xml is not None and out_files_xml.text is not None) else []
+ # Add array parsing knowledge to yaml loader
+ loader = yaml.SafeLoader
+ loader.add_constructor("!bools",array_constructor)
+ loader.add_constructor("!ints",array_constructor)
+ loader.add_constructor("!floats",array_constructor)
+ loader.add_constructor("!strings",array_constructor)
+
# We will also change the 'output_yaml_files' entry in scream_input.yaml,
# to point to the copied files in $rundir/data
output_yaml_files = []
scream_input_file = os.path.join(rundir,'data','scream_input.yaml')
- scream_input = yaml.safe_load(open(scream_input_file,"r"))
+ scream_input = yaml.load(open(scream_input_file,"r"),Loader=loader)
# Determine the physics grid type for use in CIME-var substitution.
pgt = 'GLL'
@@ -768,7 +948,7 @@ def do_cime_vars_on_yaml_output_files(case,caseroot):
safe_copy(src_yaml,dst_yaml)
# Now load dst file, and process any CIME var present (if any)
- content = yaml.safe_load(open(dst_yaml,"r"))
+ content = yaml.load(open(dst_yaml,"r"),Loader=loader)
do_cime_vars(content,case,refine=True,
extra={'PHYSICS_GRID_TYPE': pgt})
@@ -778,7 +958,7 @@ def do_cime_vars_on_yaml_output_files(case,caseroot):
# Hence, change default output settings to perform a single AVERAGE step at the end of the run
if case.get_value("TESTCASE") in ["ERP", "ERS"]:
test_env = case.get_env('test')
- stop_n = test_env.get_value("STOP_N")
+ stop_n = int(test_env.get_value("STOP_N"))
stop_opt = test_env.get_value("STOP_OPTION")
content['output_control']['Frequency'] = stop_n
content['output_control']['frequency_units'] = stop_opt
@@ -791,7 +971,6 @@ def do_cime_vars_on_yaml_output_files(case,caseroot):
# Now update the output yaml files entry, and dump the new content
# of the scream input to YAML file
- print ("out list: {}".format(",".join(output_yaml_files)))
scream_input["Scorpio"]["output_yaml_files"] = refine_type(",".join(output_yaml_files),"array(string)")
with open(scream_input_file, "w") as fd:
fd.write(
diff --git a/components/eamxx/cime_config/eamxx_buildnml_impl.py b/components/eamxx/cime_config/eamxx_buildnml_impl.py
index aaf7fe9d9f17..2fa00b7f4a4b 100644
--- a/components/eamxx/cime_config/eamxx_buildnml_impl.py
+++ b/components/eamxx/cime_config/eamxx_buildnml_impl.py
@@ -5,6 +5,8 @@
sys.path.append(_CIMEROOT)
from CIME.utils import expect
+from yaml_utils import make_array
+
###############################################################################
class MockCase(object):
@@ -23,58 +25,7 @@ def get_value(self, key):
return None
###############################################################################
-def parse_string_as_list (string):
-###############################################################################
- """
- Takes a string representation of nested list and creates
- a nested list of stirng. For instance, with
- s = "(a,b,(c,d),e)
- l = parse_string_as_list
- we would have l = ['a', 'b', '(c,d)', 'e']
-
- >>> s = '(a,(b,c))'
- >>> l = parse_string_as_list(s)
- >>> len(l)
- 2
- >>> l[0] == 'a'
- True
- >>> l[1] == '(b,c)'
- True
- >>> ###### NOT STARTING/ENDING WITH PARENTHESES #######
- >>> s = '(a,b,'
- >>> l = parse_string_as_list(s)
- Traceback (most recent call last):
- ValueError: Input string must start with '(' and end with ')'.
- >>> ################ UNMATCHED PARENTHESES ##############
- >>> s = '(a,(b)'
- >>> l = parse_string_as_list(s)
- Traceback (most recent call last):
- ValueError: Unmatched parentheses in input string
- """
-
- if string[0]!='(' or string[-1]!=')':
- raise ValueError ("Input string must start with '(' and end with ')'.")
-
- sub_open = string.find('(',1)
- sub_close = string.rfind(')',0,-1)
- if not (sub_open>=0)==(sub_close>=0):
- raise ValueError ("Unmatched parentheses in input string")
-
- # Prevent empty string to pollute s.split()
- my_split = lambda str : [s for s in str.split(',') if s.strip() != '']
-
- if sub_open>=0:
- l = []
- l.extend(my_split(string[1:sub_open-1]))
- l.append(string[sub_open:sub_close+1])
- l.extend(my_split(string[sub_close+2:-1]))
- else:
- l = my_split(string[1:-1])
-
- return l
-
-###############################################################################
-def is_array_type (name):
+def is_array_type(name):
###############################################################################
"""
>>> is_array_type('array(T)')
@@ -84,10 +35,10 @@ def is_array_type (name):
>>> is_array_type('array(T)')
True
"""
- return name[0:6]=="array(" and name[-1]==")"
+ return name is not None and name[0:6]=="array(" and name[-1]==")"
###############################################################################
-def array_elem_type (name):
+def array_elem_type(name):
###############################################################################
"""
>>> print(array_elem_type('array(T)'))
@@ -200,94 +151,78 @@ def refine_type(entry, force_type=None):
>>> refine_type(e)==e
True
>>> e = 'a,b'
- >>> refine_type(e)==['a','b']
+ >>> refine_type(e,'array(string)')==['a','b']
True
>>> e = 'true,falsE'
- >>> refine_type(e)==[True,False]
+ >>> refine_type(e,'array(logical)')==[True,False]
True
>>> e = '1'
>>> refine_type(e,force_type='real')==1.0
True
- >>> e = '1,b'
- >>> refine_type(e)==[1,'b',True]
- Traceback (most recent call last):
- CIME.utils.CIMEError: ERROR: List '1,b' has inconsistent types inside
>>> e = '1.0'
>>> refine_type(e,force_type='my_type')
Traceback (most recent call last):
- NameError: Bad force_type: my_type
+ NameError: ERROR: Invalid/unsupported force type 'my_type'
>>> e = 'true,falsE'
>>> refine_type(e,'logical')
Traceback (most recent call last):
- CIME.utils.CIMEError: ERROR: Error! Invalid type 'logical' for an array.
+ ValueError: Could not refine 'true,falsE' as type 'logical'
>>> refine_type(e,'array(logical)')
[True, False]
>>> refine_type('', 'array(string)')
[]
- >>> refine_type('', 'array(float)')
+ >>> refine_type('', 'array(real)')
[]
- >>> refine_type(None, 'array(float)')
+ >>> refine_type(None, 'array(real)')
[]
"""
- # We want to preserve strings representing lists
-
- if entry:
- if (entry[0]=="(" and entry[-1]==")") or \
- (entry[0]=="[" and entry[-1]=="]") :
- expect (force_type is None or force_type == "string",
- "Error! Invalid force type '{}' for a string representing a list"
- .format(force_type))
- return entry
-
- if "," in entry:
- expect (force_type is None or is_array_type(force_type),
- "Error! Invalid type '{}' for an array.".format(force_type))
-
- elem_type = force_type if force_type is None else array_elem_type(force_type)
- result = [refine_type(item.strip(), force_type=elem_type) for item in entry.split(",") if item.strip() != ""]
- expected_type = type(result[0])
- for item in result[1:]:
- expect(isinstance(item, expected_type),
- "List '{}' has inconsistent types inside".format(entry))
-
- return result
-
- elif force_type is not None and is_array_type(force_type):
-
- return []
+ # If force type is unspecified, try to deduce it
+ if force_type is None:
+ expect (entry is not None,
+ "If an entry is None, you must specify the force_type")
+ else:
+ elem_valid = ["logical","integer","real","string","file"]
+ valid = elem_valid + ["array("+e+")" for e in elem_valid]
+ expect (force_type in valid, exc_type=NameError,
+ error_msg=f"Invalid/unsupported force type '{force_type}'")
+
+ if is_array_type(force_type):
+ elem_type = array_elem_type(force_type)
+ if entry:
+ try:
+ result = [refine_type(item.strip(), force_type=elem_type) for item in entry.split(",") if item.strip() != ""]
+ except ValueError:
+ expect(False, "List '{entry}' has items not compatible with requested element type '{elem_type}'")
+ else:
+ result = []
- if force_type:
- try:
- elem_type = force_type if not is_array_type(force_type) else array_elem_type(force_type)
+ return make_array(result, elem_type)
- if elem_type == "logical":
- if entry.upper() == "TRUE":
- elem = True
- elif entry.upper() == "FALSE":
- elem = False
- else:
- elem = bool(int(entry))
-
- elif elem_type == "integer":
- tmp = float(entry)
- expect (float(int(tmp))==tmp, "Cannot interpret {} as int".format(entry), exc_type=ValueError)
- elem = int(tmp)
- elif elem_type == "real":
- elem = float(entry)
- elif elem_type in ["string", "file"]:
- elem = str(entry)
+ # Not an array (or no force type passed)
+ elem_type = force_type
+ try:
+ if elem_type == "logical":
+ if entry.upper() == "TRUE":
+ return True
+ elif entry.upper() == "FALSE":
+ return False
else:
- raise NameError ("Bad force_type: {}".format(force_type))
+ return bool(int(entry))
- if is_array_type(force_type):
- return [elem]
- else:
- return elem
+ elif elem_type == "integer":
+ tmp = float(entry)
+ expect (float(int(tmp))==tmp, f"Cannot interpret {entry} as int", exc_type=ValueError)
+ return int(tmp)
+ elif elem_type == "real":
+ return float(entry)
+ elif elem_type in ["string", "file"]:
+ return str(entry)
- except ValueError as e:
- raise ValueError ("Could not use '{}' as type '{}'".format(entry, force_type)) from e
+ except ValueError as e:
+ raise ValueError (f"Could not refine '{entry}' as type '{force_type}'") from e
+ # No force type provided. Try to infer from value
if entry.upper() == "TRUE":
return True
elif entry.upper() == "FALSE":
@@ -303,6 +238,7 @@ def refine_type(entry, force_type=None):
v = float(entry)
return v
except ValueError:
+ # We ran out of options. Simply return the entry itself
return entry
###############################################################################
@@ -317,9 +253,9 @@ def derive_type(entry):
>>> derive_type('one')
'string'
>>> derive_type('one,two')
- 'array(string)'
- >>> derive_type('true,FALSE')
- 'array(logical)'
+ 'string'
+ >>> derive_type('truE')
+ 'logical'
"""
refined_value = refine_type(entry)
@@ -357,7 +293,7 @@ def check_value(elem, value):
>>> root = ET.fromstring(xml)
>>> check_value(root,'1.5')
Traceback (most recent call last):
- ValueError: Could not use '1.5' as type 'integer'
+ ValueError: Could not refine '1.5' as type 'integer'
>>> check_value(root,'3')
Traceback (most recent call last):
CIME.utils.CIMEError: ERROR: Invalid value '3' for element 'a'. Value not in the valid list ('[1, 2]')
@@ -510,7 +446,7 @@ def check_all_values(root):
check_value(root,root.text)
###############################################################################
-def resolve_inheritance (root,elem):
+def resolve_inheritance(root, elem):
###############################################################################
"""
If elem inherits from another node within $root, this function adds all
@@ -556,17 +492,26 @@ def resolve_inheritance (root,elem):
if not has_child(elem,entry.tag):
new_entry = copy.deepcopy(entry)
elem.append(new_entry)
+ else:
+ # Parent may define the type and/or doc of an entry. We cannot change this
+ for att in ["type","doc"]:
+ if att in entry.attrib.keys():
+ parent_type = entry.attrib[att]
+ for child in elem:
+ if child.tag==entry.tag:
+ expect (att not in child.attrib.keys(),
+ f"Do not set '{att}' attribute when parent node already specifies it.")
+ child.attrib[att] = parent_type
for child in elem:
resolve_inheritance(root,child)
###############################################################################
-def resolve_all_inheritances (root):
+def resolve_all_inheritances(root):
###############################################################################
"""
Resolve all inheritances in the root tree
"""
-
for elem in root:
resolve_inheritance(root,elem)
@@ -623,38 +568,32 @@ def get_valid_selectors(xml_root):
return selectors
###############################################################################
-def gen_group_processes (ap_names_str, atm_procs_defaults):
+def gen_group_processes(ap_names_str, atm_procs_defaults):
###############################################################################
"""
- Given a (possibly nested) string representation of an atm group,
+ Given a comma-separated list of atm proc names,
generates the corresponding atm processes as XML nodes.
"""
group = ET.Element("__APG__")
- ap_names_list = parse_string_as_list(ap_names_str)
- for ap in ap_names_list:
- # The current ap can be itself a group if either:
- # - ap = "(ap1,ap2,...,apXYZ)", with each ap possibly itself a group string.
- # This group is built on the fly based on the building blocks specs.
- # - ap is declared in the XML defaults as an atm proc group (which must store
- # the 'atm_procs_list' child, with the string representation of the group.
-
- if ap[0]=='(':
- # Create the atm proc group
- proc = gen_atm_proc_group(ap,atm_procs_defaults)
- else:
- # Get defaults
- proc = copy.deepcopy(get_child(atm_procs_defaults,ap))
-
- # Check if this pre-defined proc is itself a group, and, if so,
- # build all its sub-processes
- ptype = get_child(proc,"Type",must_exist=False)
- if ptype is not None and ptype.text=="Group":
- # This entry of the group is itself a group, with pre-defined
- # defaults. Let's add its entries to it
- sub_group_procs = get_child(proc,"atm_procs_list").text
- proc.extend(gen_group_processes(sub_group_procs,atm_procs_defaults))
+ ap_list = [] if ap_names_str is None or ap_names_str=="" else ap_names_str.split(',')
+ for ap in ap_list:
+ # The current ap can be itself a group if ap is declared in the XML defaults
+ # as an atm proc group (which must store the 'atm_procs_list' child,
+ # with the string representation of the group).
+
+ # Get defaults
+ proc = copy.deepcopy(get_child(atm_procs_defaults,ap))
+
+ # Check if this pre-defined proc is itself a group, and, if so,
+ # build all its sub-processes
+ ptype = get_child(proc, "Type", must_exist=False)
+ if ptype is not None and ptype.text=="Group":
+ # This entry of the group is itself a group, with pre-defined
+ # defaults. Let's add its entries to it
+ sub_group_procs = get_child(proc, "atm_procs_list").text
+ proc.extend(gen_group_processes(sub_group_procs, atm_procs_defaults))
# Append subproc to group
group.append(proc)
@@ -673,7 +612,7 @@ def gen_atm_proc_group(atm_procs_list, atm_procs_defaults):
...
...
... 1
- ... THE_LIST
+ ... THE_LIST
...
...
...
@@ -682,19 +621,17 @@ def gen_atm_proc_group(atm_procs_list, atm_procs_defaults):
... 3
...
...
- ... (p1,ap2)
+ ... p1,ap2
...
...
... '''
>>> import xml.etree.ElementTree as ET
>>> defaults = ET.fromstring(xml)
- >>> ap_list = '(ap1,(ap2,ap1))'
+ >>> ap_list = 'ap1,ap2,ap1'
>>> apg = gen_atm_proc_group(ap_list,defaults)
>>> get_child(apg,'atm_procs_list').text==ap_list
True
>>>
- >>> has_child(apg,'group.ap2_ap1.')
- True
>>> get_child(apg,'prop1').text=="1"
True
"""
@@ -702,18 +639,17 @@ def gen_atm_proc_group(atm_procs_list, atm_procs_defaults):
# Set defaults from atm_proc_group
group = ET.Element("__APG__")
group.attrib["inherit"] = "atm_proc_group"
- resolve_inheritance(atm_procs_defaults,group)
+ resolve_inheritance(atm_procs_defaults, group)
get_child(group,"atm_procs_list").text = atm_procs_list
# Create processes
- group_procs = gen_group_processes (atm_procs_list, atm_procs_defaults)
+ group_procs = gen_group_processes(atm_procs_list, atm_procs_defaults)
- # Append procs and generate name for the group.
- # NOTE: the name of a 'generic' group is 'group.AP1_AP2_..._APN.'
- names = []
+ # Append procs
for c in group_procs:
- names.append(c.tag)
group.append(c)
- group.tag = "group." + '_'.join(names) + '.'
+
+ # Will be set from outside
+ group.tag = "MISSING"
return group
diff --git a/components/eamxx/cime_config/namelist_defaults_scream.xml b/components/eamxx/cime_config/namelist_defaults_scream.xml
index df2007ca9941..71c1bfd4f828 100644
--- a/components/eamxx/cime_config/namelist_defaults_scream.xml
+++ b/components/eamxx/cime_config/namelist_defaults_scream.xml
@@ -35,7 +35,7 @@ be lost if SCREAM_HACK_XML is not enabled.
-->
-
+
@@ -141,41 +141,51 @@ be lost if SCREAM_HACK_XML is not enabled.
so it must be of the form (a,b,...).
NOTE: *CANNOT* be changed.
-->
-
- (sc_import,homme,physics,sc_export)
+ sc_import,homme,physics,sc_export
- 1
+ 1
true
true
trace
- NONE
+
+ 0
+
- ERROR_NO_ATM_PROCS
+
Group
- Sequential
+ Sequential
- NONE
- 0
+
+
+
+
+
+
+
+
- Dynamics
moist
- 0
+ in-fields. <= 0 disables hashing. -->
+ 18
@@ -185,6 +195,7 @@ be lost if SCREAM_HACK_XML is not enabled.
true
false
false
+ 740.0e3
${DIN_LOC_ROOT}/atm/scream/tables/p3_lookup_table_1.dat-v4.1.1,
${DIN_LOC_ROOT}/atm/scream/tables/mu_r_table_vals.dat8,
@@ -197,11 +208,70 @@ be lost if SCREAM_HACK_XML is not enabled.
false
+ false
+ 0.001
+ 0.04
+ 2.65
+ 0.02
+ 1.0
+ 1.0
+ 1.0
+ 1.0
+ 0.5
+ 7.0
+ 0.1
+ 0.1
+
+
+
+
+ 0
+ false
+
+ TIME_DEPENDENT_3D_PROFILE
+
+
+ "no-file-given"
+
+
+ 0.0
+
+
+
+
+
+
+
+
+
+ false
+
+
+
+
+ 1,2
+ 3,4
+ 5,6
+ 3,4
+ 5,6
+
+
UNSET
@@ -229,7 +299,7 @@ be lost if SCREAM_HACK_XML is not enabled.
- h2o, co2, o3, n2o, co, ch4, o2, n2
+ h2o, co2, o3, n2o, co, ch4, o2, n2
1807.851e-9
388.717e-6
323.141e-9
@@ -258,25 +328,49 @@ be lost if SCREAM_HACK_XML is not enabled.
3
3
4
- true
+ true
false
- false
+ false
+
+ false
+
+
+ false
+
- (shoc,cldFraction,spa,p3)
- (shoc,cldFraction,p3)
+ shoc,cldFraction,spa,p3
+ tms,shoc,cldFraction,spa,p3
+ shoc,cldFraction,p3
+ tms,shoc,cldFraction,p3
24
- 6
- 3
- 3
- 1
+ 12
+ 6
+ 6
+ 2
1
5
+
+ 10
+
+ 1
+ hours
+
+
+
+
+
- (mac_aero_mic,rrtmgp)
+ mac_aero_mic,rrtmgp
@@ -311,14 +405,14 @@ be lost if SCREAM_HACK_XML is not enabled.
UNSET
-
+
${DIN_LOC_ROOT}/atm/cam/topo/USGS-gtopo30_ne30np4pg2_x6t-SGH.c20210614.nc
+ ${DIN_LOC_ROOT}/atm/cam/topo/USGS-gtopo30_ne120np4pg2_x6t_20230404.nc
${DIN_LOC_ROOT}/atm/cam/topo/USGS-gtopo30_ne256np4pg2_x6t-SGH.c20210614.nc
+ ${DIN_LOC_ROOT}/atm/cam/topo/USGS-gtopo30_ne512np4pg2_x6t_20230404.nc
${DIN_LOC_ROOT}/atm/cam/topo/USGS-gtopo30_ne1024np4pg2_x6t-SGH.c20210614.nc
-
+
${DIN_LOC_ROOT}/atm/cam/topo/USGS-gtopo30_ne4np4pg2_16x_converted.c20200527.nc
- ${DIN_LOC_ROOT}/atm/cam/topo/USGS-gtopo30_ne120np4pg2_16xdel2.nc
- ${DIN_LOC_ROOT}/atm/cam/topo/USGS-gtopo30_ne512np4pg2_16xconsistentSGH_20190212_converted.nc
${DIN_LOC_ROOT}/atm/cam/topo/USGS_conusx4v1pg2_12x_consistentSGH_20200609.nc
@@ -351,7 +445,7 @@ be lost if SCREAM_HACK_XML is not enabled.
0.0
0.0
0.0
- 0.0,0.0
+ 0.0,0.0
0.0
@@ -366,7 +460,7 @@ be lost if SCREAM_HACK_XML is not enabled.
${SRCROOT}/components/eamxx/data/scream_default_output.yaml
./${CASE}.scream
-
+
${REST_N}
${REST_OPTION}
@@ -376,12 +470,17 @@ be lost if SCREAM_HACK_XML is not enabled.
0
- info
+
+ info
+
false
1e-10
1e-14
Warning
true
+ phis,landfrac
@@ -452,6 +551,9 @@ be lost if SCREAM_HACK_XML is not enabled.
10
0
100.0
+
+ 0
2
diff --git a/components/eamxx/cime_config/testdefs/testmods_dirs/scream/bfbhash/shell_commands b/components/eamxx/cime_config/testdefs/testmods_dirs/scream/bfbhash/shell_commands
index c0b42727ba49..b962f5f962b9 100644
--- a/components/eamxx/cime_config/testdefs/testmods_dirs/scream/bfbhash/shell_commands
+++ b/components/eamxx/cime_config/testdefs/testmods_dirs/scream/bfbhash/shell_commands
@@ -1 +1,2 @@
-./xmlchange --append SCREAM_ATMCHANGE_BUFFER='BfbHash=6'
+
+$CIMEROOT/../components/eamxx/scripts/atmchange BfbHash=1 -b
diff --git a/components/eamxx/cime_config/testdefs/testmods_dirs/scream/internal_diagnostics_level/shell_commands b/components/eamxx/cime_config/testdefs/testmods_dirs/scream/internal_diagnostics_level/shell_commands
new file mode 100644
index 000000000000..d3a4a39b668a
--- /dev/null
+++ b/components/eamxx/cime_config/testdefs/testmods_dirs/scream/internal_diagnostics_level/shell_commands
@@ -0,0 +1,2 @@
+$CIMEROOT/../components/eamxx/scripts/atmchange --all internal_diagnostics_level=1 atmosphere_processes::internal_diagnostics_level=0 -b
+./xmlchange POSTRUN_SCRIPT="$CIMEROOT/../components/eamxx/tests/postrun/check_hashes_ers.py"
diff --git a/components/eamxx/cime_config/testdefs/testmods_dirs/scream/rad_frequency_2/shell_commands b/components/eamxx/cime_config/testdefs/testmods_dirs/scream/rad_frequency_2/shell_commands
index 5ccb459798ed..d0abbbeb0c7f 100644
--- a/components/eamxx/cime_config/testdefs/testmods_dirs/scream/rad_frequency_2/shell_commands
+++ b/components/eamxx/cime_config/testdefs/testmods_dirs/scream/rad_frequency_2/shell_commands
@@ -1 +1 @@
-./xmlchange SCREAM_ATMCHANGE_BUFFER='rad_frequency=2'
+$CIMEROOT/../components/eamxx/scripts/atmchange rad_frequency=2 -b
diff --git a/components/eamxx/cime_config/testdefs/testmods_dirs/scream/scream_example_testmod_atmchange/shell_commands b/components/eamxx/cime_config/testdefs/testmods_dirs/scream/scream_example_testmod_atmchange/shell_commands
index 0ebd594935b7..b7cd82b0c548 100644
--- a/components/eamxx/cime_config/testdefs/testmods_dirs/scream/scream_example_testmod_atmchange/shell_commands
+++ b/components/eamxx/cime_config/testdefs/testmods_dirs/scream/scream_example_testmod_atmchange/shell_commands
@@ -1 +1,2 @@
-./xmlchange --append SCREAM_ATMCHANGE_BUFFER='cubed_sphere_map=42'
+
+$CIMEROOT/../components/eamxx/scripts/atmchange cubed_sphere_map=42 -b
diff --git a/components/eamxx/cime_config/testdefs/testmods_dirs/scream/small_kernels/shell_commands b/components/eamxx/cime_config/testdefs/testmods_dirs/scream/small_kernels/shell_commands
index 0496585c7d0d..04989a22796a 100644
--- a/components/eamxx/cime_config/testdefs/testmods_dirs/scream/small_kernels/shell_commands
+++ b/components/eamxx/cime_config/testdefs/testmods_dirs/scream/small_kernels/shell_commands
@@ -1 +1,7 @@
./xmlchange --append SCREAM_CMAKE_OPTIONS='SCREAM_SMALL_KERNELS On'
+$CIMEROOT/../components/eamxx/scripts/atmchange --all internal_diagnostics_level=1 atmosphere_processes::internal_diagnostics_level=0 -b
+
+f=$(./xmlquery --value MACH)
+if [ $f == chrysalis ]; then
+ ./xmlchange BATCH_COMMAND_FLAGS="--time 00:30:00 -p debug --account e3sm --exclude=chr-0512"
+fi
diff --git a/components/eamxx/cime_config/yaml_utils.py b/components/eamxx/cime_config/yaml_utils.py
new file mode 100644
index 000000000000..73a5b13d09bb
--- /dev/null
+++ b/components/eamxx/cime_config/yaml_utils.py
@@ -0,0 +1,72 @@
+# Add path to scream libs
+import sys, os
+sys.path.append(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "scripts"))
+
+from utils import ensure_yaml # pylint: disable=no-name-in-module
+ensure_yaml()
+import yaml
+
+###############################################################################
+# These are types that we use to differentiate lists of ints,bools,floats,strings
+# We can use these types to tell YAML how to write them to file, which ultimately
+# means to simply add the proper tag to the yaml file
+###############################################################################
+class Array(list):
+ def __init__ (self, vals, t):
+ super().__init__(t(v) for v in vals)
+class Bools(Array):
+ def __init__ (self,vals):
+ Array.__init__(self,vals,bool)
+class Ints(Array):
+ def __init__ (self,vals):
+ Array.__init__(self,vals,int)
+class Floats(Array):
+ def __init__ (self,vals):
+ Array.__init__(self,vals,float)
+class Strings(Array):
+ def __init__ (self,vals):
+ Array.__init__(self,vals,str)
+
+###############################################################################
+def make_array (vals,etype):
+###############################################################################
+ if etype=="bool" or etype=="logical":
+ return Bools(vals)
+ elif etype=="int" or etype=="integer":
+ return Ints(vals)
+ elif etype=="float" or etype=="real":
+ return Floats(vals)
+ elif etype=="string" or etype=="file":
+ return Strings(vals)
+ else:
+ raise ValueError (f"Unsupported element type '{etype}' for arrays.")
+
+###############################################################################
+def array_constructor(loader: yaml.SafeLoader, node: yaml.nodes.SequenceNode) -> list:
+###############################################################################
+ entries = loader.construct_sequence(node)
+ if node.tag=="!bools":
+ return Bools(entries)
+ elif node.tag=="!ints":
+ return Ints(entries)
+ elif node.tag=="!floats":
+ return Floats(entries)
+ elif node.tag=="!strings":
+ return Strings(entries)
+ else:
+ raise ValueError(f"Invalid node tag={node.tag} for array constructor.")
+
+###############################################################################
+def array_representer(dumper,array) -> yaml.nodes.SequenceNode:
+###############################################################################
+ if isinstance(array,Bools):
+ return dumper.represent_sequence('!bools',array)
+ elif isinstance(array,Ints):
+ return dumper.represent_sequence('!ints',array)
+ elif isinstance(array,Floats):
+ return dumper.represent_sequence('!floats',array)
+ elif isinstance(array,Strings):
+ return dumper.represent_sequence('!strings',array)
+ else:
+ raise ValueError (f"Unsupported array type: {type(array)}")
+
diff --git a/components/eamxx/cmake/CompareNCFiles.cmake b/components/eamxx/cmake/CompareNCFiles.cmake
new file mode 100644
index 000000000000..e17bc76b3a96
--- /dev/null
+++ b/components/eamxx/cmake/CompareNCFiles.cmake
@@ -0,0 +1,203 @@
+# Utility to create a test that compares two nc files
+# Mandatory keyword arguments
+# - TEST_NAME: the name to be given to the test
+# - SRC_FILE: the name of the first nc file
+# - TGT_FILE: the name of the second nc file
+# Optional keyword arguments
+# - LABELS: labels to attach to the created tests
+# - FIXTURES_REQUIRED: list of fixtures required
+function(CompareNCFiles)
+ # Parse keyword arguments
+ set (options)
+ set (args1v TEST_NAME SRC_FILE TGT_FILE)
+ set (argsMv LABELS FIXTURES_REQUIRED)
+
+ cmake_parse_arguments(PARSE "${options}" "${args1v}" "${argsMv}" ${ARGN})
+ CheckMacroArgs(CompareNCFiles PARSE "${options}" "${args1v}" "${argsMv}")
+
+ # Sanity checks
+ if (NOT PARSE_TEST_NAME)
+ message ("Error! CompareNCFiles requires the keyword argument TEST_NAME")
+ message (FATAL_ERROR "Aborting...")
+ endif()
+ if (NOT PARSE_SRC_FILE)
+ message ("Error! CompareNCFiles requires the keyword argument SRC_FILE")
+ message (FATAL_ERROR "Aborting...")
+ endif()
+ if (NOT PARSE_TGT_FILE)
+ message ("Error! CompareNCFiles requires the keyword argument TGT_FILE")
+ message (FATAL_ERROR "Aborting...")
+ endif()
+
+ add_test (
+ NAME ${PARSE_TEST_NAME}
+ COMMAND cmake -P ${CMAKE_BINARY_DIR}/bin/CprncTest.cmake ${PARSE_SRC_FILE} ${PARSE_TGT_FILE}
+ WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
+
+ # Set test properties, if needed
+ if (PARSE_LABELS)
+ set_tests_properties(${PARSE_TEST_NAME} PROPERTIES LABELS "${PARSE_LABELS}")
+ endif()
+
+ # Set test fixtures, if needed
+ if (PARSE_FIXTURES_REQUIRED)
+ set_tests_properties(${PARSE_TEST_NAME} PROPERTIES FIXTURES_REQUIRED "${PARSE_FIXTURES_REQUIRED}")
+ endif()
+endfunction()
+
+# This function is a more complex version of the one above: it creates tests
+# to compare a set of files, which differ in their name by a simple substring.
+# For instance, files generated with a different choice of a parameter
+
+# Mandatory keyword arguments
+ # - TEST_META_NAME: the base name to be given to the tests generated by this function
+# - FILE_META_NAME: the name of the files
+# - MAGIC_STRING : the string that will be replaced with MAGIC_VALUES entries
+# - MAGIC_VALUES : the values to be used to replace ${MAGIC_STRING}
+# Optional keyword arguments
+# - LABELS: labels to attach to the created tests
+# - FIXTURES_REQUIRED: list of fixtures required
+# Note:
+# - TEST_META_NAME and FILE_META_NAME *MUST* contain the MAGIC_STRING
+# - FIXTURES_REQUIRED *can* contain the MAGIC_STRING (but doesn't have to)
+function (CompareNCFilesFamily)
+ # Parse keyword arguments
+ set (options)
+ set (args1v TEST_META_NAME FILE_META_NAME MAGIC_STRING)
+ set (argsMv MAGIC_VALUES LABELS FIXTURES_REQUIRED)
+
+ cmake_parse_arguments(PARSE "${options}" "${args1v}" "${argsMv}" ${ARGN})
+ CheckMacroArgs(CompareNCFilesFamily PARSE "${options}" "${args1v}" "${argsMv}")
+
+ # Sanity checks
+ if (NOT PARSE_TEST_META_NAME)
+ message ("Error! CompareNCFilesFamily requires the keyword argument TEST_META_NAME")
+ message (FATAL_ERROR "Aborting...")
+ endif()
+ if (NOT PARSE_FILE_META_NAME)
+ message ("Error! CompareNCFilesFamily requires the keyword argument FILE_META_NAME")
+ message (FATAL_ERROR "Aborting...")
+ endif()
+ if (NOT PARSE_MAGIC_STRING)
+ message ("Error! CompareNCFilesFamily requires the keyword argument MAGIC_STRING")
+ message (FATAL_ERROR "Aborting...")
+ endif()
+ if (NOT PARSE_MAGIC_VALUES)
+ message ("Error! CompareNCFilesFamily requires the keyword argument MAGIC_VALUES")
+ message (FATAL_ERROR "Aborting...")
+ endif()
+ if (NOT PARSE_TEST_META_NAME MATCHES ${PARSE_MAGIC_STRING})
+ message ("Error! MAGIC_STRING not contained in TEST_META_NAME.")
+ message (" MAGIC_STRING: ${PARSE_MAGIC_STRING}")
+ message (" TEST_META_NAME: ${PARSE_TEST_META_NAME}")
+ message (FATAL_ERROR "Aborting...")
+ endif()
+ if (NOT PARSE_FILE_META_NAME MATCHES ${PARSE_MAGIC_STRING})
+ message ("Error! MAGIC_STRING not contained in FILE_META_NAME.")
+ message (" MAGIC_STRING: ${PARSE_MAGIC_STRING}")
+ message (" FILE_META_NAME: ${PARSE_FILE_META_NAME}")
+ message (FATAL_ERROR "Aborting...")
+ endif()
+
+ # Ensure cprnc is built
+ include (BuildCprnc)
+ BuildCprnc()
+
+ # Remove first entry of magic values. Compare all other entries against this
+ list (POP_FRONT PARSE_MAGIC_VALUES first)
+ string (REPLACE "${PARSE_MAGIC_STRING}" "${first}" TGT_FILE ${PARSE_FILE_META_NAME})
+
+ # FIXTURES_REQUIRED *can* also contain the magic string
+ foreach (item IN LISTS PARSE_MAGIC_VALUES)
+ # Expand the magic string in src file
+ string (REPLACE ${PARSE_MAGIC_STRING} ${item} SRC_FILE ${PARSE_FILE_META_NAME})
+
+ # Create the test. Also the test base name may contain the magic string
+ string (REPLACE ${PARSE_MAGIC_STRING} ${item} TEST_NAME ${PARSE_TEST_META_NAME})
+
+ add_test (
+ NAME ${TEST_NAME}
+ COMMAND cmake -P ${CMAKE_BINARY_DIR}/bin/CprncTest.cmake ${SRC_FILE} ${TGT_FILE}
+ WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
+
+ # Set test properties, if needed
+ if (PARSE_LABELS)
+ set_tests_properties(${TEST_NAME} PROPERTIES LABELS "${PARSE_LABELS}")
+ endif()
+
+ # Set test fixtures, if needed
+ if (PARSE_FIXTURES_REQUIRED)
+ set (TMP_LIST ${PARSE_FIXTURES_REQUIRED})
+ list (TRANSFORM TMP_LIST REPLACE "${PARSE_MAGIC_STRING}" ${item})
+
+ set_tests_properties(${TEST_NAME} PROPERTIES
+ FIXTURES_REQUIRED "${TMP_LIST}")
+ endif()
+ endforeach()
+endfunction (CompareNCFilesFamily)
+
+# A version of the above tailored for PEM-like comparisons, where the family of NC files
+# corresponds to runs using different number of MPI ranks
+function (CompareNCFilesFamilyMpi)
+ # Parse keyword arguments
+ set (options)
+ set (args1v TEST_BASE_NAME FILE_META_NAME)
+ set (argsMv MPI_RANKS LABELS META_FIXTURES_REQUIRED)
+ cmake_parse_arguments(PARSE "${options}" "${args1v}" "${argsMv}" ${ARGN})
+ CheckMacroArgs(CompareNCFilesFamilyMpi PARSE "${options}" "${args1v}" "${argsMv}")
+
+ if (NOT PARSE_TEST_BASE_NAME)
+ message ("Error! CompareNCFilesFamilyMpi requires the keyword argument TEST_BASE_NAME")
+ message (FATAL_ERROR "Aborting...")
+ endif()
+
+ # Grab the args for the MPI_RANKS range specs. This follows the same convention of CreateUnitTest:
+ # - 1 value: not accepted here (unlike CreateUnitTest); 2 or 3 values are required
+ # - 2 values: start and end of the range (increment defaults to 1)
+ # - 3 values: start, end, and increment of the range
+ list (LENGTH PARSE_MPI_RANKS NUM_MPI_RANK_ARGS)
+
+ if (NUM_MPI_RANK_ARGS EQUAL 2)
+ list (GET PARSE_MPI_RANKS 0 BEG)
+ list (GET PARSE_MPI_RANKS 1 END)
+ set (INC 1)
+ elseif(NUM_MPI_RANK_ARGS EQUAL 3)
+ list (GET PARSE_MPI_RANKS 0 BEG)
+ list (GET PARSE_MPI_RANKS 1 END)
+ list (GET PARSE_MPI_RANKS 2 INC)
+ else()
+ message ("CompareNCFilesFamilyMpi requires 2 or 3 values for the keyword argument MPI_RANKS")
+ message (" Input values: ${PARSE_MPI_RANKS}")
+ message (FATAL_ERROR "Aborting...")
+ endif()
+
+ # Create the range
+ CreateRange(MpiRanks ${BEG} ${END} INC ${INC})
+
+ # The input META_FIXTURES_REQUIRED is a required argument, which *MUST* contain the "MPIRANKS" string.
+ # We assume for each rank N, there is a test with FIXTURE_SETUP set to that string (with MPIRANKS=N).
+ if (NOT PARSE_META_FIXTURES_REQUIRED)
+ message ("Missing value for the mandatory META_FIXTURES_REQUIRED keyword argument.")
+ message (FATAL_ERROR "Aborting...")
+ endif()
+ if (NOT PARSE_META_FIXTURES_REQUIRED MATCHES "MPIRANKS")
+ message ("Error! MPIRANKS string not contained in META_FIXTURES_REQUIRED.")
+ message (" META_FIXTURES_REQUIRED: ${PARSE_META_FIXTURES_REQUIRED}")
+ message (FATAL_ERROR "Aborting...")
+ endif()
+
+ # Each comparison is between rank=BEG and rank=N, so we need two fixtures, one of which
+ # has a predefined value for MPIRANKS.
+ string (REPLACE "MPIRANKS" ${BEG} REQUIRED_FIXTURES "${PARSE_META_FIXTURES_REQUIRED}")
+ list (APPEND REQUIRED_FIXTURES "${PARSE_META_FIXTURES_REQUIRED}")
+
+ # Call the function above
+ CompareNCFilesFamily(
+ TEST_META_NAME ${PARSE_TEST_BASE_NAME}_npMPIRANKS_vs_np${BEG}
+ FILE_META_NAME ${PARSE_FILE_META_NAME}
+ MAGIC_STRING "MPIRANKS"
+ MAGIC_VALUES ${MpiRanks}
+ LABELS ${PARSE_LABELS} PEM
+ FIXTURES_REQUIRED ${REQUIRED_FIXTURES}
+ )
+endfunction()
diff --git a/components/eamxx/cmake/ScreamUtils.cmake b/components/eamxx/cmake/ScreamUtils.cmake
index ae21931f5bce..44e3e79ce0d5 100644
--- a/components/eamxx/cmake/ScreamUtils.cmake
+++ b/components/eamxx/cmake/ScreamUtils.cmake
@@ -2,6 +2,44 @@ include(CMakeParseArguments) # Needed for backwards compatibility
include(EkatCreateUnitTest)
include(EkatUtils)
+# Create a list containing a range of integers
+function (CreateRange resultVar BEG END)
+ set(options SKIP_FIRST SKIP_LAST)
+ set(arg1v INC)
+ set(argMv)
+ cmake_parse_arguments(CR "${options}" "${arg1v}" "${argMv}" ${ARGN})
+
+ # Compute beg/end/inc based on input args
+ if (CR_SKIP_FIRST)
+ math(EXPR BEG "${BEG}+1")
+ endif()
+ if (CR_SKIP_LAST)
+ math(EXPR END "${END}-1")
+ endif()
+ if (NOT CR_INC)
+ set (CR_INC 1)
+ endif()
+
+ # Sanity check
+ if (NOT CR_INC GREATER 0)
+ message (FATAL_ERROR "INC must be a positive integer")
+ endif()
+ if (BEG GREATER END)
+ message (FATAL_ERROR "BEG is larger than END")
+ endif()
+
+ # Create range list
+ set (res_list)
+ set (N ${BEG})
+ while (NOT N GREATER END)
+ list (APPEND res_list ${N})
+ math (EXPR N "${N}+${CR_INC}")
+ endwhile()
+
+ # Set in parent scope
+ set (${resultVar} ${res_list} PARENT_SCOPE)
+endfunction()
+
# This function takes the following arguments:
# - test_name: the base name of the test. We create an executable with this name
# - test_srcs: a list of src files for the executable.
@@ -45,40 +83,27 @@ set(SCREAM_CUT_TEST_MV_ARGS ${CUT_TEST_MV_ARGS})
# Scream always excludes the ekat test session since it has its own
list(REMOVE_ITEM SCREAM_CUT_EXEC_OPTIONS EXCLUDE_TEST_SESSION)
-# Libs are a position arg for SCREAM, not an optional arg like in EKAT
-list(REMOVE_ITEM SCREAM_CUT_EXEC_MV_ARGS LIBS)
-
###############################################################################
-function(CreateUnitTestExec exec_name test_srcs scream_libs)
+function(CreateUnitTestExec exec_name test_srcs)
###############################################################################
- cmake_parse_arguments(cute "${SCREAM_CUT_EXEC_OPTIONS}" "${SCREAM_CUT_EXEC_1V_ARGS}" "${SCREAM_CUT_EXEC_MV_ARGS}" ${ARGN})
- CheckMacroArgs(CreateUnitTestExec cute "${SCREAM_CUT_EXEC_OPTIONS}" "${SCREAM_CUT_EXEC_1V_ARGS}" "${SCREAM_CUT_EXEC_MV_ARGS}")
-
- separate_cut_arguments(cute "${SCREAM_CUT_EXEC_OPTIONS}" "${SCREAM_CUT_EXEC_1V_ARGS}" "${SCREAM_CUT_EXEC_MV_ARGS}" options)
-
- set(TEST_INCLUDE_DIRS
- ${SCREAM_INCLUDE_DIRS}
- ${CMAKE_CURRENT_SOURCE_DIR}
- ${CMAKE_CURRENT_BINARY_DIR}
- )
-
- set(test_libs "${scream_libs};scream_test_support")
- list(APPEND test_libs "${SCREAM_TPL_LIBRARIES}")
-
- if (SCREAM_Fortran_FLAGS)
- list(APPEND options COMPILER_F_FLAGS ${SCREAM_Fortran_FLAGS})
- endif ()
-
- EkatCreateUnitTestExec("${exec_name}" "${test_srcs}" ${options}
- EXCLUDE_TEST_SESSION LIBS ${test_libs} INCLUDE_DIRS ${TEST_INCLUDE_DIRS})
-
+ # Call Ekat function, with a couple of extra params
+ EkatCreateUnitTestExec("${exec_name}" "${test_srcs}" ${ARGN}
+ EXCLUDE_TEST_SESSION LIBS scream_share scream_test_support)
endfunction(CreateUnitTestExec)
+###############################################################################
+function(CreateADUnitTestExec exec_name)
+###############################################################################
+ # Call the function above specifying some params
+ CreateUnitTestExec("${exec_name}" "${SCREAM_SRC_DIR}/share/util/eamxx_ad_test.cpp"
+ LIBS scream_control scream_io diagnostics ${ARGN})
+endfunction(CreateADUnitTestExec)
+
###############################################################################
function(CreateUnitTestFromExec test_name test_exec)
###############################################################################
cmake_parse_arguments(cutfe "${SCREAM_CUT_TEST_OPTIONS}" "${SCREAM_CUT_TEST_1V_ARGS}" "${SCREAM_CUT_TEST_MV_ARGS}" ${ARGN})
- CheckMacroArgs(CreateUnitTestExec cutfe "${SCREAM_CUT_TEST_OPTIONS}" "${SCREAM_CUT_TEST_1V_ARGS}" "${SCREAM_CUT_TEST_MV_ARGS}")
+ CheckMacroArgs(CreateUnitTestFromExec cutfe "${SCREAM_CUT_TEST_OPTIONS}" "${SCREAM_CUT_TEST_1V_ARGS}" "${SCREAM_CUT_TEST_MV_ARGS}")
#
# If asking for mpi/omp ranks/threads, verify we stay below the max number of threads
@@ -132,7 +157,7 @@ function(CreateUnitTestFromExec test_name test_exec)
endfunction(CreateUnitTestFromExec)
###############################################################################
-function(CreateUnitTest test_name test_srcs scream_libs)
+function(CreateUnitTest test_name test_srcs)
###############################################################################
set(options ${SCREAM_CUT_EXEC_OPTIONS} ${SCREAM_CUT_TEST_OPTIONS})
set(oneValueArgs ${SCREAM_CUT_EXEC_1V_ARGS} ${SCREAM_CUT_TEST_1V_ARGS})
@@ -147,7 +172,7 @@ function(CreateUnitTest test_name test_srcs scream_libs)
#------------------------------#
separate_cut_arguments(cut "${SCREAM_CUT_EXEC_OPTIONS}" "${SCREAM_CUT_EXEC_1V_ARGS}" "${SCREAM_CUT_EXEC_MV_ARGS}" options_ExecPhase)
- CreateUnitTestExec("${test_name}" "${test_srcs}" "${scream_libs}" ${options_ExecPhase})
+ CreateUnitTestExec("${test_name}" "${test_srcs}" ${options_ExecPhase})
#------------------------------#
# Create Tests Phase #
@@ -158,6 +183,15 @@ function(CreateUnitTest test_name test_srcs scream_libs)
endfunction(CreateUnitTest)
+###############################################################################
+function(CreateADUnitTest test_name)
+###############################################################################
+
+ # Call the function above specifying some params
+ CreateUnitTest("${test_name}" "${SCREAM_SRC_DIR}/share/util/eamxx_ad_test.cpp"
+ LABELS driver LIBS scream_control scream_io diagnostics ${ARGN})
+endfunction(CreateADUnitTest)
+
###############################################################################
function(GetInputFile src_path)
###############################################################################
diff --git a/components/eamxx/cmake/machine-files/alvarez.cmake b/components/eamxx/cmake/machine-files/alvarez.cmake
index 6df22e6fe08c..037da48eaf03 100644
--- a/components/eamxx/cmake/machine-files/alvarez.cmake
+++ b/components/eamxx/cmake/machine-files/alvarez.cmake
@@ -1,18 +1,8 @@
include(${CMAKE_CURRENT_LIST_DIR}/common.cmake)
common_setup()
-#message(STATUS "alvarez PROJECT_NAME=${PROJECT_NAME} USE_CUDA=${USE_CUDA} KOKKOS_ENABLE_CUDA=${KOKKOS_ENABLE_CUDA}")
-
include (${EKAT_MACH_FILES_PATH}/kokkos/amd-zen3.cmake)
-if ("${PROJECT_NAME}" STREQUAL "E3SM")
- if (BUILD_THREADED)
- include (${EKAT_MACH_FILES_PATH}/kokkos/openmp.cmake)
- else()
- include (${EKAT_MACH_FILES_PATH}/kokkos/serial.cmake)
- endif()
-else()
- include (${EKAT_MACH_FILES_PATH}/kokkos/openmp.cmake)
-endif()
+include (${EKAT_MACH_FILES_PATH}/kokkos/openmp.cmake)
include (${EKAT_MACH_FILES_PATH}/mpi/srun.cmake)
diff --git a/components/eamxx/cmake/machine-files/compy.cmake b/components/eamxx/cmake/machine-files/compy.cmake
index ebd53132699b..1f156b0546c0 100644
--- a/components/eamxx/cmake/machine-files/compy.cmake
+++ b/components/eamxx/cmake/machine-files/compy.cmake
@@ -6,7 +6,5 @@ include (${EKAT_MACH_FILES_PATH}/kokkos/intel-skx.cmake)
include (${EKAT_MACH_FILES_PATH}/kokkos/openmp.cmake)
include (${EKAT_MACH_FILES_PATH}/mpi/srun.cmake)
-set (NetCDF_PATH /share/apps/netcdf/4.6.3/gcc/8.1.0 CACHE STRING "")
-
#Compy SLURM specific settings
set(EKAT_MPI_NP_FLAG "-p short -n" CACHE STRING "" FORCE)
diff --git a/components/eamxx/cmake/machine-files/cori-knl.cmake b/components/eamxx/cmake/machine-files/cori-knl.cmake
index f1a2bca491e2..11296f343748 100644
--- a/components/eamxx/cmake/machine-files/cori-knl.cmake
+++ b/components/eamxx/cmake/machine-files/cori-knl.cmake
@@ -3,23 +3,14 @@ common_setup()
# Load knl arch and openmp backend for kokkos
include (${EKAT_MACH_FILES_PATH}/kokkos/intel-knl.cmake)
-
-if ("${PROJECT_NAME}" STREQUAL "E3SM")
- if (BUILD_THREADED)
- include (${EKAT_MACH_FILES_PATH}/kokkos/openmp.cmake)
- else()
- include (${EKAT_MACH_FILES_PATH}/kokkos/serial.cmake)
- endif()
-else()
- include (${EKAT_MACH_FILES_PATH}/kokkos/openmp.cmake)
-endif()
+include (${EKAT_MACH_FILES_PATH}/kokkos/openmp.cmake)
include (${EKAT_MACH_FILES_PATH}/mpi/srun.cmake)
if ("${PROJECT_NAME}" STREQUAL "E3SM")
if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
if (CMAKE_Fortran_COMPILER_VERSION VERSION_GREATER_EQUAL 10)
- set(CMAKE_Fortran_FLAGS "-fallow-argument-mismatch" CACHE STRING "" FORCE) # only works with gnu v10 and above
+ set(CMAKE_Fortran_FLAGS "-fallow-argument-mismatch" CACHE STRING "" FORCE) # only works with gnu v10 and above
endif()
endif()
else()
diff --git a/components/eamxx/cmake/machine-files/crusher-scream-gpu.cmake b/components/eamxx/cmake/machine-files/crusher-scream-gpu.cmake
index c35c9fd66bfe..391a9b8d6880 100644
--- a/components/eamxx/cmake/machine-files/crusher-scream-gpu.cmake
+++ b/components/eamxx/cmake/machine-files/crusher-scream-gpu.cmake
@@ -2,7 +2,6 @@ include(${CMAKE_CURRENT_LIST_DIR}/common.cmake)
common_setup()
#serial is needed, but maybe it is always on?
-#include (${EKAT_MACH_FILES_PATH}/kokkos/serial.cmake)
include (${EKAT_MACH_FILES_PATH}/kokkos/mi250.cmake)
include (${EKAT_MACH_FILES_PATH}/kokkos/hip.cmake)
include (${EKAT_MACH_FILES_PATH}/mpi/srun.cmake)
diff --git a/components/eamxx/cmake/machine-files/docker-scream.cmake b/components/eamxx/cmake/machine-files/docker-scream.cmake
index 0287c5d399f7..87d507fef20f 100644
--- a/components/eamxx/cmake/machine-files/docker-scream.cmake
+++ b/components/eamxx/cmake/machine-files/docker-scream.cmake
@@ -8,7 +8,7 @@ set(BLAS_LIBRARIES /opt/conda/lib/libblas.so CACHE STRING "")
set(LAPACK_LIBRARIES /opt/conda/lib/liblapack.so CACHE STRING "")
set(SCREAM_INPUT_ROOT "/storage/inputdata/" CACHE STRING "")
set(PYBIND11_PYTHON_VERSION 3.9 CACHE STRING "")
-set(RUN_ML_CORRECTION_TEST TRUE CACHE BOOL "")
+option (SCREAM_ENABLE_ML_CORRECTION "Whether to enable ML correction parametrization" ON)
if ("${PROJECT_NAME}" STREQUAL "E3SM")
if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
@@ -18,4 +18,4 @@ if ("${PROJECT_NAME}" STREQUAL "E3SM")
endif()
else()
set(CMAKE_Fortran_FLAGS "-fallow-argument-mismatch" CACHE STRING "" FORCE) # only works with gnu v10 and above
-endif()
\ No newline at end of file
+endif()
diff --git a/components/eamxx/cmake/machine-files/frontier-scream-gpu.cmake b/components/eamxx/cmake/machine-files/frontier-scream-gpu.cmake
new file mode 100644
index 000000000000..14d1d501160d
--- /dev/null
+++ b/components/eamxx/cmake/machine-files/frontier-scream-gpu.cmake
@@ -0,0 +1,9 @@
+set (EKAT_MACH_FILES_PATH ${CMAKE_CURRENT_LIST_DIR}/../../../../externals/ekat/cmake/machine-files)
+
+include (${EKAT_MACH_FILES_PATH}/kokkos/mi250.cmake)
+include (${EKAT_MACH_FILES_PATH}/kokkos/hip.cmake)
+
+set(SCREAM_MPIRUN_EXE "srun" CACHE STRING "")
+set(SCREAM_MACHINE "frontier-scream-gpu" CACHE STRING "")
+
+set(CMAKE_CXX_FLAGS "--amdgpu-target=gfx90a -fno-gpu-rdc -I$ENV{MPICH_DIR}/include" CACHE STRING "" FORCE)
diff --git a/components/eamxx/cmake/machine-files/gcp.cmake b/components/eamxx/cmake/machine-files/gcp.cmake
index fe105682b1d9..ecfa8d7e9604 100644
--- a/components/eamxx/cmake/machine-files/gcp.cmake
+++ b/components/eamxx/cmake/machine-files/gcp.cmake
@@ -1,19 +1,8 @@
include(${CMAKE_CURRENT_LIST_DIR}/common.cmake)
common_setup()
-#message(STATUS "gcp PROJECT_NAME=${PROJECT_NAME} USE_CUDA=${USE_CUDA} KOKKOS_ENABLE_CUDA=${KOKKOS_ENABLE_CUDA}")
# use default backend?
-
-if ("${PROJECT_NAME}" STREQUAL "E3SM")
- if (BUILD_THREADED)
- include (${EKAT_MACH_FILES_PATH}/kokkos/openmp.cmake)
- else()
- include (${EKAT_MACH_FILES_PATH}/kokkos/serial.cmake)
- endif()
-else()
- include (${EKAT_MACH_FILES_PATH}/kokkos/openmp.cmake)
-endif()
-
+include (${EKAT_MACH_FILES_PATH}/kokkos/openmp.cmake)
include (${EKAT_MACH_FILES_PATH}/mpi/srun.cmake)
set(CMAKE_CXX_FLAGS "-DTHRUST_IGNORE_CUB_VERSION_CHECK" CACHE STRING "" FORCE)
diff --git a/components/eamxx/cmake/machine-files/lassen.cmake b/components/eamxx/cmake/machine-files/lassen.cmake
index a709bba15689..36b69c7f0253 100644
--- a/components/eamxx/cmake/machine-files/lassen.cmake
+++ b/components/eamxx/cmake/machine-files/lassen.cmake
@@ -1,8 +1,9 @@
include(${CMAKE_CURRENT_LIST_DIR}/common.cmake)
common_setup()
-set(NetCDF_Fortran_PATH /usr/gdata/climdat/libs/netcdf-fortran/install/lassen/fortran CACHE STRING "")
-set(BLAS_LIBRARIES /usr/gdata/climdat/libs/blas/libblas.a CACHE STRING "")
-set(LAPACK_LIBRARIES /usr/gdata/climdat/libs/lapack/liblapack.a CACHE STRING "")
+set(NetCDF_PATH /usr/gdata/climdat/netcdf CACHE STRING "")
+set(NetCDF_Fortran_PATH /usr/gdata/climdat/netcdf CACHE STRING "")
+set(LAPACK_LIBRARIES /usr/lib64/liblapack.so CACHE STRING "")
+set(CMAKE_CXX_FLAGS "-DTHRUST_IGNORE_CUB_VERSION_CHECK" CACHE STRING "" FORCE)
set(SCREAM_INPUT_ROOT "/usr/gdata/climdat/ccsm3data/inputdata/" CACHE STRING "")
diff --git a/components/eamxx/cmake/machine-files/mappy.cmake b/components/eamxx/cmake/machine-files/mappy.cmake
index 86a2fb1d5302..7c1fc8cf25ea 100644
--- a/components/eamxx/cmake/machine-files/mappy.cmake
+++ b/components/eamxx/cmake/machine-files/mappy.cmake
@@ -1,2 +1,3 @@
include(${CMAKE_CURRENT_LIST_DIR}/common.cmake)
common_setup()
+set(PYTHON_EXECUTABLE "/ascldap/users/jgfouca/packages/Python-3.8.5/bin/python3.8" CACHE STRING "" FORCE)
\ No newline at end of file
diff --git a/components/eamxx/cmake/machine-files/pm-cpu.cmake b/components/eamxx/cmake/machine-files/pm-cpu.cmake
index 3c32d23dfbff..ef66da562f02 100644
--- a/components/eamxx/cmake/machine-files/pm-cpu.cmake
+++ b/components/eamxx/cmake/machine-files/pm-cpu.cmake
@@ -1,17 +1,8 @@
include(${CMAKE_CURRENT_LIST_DIR}/common.cmake)
common_setup()
-if ("${PROJECT_NAME}" STREQUAL "E3SM")
- if (BUILD_THREADED)
- include (${EKAT_MACH_FILES_PATH}/kokkos/openmp.cmake)
- #message(STATUS, "pm-cpu openmp BUILD_THREADED=${BUILD_THREADED}")
- else()
- include (${EKAT_MACH_FILES_PATH}/kokkos/serial.cmake)
- #message(STATUS, "pm-cpu serial BUILD_THREADED=${BUILD_THREADED}")
- endif()
-else()
- include (${EKAT_MACH_FILES_PATH}/kokkos/openmp.cmake)
-endif()
+include (${EKAT_MACH_FILES_PATH}/kokkos/amd-zen3.cmake)
+include (${EKAT_MACH_FILES_PATH}/kokkos/openmp.cmake)
set(CMAKE_CXX_FLAGS "-DTHRUST_IGNORE_CUB_VERSION_CHECK" CACHE STRING "" FORCE)
diff --git a/components/eamxx/cmake/machine-files/quartz-intel.cmake b/components/eamxx/cmake/machine-files/quartz-intel.cmake
index cefda8437da4..753c782702db 100644
--- a/components/eamxx/cmake/machine-files/quartz-intel.cmake
+++ b/components/eamxx/cmake/machine-files/quartz-intel.cmake
@@ -1,3 +1,7 @@
include(${CMAKE_CURRENT_LIST_DIR}/quartz.cmake)
-set(CMAKE_EXE_LINKER_FLAGS "-L/usr/tce/packages/gcc/gcc-10.3.1-magic/lib/gcc/x86_64-redhat-linux/10/ -qmkl" CACHE STRING "" FORCE)
-set(RUN_ML_CORRECTION_TEST TRUE CACHE BOOL "")
+set(CMAKE_EXE_LINKER_FLAGS "-L/usr/tce/packages/mkl/mkl-2022.1.0/lib/intel64 -qmkl" CACHE STRING "" FORCE)
+set(PYTHON_EXECUTABLE "/usr/tce/packages/python/python-3.9.12/bin/python3" CACHE STRING "" FORCE)
+set(PYTHON_LIBRARIES "/usr/lib64/libpython3.9.so.1.0" CACHE STRING "" FORCE)
+option (SCREAM_ENABLE_ML_CORRECTION "Whether to enable ML correction parametrization" ON)
+set(HDF5_DISABLE_VERSION_CHECK 1 CACHE STRING "" FORCE)
+execute_process(COMMAND source /usr/WS1/climdat/python_venv/3.9.2/screamML/bin/activate)
diff --git a/components/eamxx/cmake/machine-files/quartz.cmake b/components/eamxx/cmake/machine-files/quartz.cmake
index 24c97078475c..ee9a3dcbffd3 100644
--- a/components/eamxx/cmake/machine-files/quartz.cmake
+++ b/components/eamxx/cmake/machine-files/quartz.cmake
@@ -9,7 +9,7 @@ option(Kokkos_ARCH_BDW "" ON)
#if COMPILER is not defined, should be running standalone with quartz-intel or quartz-gcc
if ("${COMPILER}" STREQUAL "intel")
- set(CMAKE_EXE_LINKER_FLAGS "-L/usr/tce/packages/gcc/gcc-10.3.1-magic/lib/gcc/x86_64-redhat-linux/10/ -qmkl" CACHE STRING "" FORCE)
+ set(CMAKE_EXE_LINKER_FLAGS "-L/usr/tce/packages/mkl/mkl-2022.1.0/lib/intel64/ -qmkl" CACHE STRING "" FORCE)
elseif ("${COMPILER}" STREQUAL "gnu")
message(WARNING "You are using an unsupported e3sm compiler. For supported quartz compilers run ./${E3SM_ROOT}/cime/scripts/query_config --machines quartz")
set(CMAKE_CXX_FLAGS "-w" CACHE STRING "" FORCE)
diff --git a/components/eamxx/cmake/machine-files/ruby-intel.cmake b/components/eamxx/cmake/machine-files/ruby-intel.cmake
index 5ebae48a9285..63fff478fdaf 100644
--- a/components/eamxx/cmake/machine-files/ruby-intel.cmake
+++ b/components/eamxx/cmake/machine-files/ruby-intel.cmake
@@ -1,5 +1,7 @@
include(${CMAKE_CURRENT_LIST_DIR}/ruby.cmake)
-set(CMAKE_CXX_FLAGS "-w -cxxlib=/usr/tce/packages/gcc/gcc-8.3.1/rh" CACHE STRING "" FORCE)
-set(CMAKE_EXE_LINKER_FLAGS "-L/usr/tce/packages/gcc/gcc-8.3.1/rh/lib/gcc/x86_64-redhat-linux/8/ -mkl" CACHE STRING "" FORCE)
-set(PYTHON_EXECUTABLE "/usr/tce/packages/python/python-3.8.2/bin/python3" CACHE STRING "" FORCE)
-set(RUN_ML_CORRECTION_TEST TRUE CACHE BOOL "")
+set(CMAKE_EXE_LINKER_FLAGS "-L/usr/tce/packages/mkl/mkl-2022.1.0/lib/intel64/ -qmkl" CACHE STRING "" FORCE)
+set(PYTHON_EXECUTABLE "/usr/tce/packages/python/python-3.9.12/bin/python3" CACHE STRING "" FORCE)
+set(PYTHON_LIBRARIES "/usr/lib64/libpython3.9.so.1.0" CACHE STRING "" FORCE)
+option (SCREAM_ENABLE_ML_CORRECTION "Whether to enable ML correction parametrization" ON)
+set(HDF5_DISABLE_VERSION_CHECK 1 CACHE STRING "" FORCE)
+execute_process(COMMAND source /usr/WS1/climdat/python_venv/3.9.2/screamML/bin/activate)
diff --git a/components/eamxx/cmake/machine-files/ruby.cmake b/components/eamxx/cmake/machine-files/ruby.cmake
index 20a7eb008444..d0a9de4baf4b 100644
--- a/components/eamxx/cmake/machine-files/ruby.cmake
+++ b/components/eamxx/cmake/machine-files/ruby.cmake
@@ -12,6 +12,4 @@ include (${EKAT_MACH_FILES_PATH}/kokkos/openmp.cmake)
include (${EKAT_MACH_FILES_PATH}/mpi/srun.cmake)
-set(CMAKE_CXX_FLAGS "-w -cxxlib=/usr/tce/packages/gcc/gcc-8.3.1/rh" CACHE STRING "" FORCE)
-set(CMAKE_EXE_LINKER_FLAGS "-L/usr/tce/packages/gcc/gcc-8.3.1/rh/lib/gcc/x86_64-redhat-linux/8/ -mkl" CACHE STRING "" FORCE)
-
+set(SCREAM_INPUT_ROOT "/usr/gdata/climdat/ccsm3data/inputdata" CACHE STRING "")
diff --git a/components/eamxx/cmake/machine-files/weaver.cmake b/components/eamxx/cmake/machine-files/weaver.cmake
index fa948b80003c..cf8251b44877 100644
--- a/components/eamxx/cmake/machine-files/weaver.cmake
+++ b/components/eamxx/cmake/machine-files/weaver.cmake
@@ -1,8 +1,9 @@
include(${CMAKE_CURRENT_LIST_DIR}/common.cmake)
common_setup()
-set (BLAS_LIBRARIES /ascldap/users/projects/e3sm/scream/libs/openblas/install/weaver/gcc/8.5.0/lib/libopenblas.so CACHE STRING "")
-set (LAPACK_LIBRARIES /ascldap/users/projects/e3sm/scream/libs/openblas/install/weaver/gcc/8.5.0/lib/libopenblas.so CACHE STRING "")
+set (BLAS_LIBRARIES $ENV{NETLIB_LAPACK_ROOT}/lib64/libblas.so CACHE STRING "")
+set (LAPACK_LIBRARIES $ENV{NETLIB_LAPACK_ROOT}/lib64/liblapack.so CACHE STRING "")
set(SCREAM_INPUT_ROOT "/home/projects/e3sm/scream/data" CACHE STRING "")
-set(CMAKE_CXX_FLAGS "-DTHRUST_IGNORE_CUB_VERSION_CHECK" CACHE STRING "" FORCE)
+#set(CMAKE_CXX_FLAGS "-DTHRUST_IGNORE_CUB_VERSION_CHECK" CACHE STRING "" FORCE)
+set(CMAKE_Fortran_FLAGS "-fallow-argument-mismatch" CACHE STRING "" FORCE)
set(HOMMEXX_CUDA_MAX_WARP_PER_TEAM 8 CACHE STRING "")
diff --git a/components/eamxx/cmake/tpls/CsmShare.cmake b/components/eamxx/cmake/tpls/CsmShare.cmake
index 3faa98d3ec9c..fd682d0ca4e9 100644
--- a/components/eamxx/cmake/tpls/CsmShare.cmake
+++ b/components/eamxx/cmake/tpls/CsmShare.cmake
@@ -1,55 +1,25 @@
macro (CreateCsmShareTarget)
- if (TARGET csm_share)
- message (FATAL_ERROR "Error! The target csm_share already exists!")
- endif()
-
if (SCREAM_CIME_BUILD)
- # Some sanity checks
- if (NOT DEFINED INSTALL_SHAREDPATH)
- message (FATAL_ERROR "Error! The cmake variable 'INSTALL_SHAREDPATH' is not defined.")
- endif ()
- if (NOT DEFINED COMP_INTERFACE)
- message (FATAL_ERROR "Error! The cmake variable 'COMP_INTERFACE' is not defined.")
- endif ()
- if (NOT DEFINED NINST_VALUE)
- message (FATAL_ERROR "Error! The cmake variable 'NINST_VALUE' is not defined.")
- endif ()
-
- # If we didn't already parse this script, create imported target
- if (NOT TARGET csm_share)
-
- # Build the name of the path where libcsm_share should be located
- if (USE_ESMF_LIB)
- set(ESMFDIR "esmf")
- else()
- set(ESMFDIR "noesmf")
- endif()
- set(CSM_SHARE "${INSTALL_SHAREDPATH}/${COMP_INTERFACE}/${ESMFDIR}/${NINST_VALUE}/csm_share")
+ find_package(CsmShare REQUIRED)
- # Look for libcsm_share in the complex path we built above
- find_library(CSM_SHARE_LIB csm_share REQUIRED PATHS ${CSM_SHARE})
-
- # Create the interface library, and set target properties
- add_library (csm_share INTERFACE)
- target_link_libraries (csm_share INTERFACE ${CSM_SHARE_LIB})
- target_include_directories(csm_share INTERFACE ${CSM_SHARE})
-
- # Link against piof
- target_link_libraries(csm_share INTERFACE piof)
- endif ()
else()
# Build csm_share library manually
+ if (TARGET csm_share)
+ message (FATAL_ERROR "Error! The target csm_share already exists!")
+ endif()
# Set variables needed for processing genf90 templates
set(CIMEROOT ${SCREAM_BASE_DIR}/../../cime)
list(APPEND CMAKE_MODULE_PATH ${CIMEROOT}/CIME/non_py/src/CMake)
- set(GENF90 ${CIMEROOT}/CIME/non_py/externals/genf90/genf90.pl)
+ # Setting GENF90_PATH here will prevent cprnc from trying to redefine the genf90 target
+ set(GENF90_PATH ${CIMEROOT}/CIME/non_py/externals/genf90)
+ set(GENF90 ${GENF90_PATH}/genf90.pl)
set(ENABLE_GENF90 True)
include(genf90_utils)
include(Sourcelist_utils)
# GENF90_SOURCE lists source files we will need to run through the genf90 perl script
- set (GENF90_SOURCE
+ set (GENF90_SOURCE
${SCREAM_BASE_DIR}/../../share/util/shr_infnan_mod.F90.in
${SCREAM_BASE_DIR}/../../share/util/shr_assert_mod.F90.in
)
diff --git a/components/eamxx/cmake/tpls/GPTL.cmake b/components/eamxx/cmake/tpls/GPTL.cmake
deleted file mode 100644
index 0442a136eda6..000000000000
--- a/components/eamxx/cmake/tpls/GPTL.cmake
+++ /dev/null
@@ -1,25 +0,0 @@
-macro (CreateGPTLTarget)
- # Sanity check
- if (TARGET gptl)
- # We should not call this macro twice
- message (FATAL_ERROR "The GPTL target was already created!")
- endif()
-
- if (SCREAM_CIME_BUILD)
- # Some sanity checks
- if (NOT DEFINED INSTALL_SHAREDPATH)
- message (FATAL_ERROR "Error! The cmake variable 'INSTALL_SHAREDPATH' is not defined.")
- endif ()
-
- # Look for libgptl in INSTALL_SHAREDPATH/lib
- find_library(GPTL_LIB gptl REQUIRED PATHS ${INSTALL_SHAREDPATH}/lib)
-
- # Create the imported target that scream targets can link to
- add_library (gptl INTERFACE)
- target_link_libraries (gptl INTERFACE ${GPTL_LIB})
- target_include_directories (gptl INTERFACE ${INSTALL_SHAREDPATH}/include)
- if (NOT MPILIB STREQUAL "mpi-serial")
- target_compile_definitions (gptl INTERFACE HAVE_MPI)
- endif()
- endif ()
-endmacro()
diff --git a/components/eamxx/cmake/tpls/GetNetcdfLibs.cmake b/components/eamxx/cmake/tpls/GetNetcdfLibs.cmake
deleted file mode 100644
index 6202577468d4..000000000000
--- a/components/eamxx/cmake/tpls/GetNetcdfLibs.cmake
+++ /dev/null
@@ -1,64 +0,0 @@
-# Use Macros.cmake to get info on netcdf paths.
-# Note: the inputs are supposed to be *the name* of the variables storing the result
-# Note: Keep this a FUNCTION, not a MACRO, to avoid polluting the calling scope
-# with all the stuff from Macros.cmake
-function (GetNetcdfLibs)
- # Sanity check
- if (NOT SCREAM_CIME_BUILD)
- message (FATAL_ERROR "Error! Do not call 'GetNetcdfPaths' in a non-CIME build.\n")
- endif ()
-
- # Load variables set by CIME
- include(${CASEROOT}/Macros.cmake)
-
- # Pnetcdf is optional, and only if not running serial
- if (NOT MPILIB STREQUAL mpi-serial)
- if (PNETCDF_PATH)
- find_library(pnetcdf_lib pnetcdf REQUIRED PATHS ${PNETCDF_PATH}/lib)
- set (pnetcdf_lib ${pnetcdf_lib} PARENT_SCOPE)
- find_path (pnetcdf_incdir pnetcdf.h REQUIRED PATHS ${PNETCDF_PATH}/include)
- endif()
- endif()
-
- if (NETCDF_C_PATH)
- # Sanity checks
- if (NOT NETCDF_FORTRAN_PATH)
- message(FATAL_ERROR "NETCDF_C_PATH specified without NETCDF_FORTRAN_PATH")
- endif()
- if (NOT EXISTS ${NETCDF_C_PATH}/lib AND NOT EXISTS ${NETCDF_C_PATH}/lib64)
- message(FATAL_ERROR "NETCDF_C_PATH does not contain a lib or lib64 directory")
- endif ()
- if (NOT EXISTS ${NETCDF_FORTRAN_PATH}/lib AND NOT EXISTS ${NETCDF_FORTRAN_PATH}/lib64)
- message(FATAL_ERROR "NETCDF_FORTRAN_PATH does not contain a lib or lib64 directory")
- endif ()
-
- # Find the libraries
- find_library(netcdf_c_lib netcdf REQUIRED PATHS ${NETCDF_C_PATH}/lib ${NETCDF_C_PATH}/lib64)
- find_library(netcdf_f_lib netcdff REQUIRED PATHS ${NETCDF_FORTRAN_PATH}/lib ${NETCDF_FORTRAN_PATH}/lib64)
- find_path (netcdf_c_incdir netcdf.h REQUIRED PATHS ${NETCDF_C_PATH}/include)
- find_path (netcdf_f_incdir netcdf.inc REQUIRED PATHS ${NETCDF_FORTRAN_PATH}/include)
-
- elseif (NETCDF_FORTRAN_PATH)
- message(FATAL_ERROR "NETCDF_FORTRAN_PATH specified without NETCDF_C_PATH")
- elseif (NETCDF_PATH)
-
- # Sanity checks
- if (NOT EXISTS ${NETCDF_PATH}/lib AND NOT EXISTS ${NETCDF_PATH}/lib64)
- message(FATAL_ERROR "NETCDF_PATH does not contain a lib or lib64 directory")
- endif ()
-
- find_library(netcdf_c_lib netcdf REQUIRED PATHS ${NETCDF_PATH}/lib ${NETCDF_PATH}/lib64)
- find_library(netcdf_f_lib netcdff REQUIRED PATHS ${NETCDF_PATH}/lib ${NETCDF_PATH}/lib64)
- find_path (netcdf_c_incdir netcdf.h REQUIRED PATHS ${NETCDF_PATH}/include)
- find_path (netcdf_f_incdir netcdf.inc REQUIRED PATHS ${NETCDF_PATH}/include)
- else()
- message(FATAL_ERROR "NETCDF not found: Define NETCDF_PATH or NETCDF_C_PATH and NETCDF_FORTRAN_PATH in config_machines.xml or config_compilers.xml")
- endif()
- set (pnetcdf_lib ${pnetcdf_lib} PARENT_SCOPE)
- set (netcdf_c_lib ${netcdf_c_lib} PARENT_SCOPE)
- set (netcdf_f_lib ${netcdf_f_lib} PARENT_SCOPE)
- set (pnetcdf_incdir ${pnetcdf_incdir} PARENT_SCOPE)
- set (netcdf_c_incdir ${netcdf_c_incdir} PARENT_SCOPE)
- set (netcdf_f_incdir ${netcdf_f_incdir} PARENT_SCOPE)
-
-endfunction ()
diff --git a/components/eamxx/cmake/tpls/Mct.cmake b/components/eamxx/cmake/tpls/Mct.cmake
deleted file mode 100644
index 1650bec4dab8..000000000000
--- a/components/eamxx/cmake/tpls/Mct.cmake
+++ /dev/null
@@ -1,26 +0,0 @@
-macro (CreateMctTarget)
-
- # Some sanity checks
- if (NOT SCREAM_CIME_BUILD)
- message (FATAL_ERROR "Error! You should need the mct target only in CIME builds")
- endif ()
- if (NOT DEFINED INSTALL_SHAREDPATH)
- message (FATAL_ERROR "Error! The cmake variable 'INSTALL_SHAREDPATH' is not defined.")
- endif ()
-
- if (TARGET mct)
- # We should not call this macro twice
- message (FATAL_ERROR "The mct target was already created!")
- endif()
-
- # Look for libmct in INSTALL_SHAREDPATH/lib
- find_library(MCT_LIB mct REQUIRED PATHS ${INSTALL_SHAREDPATH}/lib)
-
- # Create the interface library, and set target properties
- add_library(mct INTERFACE)
- target_link_libraries(mct INTERFACE ${MCT_LIB})
- target_include_directories(mct INTERFACE ${INSTALL_SHAREDPATH}/include)
-
- # Link against csm_share
- target_link_libraries(mct INTERFACE csm_share)
-endmacro()
diff --git a/components/eamxx/cmake/tpls/Scorpio.cmake b/components/eamxx/cmake/tpls/Scorpio.cmake
index b54aa2689a66..db1746298413 100644
--- a/components/eamxx/cmake/tpls/Scorpio.cmake
+++ b/components/eamxx/cmake/tpls/Scorpio.cmake
@@ -3,8 +3,6 @@
set (E3SM_EXTERNALS_DIR ${CMAKE_CURRENT_LIST_DIR}/../../../../externals CACHE INTERNAL "")
set (SCREAM_TPLS_MODULE_DIR ${CMAKE_CURRENT_LIST_DIR} CACHE INTERNAL "")
-include (${SCREAM_TPLS_MODULE_DIR}/GPTL.cmake)
-include (${SCREAM_TPLS_MODULE_DIR}/GetNetcdfLibs.cmake)
macro (CreateScorpioTargets)
@@ -15,55 +13,21 @@ macro (CreateScorpioTargets)
endif()
if (SCREAM_CIME_BUILD)
- # For CIME builds, we simply wrap the already built pioc/piof libs into a cmake target
- if (NOT DEFINED INSTALL_SHAREDPATH)
- message (FATAL_ERROR "Error! The cmake variable 'INSTALL_SHAREDPATH' is not defined.")
- endif ()
+ find_package(PIO REQUIRED)
- set(SCORPIO_LIB_DIR ${INSTALL_SHAREDPATH}/lib)
- set(SCORPIO_INC_DIR ${INSTALL_SHAREDPATH}/include)
- set(CSM_SHR_INCLUDE ${INSTALL_SHAREDPATH}/${COMP_INTERFACE}/noesmf/${NINST_VALUE}/include)
-
- # Look for pioc deps. We will have to link them to the pioc target, so that cmake will
- # propagate them to any downstream target linking against pioc
- CreateGPTLTarget()
- GetNetcdfLibs()
-
- ######################
- # PIOc #
- ######################
-
- # Look for pioc in INSTALL_SHAREDPATH/lib
- find_library(SCORPIO_C_LIB pioc REQUIRED PATHS ${SCORPIO_LIB_DIR})
-
- # Create the interface library, and set target properties
add_library (pioc INTERFACE)
- target_link_libraries (pioc INTERFACE ${SCORPIO_C_LIB} gptl ${netcdf_c_lib})
- target_include_directories (pioc INTERFACE ${SCORPIO_INC_DIR} ${netcdf_c_incdir} ${pnetcdf_incdir} ${CSM_SHR_INCLUDE})
+ target_link_libraries (pioc INTERFACE spio)
+ add_library (piof INTERFACE)
+ target_link_libraries (piof INTERFACE spio)
+
+ #set(SCORPIO_INC_DIR ${INSTALL_SHAREDPATH}/include)
# HACK: CIME only copies headers from the bld dir to the CSM_SHR_INCLUDE dir
# This means all the pioc headers in the src folder are not copied.
# It would be nice if CIME used the cmake-generated makefile, and
# ran 'make install' rather than copy files. Alas, we don't control
# that, so we need another way. Including the src tree folder works.
- target_include_directories (pioc INTERFACE ${SCREAM_BASE_DIR}/../../externals/scorpio/src/clib)
- get_target_property (pioc_inc_dirs pioc INTERFACE_INCLUDE_DIRECTORIES)
- message ("pioc includes: ${pioc_inc_dirs}")
- if (pnetcdf_lib)
- target_link_libraries(pioc INTERFACE "${pnetcdf_lib}")
- endif ()
-
- ######################
- # PIOf #
- ######################
-
- # Look for piof lib in INSTALL_SHAREDPATH/lib
- find_library(SCORPIO_F_LIB piof REQUIRED PATHS ${SCORPIO_LIB_DIR})
-
- # Create the interface library, and set target properties
- add_library(piof INTERFACE)
- target_link_libraries (piof INTERFACE ${SCORPIO_F_LIB} ${netcdf_f_lib} pioc)
- target_include_directories (piof INTERFACE ${SCORPIO_INC_DIR} ${netcdf_f_incdir} )
+ target_include_directories(pioc INTERFACE ${SCREAM_BASE_DIR}/../../externals/scorpio/src/clib)
else ()
# Not a CIME build. We'll add scorpio as a subdir
diff --git a/components/eamxx/data/scream_default_output.yaml b/components/eamxx/data/scream_default_output.yaml
index 81ccf0293dac..7e0a45f12d6a 100644
--- a/components/eamxx/data/scream_default_output.yaml
+++ b/components/eamxx/data/scream_default_output.yaml
@@ -35,8 +35,10 @@ Fields:
- qr
- eff_radius_qc
- eff_radius_qi
+ - eff_radius_qr
- precip_ice_surf_mass
- precip_liq_surf_mass
+ - rainfrac
# SHOC + P3
- qc
- qv
@@ -59,6 +61,13 @@ Fields:
- surf_sens_flux
# Diagnostics
- PotentialTemperature
+ # GLL output for homme states.
+ Dynamics:
+ Field Names:
+ - ps_dyn
+ - dp3d_dyn
+ - omega_dyn
+ IO Grid Name: Physics GLL
output_control:
# WARNING: ERS/ERP tets will override this with STOP_N/STOP_OPTION
Frequency: ${HIST_N}
diff --git a/components/eamxx/data/scream_default_remap.yaml b/components/eamxx/data/scream_default_remap.yaml
index 6749034adc4b..8bf47386c76d 100644
--- a/components/eamxx/data/scream_default_remap.yaml
+++ b/components/eamxx/data/scream_default_remap.yaml
@@ -35,8 +35,10 @@ Fields:
- qr
- eff_radius_qc
- eff_radius_qi
+ - eff_radius_qr
- precip_ice_surf_mass
- precip_liq_surf_mass
+ - rainfrac
# SHOC + P3
- qc
- qv
diff --git a/components/eamxx/docs/README.md b/components/eamxx/docs/README.md
deleted file mode 100644
index 5c6e4a1de213..000000000000
--- a/components/eamxx/docs/README.md
+++ /dev/null
@@ -1,15 +0,0 @@
-This is the top-level of the SCREAM design documentation. That documentation
-is written in LaTeX and is spread over a directory structure mimicking that
-of the actual code within the ../src directory.
-
-To compile the documentation for a particular process, go to the docs
-directory for that process and issue:
-
-pdflatex main.tex
-bibtex main
-pdflatex main.tex
-
-If you want to compile the documentation for the whole model, issue the same
-command from the top-level docs directory.
-
-Obviously, you need to have a working Latex installation for this to work.
\ No newline at end of file
diff --git a/components/eamxx/docs/build.md b/components/eamxx/docs/build.md
deleted file mode 100644
index 7ca9f05e1a93..000000000000
--- a/components/eamxx/docs/build.md
+++ /dev/null
@@ -1,162 +0,0 @@
-# Building and Testing SCREAM Unit Tests
-
-Follow these simple instructions to build and test SCREAM's standalone
-configuration for yourself. Note that similar documentation is available on confluence (for E3SM team members)
-at https://acme-climate.atlassian.net/wiki/spaces/NGDNA/pages/1264386127/Running+SCREAM+Tests.
-This document makes use of the following paths:
-
-+ `${RUN_ROOT_DIR}`: the root directory where SCREAM is built and run
-+ `${SCREAM_SRC_DIR}`: the directory into which you've cloned the `scream` repo
-
-SCREAM's configuration and build system is based on [CMake](https://cmake.org/).
-CMake has been around a while and has gained a lot of traction in recent years,
-especially in the HPC community. It has good [reference documentation](https://cmake.org/cmake/help/latest/index.html),
-but it can be tricky to use if you've never encountered it. Ask a SCREAM team
-member for help if you're stuck on something CMake-related.
-
-If you see a `CMakeLists.txt` files or a file with a `.cmake` suffix, that's
-just part of the build system. You might also see files with `CTest` as part of
-their name. These files are related to [CTest](https://cmake.org/cmake/help/latest/manual/ctest.1.html),
-CMake's testing tool.
-
-## 1. Start From a Trustworthy Commit
-
-First, make sure you've cloned the [SCREAM repo (including all submodules)](https://github.com/E3SM-Project/scream)
-to `SCREAM_SRC_DIR` using the following command:
-
-```
-git clone --recurse-submodules https://github.com/E3SM-Project/scream
-```
-
-If you have already cloned the project and forgot to type `--recurse-submodules`,
-you can change to `$SCREAM_SRC_DIR` and using the following command to initialize,
-fetch and checkout all submodules:
-
-```
-git submodule update --init --recursive
-```
-
-If you're running a branch that's not `master`, check out this branch with
-
-```
-git checkout
-```
-
-## 2. Configure Your SCREAM Build
-
-Change to your `$RUN_ROOT_DIR` directory and use CMake to configure your build.
-
-If you're building SCREAM on one of our supported platforms, you can tell CMake
-to use the appropriate machine file using the `-C` flag. Machine files are
-located in `$SCREAM_SRC_DIR/components/eamxx/cmake/machine-files`. Take a look
-and see whether your favorite machine has one.
-
-For example, to configure SCREAM on the Quartz machine at LLNL:
-
-```
-cd $RUN_ROOT_DIR
-cmake \
- -DCMAKE_CXX_COMPILER=$(which mpicxx) \
- -DCMAKE_BUILD_TYPE=Debug \
- -C ${SCREAM_SRC_DIR}/components/eamxx/cmake/machine-files/quartz.cmake \
- ${SCREAM_SRC_DIR}/components/eamxx
-```
-
-If you're building on a machine that doesn't have a ready-made machine file,
-you can try configuring your build by manually passing options to CMake. This
-usually looks something like the following:
-```
-cd $RUN_ROOT_DIR
-cmake \
- -D CMAKE_BUILD_TYPE=Debug \
- -D Kokkos_ENABLE_DEBUG=TRUE \
- -D Kokkos_ENABLE_AGGRESSIVE_VECTORIZATION=OFF \
- -D Kokkos_ENABLE_SERIAL=ON \
- -D Kokkos_ENABLE_OPENMP=ON \
- -D Kokkos_ENABLE_PROFILING=OFF \
- -D Kokkos_ENABLE_DEPRECATED_CODE=OFF \
- -D KOKKOS_ENABLE_ETI:BOOL=OFF \
- -D CMAKE_C_COMPILER=mpicc \
- -D CMAKE_CXX_COMPILER=mpicxx \
- -D CMAKE_Fortran_COMPILER=mpif90 \
- ${SCREAM_SRC_DIR}/components/eamxx
-```
-
-In either case, SCREAM requires MPI-savvy compilers, which can be specified
-using the `CMAKE_xyz_COMPІLER` options.
-
-Above, we've configured `Debug` builds to make it easier to find and fix errors.
-For performance testing, you should configure a `Release` build and make use of
-other options, depending on your architecture.
-
-## 3. Build SCREAM
-
-Now you can build SCREAM from that same directory:
-
-```
-make -j
-```
-
-The `-j` flag tells Make to use threads to compile in parallel. If you like, you
-can set the number of threads by passing it as an argument to `-j` (e.g.
-`make -j8`).
-
-## 4. Run SCREAM's Tests
-
-Before running the tests, generate a baseline file:
-
-```
-cd $RUN_ROOT_DIR
-make baseline
-```
-
-The tests will run, automatically using the baseline file, which is located in
-the CMake-configurable path `${SCREAM_TEST_DATA_DIR}`. By default, this path is
-set to `data/` within your build directory (which is `$RUN_ROOT_DIR`, in
-our case).
-
-To run all of SCREAM's tests, make sure you're in `$RUN_ROOT_DIR` and type
-
-```
-ctest -VV
-```
-
-This runs everything and reports results in an extra-verbose (`-VV`) manner.
-
-You can also run subsets of the SCREAM tests. For example, to run only the
-P3 regression tests (again, from the `$RUN_ROOT_DIR` directory), use
-
-```
-ctest -R p3_regression
-```
-
-### Grouping Tests with Labels
-
-We can create groupings of tests by using **labels**. For example, we have a
-`driver` label that runs tests for SCREAM's standalone driver. You can see a
-list of available labels by typing
-
-```
-ctest --print-labels
-```
-
-To see which tests are associated with a given label (e.g. `driver`), use
-
-```
-ctest -L driver -N
-```
-
-# SCREAM Test Suites
-
-## The `p3_regression` Suite
-
-`p3_regression` uses a baseline file to compare any new or altered
-implementations with our P3 Fortran reference implementation. If you're working
-on the C++/Kokkos implementation, you can invoke any new tests to the function
-`Baseline::run_and_cmp` in
-`${SCREAM_SRC_DIR}/components/eamxx/p3/tests/p3_run_and_cmp.cpp`.
-
-If the reference Fortran implementation changes enough that a new baseline file
-is required, make sure to let other SCREAM team members know, in order to
-minimize disruptions.
-
diff --git a/components/eamxx/docs/common/installation.md b/components/eamxx/docs/common/installation.md
new file mode 100644
index 000000000000..017007abdce8
--- /dev/null
+++ b/components/eamxx/docs/common/installation.md
@@ -0,0 +1,170 @@
+# Installation
+
+Follow these simple instructions to build and test EAMxx's standalone
+configuration for yourself. This document makes use of the following paths:
+
++ `${RUN_ROOT_DIR}`: the root directory where EAMxx is built and run
++ `${EAMXX_SRC_DIR}`: the directory into which you've cloned the `scream` repo
+
+EAMxx's configuration and build system is based on [CMake](https://cmake.org/).
+CMake has been around a while and has gained a lot of traction in recent years,
+especially in the HPC community. It has good [reference documentation](https://cmake.org/cmake/help/latest/index.html),
+but it can be tricky to use if you've never encountered it. Ask an EAMxx team
+member for help if you're stuck on something CMake-related.
+
+If you see a `CMakeLists.txt` file or a file with a `.cmake` suffix, that's
+just part of the build system. You might also see files with `CTest` as part of
+their name. These files are related to [CTest](https://cmake.org/cmake/help/latest/manual/ctest.1.html),
+CMake's testing tool.
+
+## Prerequisites
+
+First, make sure you're on one of the machines supported by EAMxx, or that you
+have the following software installed:
+
+* A working MPI installation (typically [MPICH]() or [Open-MPI]())
+* [CMake](https://cmake.org) and [GNU Make](https://www.gnu.org/software/make/)
+* A working set of C, C++, and Fortran compilers
+* A recent version of [Git](https://git-scm.com/)
+* A working installation of [NetCDF](https://www.unidata.ucar.edu/software/netcdf/),
+ including both [C](https://github.com/Unidata/netcdf-c) and
+ [Fortran](https://github.com/Unidata/netcdf-fortran) libraries.
+
+## Setting Up Your Environment
+
+## Configuring and Building EAMxx
+
+### 1. Start From a Trustworthy Commit
+
+First, make sure you've cloned the [EAMxx repo (including all submodules)](https://github.com/E3SM-Project/scream)
+to `EAMXX_SRC_DIR` using the following command:
+
+```
+git clone --recurse-submodules https://github.com/E3SM-Project/scream
+```
+
+If you have already cloned the project and forgot to type `--recurse-submodules`,
+you can change to `$EAMXX_SRC_DIR` and use the following command to initialize,
+fetch and checkout all submodules:
+
+```
+git submodule update --init --recursive
+```
+
+If you're running a branch that's not `master`, check out this branch with
+
+```
+git checkout
+```
+
+### 2. Configure Your EAMxx Build
+
+Change to your `$RUN_ROOT_DIR` directory and use CMake to configure your build.
+
+If you're building EAMxx on one of our supported platforms, you can tell CMake
+to use the appropriate machine file using the `-C` flag. Machine files are
+located in `$EAMXX_SRC_DIR/components/eamxx/cmake/machine-files`. Take a look
+and see whether your favorite machine has one.
+
+For example, to configure EAMxx on the Quartz machine at LLNL:
+
+```
+cd $RUN_ROOT_DIR
+cmake \
+ -DCMAKE_CXX_COMPILER=$(which mpicxx) \
+ -DCMAKE_BUILD_TYPE=Debug \
+ -C ${EAMXX_SRC_DIR}/components/eamxx/cmake/machine-files/quartz.cmake \
+ ${EAMXX_SRC_DIR}/components/eamxx
+```
+
+If you're building on a machine that doesn't have a ready-made machine file,
+you can try configuring your build by manually passing options to CMake. This
+usually looks something like the following, which configures EAMxx to compile
+CPU code using Kokkos's OpenMP backend:
+```
+cd $RUN_ROOT_DIR
+cmake \
+ -D CMAKE_BUILD_TYPE=Debug \
+ -D CMAKE_C_COMPILER=mpicc \
+ -D CMAKE_CXX_COMPILER=mpicxx \
+ -D CMAKE_Fortran_COMPILER=mpif90 \
+ -D MPIEXEC_EXECUTABLE=`which mpiexec` \
+ -D EKAT_MPI_NP_FLAG:STRING=-n \
+ -D SCREAM_DYNAMICS_DYCORE=HOMME \
+ -D SCREAM_DOUBLE_PRECISION:BOOL=ON \
+ -D SCREAM_INPUT_ROOT:PATH=/path/to/scream-input \
+ -D Kokkos_ENABLE_DEBUG=TRUE \
+ -D Kokkos_ENABLE_AGGRESSIVE_VECTORIZATION=OFF \
+ -D Kokkos_ENABLE_SERIAL=ON \
+ -D Kokkos_ENABLE_OPENMP=ON \
+ -D Kokkos_ENABLE_LIBDL=OFF \
+ -D Kokkos_ENABLE_PROFILING=OFF \
+ -D Kokkos_ENABLE_DEPRECATED_CODE=OFF \
+ -D KOKKOS_ENABLE_ETI:BOOL=OFF \
+ -D NetCDF_C_PATHS=/path/to/netcdf-c-dir \
+ -D NetCDF_Fortran_PATHS=/path/to/netcdf-f90-dir \
+ -D PnetCDF_C_PATHS=/path/to/pnetcdf-dir \
+ -D PnetCDF_Fortran_PATHS=/path/to/pnetcdf-f90-dir \
+ ${EAMXX_SRC_DIR}/components/eamxx
+```
+
+In either case, EAMxx requires MPI-aware compilers. Let's examine these
+options (only some of which are required on any given machine) to make sure we
+know what they do:
+
+* `CMAKE_BUILD_TYPE`: specifies whether you are building EAMxx in a
+ developer-friendly configuration (`Debug`), for a production run (`Release`)
+ or for performance profiling or some other specialized purpose. Typically,
+ you'll set this option to `Debug` or `Release`.
+* `CMAKE_{C,CXX,Fortran}_COMPILER`: the name of the command used to invoke an
+ MPI-enabled C, C++, or Fortran compiler to build EAMxx
+* `MPIEXEC_EXECUTABLE`: the name of the command used to run EAMxx using MPI,
+ typically `mpiexec` or `mpirun`, but possibly different depending on your
+ desired machine
+* `EKAT_MPI_NP_FLAG`: the flag passed to `MPIEXEC_EXECUTABLE` that you use to
+ specify the number of desired MPI processes. This is typically `-n` for
+ `mpiexec` and `-np` for `mpirun`.
+* `SCREAM_DYNAMICS_DYCORE`: specifies the dycore used for configuring EAMxx,
+ which is `NONE` if you are not configuring EAMxx to run its dycore-related
+ tests, or `HOMME` if you want to use HOMMExx
+* `SCREAM_DOUBLE_PRECISION`: indicates whether EAMxx's `Real` type is a
+ double-precision (`ON`) or single-precision (`OFF`) floating point type
+* `SCREAM_INPUT_ROOT`: specifies the location of the top-level folder that
+ stores input data files for EAMxx. This folder is populated with input files
+ which are downloaded automatically during EAMxx's build process.
+* The Kokkos-related build options (most of which begin with `Kokkos_`) are
+ described [in the Kokkos Wiki](https://kokkos.github.io/kokkos-core-wiki/keywords.html)
+* `NetCDF_C_PATHS`: specifies one or more folders in which the NetCDF C library
+ and headers are installed. In the simplest configuration, the headers should
+ be located in `${NetCDF_C_PATHS}/include` and the library should live in
+ `${NetCDF_C_PATHS}/lib`.
+* `NetCDF_Fortran_PATHS`: specifies one or more folders in which the NetCDF
+ Fortran library and modules are installed. Analogous to `${NetCDF_C_PATHS}`,
+ `.mod` files should be in `${NetCDF_Fortran_PATHS}/include`, and the library
+ should be installed in `${NetCDF_Fortran_PATHS}/lib`.
+* `PnetCDF_C_PATHS`: specifies one or more folders in which the pNetCDF C
+ library and headers are installed, analogous to `NetCDF_C_PATHS`.
+* `PnetCDF_Fortran_PATHS`: specifies one or more folders in which the pNetCDF
+ Fortran library and modules are installed, analogous to
+ `NetCDF_Fortran_PATHS`.
+
+Above, we've configured `Debug` builds to make it easier to find and fix errors.
+For performance testing, you should configure a `Release` build and make use of
+other options, depending on your architecture.
+
+### 3. Build EAMxx
+
+Now you can build EAMxx from that same directory:
+
+```
+make -j
+```
+
+The `-j` flag tells Make to use threads to compile in parallel. If you like, you
+can set the number of threads by passing it as an argument to `-j` (e.g.
+`make -j8`).
+
+## Running Tests
+
+You can run EAMxx's tests to make sure your build works by following the
+instructions [here](../developer/standalone_testing.md).
diff --git a/components/eamxx/docs/control/main.tex b/components/eamxx/docs/control/main.tex
deleted file mode 100644
index 4ce1ffab4733..000000000000
--- a/components/eamxx/docs/control/main.tex
+++ /dev/null
@@ -1,35 +0,0 @@
-\documentclass[12pt]{article}
-\usepackage{authblk} %needed to compile chunks as standalone
-\bibliographystyle{../amermeteorsoc}
-
-\title{A next generation driver for EAMxx}
-
-\author[2]{Luca Bertagna}
-\author[1]{Aaron Donahue}
-\author[2]{Ben Hillman}
-\author[1]{Peter Caldwell}
-\author[2]{Thomas Clevenger}
-\author[2]{Jim Foucar}
-\date{\today}
-
-\affil[1]{Lawrence Livermore National Lab, Livermore CA}
-\affil[2]{Sandia National Laboratories, Albuquerque, NM}
-\affil[3]{Lawrence Berkeley National Laboratory, Berkeley, CA}
-\affil[4]{Brookhaven National Laboratory, Upton, NY}
-\affil[5]{Pacific Northwest National Laboratory, Richland, WA}
-\affil[6]{University of California, Davis, Davis, CA}
-
-\begin{document}
-\maketitle{}
-
-
-\input{driver_doc.tex}
-
-%bibliography needs to be in main b/c will be called from different directories when an individual
-%section is compiled versus when all documentation is compiled.
-%================================
-\subsection{Bibliography}
-%================================
-\bibliography{../bibliography.bib}
-
-\end{document}
diff --git a/components/eamxx/docs/developer/ci_nightly.md b/components/eamxx/docs/developer/ci_nightly.md
new file mode 100644
index 000000000000..4089f523daf9
--- /dev/null
+++ b/components/eamxx/docs/developer/ci_nightly.md
@@ -0,0 +1,4 @@
+# Continuous Integration and Nightly Testing
+
+* Autotester quick overview
+* Nightly overview, CDash
diff --git a/components/eamxx/docs/developer/cime_testing.md b/components/eamxx/docs/developer/cime_testing.md
new file mode 100644
index 000000000000..7c2d91a36579
--- /dev/null
+++ b/components/eamxx/docs/developer/cime_testing.md
@@ -0,0 +1,7 @@
+# Full Model Testing
+
+Quickly review CIME test infrastructure and how EAMxx uses it
+
+* test types, specifiers (`_LnX`,`_D`,`_PMxN`,..), grids, compsets, test-mods
+* available grids/compsets for EAMxx, and where to find them
+* how to add atmchange in `shell_commands` test mods
diff --git a/components/eamxx/docs/developer/field.md b/components/eamxx/docs/developer/field.md
new file mode 100644
index 000000000000..013dcf857b62
--- /dev/null
+++ b/components/eamxx/docs/developer/field.md
@@ -0,0 +1,46 @@
+## Field
+
+In EAMxx, a `Field` is a data structure holding two things: pointers to the data and pointers to metadata.
+Both the data and metadata are stored in `std::shared_ptr` instances, to ensure consistency across all copies
+of the field. This allows for fast shallow-copy semantics for this class.
+
+The data is stored on both CPU and device memory (these may be the same, depending on the Kokkos
+backend). In EAMxx, we always assume and guarantee that the device data is up to date. That implies that the data
+be explicitly synced to host before using it on host, and explicitly synced to device after host manipulation,
+in order to ensure correctness. In order to access the data, users must use the `get_view` method, which takes
+two template arguments: the data type, and an enum specifying whether CPU or device data is needed. The data
+type is used to reinterpret the generic pointer stored inside to a view of the correct scalar type and layout.
+It is a possibly const-qualified type, and if the field was marked as "read-only", the method ensures that the
+provided data type is const. A read-only field can be created via the `getConst` method, which returns an
+identical copy of the field, but marked as read-only. The enum specifying host or device data is optional,
+with device being the default.
+
+The metadata is a collection of information on the field, such as name, layout, units, allocation size, and more.
+Part of the metadata is immutable after creation (e.g., name, units, or layout), while some metadata can be
+partially or completely modified. The metadata is contained in the `FieldHeader` data structure, which contains
+four parts:
+
+* `FieldIdentifier`: stores the field's name, layout, units, data type, and name of the grid where it's defined.
+ This information is condensed in a single string that can be used to uniquely identify a field,
+ allowing to distinguish between different versions of the same field. The layout is stored in the `FieldLayout`
+ data structure, which includes:
+ * the field tags: stored as a `std::vector`, they give context to the field's extents.
+ * the field dims: stored both as a `std::vector`, as well as a 1d `Kokkos::View`.
+* `FieldTracking`: stores information on the usage of the field, as well as its possible connections to other
+ fields. In particular, the tracked items are:
+ * the field time stamp: the time stamp when the field was last updated.
+ * the field accumulation start time: used for fields that are accumulated over several time steps
+ (or time step subcycles). For instance, it allows reconstructing fluxes from raw accumulations.
+ * the providers/customers: lists of atmosphere processes (see below) that respectively require/compute
+ the field in their calculations.
+ * the field groups: a list of field groups that this field belongs to. Field groups are used to access
+ a group of fields without explicit prior knowledge about the number and/or names of the fields.
+* `FieldAllocProp`: stores information about the allocation. While the field is not yet allocated, users can
+ request special allocations for the field, for instance to accommodate packing (for SIMD), which may
+ require padding. Upon allocation, this information is then used by the Field structure to extract the
+ actual data, wrapped in a properly shaped `Kokkos::View`. The alloc props are also responsible for tracking
+ additional information in case the field is a "slice" of a higher-dimensional one, a fact that can affect
+ how the data is accessed.
+* Extra data: stored as a `std::map`, allows to catch any metadata that does not fit
+ in the above structures. This is a last resort structure, intended to accommodate the most peculiar
+ corner cases, and should be used sparingly.
diff --git a/components/eamxx/docs/developer/grid.md b/components/eamxx/docs/developer/grid.md
new file mode 100644
index 000000000000..8a61b97e0795
--- /dev/null
+++ b/components/eamxx/docs/developer/grid.md
@@ -0,0 +1,22 @@
+## Grids and Remappers
+
+In EAMxx, the `AbstractGrid` is an interface used to access information regarding the horizontal and vertical
+discretization. The most important information that the grid stores is:
+
+* the number of local/global DOFs: these are the degrees of freedom of the horizontal grid only. Here,
+ local/global refers to the MPI partitioning.
+* the DOFs global IDs (GIDs): a list of GIDs of the DOFs on the current MPI rank, stored as a Field
+* the local IDs (LIDs) to index list: this list maps the LID of a DOF (that is, the position of the DOF
+ in the GID list) to a "native" indexing system for that DOF. For instance, a `PointGrid` (a class derived from
+ `AbstractGrid`) is a simple collection of points, so the "native" indexing system coincides with the LIDs.
+ However, for a `SEGrid` (a derived class, for spectral element grids), the "native" indexing is a triplet
+ `(ielem,igp,jgp)`, specifying the element index, and the two indices of the Gauss point within the element.
+* geometry data: stored as a `std::map`, this represents any data that is intrinsically
+ linked to the grid (either along the horizontal or vertical direction), such as lat/lon coordinates,
+ vertical coordinates, area associated with the DOF.
+
+Grids can also be used to retrieve the layout of a 2d/3d scalar/vector field, which allows certain downstream
+classes to perform certain operations without assuming anything about the horizontal grid.
+
+In general, grid objects are passed around the different parts of EAMxx as const objects (read-only).
+The internal data can only be modified during construction, which usually is handled by a `GridsManager` object.
diff --git a/components/eamxx/docs/developer/index.md b/components/eamxx/docs/developer/index.md
new file mode 100644
index 000000000000..2d47bab65fe3
--- /dev/null
+++ b/components/eamxx/docs/developer/index.md
@@ -0,0 +1,3 @@
+# SCREAM Developer Guide
+
+
diff --git a/components/eamxx/docs/developer/io.md b/components/eamxx/docs/developer/io.md
new file mode 100644
index 000000000000..caf237010a33
--- /dev/null
+++ b/components/eamxx/docs/developer/io.md
@@ -0,0 +1,5 @@
+# Input-Output
+
+In EAMxx, I/O is handled through the SCORPIO library, currently a submodule of E3SM.
+The `scream_io` library within eamxx allows interfacing the EAMxx infrastructure classes
+with the SCORPIO library.
diff --git a/components/eamxx/docs/developer/kokkos_ekat.md b/components/eamxx/docs/developer/kokkos_ekat.md
new file mode 100644
index 000000000000..4a5df20ab80a
--- /dev/null
+++ b/components/eamxx/docs/developer/kokkos_ekat.md
@@ -0,0 +1,12 @@
+# Building Blocks
+
+Here we can discuss EKAT, Kokkos, and all of the highly-technical non-scientific
+stuff that makes our heads hurt.
+
+## Kokkos Views
+
+## Vectorization: Packs
+
+## Fields and the Field Manager
+
+### Preconditions, Postconditions, and Invariants
diff --git a/components/eamxx/docs/developer/managers.md b/components/eamxx/docs/developer/managers.md
new file mode 100644
index 000000000000..676449a21845
--- /dev/null
+++ b/components/eamxx/docs/developer/managers.md
@@ -0,0 +1 @@
+## FieldManager and GridsManager
diff --git a/components/eamxx/docs/developer/processes.md b/components/eamxx/docs/developer/processes.md
new file mode 100644
index 000000000000..d6a81b3cae20
--- /dev/null
+++ b/components/eamxx/docs/developer/processes.md
@@ -0,0 +1,18 @@
+# Atmospheric Processes
+
+In EAMxx, the `AtmosphereProcess` (AP) is a class representing a portion of the atmosphere timestep algorithm.
+In simple terms, an AP is an object that given certain input fields performs some calculations to compute
+some output fields.
+
+TODO: describe init sequence (e.g., the process of requesting fields), base class main
+ interfaces/capabilities (e.g., subcycling), class expectations (e.g., must update fields on physics grid)
+
+Here is a list of currently implemented atmosphere processes.
+TODO: add links to papers/github-repos, and a SMALL description
+* p3: Microphysics, blah blah
+* SHOC: Macrophysics/Turbulence, blah
+* rrtmgp: Radiation, blah
+* spa: prescribed aerosols, blah blah
+* surface coupling: blah
+* mam: prognostic aerosols, blah blah
+* nudging: This process is responsible for nudging the model simulation given a set of files with a target nudged state.
diff --git a/components/eamxx/docs/developer/source_tree.md b/components/eamxx/docs/developer/source_tree.md
new file mode 100644
index 000000000000..15c018cc8858
--- /dev/null
+++ b/components/eamxx/docs/developer/source_tree.md
@@ -0,0 +1,59 @@
+# EAMxx's Source Tree
+
+All EAMxx-specific code can be found in `components/eamxx` within the
+[EAMxx repo](https://github.com/E3SM-Project/scream). Here's how things are
+organized:
+
++ `cime_config`: Tools and XML files for integrating EAMxx with E3SM via the
+ CIME framework.
++ `cmake`: CMake functions and macros used by the configuration/build system.
++ `data`: Data files used by our tests.
++ `docs`: Documentation for the EAMxx project, including design documents,
+ instructions for building and testing EAMxx, and this document.
++ `scripts`: Miscellaneous scripts that implement workflows for running tests
+ and analyzing performance.
++ `src`: All C++ source code (and any bridges to Fortran) for EAMxx is stored
+ here. We describe the contents of this directory in greater detail below.
++ `tests`: Implements standalone, end-to-end tests for various EAMxx
+ components (RRTMG, HOMME, P3, SHOC, etc).
+
+In addition, you'll notice the following files in `components/eamxx`:
+
++ `CMakeLists.txt`: The CMake file that defines EAMxx's configuration/build
+ system.
++ `CTestConfig.cmake`: This CTest file contains parameters that determine how
+ our test results are reported to the [E3SM CDash Site](http://my.cdash.org/submit.php?project=E3SM).
++ `README.md`: EAMxx's top-level README file, which describes the project and
+ its purpose.
++ `mkdocs.yml`: The configuration file for [mkdocs](https://www.mkdocs.org/),
+ the tool we currently use to build and publish our documentation.
+
+## The `src` Directory
+
+Herein lies the source code for EAMxx. Broadly, here's where things are:
+
++ `control`: Contains the atmosphere driver and basic tests for it.
++ `dynamics`: Here's where HOMME lives within EAMxx, along with code for
+ interfacing with it using EAMxx's data structures.
++ `mct_coupling`: Glue code for embedding EAMxx within E3SM as an atmosphere
+ component using the MCT coupler.
++ `physics`: Source code for physics-related atmospheric processes, including
+ + `p3`: The C++/Kokkos implementation of P3 microphysics within EAMxx.
+ + `shoc`: The C++/Kokkos implementation of SHOC macrophysics within EAMxx.
+ + `rrtmgp`: A stub for the radiation processes as represented in EAMxx.
+ + `share`: Utilities and data structures common to these processes.
++ `share`: Utilities used by various components within EAMxx. Of note:
+ + `io`: EAMxx's interface to the [SCORPIO](https://e3sm.org/scorpio-parallel-io-library/)
+ library.
++ `diagnostics`: A collection of simple classes used to compute diagnostic
+ quantities.
+
+Each of these directories contains a `CMakeLists.txt` file for defining how
+things are built, and a `tests/` subdirectory that houses relevant
+unit and verification tests.
+
+You'll also see some other files in the `src/` directory itself, such as
+
++ `scream_config.h.in`: A template for generating a C++ header file with
+ EAMxx configuration information.
+
diff --git a/components/eamxx/docs/developer/standalone_testing.md b/components/eamxx/docs/developer/standalone_testing.md
new file mode 100644
index 000000000000..aedb7b2cbaad
--- /dev/null
+++ b/components/eamxx/docs/developer/standalone_testing.md
@@ -0,0 +1,84 @@
+# Standalone EAMxx Testing
+
+In this section we describe our testing methodology for standalone EAMxx
+configurations. We use several types of tests
+
+* **Unit tests** are individual test programs that demonstrate that a small set
+ of code performs a single function or a set of related functions. We use
+ a C++ unit testing framework called [Catch2](https://catch2-temp.readthedocs.io/en/latest/index.html)
+ to implement unit tests.
+* **Property (verification) tests** are test programs that configure code that
+ demonstrates that a part of EAMxx (for example, an atmospheric physics
+ parameterization or the dynamical core) is able to produce an answer that
+ satisfies some physical constraint or matches a known solution under specific
+ circumstances.
+* **Fortran-C++ "bit-for-bit" (BFB) tests** are test programs, often implemented
+ as unit tests, that demonstrate that a set of C++ code ported from Fortran
+ produces bit-for-bit identical results to its Fortran counterpart, provided
+ certain compiler options are enabled (such as "strict" floating-point
+ arithmetic).
+* **Test Suites** are named collections of tests that can be run on demand using
+ the [ctest](https://cmake.org/cmake/help/latest/manual/ctest.1.html) command.
+
+We also support a `test-all-scream` configuration that runs all of the
+standalone tests for an EAMxx configuration.
+
+## Running EAMxx's Tests with CTest
+
+Before running the tests, generate a baseline file:
+
+```
+cd $RUN_ROOT_DIR
+make baseline
+```
+
+The tests will run, automatically using the baseline file, which is located in
+the CMake-configurable path `${SCREAM_TEST_DATA_DIR}`. By default, this path is
+set to `data/` within your build directory (which is `$RUN_ROOT_DIR`, in
+our case).
+
+To run all of SCREAM's tests, make sure you're in `$RUN_ROOT_DIR` and type
+
+```
+ctest -VV
+```
+
+This runs everything and reports results in an extra-verbose (`-VV`) manner.
+
+You can also run subsets of the SCREAM tests. For example, to run only the
+P3 regression tests (again, from the `$RUN_ROOT_DIR` directory), use
+
+```
+ctest -R p3_regression
+```
+
+### Grouping Tests with Labels
+
+We can create groupings of tests by using **labels**. For example, we have a
+`driver` label that runs tests for SCREAM's standalone driver. You can see a
+list of available labels by typing
+
+```
+ctest --print-labels
+```
+
+To see which tests are associated with a given label (e.g. `driver`), use
+
+```
+ctest -L driver -N
+```
+
+## EAMxx Test Suites
+
+### The `p3_regression` Suite
+
+`p3_regression` uses a baseline file to compare any new or altered
+implementations with our P3 Fortran reference implementation. If you're working
+on the C++/Kokkos implementation, you can add any new tests to the function
+`Baseline::run_and_cmp` in
+`${SCREAM_SRC_DIR}/components/eamxx/p3/tests/p3_run_and_cmp.cpp`.
+
+If the reference Fortran implementation changes enough that a new baseline file
+is required, make sure to let other SCREAM team members know, in order to
+minimize disruptions.
+
diff --git a/components/eamxx/docs/developer/style_guide.md b/components/eamxx/docs/developer/style_guide.md
new file mode 100644
index 000000000000..f43678330099
--- /dev/null
+++ b/components/eamxx/docs/developer/style_guide.md
@@ -0,0 +1,10 @@
+# SCREAM C++ Style Guide
+
+Here's our style guide. Let the holy wars begin!
+
+## Types
+
+## Functions and Methods
+
+## Variables
+
diff --git a/components/eamxx/docs/dynamics/.DS_Store b/components/eamxx/docs/dynamics/.DS_Store
deleted file mode 100644
index 243ceaf8ed8a..000000000000
Binary files a/components/eamxx/docs/dynamics/.DS_Store and /dev/null differ
diff --git a/components/eamxx/docs/dynamics/homme/NHxx_doc.tex b/components/eamxx/docs/dynamics/homme/NHxx_doc.tex
deleted file mode 100644
index 0b616743913b..000000000000
--- a/components/eamxx/docs/dynamics/homme/NHxx_doc.tex
+++ /dev/null
@@ -1,17 +0,0 @@
-\section{Nonhydrostatic Spectral Element Dycore}
-
-\subsection{Theory}
-
-Put explanation of continuous equations here.
-
-\subsection{Numerical Methods}
-
-Describe the numerical methods used to discretize the equations here.
-
-\subsection{Computational Implementation}
-
-Describe the strategies used (if any) to ensure good computational performance.
-
-\subsection{Verification}
-
-Describe testing strategy
diff --git a/components/eamxx/docs/dynamics/homme/main.tex b/components/eamxx/docs/dynamics/homme/main.tex
deleted file mode 100644
index 00e6f2d75e7d..000000000000
--- a/components/eamxx/docs/dynamics/homme/main.tex
+++ /dev/null
@@ -1,41 +0,0 @@
-\documentclass[12pt]{article}
-\usepackage{authblk}
-
-\title{Design Document for the Non-hydrostatic Spectral-Element Dycore Used by SCREAM}
-
-\author[1]{Peter Caldwell}
-\author[2]{Andy Salinger}
-\author[2]{Luca Bertagna}
-\author[1]{Hassan Beydoun}
-\author[1]{Peter Bogenschutz}
-\author[2]{Andrew Bradley}
-\author[1]{Aaron Donahue}
-\author[2]{Jim Foucar}
-\author[1]{Chris Golaz}
-\author[2]{Oksana Guba}
-\author[2]{Ben Hillman}
-\author[3]{Noel Keen}
-\author[4]{Wuyin Lin}
-\author[5]{Kyle Pressel}
-\author[5]{Balwinder Singh}
-\author[2]{Andrew Steyer}
-\author[2]{Mark Taylor}
-\author[1]{Chris Terai}
-\author[6]{Paul Ullrich}
-\date{\today}
-
-\affil[1]{Lawrence Livermore National Lab, Livermore CA}
-\affil[2]{Sandia National Laboratories, Albuquerque, NM}
-\affil[3]{Lawrence Berkeley National Laboratory, Berkeley, CA}
-\affil[4]{Brookhaven National Laboratory, Upton, NY}
-\affil[5]{Pacific Northwest National Laboratory, Richland, WA}
-\affil[6]{University of California, Davis, Davis, CA}
-
-\begin{document}
-\maketitle{}
-
-NOTE: author list for just this section should be culled to just the people working on this section.
-
-\input{NHxx_doc.tex}
-
-\end{document}
diff --git a/components/eamxx/docs/index.md b/components/eamxx/docs/index.md
new file mode 100644
index 000000000000..992b797671cf
--- /dev/null
+++ b/components/eamxx/docs/index.md
@@ -0,0 +1,10 @@
+# The C++ E3SM Atmosphere Model (EAMxx)
+
+Some nice introductory text goes here! Maybe some figures, too. Who knows?!
+
+* The [User Guide](user/index.md) explains how to run EAMxx, both in
+ its standalone configuration and within E3SM.
+* The [Developer Guide](developer/index.md) contains all the information needed
+ to contribute to the development of EAMxx.
+* The [Technical Guide](technical/index.md) contains all the technical
+ information about EAMxx.
diff --git a/components/eamxx/docs/main.tex b/components/eamxx/docs/main.tex
deleted file mode 100644
index b7969bd723aa..000000000000
--- a/components/eamxx/docs/main.tex
+++ /dev/null
@@ -1,67 +0,0 @@
-\documentclass[12pt]{article}
-\usepackage{authblk}
-\usepackage{graphicx} %used by PSL at least
-\usepackage{amsmath} %used by SHOC at least
-\usepackage{natbib} %allows use of \citep{} and \citet{}
-\bibliographystyle{amermeteorsoc}
-
-\makeindex
-
-\title{Design Document for the Simple Cloud-Resolving E3SM Atmosphere Model}
-\author[1]{Peter Caldwell}
-\author[2]{Andy Salinger}
-\author[2]{Luca Bertagna}
-\author[1]{Hassan Beydoun}
-\author[1]{Peter Bogenschutz}
-\author[2]{Andrew Bradley}
-\author[5]{Conrad Clevenger}
-\author[1]{Aaron Donahue}
-\author[2]{Jim Foucar}
-\author[1]{Chris Golaz}
-\author[2]{Oksana Guba}
-\author[2]{Ben Hillman}
-\author[3]{Noel Keen}
-\author[4]{Wuyin Lin}
-\author[5]{Balwinder Singh}
-\author[2]{Andrew Steyer}
-\author[2]{Mark Taylor}
-\author[1]{Chris Terai}
-\author[6]{Paul Ullrich}
-\date{\today}
-
-\affil[1]{Lawrence Livermore National Lab, Livermore CA}
-\affil[2]{Sandia National Laboratories, Albuquerque, NM}
-\affil[3]{Lawrence Berkeley National Laboratory, Berkeley, CA}
-\affil[4]{Brookhaven National Laboratory, Upton, NY}
-\affil[5]{Pacific Northwest National Laboratory, Richland, WA}
-\affil[6]{University of California, Davis, Davis, CA}
-
-\begin{document}
-\maketitle{}
-
-\setcounter{tocdepth}{4}
-\setcounter{secnumdepth}{4}
-\tableofcontents
-
-\newpage
-
-\section{Overview}
-
-Put description of model here as well as summary of what will/won't be in this document. Mention that there will be a separate user guide. Also, this doc isn't done until the list of authors is updated. Folks who joined after day 1 aren't included.
-
-%INCLUDE SECTIONS FROM EACH PROCESS
-%graphicspath forces latex to look for figures in each of the listed directories
-\graphicspath{{control/}{physics/psl/}{dynamics/homme/}{physics/shoc/}{physics/p3/}{physics/rrtmgp/}}
-\input{control/driver_doc.tex}
-\input{dynamics/homme/NHxx_doc.tex}
-\input{physics/shoc/shoc_doc.tex}
-\input{physics/p3/p3_doc.tex}
-\input{physics/rrtmgp/rrtmgp_doc.tex}
-\input{physics/psl/psl_doc.tex}
-
-%================================
-\section{Bibliography}
-%================================
-\bibliography{biblio.bib}
-
-\end{document}
diff --git a/components/eamxx/docs/amermeteorsoc.bst b/components/eamxx/docs/old/amermeteorsoc.bst
similarity index 100%
rename from components/eamxx/docs/amermeteorsoc.bst
rename to components/eamxx/docs/old/amermeteorsoc.bst
diff --git a/components/eamxx/docs/biblio.bib b/components/eamxx/docs/old/biblio.bib
similarity index 100%
rename from components/eamxx/docs/biblio.bib
rename to components/eamxx/docs/old/biblio.bib
diff --git a/components/eamxx/docs/control/driver_doc.tex b/components/eamxx/docs/old/control/driver_doc.tex
similarity index 100%
rename from components/eamxx/docs/control/driver_doc.tex
rename to components/eamxx/docs/old/control/driver_doc.tex
diff --git a/components/eamxx/docs/physics/psl/main.tex b/components/eamxx/docs/old/physics/psl/main.tex
similarity index 100%
rename from components/eamxx/docs/physics/psl/main.tex
rename to components/eamxx/docs/old/physics/psl/main.tex
diff --git a/components/eamxx/docs/physics/psl/psl_doc.tex b/components/eamxx/docs/old/physics/psl/psl_doc.tex
similarity index 100%
rename from components/eamxx/docs/physics/psl/psl_doc.tex
rename to components/eamxx/docs/old/physics/psl/psl_doc.tex
diff --git a/components/eamxx/docs/physics/shoc/main.tex b/components/eamxx/docs/old/physics/shoc/main.tex
similarity index 100%
rename from components/eamxx/docs/physics/shoc/main.tex
rename to components/eamxx/docs/old/physics/shoc/main.tex
diff --git a/components/eamxx/docs/physics/shoc/shoc_doc.tex b/components/eamxx/docs/old/physics/shoc/shoc_doc.tex
similarity index 100%
rename from components/eamxx/docs/physics/shoc/shoc_doc.tex
rename to components/eamxx/docs/old/physics/shoc/shoc_doc.tex
diff --git a/components/eamxx/docs/physics/p3/main.tex b/components/eamxx/docs/physics/p3/main.tex
deleted file mode 100644
index 3c09baa03f22..000000000000
--- a/components/eamxx/docs/physics/p3/main.tex
+++ /dev/null
@@ -1,42 +0,0 @@
-\documentclass[12pt]{article}
-\usepackage{authblk}
-\bibliographystyle{../../amermeteorsoc}
-
-\title{Design Document for the Microphysics Scheme Used by SCREAM}
-
-\author[1]{Peter Caldwell}
-\author[2]{Andy Salinger}
-\author[2]{Luca Bertagna}
-\author[1]{Hassan Beydoun}
-\author[1]{Peter Bogenschutz}
-\author[2]{Andrew Bradley}
-\author[1]{Aaron Donahue}
-\author[2]{Jim Foucar}
-\author[1]{Chris Golaz}
-\author[2]{Oksana Guba}
-\author[2]{Ben Hillman}
-\author[3]{Noel Keen}
-\author[4]{Wuyin Lin}
-\author[5]{Kyle Pressel}
-\author[5]{Balwinder Singh}
-\author[2]{Andrew Steyer}
-\author[2]{Mark Taylor}
-\author[1]{Chris Terai}
-\author[6]{Paul Ullrich}
-\date{\today}
-
-\affil[1]{Lawrence Livermore National Lab, Livermore CA}
-\affil[2]{Sandia National Laboratories, Albuquerque, NM}
-\affil[3]{Lawrence Berkeley National Laboratory, Berkeley, CA}
-\affil[4]{Brookhaven National Laboratory, Upton, NY}
-\affil[5]{Pacific Northwest National Laboratory, Richland, WA}
-\affil[6]{University of California, Davis, Davis, CA}
-
-\begin{document}
-\maketitle{}
-
-NOTE: author list for just this section should be culled to just the people working on this section.
-
-\input{p3_doc.tex}
-
-\end{document}
diff --git a/components/eamxx/docs/physics/p3/p3_doc.tex b/components/eamxx/docs/physics/p3/p3_doc.tex
deleted file mode 100644
index fb6b026fde09..000000000000
--- a/components/eamxx/docs/physics/p3/p3_doc.tex
+++ /dev/null
@@ -1,52 +0,0 @@
-\section{Predicted Particle Properties (P3)}
-
-Describe scheme in general (copy/paste Hassan's existing doc)
-
-%================================
-\subsection{Autoconversion}
-%================================
-
-Say what autoconversion does
-
-\subsubsection{Theory}
-
-Put explanation of continuous equations here.
-
-\subsubsection{Numerical Methods}
-
-Describe the numerical methods used to discretize the equations here.
-
-\subsubsection{Computational Implementation}
-
-Describe the strategies used (if any) to ensure good computational performance.
-
-\subsubsection{Verification}
-
-Describe testing strategy
-
-%================================
-\subsection{Accretion}
-%================================
-
-Say what accretion does
-
-\subsubsection{Theory}
-
-Put explanation of continuous equations here.
-
-\subsubsection{Numerical Methods}
-
-Describe the numerical methods used to discretize the equations here.
-
-\subsubsection{Computational Implementation}
-
-Describe the strategies used (if any) to ensure good computational performance.
-
-\subsubsection{Verification}
-
-Describe testing strategy
-
-%================================
-%... and so on...
-%================================
-
diff --git a/components/eamxx/docs/physics/rrtmgp/main.tex b/components/eamxx/docs/physics/rrtmgp/main.tex
deleted file mode 100644
index b904d29ad47a..000000000000
--- a/components/eamxx/docs/physics/rrtmgp/main.tex
+++ /dev/null
@@ -1,41 +0,0 @@
-\documentclass[12pt]{article}
-\usepackage{authblk}
-
-\title{Design Document for the RRTMGP Radiation Interface Used by SCREAM}
-
-\author[1]{Peter Caldwell}
-\author[2]{Andy Salinger}
-\author[2]{Luca Bertagna}
-\author[1]{Hassan Beydoun}
-\author[1]{Peter Bogenschutz}
-\author[2]{Andrew Bradley}
-\author[1]{Aaron Donahue}
-\author[2]{Jim Foucar}
-\author[1]{Chris Golaz}
-\author[2]{Oksana Guba}
-\author[2]{Ben Hillman}
-\author[3]{Noel Keen}
-\author[4]{Wuyin Lin}
-\author[5]{Kyle Pressel}
-\author[5]{Balwinder Singh}
-\author[2]{Andrew Steyer}
-\author[2]{Mark Taylor}
-\author[1]{Chris Terai}
-\author[6]{Paul Ullrich}
-\date{\today}
-
-\affil[1]{Lawrence Livermore National Lab, Livermore CA}
-\affil[2]{Sandia National Laboratories, Albuquerque, NM}
-\affil[3]{Lawrence Berkeley National Laboratory, Berkeley, CA}
-\affil[4]{Brookhaven National Laboratory, Upton, NY}
-\affil[5]{Pacific Northwest National Laboratory, Richland, WA}
-\affil[6]{University of California, Davis, Davis, CA}
-
-\begin{document}
-\maketitle{}
-
-NOTE: author list for just this section should be culled to just the people working on this section.
-
-\input{rrtmgp_doc.tex}
-
-\end{document}
diff --git a/components/eamxx/docs/physics/rrtmgp/rrtmgp_doc.tex b/components/eamxx/docs/physics/rrtmgp/rrtmgp_doc.tex
deleted file mode 100644
index a9e24b784a1e..000000000000
--- a/components/eamxx/docs/physics/rrtmgp/rrtmgp_doc.tex
+++ /dev/null
@@ -1,18 +0,0 @@
-\section{Rapid Radiative Transfer for Global models in Parallel (RRTMGP)}
-
-\subsection{Theory}
-
-Put explanation of continuous equations here.
-
-\subsection{Numerical Methods}
-
-Describe the numerical methods used to discretize the equations here.
-
-\subsection{Computational Implementation}
-
-Describe the strategies used (if any) to ensure good computational performance.
-
-\subsection{Verification}
-
-Describe testing strategy
-
diff --git a/components/eamxx/docs/source-tree.md b/components/eamxx/docs/source-tree.md
deleted file mode 100644
index f3ae49e2d23b..000000000000
--- a/components/eamxx/docs/source-tree.md
+++ /dev/null
@@ -1,58 +0,0 @@
-# SCREAM's Source Tree
-
-All SCREAM-specific code can be found in `components/eamxx` within the
-[SCREAM repo](https://github.com/E3SM-Project/scream). Here's how things are
-organized:
-
-+ `cime_config`: Tools and XML files for integrating SCREAM with E3SM via the
- CIME framework.
-+ `cmake`: CMake functions and macros used by the configuration/build system.
-+ `data`: Data files used by our tests.
-+ `docs`: Documentation for the SCREAM project, including design documents,
- instructions for building and testing SCREAM, and this document.
-+ `extern`: Source for certain lightweight third-party libraries, embedded
- directly into the repo (and not imported as submodules).
-+ `scripts`: Miscellaneous scripts that implement workflows for running tests
- and analyzing performance.
-+ `src`: All C++ source code (and any bridges to Fortran) for SCREAM are stored
- here. We describe the contents of this directory in greater detail below.
-+ `tests`: Implements standalone, end-to-end tests for various SCREAM
- components (RRTMG, HOMME, P3, SHOC, etc).
-
-In addition, you'll notice the following files in `components/eamxx`:
-
-+ `CMakeLists.txt`: The CMake file that defines SCREAM's configuration/build
- system.
-+ `CTestConfig.cmake`: This CTest file contains parameters that determine how
- our test results are reported to the [E3SM CDash Site](http://my.cdash.org/submit.php?project=E3SM).
-+ `README.md`: SCREAM's top-level README file, which describes the project and
- its purpose.
-
-## The `src` Directory
-
-Herein lіes the source code for SCREAM. Broadly, here's where things are:
-
-+ `control`: Contains the atmosphere driver and basic tests for it.
-+ `dynamics`: Here's where HOMME lives within SCREAM, along with code for
- interfacing with it using SCREAM's data structures.
-+ `interface`: Glue code for embedding SCREAM within E3SM as an atmosphere
- component.
-+ `physics`: Source code for physics-related atmospheric processes, including
- + `p3`: The C++/Kokkos implementation of P3 microphysics within SCREAM.
- + `shoc`: The C++/Kokkos implementation of SHOC macrophysics within SCREAM.
- + `rrtmgp`: A stub for the radiation processes as represented in SCREAM.
- + `common`: Utilities and data structures common to these processes.
-+ `share`: Utilities used by various components within SCREAM. A lot of things
- here will likely end up in `ekat`.
-
-Each of these directories contains a `CMakeLists.txt` file for defining how
-things are build, and a `tests/` subdirectory that houses relevant
-unit and verification tests.
-
-You'll also see some other files in the `src/` directory itself:
-
-+ `scream_config.f.in`: A template for generating a Fortran include file with
- SCREAM configuration information.
-+ `scream_config.h.in`: A template for generating a C++ header file with
- SCREAM configuration information.
-
diff --git a/components/eamxx/docs/technical/aerocom_cldtop.md b/components/eamxx/docs/technical/aerocom_cldtop.md
new file mode 100644
index 000000000000..18fea456cf6c
--- /dev/null
+++ b/components/eamxx/docs/technical/aerocom_cldtop.md
@@ -0,0 +1,27 @@
+# The AeroCOM algorithm
+
+The goal of the AeroCOM algorithm is to calculate properties at cloud top based on the AeroCOM recommendation. There are two main parts of the algorithm: probabilistically determining "cloud top" and then "calculating properties" at said cloud top.
+
+We treat model columns independently, so we loop over all columns in parallel. We then loop over all layers in serial (due to needing a cumulative product), starting at the second-highest layer because the highest is assumed to have no clouds. Let's take a photonic approach from above the model top. Let's say that $p_{k}$ is the probability of a photon passing through the layer $k$. We follow the maximum-random overlap assumption. In all cases, we assume the cloudiness (or cloudy fraction) is completely opaque.
+
+We assume the highest layer has no clouds, thus the $p_{k} = 1$ for the highest layer. Note that $p_{k}$ is initialized as 1 for all layers. We also clip the cloudy fraction $C_{i,k}$ to ensure that $C_{i,k} \in [0+\epsilon, 1-\epsilon]$, where $\epsilon = 0.001$. Starting at the second highest layer, $k+1$, we check if some "cloudy" conditions are met. These conditions are now arbitrarily defined by a cloudiness threshold of $\epsilon$ (i.e., $C_{i,k}>\epsilon$) and a non-zero threshold on the total (both liquid and ice) droplet number concentration (i.e., $cQ_{i,k} + iQ_{i,k} > 0$). If the conditions are met, we estimate the cloud-top cloud fraction using a cumulative product following the maximum-random overlap assumption.
+
+$$c_{i} = 1 - \prod_{k=2}^{K} p_{k} = 1 - \prod_{k=2}^{K} \frac{1 - \max(C_{i,k}, C_{i,k-1})}{1-C_{i,k-1}}$$
+
+In order to estimate cloud-top properties, we weight by the probability of "remaining cloudiness" or $p_{k-1} - p_{k}$.
+
+| Type | Equation |
+| --- | --------- |
+| cloud property | $x_{i} = \sum_{k=2}^{K} X_{i,k} \Phi_{i,k} (p_{k-1} - p_{k})$ |
+| cloud content | $x_{i} = \sum_{k=2}^{K} \Phi_{i,k} (p_{k-1} - p_{k})$ |
+| other property | $x_{i} = \sum_{k=2}^{K} X_{i,k} (p_{k-1} - p_{k})$ |
+
+In the above, $\Phi_{i,k}$ is the thermodynamic phase defined by the cloud droplet number concentration ratios.
+
+$$i\Phi_{i,k} = \frac{iQ_{i,k}}{iQ_{i,k} + cQ_{i,k}}$$
+
+$$c\Phi_{i,k} = \frac{cQ_{i,k}}{iQ_{i,k} + cQ_{i,k}}$$
+
+The thermodynamic phase is used only for cloud properties (e.g., cloud-top cloud droplet number concentration) or cloud content (e.g., cloud liquid content). Further, $X_{i,k}$ is the three-dimensional cloud property of interest which is needed if we are converting a property from three-dimensional ($X$) to its two-dimensional counterpart ($x$). "Other" properties here include temperature and pressure which are not dependent on the thermodynamic phase.
+
+A helpful reference: Räisänen, P., Barker, H. W., Khairoutdinov, M. F., Li, J., & Randall, D. A. (2004). Stochastic generation of subgrid‐scale cloudy columns for large‐scale models. Quarterly Journal of the Royal Meteorological Society: A journal of the atmospheric sciences, applied meteorology and physical oceanography, 130(601), 2047-2067.
diff --git a/components/eamxx/docs/technical/clean_clear_sky.md b/components/eamxx/docs/technical/clean_clear_sky.md
new file mode 100644
index 000000000000..82380f903156
--- /dev/null
+++ b/components/eamxx/docs/technical/clean_clear_sky.md
@@ -0,0 +1,10 @@
+# Clean- and clean-clear-sky diagnostics
+
+In order to decompose the aerosol effective radiative forcing, additional diagnostic radiation calls are needed.
+These extra diagnostics are optionally added to the main radiation call. The extra diagnostics are:
+
+- Clean-clear-sky fluxes: the fluxes that would be present if there were neither aerosols nor clouds, and are calculated by adding an additional radiation call at the very beginning of the logic before the optics class is endowed with aerosol and cloud properties.
+- Clean-sky fluxes: the fluxes that would be present if there were no aerosols, and are calculated by adding an additional radiation call after instantiating an additional optics class, but not endowing it with aerosol properties.
+
+It was necessary to add an additional optics class because the original optics class is endowed with aerosols before clouds (in order to calculate the clear-sky fluxes).
+The extra calls are controlled by runtime flags `extra_clnclrsky_diag` and `extra_clnsky_diag` (they take either `true` or `false` as their values).
diff --git a/components/eamxx/docs/technical/index.md b/components/eamxx/docs/technical/index.md
new file mode 100644
index 000000000000..8b9a45be4fdf
--- /dev/null
+++ b/components/eamxx/docs/technical/index.md
@@ -0,0 +1,3 @@
+# SCREAM Technical Guide
+
+SCREAM contributors and maintainers will add detailed technical information about SCREAM here.
diff --git a/components/eamxx/docs/user/coarse_nudging.md b/components/eamxx/docs/user/coarse_nudging.md
new file mode 100644
index 000000000000..c52ce9a0eb24
--- /dev/null
+++ b/components/eamxx/docs/user/coarse_nudging.md
@@ -0,0 +1,25 @@
+# Nudging from coarse data
+
+Because EAMxx is designed to support ultra-high resolutions (in fact, that was the initial reason for its inception), it is not feasible to produce nudging data at the same resolution.
+Instead, in EAMxx, it is possible to nudge from coarse data.
+This is done by remapping the coarse data provided by the user to the runtime physics grid of EAMxx.
+In order to enable nudging from coarse data, the user must provide nudging data at the coarse resolution desired and an appropriate ncremap-compatible mapping file.
+
+## Example setup
+
+A user can produce coarse nudging data from running EAMxx or EAM at a ne30pg2 or any other applicable resolution.
+Additionally, several users in the E3SM project have produced nudging data at the ne30pg2 resolution from the MERRA2 and ERA5 datasets.
+A limitation for now is that the nudging data must be provided explicitly, either as one file or as a list of files.
+This can be problematic for long lists of files, but we are working on a solution to this problem.
+
+Let's say that the nudging data is provided as one file in the following path: `/path/to/nudging_data_ne4pg2_L72.nc`.
+Then, a mapping file is provided as `/another/path/to/mapping_file_ne4pg2_to_ne120pg2.nc`.
+Then, if the physics grid is ne120pg2, the user must enable the nudging process, specify the nudging data files, and provide a remap file.
+In other words, the following options are needed:
+
+```shell
+./atmchange atm_procs_list="(sc_import,nudging,homme,physics,sc_export)"
+./atmchange nudging_fields=U,V
+./atmchange nudging_filename=/path/to/nudging_data_ne4pg2_L72.nc
+./atmchange nudging_refine_remap_mapfile=/another/path/to/mapping_file_ne4pg2_to_ne120pg2.nc
+```
diff --git a/components/eamxx/docs/user/index.md b/components/eamxx/docs/user/index.md
new file mode 100644
index 000000000000..ba53083fc75c
--- /dev/null
+++ b/components/eamxx/docs/user/index.md
@@ -0,0 +1,3 @@
+# SCREAM User Guide
+
+For the time being, see our [public confluence EAMxx user guide](https://acme-climate.atlassian.net/wiki/spaces/DOC/pages/3858890786/EAMxx+User+s+Guide)
diff --git a/components/eamxx/docs/user/model_input.md b/components/eamxx/docs/user/model_input.md
new file mode 100644
index 000000000000..38486c77a21e
--- /dev/null
+++ b/components/eamxx/docs/user/model_input.md
@@ -0,0 +1,8 @@
+Model input
+=====================================
+
+TODO: explain how defaults XML, atmchange/atmquery, buildml, and input.yaml work.
+
+[Here](../common/eamxx_params.md) is a list of the currently configurable runtime parameters for EAMxx.
+
+
diff --git a/components/eamxx/docs/user/model_output.md b/components/eamxx/docs/user/model_output.md
new file mode 100644
index 000000000000..1ef0c8b26761
--- /dev/null
+++ b/components/eamxx/docs/user/model_output.md
@@ -0,0 +1,153 @@
+# Model output
+
+EAMxx allows the user to configure the desired model output via [YAML](https://yaml.org/) files,
+with each YAML file associated to a different output file.
+
+## Basic output YAML file syntax
+
+The following is an example of a simple output request.
+
+```yaml
+%YAML 1.1
+---
+filename_prefix: my_output
+Averaging Type: Average
+Max Snapshots Per File: 10
+Fields:
+ Physics:
+ Field Names:
+ - T_mid
+ - qv
+ Dynamics:
+ Field Names:
+ - dp3d_dyn
+ - omega_dyn
+output_control:
+ Frequency: 6
+ frequency_units: nhours
+```
+
+Notice that lists can be equivalently specified in YAML as `Field Names: [f1, f2, f3]`.
+The user can specify fields to be outputted from any of the grids used in the simulation.
+In the example above, we requested fields from both the Physics and Dynamics grid.
+The other parameters are
+
+- `Averaging Type`: how the fields are integrated in time before being saved. Valid
+ options are
+
+ - Instant: no integration, each time frame saved corresponds to instantaneous values
+ of the fields
+ - Average/Max/Min: the fields undergo the corresponding operation over the time
+ interval specified in the `output_control` section. In the case above, each snapshot
+ saved to file corresponds to an average of the output fields over 6h windows.
+
+- `filename_prefix`: the prefix of the output file, which will be created in the run
+ directory. The full filename will be `$prefix.$avgtype.$frequnits_x$freq.$timestamp.nc`,
+ where $timestamp corresponds to the first snapshot saved in the file for Instant output,
+ or the beginning of the first averaging window for the other averaging types
+- `Max Snapshots Per File`: specifies how many time snapshots can be put in a file. Once
+ this number is reached, EAMxx will close the file and open a new one.
+- `Frequency`: how many units of time are between two consecutive writes to file. For
+ Instant output the fields are "sampled" at this frequency, while for other averaging
+ types the fields are "integrated" in time over this window
+- `frequency_units`: units of the output frequency. Valid options are `nsteps` (the
+ number of atmosphere time steps), `nsecs`, `nmins`, `nhours`, `ndays`, `nmonths`,
+ `nyears`.
+
+## Diagnostic output
+
+In addition to the fields computed by EAMxx as part of the timestep, the user can
+request to output derived quantities, which will be computed on the fly by the
+I/O interface of EAMxx. There are two types of diagnostic outputs:
+
+- quantities computed as a function of EAMxx fields. These are simply physical quantities
+ that EAMxx does not keep in persistent storage. As of August 2023, the available
+ derived quantities are (case sensitive):
+
+ - `PotentialTemperature`
+ - `AtmosphereDensity`
+ - `Exner`
+ - `VirtualTemperature`
+ - `z_int`
+ - `z_mid`
+ - `geopotential_int`
+ - `geopotential_mid`
+ - `dz`
+ - `DryStaticEnergy`
+ - `SeaLevelPressure`
+ - `LiqWaterPath`
+ - `IceWaterPath`
+ - `VapWaterPath`
+ - `RainWaterPath`
+ - `RimeWaterPath`
+ - `ShortwaveCloudForcing`
+ - `LongwaveCloudForcing`
+ - `RelativeHumidity`
+ - `ZonalVapFlux`
+ - `MeridionalVapFlux`
+ - `precip_liq_surf_mass_flux`
+ - `precip_ice_surf_mass_flux`
+ - `precip_total_surf_mass_flux`
+ - `surface_upward_latent_heat_flux`
+
+- lower-dimensional slices of a field. These are hyperslices of an existing field or of
+ another diagnostic output. As of August 2023, given a field X, the available options
+ are:
+
+ - `X_at_lev_N`: slice the field `X` at the N-th vertical level index. Recall that
+ in EAMxx N=0 corresponds to the model top.
+ - `X_at_model_bot`, `X_at_model_top`: special case for top and bottom of the model.
+ - `X_at_Ymb`, `X_at_YPa`, `X_at_YhPa`: interpolates the field `X` at a vertical position
+    specified by the given pressure `Y`. Available units are `mb` (millibar), `Pa`, and `hPa`.
+ - `X_at_Ym`: interpolates the field `X` at a vertical height of `Y` meters.
+
+## Remapped output
+
+The following options can be used to save fields on a different grid from the one
+they are computed on.
+
+- `horiz_remap_file`: a path to a map file (as produced by `ncremap`) between the grid
+ where the fields are defined and a coarser grid. EAMxx will use this to remap fields
+ on the fly, allowing to reduce the size of the output file. Note: with this feature,
+ the user can only specify fields from a single grid.
+- `vertical_remap_file`: similar to the previous option, this map file is used to
+ refine/coarsen fields in the vertical direction.
+- `IOGrid`: this parameter can be specified inside one of the grids sections, and will
+ denote the grid (which must exist in the simulation) where the fields must be remapped
+ before being saved to file. This feature is really only used to save fields on the
+ dynamics grid without saving twice the DOFs at the interface of two spectral elements.
+ In fact, native output from the Dynamics grid would produce `6*num_elems*ngp*ngp`,
+ where `ngp` is the number of Gauss points along each axis in the 2d spectral element.
+ Note: this feature cannot be used along with the horizontal/vertical remapper.
+
+## Add output stream to a CIME case
+
+In order to tell EAMxx that a new output stream is needed, one must add the name of
+the yaml file to be used to the list of yaml files that EAMxx will process. From the
+case folder, after `case.setup` has run, one can do
+
+```shell
+./atmchange output_yaml_files=/path/to/my/yaml/file
+```
+
+to specify a single yaml file, or
+
+```shell
+./atmchange output_yaml_files+=/path/to/my/yaml/file
+```
+
+to append to the list of yaml files.
+
+### Important notes
+
+- The user should not specify a path to a file in `$RUNDIR/data`. EAMxx will
+put a copy of the specified yaml files in that directory, pruning any existing copy
+of that file. This happens every time that `buildnml` runs; in particular, it happens
+during `case.submit`.
+- As a consequence of the above, the user should not modify the generated yaml files
+ that are in `$RUNDIR/data`, since any modification will be lost on the next run
+  of `buildnml`. To modify output parameters, the user should modify the yaml file
+ that was specified with the `atmchange` command.
+- EAMxx will parse the yaml file and expand any string of the form $VAR, by looking
+ for the value of the variable VAR in the CIME case. If VAR is not a valid CIME
+ variable, an error will be raised.
diff --git a/components/eamxx/mkdocs.yml b/components/eamxx/mkdocs.yml
new file mode 100644
index 000000000000..dde0970a4ed2
--- /dev/null
+++ b/components/eamxx/mkdocs.yml
@@ -0,0 +1,67 @@
+site_name: EAMxx
+
+nav:
+ - 'Home': 'index.md'
+ - 'User Guide':
+ - 'Overview': 'user/index.md'
+ - 'Installation': 'common/installation.md'
+ - 'Model output': 'user/model_output.md'
+ - 'Model input': 'user/model_input.md'
+ - 'Runtime parameters': 'common/eamxx_params.md'
+ - 'Coarse nudging': 'user/coarse_nudging.md'
+ - 'Developer Guide':
+ - 'Overview': 'developer/index.md'
+ - 'Installation': 'common/installation.md'
+ - 'Style Guide': 'developer/style_guide.md'
+ - 'Kokkos and EKAT': 'developer/kokkos_ekat.md'
+ - 'Source Tree': 'developer/source_tree.md'
+ - 'Important Data Structures':
+ - 'Fields': 'developer/field.md'
+ - 'Grids and Remappers': 'developer/grid.md'
+ - 'Atmosphere Processes': 'developer/processes.md'
+ - 'Managers': 'developer/managers.md'
+ - 'I/O': 'developer/io.md'
+ - 'Testing':
+ - 'Standalone': 'developer/standalone_testing.md'
+ - 'Full model': 'developer/cime_testing.md'
+ - 'CI and Nightly Testing': 'developer/ci_nightly.md'
+ - 'Technical Guide':
+ - 'AeroCOM cloud top': 'technical/aerocom_cldtop.md'
+ - 'Extra radiation calls': 'technical/clean_clear_sky.md'
+
+edit_uri: ""
+
+theme:
+ name: material
+ palette:
+ - media: "(prefers-color-scheme: light)"
+ scheme: default
+ toggle:
+ icon: material/weather-sunny
+ name: Switch to dark mode
+ - media: "(prefers-color-scheme: dark)"
+ scheme: slate
+ toggle:
+ icon: material/weather-night
+ name: Switch to light mode
+ features:
+    - navigation.indexes
+ - navigation.instant
+ - navigation.sections
+ - navigation.top
+# - navigation.tabs
+
+markdown_extensions:
+ - pymdownx.highlight
+ - pymdownx.superfences
+ - pymdownx.tabbed:
+ alternate_style: true
+ - pymdownx.arithmatex:
+ generic: true
+
+extra_javascript:
+ # - javascript/mathjax.js
+ - https://polyfill.io/v3/polyfill.min.js?features=es6
+ - https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js
+
+repo_url: https://github.com/E3SM-Project/scream
diff --git a/components/eamxx/scripts/CMakeLists.txt b/components/eamxx/scripts/CMakeLists.txt
index dbf93bebed6d..3c7ab6eba5ff 100644
--- a/components/eamxx/scripts/CMakeLists.txt
+++ b/components/eamxx/scripts/CMakeLists.txt
@@ -9,4 +9,4 @@ add_executable(query-cf-database query-cf-database.cpp)
target_compile_definitions(query-cf-database PUBLIC
CF_STANDARD_NAME_FILE=${CF_STANDARD_NAME_FILE}
CF_SCREAM_NAME_FILE=${CF_SCREAM_NAME_FILE})
-target_link_libraries(query-cf-database ekat)
+target_link_libraries(query-cf-database ekat yaml-cpp)
diff --git a/components/eamxx/scripts/atm_manip.py b/components/eamxx/scripts/atm_manip.py
index c656119adb2f..2a763bc1990d 100755
--- a/components/eamxx/scripts/atm_manip.py
+++ b/components/eamxx/scripts/atm_manip.py
@@ -2,178 +2,285 @@
Retrieve nodes from EAMxx XML config file.
"""
-import sys, os
+import sys, os, re
# Used for doctests
import xml.etree.ElementTree as ET # pylint: disable=unused-import
# Add path to cime_config folder
sys.path.append(os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "cime_config"))
-from eamxx_buildnml_impl import check_value, is_array_type
-from utils import expect
+from eamxx_buildnml_impl import check_value, is_array_type, get_child, find_node
+from eamxx_buildnml_impl import gen_atm_proc_group
+from utils import expect, run_cmd_no_fail
+
+ATMCHANGE_SEP = "-ATMCHANGE_SEP-"
+ATMCHANGE_ALL = "__ALL__"
+ATMCHANGE_BUFF_XML_NAME = "SCREAM_ATMCHANGE_BUFFER"
+
+###############################################################################
+def apply_atm_procs_list_changes_from_buffer(case, xml):
+###############################################################################
+ atmchg_buffer = case.get_value(ATMCHANGE_BUFF_XML_NAME)
+ if atmchg_buffer:
+ atmchgs, atmchgs_all = unbuffer_changes(case)
+
+ expect (len(atmchgs)==len(atmchgs_all),"Failed to unbuffer changes from SCREAM_ATMCHANGE_BUFFER")
+ for chg, to_all in zip(atmchgs,atmchgs_all):
+ if "atm_procs_list" in chg:
+ expect (not to_all, "Makes no sense to change 'atm_procs_list' for all groups")
+ atm_config_chg_impl(xml, chg, all_matches=False)
###############################################################################
-class AmbiguousName (Exception):
- pass
+def apply_non_atm_procs_list_changes_from_buffer(case, xml):
###############################################################################
+ atmchg_buffer = case.get_value(ATMCHANGE_BUFF_XML_NAME)
+ if atmchg_buffer:
+ atmchgs, atmchgs_all = unbuffer_changes(case)
+
+ expect (len(atmchgs)==len(atmchgs_all),"Failed to unbuffer changes from SCREAM_ATMCHANGE_BUFFER")
+ for chg, to_all in zip(atmchgs,atmchgs_all):
+ if "atm_procs_list" not in chg:
+ atm_config_chg_impl(xml, chg, all_matches=to_all)
###############################################################################
-def num_nodes_with_name (root,name,recurse=True):
+def buffer_changes(changes, all_matches=False):
###############################################################################
"""
- Count nodes with certain name in an XML tree
+ Take a list of raw changes and buffer them in the XML case settings. Raw changes
+ are what goes to atm_config_chg_impl.
+ """
+ # Commas confuse xmlchange and so need to be escaped.
+ if all_matches:
+ changes_temp = [c + ATMCHANGE_ALL for c in changes]
+ changes_str = ATMCHANGE_SEP.join(changes_temp).replace(",",r"\,")
+ else:
+ # changes_str += f"{ATMCHANGE_SEP}--all"
+ changes_str = ATMCHANGE_SEP.join(changes).replace(",",r"\,")
- >>> xml = '''
- ...
- ...
- ...
- ...
- ...
- ...
- ... '''
- >>> import xml.etree.ElementTree as ET
- >>> tree = ET.fromstring(xml)
- >>> num_nodes_with_name(tree,'a',recurse=False)
- 1
- >>> num_nodes_with_name(tree,'a',recurse=True)
- 2
+ run_cmd_no_fail(f"./xmlchange --append {ATMCHANGE_BUFF_XML_NAME}='{changes_str}{ATMCHANGE_SEP}'")
+
+###############################################################################
+def unbuffer_changes(case):
+###############################################################################
+ """
+ From a case, get a list of raw changes. Returns (changes, all_matches_flag)
"""
+ atmchg_buffer = case.get_value(ATMCHANGE_BUFF_XML_NAME)
+ atmchgs = []
+ atmchgs_all = []
+ for item in atmchg_buffer.split(ATMCHANGE_SEP):
+ if item.strip():
+ atmchgs_all.append(ATMCHANGE_ALL in item)
+ atmchgs.append(item.replace(ATMCHANGE_ALL,"").replace(r"\,", ",").strip())
- count = 0
+ return atmchgs, atmchgs_all
- for elem in root:
- if elem.tag==name:
- count += 1
- if recurse:
- count += num_nodes_with_name(elem,name)
- return count
+###############################################################################
+def reset_buffer():
+###############################################################################
+ run_cmd_no_fail(f"./xmlchange {ATMCHANGE_BUFF_XML_NAME}=''")
###############################################################################
-def find_node (root,name,recurse=True):
+def get_xml_nodes(xml_root, name):
###############################################################################
"""
+ Find all elements matching a name where name uses '::' syntax
+
>>> xml = '''
...
+ ... one
...
- ... 2
+ ... two
+ ... 2
...
...
... '''
>>> import xml.etree.ElementTree as ET
>>> tree = ET.fromstring(xml)
- >>> a,parents = find_node(tree,'a')
- >>> print(f"{','.join(p.tag for p in parents)}")
- root,sub
- >>> print(a.text)
- 2
- >>> print(len(parents))
- 2
- >>> print(f"{','.join(p.tag for p in parents)}")
- root,sub
- >>> a,parents = find_node(tree,'a',recurse=False)
- >>> print(a)
- None
+ >>> ################ INVALID SYNTAX #######################
+ >>> get_xml_nodes(tree,'sub::::prop1')
+ Traceback (most recent call last):
+ SystemExit: ERROR: Invalid xml node name format, 'sub::::prop1' contains ::::
+ >>> ################ VALID USAGE #######################
+ >>> get_xml_nodes(tree,'invalid::prop1')
+ []
+ >>> [item.text for item in get_xml_nodes(tree,'prop1')]
+ ['one', 'two']
+ >>> [item.text for item in get_xml_nodes(tree,'::prop1')]
+ ['one']
+ >>> [item.text for item in get_xml_nodes(tree,'prop2')]
+ ['2']
+ >>> item = get_xml_nodes(tree,'prop2')[0]
+ >>> parent_map = create_parent_map(tree)
+ >>> [p.tag for p in get_parents(item, parent_map)]
+ ['root', 'sub']
"""
+ expect("::::" not in name, f"Invalid xml node name format, '{name}' contains ::::")
+
+ if name.startswith("::"):
+ prefix = "./" # search immediate children only
+ name = name[2:]
+ else:
+ prefix = ".//" # search entire tree
- for elem in root:
- if elem.tag==name:
- return elem, [root]
- if len(elem)>0 and recurse:
- found, parents = find_node(elem,name,recurse=True)
- if found is not None:
- return found, [root] + parents
+ try:
+ xpath_str = prefix + name.replace("::", "/")
+ result = xml_root.findall(xpath_str)
+ except SyntaxError as e:
+ expect(False, f"Invalid syntax '{name}' -> {e}")
- return None, []
+ return result
###############################################################################
-def get_xml_node(xml_root,name):
+def modify_ap_list(xml_root, group, ap_list_str, append_this):
###############################################################################
"""
+ Modify the atm_procs_list entry of this XML node (which is an atm proc group).
+ This routine can only be used to add an atm proc group OR to remove some
+ atm procs.
>>> xml = '''
...
- ... one
- ...
- ... two
- ... 2
- ...
+ ...
+ ...
+ ...
+ ...
+ ...
+ ... 1
+ ...
+ ...
+ ... 2
+ ...
+ ...
...
... '''
+ >>> from eamxx_buildnml_impl import has_child
>>> import xml.etree.ElementTree as ET
>>> tree = ET.fromstring(xml)
- >>> ################ INVALID SYNTAX #######################
- SystemExit: ERROR: Invalid change format. Expected A[::B[...]=value, got' prop1->2'
- >>> get_xml_node(tree,'sub::::prop1')
- Traceback (most recent call last):
- SystemExit: ERROR: Invalid xml node name format. Expected A[::B[...], got' sub::::prop1'
- Did you put two '::' in a row?
- >>> ################ INVALID NAMESPACE #######################
- >>> get_xml_node(tree,'invalid::prop1')
- Traceback (most recent call last):
- SystemExit: ERROR: Error! XML entry invalid not found in section root
- >>> ################ AMBIGUOUS ENTRY #######################
- >>> get_xml_node(tree,'prop1')
+ >>> node = ET.Element("my_group")
+ >>> node.append(ET.Element("atm_procs_list"))
+ >>> get_child(node,"atm_procs_list").text = ""
+ >>> modify_ap_list(tree,node,"p1,p2",False)
+ True
+ >>> get_child(node,"atm_procs_list").text
+ 'p1,p2'
+ >>> modify_ap_list(tree,node,"p1",True)
+ True
+ >>> get_child(node,"atm_procs_list").text
+ 'p1,p2,p1'
+ >>> modify_ap_list(tree,node,"p1,p3",False)
Traceback (most recent call last):
- atm_manip.AmbiguousName: ERROR: Error! Multiple XML entries with name prop1 found in section root
- >>> ################ VALID USAGE #######################
- >>> n,p = get_xml_node(tree,'::prop1')
- >>> print(n.text)
- one
- >>> print(len(p))
- 1
- >>> print(p[0].tag)
- root
- >>> n,p = get_xml_node(tree,'prop2')
- >>> print(n.text)
- 2
- >>> m,p = get_xml_node(tree,'prop2')
- >>> print([k for k in n.attrib.keys()])
- ['type', 'valid_values']
- >>> print(len(p))
- 2
- >>> print(f"{','.join(e.tag for e in p)}")
- root,sub
+ ValueError: ERROR: Unrecognized atm proc name 'p3'. To declare a new group, prepend and append '_' to the name.
+ >>> modify_ap_list(tree,node,"p1,_my_group_",False)
+ True
+ >>> get_child(node,"atm_procs_list").text
+ 'p1,_my_group_'
+ >>> defaults = get_child(tree,'atmosphere_processes_defaults')
+ >>> has_child(defaults,'_my_group_')
+ True
"""
+ curr_apl = get_child(group,"atm_procs_list")
+ if curr_apl.text==ap_list_str:
+ return False
+
+ ap_list = ap_list_str.split(",")
+ expect (len(ap_list)==len(set(ap_list)),
+ "Input list of atm procs contains repetitions")
+
+ # If we're here b/c of a manual call of atmchange from command line, this will be None,
+    # since we don't have this node in the generated XML file. But in that case, we don't
+ # have to actually add the new nodes, we can simply just modify the atm_procs_list entry
+ # If, however, we're calling this from buildnml, then what we are passed in is the XML
+ # tree from namelists_defaults_scream.xml, so this section *will* be present. And we
+ # need to add the new atm procs group as children, so that buildnml knows how to build
+ # them
+ ap_defaults = find_node(xml_root,"atmosphere_processes_defaults")
+ if ap_defaults is not None:
+
+ # Figure out which aps in the list are new groups and which ones already
+ # exist in the defaults
+ add_aps = [n for n in ap_list if n not in curr_apl.text.split(',')]
+ new_aps = [n for n in add_aps if find_node(ap_defaults,n) is None]
+
+ for ap in new_aps:
+ expect (ap[0]=="_" and ap[-1]=="_" and len(ap)>2, exc_type=ValueError,
+ error_msg=f"Unrecognized atm proc name '{ap}'. To declare a new group, prepend and append '_' to the name.")
+ group = gen_atm_proc_group("", ap_defaults)
+ group.tag = ap
+
+ ap_defaults.append(group)
+
+ # Update the 'atm_procs_list' in this node
+ if append_this:
+ curr_apl.text = ','.join(curr_apl.text.split(",")+ap_list)
+ else:
+ curr_apl.text = ','.join(ap_list)
+ return True
- selectors = name.split("::")
+###############################################################################
+def apply_change(xml_root, node, new_value, append_this):
+###############################################################################
+ any_change = False
- # Allow :: at the beginning (as in '::A::b'), but do not allow multiple :: operators
- expect('' not in selectors[1:],
- "Invalid xml node name format. Expected A[::B[...], got' {}'\n".format(name) +
- " Did you put two '::' in a row?")
+ # User can change the list of atm procs in a group doing ./atmchange group_name=a,b,c
+    # If we detect that this node is an atm proc group, don't modify the text, but do something else
+ if node.tag=="atm_procs_list":
+ parent_map = create_parent_map(xml_root)
+ group = get_parents(node,parent_map)[-1]
+ return modify_ap_list (xml_root,group,new_value,append_this)
- # Regardless of whether we have namespaces or not, the first selector must be unique through the whole XML tree
- s = selectors[0]
- if s == '':
- # User started with ::
- node = xml_root
- parents = []
- else:
- expect (num_nodes_with_name(xml_root,s,recurse=True)>0,
- "Error! XML entry {} not found in section {}".format(s,xml_root.tag))
- expect (num_nodes_with_name(xml_root,s,recurse=True)==1,
- "Error! Multiple XML entries with name {} found in section {}"
- .format(s,xml_root.tag), AmbiguousName)
+ if append_this:
- node, parents = find_node(xml_root,s,recurse=True)
+ expect ("type" in node.attrib.keys(),
+ f"Error! Missing type information for {node.tag}")
+ type_ = node.attrib["type"]
+ expect (is_array_type(type_) or type_=="string",
+ "Error! Can only append with array and string types.\n"
+ f" - name: {node.tag}\n"
+ f" - type: {type_}")
+ if is_array_type(type_):
+ node.text += ", " + new_value
+ else:
+ node.text += new_value
- # If user specified selectors via namespace, recurse over them
- for s in selectors[1:]:
- expect (num_nodes_with_name(node,s,recurse=False)>0,
- "Error! XML entry {} not found in section {}".format(s,node.tag))
- expect (num_nodes_with_name(node,s,recurse=False)==1,
- "Error! Multiple XML entries with name {} found in section {}"
- .format(s,node.tag))
+ any_change = True
- node, parents = find_node(node,s,recurse=False)
+ elif node.text != new_value:
+ check_value(node,new_value)
+ node.text = new_value
+ any_change = True
- return node, parents
+ return any_change
###############################################################################
-def atm_config_chg_impl(xml_root,changes):
+def parse_change(change):
###############################################################################
"""
+ >>> parse_change("a+=2")
+ ('a', '2', True)
+ >>> parse_change("a=hello")
+ ('a', 'hello', False)
+ """
+ tokens = change.split('+=')
+ if len(tokens)==2:
+ append_this = True
+ else:
+ append_this = False
+ tokens = change.split('=')
+
+ expect (len(tokens)==2,
+ f"Invalid change request '{change}'. Valid formats are:\n"
+ f" - A[::B[...]=value\n"
+ f" - A[::B[...]+=value (implies append for this change)")
+ node_name = tokens[0]
+ new_value = tokens[1]
+
+ return node_name,new_value,append_this
+###############################################################################
+def atm_config_chg_impl(xml_root, change, all_matches=False):
+###############################################################################
+ """
>>> xml = '''
...
... 1,2,3
@@ -191,90 +298,141 @@ def atm_config_chg_impl(xml_root,changes):
>>> import xml.etree.ElementTree as ET
>>> tree = ET.fromstring(xml)
>>> ################ INVALID SYNTAX #######################
- >>> atm_config_chg_impl(tree,['prop1->2'])
+ >>> atm_config_chg_impl(tree,'prop1->2')
Traceback (most recent call last):
SystemExit: ERROR: Invalid change request 'prop1->2'. Valid formats are:
- A[::B[...]=value
- A[::B[...]+=value (implies append for this change)
>>> ################ INVALID TYPE #######################
- >>> atm_config_chg_impl(tree,['prop2=two'])
+ >>> atm_config_chg_impl(tree,'prop2=two')
Traceback (most recent call last):
- ValueError: Could not use 'two' as type 'integer'
+ ValueError: Could not refine 'two' as type 'integer'
>>> ################ INVALID VALUE #######################
- >>> atm_config_chg_impl(tree,['prop2=3'])
+ >>> atm_config_chg_impl(tree,'prop2=3')
Traceback (most recent call last):
CIME.utils.CIMEError: ERROR: Invalid value '3' for element 'prop2'. Value not in the valid list ('[1, 2]')
+ >>> ################ AMBIGUOUS CHANGE #######################
+ >>> atm_config_chg_impl(tree,'prop1=three')
+ Traceback (most recent call last):
+ SystemExit: ERROR: prop1 is ambiguous (use --all to change all matches), matches:
+ root::prop1
+ root::sub::prop1
+
>>> ################ VALID USAGE #######################
- >>> atm_config_chg_impl(tree,['::prop1=two'])
+ >>> atm_config_chg_impl(tree,'::prop1=two')
True
- >>> atm_config_chg_impl(tree,['::prop1=two'])
+ >>> atm_config_chg_impl(tree,'::prop1=two')
False
- >>> atm_config_chg_impl(tree,['sub::prop1=one'])
+ >>> atm_config_chg_impl(tree,'sub::prop1=one')
+ True
+ >>> atm_config_chg_impl(tree,'prop1=three', all_matches=True)
True
+ >>> [item.text for item in get_xml_nodes(tree,'prop1')]
+ ['three', 'three']
>>> ################ TEST APPEND += #################
- >>> atm_config_chg_impl(tree,['a+=4'])
+ >>> atm_config_chg_impl(tree,'a+=4')
True
- >>> get_xml_node(tree,'a')[0].text
+ >>> get_xml_nodes(tree,'a')[0].text
'1,2,3, 4'
>>> ################ ERROR, append to non-array and non-string
- >>> atm_config_chg_impl(tree,['c+=2'])
+ >>> atm_config_chg_impl(tree,'c+=2')
Traceback (most recent call last):
SystemExit: ERROR: Error! Can only append with array and string types.
- name: c
- type: int
>>> ################ Append to string ##################
- >>> atm_config_chg_impl(tree,['d+=two'])
+ >>> atm_config_chg_impl(tree,'d+=two')
True
- >>> get_xml_node(tree,'d')[0].text
+ >>> get_xml_nodes(tree,'d')[0].text
'onetwo'
>>> ################ Append to array(string) ##################
- >>> atm_config_chg_impl(tree,['e+=two'])
+ >>> atm_config_chg_impl(tree,'e+=two')
True
- >>> get_xml_node(tree,'e')[0].text
+ >>> get_xml_nodes(tree,'e')[0].text
'one, two'
"""
+ node_name, new_value, append_this = parse_change(change)
+ matches = get_xml_nodes(xml_root, node_name)
- any_change = False
- for change in changes:
+ expect(len(matches) > 0, f"{node_name} did not match any items")
- tokens = change.split('+=')
- if len(tokens)==2:
- append_this = True
- else:
- append_this = False
- tokens = change.split('=')
-
- expect (len(tokens)==2,
- f"Invalid change request '{change}'. Valid formats are:\n"
- f" - A[::B[...]=value\n"
- f" - A[::B[...]+=value (implies append for this change)")
- node, __ = get_xml_node(xml_root,tokens[0])
- new_value = tokens[1]
-
- if append_this:
- expect ("type" in node.attrib.keys(),
- "Error! Missing type information for {}".format(tokens[0]))
- type_ = node.attrib["type"]
- expect (is_array_type(type_) or type_=="string",
- "Error! Can only append with array and string types.\n"
- f" - name: {tokens[0]}\n"
- f" - type: {type_}")
- if is_array_type(type_):
- node.text += ", " + new_value
- else:
- node.text += new_value
+ if len(matches) > 1 and not all_matches:
+ parent_map = create_parent_map(xml_root)
+ error_str = ""
+ for node in matches:
+ parents = get_parents(node, parent_map)
+ name = "::".join(e.tag for e in parents) + "::" + node.tag
+ error_str += " " + name + "\n"
- any_change = True
+ expect(False, f"{node_name} is ambiguous (use --all to change all matches), matches:\n{error_str}")
- elif node.text != new_value:
- check_value(node,new_value)
- node.text = new_value
- any_change = True
+ any_change = False
+ for node in matches:
+ any_change |= apply_change(xml_root, node, new_value, append_this)
return any_change
###############################################################################
-def print_var(xml_root,var,full,dtype,value,valid_values,print_style="invalid",indent=""):
+def create_parent_map(root):
+###############################################################################
+ return {c: p for p in root.iter() for c in p}
+
+###############################################################################
+def get_parents(elem, parent_map):
+###############################################################################
+ """
+ Return all parents of an elem in descending order (first item in list will
+ be the furthest ancestor, last item will be direct parent)
+ """
+ results = []
+ if elem in parent_map:
+ parent = parent_map[elem]
+ results = get_parents(parent, parent_map) + [parent]
+
+ return results
+
+###############################################################################
+def print_var_impl(node,parent_map,full,dtype,value,valid_values,print_style="invalid",indent=""):
+###############################################################################
+
+ expect (print_style in ["short","full"],
+ f"Invalid print_style '{print_style}' for print_var_impl. Use 'full' or 'short'.")
+
+ if print_style=="short":
+ # Just the inner most name
+ name = node.tag
+ else:
+ parents = get_parents(node, parent_map)
+ name = "::".join(e.tag for e in parents) + "::" + node.tag
+
+ if full:
+ expect ("type" in node.attrib.keys(),
+ f"Error! Missing type information for {name}")
+ print (f"{indent}{name}")
+ print (f"{indent} value: {node.text}")
+ print (f"{indent} type: {node.attrib['type']}")
+ if "valid_values" not in node.attrib.keys():
+ valid = []
+ else:
+ valid = node.attrib["valid_values"].split(",")
+ print (f"{indent} valid values: {valid}")
+ elif dtype:
+ expect ("type" in node.attrib.keys(),
+ f"Error! Missing type information for {name}")
+ print (f"{indent}{name}: {node.attrib['type']}")
+ elif value:
+ print (f"{indent}{node.text}")
+ elif valid_values:
+ if "valid_values" not in node.attrib.keys():
+ valid = ''
+ else:
+ valid = node.attrib["valid_values"].split(",")
+ print (f"{indent}{name}: {valid}")
+ else:
+ print (f"{indent}{name}: {node.text}")
+
+###############################################################################
+def print_var(xml_root,parent_map,var,full,dtype,value,valid_values,print_style="invalid",indent=""):
###############################################################################
"""
>>> xml = '''
@@ -288,29 +446,27 @@ def print_var(xml_root,var,full,dtype,value,valid_values,print_style="invalid",i
... '''
>>> import xml.etree.ElementTree as ET
>>> tree = ET.fromstring(xml)
+ >>> parent_map = create_parent_map(tree)
>>> ################ Missing type data #######################
- >>> print_var(tree,'::prop1',False,True,False,False,"short")
+ >>> print_var(tree,parent_map,'::prop1',False,True,False,False,"short")
Traceback (most recent call last):
SystemExit: ERROR: Error! Missing type information for prop1
- >>> print_var(tree,'prop2',True,False,False,False,"short")
+ >>> print_var(tree,parent_map,'prop2',True,False,False,False,"short")
prop2
value: 2
type: integer
valid values: ['1', '2']
- >>> print_var(tree,'prop2',False,True,False,False,"short")
+ >>> print_var(tree,parent_map,'prop2',False,True,False,False,"short")
prop2: integer
- >>> print_var(tree,'prop2',False,False,True,False,"short")
+ >>> print_var(tree,parent_map,'prop2',False,False,True,False,"short")
2
- >>> print_var(tree,'prop2',False,False,False,True,"short"," ")
+ >>> print_var(tree,parent_map,'prop2',False,False,False,True,"short"," ")
prop2: ['1', '2']
"""
expect (print_style in ["short","full"],
f"Invalid print_style '{print_style}' for print_var. Use 'full' or 'short'.")
- # Get node, along with all its parents (which might be used for 'full' print style)
- node, parents = get_xml_node(xml_root,var)
-
# Get the shortest unique repr of the var name
tokens = var.split("::")
if tokens[0]=='':
@@ -318,60 +474,33 @@ def print_var(xml_root,var,full,dtype,value,valid_values,print_style="invalid",i
while len(tokens)>1:
new_name = "::".join(tokens[1:])
- try:
- get_xml_node(xml_root,new_name)
- tokens.pop(0)
- name = new_name
- except AmbiguousName:
- # new_name was either "" or an ambiguous name, and get_xml_node failed
+ matches = get_xml_nodes(xml_root, new_name)
+ if len(matches) > 1:
break
+ else:
+ tokens.pop(0)
- if print_style=="short":
- # Just the inner most name
- name = tokens[-1]
- else:
- name = "::".join(e.tag for e in parents) + "::" + node.tag
+ # Get node, along with all its parents (which might be used for 'full' print style)
+ matches = get_xml_nodes(xml_root,var)
+ expect(len(matches) == 1, f"Expected one match for {var}")
+ node = matches[0]
- if full:
- expect ("type" in node.attrib.keys(),
- "Error! Missing type information for {}".format(name))
- print (f"{indent}{name}")
- print (f"{indent} value: {node.text}")
- print (f"{indent} type: {node.attrib['type']}")
- if "valid_values" not in node.attrib.keys():
- valid = []
- else:
- valid = node.attrib["valid_values"].split(",")
- print (f"{indent} valid values: {valid}")
- elif dtype:
- expect ("type" in node.attrib.keys(),
- "Error! Missing type information for {}".format(name))
- print (f"{indent}{name}: {node.attrib['type']}")
- elif value:
- print (f"{indent}{node.text}")
- elif valid_values:
- if "valid_values" not in node.attrib.keys():
- valid = ''
- else:
- valid = node.attrib["valid_values"].split(",")
- print (f"{indent}{name}: {valid}")
- else:
- print (f"{indent}{name}: {node.text}")
+ print_var_impl(node,parent_map,full,dtype,value,valid_values,print_style,indent)
###############################################################################
-def print_all_vars(xml_root,xml_node,curr_namespace,full,dtype,value,valid_values,print_style,indent):
+def print_all_vars(xml_root,xml_node,parent_map,curr_namespace,full,dtype,value,valid_values,print_style,indent):
###############################################################################
print (f"{indent}{xml_node.tag}")
for c in xml_node:
if len(c)>0:
- print_all_vars(xml_root,c,curr_namespace+c.tag+"::",full,dtype,value,valid_values,print_style,indent+" ")
+ print_all_vars(xml_root,c,parent_map,curr_namespace+c.tag+"::",full,dtype,value,valid_values,print_style,indent+" ")
else:
- print_var(xml_root,curr_namespace+c.tag,full,dtype,value,valid_values,print_style,indent+" ")
+ print_var(xml_root,parent_map,curr_namespace+c.tag,full,dtype,value,valid_values,print_style,indent+" ")
###############################################################################
-def atm_query_impl(xml_root,variables,listall=False,full=False,value=False, \
- dtype=False, valid_values=False):
+def atm_query_impl(xml_root,variables,listall=False,full=False,value=False,
+ dtype=False, valid_values=False, grep=False):
###############################################################################
"""
>>> xml = '''
@@ -386,21 +515,41 @@ def atm_query_impl(xml_root,variables,listall=False,full=False,value=False, \
>>> import xml.etree.ElementTree as ET
>>> tree = ET.fromstring(xml)
>>> vars = ['prop2','::prop1']
- >>> success = atm_query_impl(tree, vars, False,False,False,False,False)
+ >>> success = atm_query_impl(tree, vars)
root::sub::prop2: 2
root::prop1: one
- >>> success = atm_query_impl(tree, [], True,False,False,False,True)
+ >>> success = atm_query_impl(tree, [], listall=True, valid_values=True)
root
prop1:
sub
prop1:
prop2: ['1', '2']
+ >>> success = atm_query_impl(tree,['prop1'], grep=True)
+ root::prop1: one
+ sub::prop1: two
"""
-
+ parent_map = create_parent_map(xml_root)
if listall:
- print_all_vars(xml_root,xml_root,"::",full,dtype,value,valid_values,"short"," ")
+ print_all_vars(xml_root,xml_root,parent_map,"::",full,dtype,value,valid_values,"short"," ")
+
+ elif grep:
+ for regex in variables:
+ expect("::" not in regex, "query --grep does not support including parent info")
+ var_re = re.compile(f'{regex}')
+ if var_re.search(xml_root.tag):
+ print_all_vars(xml_root,xml_root,parent_map,"::",full,dtype,value,valid_values,"short"," ")
+ else:
+ for elem in xml_root:
+ if len(elem)>0:
+ atm_query_impl(elem,variables,listall,full,value,dtype,valid_values,grep)
+ else:
+ if var_re.search(elem.tag):
+ nodes = get_xml_nodes(xml_root, "::"+elem.tag)
+ expect(len(nodes) == 1, "No matches?")
+ print_var_impl(nodes[0],parent_map,full,dtype,value,valid_values,"full"," ")
+
else:
for var in variables:
- print_var(xml_root,var,full,dtype,value,valid_values,"full"," ")
+ print_var(xml_root,parent_map,var,full,dtype,value,valid_values,"full"," ")
return True
diff --git a/components/eamxx/scripts/atmchange b/components/eamxx/scripts/atmchange
index f0611b484ca6..ff886ae83507 100755
--- a/components/eamxx/scripts/atmchange
+++ b/components/eamxx/scripts/atmchange
@@ -13,39 +13,71 @@ sys.path.append(os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
from eamxx_buildnml_impl import check_value, is_array_type
-from atm_manip import get_xml_node, atm_config_chg_impl
+from eamxx_buildnml import create_raw_xml_file
+from atm_manip import atm_config_chg_impl, buffer_changes, reset_buffer, get_xml_nodes, parse_change
from utils import run_cmd_no_fail, expect
+# Add path to cime
+_CIMEROOT = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..","..","..","cime")
+sys.path.append(os.path.join(_CIMEROOT, "CIME", "Tools"))
+from standard_script_setup import * # pylint: disable=wildcard-import
+from CIME.case import Case
+
###############################################################################
-def atm_config_chg(changes, no_buffer=False, reset=False):
+def recreate_raw_xml_file():
###############################################################################
- expect(os.path.exists("namelist_scream.xml"),
- "No pwd/namelist_scream.xml file is present. Please run from a case dir that has been set up")
+ caseroot = os.getcwd()
+ with Case(caseroot) as case:
+ create_raw_xml_file(case, caseroot)
+
+###############################################################################
+def atm_config_chg(changes, reset=False, all_matches=False, buffer_only=False):
+###############################################################################
+ if not buffer_only:
+ expect(os.path.exists("namelist_scream.xml"),
+ "No pwd/namelist_scream.xml file is present. Please run from a case dir that has been set up")
+ else:
+ expect(not reset, "Makes no sense for buffer_only and reset to both be on")
if reset:
- run_cmd_no_fail("./xmlchange SCREAM_ATMCHANGE_BUFFER=''")
- print("All buffered atmchanges have been removed. A fresh namelist_scream.xml will be generated the next time buildnml (case.setup) is run.")
+ reset_buffer()
+ print("All buffered atmchanges have been removed.")
hack_xml = run_cmd_no_fail("./xmlquery SCREAM_HACK_XML --value")
if hack_xml == "TRUE":
print("SCREAM_HACK_XML is on. Removing namelist_scream.xml to force regen")
os.remove("namelist_scream.xml")
+ recreate_raw_xml_file()
return True
else:
expect(changes, "Missing = args")
- with open("namelist_scream.xml", "r") as fd:
- tree = ET.parse(fd)
- root = tree.getroot()
+ # Before applying/buffering changes, at the very least check the syntax
+ for c in changes:
+ # This will throw if the syntax is bad
+ _, _, _ = parse_change(c)
+
+ # If buffer_only=True, we must assume there were changes (we can't check).
+ # Otherwise, we'll assume no changes, and if we find one, we'll adjust
+ any_change = buffer_only
+ if not buffer_only:
+ with open("namelist_scream.xml", "r") as fd:
+ tree = ET.parse(fd)
+ root = tree.getroot()
+
+ for change in changes:
+ this_changed = atm_config_chg_impl(root, change, all_matches)
+ any_change |= this_changed
- any_change = atm_config_chg_impl(root,changes)
if any_change:
- tree.write("namelist_scream.xml")
+ # NOTE: if a change is wrong (e.g., typo in param name), we are still buffering it.
+ # We have no way of checking this, unfortunately. If you get an error that is
+ # not just syntax, your best course of action is to run atmchange --reset.
+ buffer_changes(changes, all_matches=all_matches)
- if not no_buffer:
- changes_str = " ".join(changes).replace(",",r"\,")
- run_cmd_no_fail(f"./xmlchange --append SCREAM_ATMCHANGE_BUFFER='{changes_str}'")
+ if not buffer_only:
+ recreate_raw_xml_file()
return True
@@ -72,19 +104,26 @@ OR
)
parser.add_argument(
- "--no-buffer",
+ "-a", "--all",
default=False,
+ dest="all_matches",
action="store_true",
- help="Used by buildnml to replay buffered commands",
+ help="Apply change to all entries matching the name"
)
-
parser.add_argument(
- "--reset",
+ "-r", "--reset",
default=False,
action="store_true",
help="Forget all previous atmchanges",
)
+ parser.add_argument(
+ "-b", "--buffer-only",
+ default=False,
+ action="store_true",
+ help="Only buffer the changes, don't actually do them. Useful for testmod scripts where the case is not setup yet",
+ )
+
parser.add_argument("changes", nargs="*", help="Values to change")
return parser.parse_args(args[1:])
diff --git a/components/eamxx/scripts/atmquery b/components/eamxx/scripts/atmquery
index 7a0b8e07bdd6..f07ecefa94f1 100755
--- a/components/eamxx/scripts/atmquery
+++ b/components/eamxx/scripts/atmquery
@@ -13,11 +13,12 @@ sys.path.append(os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
from eamxx_buildnml_impl import check_value
-from atm_manip import expect, get_xml_node, AmbiguousName, atm_query_impl
+from atm_manip import atm_query_impl
+from utils import expect
###############################################################################
def atm_query(variables,listall=False,full=False,value=False, \
- dtype=False, valid_values=False):
+ dtype=False, valid_values=False, grep=False):
###############################################################################
expect(os.path.exists("namelist_scream.xml"),
"No pwd/namelist_scream.xml file is present. Please rum from a case dir that has been setup")
@@ -26,29 +27,32 @@ def atm_query(variables,listall=False,full=False,value=False, \
tree = ET.parse(fd)
xml_root = tree.getroot()
- return atm_query_impl(xml_root,variables,listall,full,value,dtype,valid_values)
+ return atm_query_impl(xml_root,variables,listall,full,value,dtype,valid_values,grep)
###############################################################################
def parse_command_line(args, description):
###############################################################################
parser = argparse.ArgumentParser(
- usage="""\n{0} [--listall] [--value] [--type] [--valid-values] [--full] [var1 [,var2 ...]
+ usage="""\n{0} [--grep] [--listall] [--value] [--type] [--valid-values] [--full] [var1 [,var2 ...]
OR
{0} --help
\033[1mEXAMPLES:\033[0m
- \033[1;32m# List all settings as VAR=VALUE
+ \033[1;32m# List all settings as VAR=VALUE\033[0m
> {0} --listall
- \033[1;32m# print var1 and var2
+ \033[1;32m# print var1 and var2\033[0m
> {0} var1 var2
- \033[1;32m# print var1 and var2, with full details
+ \033[1;32m# print var1 and var2, with full details\033[0m
> {0} var1 var2 --full
- \033[1;32m# print var1 type and valid values
+ \033[1;32m# print var1 type and valid values\033[0m
> {0} var1 --type --valid-values
+ \033[1;32m# print all variables whose name matches expr\033[0m
+ > {0} --grep expr
+
""".format(pathlib.Path(args[0]).name),
description=description,
formatter_class=argparse.ArgumentDefaultsHelpFormatter
@@ -62,16 +66,23 @@ OR
)
parser.add_argument(
- "--listall",
+ "--grep",
default=False,
action="store_true",
+ help="List all matching variables and their values.",
+ )
+
+ parser.add_argument(
+ "--listall",
+ action="store_true",
help="List all variables and their values.",
)
# The following options are mutually exclusive
- group = parser.add_mutually_exclusive_group()
+ group1 = parser.add_argument_group(title="Display options")
+ group2 = parser.add_mutually_exclusive_group()
- group.add_argument(
+ group2.add_argument(
"--full",
default=False,
action="store_true",
@@ -79,7 +90,7 @@ OR
"valid values, description and file.",
)
- group.add_argument(
+ group2.add_argument(
"--value",
default=False,
action="store_true",
@@ -87,14 +98,15 @@ OR
"If more than one has been found print first value in list.",
)
- group.add_argument(
+ group2.add_argument(
"--type",
default=False,
+ dest="dtype",
action="store_true",
help="Print the data type associated with each variable.",
)
- group.add_argument(
+ group2.add_argument(
"--valid-values",
default=False,
action="store_true",
@@ -103,19 +115,16 @@ OR
args = parser.parse_args(args[1:])
- if len(args.variables) == 1:
- variables = args.variables[0].split(",")
- else:
- variables = args.variables
-
- return (
- variables,
- args.listall,
- args.full,
- args.value,
- args.type,
- args.valid_values,
- )
+ if args.grep and args.listall:
+ parser.error("Cannot specify --listall and --grep at the same time")
+
+ if args.grep and args.variables is None:
+ parser.error("Option --grep requires to pass a variable regex")
+
+ if args.variables is not None and len(args.variables)==1:
+ args.variables = args.variables[0].split(",")
+
+ return args
###############################################################################
def _main_func(description):
@@ -126,15 +135,7 @@ def _main_func(description):
testmod()
testmod(m=atm_manip)
else:
- (
- variables,
- listall,
- value,
- full,
- dtype,
- valid_values,
- ) = parse_command_line(sys.argv, description)
- success = atm_query(variables,listall,value,full,dtype,valid_values)
+ success = atm_query(**vars(parse_command_line(sys.argv, description)))
sys.exit(0 if success else 1)
###############################################################################
diff --git a/components/eamxx/scripts/cime-nml-tests b/components/eamxx/scripts/cime-nml-tests
index 44c1a9d76579..0a60376bd45f 100755
--- a/components/eamxx/scripts/cime-nml-tests
+++ b/components/eamxx/scripts/cime-nml-tests
@@ -5,7 +5,8 @@ Script containing python test suite for SCREAM's CIME
namelist-related infrastructure.
"""
-from utils import check_minimum_python_version, expect, ensure_pylint, run_cmd_assert_result, get_timestamp
+from utils import check_minimum_python_version, expect, ensure_pylint, get_timestamp, \
+ run_cmd_assert_result, run_cmd_no_fail, run_cmd
check_minimum_python_version(3, 6)
@@ -34,6 +35,8 @@ class TestBuildnml(unittest.TestCase):
Convenience wrapper around create_test. Returns list of full paths to created cases. If multiple cases,
the order of the returned list is not guaranteed to match the order of the arguments.
"""
+ extra_args = extra_args.split()
+
test_id = f"cmd_nml_tests-{get_timestamp()}"
extra_args.append("-t {}".format(test_id))
@@ -60,29 +63,61 @@ class TestBuildnml(unittest.TestCase):
return cases[0] if len(cases) == 1 else cases
###########################################################################
- def _chg_atmconfig(self, changes, case, buff=True, reset=False, expect_lost=False):
+ def _get_values(self, case, name, value=None, expect_equal=True, all_matches=False):
###########################################################################
- buffer_opt = "" if buff else "--no-buffer"
-
- for name, value in changes:
+ """
+ Queries a name, optionally checking if the value matches or does not match the
+ argument.
+ """
+ if not all_matches:
orig = run_cmd_assert_result(self, f"./atmquery {name} --value", from_dir=case)
- self.assertNotEqual(orig, value)
+ if value:
+ if expect_equal:
+ self.assertEqual(orig, value, msg=name)
+ else:
+ self.assertNotEqual(orig, value, msg=name)
+
+ return [name]
+
+ else:
+ output = run_cmd_assert_result(self, f"./atmquery {name} --grep", from_dir=case).splitlines()
+ names = [line.rsplit(": ", maxsplit=1)[0].strip() for line in output]
+ values = [line.rsplit(": ", maxsplit=1)[1].strip() for line in output]
+
+ if value:
+ for orig_value in values:
+ if expect_equal:
+ self.assertEqual(orig_value, value, msg=name)
+ else:
+ self.assertNotEqual(orig_value, value, msg=name)
+
+ return names
+
+ ###########################################################################
+ def _chg_atmconfig(self, changes, case, reset=False, expect_lost=None):
+ ###########################################################################
+ changes = [(item[0], item[1], False) if len(item) == 2 else item for item in changes]
+
+ expect_lost = reset if expect_lost is None else expect_lost
- run_cmd_assert_result(self, f"./atmchange {buffer_opt} {name}={value}", from_dir=case)
- curr_value = run_cmd_assert_result(self, f"./atmquery {name} --value", from_dir=case)
- self.assertEqual(curr_value, value)
+ changes_unpacked = {}
+ for name, value, all_matches in changes:
+ all_matches_opt = "-a" if all_matches else ""
+
+ names = self._get_values(case, name, value=value, expect_equal=False, all_matches=all_matches)
+
+ run_cmd_assert_result(self, f"./atmchange {all_matches_opt} {name}='{value}'", from_dir=case)
+
+ for item in names:
+ changes_unpacked[item] = value
if reset:
run_cmd_assert_result(self, "./atmchange --reset", from_dir=case)
- run_cmd_assert_result(self, "./case.setup", from_dir=case)
+ # run_cmd_assert_result(self, "./case.setup", from_dir=case)
- for name, value in changes:
- curr_value = run_cmd_assert_result(self, f"./atmquery {name} --value", from_dir=case)
- if expect_lost:
- self.assertNotEqual(curr_value, value)
- else:
- self.assertEqual(curr_value, value)
+ for name, value in changes_unpacked.items():
+ self._get_values(case, name, value=value, expect_equal=not expect_lost)
###########################################################################
def setUp(self):
@@ -120,7 +155,7 @@ class TestBuildnml(unittest.TestCase):
"""
Test that xmlchanges impact atm config files
"""
- case = self._create_test("ERS_Ln22.ne30_ne30.F2010-SCREAMv1 --no-build".split())
+ case = self._create_test("ERS_Ln22.ne30_ne30.F2010-SCREAMv1 --no-build")
# atm config should match case test opts
case_rest_n = run_cmd_assert_result(self, "./xmlquery REST_N --value", from_dir=case)
@@ -143,67 +178,100 @@ class TestBuildnml(unittest.TestCase):
"""
Test that atmchanges are not lost when eamxx setup is called
"""
- case = self._create_test("SMS.ne30_ne30.F2010-SCREAMv1 --no-build".split())
+ case = self._create_test("SMS.ne30_ne30.F2010-SCREAMv1 --no-build")
+
+ self._chg_atmconfig([("atm_log_level", "trace"), ("output_to_screen", "true")], case)
- self._chg_atmconfig([("atm_log_level", "trace")], case)
+ run_cmd_no_fail ("./case.setup", from_dir=case)
+
+ out1 = run_cmd_no_fail("./atmquery --value atm_log_level", from_dir=case)
+ out2 = run_cmd_no_fail("./atmquery --value output_to_screen", from_dir=case)
+
+ expect (out1=="trace", "An atm change appears to have been lost during case.setup")
+ expect (out2=="true", "An atm change appears to have been lost during case.setup")
###########################################################################
- def test_manual_atmchanges_are_lost(self):
+ def test_nml_defaults_append(self):
###########################################################################
"""
- Test that manual atmchanges are lost when eamxx setup is called
+ Test that the append attribute for array-type params in namelist defaults works as expected
"""
- case = self._create_test("SMS.ne30_ne30.F2010-SCREAMv1 --no-build".split())
+ case = self._create_test("SMS.ne30_ne30.F2010-SCREAMv1 --no-build")
+
+ # Add testOnly proc
+ self._chg_atmconfig([("mac_aero_mic::atm_procs_list", "shoc,cldFraction,spa,p3,testOnly")], case)
- # An unbuffered atmchange is semantically the same as a manual edit
- self._chg_atmconfig([("atm_log_level", "trace")], case, buff=False, expect_lost=True)
+ # Test case 1: append to base, then to last. Should give all 3 entries stacked
+ run_cmd_no_fail(f"./xmlchange --append SCREAM_CMAKE_OPTIONS='SCREAM_NUM_VERTICAL_LEV 1'", from_dir=case)
+ run_cmd_no_fail("./case.setup", from_dir=case)
+ self._get_values(case,"my_param","1,2,3,4,5,6")
+
+ # Test case 2: append to last, then to base. Should give 1st and 3rd entry
+ run_cmd_no_fail(f"./xmlchange --append SCREAM_CMAKE_OPTIONS='SCREAM_NUM_VERTICAL_LEV 2'", from_dir=case)
+ run_cmd_no_fail("./case.setup", from_dir=case)
+ self._get_values(case,"my_param","1,2,5,6")
###########################################################################
- def test_reset_atmchanges_are_lost(self):
+ def test_append(self):
###########################################################################
"""
- Test that manual atmchanges are lost when eamxx setup is called
+ Test that var+=value syntax behaves as expected
"""
- case = self._create_test("SMS.ne30_ne30.F2010-SCREAMv1 --no-build".split())
+ case = self._create_test("SMS.ne30_ne30.F2010-SCREAMv1 --no-build")
- # An unbuffered atmchange is semantically the same as a manual edit
- self._chg_atmconfig([("atm_log_level", "trace")], case, reset=True, expect_lost=True)
+ # Append to an existing entry
+ name = 'output_yaml_files'
+ out = run_cmd_no_fail(f"./atmchange {name}+=a.yaml", from_dir=case)
+
+ # Get the yaml files
+ expected =f'{EAMXX_DIR / "data/scream_default_output.yaml"}, a.yaml'
+ self._get_values(case, name, value=expected, expect_equal=True)
###########################################################################
- def test_manual_atmchanges_are_not_lost_hack_xml(self):
+ def test_reset_atmchanges_are_lost(self):
###########################################################################
"""
- Test that manual atmchanges are not lost when eamxx setup is called if
- xml hacking is enabled.
+ Test that atmchanges are lost when resetting
"""
- case = self._create_test("SMS.ne30_ne30.F2010-SCREAMv1 --no-build".split())
-
- run_cmd_assert_result(self, f"./xmlchange SCREAM_HACK_XML=TRUE", from_dir=case)
+ case = self._create_test("SMS.ne30_ne30.F2010-SCREAMv1 --no-build")
- self._chg_atmconfig([("atm_log_level", "trace")], case, buff=False)
+ self._chg_atmconfig([("atm_log_level", "trace")], case, reset=True)
###########################################################################
- def test_multiple_atmchanges_are_preserved(self):
+ def test_atmchanges_are_lost_with_hack_xml(self):
###########################################################################
"""
- Test that multiple atmchanges are not lost when eamxx setup is called
+ Test that atmchanges are lost if SCREAM_HACK_XML=TRUE
"""
- case = self._create_test("SMS.ne30_ne30.F2010-SCREAMv1 --no-build".split())
+ case = self._create_test("SMS.ne30_ne30.F2010-SCREAMv1 --no-build")
- self._chg_atmconfig([("atm_log_level", "trace"), ("output_to_screen", "true")], case)
+ run_cmd_assert_result(self, "./xmlchange SCREAM_HACK_XML=TRUE", from_dir=case)
+
+ self._chg_atmconfig([("atm_log_level", "trace")], case, expect_lost=True)
###########################################################################
def test_atmchanges_are_preserved_testmod(self):
###########################################################################
"""
- Test that atmchanges are not lost when eamxx setup is called when that
- parameter is impacted by an active testmod
+ Test that atmchanges via testmod are preserved
"""
def_mach_comp = \
run_cmd_assert_result(self, "../CIME/Tools/list_e3sm_tests cime_tiny", from_dir=CIME_SCRIPTS_DIR).splitlines()[-1].split(".")[-1]
- case = self._create_test(f"SMS.ne30_ne30.F2010-SCREAMv1.{def_mach_comp}.scream-scream_example_testmod_atmchange --no-build".split())
+ case = self._create_test(f"SMS.ne30_ne30.F2010-SCREAMv1.{def_mach_comp}.scream-scream_example_testmod_atmchange --no-build")
+
+ # Check that the value match what's in the testmod
+ out = run_cmd_no_fail("./atmquery --value cubed_sphere_map", from_dir=case)
+ expect (out=="42", "An atm change appears to have been lost during case.setup")
+
+ ###########################################################################
+ def test_atmchanges_with_namespace(self):
+ ###########################################################################
+ """
+ Test that atmchange works when using 'namespace' syntax foo::bar
+ """
+ case = self._create_test("SMS.ne30_ne30.F2010-SCREAMv1 --no-build")
- self._chg_atmconfig([("cubed_sphere_map", "84")], case)
+ self._chg_atmconfig([("p3::enable_precondition_checks", "false")], case)
###########################################################################
def test_atmchanges_on_arrays(self):
@@ -211,10 +279,116 @@ class TestBuildnml(unittest.TestCase):
"""
Test that atmchange works for array data
"""
- case = self._create_test("SMS.ne30_ne30.F2010-SCREAMv1 --no-build".split())
+ case = self._create_test("SMS.ne30_ne30.F2010-SCREAMv1 --no-build")
self._chg_atmconfig([("surf_mom_flux", "40.0,2.0")], case)
+ ###########################################################################
+ def test_atmchanges_on_all_matches(self):
+ ###########################################################################
+ """
+ Test that atmchange --all works
+ """
+ case = self._create_test("SMS.ne30_ne30.F2010-SCREAMv1 --no-build")
+
+ self._chg_atmconfig([("enable_precondition_checks", "false", True)], case)
+
+ ###########################################################################
+ def test_atmchanges_on_all_matches_plus_spec(self):
+ ###########################################################################
+ """
+ Test atmchange --all followed by an atmchange of one of them
+ """
+ case = self._create_test("SMS.ne30_ne30.F2010-SCREAMv1 --no-build")
+
+ self._chg_atmconfig([("enable_precondition_checks", "false", True),
+ ("p3::enable_precondition_checks", "true")], case)
+
+ ###########################################################################
+ def test_atmchanges_for_atm_procs_add(self):
+ ###########################################################################
+ """
+ Test atmchanges that add atm procs
+ """
+ case = self._create_test("SMS.ne30_ne30.F2010-SCREAMv1 --no-build")
+
+ self._chg_atmconfig([("mac_aero_mic::atm_procs_list", "shoc,cldFraction,spa,p3,testOnly")], case)
+
+ # If we are able to change subcycles of testOnly then we know the atmchange
+ # above added the necessary atm proc XML block.
+ self._chg_atmconfig([("testOnly::number_of_subcycles", "42")], case)
+
+ ###########################################################################
+ def test_atmchanges_for_atm_procs_add_invalid(self):
+ ###########################################################################
+ """
+ Test atmchanges that add atm procs
+ """
+ case = self._create_test("SMS.ne30_ne30.F2010-SCREAMv1 --no-build")
+
+ # modifying a procs list requires known processes or "_" pre/post suffixes
+ stat, out, err = run_cmd ("./atmchange mac_aero_mic::atm_procs_list=shoc,cldFraction,spa,p3,spiderman",
+ from_dir=case)
+
+ expect (stat!=0,"Command './atmchange mac_aero_mic::atm_procs_list=shoc,cldFraction,spa,p3,spiderman' should have failed")
+
+ ###########################################################################
+ def test_buffer_unchanged_with_bad_change_syntax(self):
+ ###########################################################################
+ """
+ Test atmchange does not change buffer if syntax was wrong
+ """
+ case = self._create_test("SMS.ne30_ne30.F2010-SCREAMv1 --no-build")
+
+ # Attempting a bad change should not alter the content of SCREAM_ATMCHANGE_BUFFER
+ old = run_cmd_no_fail ("./xmlquery --value SCREAM_ATMCHANGE_BUFFER",from_dir=case)
+ stat, out, err = run_cmd ("./atmchange foo",from_dir=case)
+ expect (stat!=0,"Command './atmchange foo' should have failed")
+
+ new = run_cmd_no_fail ("./xmlquery --value SCREAM_ATMCHANGE_BUFFER",from_dir=case)
+
+ expect (new==old, "A bad atmchange should have not modified SCREAM_ATMCHANGE_BUFFER")
+
+ ###########################################################################
+ def test_invalid_xml_option(self):
+ ###########################################################################
+ """
+ Test atmchange errors out with invalid param names
+ """
+ case = self._create_test("SMS.ne30_ne30.F2010-SCREAMv1 --no-build")
+
+ stat, out, err = run_cmd ("./atmchange p3::non_existent=3",from_dir=case)
+ expect (stat!=0,"Command './atmchange p3::non_existent=3' should have failed")
+
+ ###########################################################################
+ def test_atmchanges_for_atm_procs_add_group(self):
+ ###########################################################################
+ """
+ Test atmchanges that add atm proc groups
+ """
+ case = self._create_test("SMS.ne30_ne30.F2010-SCREAMv1 --no-build")
+
+ out = run_cmd_no_fail ("./atmchange mac_aero_mic::atm_procs_list=shoc,_my_group_",from_dir=case)
+
+ self._chg_atmconfig([("_my_group_::atm_procs_list", "testOnly")], case)
+
+ # If we are able to change subcycles of testOnly then we know the atmchange
+ # above added the necessary atm proc XML block.
+ self._chg_atmconfig([("testOnly::number_of_subcycles", "42")], case)
+
+ ###########################################################################
+ def test_atmchanges_for_atm_procs_remove(self):
+ ###########################################################################
+ """
+ Test atmchanges that remove atm procs
+ """
+ case = self._create_test("SMS.ne30_ne30.F2010-SCREAMv1 --no-build")
+
+ self._chg_atmconfig([("mac_aero_mic::atm_procs_list", "shoc,cldFraction,spa")], case)
+
+ stat, output, error = run_cmd("./atmquery --grep p3",from_dir=case)
+ expect (output=="", "There is still a trace of the removed process")
+
###############################################################################
def parse_command_line(args, desc):
###############################################################################
diff --git a/components/eamxx/scripts/eamxx-params-docs-autogen b/components/eamxx/scripts/eamxx-params-docs-autogen
new file mode 100755
index 000000000000..3024677d96b0
--- /dev/null
+++ b/components/eamxx/scripts/eamxx-params-docs-autogen
@@ -0,0 +1,140 @@
+#!/usr/bin/env python3
+
+"""
+This script parses the file `cime_config/namelist_defaults_scream.xml`
+and generates the markdown file `docs/common/eamxx_params.md`,
+containing all the runtime parameters that can be configured via calls
+to `atmchange` (in the case folder). For each parameter, we also report
+a doc string and its type, as well as, if present, constraints and valid values.
+"""
+
+import argparse, sys, os, pathlib
+
+from utils import _ensure_pylib_impl
+
+_ensure_pylib_impl("mdutils")
+
+import xml.etree.ElementTree as ET
+from mdutils.mdutils import MdUtils
+
+sys.path.append(os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "cime_config"))
+from eamxx_buildnml_impl import resolve_all_inheritances, get_valid_selectors
+
+###############################################################################
+def parse_command_line(args, description):
+###############################################################################
+    parser = argparse.ArgumentParser(  # no options yet; provides --help and rejects stray args
+        usage="""{0}
+""".format(pathlib.Path(args[0]).name),
+        description=description,
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter
+    )
+
+    return parser.parse_args(args[1:])
+
+###########################################################################
+def add_param(docs,scope,item):
+###########################################################################
+    # Locked parameters are not to be configured at runtime, so don't even bother
+    # E.g, a locked param is something we need to get in the input file, like
+    # the restart write frequency, but we don't want the user to modify it
+    # via atmchange
+    if "locked" in item.attrib:
+        return
+    docs.new_line(f"* {scope}{item.tag}:")
+
+    pdoc = item.attrib.get('doc', "**MISSING**")
+    docs.new_line(f" - description: {pdoc}")
+
+    ptype = item.attrib.get('type', "**MISSING**")
+    docs.new_line(f" - type: {ptype}")
+
+    pvalid = item.attrib.get('valid_values', None)
+    if pvalid is not None:
+        docs.new_line(f" - valid values: {pvalid}")
+    pconstr = item.attrib.get('constraints', None)
+    if pconstr is not None:
+        docs.new_line(f" - constraints: {pconstr}")
+
+###########################################################################
+def add_children(docs,xml,scope=""):
+###########################################################################
+    done = []
+    # Locked parameters are not to be configured at runtime, so don't even bother
+    # E.g, a locked param is something we need to get in the input file, like
+    # the restart write frequency, but we don't want the user to modify it
+    # via atmchange
+    if "locked" in xml.attrib:
+        return
+    for item in xml:
+        # The same entry may appear multiple times in the XML defaults file,
+        # each time with different selectors. We don't want to generate the
+        # same documentation twice.
+        if item.tag in done:
+            continue
+        done.append(item.tag)
+        if len(item)>0:
+            add_children (docs,item,f"{scope}{xml.tag}::")
+        else:
+            add_param(docs,f"{scope}{xml.tag}::",item)
+
+    docs.new_line()
+
+###########################################################################
+def generate_params_docs():
+###########################################################################
+
+    eamxx = pathlib.Path(__file__).parent.parent.resolve()
+    xml_defaults_file = eamxx / "cime_config" / "namelist_defaults_scream.xml"
+    output_file = eamxx / "docs" / "common" / "eamxx_params.md"
+
+    print("Generating eamxx params documentation...")
+    print(f" output file: {output_file}")
+
+    with open(xml_defaults_file, "r") as fd:
+        tree = ET.parse(fd)
+        xml_defaults = tree.getroot()
+
+    get_valid_selectors(xml_defaults)  # return value unused here; call kept for its checks - TODO confirm needed
+    resolve_all_inheritances(xml_defaults)
+
+    docs = MdUtils(file_name=str(output_file),title='EAMxx runtime configurable parameters')
+    docs.new_header(level=1,title='Atmosphere Processes Parameters')
+    aps = xml_defaults.find('atmosphere_processes_defaults')
+    for ap in aps:
+        if ap.tag.startswith('atm_proc'):
+            continue
+        docs.new_header(level=2,title=ap.tag)
+        add_children(docs,ap)
+
+    ic = xml_defaults.find('initial_conditions')
+    docs.new_header(level=1,title="Initial Conditions Parameters")
+    add_children(docs,ic)
+
+    ad = xml_defaults.find('driver_options')
+    docs.new_header(level=1,title='Atmosphere Driver Parameters')
+    add_children(docs,ad)
+
+    scorpio = xml_defaults.find('Scorpio')
+    docs.new_header(level=1,title='Scorpio Parameters')
+    add_children(docs,scorpio)
+
+    homme = xml_defaults.find('ctl_nl')
+    docs.new_header(level=1,title='Homme namelist')
+    add_children(docs,homme)
+    docs.create_md_file()  # MdUtils opens and writes the file itself; no pre-open needed
+
+    print("Generating eamxx params documentation ... SUCCESS!")
+    return True
+
+###############################################################################
+def _main_func(description):
+###############################################################################
+
+    success = generate_params_docs(**vars(parse_command_line(sys.argv, description)))
+
+    sys.exit(0 if success else 1)
+
+###############################################################################
+
+if (__name__ == "__main__"):
+    _main_func(__doc__)
diff --git a/components/eamxx/scripts/gather_all_data.py b/components/eamxx/scripts/gather_all_data.py
index 6a6417b373a0..9c9b1726c465 100644
--- a/components/eamxx/scripts/gather_all_data.py
+++ b/components/eamxx/scripts/gather_all_data.py
@@ -98,6 +98,7 @@ def run_on_machine(self, machine):
if self._local:
run_cmd_no_fail(cmd, arg_stdout=None, arg_stderr=None, verbose=True, dry_run=self._dry_run, exc_type=RuntimeError)
else:
+ output = "" # Making pylint happy
try:
ssh_cmd = "ssh -o StrictHostKeyChecking=no {} '{}'".format(machine, cmd)
output = run_cmd_no_fail(ssh_cmd, dry_run=self._dry_run, exc_type=RuntimeError, combine_output=True)
diff --git a/components/eamxx/scripts/gen_boiler.py b/components/eamxx/scripts/gen_boiler.py
index 95838a7c631a..d45197deeeba 100644
--- a/components/eamxx/scripts/gen_boiler.py
+++ b/components/eamxx/scripts/gen_boiler.py
@@ -2,7 +2,8 @@
from git_utils import get_git_toplevel_dir
from collections import OrderedDict
-import pathlib, re, os
+import re
+from pathlib import Path
#
# Global hardcoded data
@@ -11,67 +12,67 @@
# Templates: maps piece name to generic file text
FILE_TEMPLATES = {
"cxx_bfb_unit_impl": lambda phys, sub, gen_code:
-"""#include "catch2/catch.hpp"
+f"""#include "catch2/catch.hpp"
#include "share/scream_types.hpp"
#include "ekat/ekat_pack.hpp"
#include "ekat/kokkos/ekat_kokkos_utils.hpp"
-#include "physics/{physics}/{physics}_functions.hpp"
-#include "physics/{physics}/{physics}_functions_f90.hpp"
+#include "physics/{phys}/{phys}_functions.hpp"
+#include "physics/{phys}/{phys}_functions_f90.hpp"
-#include "{physics}_unit_tests_common.hpp"
+#include "{phys}_unit_tests_common.hpp"
namespace scream {{
-namespace {physics} {{
+namespace {phys} {{
namespace unit_test {{
template
-struct UnitWrap::UnitTest::{test_data_struct} {{
+struct UnitWrap::UnitTest::{get_data_test_struct_name(sub)} {{
{gen_code}
}};
}} // namespace unit_test
-}} // namespace {physics}
+}} // namespace {phys}
}} // namespace scream
namespace {{
-TEST_CASE("{sub}_bfb", "[{physics}]")
+TEST_CASE("{sub}_bfb", "[{phys}]")
{{
- using TestStruct = scream::{physics}::unit_test::UnitWrap::UnitTest::{test_data_struct};
+ using TestStruct = scream::{phys}::unit_test::UnitWrap::UnitTest::{get_data_test_struct_name(sub)};
TestStruct::run_bfb();
}}
}} // empty namespace
-""".format(physics=phys, sub=sub, test_data_struct=get_data_test_struct_name(sub), gen_code=gen_code),
+""",
###############################################################################
"cxx_func_impl": lambda phys, sub, gen_code:
-"""#ifndef {phys_upper}_{sub_upper}_IMPL_HPP
-#define {phys_upper}_{sub_upper}_IMPL_HPP
+f"""#ifndef {phys.upper()}_{sub.upper()}_IMPL_HPP
+#define {phys.upper()}_{sub.upper()}_IMPL_HPP
-#include "{physics}_functions.hpp" // for ETI only but harmless for GPU
+#include "{phys}_functions.hpp" // for ETI only but harmless for GPU
namespace scream {{
-namespace {physics} {{
+namespace {phys} {{
/*
- * Implementation of {physics} {sub}. Clients should NOT
- * #include this file, but include {physics}_functions.hpp instead.
+ * Implementation of {phys} {sub}. Clients should NOT
+ * #include this file, but include {phys}_functions.hpp instead.
*/
template
{gen_code}
-}} // namespace {physics}
+}} // namespace {phys}
}} // namespace scream
#endif
-""".format(physics=phys, sub=sub, gen_code=gen_code, phys_upper=phys.upper(), sub_upper=sub.upper()),
+""",
###############################################################################
@@ -83,16 +84,16 @@
FILEPATH, FILECREATE, INSERT_REGEX, ID_SELF_BEGIN_REGEX, ID_SELF_END_REGEX, DESC = range(6)
PIECES = OrderedDict([
("f90_c2f_bind", (
- lambda phys, sub, gb: "{}_iso_c.f90".format(phys),
+ lambda phys, sub, gb: f"{phys}_iso_c.f90",
lambda phys, sub, gb: expect_exists(phys, sub, gb, "f90_c2f_bind"),
- lambda phys, sub, gb: re.compile(r"^\s*end\s+module\s{}_iso_c".format(phys)), # put at end of module
+ lambda phys, sub, gb: re.compile(fr"^\s*end\s+module\s{phys}_iso_c"), # put at end of module
lambda phys, sub, gb: get_subroutine_begin_regex(sub + "_c"), # sub_c begin
lambda phys, sub, gb: get_subroutine_end_regex(sub + "_c"), # sub_c end
lambda *x : "The c to f90 fortran subroutine(_c)"
)),
("f90_f2c_bind" , (
- lambda phys, sub, gb: "{}_iso_f.f90".format(phys),
+ lambda phys, sub, gb: f"{phys}_iso_f.f90",
lambda phys, sub, gb: expect_exists(phys, sub, gb, "f90_f2c_bind"),
lambda phys, sub, gb: re.compile(r"^\s*end\s+interface"), # put at end of interface
lambda phys, sub, gb: get_subroutine_begin_regex(sub + "_f"), # sub_f begin
@@ -101,7 +102,7 @@
)),
("cxx_c2f_bind_decl" , (
- lambda phys, sub, gb: "{}_functions_f90.cpp".format(phys),
+ lambda phys, sub, gb: f"{phys}_functions_f90.cpp",
lambda phys, sub, gb: expect_exists(phys, sub, gb, "cxx_c2f_bind_decl"),
lambda phys, sub, gb: get_cxx_close_block_regex(comment='extern "C" : end _c decls'), # reqs special comment
lambda phys, sub, gb: get_cxx_function_begin_regex(sub + "_c"), # cxx_c decl
@@ -110,7 +111,7 @@
)),
("cxx_c2f_glue_decl" , (
- lambda phys, sub, gb: "{}_functions_f90.hpp".format(phys),
+ lambda phys, sub, gb: f"{phys}_functions_f90.hpp",
lambda phys, sub, gb: expect_exists(phys, sub, gb, "cxx_c2f_glue_decl"),
lambda phys, sub, gb: re.compile(r'^\s*extern\s+"C"'), # put before _f decls
lambda phys, sub, gb: get_cxx_function_begin_regex(sub), # cxx(data) decl
@@ -119,7 +120,7 @@
)),
("cxx_c2f_glue_impl" , (
- lambda phys, sub, gb: "{}_functions_f90.cpp".format(phys),
+ lambda phys, sub, gb: f"{phys}_functions_f90.cpp",
lambda phys, sub, gb: expect_exists(phys, sub, gb, "cxx_c2f_glue_impl"),
lambda phys, sub, gb: re.compile(r"^\s*// end _c impls"), # reqs special comment
lambda phys, sub, gb: get_cxx_function_begin_regex(sub), # cxx(data)
@@ -128,7 +129,7 @@
)),
("cxx_c2f_data" , (
- lambda phys, sub, gb: "{}_functions_f90.hpp".format(phys),
+ lambda phys, sub, gb: f"{phys}_functions_f90.hpp",
lambda phys, sub, gb: expect_exists(phys, sub, gb, "cxx_c2f_data"),
lambda phys, sub, gb: re.compile(r"^\s*// Glue functions to call fortran"), # reqs special comment
lambda phys, sub, gb: get_cxx_struct_begin_regex(get_data_struct_name(sub)), # struct Sub
@@ -137,7 +138,7 @@
)),
("cxx_f2c_bind_decl" , (
- lambda phys, sub, gb: "{}_functions_f90.hpp".format(phys),
+ lambda phys, sub, gb: f"{phys}_functions_f90.hpp",
lambda phys, sub, gb: expect_exists(phys, sub, gb, "cxx_f2c_bind_decl"),
lambda phys, sub, gb: get_cxx_close_block_regex(comment="end _f function decls"), # reqs special comment
lambda phys, sub, gb: get_cxx_function_begin_regex(sub + "_f"), # cxx_f decl
@@ -146,7 +147,7 @@
)),
("cxx_f2c_bind_impl" , (
- lambda phys, sub, gb: "{}_functions_f90.cpp".format(phys),
+ lambda phys, sub, gb: f"{phys}_functions_f90.cpp",
lambda phys, sub, gb: expect_exists(phys, sub, gb, "cxx_f2c_bind_impl"),
lambda phys, sub, gb: get_namespace_close_regex(phys), # insert at end of namespace
lambda phys, sub, gb: get_cxx_function_begin_regex(sub + "_f"), # cxx_f
@@ -155,7 +156,7 @@
)),
("cxx_func_decl", (
- lambda phys, sub, gb: "{}_functions.hpp".format(phys),
+ lambda phys, sub, gb: f"{phys}_functions.hpp",
lambda phys, sub, gb: expect_exists(phys, sub, gb, "cxx_func_decl"),
lambda phys, sub, gb: get_cxx_close_block_regex(semicolon=True, comment="struct Functions"), # end of struct, reqs special comment
lambda phys, sub, gb: get_cxx_function_begin_regex(sub, static=True), # cxx decl
@@ -164,16 +165,16 @@
)),
("cxx_incl_impl", (
- lambda phys, sub, gb: "{}_functions.hpp".format(phys),
+ lambda phys, sub, gb: f"{phys}_functions.hpp",
lambda phys, sub, gb: expect_exists(phys, sub, gb, "cxx_incl_impl"),
- lambda phys, sub, gb: re.compile(r"^\s*#\s*endif\s+//\s*KOKKOS_ENABLE_CUDA"), # insert at end of impl includes, reqs special comment
+ lambda phys, sub, gb: re.compile(r"^\s*#\s*endif\s+//\s*GPU"), # insert at end of impl includes, reqs special comment
lambda phys, sub, gb: re.compile(r'^\s*#\s*include\s+"{}"'.format(get_piece_data(phys, sub, "cxx_func_impl", FILEPATH, gb))),
lambda phys, sub, gb: re.compile(r".*"),
lambda *x : "The include of *impl.hpp file at bottom of main hpp"
)),
("cxx_func_impl", (
- lambda phys, sub, gb: "{}_{}_impl.hpp".format(phys, sub),
+ lambda phys, sub, gb: f"impl/{phys}_{sub}_impl.hpp",
lambda phys, sub, gb: create_template(phys, sub, gb, "cxx_func_impl"),
lambda phys, sub, gb: get_namespace_close_regex(phys), # insert at end of namespace
lambda phys, sub, gb: get_cxx_function_begin_regex(sub, template="Functions"), # cxx begin
@@ -182,7 +183,7 @@
)),
("cxx_bfb_unit_decl", (
- lambda phys, sub, gb: "tests/{}_unit_tests_common.hpp".format(phys),
+ lambda phys, sub, gb: f"tests/{phys}_unit_tests_common.hpp",
lambda phys, sub, gb: expect_exists(phys, sub, gb, "cxx_bfb_unit_decl"),
lambda phys, sub, gb: get_cxx_close_block_regex(semicolon=True), # insert at end of test struct
lambda phys, sub, gb: get_cxx_struct_begin_regex(get_data_test_struct_name(sub)), # struct decl
@@ -191,7 +192,7 @@
)),
("cxx_bfb_unit_impl", (
- lambda phys, sub, gb: "tests/{}_{}_tests.cpp".format(phys, sub),
+ lambda phys, sub, gb: f"tests/{phys}_{sub}_tests.cpp",
lambda phys, sub, gb: create_template(phys, sub, gb, "cxx_bfb_unit_impl"),
lambda phys, sub, gb: get_cxx_close_block_regex(semicolon=True, at_line_start=True), # insert of end of struct
lambda phys, sub, gb: get_cxx_function_begin_regex("run_bfb", static=True), # run_bfb
@@ -200,7 +201,7 @@
)),
("cxx_eti", (
- lambda phys, sub, gb: "{}_{}.cpp".format(phys, sub),
+ lambda phys, sub, gb: f"eti/{phys}_{sub}.cpp",
lambda phys, sub, gb: create_template(phys, sub, gb, "cxx_eti"),
lambda phys, sub, gb: re.compile(".*"), # insert at top of file
lambda phys, sub, gb: re.compile(".*"), # start at top of file
@@ -211,8 +212,8 @@
("cmake_impl_eti", (
lambda phys, sub, gb: "CMakeLists.txt",
lambda phys, sub, gb: expect_exists(phys, sub, gb, "cmake_impl_eti"),
- lambda phys, sub, gb: re.compile(r".*[)]\s*#\s*{} ETI SRCS".format(phys.upper())), # insert at end of ETI src list, reqs special comment
- lambda phys, sub, gb: re.compile(r".*{}".format(get_piece_data(phys, sub, "cxx_eti", FILEPATH, gb))),
+ lambda phys, sub, gb: re.compile(fr".*[)]\s*#\s*{phys.upper()} ETI SRCS"), # insert at end of ETI src list, reqs special comment
+ lambda phys, sub, gb: re.compile(fr".*{get_piece_data(phys, sub, 'cxx_eti', FILEPATH, gb)}"),
lambda phys, sub, gb: re.compile(".*"),
lambda *x : "Make cmake aware of the ETI file if not cuda build"
)),
@@ -220,8 +221,8 @@
("cmake_unit_test", (
lambda phys, sub, gb: "tests/CMakeLists.txt",
lambda phys, sub, gb: expect_exists(phys, sub, gb, "cmake_unit_test"),
- lambda phys, sub, gb: re.compile(r".*[)]\s*#\s*{}_TESTS_SRCS".format(phys.upper())), # insert at end of test src list, reqs special comment
- lambda phys, sub, gb: re.compile(r".*{}".format(os.path.basename(get_piece_data(phys, sub, "cxx_bfb_unit_impl", FILEPATH, gb)))),
+ lambda phys, sub, gb: re.compile(fr".*[)]\s*#\s*{phys.upper()}_TESTS_SRCS"), # insert at end of test src list, reqs special comment
+ lambda phys, sub, gb: re.compile(fr".*{Path(get_piece_data(phys, sub, 'cxx_bfb_unit_impl', FILEPATH, gb)).name}"),
lambda phys, sub, gb: re.compile(".*"),
lambda *x : "Make cmake aware of the unit test"
)),
@@ -230,17 +231,22 @@
# physics map. maps the name of a physics packages containing the original fortran subroutines to:
# (path-to-origin, path-to-cxx-src)
-ORIGIN_FILE, CXX_ROOT, INIT_CODE = range(3)
+ORIGIN_FILES, CXX_ROOT, INIT_CODE = range(3)
PHYSICS = {
"p3" : (
- "components/eam/src/physics/cam/micro_p3.F90",
+ ("components/eam/src/physics/cam/micro_p3.F90",),
"components/eamxx/src/physics/p3",
"p3_init();"
),
"shoc" : (
- "components/eam/src/physics/cam/shoc.F90",
+ ("components/eam/src/physics/cam/shoc.F90",),
"components/eamxx/src/physics/shoc",
- "shoc_init(REPLACE_ME, true);"
+ "shoc_init(d.nlev, true);"
+ ),
+ "dp" : (
+ ("components/eam/src/control/apply_iop_forcing.F90", "components/eam/src/dynamics/se/se_iop_intr_mod.F90", "components/eam/src/control/iop_data_mod.F90", "components/eam/src/control/history_iop.F90"),
+ "components/eamxx/src/physics/dp",
+ "dp_init(d.plev, true);"
),
}
@@ -316,7 +322,7 @@ def get_subroutine_begin_regex(name):
>>> bool(get_subroutine_begin_regex("fake_sub").match("subroutine fake_sub"))
False
"""
- subroutine_begin_regex_str = r"^\s*subroutine\s+{}\s*[(]".format(name)
+ subroutine_begin_regex_str = fr"^\s*subroutine\s+{name}\s*[(]"
return re.compile(subroutine_begin_regex_str)
###############################################################################
@@ -338,7 +344,7 @@ def get_function_begin_regex(name):
>>> bool(get_function_begin_regex("fake_sub").match("end function fake_sub"))
False
"""
- function_begin_regex_str = r"^\s*((pure\s+)?function)\s+{}\s*[(].*result\s*[(]\s*([^) ]+)".format(name)
+ function_begin_regex_str = fr"^\s*((pure\s+)?function)\s+{name}\s*[(].*result\s*[(]\s*([^) ]+)"
return re.compile(function_begin_regex_str)
###############################################################################
@@ -358,7 +364,7 @@ def get_subroutine_end_regex(name):
>>> bool(get_subroutine_end_regex("fake_sub").match("end function fake_sub_2"))
False
"""
- subroutine_end_regex_str = r"^\s*end\s+(subroutine|function)\s+{}\s*$".format(name)
+ subroutine_end_regex_str = fr"^\s*end\s+(subroutine|function)\s+{name}\s*$"
return re.compile(subroutine_end_regex_str)
###############################################################################
@@ -383,8 +389,8 @@ def get_cxx_function_begin_regex(name, static=False, template=None):
True
"""
static_regex_str = r"static\s+" if static else ""
- template_regex_str = r"{}::".format(template) if template else ""
- function_begin_regex_str = r"^\s*{}void\s+{}{}\s*[(]".format(static_regex_str, template_regex_str, name)
+ template_regex_str = fr"{template}::" if template else ""
+ function_begin_regex_str = fr"^\s*{static_regex_str}void\s+{template_regex_str}{name}\s*[(]"
return re.compile(function_begin_regex_str)
###############################################################################
@@ -420,8 +426,8 @@ def get_cxx_close_block_regex(semicolon=False, comment=None, at_line_start=False
"""
semicolon_regex_str = r"\s*;" if semicolon else ""
line_start_regex_str = "" if at_line_start else r"\s*"
- comment_regex_str = r"\s*//\s*{}".format(comment) if comment else ""
- close_block_regex_str = re.compile(r"^{}}}{}{}\s*$".format(line_start_regex_str, semicolon_regex_str, comment_regex_str))
+ comment_regex_str = fr"\s*//\s*{comment}" if comment else ""
+    close_block_regex_str = fr"^{line_start_regex_str}}}{semicolon_regex_str}{comment_regex_str}\s*$"
return re.compile(close_block_regex_str)
###############################################################################
@@ -433,7 +439,7 @@ def get_namespace_close_regex(namespace):
>>> bool(get_namespace_close_regex("foo").match(" } // namespace foo_bar"))
False
"""
- return get_cxx_close_block_regex(comment=r"namespace\s+{}".format(namespace))
+ return get_cxx_close_block_regex(comment=fr"namespace\s+{namespace}")
###############################################################################
def get_cxx_struct_begin_regex(struct):
@@ -446,7 +452,7 @@ def get_cxx_struct_begin_regex(struct):
>>> bool(get_cxx_struct_begin_regex("Foo").match("struct FooBar"))
False
"""
- struct_regex_str = r"^\s*struct\s+{}([\W]|$)".format(struct)
+ struct_regex_str = fr"^\s*struct\s+{struct}([\W]|$)"
return re.compile(struct_regex_str)
###############################################################################
@@ -469,7 +475,7 @@ def get_data_test_struct_name(sub):
>>> get_data_test_struct_name("update_prognostics_implicit")
'TestUpdatePrognosticsImplicit'
"""
- return "Test{}".format(get_data_struct_name(sub)[:-4])
+ return f"Test{get_data_struct_name(sub)[:-4]}"
###############################################################################
def get_supported_pieces():
@@ -495,8 +501,8 @@ def get_physics_data(physics_name, physics_data):
def expect_exists(physics, sub, gb, piece):
###############################################################################
filepath = gb.get_path_for_piece_file(physics, sub, piece)
- expect(filepath.exists(), "For generating {}'s {} for phyiscs {}, expected file {} to already exist".\
- format(sub, piece, physics, filepath))
+    expect(filepath.exists(),
+           f"For generating {sub}'s {piece} for physics {physics}, expected file {filepath} to already exist")
return False # File was not created
###############################################################################
@@ -507,7 +513,7 @@ def create_template(physics, sub, gb, piece, force=False, force_arg_data=None):
>>> gb = GenBoiler(["linear_interp"], ["cxx_func_impl"], dry_run=True)
>>> create_template("shoc", "linear_interp", gb, "cxx_func_impl", force=True, force_arg_data=UT_ARG_DATA) #doctest: +ELLIPSIS
- Would create file .../components/eamxx/src/physics/shoc/shoc_linear_interp_impl.hpp with contents:
+ Would create file .../components/eamxx/src/physics/shoc/impl/shoc_linear_interp_impl.hpp with contents:
#ifndef SHOC_LINEAR_INTERP_IMPL_HPP
#define SHOC_LINEAR_INTERP_IMPL_HPP
@@ -539,12 +545,12 @@ def create_template(physics, sub, gb, piece, force=False, force_arg_data=None):
filepath = gb.get_path_for_piece_file(physics, sub, piece)
if not filepath.exists() or force:
expect(piece in FILE_TEMPLATES,
- "{} does not exist and there is no template for generating files for piece {}".format(filepath, piece))
+ f"{filepath} does not exist and there is no template for generating files for piece {piece}")
- gen_code = getattr(gb, "gen_{}".format(piece))(physics, sub, force_arg_data=force_arg_data)
+ gen_code = getattr(gb, f"gen_{piece}")(physics, sub, force_arg_data=force_arg_data)
contents = FILE_TEMPLATES[piece](physics, sub, gen_code)
if gb.dry_run():
- print("Would create file {} with contents:\n{}".format(filepath, contents))
+ print(f"Would create file {filepath} with contents:\n{contents}")
else:
with filepath.open("w", encoding="utf-8") as fd:
fd.write(contents)
@@ -662,7 +668,7 @@ def split_top_commas(line):
if balanced:
top_splits.append(raw_split)
else:
- top_splits[-1] += ",{}".format(raw_split)
+ top_splits[-1] += f",{raw_split}"
balanced = top_splits[-1].count("(") == top_splits[-1].count(")")
@@ -689,7 +695,7 @@ def get_arg_order(line):
first_paren_contents = ""
for c in line:
if c == "(":
- expect(not first_paren, "Bad line, multiple opening parens: {}".format(line))
+ expect(not first_paren, f"Bad line, multiple opening parens: {line}")
first_paren = True
elif c == ")":
break
@@ -733,20 +739,31 @@ def parse_f90_args(line):
[('x1', 'real', 'in', ('ncol', 'km1')), ('y1', 'real', 'in', ('ncol', 'km1'))]
>>> parse_f90_args('real(rtype), intent(in) :: x1(ncol,km1,ntracers)')
[('x1', 'real', 'in', ('ncol', 'km1', 'ntracers'))]
+ >>> parse_f90_args('type(element_t), intent(inout) :: elem(:)')
+ [('elem', 'type::element_t', 'inout', (':',))]
+ >>> parse_f90_args('character*(max_path_len), intent(out), optional :: iopfile_out')
+ [('iopfile_out', 'type::string', 'out', None)]
"""
- expect(line.count("::") == 1, "Expected line format 'type-info :: names' for: {}".format(line))
+ expect(line.count("::") == 1, f"Expected line format 'type-info :: names' for: {line}")
metadata_str, names_str = line.split("::")
names_dims = split_top_commas(names_str)
metadata = split_top_commas(metadata_str)
- argtype = metadata[0].split("(")[0].strip()
+ argtoken = metadata[0]
+ argtype = argtoken.split("(")[0].strip()
+ if argtype == "type":
+ expect("(" in argtoken, f"Undefined type for {argtoken}")
+ argtype += ("::" + argtoken.split("(")[1].strip().rstrip(")"))
+ elif argtype == "character*":
+ argtype = "type::string"
+
intent, dims = None, None
for metadatum in metadata:
if metadatum.startswith("intent"):
- expect(intent is None, "Multiple intents in line: {}".format(line))
+ expect(intent is None, f"Multiple intents in line: {line}")
intent = metadatum.split("(")[-1].rstrip(")").strip()
elif metadatum.startswith("dimension"):
- expect(dims is None, "Multiple dimensions in line: {}".format(line))
+ expect(dims is None, f"Multiple dimensions in line: {line}")
dims_raw = metadatum.split("(")[-1].rstrip(")").strip()
dims = tuple(item.replace(" ", "") for item in dims_raw.split(","))
@@ -756,7 +773,7 @@ def parse_f90_args(line):
name, dims_raw = name_dim.split("(")
dims_raw = dims_raw.rstrip(")").strip()
dims_check = tuple(item.replace(" ", "") for item in dims_raw.split(","))
- expect(dims is None or dims_check == dims, "Inconsistent dimensions in line: {}".format(line))
+ expect(dims is None or dims_check == dims, f"Inconsistent dimensions in line: {line}")
dims = dims_check
names.append(name.strip())
else:
@@ -833,12 +850,41 @@ def parse_origin(contents, subs):
...
... return foo
... end function impli_srf_stress_term
+ ...
+ ... subroutine advance_iop_forcing(scm_dt, ps_in, & ! In
+ ... u_in, v_in, t_in, q_in, t_phys_frc,& ! In
+ ... u_update, v_update, t_update, q_update) ! Out
+ ...
+ ... ! Input arguments
+ ... real(r8), intent(in) :: ps_in ! surface pressure [Pa]
+ ... real(r8), intent(in) :: u_in(plev) ! zonal wind [m/s]
+ ... real(r8), intent(in) :: v_in(plev) ! meridional wind [m/s]
+ ... real(r8), intent(in) :: t_in(plev) ! temperature [K]
+ ... real(r8), intent(in) :: q_in(plev,pcnst) ! q tracer array [units vary]
+ ... real(r8), intent(in) :: t_phys_frc(plev) ! temperature forcing from physics [K/s]
+ ... real(r8), intent(in) :: scm_dt ! model time step [s]
+ ...
+ ... ! Output arguments
+ ... real(r8), intent(out) :: t_update(plev) ! updated temperature [K]
+ ... real(r8), intent(out) :: q_update(plev,pcnst)! updated q tracer array [units vary]
+ ... real(r8), intent(out) :: u_update(plev) ! updated zonal wind [m/s]
+ ... real(r8), intent(out) :: v_update(plev) ! updated meridional wind [m/s]
+ ...
+ ... end subroutine advance_iop_forcing
+ ...
+ ... subroutine iop_setinitial(elem)
+ ... type(element_t), intent(inout) :: elem(:)
+ ... end subroutine iop_setinitial
... '''
>>> print("\n".join([str(item) for item in sorted(parse_origin(teststr, ["p3_get_tables", "p3_init_b"]).items())]))
('p3_get_tables', [('mu_r_user', 'real', 'out', ('150',)), ('revap_user', 'real', 'out', ('300', '10')), ('tracerd', 'real', 'out', ('300', '10', '42')), ('vn_user', 'real', 'out', ('300', '10')), ('vm_user', 'real', 'out', ('300', '10'))])
('p3_init_b', [])
>>> print("\n".join([str(item) for item in parse_origin(teststr, ["impli_srf_stress_term"]).items()]))
('impli_srf_stress_term', [('shcol', 'integer', 'in', None), ('rho_zi_sfc', 'real', 'in', ('shcol',)), ('uw_sfc', 'real', 'in', ('shcol',)), ('vw_sfc', 'real', 'in', ('shcol',)), ('u_wind_sfc', 'real', 'in', ('shcol',)), ('v_wind_sfc', 'real', 'in', ('shcol',)), ('ksrf', 'real', 'out', ('shcol',))])
+ >>> print("\n".join([str(item) for item in parse_origin(teststr, ["advance_iop_forcing"]).items()]))
+ ('advance_iop_forcing', [('plev', 'integer', 'in', None), ('pcnst', 'integer', 'in', None), ('scm_dt', 'real', 'in', None), ('ps_in', 'real', 'in', None), ('u_in', 'real', 'in', ('plev',)), ('v_in', 'real', 'in', ('plev',)), ('t_in', 'real', 'in', ('plev',)), ('q_in', 'real', 'in', ('plev', 'pcnst')), ('t_phys_frc', 'real', 'in', ('plev',)), ('u_update', 'real', 'out', ('plev',)), ('v_update', 'real', 'out', ('plev',)), ('t_update', 'real', 'out', ('plev',)), ('q_update', 'real', 'out', ('plev', 'pcnst'))])
+ >>> print("\n".join([str(item) for item in parse_origin(teststr, ["iop_setinitial"]).items()]))
+ ('iop_setinitial', [('elem', 'type::element_t', 'inout', (':',))])
"""
begin_sub_regexes = [get_subroutine_begin_regex(sub) for sub in subs]
begin_func_regexes = [get_function_begin_regex(sub) for sub in subs]
@@ -856,11 +902,11 @@ def parse_origin(contents, subs):
begin_sub_match = begin_sub_regex.match(line)
begin_func_match = begin_func_regex.match(line)
if begin_sub_match is not None:
- expect(active_sub is None, "subroutine {} was still active when {} began".format(active_sub, sub))
+ expect(active_sub is None, f"subroutine {active_sub} was still active when {sub} began")
active_sub = sub
arg_order = get_arg_order(line)
elif begin_func_match is not None:
- expect(active_sub is None, "subroutine {} was still active when {} began".format(active_sub, sub))
+ expect(active_sub is None, f"subroutine {active_sub} was still active when {sub} began")
active_sub = sub
arg_order = get_arg_order(line)
result_name = begin_func_match.groups()[-1]
@@ -870,7 +916,7 @@ def parse_origin(contents, subs):
if decl_match is not None:
arg_decls.extend(parse_f90_args(line))
elif result_name:
- result_decl_regex = re.compile(r".+::\s*{}([^\w]|$)".format(result_name))
+ result_decl_regex = re.compile(fr".+::\s*{result_name}([^\w]|$)")
result_decl_match = result_decl_regex.match(line)
if result_decl_match is not None:
line = line.replace("::", " , intent(out) ::")
@@ -879,9 +925,9 @@ def parse_origin(contents, subs):
end_regex = get_subroutine_end_regex(active_sub)
end_match = end_regex.match(line)
if end_match is not None:
- expect(active_sub not in db, "Found multiple matches for {}".format(active_sub))
+ expect(active_sub not in db, f"Found multiple matches for {active_sub}")
expect(len(arg_order) == len(arg_decls),
- "Number of decls:\n{}\nDid not match arg list: {}".format(arg_decls, arg_order))
+ f"Number of decls:\n{arg_decls}\nDid not match arg list: {arg_order}")
# we need our decls to be ordered based on arg list order
ordered_decls = []
@@ -893,9 +939,25 @@ def parse_origin(contents, subs):
found = True
break
- expect(found, "Could not find decl for arg {} in\n{}".format(arg, arg_decls))
-
- db[active_sub] = ordered_decls
+ expect(found, f"Could not find decl for arg {arg} in\n{arg_decls}")
+
+ # Dim resolution. Arrays with global dims must have the
+ # dim as an input in the converted code.
+ global_ints_to_insert = []
+ arg_names = set()
+ for arg_datum in ordered_decls:
+ arg_name = arg_datum[ARG_NAME]
+ arg_names.add(arg_name)
+
+ for arg_datum in ordered_decls:
+ arg_dims = arg_datum[ARG_DIMS]
+ if arg_dims is not None:
+ for arg_dim in arg_dims:
+ if not arg_dim.isdigit() and arg_dim not in arg_names and arg_dim != ":":
+ global_ints_to_insert.append((arg_dim, "integer", "in", None))
+ arg_names.add(arg_dim)
+
+ db[active_sub] = global_ints_to_insert + ordered_decls
active_sub = None
result_name = None
arg_decls = []
@@ -921,17 +983,38 @@ def gen_arg_f90_decl(argtype, intent, dims, names):
'integer(kind=c_int) , intent(inout) :: barg'
>>> gen_arg_f90_decl("integer", "out", None, ["barg"])
'integer(kind=c_int) , intent(out) :: barg'
+ >>> gen_arg_f90_decl('type::element_t', 'inout', (':',), ["foo"])
+ 'type(c_ptr) , intent(inout), dimension(:) :: foo'
"""
- expect(argtype in C_TYPE_MAP, "Unrecognized argtype for C_TYPE_MAP: {}".format(argtype))
- c_type = C_TYPE_MAP[argtype]
value = ", value" if dims is None and intent == "in" else ""
- intent_s = ", intent({})".format(intent)
- dimension_s = ", dimension({})".format(", ".join(dims)) if dims is not None else ""
+ intent_s = f", intent({intent})"
+ dimension_s = f", dimension({', '.join(dims)})" if dims is not None else ""
names_s = ", ".join(names)
- return "{argtype}(kind={c_type}) {value}{intent}{dimension} :: {names}".\
- format(argtype=argtype, c_type=c_type, value=value, intent=intent_s, dimension=dimension_s, names=names_s)
+
+ if is_custom_type(argtype):
+ return f"type(c_ptr) {intent_s}{dimension_s} :: {names_s}"
+ else:
+ expect(argtype in C_TYPE_MAP, f"Unrecognized argtype for C_TYPE_MAP: {argtype}")
+ c_type = C_TYPE_MAP[argtype]
+ return f"{argtype}(kind={c_type}) {value}{intent_s}{dimension_s} :: {names_s}"
+
+###############################################################################
+def is_custom_type(arg_type):
+###############################################################################
+ return arg_type.startswith("type::")
CXX_TYPE_MAP = {"real" : "Real", "integer" : "Int", "logical" : "bool"}
+###############################################################################
+def get_cxx_scalar_type(arg_type):
+###############################################################################
+ if is_custom_type(arg_type):
+ arg_cxx_type = arg_type.split("::")[-1]
+ else:
+ expect(arg_type in CXX_TYPE_MAP, f"Unrecognized argtype for CXX_TYPE_MAP: {arg_type}")
+ arg_cxx_type = CXX_TYPE_MAP[arg_type]
+
+ return arg_cxx_type
+
###############################################################################
def get_cxx_type(arg_datum):
###############################################################################
@@ -952,12 +1035,13 @@ def get_cxx_type(arg_datum):
'Real*'
>>> get_cxx_type(("foo", "integer", "inout", None))
'Int*'
+ >>> get_cxx_type(('elem', 'type::element_t', 'inout', (':',)))
+ 'element_t*'
"""
is_ptr = arg_datum[ARG_DIMS] is not None or arg_datum[ARG_INTENT] != "in"
arg_type = arg_datum[ARG_TYPE]
- expect(arg_type in CXX_TYPE_MAP, "Unrecognized argtype for CXX_TYPE_MAP: {}".format(arg_type))
- arg_cxx_type = CXX_TYPE_MAP[arg_type]
- return "{}{}".format(arg_cxx_type, "*" if is_ptr else "")
+ arg_cxx_type = get_cxx_scalar_type(arg_type)
+ return f"{arg_cxx_type}{'*' if is_ptr else ''}"
KOKKOS_TYPE_MAP = {"real" : "Spack", "integer" : "Int", "logical" : "bool"}
###############################################################################
@@ -983,14 +1067,22 @@ def get_kokkos_type(arg_datum):
'Spack&'
>>> get_kokkos_type(("foo", "integer", "inout", None))
'Int&'
+ >>> get_kokkos_type(('elem', 'type::element_t', 'inout', (':',)))
+ 'const uview_1d&'
"""
is_const = arg_datum[ARG_INTENT] == "in"
is_view = arg_datum[ARG_DIMS] is not None
- base_type = "{}{}".format("const " if is_const else "", KOKKOS_TYPE_MAP[arg_datum[ARG_TYPE]])
+ arg_type = arg_datum[ARG_TYPE]
+ if is_custom_type(arg_type):
+ kokkos_type = arg_type.split("::")[-1]
+ else:
+ kokkos_type = KOKKOS_TYPE_MAP[arg_type]
+
+ base_type = f"{'const ' if is_const else ''}{kokkos_type}"
# We assume 1d even if the f90 array is 2d since we assume c++ will spawn a kernel
# over one of the dimensions
- return "const uview_1d<{}>&".format(base_type) if is_view else "{}&".format(base_type)
+ return f"const uview_1d<{base_type}>&" if is_view else f"{base_type}&"
###############################################################################
def gen_arg_cxx_decls(arg_data, kokkos=False):
@@ -1006,7 +1098,7 @@ def gen_arg_cxx_decls(arg_data, kokkos=False):
arg_names = [item[ARG_NAME] for item in arg_data]
get_type = get_kokkos_type if kokkos else get_cxx_type
arg_types = [get_type(item) for item in arg_data]
- arg_sig_list = ["{} {}".format(arg_type, arg_name) for arg_name, arg_type in zip(arg_names, arg_types)]
+ arg_sig_list = [f"{arg_type} {arg_name}" for arg_name, arg_type in zip(arg_names, arg_types)]
return arg_sig_list
###############################################################################
@@ -1049,7 +1141,7 @@ def split_by_intent(arg_data):
elif intent == "out":
outputs.append(name)
else:
- expect(False, "Unhandled intent: {}".format(intent))
+ expect(False, f"Unhandled intent: {intent}")
return inputs, inouts, outputs
@@ -1070,8 +1162,10 @@ def split_by_type(arg_data):
ints.append(name)
elif argtype == "logical":
logicals.append(name)
+ elif is_custom_type(argtype):
+ pass
else:
- expect(False, "Unhandled argtype: {}".format(argtype))
+ expect(False, f"Unhandled argtype: {argtype}")
return reals, ints, logicals
@@ -1088,7 +1182,7 @@ def gen_cxx_data_args(physics, arg_data):
args_needs_ptr = [item[ARG_DIMS] is None and item[ARG_INTENT] != "in" for item in arg_data]
arg_names = [item[ARG_NAME] for item in arg_data]
arg_dim_call = [item[ARG_NAME] in all_dims for item in arg_data]
- args = ["{}d.{}".format("&" if need_ptr else "", arg_name)
+ args = [f"{'&' if need_ptr else ''}d.{arg_name}"
for arg_name, need_ptr, dim_call in zip(arg_names, args_needs_ptr, arg_dim_call)]
return args
@@ -1163,12 +1257,12 @@ def gen_struct_members(arg_data):
result = []
for intent, comment in intent_order:
if intent in metadata:
- result.append("// {}".format(comment))
+ result.append(f"// {comment}")
type_map = metadata[intent]
for type_info, names in type_map.items():
type_name, is_ptr = type_info
- decl_str = CXX_TYPE_MAP[type_name]
- decl_str += " {};".format(", ".join(["{}{}".format("*" if is_ptr else "", name) for name in names]))
+ decl_str = get_cxx_scalar_type(type_name)
+ decl_str += f" {', '.join(['{}{}'.format('*' if is_ptr else '', name) for name in names])};"
result.append(decl_str)
result.append("")
@@ -1176,7 +1270,7 @@ def gen_struct_members(arg_data):
return result
###############################################################################
-def group_data(arg_data, filter_out_intent=None):
+def group_data(arg_data, filter_out_intent=None, filter_scalar_custom_types=False):
###############################################################################
r"""
Given data, return ([fst_dims], [snd_dims], [trd_dims], [all-dims], [scalars], {dims->[real_data]}, {dims->[int_data]}, {dims->[bool_data]})
@@ -1209,7 +1303,7 @@ def group_data(arg_data, filter_out_intent=None):
for name, argtype, _, dims in arg_data:
if dims is not None:
expect(len(dims) >= 1 and len(dims) <= 3,
- "Only 1d-3d data is supported, {} has too many dims: {}".format(name, len(dims)))
+ f"Only 1d-3d data is supported, {name} has too many dims: {len(dims)}")
if dims[0] not in fst_dims:
fst_dims.append(dims[0])
@@ -1226,10 +1320,11 @@ def group_data(arg_data, filter_out_intent=None):
for name, argtype, intent, dims in arg_data:
if filter_out_intent is None or intent != filter_out_intent:
if dims is None:
- if name not in all_dims:
- scalars.append( (name, CXX_TYPE_MAP[argtype]))
- else:
- expect(argtype == "integer", "Expected dimension {} to be of type integer".format(name))
+ if not (is_custom_type(argtype) and filter_scalar_custom_types):
+ if name not in all_dims:
+ scalars.append( (name, get_cxx_scalar_type(argtype)))
+ else:
+ expect(argtype == "integer", f"Expected dimension {name} to be of type integer")
elif argtype == "integer":
int_data.setdefault(dims, []).append(name)
@@ -1237,7 +1332,7 @@ def group_data(arg_data, filter_out_intent=None):
elif argtype == "real":
real_data.setdefault(dims, []).append(name)
- else:
+ elif argtype == "logical":
bool_data.setdefault(dims, []).append(name)
return fst_dims, snd_dims, trd_dims, all_dims, scalars, real_data, int_data, bool_data
@@ -1254,7 +1349,7 @@ def gen_struct_api(physics, struct_name, arg_data):
PTD_STD_DEF(DataSubName, 8, shcol, nlev, nlevi, ntracers, gag, bab1, bab2, val);
"""
- _, _, _, all_dims, scalars, real_data, int_data, bool_data = group_data(arg_data)
+ _, _, _, all_dims, scalars, real_data, int_data, bool_data = group_data(arg_data, filter_scalar_custom_types=True)
result = []
dim_args = [(item, "Int") for item in all_dims if item is not None]
@@ -1269,17 +1364,17 @@ def gen_struct_api(physics, struct_name, arg_data):
bool_vec = []
for data, data_vec in zip([real_data, int_data, bool_data], [real_vec, int_vec, bool_vec]):
for dims, items in data.items():
- dim_cxx_vec.append("{{ {} }}".format(", ".join(["{}_".format(item) for item in dims])))
- data_vec.append("{{ {} }}".format(", ".join(["&{}".format(item) for item in items])))
+ dim_cxx_vec.append(f"{{ {', '.join(['{}_'.format(item) for item in dims])} }}")
+ data_vec.append(f"{{ {', '.join(['&{}'.format(item) for item in items])} }}")
- parent_call = " PhysicsTestData({{{}}}, {{{}}}".format(", ".join(dim_cxx_vec), ", ".join(real_vec))
+ parent_call = f" PhysicsTestData({{{', '.join(dim_cxx_vec)}}}, {{{', '.join(real_vec)}}}"
if int_vec or bool_vec:
- parent_call += ", {{{}}}".format(", ".join(int_vec))
+ parent_call += f", {{{', '.join(int_vec)}}}"
if bool_vec:
- parent_call += ", {{{}}}".format(", ".join(bool_vec))
+ parent_call += f", {{{', '.join(bool_vec)}}}"
parent_call += ")"
- parent_call += ", {}".format(", ".join(["{0}({0}_)".format(name) for name, _ in cons_args]))
+ parent_call += f", {', '.join(['{0}({0}_)'.format(name) for name, _ in cons_args])}"
parent_call += " {}"
result.append(parent_call)
@@ -1332,8 +1427,7 @@ def check_existing_piece(lines, begin_regex, end_regex):
if begin_match:
expect(begin_idx is None,
- "Found multiple begin matches for pattern '{}' before end pattern '{}' was found".\
- format(begin_regex.pattern, end_regex.pattern))
+ f"Found multiple begin matches for pattern '{begin_regex.pattern}' before end pattern '{end_regex.pattern}' was found")
begin_idx = idx
@@ -1343,8 +1437,7 @@ def check_existing_piece(lines, begin_regex, end_regex):
if begin_idx is not None:
expect(end_idx is not None,
- "Found no ending match for begin pattern '{}' starting on line {} and searching end pattern '{}'".\
- format(begin_regex.pattern, begin_idx, end_regex.pattern))
+ f"Found no ending match for begin pattern '{begin_regex.pattern}' starting on line {begin_idx} and searching end pattern '{end_regex.pattern}'")
return None if begin_idx is None else (begin_idx, end_idx+1)
@@ -1372,11 +1465,11 @@ def __init__(self,
expect(target_repo is not None, "Must either run from a valid repo or provide a --target-repo")
normalized_source_repo = get_git_toplevel_dir(repo=source_repo)
- expect(normalized_source_repo is not None, "source repo {} is not a valid repo".format(source_repo))
+ expect(normalized_source_repo is not None, f"source repo {source_repo} is not a valid repo")
source_repo = normalized_source_repo
normalized_target_repo = get_git_toplevel_dir(repo=target_repo)
- expect(normalized_target_repo is not None, "target repo {} is not a valid repo".format(target_repo))
+ expect(normalized_target_repo is not None, f"target repo {target_repo} is not a valid repo")
target_repo = normalized_target_repo
# configuration
@@ -1385,8 +1478,8 @@ def __init__(self,
self._physics = physics
self._overwrite = overwrite
self._kernel = kernel
- self._source_repo = pathlib.Path(source_repo).resolve()
- self._target_repo = pathlib.Path(target_repo).resolve()
+ self._source_repo = Path(source_repo).resolve()
+ self._target_repo = Path(target_repo).resolve()
self._dry_run = dry_run
self._verbose = verbose
@@ -1403,28 +1496,30 @@ def __init__(self,
###########################################################################
def _get_db(self, phys):
###########################################################################
- if phys in self._db:
- return self._db[phys]
- else:
- origin_file = self._source_repo / get_physics_data(phys, ORIGIN_FILE)
- expect(origin_file.exists(), "Missing origin file for physics {}: {}".format(phys, origin_file))
- db = parse_origin(origin_file.open(encoding="utf-8").read(), self._subs)
- self._db[phys] = db
- if self._verbose:
- print("For physics {}, found:")
- for sub in self._subs:
- if sub in db:
- print(" For subroutine {}, found args:")
- for name, argtype, intent, dims in db[sub]:
- print(" name:{} type:{} intent:{} dims:({})".\
- format(name, argtype, intent, ",".join(dims) if dims else "scalar"))
- return db
+ if phys not in self._db:
+ origin_files = get_physics_data(phys, ORIGIN_FILES)
+ self._db[phys] = {}
+ for origin_file in origin_files:
+ origin_file = self._source_repo / origin_file
+ expect(origin_file.exists(), f"Missing origin file for physics {phys}: {origin_file}")
+ db = parse_origin(origin_file.open(encoding="utf-8").read(), self._subs)
+ self._db[phys].update(db)
+ if self._verbose:
+ print(f"For physics {phys}, found:")
+ for sub in self._subs:
+ if sub in db:
+ print(f" For subroutine {sub}, found args:")
+ for name, argtype, intent, dims in db[sub]:
+ print(" name:{} type:{} intent:{} dims:({})".\
+ format(name, argtype, intent, ",".join(dims) if dims else "scalar"))
+
+ return self._db[phys]
###########################################################################
def _get_arg_data(self, phys, sub):
###########################################################################
phys_db = self._get_db(phys)
- expect(sub in phys_db, "No data for subroutine {} in physics {}".format(sub, phys))
+ expect(sub in phys_db, f"No data for subroutine {sub} in physics {phys}")
return phys_db[sub]
###########################################################################
@@ -1435,7 +1530,7 @@ def dry_run(self):
###############################################################################
def get_path_for_piece_file(self, physics, sub, piece):
###############################################################################
- root_dir = pathlib.Path(get_physics_data(physics, CXX_ROOT))
+ root_dir = Path(get_physics_data(physics, CXX_ROOT))
filepath = self._target_repo / root_dir / get_piece_data(physics, sub, piece, FILEPATH, self)
return filepath
@@ -1529,7 +1624,7 @@ def gen_cxx_c2f_bind_decl(self, phys, sub, force_arg_data=None):
"""
arg_data = force_arg_data if force_arg_data else self._get_arg_data(phys, sub)
arg_decls = gen_arg_cxx_decls(arg_data)
- result = "void {sub}_c({arg_sig});\n".format(sub=sub, arg_sig=", ".join(arg_decls))
+ result = f"void {sub}_c({', '.join(arg_decls)});\n"
return result
###########################################################################
@@ -1541,7 +1636,7 @@ def gen_cxx_c2f_glue_decl(self, phys, sub, force_arg_data=None):
void fake_sub(FakeSubData& d);
"""
struct_name = get_data_struct_name(sub)
- result = "void {sub}({struct_name}& d);".format(sub=sub, struct_name=struct_name)
+ result = f"void {sub}({struct_name}& d);"
return result
###########################################################################
@@ -1567,16 +1662,15 @@ def gen_cxx_c2f_glue_impl(self, phys, sub, force_arg_data=None):
transpose_code_2 = "\n d.transpose();" if need_transpose else ""
data_struct = get_data_struct_name(sub)
init_code = get_physics_data(phys, INIT_CODE)
- init_code = init_code.replace("REPLACE_ME", "d.nlev")
result = \
-"""void {sub}({data_struct}& d)
+f"""void {sub}({data_struct}& d)
{{
{init_code}{transpose_code_1}
{sub}_c({arg_data_args});{transpose_code_2}
}}
-""".format(sub=sub, data_struct=data_struct, init_code=init_code, transpose_code_1=transpose_code_1, transpose_code_2=transpose_code_2, arg_data_args=arg_data_args)
+"""
return result
###########################################################################
@@ -1617,11 +1711,11 @@ def gen_cxx_c2f_data(self, phys, sub, force_arg_data=None):
api = "\n " + "\n ".join(gen_struct_api(phys, struct_name, arg_data) if any_arrays else "")
result = \
-"""struct {struct_name}{inheritance} {{
+f"""struct {struct_name}{inheritance} {{
{struct_members}{api}
}};
-""".format(struct_name=struct_name, inheritance=inheritance, struct_members=struct_members, api=api)
+"""
return result
###########################################################################
@@ -1635,7 +1729,7 @@ def gen_cxx_f2c_bind_decl(self, phys, sub, force_arg_data=None):
arg_data = force_arg_data if force_arg_data else self._get_arg_data(phys, sub)
arg_decls = gen_arg_cxx_decls(arg_data)
- return "void {sub}_f({arg_sig});".format(sub=sub, arg_sig=", ".join(arg_decls))
+ return f"void {sub}_f({', '.join(arg_decls)});"
###########################################################################
def gen_cxx_f2c_bind_impl(self, phys, sub, force_arg_data=None):
@@ -1748,21 +1842,21 @@ def gen_cxx_f2c_bind_impl(self, phys, sub, force_arg_data=None):
# make necessary view types
for output_group, prefix_char, typename in zip([oreals, oints, obools], prefix_list, type_list):
if output_group:
- impl += " using {}view_1d = typename PF::view_1d<{}>;\n".format(prefix_char, typename)
+ impl += f" using {prefix_char}view_1d = typename PF::view_1d<{typename}>;\n"
impl += "\n"
# make output views for host and device
for output_group, prefix_char in zip([oreals, oints, obools], prefix_list):
if output_group:
- impl += ' {0}view_1d {0}t_d("{0}t_d", {1});\n'.format(prefix_char, len(output_group))
- impl += " const auto {0}t_h = Kokkos::create_mirror_view({0}t_d);\n".format(prefix_char)
+ impl += f' {prefix_char}view_1d {prefix_char}t_d("{prefix_char}t_d", {len(output_group)});\n'
+ impl += f" const auto {prefix_char}t_h = Kokkos::create_mirror_view({prefix_char}t_d);\n"
impl += "\n"
# inout data must be derefenced before the kernel
for io_group, typename in zip([ioreals, ioints, iobools], type_list):
if io_group:
- impl += " {} {};\n".format(typename, ", ".join(["local_{0}(*{0})".format(item) for item in io_group]))
+ impl += f" {typename} {', '.join(['local_{0}(*{0})'.format(item) for item in io_group])};\n"
# start a kernel
impl += " Kokkos::parallel_for(1, KOKKOS_LAMBDA(const Int&) {\n"
@@ -1772,17 +1866,17 @@ def gen_cxx_f2c_bind_impl(self, phys, sub, force_arg_data=None):
# not be packed (like dt)
for output_group, typename in zip([list(ireals) + list(ooreals), oints, obools], ktype_list):
if output_group:
- impl += " {} ".format(typename)
+ impl += f" {typename} "
temp_cons = []
for item in output_group:
if item in inouts:
- temp_cons.append("{0}_(local_{0})".format(item))
+ temp_cons.append(f"{item}_(local_{item})")
elif item in outputs:
- temp_cons.append("{0}_()".format(item))
+ temp_cons.append(f"{item}_()")
else:
- temp_cons.append("{0}_({0})".format(item))
+ temp_cons.append(f"{item}_({item})")
- impl += "{};\n".format(", ".join(temp_cons))
+ impl += f"{', '.join(temp_cons)};\n"
# Make cxx call
kernel_arg_names = []
@@ -1792,15 +1886,15 @@ def gen_cxx_f2c_bind_impl(self, phys, sub, force_arg_data=None):
else:
kernel_arg_names.append(arg_name + "_")
- impl += " PF::{}({});\n".format(sub, ", ".join(kernel_arg_names))
+ impl += f" PF::{sub}({', '.join(kernel_arg_names)});\n"
# Load output data into views
for output_group, prefix_char in zip([oreals, oints, obools], prefix_list):
for idx, item in enumerate(output_group):
if output_group == oreals:
- impl += " {}t_d({}) = {}_[0];\n".format(prefix_char, idx, item)
+ impl += f" {prefix_char}t_d({idx}) = {item}_[0];\n"
else:
- impl += " {}t_d({}) = {}_;\n".format(prefix_char, idx, item)
+ impl += f" {prefix_char}t_d({idx}) = {item}_;\n"
# finish kernel
impl += " });\n"
@@ -1808,21 +1902,21 @@ def gen_cxx_f2c_bind_impl(self, phys, sub, force_arg_data=None):
# copy outputs back to host
for output_group, prefix_char in zip([oreals, oints, obools], prefix_list):
if output_group:
- impl += " Kokkos::deep_copy({0}t_h, {0}t_d);\n".format(prefix_char)
+ impl += f" Kokkos::deep_copy({prefix_char}t_h, {prefix_char}t_d);\n"
# copy from views into pointer args
for output_group, prefix_char in zip([oreals, oints, obools], prefix_list):
for idx, item in enumerate(output_group):
- impl += " *{} = {}t_h({});\n".format(item, prefix_char, idx)
+ impl += f" *{item} = {prefix_char}t_h({idx});\n"
impl += "#endif\n"
result = \
-"""{decl}
+f"""{decl}
{{
{impl}
}}
-""".format(decl=decl, impl=impl)
+"""
return result
###########################################################################
@@ -1837,7 +1931,7 @@ def gen_cxx_func_decl(self, phys, sub, force_arg_data=None):
arg_data = force_arg_data if force_arg_data else self._get_arg_data(phys, sub)
arg_decls = gen_arg_cxx_decls(arg_data, kokkos=True)
- return " KOKKOS_FUNCTION\n static void {sub}({arg_sig});".format(sub=sub, arg_sig=", ".join(arg_decls))
+ return f" KOKKOS_FUNCTION\n static void {sub}({', '.join(arg_decls)});"
###########################################################################
def gen_cxx_incl_impl(self, phys, sub, force_arg_data=None):
@@ -1845,10 +1939,10 @@ def gen_cxx_incl_impl(self, phys, sub, force_arg_data=None):
"""
>>> gb = GenBoiler([])
>>> print(gb.gen_cxx_incl_impl("shoc", "fake_sub", force_arg_data=UT_ARG_DATA))
- # include "shoc_fake_sub_impl.hpp"
+ # include "impl/shoc_fake_sub_impl.hpp"
"""
impl_path = get_piece_data(phys, sub, "cxx_func_impl", FILEPATH, self)
- return '# include "{}"'.format(impl_path)
+ return f'# include "{impl_path}"'
###########################################################################
def gen_cxx_func_impl(self, phys, sub, force_arg_data=None):
@@ -1868,11 +1962,11 @@ def gen_cxx_func_impl(self, phys, sub, force_arg_data=None):
# I don't think any intelligent guess at an impl is possible here
result = \
-"""{decl}
+f"""{decl}
{{
// TODO
// Note, argument types may need tweaking. Generator is not always able to tell what needs to be packed
-}}""".format(decl=decl)
+}}"""
return result
###########################################################################
@@ -1884,7 +1978,7 @@ def gen_cxx_bfb_unit_decl(self, phys, sub, force_arg_data=None):
struct TestFakeSub;
"""
test_struct = get_data_test_struct_name(sub)
- return " struct {};".format(test_struct)
+ return f" struct {test_struct};"
###########################################################################
def gen_cxx_bfb_unit_impl(self, phys, sub, force_arg_data=None):
@@ -1894,6 +1988,8 @@ def gen_cxx_bfb_unit_impl(self, phys, sub, force_arg_data=None):
>>> print(gb.gen_cxx_bfb_unit_impl("shoc", "fake_sub", force_arg_data=UT_ARG_DATA))
static void run_bfb()
{
+ auto engine = setup_random_test();
+
FakeSubData f90_data[] = {
// TODO
};
@@ -1903,7 +1999,7 @@ def gen_cxx_bfb_unit_impl(self, phys, sub, force_arg_data=None):
// Generate random input data
// Alternatively, you can use the f90_data construtors/initializer lists to hardcode data
for (auto& d : f90_data) {
- d.randomize();
+ d.randomize(engine);
}
// Create copies of data for use by cxx. Needs to happen before fortran calls so that
@@ -1942,12 +2038,15 @@ def gen_cxx_bfb_unit_impl(self, phys, sub, force_arg_data=None):
REQUIRE(d_f90.total(d_f90.baz) == d_cxx.total(d_cxx.ball2));
REQUIRE(d_f90.ball2[k] == d_cxx.ball2[k]);
}
+
}
}
} // run_bfb
>>> print(gb.gen_cxx_bfb_unit_impl("shoc", "fake_sub", force_arg_data=UT_ARG_DATA_ALL_SCALAR))
static void run_bfb()
{
+ auto engine = setup_random_test();
+
FakeSubData f90_data[max_pack_size] = {
// TODO
};
@@ -1957,7 +2056,7 @@ def gen_cxx_bfb_unit_impl(self, phys, sub, force_arg_data=None):
// Generate random input data
// Alternatively, you can use the f90_data construtors/initializer lists to hardcode data
for (auto& d : f90_data) {
- d.randomize();
+ d.randomize(engine);
}
// Create copies of data for use by cxx and sync it to device. Needs to happen before fortran calls so that
@@ -2036,13 +2135,13 @@ def gen_cxx_bfb_unit_impl(self, phys, sub, force_arg_data=None):
// Generate random input data
// Alternatively, you can use the f90_data construtors/initializer lists to hardcode data
for (auto& d : f90_data) {
- d.randomize();
+ d.randomize(engine);
}"""
_, _, _, _, scalars, real_data, int_data, bool_data = group_data(arg_data, filter_out_intent="in")
check_scalars, check_arrays = "", ""
for scalar in scalars:
- check_scalars += " REQUIRE(d_f90.{name} == d_cxx.{name});\n".format(name=scalar[0])
+ check_scalars += f" REQUIRE(d_f90.{scalar[0]} == d_cxx.{scalar[0]});\n"
if has_array:
c2f_transpose_code = "" if not need_transpose else \
@@ -2061,17 +2160,19 @@ def gen_cxx_bfb_unit_impl(self, phys, sub, force_arg_data=None):
all_data[k] = v
for _, data in all_data.items():
- check_arrays += " for (Int k = 0; k < d_f90.total(d_f90.{}); ++k) {{\n".format(data[0])
+ check_arrays += f" for (Int k = 0; k < d_f90.total(d_f90.{data[0]}); ++k) {{\n"
for datum in data:
- check_arrays += " REQUIRE(d_f90.total(d_f90.{orig}) == d_cxx.total(d_cxx.{name}));\n".format(orig=data[0], name=datum)
- check_arrays += " REQUIRE(d_f90.{name}[k] == d_cxx.{name}[k]);\n".format(name=datum)
+ check_arrays += f" REQUIRE(d_f90.total(d_f90.{data[0]}) == d_cxx.total(d_cxx.{datum}));\n"
+ check_arrays += f" REQUIRE(d_f90.{datum}[k] == d_cxx.{datum}[k]);\n"
- check_arrays += " }"
+ check_arrays += " }\n"
if has_array:
result = \
""" static void run_bfb()
{{
+ auto engine = setup_random_test();
+
{data_struct} f90_data[] = {{
// TODO
}};
@@ -2136,12 +2237,12 @@ def gen_cxx_bfb_unit_impl(self, phys, sub, force_arg_data=None):
spack_output_init = ""
if ooreals:
spack_output_init = \
-"""// Init outputs
- Spack {};
-""".format(", ".join(["{}(0)".format(ooreal) for ooreal in ooreals]))
+f"""// Init outputs
+ Spack {', '.join(['{}(0)'.format(ooreal) for ooreal in ooreals])};
+"""
scalars = group_data(arg_data)[4]
- func_call = "Functions::{}({});".format(sub, ", ".join([(scalar if scalar in reals else "cxx_device(0).{}".format(scalar)) for scalar, _ in scalars]))
+ func_call = f"Functions::{sub}({', '.join([(scalar if scalar in reals else 'cxx_device(0).{}'.format(scalar)) for scalar, _ in scalars])});"
spack_output_to_dview = ""
if oreals:
@@ -2155,6 +2256,8 @@ def gen_cxx_bfb_unit_impl(self, phys, sub, force_arg_data=None):
result = \
""" static void run_bfb()
{{
+ auto engine = setup_random_test();
+
{data_struct} f90_data[max_pack_size] = {{
// TODO
}};
@@ -2211,7 +2314,7 @@ def gen_cxx_eti(self, phys, sub, force_arg_data=None):
"""
>>> gb = GenBoiler([])
>>> print(gb.gen_cxx_eti("shoc", "fake_sub", force_arg_data=UT_ARG_DATA))
- #include "shoc_fake_sub_impl.hpp"
+ #include "impl/shoc_fake_sub_impl.hpp"
namespace scream {
namespace shoc {
@@ -2230,7 +2333,7 @@ def gen_cxx_eti(self, phys, sub, force_arg_data=None):
include_file = get_piece_data(phys, sub, "cxx_func_impl", FILEPATH, self)
result = \
-"""#include "{include_file}"
+f"""#include "{include_file}"
namespace scream {{
namespace {phys} {{
@@ -2244,7 +2347,7 @@ def gen_cxx_eti(self, phys, sub, force_arg_data=None):
}} // namespace {phys}
}} // namespace scream
-""".format(sub=sub, include_file=include_file, phys=phys)
+"""
return result
@@ -2254,10 +2357,10 @@ def gen_cmake_impl_eti(self, phys, sub, force_arg_data=None):
"""
>>> gb = GenBoiler([])
>>> print(gb.gen_cmake_impl_eti("shoc", "fake_sub", force_arg_data=UT_ARG_DATA))
- shoc_fake_sub.cpp
+ eti/shoc_fake_sub.cpp
"""
eti_src = get_piece_data(phys, sub, "cxx_eti", FILEPATH, self)
- return " {}".format(eti_src)
+ return f" {eti_src}"
###########################################################################
def gen_cmake_unit_test(self, phys, sub, force_arg_data=None):
@@ -2267,8 +2370,8 @@ def gen_cmake_unit_test(self, phys, sub, force_arg_data=None):
>>> print(gb.gen_cmake_unit_test("shoc", "fake_sub", force_arg_data=UT_ARG_DATA))
shoc_fake_sub_tests.cpp
"""
- test_src = os.path.basename(get_piece_data(phys, sub, "cxx_bfb_unit_impl", FILEPATH, self))
- return " {}".format(test_src)
+ test_src = Path(get_piece_data(phys, sub, "cxx_bfb_unit_impl", FILEPATH, self)).name
+ return f" {test_src}"
#
# Main methods
@@ -2353,7 +2456,7 @@ def gen_piece(self, phys, sub, piece, force_arg_data=None, force_file_lines=None
"""
if force_arg_data is None: # don't want unit tests printing this
print("===============================================================================")
- print("Trying to generate piece {} for subroutine {} for physics {}\n".format(piece, sub, phys))
+ print(f"Trying to generate piece {piece} for subroutine {sub} for physics {phys}\n")
base_filepath, was_filegen, insert_regex, self_begin_regex, self_end_regex, _ \
= [item(phys, sub, self) for item in PIECES[piece]]
@@ -2366,13 +2469,13 @@ def gen_piece(self, phys, sub, piece, force_arg_data=None, force_file_lines=None
else:
orig_lines = force_file_lines if force_file_lines else filepath.open(encoding="utf-8").read().splitlines()
needs_rewrite = False
- gen_lines = getattr(self, "gen_{}".format(piece))(phys, sub, force_arg_data=force_arg_data).splitlines()
+ gen_lines = getattr(self, f"gen_{piece}")(phys, sub, force_arg_data=force_arg_data).splitlines()
# Check to see if piece already exists
try:
existing_piece_line_range = check_existing_piece(orig_lines, self_begin_regex, self_end_regex)
except SystemExit as e:
- expect(False, "Problem parsing file {} for existing piece {}: {}".format(filepath, piece, e))
+ expect(False, f"Problem parsing file {filepath} for existing piece {piece}: {e}")
if existing_piece_line_range is not None:
# Replace existing
@@ -2414,8 +2517,7 @@ def gen_boiler(self):
try:
self.gen_piece(phys, sub, piece)
except SystemExit as e:
- print("Warning: failed to generate subroutine {} piece {} for physics {}, error: {}".\
- format(sub, piece, phys, e))
+ print(f"Warning: failed to generate subroutine {sub} piece {piece} for physics {phys}, error: {e}")
all_success = False
return all_success
diff --git a/components/eamxx/scripts/jenkins/chrysalis_setup b/components/eamxx/scripts/jenkins/chrysalis_setup
new file mode 100644
index 000000000000..e32f2a7083e4
--- /dev/null
+++ b/components/eamxx/scripts/jenkins/chrysalis_setup
@@ -0,0 +1,7 @@
+source /lcrc/soft/climate/e3sm-unified/load_latest_cime_env.sh
+
+source /gpfs/fs1/soft/chrysalis/spack/opt/spack/linux-centos8-x86_64/gcc-9.3.0/lmod-8.3-5be73rg/lmod/lmod/init/sh
+
+export PROJECT=e3sm
+
+SCREAM_MACHINE=chrysalis
diff --git a/components/eamxx/scripts/jenkins/jenkins_common_impl.sh b/components/eamxx/scripts/jenkins/jenkins_common_impl.sh
index dfd5cbb178a6..fe293debe446 100755
--- a/components/eamxx/scripts/jenkins/jenkins_common_impl.sh
+++ b/components/eamxx/scripts/jenkins/jenkins_common_impl.sh
@@ -88,12 +88,17 @@ if [ $skip_testing -eq 0 ]; then
fi
fi
+ SA_FAILURES_DETAILS=""
# Run scream stand-alone tests (SA)
if [ $test_SA -eq 1 ]; then
- ./scripts/gather-all-data "./scripts/test-all-scream ${TAS_ARGS}" -l -m $SCREAM_MACHINE
+ this_output=$(./scripts/gather-all-data "./scripts/test-all-scream ${TAS_ARGS}" -l -m $SCREAM_MACHINE)
if [[ $? != 0 ]]; then
fails=$fails+1;
sa_fail=1
+ if [[ $is_at_run == 1 ]]; then
+ errors=$(echo "$this_output" | grep -m1 -A 100000 'Build type ')
+ SA_FAILURES_DETAILS+="$errors"
+ fi
fi
# Add memcheck and coverage tests for nightlies on specific machines
@@ -116,7 +121,7 @@ if [ $skip_testing -eq 0 ]; then
fi
if [[ "$SCREAM_MACHINE" == "weaver" ]]; then
- ./scripts/gather-all-data "./scripts/test-all-scream -t cmc -t csr -t csi -t css ${TAS_ARGS}" -l -m $SCREAM_MACHINE
+ ./scripts/gather-all-data "./scripts/test-all-scream -t csm -t csr -t csi -t css ${TAS_ARGS}" -l -m $SCREAM_MACHINE
if [[ $? != 0 ]]; then
fails=$fails+1;
memcheck_fail=1
@@ -190,10 +195,14 @@ if [ $skip_testing -eq 0 ]; then
if [[ $test_v1 == 1 ]]; then
# AT runs should be fast. => run only low resolution
- ../../cime/scripts/create_test e3sm_scream_v1_lowres --compiler=gnu9 -c -b master --wait
+ this_output=$(../../cime/scripts/create_test e3sm_scream_v1_at --compiler=gnu9 -c -b master --wait)
if [[ $? != 0 ]]; then
fails=$fails+1;
v1_fail=1
+ if [[ $is_at_run == 1 ]]; then
+ errors=$(echo "$this_output" | grep -m1 -A 100000 'Waiting for tests to finish')
+ V1_FAILURES_DETAILS+="$errors"
+ fi
fi
else
echo "SCREAM v1 tests were skipped, since the Github label 'AT: Skip v1 Testing' was found.\n"
@@ -209,9 +218,11 @@ if [ $skip_testing -eq 0 ]; then
echo "FAILS DETECTED:"
if [[ $sa_fail == 1 ]]; then
echo " SCREAM STANDALONE TESTING FAILED!"
+ echo "$SA_FAILURES_DETAILS"
fi
if [[ $v1_fail == 1 ]]; then
echo " SCREAM V1 TESTING FAILED!"
+ echo "$V1_FAILURES_DETAILS"
fi
if [[ $v0_fail == 1 ]]; then
echo " SCREAM V0 TESTING FAILED!"
diff --git a/components/eamxx/scripts/jenkins/weaver_setup b/components/eamxx/scripts/jenkins/weaver_setup
index daa5483de6ae..22f1e996b8b9 100644
--- a/components/eamxx/scripts/jenkins/weaver_setup
+++ b/components/eamxx/scripts/jenkins/weaver_setup
@@ -1,3 +1,5 @@
-module load python/3.7.3
+source /etc/profile.d/modules.sh
+
+module load python/3.10.8
SCREAM_MACHINE=weaver
diff --git a/components/eamxx/scripts/machines_specs.py b/components/eamxx/scripts/machines_specs.py
index d9566ad1a256..cd717cba6b97 100644
--- a/components/eamxx/scripts/machines_specs.py
+++ b/components/eamxx/scripts/machines_specs.py
@@ -21,24 +21,27 @@
["mpicxx","mpifort","mpicc"],
"salloc -N 1 srun -n1 --preserve-env",
"/home/projects/e3sm/scream/pr-autotester/master-baselines/blake/"),
- "weaver" : (["source /etc/profile.d/modules.sh", "module purge", "module load git/2.10.1 python/3.7.3 cmake/3.23.1 cuda/11.2.2/gcc/8.3.1 openmpi/4.1.1/gcc/8.3.1/cuda/11.2.2 netcdf-c/4.8.1/gcc/8.3.1/openmpi/4.1.1 netcdf-cxx/4.2/gcc/8.3.1/openmpi/4.1.1 netcdf-fortran/4.5.4/gcc/8.3.1/openmpi/4.1.1 parallel-netcdf/1.12.2/gcc/8.3.1/openmpi/4.1.1",
+ "weaver" : (["source /etc/profile.d/modules.sh", "module purge", "module load cmake/3.25.1 git/2.39.1 python/3.10.8 py-netcdf4/1.5.8 gcc/11.3.0 cuda/11.8.0 openmpi netcdf-c netcdf-fortran parallel-netcdf netlib-lapack",
],
["mpicxx","mpifort","mpicc"],
- "bsub -I -q rhel8 -n 4",
+ "bsub -I -q rhel8 -n 4 -gpu num=4",
"/home/projects/e3sm/scream/pr-autotester/master-baselines/weaver/"),
- "mappy" : (["module purge", "module load sems-archive-env acme-env sems-archive-gcc/9.2.0 sems-archive-cmake/3.19.1 sems-archive-git/2.10.1 acme-openmpi/4.0.7 acme-netcdf/4.7.4/acme"],
+ "mappy" : (["module purge", "module load sems-archive-env acme-env acme-cmake/3.26.3 sems-archive-gcc/9.2.0 sems-archive-git/2.10.1 acme-openmpi/4.0.7 acme-netcdf/4.7.4/acme"],
["mpicxx","mpifort","mpicc"],
"",
"/sems-data-store/ACME/baselines/scream/master-baselines"),
- "lassen" : (["module --force purge", "module load git gcc/8.3.1 cuda/10.1.243 cmake/3.16.8 spectrum-mpi python/3.7.2", "export LLNL_USE_OMPI_VARS='y'"],
- ["mpicxx","mpifort","mpicc"],
- "bsub -Ip -qpdebug",
- ""),
- "ruby-intel" : (["module --force purge", "module load StdEnv cmake/3.18.0 mkl/2019.0 intel/19.0.4 netcdf-fortran/4.4.4 netcdf/4.4.1.1 pnetcdf/1.9.0 mvapich2/2.3 python/3.8.2"],
+ "lassen" : (["module --force purge", "module load git gcc/8.3.1 cuda/11.8.0 cmake/3.16.8 spectrum-mpi python/3.7.2", "export LLNL_USE_OMPI_VARS='y'",
+ "export PATH=/usr/gdata/climdat/netcdf/bin:$PATH",
+ "export LD_LIBRARY_PATH=/usr/gdata/climdat/netcdf/lib:$LD_LIBRARY_PATH",
+ ],
+ ["mpicxx","mpifort","mpicc"],
+ "bsub -Ip -qpdebug",
+ ""),
+ "ruby-intel" : (["module --force purge", "module use --append /usr/gdata/climdat/install/quartz/modulefiles", "module load StdEnv cmake/3.19.2 mkl/2022.1.0 intel-classic/2021.6.0-magic mvapich2/2.3.7 hdf5/1.12.2 netcdf-c/4.9.0 netcdf-fortran/4.6.0 parallel-netcdf/1.12.3 python/3.9.12 screamML-venv/0.0.1"],
["mpicxx","mpifort","mpicc"],
"salloc --partition=pdebug",
""),
- "quartz-intel" : (["module --force purge", "module load StdEnv cmake/3.19.2 mkl/2022.1.0 intel-classic/2021.6.0-magic netcdf-c-parallel/4.9.0 netcdf-fortran-parallel/4.6.0 mvapich2/2.3.6 python/3.9.12"],
+ "quartz-intel" : (["module --force purge", "module use --append /usr/gdata/climdat/install/quartz/modulefiles", "module load StdEnv cmake/3.19.2 mkl/2022.1.0 intel-classic/2021.6.0-magic mvapich2/2.3.7 hdf5/1.12.2 netcdf-c/4.9.0 netcdf-fortran/4.6.0 parallel-netcdf/1.12.3 python/3.9.12 screamML-venv/0.0.1"],
["mpicxx","mpifort","mpicc"],
"salloc --partition=pdebug",
""),
@@ -54,14 +57,10 @@
["mpicxx","mpifort","mpicc"],
"bsub -I -q batch -W 0:30 -P cli115 -nnodes 1",
"/gpfs/alpine/cli115/proj-shared/scream/master-baselines"),
-"perlmutter" : (["module load PrgEnv-gnu gcc/10.3.0 cudatoolkit craype-accel-nvidia80 cray-libsci craype cray-mpich cray-hdf5-parallel cray-netcdf-hdf5parallel cray-parallel-netcdf cmake","module unload craype-accel-host perftools-base perftools darshan", "export NVCC_WRAPPER_DEFAULT_COMPILER=CC", "export NVCC_WRAPPER_DEFAULT_ARCH=sm_80"],
+ "pm-gpu" : (["module load PrgEnv-gnu gcc/10.3.0 cudatoolkit craype-accel-nvidia80 cray-libsci craype cray-mpich cray-hdf5-parallel cray-netcdf-hdf5parallel cray-parallel-netcdf cmake evp-patch","module unload craype-accel-host perftools-base perftools darshan", "export NVCC_WRAPPER_DEFAULT_COMPILER=CC", "export NVCC_WRAPPER_DEFAULT_ARCH=sm_80"],
["CC","ftn","cc"],
"srun --time 00:30:00 --nodes=1 --constraint=gpu --exclusive -q regular --account e3sm_g",
""),
- "cori-knl" : (["eval $(../../cime/CIME/Tools/get_case_env)", "export OMP_NUM_THREADS=68"],
- ["CC","ftn","cc"],
- "srun --time 02:00:00 --nodes=1 --constraint=knl,quad,cache --exclusive -q regular --account e3sm",
- ""),
"compy" : (["module purge", "module load cmake/3.19.6 gcc/8.1.0 mvapich2/2.3.1 python/3.7.3"],
["mpicxx","mpifort","mpicc"],
"srun --time 02:00:00 --nodes=1 -p short --exclusive --account e3sm",
diff --git a/components/eamxx/scripts/scripts-tests b/components/eamxx/scripts/scripts-tests
index e80dbbb92b4f..b70a697a7d55 100755
--- a/components/eamxx/scripts/scripts-tests
+++ b/components/eamxx/scripts/scripts-tests
@@ -70,7 +70,7 @@ def test_cmake_cache_contents(test_obj, build_name, cache_var, expected_value):
test_obj.assertTrue(cache_file.is_file(), "Missing cache file {}".format(cache_file)) # pylint: disable=no-member
grep_output = run_cmd_assert_result(test_obj, "grep ^{} CMakeCache.txt".format(cache_var), from_dir=cache_file.parent)
- value = grep_output.split("=")[-1]
+ value = grep_output.split("=", maxsplit=1)[-1]
test_obj.assertEqual(expected_value.upper(), value.upper(),
msg="For CMake cache variable {}, expected value '{}', got '{}'".format(cache_var, expected_value, value))
@@ -173,7 +173,7 @@ class TestBaseOuter: # Hides the TestBase class from test scanner
def test_pylint(self):
ensure_pylint()
- run_cmd_assert_result(self, "python3 -m pylint --disable C --disable R {}".format(self._source_file), from_dir=TEST_DIR)
+ run_cmd_assert_result(self, "python3 -m pylint --disable C --disable R {}".format(self._source_file), from_dir=TEST_DIR, verbose=True)
def test_gen_baseline(self):
if self._generate:
@@ -349,11 +349,12 @@ class TestTestAllScream(TestBaseOuter.TestBase):
cmd = self.get_cmd("./test-all-scream -m $machine {}".format(options),
self._machine, dry_run=False)
run_cmd_assert_result(self, cmd, from_dir=TEST_DIR)
- builddir = "cuda_mem_check" if is_cuda_machine(self._machine) else "valgrind"
+ builddir = "compute_sanitizer_memcheck" if is_cuda_machine(self._machine) else "valgrind"
test_cmake_cache_contents(self, builddir, "CMAKE_BUILD_TYPE", "Debug")
test_cmake_cache_contents(self, builddir, "SCREAM_TEST_SIZE", "SHORT")
if is_cuda_machine(self._machine):
- test_cmake_cache_contents(self, builddir, "EKAT_ENABLE_CUDA_MEMCHECK", "TRUE")
+ test_cmake_cache_contents(self, builddir, "EKAT_ENABLE_COMPUTE_SANITIZER", "TRUE")
+ test_cmake_cache_contents(self, builddir, "EKAT_COMPUTE_SANITIZER_OPTIONS", "--tool=memcheck")
else:
test_cmake_cache_contents(self, builddir, "EKAT_ENABLE_VALGRIND", "TRUE")
else:
diff --git a/components/eamxx/scripts/test_all_scream.py b/components/eamxx/scripts/test_all_scream.py
index 94b642846265..dfade8beda0d 100644
--- a/components/eamxx/scripts/test_all_scream.py
+++ b/components/eamxx/scripts/test_all_scream.py
@@ -46,8 +46,13 @@ def __init__(self, longname, description, cmake_args,
# A longer decription of the test
self.description = description
- # Cmake config args for this test
+ # Cmake config args for this test. Check that quoting is done with
+ # single quotes.
self.cmake_args = cmake_args
+ for name, arg in self.cmake_args:
+ expect('"' not in arg,
+ f"In test definition for {longname}, found cmake args with double quotes {name}='{arg}'. "
+ "Please use single quotes if quotes are needed.")
# Does the test do baseline testing
self.uses_baselines = uses_baselines
@@ -172,21 +177,6 @@ def __init__(self, tas):
if persistent_supp_file.exists():
self.cmake_args.append( ("EKAT_VALGRIND_SUPPRESSION_FILE", str(persistent_supp_file)) )
-###############################################################################
-class CMC(TestProperty):
-###############################################################################
-
- def __init__(self, _):
- TestProperty.__init__(
- self,
- "cuda_mem_check",
- "debug with cuda memcheck",
- [("CMAKE_BUILD_TYPE", "Debug"), ("EKAT_ENABLE_CUDA_MEMCHECK", "True")],
- uses_baselines=False,
- on_by_default=False,
- default_test_len="short"
- )
-
###############################################################################
class CSM(TestProperty):
###############################################################################
@@ -194,9 +184,11 @@ class CSM(TestProperty):
def __init__(self, _):
TestProperty.__init__(
self,
- "compute_santizer_memcheck",
+ "compute_sanitizer_memcheck",
"debug with compute sanitizer memcheck",
- [("CMAKE_BUILD_TYPE", "Debug"), ("EKAT_ENABLE_COMPUTE_SANITIZER", "True")],
+ [("CMAKE_BUILD_TYPE", "Debug"),
+ ("EKAT_ENABLE_COMPUTE_SANITIZER", "True"),
+ ("EKAT_COMPUTE_SANITIZER_OPTIONS", "--tool=memcheck")],
uses_baselines=False,
on_by_default=False,
default_test_len="short"
@@ -209,11 +201,11 @@ class CSR(TestProperty):
def __init__(self, _):
TestProperty.__init__(
self,
- "compute_santizer_racecheck",
+ "compute_sanitizer_racecheck",
"debug with compute sanitizer racecheck",
[("CMAKE_BUILD_TYPE", "Debug"),
("EKAT_ENABLE_COMPUTE_SANITIZER", "True"),
- ("EKAT_COMPUTE_SANITIZER_OPTIONS", "'--tool racecheck'")],
+ ("EKAT_COMPUTE_SANITIZER_OPTIONS", "'--tool=racecheck --racecheck-detect-level=error'")],
uses_baselines=False,
on_by_default=False,
default_test_len="short"
@@ -226,11 +218,11 @@ class CSI(TestProperty):
def __init__(self, _):
TestProperty.__init__(
self,
- "compute_santizer_initcheck",
+ "compute_sanitizer_initcheck",
"debug with compute sanitizer initcheck",
[("CMAKE_BUILD_TYPE", "Debug"),
("EKAT_ENABLE_COMPUTE_SANITIZER", "True"),
- ("EKAT_COMPUTE_SANITIZER_OPTIONS", "'--tool initcheck'")],
+ ("EKAT_COMPUTE_SANITIZER_OPTIONS", "--tool=initcheck")],
uses_baselines=False,
on_by_default=False,
default_test_len="short"
@@ -243,11 +235,11 @@ class CSS(TestProperty):
def __init__(self, _):
TestProperty.__init__(
self,
- "compute_santizer_synccheck",
+ "compute_sanitizer_synccheck",
"debug with compute sanitizer synccheck",
[("CMAKE_BUILD_TYPE", "Debug"),
("EKAT_ENABLE_COMPUTE_SANITIZER", "True"),
- ("EKAT_COMPUTE_SANITIZER_OPTIONS", "'--tool synccheck'")],
+ ("EKAT_COMPUTE_SANITIZER_OPTIONS", "--tool=synccheck")],
uses_baselines=False,
on_by_default=False,
default_test_len="short"
@@ -363,12 +355,12 @@ def __init__(self, cxx_compiler=None, f90_compiler=None, c_compiler=None,
self._root_dir = Path(__file__).resolve().parent.parent
else:
self._root_dir = Path(self._root_dir).resolve()
- expect(self._root_dir.is_dir() and self._root_dir.parts()[-2:] == ('scream', 'components'),
+ expect(self._root_dir.is_dir() and self._root_dir.parts()[-2:] == ("scream", "components"),
f"Bad root-dir '{self._root_dir}', should be: $scream_repo/components/eamxx")
# Make our test objects! Change mem to default mem-check test for current platform
if "mem" in tests:
- tests[tests.index("mem")] = "cmc" if self.on_cuda() else "valg"
+ tests[tests.index("mem")] = "csm" if self.on_cuda() else "valg"
self._tests = test_factory(tests, self)
if self._work_dir is not None:
@@ -508,7 +500,7 @@ def __init__(self, cxx_compiler=None, f90_compiler=None, c_compiler=None,
else:
if self._baseline_dir == "AUTO":
- expect (self._baseline_ref is None or self._baseline_ref == 'origin/master',
+ expect (self._baseline_ref is None or self._baseline_ref == "origin/master",
"Do not specify `-b XYZ` when using `--baseline-dir AUTO`. The AUTO baseline dir should be used for the master baselines only.\n"
" `-b XYZ` needs to probably build baselines for ref XYZ. However, no baselines will be built if the dir already contains baselines.\n")
# We treat the "AUTO" string as a request for automatic baseline dir.
@@ -793,7 +785,7 @@ def create_ctest_resource_file(self, test, build_dir):
data = {}
# This is the only version numbering supported by ctest, so far
- data['version'] = {"major":1,"minor":0}
+ data["version"] = {"major":1,"minor":0}
# We add leading zeroes to ensure that ids will sort correctly
# both alphabetically and numerically
@@ -802,9 +794,9 @@ def create_ctest_resource_file(self, test, build_dir):
devices.append({"id":f"{res_id:05d}"})
# Add resource groups
- data['local'] = [{"devices":devices}]
+ data["local"] = [{"devices":devices}]
- with (build_dir/"ctest_resource_file.json").open('w', encoding="utf-8") as outfile:
+ with (build_dir/"ctest_resource_file.json").open("w", encoding="utf-8") as outfile:
json.dump(data,outfile,indent=2)
return (end-start)+1
@@ -849,6 +841,7 @@ def generate_ctest_config(self, cmake_config, extra_configs, test):
# taskset range even though the ctest script is also running the tests
if self._parallel:
start, end = self.get_taskset_range(test)
+ result = result.replace("'", r"'\''") # handle nested quoting
result = f"taskset -c {start}-{end} sh -c '{result}'"
return result
@@ -1037,7 +1030,7 @@ def get_last_ctest_file(self,test,phase):
# of tie, $IDX as tiebreaker
for file in files:
file_no_path = file.name
- tokens = re.split(r'_|-|\.',str(file_no_path))
+ tokens = re.split(r"_|-|\.",str(file_no_path))
if latest is None:
latest = file
curr_tag = int(tokens[1])
diff --git a/components/eamxx/scripts/update-all-pip b/components/eamxx/scripts/update-all-pip
old mode 100644
new mode 100755
diff --git a/components/eamxx/src/CMakeLists.txt b/components/eamxx/src/CMakeLists.txt
index 59a5d6646443..9568d3cf85ce 100644
--- a/components/eamxx/src/CMakeLists.txt
+++ b/components/eamxx/src/CMakeLists.txt
@@ -4,6 +4,9 @@ add_subdirectory(dynamics)
add_subdirectory(physics)
add_subdirectory(diagnostics)
add_subdirectory(control)
+if ("${SCREAM_DYNAMICS_DYCORE}" STREQUAL "HOMME")
+ add_subdirectory(doubly-periodic)
+endif()
if (PROJECT_NAME STREQUAL "E3SM")
add_subdirectory(mct_coupling)
endif()
diff --git a/components/eamxx/src/control/CMakeLists.txt b/components/eamxx/src/control/CMakeLists.txt
index 281794f95ab9..19ce3b6dc318 100644
--- a/components/eamxx/src/control/CMakeLists.txt
+++ b/components/eamxx/src/control/CMakeLists.txt
@@ -1,14 +1,15 @@
set(SCREAM_CONTROL_SOURCES
atmosphere_driver.cpp
- fvphyshack.cpp
atmosphere_surface_coupling_importer.cpp
atmosphere_surface_coupling_exporter.cpp
+ intensive_observation_period.cpp
surface_coupling_utils.cpp
)
set(SCREAM_CONTROL_HEADERS
atmosphere_driver.hpp
atmosphere_surface_coupling.hpp
+ intensive_observation_period.hpp
surface_coupling_utils.hpp
)
diff --git a/components/eamxx/src/control/atmosphere_driver.cpp b/components/eamxx/src/control/atmosphere_driver.cpp
index 0afb7a6328b8..96adc56127d4 100644
--- a/components/eamxx/src/control/atmosphere_driver.cpp
+++ b/components/eamxx/src/control/atmosphere_driver.cpp
@@ -2,6 +2,8 @@
#include "control/atmosphere_surface_coupling_importer.hpp"
#include "control/atmosphere_surface_coupling_exporter.hpp"
+#include "physics/share/physics_constants.hpp"
+
#include "share/atm_process/atmosphere_process_group.hpp"
#include "share/atm_process/atmosphere_process_dag.hpp"
#include "share/field/field_utils.hpp"
@@ -22,9 +24,14 @@
// find blocks that eventually should be removed in favor of a design that
// accounts for pg2. Some blocks may turn out to be unnecessary, and I simply
// didn't realize I could do without the workaround.
-#include "control/fvphyshack.hpp"
+#include "share/util/eamxx_fv_phys_rrtmgp_active_gases_workaround.hpp"
+
+#ifndef SCREAM_CIME_BUILD
+#include <unistd.h>
+#endif
#include
+#include
namespace scream {
@@ -123,17 +130,6 @@ set_params(const ekat::ParameterList& atm_params)
create_logger ();
m_ad_status |= s_params_set;
-
-#ifdef SCREAM_CIME_BUILD
- const auto pg_type = "PG2";
- fvphyshack = m_atm_params.sublist("grids_manager").get("physics_grid_type") == pg_type;
- if (fvphyshack) {
- // See the [rrtmgp active gases] note in dynamics/homme/atmosphere_dynamics_fv_phys.cpp.
- fv_phys_rrtmgp_active_gases_init(m_atm_params);
- }
-#else
- fvphyshack = false;
-#endif
}
void AtmosphereDriver::
@@ -168,6 +164,49 @@ init_time_stamps (const util::TimeStamp& run_t0, const util::TimeStamp& case_t0)
m_case_t0 = case_t0;
}
+
+
+void AtmosphereDriver::
+setup_intensive_observation_period ()
+{
+ // At this point, must have comm, params, initialized timestamps, and grids created.
+ check_ad_status(s_comm_set | s_params_set | s_ts_inited | s_grids_created);
+
+ // Check to make sure iop is not already initialized
+ EKAT_REQUIRE_MSG(not m_intensive_observation_period, "Error! setup_intensive_observation_period() is "
+ "called, but IOP already set up.\n");
+
+ // This function should only be called if we are enabling IOP
+ const bool enable_iop =
+ m_atm_params.sublist("driver_options").get("enable_intensive_observation_period", false);
+ EKAT_REQUIRE_MSG(enable_iop, "Error! setup_intensive_observation_period() is called, but "
+ "enable_intensive_observation_period=false "
+ "in driver_options parameters.\n");
+
+ // Params must include intensive_observation_period_options sublist.
+ const auto iop_sublist_exists = m_atm_params.isSublist("intensive_observation_period_options");
+ EKAT_REQUIRE_MSG(iop_sublist_exists,
+ "Error! setup_intensive_observation_period() is called, but no intensive_observation_period_options "
+ "defined in parameters.\n");
+
+ const auto iop_params = m_atm_params.sublist("intensive_observation_period_options");
+ const auto phys_grid = m_grids_manager->get_grid("Physics");
+ const auto nlevs = phys_grid->get_num_vertical_levels();
+ const auto hyam = phys_grid->get_geometry_data("hyam");
+ const auto hybm = phys_grid->get_geometry_data("hybm");
+
+ m_intensive_observation_period =
+ std::make_shared<IntensiveObservationPeriod>(m_atm_comm,
+ iop_params,
+ m_run_t0,
+ nlevs,
+ hyam,
+ hybm);
+
+ auto dx_short_f = phys_grid->get_geometry_data("dx_short");
+ m_intensive_observation_period->set_grid_spacing(dx_short_f.get_view()());
+}
+
void AtmosphereDriver::create_atm_processes()
{
m_atm_logger->info("[EAMxx] create_atm_processes ...");
@@ -213,9 +252,12 @@ void AtmosphereDriver::create_grids()
const auto& casename = ic_pl.get("restart_casename");
auto filename = find_filename_in_rpointer (casename,true,m_atm_comm,m_run_t0);
gm_params.set("ic_filename", filename);
+ m_atm_params.sublist("provenance").set("initial_conditions_file",filename);
} else if (ic_pl.isParameter("Filename")) {
// Initial run, if an IC file is present, pass it.
- gm_params.set("ic_filename", ic_pl.get("Filename"));
+ auto filename = ic_pl.get("Filename");
+ gm_params.set("ic_filename", filename);
+ m_atm_params.sublist("provenance").set("initial_conditions_file",filename);
}
m_atm_logger->debug(" [EAMxx] Creating grid manager '" + gm_type + "' ...");
@@ -229,6 +271,14 @@ void AtmosphereDriver::create_grids()
m_atm_logger->debug(" [EAMxx] Grids created.");
+ // If TMS process is enabled, SHOC needs to know to request tms' surface drag coefficient
+ // as a required field during the set_grid() call below, but SHOC does not have knowledge
+ // of other processes. The driver needs to propagate this information to SHOC.
+ if(m_atm_process_group->has_process("tms") &&
+ m_atm_process_group->has_process("shoc")) {
+ setup_shoc_tms_links();
+ }
+
// Set the grids in the processes. Do this by passing the grids manager.
// Each process will grab what they need
m_atm_process_group->set_grids(m_grids_manager);
@@ -288,6 +338,10 @@ void AtmosphereDriver::setup_surface_coupling_processes () const
std::shared_ptr<SurfaceCouplingImporter> importer = std::dynamic_pointer_cast<SurfaceCouplingImporter>(atm_proc);
importer->setup_surface_coupling_data(*m_surface_coupling_import_data_manager);
+
+ if (m_intensive_observation_period) {
+ importer->set_intensive_observation_period(m_intensive_observation_period);
+ }
}
if (atm_proc->type() == AtmosphereProcessType::SurfaceCouplingExporter) {
exporter_found = true;
@@ -314,7 +368,7 @@ void AtmosphereDriver::setup_surface_coupling_processes () const
}
}
-void AtmosphereDriver::reset_accummulated_fields ()
+void AtmosphereDriver::reset_accumulated_fields ()
{
constexpr Real zero = 0;
for (auto fm_it : m_field_mgrs) {
@@ -346,32 +400,20 @@ void AtmosphereDriver::setup_column_conservation_checks ()
// Get fields needed to run the mass and energy conservation checks. Require that
// all fields exist.
- const auto pseudo_density_ptr = phys_field_mgr->get_field_ptr("pseudo_density");
- const auto ps_ptr = phys_field_mgr->get_field_ptr("ps");
- const auto phis_ptr = phys_field_mgr->get_field_ptr("phis");
- const auto horiz_winds_ptr = phys_field_mgr->get_field_ptr("horiz_winds");
- const auto T_mid_ptr = phys_field_mgr->get_field_ptr("T_mid");
- const auto qv_ptr = phys_field_mgr->get_field_ptr("qv");
- const auto qc_ptr = phys_field_mgr->get_field_ptr("qc");
- const auto qr_ptr = phys_field_mgr->get_field_ptr("qr");
- const auto qi_ptr = phys_field_mgr->get_field_ptr("qi");
- const auto vapor_flux_ptr = phys_field_mgr->get_field_ptr("vapor_flux");
- const auto water_flux_ptr = phys_field_mgr->get_field_ptr("water_flux");
- const auto ice_flux_ptr = phys_field_mgr->get_field_ptr("ice_flux");
- const auto heat_flux_ptr = phys_field_mgr->get_field_ptr("heat_flux");
- EKAT_REQUIRE_MSG(pseudo_density_ptr != nullptr &&
- ps_ptr != nullptr &&
- phis_ptr != nullptr &&
- horiz_winds_ptr != nullptr &&
- T_mid_ptr != nullptr &&
- qv_ptr != nullptr &&
- qc_ptr != nullptr &&
- qr_ptr != nullptr &&
- qi_ptr != nullptr &&
- vapor_flux_ptr != nullptr &&
- water_flux_ptr != nullptr &&
- ice_flux_ptr != nullptr &&
- heat_flux_ptr != nullptr,
+ EKAT_REQUIRE_MSG (
+ phys_field_mgr->has_field("pseudo_density") and
+ phys_field_mgr->has_field("ps") and
+ phys_field_mgr->has_field("phis") and
+ phys_field_mgr->has_field("horiz_winds") and
+ phys_field_mgr->has_field("T_mid") and
+ phys_field_mgr->has_field("qv") and
+ phys_field_mgr->has_field("qc") and
+ phys_field_mgr->has_field("qr") and
+ phys_field_mgr->has_field("qi") and
+ phys_field_mgr->has_field("vapor_flux") and
+ phys_field_mgr->has_field("water_flux") and
+ phys_field_mgr->has_field("ice_flux") and
+ phys_field_mgr->has_field("heat_flux"),
"Error! enable_column_conservation_checks=true for some atm process, "
"but not all fields needed for this check exist in the FieldManager.\n");
@@ -381,14 +423,28 @@ void AtmosphereDriver::setup_column_conservation_checks ()
const Real energy_error_tol = driver_options_pl.get("energy_column_conservation_error_tolerance", 1e-14);
// Create energy checker
+ const auto pseudo_density = phys_field_mgr->get_field("pseudo_density");
+ const auto ps = phys_field_mgr->get_field("ps");
+ const auto phis = phys_field_mgr->get_field("phis");
+ const auto horiz_winds = phys_field_mgr->get_field("horiz_winds");
+ const auto T_mid = phys_field_mgr->get_field("T_mid");
+ const auto qv = phys_field_mgr->get_field("qv");
+ const auto qc = phys_field_mgr->get_field("qc");
+ const auto qr = phys_field_mgr->get_field("qr");
+ const auto qi = phys_field_mgr->get_field("qi");
+ const auto vapor_flux = phys_field_mgr->get_field("vapor_flux");
+ const auto water_flux = phys_field_mgr->get_field("water_flux");
+ const auto ice_flux = phys_field_mgr->get_field("ice_flux");
+ const auto heat_flux = phys_field_mgr->get_field("heat_flux");
+
auto conservation_check =
std::make_shared<MassAndEnergyColumnConservationCheck>(phys_grid,
mass_error_tol, energy_error_tol,
- pseudo_density_ptr, ps_ptr, phis_ptr,
- horiz_winds_ptr, T_mid_ptr, qv_ptr,
- qc_ptr, qr_ptr, qi_ptr,
- vapor_flux_ptr, water_flux_ptr,
- ice_flux_ptr, heat_flux_ptr);
+ pseudo_density, ps, phis,
+ horiz_winds, T_mid, qv,
+ qc, qr, qi,
+ vapor_flux, water_flux,
+ ice_flux, heat_flux);
//Get fail handling type from driver_option parameters.
const std::string fail_handling_type_str =
@@ -409,6 +465,37 @@ void AtmosphereDriver::setup_column_conservation_checks ()
m_atm_process_group->setup_column_conservation_checks(conservation_check, fail_handling_type);
}
+void AtmosphereDriver::setup_shoc_tms_links ()
+{
+ EKAT_REQUIRE_MSG(m_atm_process_group->has_process("tms"),
+ "Error! Attempting to setup link between "
+ "SHOC and TMS, but TMS is not defined.\n");
+ EKAT_REQUIRE_MSG(m_atm_process_group->has_process("shoc"),
+ "Error! Attempting to setup link between "
+ "SHOC and TMS, but SHOC is not defined.\n");
+
+ auto shoc_process = m_atm_process_group->get_process_nonconst("shoc");
+ shoc_process->get_params().set("apply_tms", true);
+}
+
+void AtmosphereDriver::add_additional_column_data_to_property_checks () {
+ // Get list of additional data fields from driver_options parameters.
+ // If no fields given, return.
+ using vos_t = std::vector<std::string>;
+ auto additional_data_fields = m_atm_params.sublist("driver_options").get<vos_t>("property_check_data_fields",
+ {"NONE"});
+ if (additional_data_fields == vos_t{"NONE"}) return;
+
+ // Add requested fields to property checks
+ auto phys_field_mgr = m_field_mgrs[m_grids_manager->get_grid("Physics")->name()];
+ for (auto fname : additional_data_fields) {
+ EKAT_REQUIRE_MSG(phys_field_mgr->has_field(fname), "Error! The field "+fname+" is requested for property check output "
+ "but does not exist in the physics field manager.\n");
+
+ m_atm_process_group->add_additional_data_fields_to_property_checks(phys_field_mgr->get_field(fname));
+ }
+}
+
void AtmosphereDriver::create_fields()
{
m_atm_logger->info("[EAMxx] create_fields ...");
@@ -468,20 +555,19 @@ void AtmosphereDriver::create_fields()
// Loop over all fields in group src_name on grid src_grid.
for (const auto& fname : rel_info->m_fields_names) {
// Get field on src_grid
- auto f = rel_fm->get_field_ptr(fname);
+ const auto& rel_fid = rel_fm->get_field_id(fname);
// Build a FieldRequest for the same field on greq's grid,
// and add it to the group of this request
if (fvphyshack) {
- const auto& sfid = f->get_header().get_identifier();
- auto dims = sfid.get_layout().dims();
+ auto dims = rel_fid.get_layout().dims();
dims[0] = fm->get_grid()->get_num_local_dofs();
- FieldLayout fl(sfid.get_layout().tags(), dims);
- FieldIdentifier fid(sfid.name(), fl, sfid.get_units(), req.grid);
+ FieldLayout fl(rel_fid.get_layout().tags(), dims);
+ FieldIdentifier fid(rel_fid.name(), fl, rel_fid.get_units(), req.grid);
FieldRequest freq(fid,req.name,req.pack_size);
fm->register_field(freq);
} else {
- const auto fid = r->create_tgt_fid(f->get_header().get_identifier());
+ const auto fid = r->create_tgt_fid(rel_fid);
FieldRequest freq(fid,req.name,req.pack_size);
fm->register_field(freq);
}
@@ -594,9 +680,10 @@ void AtmosphereDriver::initialize_output_managers () {
auto& io_params = m_atm_params.sublist("Scorpio");
- // IMPORTANT: create model restart OutputManager first! This OM will be able to
- // retrieve the original simulation start date, which we later pass to the
- // OM of all the requested outputs.
+ // IMPORTANT: create model restart OutputManager first! This OM will be in charge
+ // of creating rpointer.atm, while other OM's will simply append to it.
+ // If this assumption is not verified, we must always append to rpointer, which
+ // can make the rpointer file a bit confusing.
// Check for model restart output
ekat::ParameterList checkpoint_params;
@@ -604,8 +691,8 @@ void AtmosphereDriver::initialize_output_managers () {
checkpoint_params.set("Frequency",-1);
if (io_params.isSublist("model_restart")) {
auto restart_pl = io_params.sublist("model_restart");
- // Signal that this is not a normal output, but the model restart one
m_output_managers.emplace_back();
+ restart_pl.sublist("provenance") = m_atm_params.sublist("provenance");
auto& om = m_output_managers.back();
if (fvphyshack) {
// Don't save CGLL fields from ICs to the restart file.
@@ -646,6 +733,7 @@ void AtmosphereDriver::initialize_output_managers () {
params.set("filename_prefix",m_casename+".scream.h"+std::to_string(om_tally));
om_tally++;
}
+ params.sublist("provenance") = m_atm_params.sublist("provenance");
// Add a new output manager
m_output_managers.emplace_back();
auto& om = m_output_managers.back();
@@ -660,6 +748,43 @@ void AtmosphereDriver::initialize_output_managers () {
m_atm_logger->info("[EAMxx] initialize_output_managers ... done!");
}
+void AtmosphereDriver::
+set_provenance_data (std::string caseid,
+ std::string hostname,
+ std::string username)
+{
+#ifdef SCREAM_CIME_BUILD
+ // Check the inputs are valid
+ EKAT_REQUIRE_MSG (caseid!="", "Error! Invalid case id: " + caseid + "\n");
+ EKAT_REQUIRE_MSG (hostname!="", "Error! Invalid hostname: " + hostname + "\n");
+ EKAT_REQUIRE_MSG (username!="", "Error! Invalid username: " + username + "\n");
+#else
+ caseid = "EAMxx standalone";
+ char* user = new char[32];
+ char* host = new char[256];
+ int err;
+ err = gethostname(host,255);
+ if (err==0) {
+ hostname = std::string(host);
+ } else {
+ hostname = "UNKNOWN";
+ }
+ err = getlogin_r(user,31);
+ if (err==0) {
+ username = std::string(user);
+ } else {
+ username = "UNKNOWN";
+ }
+ delete[] user;
+ delete[] host;
+#endif
+ auto& provenance = m_atm_params.sublist("provenance");
+ provenance.set("caseid",caseid);
+ provenance.set("hostname",hostname);
+ provenance.set("username",username);
+ provenance.set("version",std::string(EAMXX_GIT_VERSION));
+}
+
void AtmosphereDriver::
initialize_fields ()
{
@@ -669,10 +794,8 @@ initialize_fields ()
start_timer("EAMxx::init");
start_timer("EAMxx::initialize_fields");
-#ifdef SCREAM_CIME_BUILD
- // See the [rrtmgp active gases] note in dynamics/homme/atmosphere_dynamics_fv_phys.cpp.
+ // See the [rrtmgp active gases] note in share/util/eamxx_fv_phys_rrtmgp_active_gases_workaround.hpp
if (fvphyshack) fv_phys_rrtmgp_active_gases_set_restart(m_case_t0 < m_run_t0);
-#endif
// See if we need to print a DAG. We do this first, cause if any input
// field is missing from the initial condition file, an error will be thrown.
@@ -875,33 +998,31 @@ void AtmosphereDriver::set_initial_conditions ()
const auto& fname = fid.name();
const auto& grid_name = fid.get_grid_name();
- // First, check if the input file contains constant values for some of the fields
if (ic_pl.isParameter(fname)) {
- // The user provided a constant value for this field. Simply use that.
+ // This is the case that the user provided an initialization
+ // for this field in the parameter file.
if (ic_pl.isType<double>(fname) or ic_pl.isType<std::vector<double>>(fname)) {
+ // Initial condition is a constant
initialize_constant_field(fid, ic_pl);
- fields_inited[grid_name].push_back(fname);
// Note: f is const, so we can't modify the tracking. So get the same field from the fm
auto f_nonconst = m_field_mgrs.at(grid_name)->get_field(fid.name());
f_nonconst.get_header().get_tracking().update_time_stamp(m_current_ts);
} else if (ic_pl.isType<std::string>(fname)) {
+ // Initial condition is a string
ic_fields_to_copy.push_back(fid);
- fields_inited[grid_name].push_back(fname);
} else {
- EKAT_REQUIRE_MSG (false, "ERROR: invalid assignment for variable " + fname + ", only scalar double or string, or vector double arguments are allowed");
+ EKAT_ERROR_MSG ("ERROR: invalid assignment for variable " + fname + ", only scalar "
+ "double or string, or vector double arguments are allowed");
}
- } else if (not (fvphyshack and grid_name == "Physics PG2")) {
- auto& this_grid_ic_fnames = ic_fields_names[grid_name];
+ fields_inited[grid_name].push_back(fname);
+ } else if (fname == "phis" or fname == "sgh30") {
+ // Both phis and sgh30 need to be loaded from the topography file
auto& this_grid_topo_file_fnames = topography_file_fields_names[grid_name];
auto& this_grid_topo_eamxx_fnames = topography_eamxx_fields_names[grid_name];
- auto c = f.get_header().get_children();
-
if (fname == "phis") {
- // Topography (phis) is a special case that should
- // be loaded from the topography file, where the
- // eamxx field "phis" corresponds to the name
+ // The eamxx field "phis" corresponds to the name
// "PHIS_d" on the GLL and Point grids and "PHIS"
// on the PG2 grid in the topography file.
if (grid_name == "Physics PG2") {
@@ -912,11 +1033,29 @@ void AtmosphereDriver::set_initial_conditions ()
} else {
EKAT_ERROR_MSG ("Error! Requesting phis on an unknown grid: " + grid_name + ".\n");
}
- this_grid_topo_eamxx_fnames.push_back("phis");
- } else if (c.size()==0) {
+ this_grid_topo_eamxx_fnames.push_back(fname);
+ fields_inited[grid_name].push_back(fname);
+ } else if (fname == "sgh30") {
+ // The eamxx field "sgh30" is called "SGH30" in the
+ // topography file and is only available on the PG2 grid.
+ EKAT_ASSERT_MSG(grid_name == "Physics PG2",
+ "Error! Requesting sgh30 field on " + grid_name +
+ " topo file only has sgh30 for Physics PG2.\n");
+ topography_file_fields_names[grid_name].push_back("SGH30");
+ topography_eamxx_fields_names[grid_name].push_back(fname);
+ fields_inited[grid_name].push_back(fname);
+ }
+ } else if (not (fvphyshack and grid_name == "Physics PG2")) {
+ // The IC file is written for the GLL grid, so we only load
+ // fields from there. Any other input fields on the PG2 grid
+ // will be properly computed in the dynamics interface.
+ auto& this_grid_ic_fnames = ic_fields_names[grid_name];
+ auto c = f.get_header().get_children();
+ if (c.size()==0) {
// If this field is the parent of other subfields, we only read from file the subfields.
if (not ekat::contains(this_grid_ic_fnames,fname)) {
this_grid_ic_fnames.push_back(fname);
+ fields_inited[grid_name].push_back(fname);
}
} else if (fvphyshack and grid_name == "Physics GLL") {
// [CGLL ICs in pg2] I tried doing something like this in
@@ -930,10 +1069,10 @@ void AtmosphereDriver::set_initial_conditions ()
const auto& fname = fid.name();
if (ic_pl.isParameter(fname) and ic_pl.isType(fname)) {
initialize_constant_field(fid, ic_pl);
- fields_inited[grid_name].push_back(fname);
} else {
this_grid_ic_fnames.push_back(fname);
}
+ fields_inited[grid_name].push_back(fname);
}
}
}
@@ -990,6 +1129,22 @@ void AtmosphereDriver::set_initial_conditions ()
}
}
+ if (m_intensive_observation_period) {
+ // For runs with IOP, call to setup io grids and lat
+ // lon information needed for reading from file
+ for (const auto& it : m_field_mgrs) {
+ const auto& grid_name = it.first;
+ if (ic_fields_names[grid_name].size() > 0) {
+ const auto& file_name = grid_name == "Physics GLL"
+ ?
+ ic_pl.get("Filename")
+ :
+ ic_pl.get("topography_filename");
+ m_intensive_observation_period->setup_io_info(file_name, it.second->get_grid());
+ }
+ }
+ }
+
// If a filename is specified, use it to load inputs on all grids
if (ic_pl.isParameter("Filename")) {
// Now loop over all grids, and load from file the needed fields on each grid (if any).
@@ -997,7 +1152,16 @@ void AtmosphereDriver::set_initial_conditions ()
m_atm_logger->info(" [EAMxx] IC filename: " + file_name);
for (const auto& it : m_field_mgrs) {
const auto& grid_name = it.first;
- read_fields_from_file (ic_fields_names[grid_name],it.second->get_grid(),file_name,m_current_ts);
+ if (not m_intensive_observation_period) {
+ read_fields_from_file (ic_fields_names[grid_name],it.second->get_grid(),file_name,m_current_ts);
+ } else {
+ // For IOP enabled, we load from file and copy data from the closest
+ // lat/lon column to every other column
+ m_intensive_observation_period->read_fields_from_file_for_iop(file_name,
+ ic_fields_names[grid_name],
+ m_current_ts,
+ it.second);
+ }
}
}
@@ -1063,20 +1227,31 @@ void AtmosphereDriver::set_initial_conditions ()
m_atm_logger->info(" filename: " + file_name);
for (const auto& it : m_field_mgrs) {
const auto& grid_name = it.first;
- // Topography files always use "ncol_d" for the GLL grid value of ncol.
- // To ensure we read in the correct value, we must change the name for that dimension
- auto io_grid = it.second->get_grid();
- if (grid_name=="Physics GLL") {
- using namespace ShortFieldTagsNames;
- auto grid = io_grid->clone(io_grid->name(),true);
- grid->reset_field_tag_name(COL,"ncol_d");
- io_grid = grid;
+ if (not m_intensive_observation_period) {
+ // Topography files always use "ncol_d" for the GLL grid value of ncol.
+ // To ensure we read in the correct value, we must change the name for that dimension
+ auto io_grid = it.second->get_grid();
+ if (grid_name=="Physics GLL") {
+ using namespace ShortFieldTagsNames;
+ auto grid = io_grid->clone(io_grid->name(),true);
+ grid->reset_field_tag_name(COL,"ncol_d");
+ io_grid = grid;
+ }
+ read_fields_from_file (topography_file_fields_names[grid_name],
+ topography_eamxx_fields_names[grid_name],
+ io_grid,file_name,m_current_ts);
+ } else {
+ // For IOP enabled, we load from file and copy data from the closest
+ // lat/lon column to every other column
+ m_intensive_observation_period->read_fields_from_file_for_iop(file_name,
+ topography_file_fields_names[grid_name],
+ topography_eamxx_fields_names[grid_name],
+ m_current_ts,
+ it.second);
}
-
- read_fields_from_file (topography_file_fields_names[grid_name],
- topography_eamxx_fields_names[grid_name],
- io_grid,file_name,m_current_ts);
}
+ // Store in provenance list, for later usage in output file metadata
+ m_atm_params.sublist("provenance").set("topography_file",file_name);
m_atm_logger->debug(" [EAMxx] Processing topography from file ... done!");
} else {
// Ensure that, if no topography_filename is given, no
@@ -1089,6 +1264,88 @@ void AtmosphereDriver::set_initial_conditions ()
"topography_filename or entry matching the field name "
"was given in IC parameters.\n");
}
+
+ m_atm_params.sublist("provenance").set("topography_file","NONE");
+ }
+
+ if (m_intensive_observation_period) {
+ // Load IOP data file data for initial time stamp
+ m_intensive_observation_period->read_iop_file_data(m_current_ts);
+
+ // Now that ICs are processed, set appropriate fields using IOP file data.
+ // Since ICs are loaded on GLL grid, we set those fields only and dynamics
+ // will take care of the rest (for PG2 case).
+ if (m_field_mgrs.count("Physics GLL") > 0) {
+ const auto& fm = m_field_mgrs.at("Physics GLL");
+ m_intensive_observation_period->set_fields_from_iop_data(fm);
+ }
+ }
+
+ // Compute IC perturbations of GLL fields (if requested)
+ using vos = std::vector;
+ const auto perturbed_fields = ic_pl.get("perturbed_fields", {});
+ const auto num_perturb_fields = perturbed_fields.size();
+ if (num_perturb_fields > 0) {
+ m_atm_logger->info(" [EAMxx] Adding random perturbation to ICs ...");
+
+ EKAT_REQUIRE_MSG(m_field_mgrs.count("Physics GLL") > 0,
+ "Error! Random perturbation can only be applied to fields on "
+ "the GLL grid, but no Physics GLL FieldManager was defined.\n");
+ const auto& fm = m_field_mgrs.at("Physics GLL");
+
+ // Setup RNG. There are two relevant params: generate_perturbation_random_seed and
+ // perturbation_random_seed. We have 3 cases:
+ // 1. Parameter generate_perturbation_random_seed is set true, assert perturbation_random_seed
+ // is not given and generate a random seed using std::rand() to get an integer random value.
+ // 2. Parameter perturbation_random_seed is given, use this value for the seed.
+ // 3. Parameter perturbation_random_seed is not given and generate_perturbation_random_seed is
+ // not given, use 0 as the random seed.
+ // Case 3 is considered the default (using seed=0).
+ int seed;
+ if (ic_pl.get("generate_perturbation_random_seed", false)) {
+ EKAT_REQUIRE_MSG(not ic_pl.isParameter("perturbation_random_seed"),
+ "Error! Param generate_perturbation_random_seed=true, and "
+ "a perturbation_random_seed is given. Only one of these can "
+ "be defined for a simulation.\n");
+ std::srand(std::time(nullptr));
+ seed = std::rand();
+ } else {
+ seed = ic_pl.get("perturbation_random_seed", 0);
+ }
+ m_atm_logger->info(" For IC perturbation, random seed: "+std::to_string(seed));
+ std::mt19937_64 engine(seed);
+
+ // Get perturbation limit. Defines a range [1-perturbation_limit, 1+perturbation_limit]
+ // for which the perturbation value will be randomly generated from. Create a uniform
+ // distribution for this range.
+ const auto perturbation_limit = ic_pl.get("perturbation_limit", 0.001);
+ std::uniform_real_distribution pdf(1-perturbation_limit, 1+perturbation_limit);
+
+ // Define a level mask using reference pressure and the perturbation_minimum_pressure parameter.
+ // This mask dictates which levels we apply a perturbation.
+ const auto gll_grid = m_grids_manager->get_grid("Physics GLL");
+ const auto hyam_h = gll_grid->get_geometry_data("hyam").get_view();
+ const auto hybm_h = gll_grid->get_geometry_data("hybm").get_view();
+ constexpr auto ps0 = physics::Constants::P0;
+ const auto min_pressure = ic_pl.get("perturbation_minimum_pressure", 1050.0);
+ auto pressure_mask = [&] (const int ilev) {
+ const auto pref = (hyam_h(ilev)*ps0 + hybm_h(ilev)*ps0)/100; // Reference pressure ps0 is in Pa, convert to millibar
+ return pref > min_pressure;
+ };
+
+ // Loop through fields and apply perturbation.
+ for (size_t f=0; fget_grid()->name()], fname),
+ "Error! Attempting to apply perturbation to field not in initial_conditions.\n"
+ " - Field: "+fname+"\n"
+ " - Grid: "+fm->get_grid()->name()+"\n");
+
+ auto field = fm->get_field(fname);
+ perturb(field, engine, pdf, seed, pressure_mask, fm->get_grid()->get_dofs_gids());
+ }
+
+ m_atm_logger->info(" [EAMxx] Adding random perturbation to ICs ... done!");
}
m_atm_logger->info(" [EAMxx] set_initial_conditions ... done!");
@@ -1125,6 +1382,7 @@ read_fields_from_file (const std::vector& field_names_nc,
}
AtmosphereInput ic_reader(file_name,grid,fields);
+ ic_reader.set_logger(m_atm_logger);
ic_reader.read_variables();
ic_reader.finalize();
@@ -1163,6 +1421,7 @@ read_fields_from_file (const std::vector& field_names,
}
AtmosphereInput ic_reader(file_name,grid,fields);
+ ic_reader.set_logger(m_atm_logger);
ic_reader.read_variables();
ic_reader.finalize();
@@ -1258,6 +1517,9 @@ void AtmosphereDriver::initialize_atm_procs ()
m_atm_process_group->add_postcondition_nan_checks();
}
+ // Add additional column data fields to pre/postcondition checks (if they exist)
+ add_additional_column_data_to_property_checks();
+
if (fvphyshack) {
// [CGLL ICs in pg2] See related notes in atmosphere_dynamics.cpp.
const auto gn = "Physics GLL";
@@ -1282,6 +1544,7 @@ initialize (const ekat::Comm& atm_comm,
{
set_comm(atm_comm);
set_params(params);
+ set_provenance_data ();
init_scorpio ();
@@ -1291,37 +1554,65 @@ initialize (const ekat::Comm& atm_comm,
create_grids ();
+ const bool enable_iop =
+ m_atm_params.sublist("driver_options").get("enable_intensive_observation_period", false);
+ if (enable_iop) {
+ setup_intensive_observation_period ();
+ }
+
create_fields ();
initialize_fields ();
initialize_atm_procs ();
+ // Do this before init-ing the output managers,
+ // so the fields are valid if outputing at t=0
+ reset_accumulated_fields();
+
initialize_output_managers ();
}
void AtmosphereDriver::run (const int dt) {
start_timer("EAMxx::run");
- // Zero out accumulated fields
- reset_accummulated_fields();
-
// Make sure the end of the time step is after the current start_time
EKAT_REQUIRE_MSG (dt>0, "Error! Input time step must be positive.\n");
// Print current timestamp information
m_atm_logger->log(ekat::logger::LogLevel::info,
"Atmosphere step = " + std::to_string(m_current_ts.get_num_steps()) + "\n" +
- " model time = " + m_current_ts.get_date_string() + " " + m_current_ts.get_time_string() + "\n");
+ " model start-of-step time = " + m_current_ts.get_date_string() + " " + m_current_ts.get_time_string() + "\n");
+
+ // Reset accum fields to 0
+ // Note: at the 1st timestep this is redundant, since we did it at init,
+ // to ensure t=0 INSTANT output was correct. However, it's not a
+ // very expensive operation, so it's not worth the effort of the
+ // nano-opt of removing the call for the 1st timestep.
+ reset_accumulated_fields();
// The class AtmosphereProcessGroup will take care of dispatching arguments to
// the individual processes, which will be called in the correct order.
m_atm_process_group->run(dt);
+ // Some accumulated fields need to be divided by dt at the end of the atm step
+ for (auto fm_it : m_field_mgrs) {
+ const auto& fm = fm_it.second;
+ if (not fm->has_group("DIVIDE_BY_DT")) {
+ continue;
+ }
+
+ auto rescale_group = fm->get_field_group("DIVIDE_BY_DT");
+ for (auto f_it : rescale_group.m_fields) {
+ f_it.second->scale(Real(1) / dt);
+ }
+ }
+
// Update current time stamps
m_current_ts += dt;
// Update output streams
+ m_atm_logger->debug("[EAMxx::run] running output managers...");
for (auto& out_mgr : m_output_managers) {
out_mgr.run(m_current_ts);
}
@@ -1358,8 +1649,10 @@ void AtmosphereDriver::finalize ( /* inputs? */ ) {
m_output_managers.clear();
// Finalize, and then destroy all atmosphere processes
- m_atm_process_group->finalize( /* inputs ? */ );
- m_atm_process_group = nullptr;
+ if (m_atm_process_group.get()) {
+ m_atm_process_group->finalize( /* inputs ? */ );
+ m_atm_process_group = nullptr;
+ }
// Destroy the buffer manager
m_memory_buffer = nullptr;
@@ -1395,6 +1688,7 @@ void AtmosphereDriver::finalize ( /* inputs? */ ) {
m_atm_comm.all_reduce(&my_mem_usage,&max_mem_usage,1,MPI_MAX);
m_atm_logger->debug("[EAMxx::finalize] memory usage: " + std::to_string(max_mem_usage) + "MB");
#endif
+ m_atm_logger->flush();
m_ad_status = 0;
diff --git a/components/eamxx/src/control/atmosphere_driver.hpp b/components/eamxx/src/control/atmosphere_driver.hpp
index 0ba183cf74d3..bf694da3fb3e 100644
--- a/components/eamxx/src/control/atmosphere_driver.hpp
+++ b/components/eamxx/src/control/atmosphere_driver.hpp
@@ -2,6 +2,7 @@
#define SCREAM_ATMOSPHERE_DRIVER_HPP
#include "control/surface_coupling_utils.hpp"
+#include "control/intensive_observation_period.hpp"
#include "share/field/field_manager.hpp"
#include "share/grid/grids_manager.hpp"
#include "share/util/scream_time_stamp.hpp"
@@ -70,6 +71,9 @@ class AtmosphereDriver
// Set AD params
void init_scorpio (const int atm_id = 0);
+ // Setup IntensiveObservationPeriod
+ void setup_intensive_observation_period ();
+
// Create atm processes, without initializing them
void create_atm_processes ();
@@ -91,12 +95,24 @@ class AtmosphereDriver
void setup_surface_coupling_processes() const;
// Zero out precipitation flux
- void reset_accummulated_fields();
+ void reset_accumulated_fields();
// Create and add mass and energy conservation checks
// and pass to m_atm_process_group.
void setup_column_conservation_checks ();
+ // If TMS process exists, creates link to SHOC for applying
+ // tms' surface drag coefficient.
+ void setup_shoc_tms_links();
+
+ // Add column data to all pre/postcondition property checks
+ // for use in output.
+ void add_additional_column_data_to_property_checks ();
+
+ void set_provenance_data (std::string caseid = "",
+ std::string hostname = "",
+ std::string username = "");
+
// Load initial conditions for atm inputs
void initialize_fields ();
@@ -136,7 +152,6 @@ class AtmosphereDriver
// NOTE: if already finalized, this is a no-op
void finalize ();
- field_mgr_ptr get_ref_grid_field_mgr () const;
field_mgr_ptr get_field_mgr (const std::string& grid_name) const;
// Get atmosphere time stamp
@@ -192,6 +207,8 @@ class AtmosphereDriver
std::shared_ptr m_surface_coupling_import_data_manager;
std::shared_ptr m_surface_coupling_export_data_manager;
+ std::shared_ptr m_intensive_observation_period;
+
// This is the time stamp at the beginning of the time step.
util::TimeStamp m_current_ts;
diff --git a/components/eamxx/src/control/atmosphere_surface_coupling_exporter.cpp b/components/eamxx/src/control/atmosphere_surface_coupling_exporter.cpp
index ebbd993b4f94..dfdea16ae93f 100644
--- a/components/eamxx/src/control/atmosphere_surface_coupling_exporter.cpp
+++ b/components/eamxx/src/control/atmosphere_surface_coupling_exporter.cpp
@@ -64,12 +64,12 @@ void SurfaceCouplingExporter::set_grids(const std::shared_ptr("precip_ice_surf_mass", scalar2d_layout, kg/m2, grid_name);
create_helper_field("Sa_z", scalar2d_layout, grid_name);
- create_helper_field("Sa_u", scalar2d_layout, grid_name);
- create_helper_field("Sa_v", scalar2d_layout, grid_name);
- create_helper_field("Sa_tbot", scalar2d_layout, grid_name);
+ create_helper_field("Sa_u", scalar2d_layout, grid_name);
+ create_helper_field("Sa_v", scalar2d_layout, grid_name);
+ create_helper_field("Sa_tbot", scalar2d_layout, grid_name);
create_helper_field("Sa_ptem", scalar2d_layout, grid_name);
- create_helper_field("Sa_pbot", scalar2d_layout, grid_name);
- create_helper_field("Sa_shum", scalar2d_layout, grid_name);
+ create_helper_field("Sa_pbot", scalar2d_layout, grid_name);
+ create_helper_field("Sa_shum", scalar2d_layout, grid_name);
create_helper_field("Sa_dens", scalar2d_layout, grid_name);
create_helper_field("Sa_pslv", scalar2d_layout, grid_name);
create_helper_field("Faxa_rainl", scalar2d_layout, grid_name);
@@ -80,6 +80,7 @@ void SurfaceCouplingExporter::set_grids(const std::shared_ptr("fields");
+
+ EKAT_REQUIRE_MSG(export_from_file_params.isParameter("files"),
+ "Error! surface_coupling_exporter::init - prescribed_from_file does not have 'files' parameter.");
+ auto export_from_file_names = export_from_file_params.get("files");
+
+ bool are_fields_present = export_from_file_fields.size() > 0;
+ bool are_files_present = export_from_file_names.size() > 0;
+ EKAT_REQUIRE_MSG(are_files_present==are_fields_present,
+ "ERROR!! When prescribing export fields from file, you must provide both fields names and file name(s).\n");
+ bool do_export_from_file = are_fields_present and are_files_present;
+ if (do_export_from_file) {
+ vos_type export_from_file_reg_names;
+ export_from_file_reg_names = export_from_file_fields;
+ // Check if alternative names have been provided. This is useful for source data files
+ // that don't use the conventional EAMxx ATM->SRFC variable names.
+ auto alt_names = export_from_file_params.get("fields_alt_name",{});
+ for (auto entry : alt_names) {
+ ekat::strip(entry, ' '); // remove empty spaces in case user did `a : b`
+ auto tokens = ekat::split(entry,':');
+ EKAT_REQUIRE_MSG(tokens.size()==2,
+ "Error! surface_coupling_exporter::init - expected 'EAMxx_var_name:FILE_var_name' entry in fields_alt_names, got '" + entry + "' instead.\n");
+ auto it = ekat::find(export_from_file_fields,tokens[0]);
+ EKAT_REQUIRE_MSG(it!=export_from_file_fields.end(),
+ "Error! surface_coupling_exporter::init - LHS of entry '" + entry + "' in field_alt_names does not match a valid EAMxx field.\n");
+ // Make sure that a user hasn't accidentally copy/pasted
+ auto chk = ekat::find(export_from_file_reg_names,tokens[1]);
+ EKAT_REQUIRE_MSG(chk==export_from_file_reg_names.end(),
+ "Error! surface_coupling_exporter::init - RHS of entry '" + entry + "' in field_alt_names has already been used for a different field.\n");
+ auto idx = std::distance(export_from_file_fields.begin(),it);
+ export_from_file_reg_names[idx] = tokens[1];
+ }
+ // Construct a time interpolation object
+ m_time_interp = util::TimeInterpolation(m_grid,export_from_file_names);
+ for (size_t ii=0; ii("fields");
auto export_constant_values = export_constant_params.get("values");
EKAT_REQUIRE_MSG(export_constant_fields.size()==export_constant_values.size(),"Error! surface_coupling_exporter::init - prescribed_constants 'fields' and 'values' are not the same size");
- if (export_constant_fields.size()>0) {
+ bool are_fields_present = export_constant_fields.size() > 0;
+ if (are_fields_present) {
// Determine which fields need constants
- for (int i=0; i=0,"Error! surface_coupling_exporter - The number of exports derived from EAMxx < 0, something must have gone wrong in assigning the types of exports for all variables.");
// Perform initial export (if any are marked for export during initialization)
@@ -250,8 +318,13 @@ void SurfaceCouplingExporter::run_impl (const double dt)
void SurfaceCouplingExporter::do_export(const double dt, const bool called_during_initialization)
{
if (m_num_const_exports>0) {
- set_constant_exports(dt,called_during_initialization);
+ set_constant_exports();
+ }
+
+ if (m_num_from_file_exports>0) {
+ set_from_file_exports(dt);
}
+
if (m_num_from_model_exports>0) {
compute_eamxx_exports(dt,called_during_initialization);
}
@@ -260,17 +333,32 @@ void SurfaceCouplingExporter::do_export(const double dt, const bool called_durin
do_export_to_cpl(called_during_initialization);
}
// =========================================================================================
-void SurfaceCouplingExporter::set_constant_exports(const double dt, const bool called_during_initialization)
+void SurfaceCouplingExporter::set_constant_exports()
{
// Cycle through those fields that will be set to a constant value:
+ int num_set = 0; // Checker to make sure we got all the fields we wanted.
for (int i=0; i();
Kokkos::deep_copy(field_view,m_export_constants.at(fname));
+ num_set++;
}
}
-
+ // Gotta catch em all
+ EKAT_REQUIRE_MSG(num_set==m_num_const_exports,"ERROR! SurfaceCouplingExporter::set_constant_exports() - Number of fields set to a constant (" + std::to_string(num_set) +") doesn't match the number recorded at initialization (" + std::to_string(m_num_const_exports) +"). Something went wrong.");
+
+}
+// =========================================================================================
+void SurfaceCouplingExporter::set_from_file_exports(const int dt)
+{
+ // Perform interpolation on the data with the latest timestamp
+ auto ts = timestamp();
+ if (dt > 0) {
+ ts += dt;
+ }
+ m_time_interp.perform_time_interpolation(ts);
+
}
// =========================================================================================
// This compute_eamxx_exports routine handles all export variables that are derived from the EAMxx state.
@@ -370,7 +458,7 @@ void SurfaceCouplingExporter::compute_eamxx_exports(const double dt, const bool
// Currently only needed for Sa_z, Sa_dens and Sa_pslv
const bool calculate_z_vars = export_source(idx_Sa_z)==FROM_MODEL
|| export_source(idx_Sa_dens)==FROM_MODEL
- || export_source(idx_Sa_pslv)==FROM_MODEL;
+ || export_source(idx_Sa_pslv)==FROM_MODEL;
if (calculate_z_vars) {
PF::calculate_dz(team, pseudo_density_i, p_mid_i, T_mid_i, qv_i, dz_i);
team.team_barrier();
@@ -383,10 +471,10 @@ void SurfaceCouplingExporter::compute_eamxx_exports(const double dt, const bool
// Set the values in the helper fields which correspond to the exported variables
- if (export_source(idx_Sa_z)==FROM_MODEL) {
+ if (export_source(idx_Sa_z)==FROM_MODEL) {
// Assign to Sa_z
const auto s_z_mid_i = ekat::scalarize(z_mid_i);
- Sa_z(i) = s_z_mid_i(num_levs-1);
+ Sa_z(i) = s_z_mid_i(num_levs-1);
}
if (export_source(idx_Sa_u)==FROM_MODEL) {
@@ -411,9 +499,9 @@ void SurfaceCouplingExporter::compute_eamxx_exports(const double dt, const bool
Sa_pbot(i) = s_p_mid_i(num_levs-1);
}
- if (export_source(idx_Sa_shum)==FROM_MODEL) {
+ if (export_source(idx_Sa_shum)==FROM_MODEL) {
const auto s_qv_i = ekat::scalarize(qv_i);
- Sa_shum(i) = s_qv_i(num_levs-1);
+ Sa_shum(i) = s_qv_i(num_levs-1);
}
if (export_source(idx_Sa_dens)==FROM_MODEL) {
@@ -446,6 +534,7 @@ void SurfaceCouplingExporter::compute_eamxx_exports(const double dt, const bool
if (m_export_source_h(idx_Faxa_swvdf)==FROM_MODEL) { Kokkos::deep_copy(Faxa_swvdf, sfc_flux_dif_vis); }
if (m_export_source_h(idx_Faxa_swnet)==FROM_MODEL) { Kokkos::deep_copy(Faxa_swnet, sfc_flux_sw_net); }
if (m_export_source_h(idx_Faxa_lwdn )==FROM_MODEL) { Kokkos::deep_copy(Faxa_lwdn, sfc_flux_lw_dn); }
+
}
// =========================================================================================
void SurfaceCouplingExporter::do_export_to_cpl(const bool called_during_initialization)
@@ -479,7 +568,7 @@ void SurfaceCouplingExporter::do_export_to_cpl(const bool called_during_initiali
// =========================================================================================
void SurfaceCouplingExporter::finalize_impl()
{
-
+ // Nothing to do
}
// =========================================================================================
} // namespace scream
diff --git a/components/eamxx/src/control/atmosphere_surface_coupling_exporter.hpp b/components/eamxx/src/control/atmosphere_surface_coupling_exporter.hpp
index 285401d1193b..d8ccf5862f3c 100644
--- a/components/eamxx/src/control/atmosphere_surface_coupling_exporter.hpp
+++ b/components/eamxx/src/control/atmosphere_surface_coupling_exporter.hpp
@@ -1,14 +1,15 @@
#ifndef SCREAM_EXPORTER_HPP
#define SCREAM_EXPORTER_HPP
+#include "surface_coupling_utils.hpp"
+
#include "share/atm_process/atmosphere_process.hpp"
-#include "ekat/ekat_parameter_list.hpp"
#include "share/util/scream_common_physics_functions.hpp"
+#include "share/util/eamxx_time_interpolation.hpp"
#include "share/atm_process/ATMBufferManager.hpp"
#include "share/atm_process/SCDataManager.hpp"
-#include "surface_coupling_utils.hpp"
-
+#include
#include
namespace scream
@@ -79,12 +80,12 @@ class SurfaceCouplingExporter : public AtmosphereProcess
// which do not have valid entries.
void do_export(const double dt, const bool called_during_initialization=false); // Main export routine
void compute_eamxx_exports(const double dt, const bool called_during_initialization=false); // Export vars are derived from eamxx state
- void set_constant_exports(const double dt, const bool called_during_initialization=false); // Export vars are set to a constant
+ void set_constant_exports(); // Export vars are set to a constant
+ void set_from_file_exports(const int dt); // Export vars are set by interpolation of data from files
void do_export_to_cpl(const bool called_during_initialization=false); // Finish export by copying data to cpl structures.
// Take and store data from SCDataManager
void setup_surface_coupling_data(const SCDataManager &sc_data_manager);
-
protected:
// The three main overrides for the subcomponent
@@ -123,12 +124,16 @@ class SurfaceCouplingExporter : public AtmosphereProcess
Int m_num_cpl_exports;
// Number of exports from EAMxx and how they will be handled
- Int m_num_scream_exports;
+ int m_num_scream_exports;
view_1d m_export_source;
view_1d m_export_source_h;
std::map m_export_constants;
int m_num_from_model_exports=0;
int m_num_const_exports=0;
+ // For exporting from file
+ int m_num_from_file_exports=0;
+ util::TimeInterpolation m_time_interp;
+ std::vector m_export_from_file_field_names;
// Views storing a 2d array with dims (num_cols,num_fields) for cpl export data.
// The field idx strides faster, since that's what mct does (so we can "view" the
@@ -137,7 +142,8 @@ class SurfaceCouplingExporter : public AtmosphereProcess
uview_2d m_cpl_exports_view_h;
// Array storing the field names for exports
- name_t* m_export_field_names;
+ name_t* m_export_field_names;
+ std::vector m_export_field_names_vector;
// Views storing information for each export
uview_1d m_cpl_indices_view;
diff --git a/components/eamxx/src/control/atmosphere_surface_coupling_importer.cpp b/components/eamxx/src/control/atmosphere_surface_coupling_importer.cpp
index 828bc4a1abfb..1bf32b924215 100644
--- a/components/eamxx/src/control/atmosphere_surface_coupling_importer.cpp
+++ b/components/eamxx/src/control/atmosphere_surface_coupling_importer.cpp
@@ -1,6 +1,7 @@
#include "atmosphere_surface_coupling_importer.hpp"
#include "share/property_checks/field_within_interval_check.hpp"
+#include "physics/share/physics_constants.hpp"
#include "ekat/ekat_assert.hpp"
#include "ekat/util/ekat_units.hpp"
@@ -24,7 +25,7 @@ void SurfaceCouplingImporter::set_grids(const std::shared_ptrname();
m_num_cols = m_grid->get_num_local_dofs(); // Number of columns on this rank
-
+
// The units of mixing ratio Q are technically non-dimensional.
// Nevertheless, for output reasons, we like to see 'kg/kg'.
auto Qunit = kg/kg;
@@ -130,7 +131,7 @@ void SurfaceCouplingImporter::initialize_impl (const RunType /* run_type */)
add_postcondition_check(get_field_out("sfc_alb_dif_vis"),m_grid,0.0,1.0,true);
add_postcondition_check(get_field_out("sfc_alb_dif_nir"),m_grid,0.0,1.0,true);
- // Perform initial import (if any are marked for import during initialization)
+ // Perform initial import (if any are marked for import during initialization)
if (any_initial_imports) do_import(true);
}
// =========================================================================================
@@ -168,6 +169,70 @@ void SurfaceCouplingImporter::do_import(const bool called_during_initialization)
info.data[offset] = cpl_imports_view_d(icol,info.cpl_indx)*info.constant_multiple;
}
});
+
+ // If IOP is defined, potentially overwrite imports with data from IOP file
+ if (m_intensive_observation_period) {
+ overwrite_iop_imports(called_during_initialization);
+ }
+}
+// =========================================================================================
+void SurfaceCouplingImporter::overwrite_iop_imports (const bool called_during_initialization)
+{
+ using policy_type = KokkosTypes::RangePolicy;
+ using C = physics::Constants;
+
+ const auto& iop = m_intensive_observation_period;
+
+ const auto has_lhflx = iop->has_iop_field("lhflx");
+ const auto has_shflx = iop->has_iop_field("shflx");
+ const auto has_Tg = iop->has_iop_field("Tg");
+
+ static constexpr Real latvap = C::LatVap;
+ static constexpr Real stebol = C::stebol;
+
+ const auto& col_info_h = m_column_info_h;
+ const auto& col_info_d = m_column_info_d;
+
+ for (int ifield=0; ifieldget_iop_field("lhflx");
+ f.sync_to_host();
+ col_val = f.get_view()()/latvap;
+ } else if (fname == "surf_sens_flux" && has_shflx) {
+ const auto f = iop->get_iop_field("shflx");
+ f.sync_to_host();
+ col_val = f.get_view()();
+ } else if (fname == "surf_radiative_T" && has_Tg) {
+ const auto f = iop->get_iop_field("Tg");
+ f.sync_to_host();
+ col_val = f.get_view()();
+ } else if (fname == "surf_lw_flux_up" && has_Tg) {
+ const auto f = iop->get_iop_field("Tg");
+ f.sync_to_host();
+ col_val = stebol*std::pow(f.get_view()(), 4);
+ } else {
+ // If import field doesn't satisfy above, skip
+ continue;
+ }
+
+ // Overwrite iop imports with col_val for each column
+ auto policy = policy_type(0, m_num_cols);
+ Kokkos::parallel_for(policy, KOKKOS_LAMBDA(const int& icol) {
+ const auto& info_d = col_info_d(ifield);
+ const auto offset = icol*info_d.col_stride + info_d.col_offset;
+ info_d.data[offset] = col_val;
+ });
+ }
}
// =========================================================================================
void SurfaceCouplingImporter::finalize_impl()
diff --git a/components/eamxx/src/control/atmosphere_surface_coupling_importer.hpp b/components/eamxx/src/control/atmosphere_surface_coupling_importer.hpp
index 14a29f373d4c..5884c9d40af2 100644
--- a/components/eamxx/src/control/atmosphere_surface_coupling_importer.hpp
+++ b/components/eamxx/src/control/atmosphere_surface_coupling_importer.hpp
@@ -5,6 +5,8 @@
#include "ekat/ekat_parameter_list.hpp"
#include "share/atm_process/SCDataManager.hpp"
+#include "control/intensive_observation_period.hpp"
+
#include "surface_coupling_utils.hpp"
#include
@@ -33,7 +35,8 @@ class SurfaceCouplingImporter : public AtmosphereProcess
template
using uview_2d = Unmanaged>;
- using name_t = char[32];
+ using name_t = char[32];
+ using iop_ptr = std::shared_ptr;
// Constructors
SurfaceCouplingImporter (const ekat::Comm& comm, const ekat::ParameterList& params);
@@ -58,6 +61,12 @@ class SurfaceCouplingImporter : public AtmosphereProcess
// Take and store data from SCDataManager
void setup_surface_coupling_data(const SCDataManager &sc_data_manager);
+ // Overwrite imports for IOP cases with IOP file surface data
+ void overwrite_iop_imports (const bool called_during_initialization);
+
+ void set_intensive_observation_period (const iop_ptr& iop) {
+ m_intensive_observation_period = iop;
+ }
protected:
// The three main overrides for the subcomponent
@@ -66,7 +75,7 @@ class SurfaceCouplingImporter : public AtmosphereProcess
void finalize_impl ();
// Keep track of field dimensions
- Int m_num_cols;
+ Int m_num_cols;
// Number of fields in cpl data
Int m_num_cpl_imports;
@@ -93,10 +102,11 @@ class SurfaceCouplingImporter : public AtmosphereProcess
view_1d m_column_info_d;
decltype(m_column_info_d)::HostMirror m_column_info_h;
+ // Intensive observation period object.
+ iop_ptr m_intensive_observation_period;
// The grid is needed for property checks
std::shared_ptr m_grid;
-
}; // class SurfaceCouplingImporter
} // namespace scream
diff --git a/components/eamxx/src/control/fvphyshack.cpp b/components/eamxx/src/control/fvphyshack.cpp
deleted file mode 100644
index 515f66edd537..000000000000
--- a/components/eamxx/src/control/fvphyshack.cpp
+++ /dev/null
@@ -1,3 +0,0 @@
-namespace scream {
-bool fvphyshack;
-}
diff --git a/components/eamxx/src/control/fvphyshack.hpp b/components/eamxx/src/control/fvphyshack.hpp
deleted file mode 100644
index a1c57bbd34df..000000000000
--- a/components/eamxx/src/control/fvphyshack.hpp
+++ /dev/null
@@ -1,7 +0,0 @@
-#include "ekat/ekat_parameter_list.hpp"
-
-namespace scream {
-extern bool fvphyshack;
-void fv_phys_rrtmgp_active_gases_init(const ekat::ParameterList& p);
-void fv_phys_rrtmgp_active_gases_set_restart(const bool restart);
-}
diff --git a/components/eamxx/src/control/intensive_observation_period.cpp b/components/eamxx/src/control/intensive_observation_period.cpp
new file mode 100644
index 000000000000..f4ab466c17ba
--- /dev/null
+++ b/components/eamxx/src/control/intensive_observation_period.cpp
@@ -0,0 +1,849 @@
+#include "control/intensive_observation_period.hpp"
+
+#include "share/grid/point_grid.hpp"
+#include "share/io/scorpio_input.hpp"
+#include "share/util/scream_vertical_interpolation.hpp"
+
+#include "ekat/ekat_assert.hpp"
+#include "ekat/util/ekat_lin_interp.hpp"
+
+#include "pio.h"
+
+#include
+
+// Extend ekat mpi type for pairs,
+// used for reduction of type MPI_MINLOC.
+namespace ekat {
+#ifdef SCREAM_DOUBLE_PRECISION
+ template<>
+ MPI_Datatype get_mpi_type> () {
+ return MPI_DOUBLE_INT;
+ }
+#else
+ template<>
+ MPI_Datatype get_mpi_type> () {
+ return MPI_FLOAT_INT;
+ }
+#endif
+}
+
+namespace scream {
+namespace control {
+
+// Helper functions for reading data from .nc file to support
+// cases not currently supported in EAMxx scorpio interface.
+namespace {
+// Read the value of a dimensionless variable from file.
+template
+void read_dimensionless_variable_from_file(const std::string& filename,
+ const std::string& varname,
+ T* value)
+{
+ EKAT_REQUIRE_MSG(scorpio::has_variable(filename,varname),
+ "Error! IOP file does not have variable "+varname+".\n");
+
+ int ncid, varid, err1, err2;
+ bool was_open = scorpio::is_file_open_c2f(filename.c_str(),-1);
+ if (not was_open) {
+ scorpio::register_file(filename,scorpio::FileMode::Read);
+ }
+ ncid = scorpio::get_file_ncid_c2f (filename.c_str());
+ err1 = PIOc_inq_varid(ncid,varname.c_str(),&varid);
+ EKAT_REQUIRE_MSG(err1==PIO_NOERR,
+ "Error! Something went wrong while retrieving variable id.\n"
+ " - filename : " + filename + "\n"
+ " - varname : " + varname + "\n"
+ " - pio error: " + std::to_string(err1) + "\n");
+
+ err2 = PIOc_get_var(ncid, varid, value);
+ EKAT_REQUIRE_MSG(err2==PIO_NOERR,
+ "Error! Something went wrong while retrieving variable.\n"
+ " - filename : " + filename + "\n"
+ " - varname : " + varname + "\n"
+ " - pio error: " + std::to_string(err2) + "\n");
+
+ if (not was_open) {
+ scorpio::eam_pio_closefile(filename);
+ }
+}
+
+// Read variable with arbitrary number of dimensions from file.
+template
+void read_variable_from_file(const std::string& filename,
+ const std::string& varname,
+ const std::string& vartype,
+ const std::vector& dimnames,
+ const int time_idx,
+ T* data)
+{
+ EKAT_REQUIRE_MSG(scorpio::has_variable(filename,varname),
+ "Error! IOP file does not have variable "+varname+".\n");
+
+ // Compute total size of data to read
+ int data_size = 1;
+ for (auto dim : dimnames) {
+ const auto dim_len = scorpio::get_dimlen(filename, dim);
+ data_size *= dim_len;
+ }
+
+ // Read into data
+ scorpio::register_file(filename, scorpio::FileMode::Read);
+ std::string io_decomp_tag = varname+","+filename;
+ scorpio::register_variable(filename, varname, varname, dimnames, vartype, io_decomp_tag);
+ std::vector dof_offsets(data_size);
+ std::iota(dof_offsets.begin(), dof_offsets.end(), 0);
+ scorpio::set_dof(filename, varname, dof_offsets.size(), dof_offsets.data());
+ scorpio::set_decomp(filename);
+ scorpio::grid_read_data_array(filename, varname, time_idx, data, data_size);
+ scorpio::eam_pio_closefile(filename);
+}
+}
+
+IntensiveObservationPeriod::
+IntensiveObservationPeriod(const ekat::Comm& comm,
+ const ekat::ParameterList& params,
+ const util::TimeStamp& run_t0,
+ const int model_nlevs,
+ const Field& hyam,
+ const Field& hybm)
+{
+ m_comm = comm;
+ m_params = params;
+ EKAT_REQUIRE_MSG(m_params.get("doubly_periodic_mode", false),
+ "Error! Currently doubly_periodic_mode is the only use case for "
+ "intensive observation period files.\n");
+
+ EKAT_REQUIRE_MSG(m_params.isParameter("target_latitude") && m_params.isParameter("target_longitude"),
+ "Error! Using intensive observation period files requires "
+ "target_latitude and target_longitude be gives as parameters in "
+ "\"intensive_observation_period_options\" in the input yaml file.\n");
+ const auto target_lat = m_params.get("target_latitude");
+ const auto target_lon = m_params.get("target_longitude");
+ EKAT_REQUIRE_MSG(-90 <= target_lat and target_lat <= 90,
+ "Error! IOP target_lat="+std::to_string(target_lat)+" outside of expected range [-90, 90].\n");
+ EKAT_REQUIRE_MSG(0 <= target_lon and target_lon <= 360,
+ "Error! IOP target_lat="+std::to_string(target_lon)+" outside of expected range [0, 360].\n");
+
+ // Set defaults for some parameters
+ if (not m_params.isParameter("iop_srf_prop")) m_params.set("iop_srf_prop", false);
+ if (not m_params.isParameter("iop_dosubsidence")) m_params.set("iop_dosubsidence", false);
+ if (not m_params.isParameter("iop_coriolis")) m_params.set("iop_coriolis", false);
+ if (not m_params.isParameter("iop_nudge_tq")) m_params.set("iop_nudge_tq", false);
+ if (not m_params.isParameter("iop_nudge_uv")) m_params.set("iop_nudge_uv", false);
+ if (not m_params.isParameter("iop_nudge_tq_low")) m_params.set("iop_nudge_tq_low", 1050);
+ if (not m_params.isParameter("iop_nudge_tq_high")) m_params.set("iop_nudge_tq_high", 0);
+ if (not m_params.isParameter("iop_nudge_tscale")) m_params.set("iop_nudge_tscale", 10800);
+ if (not m_params.isParameter("zero_non_iop_tracers")) m_params.set("zero_non_iop_tracers", false);
+
+ // Use IOP file to initialize parameters
+ // and timestepping information
+ initialize_iop_file(run_t0, model_nlevs, hyam, hybm);
+}
+
+void IntensiveObservationPeriod::
+initialize_iop_file(const util::TimeStamp& run_t0,
+ int model_nlevs,
+ const Field& hyam,
+ const Field& hybm)
+{
+ EKAT_REQUIRE_MSG(m_params.isParameter("iop_file"),
+ "Error! Using IOP requires defining an iop_file parameter.\n");
+
+ const auto iop_file = m_params.get("iop_file");
+
+ // Lambda for allocating space and storing information for potential iop fields.
+ // Inputs:
+ // - varnames: Vector of possible variable names in the iop file.
+ // First entry will be the variable name used when accessing in class
+ // - fl: IOP field layout (acceptable ranks: 0, 1)
+ // - srf_varname: Name of surface variable potentially in iop file associated with iop variable.
+ auto setup_iop_field = [&, this] (const vos& varnames,
+ const FieldLayout& fl,
+ const std::string& srf_varname = "none") {
+ EKAT_REQUIRE_MSG(fl.rank() == 0 || fl.rank() == 1,
+ "Error! IOP fields must have rank 0 or 1. "
+ "Attempting to setup "+varnames[0]+" with rank "
+ +std::to_string(fl.rank())+".\n");
+
+ // Check if var exists in IOP file. Some variables will
+ // need to check alternate names.
+ const auto iop_varname = varnames[0];
+ bool has_var = false;
+ std::string file_varname = "";
+ for (auto varname : varnames) {
+ if (scorpio::has_variable(iop_file, varname)) {
+ has_var = true;
+ file_varname = varname;
+ break;
+ };
+ }
+ if (has_var) {
+ // Store if iop file has a different varname than the iop field
+ if (iop_varname != file_varname) m_iop_file_varnames.insert({iop_varname, file_varname});
+ // Store if variable contains a surface value in iop file
+ if (scorpio::has_variable(iop_file, srf_varname)) {
+ m_iop_field_surface_varnames.insert({iop_varname, srf_varname});
+ }
+
+ // Allocate field for variable
+ FieldIdentifier fid(iop_varname, fl, ekat::units::Units::nondimensional(), "");
+ const auto field_rank = fl.rank();
+ EKAT_REQUIRE_MSG(field_rank <= 1,
+ "Error! Unexpected field rank "+std::to_string(field_rank)+" for iop file fields.\n");
+ Field field(fid);
+ field.allocate_view();
+ m_iop_fields.insert({iop_varname, field});
+ }
+ };
+
+ // Check if the following variables exist in the iop file
+
+ // Scalar data
+ FieldLayout fl_scalar({},{}); // Zero dim fields used for iop file scalars
+ setup_iop_field({"Ps"}, fl_scalar);
+ setup_iop_field({"Tg"}, fl_scalar);
+ setup_iop_field({"lhflx", "lh"}, fl_scalar);
+ setup_iop_field({"shflx", "sh"}, fl_scalar);
+
+ // Level data
+ FieldLayout fl_vector({FieldTag::LevelMidPoint}, {model_nlevs});
+ setup_iop_field({"T"}, fl_vector, "Tsair");
+ setup_iop_field({"q"}, fl_vector, "qsrf");
+ setup_iop_field({"cld"}, fl_vector);
+ setup_iop_field({"clwp"}, fl_vector);
+ setup_iop_field({"divq"}, fl_vector, "divqsrf");
+ setup_iop_field({"vertdivq"}, fl_vector, "vertdivqsrf");
+ setup_iop_field({"NUMLIQ"}, fl_vector);
+ setup_iop_field({"CLDLIQ"}, fl_vector);
+ setup_iop_field({"CLDICE"}, fl_vector);
+ setup_iop_field({"NUMICE"}, fl_vector);
+ setup_iop_field({"divu"}, fl_vector, "divusrf");
+ setup_iop_field({"divv"}, fl_vector, "divvsrf");
+ setup_iop_field({"divT"}, fl_vector, "divtsrf");
+ setup_iop_field({"vertdivT"}, fl_vector, "vertdivTsrf");
+ setup_iop_field({"divT3d"}, fl_vector, "divT3dsrf");
+ setup_iop_field({"u"}, fl_vector, "usrf");
+ setup_iop_field({"u_ls"}, fl_vector, "usrf");
+ setup_iop_field({"v"}, fl_vector, "vsrf");
+ setup_iop_field({"v_ls"}, fl_vector, "vsrf");
+ setup_iop_field({"Q1"}, fl_vector);
+ setup_iop_field({"Q2"}, fl_vector);
+ setup_iop_field({"omega"}, fl_vector, "Ptend");
+
+ // Make sure Ps, T, and q are defined in the iop file
+ EKAT_REQUIRE_MSG(has_iop_field("Ps"),
+ "Error! Using IOP file requires variable \"Ps\".\n");
+ EKAT_REQUIRE_MSG(has_iop_field("T"),
+ "Error! Using IOP file requires variable \"T\".\n");
+ EKAT_REQUIRE_MSG(has_iop_field("q"),
+ "Error! Using IOP file requires variable \"q\".\n");
+
+ // Initialize time information
+ int bdate;
+ std::string bdate_name;
+ if (scorpio::has_variable(iop_file, "bdate")) bdate_name = "bdate";
+ else if (scorpio::has_variable(iop_file, "basedate")) bdate_name = "basedate";
+ else if (scorpio::has_variable(iop_file, "nbdate")) bdate_name = "nbdate";
+ else EKAT_ERROR_MSG("Error! No valid name for bdate in "+iop_file+".\n");
+ read_dimensionless_variable_from_file(iop_file, bdate_name, &bdate);
+
+ int yr=bdate/10000;
+ int mo=(bdate/100) - yr*100;
+ int day=bdate - (yr*10000+mo*100);
+ m_time_info.iop_file_begin_time = util::TimeStamp(yr,mo,day,0,0,0);
+
+ std::string time_dimname;
+ if (scorpio::has_dim(iop_file, "time")) time_dimname = "time";
+ else if (scorpio::has_dim(iop_file, "tsec")) time_dimname = "tsec";
+ else EKAT_ERROR_MSG("Error! No valid dimension for tsec in "+iop_file+".\n");
+ const auto ntimes = scorpio::get_dimlen(iop_file, time_dimname);
+ m_time_info.iop_file_times_in_sec =
+ decltype(m_time_info.iop_file_times_in_sec)("iop_file_times", ntimes);
+ read_variable_from_file(iop_file, "tsec", "int", {time_dimname}, -1,
+ m_time_info.iop_file_times_in_sec.data());
+
+ // Check that lat/lon from iop file match the targets in parameters. Note that
+ // longitude may be negtive in the iop file, we convert to positive before checking.
+ const auto nlats = scorpio::get_dimlen(iop_file, "lat");
+ const auto nlons = scorpio::get_dimlen(iop_file, "lon");
+ EKAT_REQUIRE_MSG(nlats==1 and nlons==1, "Error! IOP data file requires a single lat/lon pair.\n");
+ Real iop_file_lat, iop_file_lon;
+ read_variable_from_file(iop_file, "lat", "real", {"lat"}, -1, &iop_file_lat);
+ read_variable_from_file(iop_file, "lon", "real", {"lon"}, -1, &iop_file_lon);
+ EKAT_REQUIRE_MSG(iop_file_lat == m_params.get("target_latitude"),
+ "Error! IOP file variable \"lat\" does not match target_latitude from IOP parameters.\n");
+ EKAT_REQUIRE_MSG(std::fmod(iop_file_lon + 360, 360) == m_params.get("target_longitude"),
+ "Error! IOP file variable \"lat\" does not match target_latitude from IOP parameters.\n");
+
+ // Store iop file pressure as helper field with dimension lev+1.
+ // Load the first lev entries from iop file, the lev+1 entry will
+ // be set when reading iop data.
+ EKAT_REQUIRE_MSG(scorpio::has_variable(iop_file, "lev"),
+ "Error! Using IOP file requires variable \"lev\".\n");
+ const auto file_levs = scorpio::get_dimlen(iop_file, "lev");
+ FieldIdentifier fid("iop_file_pressure",
+ FieldLayout({FieldTag::LevelMidPoint}, {file_levs+1}),
+ ekat::units::Units::nondimensional(),
+ "");
+ Field iop_file_pressure(fid);
+ iop_file_pressure.allocate_view();
+ auto data = iop_file_pressure.get_view().data();
+ read_variable_from_file(iop_file, "lev", "real", {"lev"}, -1, data);
+ // Convert to pressure to millibar (file gives pressure in Pa)
+ for (int ilev=0; ilevname();
+
+ // Create io grid if doesn't exist
+ if (m_io_grids.count(grid_name) == 0) {
+ // IO grid needs to have ncol dimension equal to the IC/topo file
+ const auto nc_file_ncols = scorpio::get_dimlen(file_name, "ncol");
+ const auto nlevs = grid->get_num_vertical_levels();
+ m_io_grids[grid_name] = create_point_grid(grid_name,
+ nc_file_ncols,
+ nlevs,
+ m_comm);
+ }
+
+ // Store closest lat/lon info for this grid if doesn't exist
+ if (m_lat_lon_info.count(grid_name) == 0) {
+ const auto& io_grid = m_io_grids[grid_name];
+
+ // Create lat/lon fields
+ const auto ncols = io_grid->get_num_local_dofs();
+ std::vector fields;
+
+ FieldIdentifier lat_fid("lat",
+ FieldLayout({FieldTag::Column},{ncols}),
+ ekat::units::Units::nondimensional(),
+ grid_name);
+ Field lat_f(lat_fid);
+ lat_f.allocate_view();
+ fields.push_back(lat_f);
+
+ FieldIdentifier lon_fid("lon",
+ FieldLayout({FieldTag::Column},{ncols}),
+ ekat::units::Units::nondimensional(),
+ grid_name);
+ Field lon_f(lon_fid);
+ lon_f.allocate_view();
+ fields.push_back(lon_f);
+
+ // Read from file
+ AtmosphereInput file_reader(file_name, io_grid, fields);
+ file_reader.read_variables();
+ file_reader.finalize();
+
+ // Find column index of closest lat/lon to target_lat/lon params
+ auto lat_v = fields[0].get_view();
+ auto lon_v = fields[1].get_view();
+ const auto target_lat = m_params.get("target_latitude");
+ const auto target_lon = m_params.get("target_longitude");
+ using minloc_t = Kokkos::MinLoc;
+ using minloc_value_t = typename minloc_t::value_type;
+ minloc_value_t minloc;
+ Kokkos::parallel_reduce(ncols, KOKKOS_LAMBDA (int icol, minloc_value_t& result) {
+ auto dist = std::abs(lat_v(icol)-target_lat)+std::abs(lon_v(icol)-target_lon);
+ if(dist min_dist_and_rank = {minloc.val, my_rank};
+ m_comm.all_reduce>(&min_dist_and_rank, 1, MPI_MINLOC);
+
+ // Broadcast closest lat/lon values to all ranks
+ const auto lat_v_h = lat_f.get_view();
+ const auto lon_v_h = lon_f.get_view();
+ auto local_column_idx = minloc.loc;
+ auto min_dist_rank = min_dist_and_rank.second;
+ Real lat_lon_vals[2];
+ if (my_rank == min_dist_rank) {
+ lat_lon_vals[0] = lat_v_h(local_column_idx);
+ lat_lon_vals[1] = lon_v_h(local_column_idx);
+ }
+ m_comm.broadcast(lat_lon_vals, 2, min_dist_rank);
+
+ // Set local_column_idx=-1 for mpi ranks not containing minimum lat/lon distance
+ if (my_rank != min_dist_rank) local_column_idx = -1;
+
+ // Store closest lat/lon info for this grid, used later when reading ICs
+ m_lat_lon_info[grid_name] = ClosestLatLonInfo{lat_lon_vals[0], lat_lon_vals[1], min_dist_rank, local_column_idx};
+ }
+}
+
+void IntensiveObservationPeriod::
+read_fields_from_file_for_iop (const std::string& file_name,
+ const vos& field_names_nc,
+ const vos& field_names_eamxx,
+ const util::TimeStamp& initial_ts,
+ const field_mgr_ptr field_mgr)
+{
+ const auto dummy_units = ekat::units::Units::nondimensional();
+
+ EKAT_REQUIRE_MSG(field_names_nc.size()==field_names_eamxx.size(),
+ "Error! Field name arrays must have same size.\n");
+
+ if (field_names_nc.size()==0) {
+ return;
+ }
+
+ const auto& grid_name = field_mgr->get_grid()->name();
+ EKAT_REQUIRE_MSG(m_io_grids.count(grid_name) > 0,
+ "Error! Attempting to read IOP initial conditions on "
+ +grid_name+" grid, but m_io_grid entry has not been created.\n");
+ EKAT_REQUIRE_MSG(m_lat_lon_info.count(grid_name) > 0,
+ "Error! Attempting to read IOP initial conditions on "
+ +grid_name+" grid, but m_lat_lon_info entry has not been created.\n");
+
+ auto io_grid = m_io_grids[grid_name];
+ if (grid_name=="Physics GLL" && scorpio::has_dim(file_name,"ncol_d")) {
+ // If we are on GLL grid, and nc file contains "ncol_d" dimension,
+ // we need to reset COL dim tag
+ using namespace ShortFieldTagsNames;
+ auto grid = io_grid->clone(io_grid->name(),true);
+ grid->reset_field_tag_name(COL,"ncol_d");
+ io_grid = grid;
+ }
+
+ // Create vector of fields with correct dimensions to read from file
+ std::vector io_fields;
+ for (size_t i=0; iget_field(eamxx_name).alias(nc_name)
+ :
+ field_mgr->get_field(eamxx_name);
+ auto fm_fid = fm_field.get_header().get_identifier();
+ EKAT_REQUIRE_MSG(fm_fid.get_layout().tag(0)==FieldTag::Column,
+ "Error! IOP inputs read from IC/topo file must have Column "
+ "as first dim tag.\n");
+
+ // Set first dimension to match input file
+ auto dims = fm_fid.get_layout().dims();
+ dims[0] = io_grid->get_num_local_dofs();
+ FieldLayout io_fl(fm_fid.get_layout().tags(), dims);
+ FieldIdentifier io_fid(fm_fid.name(), io_fl, fm_fid.get_units(), io_grid->name());
+ Field io_field(io_fid);
+ io_field.allocate_view();
+ io_fields.push_back(io_field);
+ }
+
+ // Read data from file
+ AtmosphereInput file_reader(file_name,io_grid,io_fields);
+ file_reader.read_variables();
+ file_reader.finalize();
+
+ // For each field, broadcast data from closest lat/lon column to all processors
+ // and copy data into each field's column in the field manager.
+ for (size_t i=0; iget_field(fname);
+
+ // Create a temporary field to store the data from the
+ // single column of the closest lat/lon pair
+ const auto io_fid = io_field.get_header().get_identifier();
+ FieldLayout col_data_fl = io_fid.get_layout().strip_dim(0);
+ FieldIdentifier col_data_fid("col_data", col_data_fl, dummy_units, "");
+ Field col_data(col_data_fid);
+ col_data.allocate_view();
+
+ // MPI rank with closest column index store column data
+ const auto mpi_rank_with_col = m_lat_lon_info[grid_name].mpi_rank_of_closest_column;
+ if (m_comm.rank() == mpi_rank_with_col) {
+ const auto col_idx_with_data = m_lat_lon_info[grid_name].local_column_index_of_closest_column;
+ col_data.deep_copy(io_field.subfield(0,col_idx_with_data));
+ }
+
+ // Broadcast column data to all other ranks
+ const auto col_size = col_data.get_header().get_identifier().get_layout().size();
+ m_comm.broadcast(col_data.get_internal_view_data(), col_size, mpi_rank_with_col);
+
+ // Copy column data to all columns in field manager field
+ const auto ncols = fm_field.get_header().get_identifier().get_layout().dim(0);
+ for (auto icol=0; icol(col_data);
+ }
+
+ // Sync fields to device
+ fm_field.sync_to_dev();
+
+ // Set the initial time stamp on FM fields
+ fm_field.get_header().get_tracking().update_time_stamp(initial_ts);
+ }
+}
+
+void IntensiveObservationPeriod::
+read_iop_file_data (const util::TimeStamp& current_ts)
+{
+ const auto iop_file = m_params.get("iop_file");
+ const auto iop_file_time_idx = m_time_info.get_iop_file_time_idx(current_ts);
+
+ // Sanity check
+ EKAT_REQUIRE_MSG(iop_file_time_idx >= m_time_info.time_idx_of_current_data,
+ "Error! Attempting to read previous iop file data time index.\n");
+
+ // If we are still in the time interval as the previous read from iop file,
+ // there is no need to reload data. Return early
+ if (iop_file_time_idx == m_time_info.time_idx_of_current_data) return;
+
+ const auto file_levs = scorpio::get_dimlen(iop_file, "lev");
+ const auto iop_file_pressure = m_helper_fields["iop_file_pressure"];
+ const auto model_pressure = m_helper_fields["model_pressure"];
+ const auto surface_pressure = m_iop_fields["Ps"];
+
+ // Loop through iop fields, if rank 1 fields exist we need to
+ // gather information for vertically interpolating views
+ bool has_level_data = false;
+ for (auto& it : m_iop_fields) {
+ if (it.second.rank() == 1) {
+ has_level_data = true;
+ break;
+ }
+ }
+
+ // Compute values and indices associate with pressure for interpolating data (if necessary).
+ int adjusted_file_levs;
+ int iop_file_start;
+ int iop_file_end;
+ int model_start;
+ int model_end;
+ if (has_level_data) {
+ // Load surface pressure (Ps) from iop file
+ auto ps_data = surface_pressure.get_view().data();
+ read_variable_from_file(iop_file, "Ps", "real", {"lon","lat"}, iop_file_time_idx, ps_data);
+ surface_pressure.sync_to_dev();
+
+ // Pre-process file pressures, store number of file levels
+ // where the last level is the first level equal to surface pressure.
+ const auto iop_file_pres_v = iop_file_pressure.get_view();
+ // Sanity check
+ EKAT_REQUIRE_MSG(file_levs+1 == iop_file_pressure.get_header().get_identifier().get_layout().dim(0),
+ "Error! Unexpected size for helper field \"iop_file_pressure\"\n");
+ const auto& Ps = surface_pressure.get_view();
+ Kokkos::parallel_reduce(file_levs+1, KOKKOS_LAMBDA (const int ilev, int& lmin) {
+ if (ilev == file_levs) {
+ // Add surface pressure to last iop file pressure entry
+ iop_file_pres_v(ilev) = Ps()/100;
+ }
+ if (iop_file_pres_v(ilev) > Ps()/100) {
+ // Set upper bound on pressure values
+ iop_file_pres_v(ilev) = Ps()/100;
+ }
+ if (iop_file_pres_v(ilev) == Ps()/100) {
+ // Find minimum number of levels where the final
+ // level would contain the largest value.
+ if (ilev < lmin) lmin = ilev+1;
+ }
+ }, Kokkos::Min(adjusted_file_levs));
+
+ EKAT_REQUIRE_MSG(adjusted_file_levs > 1,
+ "Error! Pressures in iop file "+iop_file+" is are inccorrectly set. "
+ "Surface pressure \"Ps\" (converted to millibar) should be greater "
+ "than at least the 1st entry in midpoint pressures \"lev\".\n");
+
+ // Compute model pressure levels
+ const auto model_pres_v = model_pressure.get_view();
+ const auto model_nlevs = model_pressure.get_header().get_identifier().get_layout().dim(0);
+ const auto hyam_v = m_helper_fields["hyam"].get_view();
+ const auto hybm_v = m_helper_fields["hybm"].get_view();
+ Kokkos::parallel_for(model_nlevs, KOKKOS_LAMBDA (const int ilev) {
+ model_pres_v(ilev) = 1000*hyam_v(ilev) + Ps()*hybm_v(ilev)/100;
+ });
+
+ // Find file pressure levels just outside the range of model pressure levels
+ Kokkos::parallel_reduce(adjusted_file_levs, KOKKOS_LAMBDA (const int& ilev, int& lmax, int& lmin) {
+ if (iop_file_pres_v(ilev) <= model_pres_v(0) && ilev > lmax) {
+ lmax = ilev;
+ }
+ if (iop_file_pres_v(ilev) >= model_pres_v(model_nlevs-1) && ilev+1 < lmin) {
+ lmin = ilev+1;
+ }
+ },
+ Kokkos::Max(iop_file_start),
+ Kokkos::Min(iop_file_end));
+
+ // Find model pressure levels just inside range of file pressure levels
+ Kokkos::parallel_reduce(model_nlevs, KOKKOS_LAMBDA (const int& ilev, int& lmin, int& lmax) {
+ if (model_pres_v(ilev) >= iop_file_pres_v(iop_file_start) && ilev < lmin) {
+ lmin = ilev;
+ }
+ if (model_pres_v(ilev) <= iop_file_pres_v(iop_file_end-1) && ilev+1 > lmax) {
+ lmax = ilev+1;
+ }
+ },
+ Kokkos::Min(model_start),
+ Kokkos::Max(model_end));
+ }
+
+ // Loop through fields and store data from file
+ for (auto& it : m_iop_fields) {
+ auto fname = it.first;
+ auto field = it.second;
+
+ // File may use different varname than IOP class
+ auto file_varname = (m_iop_file_varnames.count(fname) > 0) ? m_iop_file_varnames[fname] : fname;
+
+ if (field.rank()==0) {
+ // For scalar data, read iop file variable directly into field data
+ auto data = field.get_view().data();
+ read_variable_from_file(iop_file, file_varname, "real", {"lon","lat"}, iop_file_time_idx, data);
+ field.sync_to_dev();
+ } else if (field.rank()==1) {
+ // Create temporary fields for reading iop file variables. We use
+ // adjusted_file_levels (computed above) which contains an unset
+ // value for surface.
+ FieldIdentifier fid(file_varname+"_iop_file",
+ FieldLayout({FieldTag::LevelMidPoint},
+ {adjusted_file_levs}),
+ ekat::units::Units::nondimensional(),
+ "");
+ Field iop_file_field(fid);
+ iop_file_field.allocate_view();
+
+ // Read data from iop file.
+ std::vector data(file_levs);
+ read_variable_from_file(iop_file, file_varname, "real", {"lon","lat","lev"}, iop_file_time_idx, data.data());
+
+ // Copy first adjusted_file_levs-1 values to field
+ auto iop_file_v_h = iop_file_field.get_view();
+ for (int ilev=0; ilev0;
+ if (has_srf) {
+ const auto srf_varname = m_iop_field_surface_varnames[fname];
+ read_variable_from_file(iop_file, srf_varname, "real", {"lon","lat"}, iop_file_time_idx, &iop_file_v_h(adjusted_file_levs-1));
+ } else {
+ // No surface value exists, compute surface value
+ const auto dx = iop_file_v_h(adjusted_file_levs-2) - iop_file_v_h(adjusted_file_levs-3);
+ if (dx == 0) iop_file_v_h(adjusted_file_levs-1) = iop_file_v_h(adjusted_file_levs-2);
+ else {
+ const auto iop_file_pres_v_h = iop_file_pressure.get_view();
+ const auto dy = iop_file_pres_v_h(adjusted_file_levs-2) - iop_file_pres_v_h(adjusted_file_levs-3);
+ const auto scale = dy/dx;
+
+ iop_file_v_h(adjusted_file_levs-1) =
+ (iop_file_pres_v_h(adjusted_file_levs-1)-iop_file_pres_v_h(adjusted_file_levs-2))/scale
+ + iop_file_v_h(adjusted_file_levs-2);
+ }
+ }
+ iop_file_field.sync_to_dev();
+
+ // Vertically interpolate iop file data to iop fields.
+ // Note: ekat lininterp requires packs. Use 1d packs here.
+ // TODO: allow for nontrivial packsize.
+ const auto iop_file_pres_v = iop_file_pressure.get_view();
+ const auto model_pres_v = model_pressure.get_view();
+ const auto iop_file_v = iop_file_field.get_view();
+ auto iop_field_v = field.get_view();
+
+ const auto nlevs_input = iop_file_end - iop_file_start;
+ const auto nlevs_output = model_end - model_start;
+ const auto total_nlevs = field.get_header().get_identifier().get_layout().dim(0);
+
+ ekat::LinInterp vert_interp(1, nlevs_input, nlevs_output);
+ const auto policy = ESU::get_default_team_policy(1, total_nlevs);
+ Kokkos::parallel_for(policy, KOKKOS_LAMBDA (const KT::MemberType& team) {
+ const auto x_src = Kokkos::subview(iop_file_pres_v, Kokkos::pair(iop_file_start,iop_file_end));
+ const auto x_tgt = Kokkos::subview(model_pres_v, Kokkos::pair(model_start,model_end));
+ const auto input = Kokkos::subview(iop_file_v, Kokkos::pair(iop_file_start,iop_file_end));
+ const auto output = Kokkos::subview(iop_field_v, Kokkos::pair(model_start,model_end));
+
+ vert_interp.setup(team, x_src, x_tgt);
+ vert_interp.lin_interp(team, x_src, x_tgt, input, output);
+ });
+ Kokkos::fence();
+
+ // For certain fields we need to make sure to fill in the ends of
+ // the interpolated region with the value at model_start/model_end
+ if (fname == "T" || fname == "q" || fname == "u" ||
+ fname == "u_ls" || fname == "v" || fname == "v_ls") {
+ if (model_start > 0) {
+ Kokkos::parallel_for(Kokkos::RangePolicy<>(0, model_start),
+ KOKKOS_LAMBDA (const int ilev) {
+ iop_field_v(ilev) = iop_field_v(model_start);
+ });
+ }
+ if (model_end < total_nlevs) {
+ Kokkos::parallel_for(Kokkos::RangePolicy<>(model_end, total_nlevs),
+ KOKKOS_LAMBDA (const int ilev) {
+ iop_field_v(ilev) = iop_field_v(model_end-1);
+ });
+ }
+ }
+ }
+ }
+
+ // Now that data is loaded, reset the index of the currently loaded data.
+ m_time_info.time_idx_of_current_data = iop_file_time_idx;
+}
+
+void IntensiveObservationPeriod::
+set_fields_from_iop_data(const field_mgr_ptr field_mgr)
+{
+ if (m_params.get("zero_non_iop_tracers") && field_mgr->has_group("tracers")) {
+ // Zero out all tracers before setting iop tracers (if requested)
+ field_mgr->get_field_group("tracers").m_bundle->deep_copy(0);
+ }
+
+ EKAT_REQUIRE_MSG(field_mgr->get_grid()->name() == "Physics GLL",
+ "Error! Attempting to set non-GLL fields using "
+ "data from the IOP file.\n");
+
+ // Find which fields need to be written
+ const bool set_ps = field_mgr->has_field("ps") && has_iop_field("Ps");
+ const bool set_T_mid = field_mgr->has_field("T_mid") && has_iop_field("T");
+ const bool set_horiz_winds_u = field_mgr->has_field("horiz_winds") && has_iop_field("u");
+ const bool set_horiz_winds_v = field_mgr->has_field("horiz_winds") && has_iop_field("v");
+ const bool set_qv = field_mgr->has_field("qv") && has_iop_field("q");
+ const bool set_nc = field_mgr->has_field("nc") && has_iop_field("NUMLIQ");
+ const bool set_qc = field_mgr->has_field("qc") && has_iop_field("CLDLIQ");
+ const bool set_qi = field_mgr->has_field("qi") && has_iop_field("CLDICE");
+ const bool set_ni = field_mgr->has_field("ni") && has_iop_field("NUMICE");
+
+ // Create views/scalars for these field's data
+ view_1d ps;
+ view_2d T_mid, qv, nc, qc, qi, ni;
+ view_3d horiz_winds;
+
+ Real ps_iop;
+ view_1d t_iop, u_iop, v_iop, qv_iop, nc_iop, qc_iop, qi_iop, ni_iop;
+
+ if (set_ps) {
+ ps = field_mgr->get_field("ps").get_view();
+ get_iop_field("Ps").sync_to_host();
+ ps_iop = get_iop_field("Ps").get_view()();
+ }
+ if (set_T_mid) {
+ T_mid = field_mgr->get_field("T_mid").get_view();
+ t_iop = get_iop_field("T").get_view();
+ }
+ if (set_horiz_winds_u || set_horiz_winds_v) {
+ horiz_winds = field_mgr->get_field("horiz_winds").get_view();
+ if (set_horiz_winds_u) u_iop = get_iop_field("u").get_view();
+ if (set_horiz_winds_v) v_iop = get_iop_field("v").get_view();
+ }
+ if (set_qv) {
+ qv = field_mgr->get_field("qv").get_view();
+ qv_iop = get_iop_field("q").get_view();
+ }
+ if (set_nc) {
+ nc = field_mgr->get_field("nc").get_view();
+ nc_iop = get_iop_field("NUMLIQ").get_view();
+ }
+ if (set_qc) {
+ qc = field_mgr->get_field("qc").get_view();
+ qc_iop = get_iop_field("CLDLIQ").get_view();
+ }
+ if (set_qi) {
+ qi = field_mgr->get_field("qi").get_view();
+ qi_iop = get_iop_field("CLDICE").get_view();
+ }
+ if (set_ni) {
+ ni = field_mgr->get_field("ni").get_view();
+ ni_iop = get_iop_field("NUMICE").get_view();
+ }
+
+ // Check if t_iop has any 0 entires near the top of the model
+ // and correct t_iop and q_iop accordingly.
+ correct_temperature_and_water_vapor(field_mgr);
+
+ // Loop over all columns and copy IOP field values to FM views
+ const auto ncols = field_mgr->get_grid()->get_num_local_dofs();
+ const auto nlevs = field_mgr->get_grid()->get_num_vertical_levels();
+ const auto policy = ESU::get_default_team_policy(ncols, nlevs);
+ Kokkos::parallel_for(policy, KOKKOS_LAMBDA(const KT::MemberType& team) {
+ const auto icol = team.league_rank();
+
+ if (set_ps) {
+ ps(icol) = ps_iop;
+ }
+ Kokkos::parallel_for(Kokkos::TeamVectorRange(team, nlevs), [&] (const int ilev) {
+ if (set_T_mid) {
+ T_mid(icol, ilev) = t_iop(ilev);
+ }
+ if (set_horiz_winds_u) {
+ horiz_winds(icol, 0, ilev) = u_iop(ilev);
+ }
+ if (set_horiz_winds_v) {
+ horiz_winds(icol, 1, ilev) = v_iop(ilev);
+ }
+ if (set_qv) {
+ qv(icol, ilev) = qv_iop(ilev);
+ }
+ if (set_nc) {
+ nc(icol, ilev) = nc_iop(ilev);
+ }
+ if (set_qc) {
+ qc(icol, ilev) = qc_iop(ilev);
+ }
+ if (set_qi) {
+ qi(icol, ilev) = qi_iop(ilev);
+ }
+ if (set_ni) {
+ ni(icol, ilev) = ni_iop(ilev);
+ }
+ });
+ });
+}
+
+void IntensiveObservationPeriod::
+correct_temperature_and_water_vapor(const field_mgr_ptr field_mgr)
+{
+ // Find the first valid level index for t_iop, i.e., first non-zero entry
+ int first_valid_idx;
+ const auto nlevs = field_mgr->get_grid()->get_num_vertical_levels();
+ auto t_iop = get_iop_field("T").get_view();
+ Kokkos::parallel_reduce(nlevs, KOKKOS_LAMBDA (const int ilev, int& lmin) {
+ if (t_iop(ilev) > 0 && ilev < lmin) lmin = ilev;
+ }, Kokkos::Min(first_valid_idx));
+
+ // If first_valid_idx>0, we must correct IOP fields T and q corresponding to
+ // levels 0,...,first_valid_idx-1
+ if (first_valid_idx > 0) {
+ // If we have values of T and q to correct, we must have both T_mid and qv as FM fields
+ EKAT_REQUIRE_MSG(field_mgr->has_field("T_mid"), "Error! IOP requires FM to define T_mid.\n");
+ EKAT_REQUIRE_MSG(field_mgr->has_field("qv"), "Error! IOP requires FM to define qv.\n");
+
+ // Replace values of T and q where t_iop contains zeros
+ auto T_mid = field_mgr->get_field("T_mid").get_view();
+ auto qv = field_mgr->get_field("qv").get_view();
+ auto q_iop = get_iop_field("q").get_view();
+ Kokkos::parallel_for(Kokkos::RangePolicy<>(0, first_valid_idx), KOKKOS_LAMBDA (const int ilev) {
+ t_iop(ilev) = T_mid(0, ilev);
+ q_iop(ilev) = qv(0, ilev);
+ });
+ }
+}
+
+} // namespace control
+} // namespace scream
+
+
diff --git a/components/eamxx/src/control/intensive_observation_period.hpp b/components/eamxx/src/control/intensive_observation_period.hpp
new file mode 100644
index 000000000000..6e70f44a0f56
--- /dev/null
+++ b/components/eamxx/src/control/intensive_observation_period.hpp
@@ -0,0 +1,204 @@
+#ifndef SCREAM_IOP_HPP
+#define SCREAM_IOP_HPP
+
+#include "share/scream_types.hpp"
+#include "share/field/field_manager.hpp"
+#include "share/grid/abstract_grid.hpp"
+#include "share/util/scream_time_stamp.hpp"
+
+#include "ekat/ekat_parameter_list.hpp"
+#include "ekat/ekat_pack.hpp"
+#include "ekat/mpi/ekat_comm.hpp"
+#include "ekat/kokkos/ekat_kokkos_utils.hpp"
+
+namespace scream {
+namespace control {
+/*
+ * Class which provides functionality for running EAMxx with an intensive
+ * observation period (IOP). Currently the only use case is the doubly
+ * periodic model (DP-SCREAM).
+ */
+class IntensiveObservationPeriod
+{
+ using vos = std::vector<std::string>;
+ using field_mgr_ptr = std::shared_ptr<FieldManager>;
+ using grid_ptr = std::shared_ptr<const AbstractGrid>;
+
+ using KT = ekat::KokkosTypes<DefaultDevice>;
+ using ESU = ekat::ExeSpaceUtils<KT::ExeSpace>;
+
+ template<typename T>
+ using view_1d = KT::template view_1d<T>;
+ template<typename T>
+ using view_2d = KT::template view_2d<T>;
+ template<typename T>
+ using view_3d = KT::template view_3d<T>;
+ template<typename T>
+ using view_1d_host = typename view_1d<T>::HostMirror;
+ using Pack1d = ekat::Pack<Real, 1>;
+
+public:
+
+ // Constructor
+ // Input:
+ // - comm: MPI communicator
+ // - params: Input yaml file needs intensive_observation_period_options sublist
+ // - run_t0: Initial timestamp for the simulation
+ // - model_nlevs: Number of vertical levels in the simulation. Needed since
+ // the iop file contains a (potentially) different number of levels
+ IntensiveObservationPeriod(const ekat::Comm& comm,
+ const ekat::ParameterList& params,
+ const util::TimeStamp& run_t0,
+ const int model_nlevs,
+ const Field& hyam,
+ const Field& hybm);
+
+ // Default destructor
+ ~IntensiveObservationPeriod() = default;
+
+ // Read data from IOP file and store internally.
+ void read_iop_file_data(const util::TimeStamp& current_ts);
+
+ // Setup io grids for reading data from file and determine the closest lat/lon
+ // pair in an IC/topo file to the target lat/lon params for a specific grid. This
+ // should be called on each grid that loads field data from file before reading
+ // data since the data file is not guaranteed to contain lat/lon for the correct
+ // grid (e.g., loading PHIS_d from topography file which only contains lat/lon on
+ // PG2 grid). EAMxx expects the ic file to contain lat/lon on GLL grid, and
+ // topography file to contain lat/lon on PG2 grid.
+ void setup_io_info (const std::string& file_name,
+ const grid_ptr& grid);
+
+ // Read ICs from file for IOP cases. We set all columns in the
+ // given fields to the values of the column in the file with the
+ // closest lat,lon pair to the target lat,lon in the parameters.
+ // The setup_io_info must be called for the correct grids before
+ // this function can be called.
+ // Input:
+ // - file_name: Name of the file used to load field data (IC or topo file)
+ // - field_names_nc: Field names used by the input file
+ // - field_names_eamxx: Field names used by eamxx
+ // - initial_ts: Initial timestamp
+ // Input/output
+ // - field_mgr: Field manager containing fields that need data read from files
+ void read_fields_from_file_for_iop(const std::string& file_name,
+ const vos& field_names_nc,
+ const vos& field_names_eamxx,
+ const util::TimeStamp& initial_ts,
+ const field_mgr_ptr field_mgr);
+
+ // Version of above, but where nc and eamxx field names are identical
+ void read_fields_from_file_for_iop(const std::string& file_name,
+ const vos& field_names,
+ const util::TimeStamp& initial_ts,
+ const field_mgr_ptr field_mgr)
+ {
+ read_fields_from_file_for_iop(file_name, field_names, field_names, initial_ts, field_mgr);
+ }
+
+ // Set fields using data loaded from the iop file
+ void set_fields_from_iop_data(const field_mgr_ptr field_mgr);
+
+ // The IOP file may contain temperature values that are
+ // 0 at or above the surface. Correct these values using
+ // the temperature T_mid (from field_mgr) where we
+ // replace all values T_iop(k) == 0 with T_mid(0, k).
+ // Likewise, at these k indices, we will replace q_iop(k)
+ // with qv(0, k).
+ // Note: We only need to use the first column because during
+ // the loading of ICs, every column will have the same
+ // data.
+ void correct_temperature_and_water_vapor(const field_mgr_ptr field_mgr);
+
+ // Store grid spacing for use in SHOC ad interface
+ void set_grid_spacing (const Real dx_short) {
+ m_dynamics_dx_size = dx_short*1000;
+ }
+
+ Real get_dynamics_dx_size () { return m_dynamics_dx_size; }
+
+ ekat::ParameterList& get_params() { return m_params; }
+
+ bool has_iop_field(const std::string& fname) {
+ return m_iop_fields.count(fname) > 0;
+ }
+
+ Field get_iop_field(const std::string& fname) {
+ EKAT_REQUIRE_MSG(has_iop_field(fname), "Error! Requesting IOP field \""+fname+"\", but field is not stored in object.\n");
+ return m_iop_fields[fname];
+ }
+
+private:
+
+ // Struct for storing info related
+ // to the closest lat,lon pair
+ struct ClosestLatLonInfo {
+ // Value for the closest lat/lon in file.
+ Real closest_lat;
+ Real closest_lon;
+ // MPI rank which owns the columns whose
+ // lat,lon pair is closest to target lat,
+ // lon parameters.
+ int mpi_rank_of_closest_column;
+ // Local column index of closest lat,lon pair.
+ // Should be set -1 on ranks not equal to the
+ // one above.
+ int local_column_index_of_closest_column;
+ };
+
+ // Struct for storing relevant time information
+ struct TimeInfo {
+ util::TimeStamp iop_file_begin_time;
+ view_1d_host<Real> iop_file_times_in_sec;
+
+ int time_idx_of_current_data = -1;
+
+ int get_iop_file_time_idx (const util::TimeStamp& current_ts)
+ {
+ // Get iop file time index that the given timestamp falls between.
+ // Note: the last time in iop file represents the non-inclusive
+ // upper bound of acceptable model times.
+ const auto n_iop_times = iop_file_times_in_sec.extent(0);
+ int time_idx=-1;
+ for (size_t t=0; t<n_iop_times-1; ++t) {
+ if (current_ts.seconds_from(iop_file_begin_time) >= iop_file_times_in_sec(t) &&
+ current_ts.seconds_from(iop_file_begin_time) < iop_file_times_in_sec(t+1)) {
+ time_idx = t;
+ }
+ }
+ EKAT_REQUIRE_MSG(time_idx>=0,
+ "Error! Current model time ("+current_ts.to_string()+") is not within "
+ "IOP time period: ["+iop_file_begin_time.to_string()+", "+
+ (iop_file_begin_time+iop_file_times_in_sec(n_iop_times-1)).to_string()+").\n");
+ return time_idx;
+ }
+ };
+
+ void initialize_iop_file(const util::TimeStamp& run_t0,
+ int model_nlevs,
+ const Field& hyam,
+ const Field& hybm);
+
+ ekat::Comm m_comm;
+ ekat::ParameterList m_params;
+
+ std::map<std::string,ClosestLatLonInfo> m_lat_lon_info;
+ TimeInfo m_time_info;
+
+ Real m_dynamics_dx_size;
+
+ std::map<std::string,grid_ptr> m_io_grids;
+
+ std::map<std::string,Field> m_iop_fields;
+ std::map<std::string,Field> m_helper_fields;
+
+ std::map<std::string,std::string> m_iop_file_varnames;
+ std::map<std::string,std::string> m_iop_field_surface_varnames;
+}; // class IntensiveObservationPeriod
+
+} // namespace control
+} // namespace scream
+
+#endif // #ifndef SCREAM_IOP_HPP
+
diff --git a/components/eamxx/src/control/tests/CMakeLists.txt b/components/eamxx/src/control/tests/CMakeLists.txt
index 5bc8a490d93c..43aa5cebab1b 100644
--- a/components/eamxx/src/control/tests/CMakeLists.txt
+++ b/components/eamxx/src/control/tests/CMakeLists.txt
@@ -3,7 +3,9 @@ if (NOT ${SCREAM_BASELINES_ONLY})
include (ScreamUtils)
# Unit test the ad
- CreateUnitTest(ad_ut "ad_tests.cpp" "scream_control;scream_share" LABELS "driver")
+ CreateUnitTest(ad_ut "ad_tests.cpp"
+ LIBS scream_control
+ LABELS driver)
# Copy yaml input file to run directory
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/ad_tests.yaml
diff --git a/components/eamxx/src/control/tests/ad_tests.cpp b/components/eamxx/src/control/tests/ad_tests.cpp
index da6d3c335012..cd19e2a6fd76 100644
--- a/components/eamxx/src/control/tests/ad_tests.cpp
+++ b/components/eamxx/src/control/tests/ad_tests.cpp
@@ -16,7 +16,7 @@ TEST_CASE ("ad_tests","[!throws]")
// Load ad parameter list
std::string fname = "ad_tests.yaml";
ekat::ParameterList ad_params("Atmosphere Driver");
- REQUIRE_NOTHROW ( parse_yaml_file(fname,ad_params) );
+ parse_yaml_file(fname,ad_params);
// Create a comm
ekat::Comm atm_comm (MPI_COMM_WORLD);
diff --git a/components/eamxx/src/control/tests/ad_tests.yaml b/components/eamxx/src/control/tests/ad_tests.yaml
index 8ac0a9bcb4cb..4b1c00865fc2 100644
--- a/components/eamxx/src/control/tests/ad_tests.yaml
+++ b/components/eamxx/src/control/tests/ad_tests.yaml
@@ -10,7 +10,7 @@ initial_conditions:
Z: "A"
atmosphere_processes:
- atm_procs_list: (dummy1, dummy2, dummy3)
+ atm_procs_list: [dummy1, dummy2, dummy3]
schedule_type: Sequential
dummy1:
@@ -28,6 +28,9 @@ atmosphere_processes:
grids_manager:
Type: Mesh Free
- number_of_global_columns: 24
- number_of_vertical_levels: 3
+ grids_names: ["Point Grid"]
+ Point Grid:
+ type: point_grid
+ number_of_global_columns: 24
+ number_of_vertical_levels: 3
...
diff --git a/components/eamxx/src/control/tests/dummy_atm_proc.hpp b/components/eamxx/src/control/tests/dummy_atm_proc.hpp
index d80d54ab36a2..ad11940c71c7 100644
--- a/components/eamxx/src/control/tests/dummy_atm_proc.hpp
+++ b/components/eamxx/src/control/tests/dummy_atm_proc.hpp
@@ -48,7 +48,8 @@ class DummyProcess : public scream::AtmosphereProcess {
FieldLayout layout_vec ( {COL,CMP,LEV}, {num_cols,2,num_levs} );
if (m_dummy_type==A2G) {
- add_field<Required>("A",layout,ekat::units::m,m_grid->name());
+ // Check request by field/grid name only works
+ add_field<Required>("A",m_grid->name());
add_field<Required>("B",layout,ekat::units::m,m_grid->name(),"The Group");
add_field<Required>("C",layout,ekat::units::m,m_grid->name(),"The Group");
// These are not used at run time, but we use them to test
diff --git a/components/eamxx/src/diagnostics/CMakeLists.txt b/components/eamxx/src/diagnostics/CMakeLists.txt
index 7f0bd47e3296..5818c4d836a8 100644
--- a/components/eamxx/src/diagnostics/CMakeLists.txt
+++ b/components/eamxx/src/diagnostics/CMakeLists.txt
@@ -2,31 +2,23 @@ set(DIAGNOSTIC_SRCS
atm_density.cpp
dry_static_energy.cpp
exner.cpp
+ field_at_height.cpp
field_at_level.cpp
field_at_pressure_level.cpp
- ice_water_path.cpp
- liquid_water_path.cpp
longwave_cloud_forcing.cpp
- meridional_vapor_flux.cpp
potential_temperature.cpp
- precip_ice_surf_mass_flux.cpp
- precip_liq_surf_mass_flux.cpp
- precip_total_surf_mass_flux.cpp
- rain_water_path.cpp
+ precip_surf_mass_flux.cpp
relative_humidity.cpp
- rime_water_path.cpp
sea_level_pressure.cpp
shortwave_cloud_forcing.cpp
- vapor_water_path.cpp
- vertical_layer_interface.cpp
- vertical_layer_midpoint.cpp
- vertical_layer_thickness.cpp
+ surf_upward_latent_heat_flux.cpp
+ vapor_flux.cpp
+ vertical_layer.cpp
virtual_temperature.cpp
- zonal_vapor_flux.cpp
+ water_path.cpp
)
add_library(diagnostics ${DIAGNOSTIC_SRCS})
-target_include_directories(diagnostics PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/../share)
target_link_libraries(diagnostics PUBLIC scream_share)
if (NOT SCREAM_LIB_ONLY)
diff --git a/components/eamxx/src/diagnostics/atm_density.cpp b/components/eamxx/src/diagnostics/atm_density.cpp
index 40fe23d73c07..24749ba1a1ef 100644
--- a/components/eamxx/src/diagnostics/atm_density.cpp
+++ b/components/eamxx/src/diagnostics/atm_density.cpp
@@ -1,16 +1,16 @@
#include "diagnostics/atm_density.hpp"
+#include "share/util/scream_common_physics_functions.hpp"
namespace scream
{
-// =========================================================================================
-AtmDensityDiagnostic::AtmDensityDiagnostic (const ekat::Comm& comm, const ekat::ParameterList& params)
- : AtmosphereDiagnostic(comm,params)
+AtmDensityDiagnostic::
+AtmDensityDiagnostic (const ekat::Comm& comm, const ekat::ParameterList& params)
+ : AtmosphereDiagnostic(comm,params)
{
// Nothing to do here
}
-// =========================================================================================
void AtmDensityDiagnostic::set_grids(const std::shared_ptr<const GridsManager> grids_manager)
{
using namespace ekat::units;
@@ -27,30 +27,32 @@ void AtmDensityDiagnostic::set_grids(const std::shared_ptr g
// Set Field Layouts
FieldLayout scalar3d_layout_mid { {COL,LEV}, {m_num_cols,m_num_levs} };
- constexpr int ps = Pack::n;
// The fields required for this diagnostic to be computed
- add_field<Required>("T_mid", scalar3d_layout_mid, K, grid_name, ps);
- add_field<Required>("pseudo_density", scalar3d_layout_mid, Pa, grid_name, ps);
- add_field<Required>("p_mid", scalar3d_layout_mid, Pa, grid_name, ps);
- add_field<Required>("qv", scalar3d_layout_mid, Q, grid_name, "tracers", ps);
+ add_field<Required>("T_mid", scalar3d_layout_mid, K, grid_name, SCREAM_PACK_SIZE);
+ add_field<Required>("pseudo_density", scalar3d_layout_mid, Pa, grid_name, SCREAM_PACK_SIZE);
+ add_field<Required>("p_mid", scalar3d_layout_mid, Pa, grid_name, SCREAM_PACK_SIZE);
+ add_field<Required>("qv", scalar3d_layout_mid, Q, grid_name, SCREAM_PACK_SIZE);
// Construct and allocate the diagnostic field
FieldIdentifier fid (name(), scalar3d_layout_mid, kg/(m*m*m), grid_name);
m_diagnostic_output = Field(fid);
auto& C_ap = m_diagnostic_output.get_header().get_alloc_properties();
- C_ap.request_allocation(ps);
+ C_ap.request_allocation(SCREAM_PACK_SIZE);
m_diagnostic_output.allocate_view();
}
-// =========================================================================================
+
void AtmDensityDiagnostic::compute_diagnostic_impl()
{
+ using Pack = ekat::Pack