diff --git a/.github/actions/check-skip-labels/README.md b/.github/actions/check-skip-labels/README.md
deleted file mode 100644
index 42d1e3bc64e..00000000000
--- a/.github/actions/check-skip-labels/README.md
+++ /dev/null
@@ -1,24 +0,0 @@
-# Composite action to check if we can skip a job for a PR
-
-This action is meant to be used inside a PR testing workflow, as
-
-```yaml
-jobs:
- my-testing:
- steps:
- ...
- - name: check-pr-labels
- if: github.event_name == "pull_request" || github.event_name == "pull_request_review"
- uses: ./.github/actions/check-skip-labels
- with:
- skip_labels: label1,label2,label3
-```
-
-The input skip_label is a comma-separated list of labels that, if found
-on the PR, will cause this job to terminate immediately with a PASS state.
-
-Ideally, we would like to run this check at the job level, so that we can
-skip the job altogether (without using runner time). But while for the
-pull_request event we DO have access to the labels from the gh context
-(and therefore can check), for pull_request_review we don't, so we need
-to ping github for some information
diff --git a/.github/actions/check-skip-labels/action.yml b/.github/actions/check-skip-labels/action.yml
deleted file mode 100644
index 55d35bfa2b0..00000000000
--- a/.github/actions/check-skip-labels/action.yml
+++ /dev/null
@@ -1,48 +0,0 @@
-name: 'Check Skip Labels'
-description: 'Check for specific skip labels in a pull request'
-inputs:
- skip_labels:
- description: 'Comma-separated list of skip labels'
- required: true
- token:
- description: 'GitHub token for authentication'
- required: true
- pr_number:
- description: 'Pull request number'
- required: true
-
-# Note: inputs are available as env vars in the shell run steps, convertet to uppercase
-
-runs:
- using: "composite"
- steps:
- - name: Get Pull Request Labels
- run: |
- echo "Fetching pull request labels..."
- if [[ "$GITHUB_EVENT_NAME" == "pull_request" ]]; then
- LABELS="$SKIP_LABELS"
- elif [[ "$GITHUB_EVENT_NAME" == "pull_request_review" ]]; then
- response=$(curl -s -H "Authorization: token $TOKEN" \
- "https://api.github.com/repos/$GITHUB_REPOSITORY/pulls/$PR_NUMBER")
- # TODO: reinstante jq once the SNL image is rebuilt!
- # LABELS=$(echo "$response" | jq -r '.labels | map(.name) | join(",")')
- LABELS=$(echo "$response" | grep -o '"name": *"[^"]*"' | sed 's/"name": *//;s/"//g' | tr '\n' ',' | sed 's/,$//')
- fi
- echo "labels=$LABELS" >> $GITHUB_ENV
- shell: sh
- - name: Check for Skip Labels
- run: |
- echo "Checking for skip labels..."
- IFS=',' read -r -a SKIP_LABELS <<< "$SKIP_LABELS"
- IFS=',' read -r -a LABEL_ARRAY <<< "$labels"
-
- for label in "${SKIP_LABELS[@]}"; do
- for pr_label in "${LABEL_ARRAY[@]}"; do
- if [[ "$pr_label" == "$label" ]]; then
- echo "Found skip label '$label'. Skipping this job."
- exit 0 # Exit with success status
- fi
- done
- done
- echo "No relevant skip labels found. Continuing with the job."
- shell: sh
diff --git a/.github/workflows/e3sm-gh-ci-cime-tests.yml b/.github/workflows/e3sm-gh-ci-cime-tests.yml
index 04f7fcb4ffc..5c6ff081f73 100644
--- a/.github/workflows/e3sm-gh-ci-cime-tests.yml
+++ b/.github/workflows/e3sm-gh-ci-cime-tests.yml
@@ -22,10 +22,14 @@ on:
workflow_dispatch:
+concurrency:
+ group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event.pull_request.number || github.run_id }}
+ cancel-in-progress: true
+
jobs:
ci:
- if: false
+ if: ${{ github.repository == 'E3SM-Project/E3SM' }}
runs-on: ubuntu-latest
strategy:
fail-fast: false
@@ -36,7 +40,7 @@ jobs:
- SMS_D_Ln5_P4.ne4pg2_oQU480.F2010-SCREAMv1-MPASSI.ghci-oci_gnu
- ERS_Ld5_P4.ne4pg2_oQU480.F2010-SCREAMv1-MPASSI.ghci-oci_gnu.eamxx-prod
container:
- image: ghcr.io/e3sm-project/containers-ghci:ghci-0.1.0
+ image: ghcr.io/e3sm-project/containers-ghci:ghci-0.2.0
steps:
-
diff --git a/.github/workflows/e3sm-gh-ci-w-cime-tests.yml b/.github/workflows/e3sm-gh-ci-w-cime-tests.yml
index 48c367c8f62..f51aa88a34c 100644
--- a/.github/workflows/e3sm-gh-ci-w-cime-tests.yml
+++ b/.github/workflows/e3sm-gh-ci-w-cime-tests.yml
@@ -1,4 +1,4 @@
-name: gh
+name: gh-w
on:
pull_request:
@@ -11,10 +11,14 @@ on:
workflow_dispatch:
+concurrency:
+ group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event.pull_request.number || github.run_id }}
+ cancel-in-progress: true
+
jobs:
- ci-w:
- if: ${{ github.event.repository.name == 'e3sm' }}
+ ci:
+ if: ${{ github.repository == 'E3SM-Project/E3SM' }}
runs-on: ubuntu-latest
strategy:
fail-fast: false
@@ -23,7 +27,7 @@ jobs:
- SMS_D_Ld1_P8.ne4pg2_oQU480.WCYCL2010NS.ghci-oci_gnu
- ERS_Ld3_P8.ne4pg2_oQU480.WCYCL2010NS.ghci-oci_gnu.allactive-wcprod_1850
container:
- image: ghcr.io/e3sm-project/containers-ghci:ghci-0.1.0
+ image: ghcr.io/e3sm-project/containers-ghci:ghci-0.2.0
steps:
-
diff --git a/.github/workflows/e3sm-gh-md-linter.yml b/.github/workflows/e3sm-gh-md-linter.yml
index 424a871637b..ad24487695e 100644
--- a/.github/workflows/e3sm-gh-md-linter.yml
+++ b/.github/workflows/e3sm-gh-md-linter.yml
@@ -10,8 +10,13 @@ on:
# for now let's not lint files in eamxx
- '!components/eamxx/**/*.md'
+concurrency:
+ group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event.pull_request.number || github.run_id }}
+ cancel-in-progress: true
+
jobs:
linter:
+ if: ${{ github.repository == 'E3SM-Project/E3SM' }}
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
@@ -22,7 +27,7 @@ jobs:
with:
files: '**/*.md'
separator: ","
- - uses: DavidAnson/markdownlint-cli2-action@v17
+ - uses: DavidAnson/markdownlint-cli2-action@v18
if: steps.changed-files.outputs.any_changed == 'true'
with:
config: 'docs/.markdownlint.json'
diff --git a/.github/workflows/e3sm-gh-pages.yml b/.github/workflows/e3sm-gh-pages.yml
index ebd2ac9c1e9..dec9bc696bf 100644
--- a/.github/workflows/e3sm-gh-pages.yml
+++ b/.github/workflows/e3sm-gh-pages.yml
@@ -15,7 +15,7 @@ concurrency:
jobs:
Build-and-Deploy-docs:
- if: ${{ github.event.repository.name == 'e3sm' }}
+ if: ${{ github.repository == 'E3SM-Project/E3SM' }}
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
diff --git a/.github/workflows/e3sm-gh-tools-mkatmsrffile-test.yml b/.github/workflows/e3sm-gh-tools-mkatmsrffile-test.yml
index 8fe212886d9..cacb951b8a8 100644
--- a/.github/workflows/e3sm-gh-tools-mkatmsrffile-test.yml
+++ b/.github/workflows/e3sm-gh-tools-mkatmsrffile-test.yml
@@ -11,8 +11,13 @@ on:
- cron: '00 15 * * 2'
workflow_dispatch:
+concurrency:
+ group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event.pull_request.number || github.run_id }}
+ cancel-in-progress: true
+
jobs:
mkatmsrffile-test:
+ if: ${{ github.repository == 'E3SM-Project/E3SM' }}
runs-on: ubuntu-latest
defaults:
run:
@@ -31,10 +36,7 @@ jobs:
uses: conda-incubator/setup-miniconda@v3
with:
activate-environment: "envmkatmsrffile"
- miniforge-variant: Mambaforge
miniforge-version: latest
- use-mamba: true
- mamba-version: "*"
channel-priority: strict
auto-update-conda: true
python-version: 3.11
@@ -42,7 +44,7 @@ jobs:
name: Install dependencies
run: |
echo $CONDA_PREFIX
- mamba install -y nco xarray numba numpy netcdf4
+ conda install -y nco xarray numba numpy netcdf4 -c conda-forge
-
name: Run tests
working-directory: components/eam/tools/mkatmsrffile
diff --git a/.github/workflows/eamxx-gh-ci-standalone.yml b/.github/workflows/eamxx-gh-ci-standalone.yml
new file mode 100644
index 00000000000..19a2ec9cd8e
--- /dev/null
+++ b/.github/workflows/eamxx-gh-ci-standalone.yml
@@ -0,0 +1,60 @@
+name: gh-standalone
+
+on:
+ pull_request:
+ branches: [ master ]
+ paths:
+ # first, yes to these
+ - '.github/workflows/eamxx-gh-ci-standalone.yml'
+ - 'cime_config/machine/config_machines.xml'
+ - 'components/eamxx/**'
+ - 'components/homme/**'
+ # second, no to these
+ - '!components/eamxx/docs/**'
+ - '!components/eamxx/mkdocs.yml'
+
+ workflow_dispatch:
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event.pull_request.number || github.run_id }}
+ cancel-in-progress: true
+
+jobs:
+
+ ci:
+ if: ${{ github.repository == 'E3SM-Project/E3SM' }}
+ runs-on: ubuntu-latest
+ strategy:
+ fail-fast: false
+ matrix:
+ test:
+ - sp
+ - opt
+ - dbg
+ - fpe
+ container:
+ image: ghcr.io/e3sm-project/containers-standalone-ghci:standalone-ghci-0.1.0
+
+ steps:
+ -
+ name: Checkout
+ uses: actions/checkout@v4
+ with:
+ show-progress: false
+ submodules: recursive
+ -
+ name: standalone
+ env:
+ SHELL: sh
+ run: |
+ # TODO: get rid of this extra line if we can?
+ git config --global safe.directory '*'
+ ./components/eamxx/scripts/test-all-scream -m ghci-oci -t ${{ matrix.test }} -c BUILD_SHARED_LIBS=ON
+ -
+ name: Artifacts
+ uses: actions/upload-artifact@v4
+ if: ${{ always() }}
+ with:
+ name: ${{ matrix.test }}
+ path: |
+ components/eamxx/ctest-build/*/Testing/Temporary/Last*.log
diff --git a/.github/workflows/eamxx-gh-pages.yml b/.github/workflows/eamxx-gh-pages.yml
index 28415b54dbd..2e763c544cd 100644
--- a/.github/workflows/eamxx-gh-pages.yml
+++ b/.github/workflows/eamxx-gh-pages.yml
@@ -34,7 +34,7 @@ concurrency:
jobs:
eamxx-docs:
- if: ${{ github.event.repository.name == 'scream' }}
+ if: ${{ github.repository == 'E3SM-Project/scream' }}
runs-on: ubuntu-latest
steps:
diff --git a/.github/workflows/eamxx-sa-coverage.yml b/.github/workflows/eamxx-sa-coverage.yml
index aa69d6263e6..46e87f8b8b0 100644
--- a/.github/workflows/eamxx-sa-coverage.yml
+++ b/.github/workflows/eamxx-sa-coverage.yml
@@ -2,6 +2,11 @@ name: eamxx-sa-coverage
on:
workflow_dispatch:
+ inputs:
+ submit:
+ description: 'Force cdash submission'
+ required: true
+ type: boolean
# Add schedule trigger for nightly runs at midnight MT (Standard Time)
schedule:
@@ -13,7 +18,8 @@ concurrency:
cancel-in-progress: true
env:
- submit: ${{ github.event_name == 'schedule' && 'true' || 'false' }} # Submit to cdash only for nightlies
+ # Submit to cdash only for nightlies or if the user explicitly forced a submission via workflow dispatch
+ submit: ${{ github.event_name == 'schedule' || (github.event_name == 'workflow_dispatch' && inputs.submit) }}
jobs:
gcc-openmp:
@@ -48,6 +54,35 @@ jobs:
submodules: recursive
- name: Show action trigger
uses: ./.github/actions/show-workflow-trigger
+ - name: Get CUDA Arch
+ run: |
+ # Ensure nvidia-smi is available
+ if ! command -v nvidia-smi &> /dev/null; then
+ echo "nvidia-smi could not be found. Please ensure you have Nvidia drivers installed."
+ exit 1
+ fi
+
+ # Get the GPU model from nvidia-smi, and set env for next step
+ gpu_model=$(nvidia-smi --query-gpu=name --format=csv,noheader | head -n 1)
+ case "$gpu_model" in
+ *"H100"*)
+ echo "Hopper=ON" >> $GITHUB_ENV
+ echo "CUDA_ARCH=90" >> $GITHUB_ENV
+ ARCH=90
+ ;;
+ *"A100"*)
+ echo "Ampere=ON" >> $GITHUB_ENV
+ echo "CUDA_ARCH=80" >> $GITHUB_ENV
+ ;;
+ *"V100"*)
+ echo "Volta=ON" >> $GITHUB_ENV
+ echo "CUDA_ARCH=70" >> $GITHUB_ENV
+ ;;
+ *)
+ echo "Unsupported GPU model: $gpu_model"
+ exit 1
+ ;;
+ esac
- name: Run tests
uses: ./.github/actions/test-all-scream
with:
@@ -55,4 +90,4 @@ jobs:
machine: ghci-snl-cuda
generate: false
submit: ${{ env.submit }}
- cmake-configs: Kokkos_ARCH_VOLTA70=ON;CMAKE_CUDA_ARCHITECTURES=70
+ cmake-configs: Kokkos_ARCH_HOPPER90=${{ env.Hopper }};Kokkos_ARCH_AMPERE80=${{ env.Ampere }};Kokkos_ARCH_VOLTA70=${{ env.Volta }};CMAKE_CUDA_ARCHITECTURES=${{ env.CUDA_ARCH }}
diff --git a/.github/workflows/eamxx-sa-sanitizer.yml b/.github/workflows/eamxx-sa-sanitizer.yml
index 7e3a1a49fcf..00f60f7da2e 100644
--- a/.github/workflows/eamxx-sa-sanitizer.yml
+++ b/.github/workflows/eamxx-sa-sanitizer.yml
@@ -2,6 +2,11 @@ name: eamxx-sa-sanitizer
on:
workflow_dispatch:
+ inputs:
+ submit:
+ description: 'Force cdash submission'
+ required: true
+ type: boolean
# Add schedule trigger for nightly runs at midnight MT (Standard Time)
schedule:
@@ -13,12 +18,13 @@ concurrency:
cancel-in-progress: true
env:
- submit: ${{ github.event_name == 'schedule' && 'true' || 'false' }} # Submit to cdash only for nightlies
+ # Submit to cdash only for nightlies or if the user explicitly forced a submission via workflow dispatch
+ submit: ${{ github.event_name == 'schedule' || (github.event_name == 'workflow_dispatch' && inputs.submit) }}
jobs:
gcc-openmp:
runs-on: [self-hosted, ghci-snl-cpu, gcc]
- name: gcc-openmp / cov
+ name: gcc-openmp / valg
steps:
- name: Check out the repository
uses: actions/checkout@v4
@@ -52,6 +58,35 @@ jobs:
submodules: recursive
- name: Show action trigger
uses: ./.github/actions/show-workflow-trigger
+ - name: Get CUDA Arch
+ run: |
+ # Ensure nvidia-smi is available
+ if ! command -v nvidia-smi &> /dev/null; then
+ echo "nvidia-smi could not be found. Please ensure you have Nvidia drivers installed."
+ exit 1
+ fi
+
+ # Get the GPU model from nvidia-smi, and set env for next step
+ gpu_model=$(nvidia-smi --query-gpu=name --format=csv,noheader | head -n 1)
+ case "$gpu_model" in
+ *"H100"*)
+ echo "Hopper=ON" >> $GITHUB_ENV
+ echo "CUDA_ARCH=90" >> $GITHUB_ENV
+ ARCH=90
+ ;;
+ *"A100"*)
+ echo "Ampere=ON" >> $GITHUB_ENV
+ echo "CUDA_ARCH=80" >> $GITHUB_ENV
+ ;;
+ *"V100"*)
+ echo "Volta=ON" >> $GITHUB_ENV
+ echo "CUDA_ARCH=70" >> $GITHUB_ENV
+ ;;
+ *)
+ echo "Unsupported GPU model: $gpu_model"
+ exit 1
+ ;;
+ esac
- name: Run tests
uses: ./.github/actions/test-all-scream
with:
@@ -59,4 +94,4 @@ jobs:
machine: ghci-snl-cuda
generate: false
submit: ${{ env.submit }}
- cmake-configs: Kokkos_ARCH_VOLTA70=ON;CMAKE_CUDA_ARCHITECTURES=70
+ cmake-configs: Kokkos_ARCH_HOPPER90=${{ env.Hopper }};Kokkos_ARCH_AMPERE80=${{ env.Ampere }};Kokkos_ARCH_VOLTA70=${{ env.Volta }};CMAKE_CUDA_ARCHITECTURES=${{ env.CUDA_ARCH }}
diff --git a/.github/workflows/eamxx-sa-testing.yml b/.github/workflows/eamxx-sa-testing.yml
index 12e45514ad0..a4397d4fdba 100644
--- a/.github/workflows/eamxx-sa-testing.yml
+++ b/.github/workflows/eamxx-sa-testing.yml
@@ -5,17 +5,6 @@ on:
pull_request:
branches: [ master ]
types: [opened, synchronize, ready_for_review, reopened]
- paths:
- - components/eamxx/**
- - components/eam/src/physics/rrtmgp/**
- - components/eam/src/physics/p3/scream/**
- - components/eam/src/physics/cam/**
- - .github/workflows/eamxx-standalone-testing.yml
- - externals/ekat/**
- - externals/scorpio/**
- - externals/haero/**
- - externals/YAKL/**
- - components/eam/src/physics/rrtmgp/external/**
# Manual run is used to bless
workflow_dispatch:
@@ -32,6 +21,10 @@ on:
description: 'Generate baselines'
required: true
type: boolean
+ submit:
+ description: 'Force cdash submission'
+ required: true
+ type: boolean
# Add schedule trigger for nightly runs at midnight MT (Standard Time)
schedule:
@@ -45,18 +38,90 @@ concurrency:
cancel-in-progress: true
env:
- submit: ${{ github.event_name == 'schedule' && 'true' || 'false' }} # Submit to cdash only for nightlies
+ # Submit to cdash only for nightlies or if the user explicitly forced a submission via workflow dispatch
+ submit: ${{ github.event_name == 'schedule' || (github.event_name == 'workflow_dispatch' && inputs.submit) }}
jobs:
+ pre_process_pr:
+ if: ${{ github.event_name == 'pull_request' }}
+ runs-on: ubuntu-latest # This job can run anywhere
+ outputs:
+ relevant_paths: ${{ steps.check_paths.outputs.value }}
+ labels: ${{ steps.get_labels.outputs.labels }}
+ steps:
+ - name: Check files modified by PR
+ id: check_paths
+ run: |
+ paths=(
+ components/eamxx
+ components/eam/src/physics/rrtmgp
+ components/eam/src/physics/p3/scream
+ components/eam/src/physics/cam
+ components/eam/src/physics/rrtmgp/external
+ externals/ekat
+ externals/scorpio
+ externals/haero
+ externals/YAKL
+ .github/workflows/eamxx-sa-testing.yml
+ )
+ pattern=$(IFS=\|; echo "${paths[*]}")
+
+ # Use the GitHub API to get the list of changed files
+ # There are page size limits, so do it in chunks
+ page=1
+ while true; do
+ response=$(curl -s -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ "https://api.github.com/repos/E3SM-Project/scream/pulls/${{ github.event.number }}/files?per_page=100&page=$page")
+
+ # Check if the response is empty, and break if it is
+ [ -z "$response" ] && break
+
+ changed_files+=$(echo "$response" | grep -o '"filename": *"[^"]*"' | sed 's/"filename": *//; s/"//g')$'\n'
+
+          # Check if there are more pages, and quit if there aren't
+ [[ $(echo "$response" | jq '. | length') -lt 100 ]] && break
+
+ page=$((page + 1))
+ done
+
+ # Check for matches and echo the matching files (or "" if none)
+ matching_files=$(echo "$changed_files" | grep -E "^($pattern)" || echo "")
+ if [[ -n "$matching_files" ]]; then
+ echo "Found relevant files: $matching_files"
+ echo "value=true" >> $GITHUB_OUTPUT
+ else
+ echo "No relevant files touched by this PR."
+ echo "value=false" >> $GITHUB_OUTPUT
+ fi
+ - name: Retrieve PR labels
+ id: get_labels
+ run: |
+ labels="${{ join(github.event.pull_request.labels.*.name, ',') }}"
+ echo "labels=${labels}" >> $GITHUB_OUTPUT
gcc-openmp:
+ needs: [pre_process_pr]
+ if: |
+ !failure() && !cancelled() &&
+ (
+ github.event_name == 'schedule' ||
+ (
+ github.event_name == 'pull_request' &&
+ needs.pre_process_pr.outputs.relevant_paths=='true' &&
+ !contains(needs.pre_process_pr.outputs.labels,'CI: skip gcc') &&
+ !contains(needs.pre_process_pr.outputs.labels,'CI: skip openmp') &&
+ !contains(needs.pre_process_pr.outputs.labels,'CI: skip eamxx-sa') &&
+ !contains(needs.pre_process_pr.outputs.labels,'CI: skip eamxx-all')
+ ) || (
+ github.event_name == 'workflow_dispatch' &&
+ github.event.inputs.job_to_run == 'gcc-openmp' ||
+ github.event.inputs.job_to_run == 'all'
+ )
+ )
runs-on: [self-hosted, ghci-snl-cpu, gcc]
strategy:
fail-fast: false
matrix:
build_type: [sp, dbg, fpe, opt]
- if: ${{ github.event_name != 'workflow_dispatch' ||
- github.event.inputs.job_to_run == 'gcc-openmp' ||
- github.event.inputs.job_to_run == 'all' }}
name: gcc-openmp / ${{ matrix.build_type }}
steps:
- name: Check out the repository
@@ -67,13 +132,6 @@ jobs:
submodules: recursive
- name: Show action trigger
uses: ./.github/actions/show-workflow-trigger
- - name: Check for skip labels
- if: ${{ github.event_name == 'pull_request' || github.event_name == 'pull_request_review' }}
- uses: ./.github/actions/check-skip-labels
- with:
- skip_labels: 'AT: skip gcc,AT: skip openmp,AT: skip eamxx-sa,AT: skip eamxx-all'
- token: ${{ secrets.GITHUB_TOKEN }}
- pr_number: ${{ github.event.pull_request.number }}
- name: Set test-all inputs based on event specs
run: |
echo "generate=false" >> $GITHUB_ENV
@@ -91,14 +149,29 @@ jobs:
submit: ${{ env.submit }}
cmake-configs: Kokkos_ENABLE_OPENMP=ON
gcc-cuda:
+ needs: [pre_process_pr]
+ if: |
+ !failure() && !cancelled() &&
+ (
+ github.event_name == 'schedule' ||
+ (
+ github.event_name == 'pull_request' &&
+ needs.pre_process_pr.outputs.relevant_paths=='true' &&
+ !contains(needs.pre_process_pr.outputs.labels,'CI: skip gcc') &&
+ !contains(needs.pre_process_pr.outputs.labels,'CI: skip cuda') &&
+ !contains(needs.pre_process_pr.outputs.labels,'CI: skip eamxx-sa') &&
+ !contains(needs.pre_process_pr.outputs.labels,'CI: skip eamxx-all')
+ ) || (
+ github.event_name == 'workflow_dispatch' &&
+ github.event.inputs.job_to_run == 'gcc-cuda' ||
+ github.event.inputs.job_to_run == 'all'
+ )
+ )
runs-on: [self-hosted, ghci-snl-cuda, cuda, gcc]
strategy:
fail-fast: false
matrix:
build_type: [sp, dbg, opt]
- if: ${{ github.event_name != 'workflow_dispatch' ||
- github.event.inputs.job_to_run == 'gcc-cuda' ||
- github.event.inputs.job_to_run == 'all' }}
name: gcc-cuda / ${{ matrix.build_type }}
steps:
- name: Check out the repository
@@ -109,13 +182,6 @@ jobs:
submodules: recursive
- name: Show action trigger
uses: ./.github/actions/show-workflow-trigger
- - name: Check for skip labels
- if: ${{ github.event_name == 'pull_request' || github.event_name == 'pull_request_review' }}
- uses: ./.github/actions/check-skip-labels
- with:
- skip_labels: 'AT: skip gcc,AT: skip cuda,AT: skip eamxx-sa,AT: skip eamxx-all'
- token: ${{ secrets.GITHUB_TOKEN }}
- pr_number: ${{ github.event.pull_request.number }}
- name: Set test-all inputs based on event specs
run: |
echo "generate=false" >> $GITHUB_ENV
@@ -124,6 +190,35 @@ jobs:
echo "generate=true" >> $GITHUB_ENV
fi
fi
+ - name: Get CUDA Arch
+ run: |
+ # Ensure nvidia-smi is available
+ if ! command -v nvidia-smi &> /dev/null; then
+ echo "nvidia-smi could not be found. Please ensure you have Nvidia drivers installed."
+ exit 1
+ fi
+
+ # Get the GPU model from nvidia-smi, and set env for next step
+ gpu_model=$(nvidia-smi --query-gpu=name --format=csv,noheader | head -n 1)
+ case "$gpu_model" in
+ *"H100"*)
+ echo "Hopper=ON" >> $GITHUB_ENV
+ echo "CUDA_ARCH=90" >> $GITHUB_ENV
+ ARCH=90
+ ;;
+ *"A100"*)
+ echo "Ampere=ON" >> $GITHUB_ENV
+ echo "CUDA_ARCH=80" >> $GITHUB_ENV
+ ;;
+ *"V100"*)
+ echo "Volta=ON" >> $GITHUB_ENV
+ echo "CUDA_ARCH=70" >> $GITHUB_ENV
+ ;;
+ *)
+ echo "Unsupported GPU model: $gpu_model"
+ exit 1
+ ;;
+ esac
- name: Run tests
uses: ./.github/actions/test-all-scream
with:
@@ -131,4 +226,4 @@ jobs:
machine: ghci-snl-cuda
generate: ${{ env.generate }}
submit: ${{ env.submit }}
- cmake-configs: Kokkos_ARCH_VOLTA70=ON;CMAKE_CUDA_ARCHITECTURES=70
+ cmake-configs: Kokkos_ARCH_HOPPER90=${{ env.Hopper }};Kokkos_ARCH_AMPERE80=${{ env.Ampere }};Kokkos_ARCH_VOLTA70=${{ env.Volta }};CMAKE_CUDA_ARCHITECTURES=${{ env.CUDA_ARCH }}
diff --git a/.github/workflows/eamxx-scripts-tests.yml b/.github/workflows/eamxx-scripts-tests.yml
index bd719539bab..a14cdc4f350 100644
--- a/.github/workflows/eamxx-scripts-tests.yml
+++ b/.github/workflows/eamxx-scripts-tests.yml
@@ -5,18 +5,14 @@ on:
pull_request:
branches: [ master ]
types: [opened, synchronize, ready_for_review, reopened]
- paths:
- - components/eamxx/scripts/**
- - components/eamxx/cime_config/*.py
- - .github/workflows/eamxx-scripts-tests.yml
- - externals/ekat/**
- - externals/scorpio/**
- - externals/haero/**
- - externals/YAKL/**
- - components/eam/src/physics/rrtmgp/external/**
# Manual run for debug purposes only
workflow_dispatch:
+ inputs:
+ submit:
+ description: 'Force cdash submission'
+ required: true
+ type: boolean
# Add schedule trigger for nightly runs at midnight MT (Standard Time)
schedule:
@@ -29,8 +25,73 @@ concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
+env:
+ # Submit to cdash only for nightlies or if the user explicitly forced a submission via workflow dispatch
+ submit: ${{ github.event_name == 'schedule' || (github.event_name == 'workflow_dispatch' && inputs.submit) }}
+
jobs:
+ pre_process_pr:
+ if: ${{ github.event_name == 'pull_request' }}
+ runs-on: ubuntu-latest # This job can run anywhere
+ outputs:
+      relevant_paths: ${{ steps.check_paths.outputs.value }}
+ labels: ${{ steps.get_labels.outputs.labels }}
+ steps:
+ - name: Check files modified by PR
+ id: check_paths
+ run: |
+ paths=(
+ components/eamxx/scripts
+ components/eamxx/cime_config/eamxx
+ components/eamxx/cime_config/build
+ components/eamxx/cime_config/yaml_utils.py
+ .github/workflows/eamxx-scripts-tests.yml
+ )
+ pattern=$(IFS=\|; echo "${paths[*]}")
+
+ # Use the GitHub API to get the list of changed files
+ # There are page size limits, so do it in chunks
+ page=1
+ while true; do
+ response=$(curl -s -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ "https://api.github.com/repos/E3SM-Project/scream/pulls/${{ github.event.number }}/files?per_page=100&page=$page")
+
+ # Check if the response is empty, and break if it is
+ [ -z "$response" ] && break
+
+ changed_files+=$(echo "$response" | grep -o '"filename": *"[^"]*"' | sed 's/"filename": *//; s/"//g')$'\n'
+
+          # Check if there are more pages, and quit if there aren't
+ [[ $(echo "$response" | jq '. | length') -lt 100 ]] && break
+
+ page=$((page + 1))
+ done
+
+ # Check for matches and echo the matching files (or "" if none)
+ matching_files=$(echo "$changed_files" | grep -E "^($pattern)" || echo "")
+ if [[ -n "$matching_files" ]]; then
+ echo "Found relevant files: $matching_files"
+ echo "value=true" >> $GITHUB_OUTPUT
+ else
+ echo "No relevant files touched by this PR."
+ echo "value=false" >> $GITHUB_OUTPUT
+ fi
+ - name: Retrieve PR labels
+ id: get_labels
+ run: |
+ labels="${{ join(github.event.pull_request.labels.*.name, ',') }}"
+ echo "labels=${labels}" >> $GITHUB_OUTPUT
cpu-gcc:
+ needs: [pre_process_pr]
+ if: |
+ !failure() && !cancelled() &&
+ (
+ github.event_name != 'pull_request' ||
+ (
+ needs.pre_process_pr.outputs.relevant_paths == 'true' &&
+ !contains(needs.pre_process_pr.outputs.labels, 'CI: skip eamxx-all')
+ )
+ )
runs-on: [self-hosted, gcc, ghci-snl-cpu]
steps:
- name: Check out the repository
@@ -41,17 +102,10 @@ jobs:
submodules: recursive
- name: Show action trigger
uses: ./.github/actions/show-workflow-trigger
- - name: Check for skip labels
- if: ${{ github.event_name == 'pull_request' || github.event_name == 'pull_request_review' }}
- uses: ./.github/actions/check-skip-labels
- with:
- skip_labels: 'AT: skip eamxx-all'
- token: ${{ secrets.GITHUB_TOKEN }}
- pr_number: ${{ github.event.pull_request.number }}
- name: Run test
run: |
cd components/eamxx
- if [ ${{ github.event_name == 'schedule' }} ]; then
+ if [ "${{ env.submit }}" == "true" ]; then
./scripts/scripts-ctest-driver -s -m ghci-snl-cpu
else
./scripts/scripts-tests -f -m ghci-snl-cpu
diff --git a/.github/workflows/eamxx-v1-testing.yml b/.github/workflows/eamxx-v1-testing.yml
index 47129159f82..d55ed8252a5 100644
--- a/.github/workflows/eamxx-v1-testing.yml
+++ b/.github/workflows/eamxx-v1-testing.yml
@@ -5,17 +5,6 @@ on:
pull_request:
branches: [ master ]
types: [opened, synchronize, ready_for_review, reopened]
- paths:
- - components/eamxx/**
- - components/eam/src/physics/rrtmgp/**
- - components/eam/src/physics/p3/scream/**
- - components/eam/src/physics/cam/**
- - .github/workflows/eamxx-v1-testing.yml
- - externals/ekat/**
- - externals/scorpio/**
- - externals/haero/**
- - externals/YAKL/**
- - components/eam/src/physics/rrtmgp/external/**
# Manual run is used to bless
workflow_dispatch:
@@ -40,7 +29,80 @@ concurrency:
cancel-in-progress: true
jobs:
+ pre_process_pr:
+ if: ${{ github.event_name == 'pull_request' }}
+ runs-on: ubuntu-latest # This job can run anywhere
+ outputs:
+ relevant_paths: ${{ steps.check_paths.outputs.value }}
+ labels: ${{ steps.get_labels.outputs.labels }}
+ steps:
+ - name: Check files modified by PR
+ id: check_paths
+ run: |
+ paths=(
+ components/eamxx
+ components/eam/src/physics/rrtmgp
+ components/eam/src/physics/p3/scream
+ components/eam/src/physics/cam
+ components/eam/src/physics/rrtmgp/external
+ externals/ekat
+ externals/scorpio
+ externals/haero
+ externals/YAKL
+ .github/workflows/eamxx-v1-testing.yml
+ )
+ pattern=$(IFS=\|; echo "${paths[*]}")
+
+ # Use the GitHub API to get the list of changed files
+ # There are page size limits, so do it in chunks
+ page=1
+ while true; do
+ response=$(curl -s -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ "https://api.github.com/repos/E3SM-Project/scream/pulls/${{ github.event.number }}/files?per_page=100&page=$page")
+
+ # Check if the response is empty, and break if it is
+ [ -z "$response" ] && break
+
+ changed_files+=$(echo "$response" | grep -o '"filename": *"[^"]*"' | sed 's/"filename": *//; s/"//g')$'\n'
+
+          # Check if there are more pages, and quit if there aren't
+ [[ $(echo "$response" | jq '. | length') -lt 100 ]] && break
+
+ page=$((page + 1))
+ done
+
+ # Check for matches and echo the matching files (or "" if none)
+ matching_files=$(echo "$changed_files" | grep -E "^($pattern)" || echo "")
+ if [[ -n "$matching_files" ]]; then
+ echo "Found relevant files: $matching_files"
+ echo "value=true" >> $GITHUB_OUTPUT
+ else
+ echo "No relevant files touched by this PR."
+ echo "value=false" >> $GITHUB_OUTPUT
+ fi
+ - name: Retrieve PR labels
+ id: get_labels
+ run: |
+ labels="${{ join(github.event.pull_request.labels.*.name, ',') }}"
+ echo "labels=${labels}" >> $GITHUB_OUTPUT
cpu-gcc:
+ needs: [pre_process_pr]
+ if: |
+ !failure() && !cancelled() &&
+ (
+ github.event_name == 'schedule' ||
+ (
+ github.event_name == 'pull_request' &&
+ needs.pre_process_pr.outputs.relevant_paths=='true' &&
+ !contains(needs.pre_process_pr.outputs.labels,'CI: skip gcc') &&
+ !contains(needs.pre_process_pr.outputs.labels,'CI: skip eamxx-v1') &&
+ !contains(needs.pre_process_pr.outputs.labels,'CI: skip eamxx-all')
+ ) || (
+ github.event_name == 'workflow_dispatch' &&
+ github.event.inputs.job_to_run == 'cpu-gcc' ||
+ github.event.inputs.job_to_run == 'all'
+ )
+ )
runs-on: [self-hosted, gcc, ghci-snl-cpu]
strategy:
matrix:
@@ -55,9 +117,6 @@ jobs:
short_name: SMS_D_Ln5.ne4pg2_oQU480.F2010-SCREAMv1-MPASSI.scream-mam4xx-all_mam4xx_procs
fail-fast: false
name: cpu-gcc / ${{ matrix.test.short_name }}
- if: ${{ github.event_name != 'workflow_dispatch' ||
- github.event.inputs.job_to_run == 'cpu-gcc' ||
- github.event.inputs.job_to_run == 'all' }}
steps:
- name: Check out the repository
uses: actions/checkout@v4
@@ -67,13 +126,6 @@ jobs:
submodules: recursive
- name: Show action trigger
uses: ./.github/actions/show-workflow-trigger
- - name: Check for skip labels
- if: ${{ github.event_name == 'pull_request' || github.event_name == 'pull_request_review' }}
- uses: ./.github/actions/check-skip-labels
- with:
- skip_labels: 'AT: skip gcc,AT: skip openmp,AT: skip eamxx-sa,AT: skip eamxx-all'
- token: ${{ secrets.GITHUB_TOKEN }}
- pr_number: ${{ github.event.pull_request.number }}
- name: Set CA certificates env var
run: |
# Ensure the operating system is Linux
diff --git a/.github/workflows/eamxx_default_files.yml b/.github/workflows/eamxx_default_files.yml
index d3971758991..38c528306c4 100644
--- a/.github/workflows/eamxx_default_files.yml
+++ b/.github/workflows/eamxx_default_files.yml
@@ -11,9 +11,13 @@ on:
- cron: '00 00 * * *'
workflow_dispatch:
+concurrency:
+ group: ${{ github.workflow }}-${{ github.event_name }}-${{ github.event.pull_request.number || github.run_id }}
+ cancel-in-progress: true
+
jobs:
scream-defaults:
- if: false
+ if: ${{ github.repository == 'E3SM-Project/E3SM' }}
runs-on: ubuntu-latest
outputs:
event_name: ${{ github.event_name }}
diff --git a/.mergify.yml b/.mergify.yml
new file mode 100644
index 00000000000..89fcc821e57
--- /dev/null
+++ b/.mergify.yml
@@ -0,0 +1,53 @@
+merge_protections:
+ - name: Enforce checks passing
+ description: Make sure that checks are not failing on the PR, and reviewers approved
+ if:
+ - base = master
+ success_conditions:
+ - "#approved-reviews-by >= 1" # At least 1 approval
+ - "#changes-requested-reviews-by == 0" # No reviewer asked for changes
+ - or:
+ - and:
+ - check-success="gcc-openmp / dbg"
+ - check-success="gcc-openmp / sp"
+ - check-success="gcc-openmp / fpe"
+ - check-success="gcc-openmp / opt"
+ - check-skipped={% raw %}gcc-openmp / ${{ matrix.build_type }}{% endraw %}
+ - or:
+ - and:
+ - check-success="gcc-cuda / dbg"
+ - check-success="gcc-cuda / sp"
+ - check-success="gcc-cuda / opt"
+ - check-skipped={% raw %}gcc-cuda / ${{ matrix.build_type }}{% endraw %}
+ - or:
+ - and:
+ - check-success="cpu-gcc / ERS_Ln9.ne4_ne4.F2000-SCREAMv1-AQP1.scream-output-preset-2"
+ - check-success="cpu-gcc / ERS_P16_Ln22.ne30pg2_ne30pg2.FIOP-SCREAMv1-DP.scream-dpxx-arm97"
+ - check-success="cpu-gcc / ERS_Ln22.ne4pg2_ne4pg2.F2010-SCREAMv1.scream-small_kernels--scream-output-preset-5"
+ - check-success="cpu-gcc / SMS_D_Ln5.ne4pg2_oQU480.F2010-SCREAMv1-MPASSI.scream-mam4xx-all_mam4xx_procs"
+ - check-skipped={% raw %}cpu-gcc / ${{ matrix.test.short_name }}{% endraw %}
+ - or:
+ - check-success=cpu-gcc
+ - check-skipped=cpu-gcc
+
+pull_request_rules:
+ - name: dismiss stale reviews
+ conditions:
+ - base=master
+ actions:
+ dismiss_reviews:
+ when: synchronize # Dismiss reviews when synchronize event happens
+ - name: Automatic merge when CI passes and approved
+ conditions:
+ - "label=CI: automerge"
+ - base=master
+ actions:
+ merge:
+ method: merge
+ commit_message_template: |
+ Merge pull request #{{number}} from {{head}}
+
+ Automatically merged using mergify
+ PR title: {{title}}
+ PR author: {{author}}
+ PR labels: {{label}}
diff --git a/CITATION.cff b/CITATION.cff
index 9542a7d01ef..78099055fc3 100644
--- a/CITATION.cff
+++ b/CITATION.cff
@@ -9,7 +9,7 @@ authors:
- given-names: E3SM
family-names: Project
version: 3.0.0
-doi: 10.11578/E3SM/dc.20240301.3
+doi: 10.11578/E3SM/dc.20240930.1
repository-code: 'https://github.com/E3SM-Project/E3SM'
url: 'https://e3sm.org'
license: BSD-3-Clause
diff --git a/LICENSE b/LICENSE
index d74a2aa127a..247287ab4c0 100644
--- a/LICENSE
+++ b/LICENSE
@@ -2,7 +2,7 @@ Except for the separable pieces descibed below, E3SM is released
under the following 3-Clause BSD Open Source license.
*******************************************************************************
-Copyright ©2023, UChicago Argonne, LLC All Rights Reserved
+Copyright 2024, UChicago Argonne, LLC All Rights Reserved
Software Name: Energy Exascale Earth System Model (E3SM)
@@ -55,15 +55,29 @@ GPTL share/timing author non-commeric
MCT externals/mct ANL BSD
YAKL externals/YAKL author BSD
cub externals/cub author, NVIDIA BSD
-kokkos externals/kokkos SNL BSD
+kokkos externals/ekat SNL BSD
+haero externals/haero SNL/Battelle BSD
+mam4xx externals/mam4xx SNL/Battelle BSD
Ocean/Ice under components/
----------- -----------------
CICE cice LANL BSD
MPAS Framework mpas-framework LANL BSD
-MPAS Ocean mpas-ocean LANL BSD
-MPAS SeaIce mpas-seaice LANL BSD
-MPAS-Albany LandIce mpas-albany-landice LANL, SNL BSD
+FFTW mpas-ocean/src/FFTW author/MIT GPL
+MARBL mpas-ocean/src/MARBL NCAR BSD
+SHTNS mpas-ocean/src/SHTNS CeCILL GPL
+cvmix mpas-ocean/src/cvmix NCAR LGPL
+gotm mpas-ocean/src/gotm authors GPL
+ppr mpas-ocean/src/ppr author custom
+Icepack mpas-seaice/src/icepack LANL BSD
+
+Waves under components/ww3
+----------- --------------------
+WW3 src/WW3 NWS/NOAA custom
+
+Land-ice under components/mpas-albany-landice
+----------- ------------------------------------
+SeaLevelModel src/SeaLevelModel author MIT
Land under components/elm/src
----------- ------------------------
@@ -91,7 +105,7 @@ HOMMEXX components/homme/src/share/cxx SNL BSD
Actual copyright holder for above Institutions:
NCAR = University Corporation for Atmospheric Research
-LANL = Los Alamos National Security, LLC
+LANL = Los Alamos National Security, LLC, Triad National Security, LLC
SNL = National Technology & Engineering Solutions of Sandia, LLC
LBNL = The Regents of the University of California,
through Lawrence Berkeley National Laboratory
diff --git a/README.md b/README.md
index 192c288d78e..13c927b8c7f 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,5 @@
+
+
[![E3SM Logo](https://e3sm.org/wp-content/themes/e3sm/assets/images/e3sm-logo.png)](https://e3sm.org)
Energy Exascale Earth System Model (E3SM)
@@ -9,14 +11,14 @@ the most challenging and demanding climate-change research problems and
Department of Energy mission needs while efficiently using DOE Leadership
Computing Facilities.
-DOI: [10.11578/E3SM/dc.20240301.3](http://dx.doi.org/10.11578/E3SM/dc.20240301.3)
+DOI: [10.11578/E3SM/dc.20240930.1](http://dx.doi.org/10.11578/E3SM/dc.20240930.1)
Please visit the [project website](https://e3sm.org) or our [Confluence site](https://acme-climate.atlassian.net/wiki/spaces/DOC/overview)
for further details.
For questions about the model, use [Github Discussions](https://github.com/E3SM-Project/E3SM/discussions).
-See our Github-hosted documentation at [https://e3sm-project.github.io/E3SM/](https://e3sm-project.github.io/E3SM/).
+See our Github-hosted documentation at [https://docs.e3sm.org/E3SM](https://docs.e3sm.org/E3SM/).
Table of Contents
--------------------------------------------------------------------------------
diff --git a/cime_config/allactive/config_compsets.xml b/cime_config/allactive/config_compsets.xml
index e66f1e473d8..d5a52a8bac3 100755
--- a/cime_config/allactive/config_compsets.xml
+++ b/cime_config/allactive/config_compsets.xml
@@ -383,42 +383,52 @@
CRYO1850
- 1850SOI_EAM%CMIP6_ELM%SPBC_MPASSI%DIB_MPASO%IBPISMF_MOSART_SGLC_SWAV
+ 1850SOI_EAM%CMIP6_ELM%CNPRDCTCBCTOP_MPASSI%DIB_MPASO%IBPISMF_MOSART_SGLC_SWAV
CRYO1850-4xCO2
- 1850SOI_EAM%CMIP6-4xCO2_ELM%SPBC_MPASSI%DIB_MPASO%IBPISMF_MOSART_SGLC_SWAV
+ 1850SOI_EAM%CMIP6-4xCO2_ELM%CNPRDCTCBCTOP_MPASSI%DIB_MPASO%IBPISMF_MOSART_SGLC_SWAV
+
+
+
+ CRYO1850-1pctCO2
+ 1850SOI_EAM%CMIP6-1pctCO2_ELM%CNPRDCTCBCTOP_MPASSI%DIB_MPASO%IBPISMF_MOSART_SGLC_SWAV
CRYO1950
- 1950SOI_EAM%CMIP6_ELM%SPBC_MPASSI%DIB_MPASO%IBPISMF_MOSART_SGLC_SWAV
+ 1950SOI_EAM%CMIP6_ELM%CNPRDCTCBCTOP_MPASSI%DIB_MPASO%IBPISMF_MOSART_SGLC_SWAV
CRYO1850-DISMF
- 1850SOI_EAM%CMIP6_ELM%SPBC_MPASSI%DIB_MPASO%IBDISMF_MOSART_SGLC_SWAV
+ 1850SOI_EAM%CMIP6_ELM%CNPRDCTCBCTOP_MPASSI%DIB_MPASO%IBDISMF_MOSART_SGLC_SWAV
CRYO1950-DISMF
- 1950SOI_EAM%CMIP6_ELM%SPBC_MPASSI%DIB_MPASO%IBDISMF_MOSART_SGLC_SWAV
+ 1950SOI_EAM%CMIP6_ELM%CNPRDCTCBCTOP_MPASSI%DIB_MPASO%IBDISMF_MOSART_SGLC_SWAV
CRYO20TR
- 20TRSOI_EAM%CMIP6_ELM%SPBC_MPASSI%DIB_MPASO%IBPISMF_MOSART_SGLC_SWAV
+ 20TRSOI_EAM%CMIP6_ELM%CNPRDCTCBCTOP_MPASSI%DIB_MPASO%IBPISMF_MOSART_SGLC_SWAV
+
+
+
+ CRYOSSP245
+ SSP245SOI_EAM%CMIP6_ELM%CNPRDCTCBCTOP_MPASSI%DIB_MPASO%IBPISMF_MOSART_SGLC_SWAV
CRYOSSP585
- SSP585SOI_EAM%CMIP6_ELM%SPBC_MPASSI%DIB_MPASO%IBPISMF_MOSART_SGLC_SWAV
+ SSP585SOI_EAM%CMIP6_ELM%CNPRDCTCBCTOP_MPASSI%DIB_MPASO%IBPISMF_MOSART_SGLC_SWAV
CRYOSSP370
- SSP370SOI_EAM%CMIP6_ELM%SPBC_MPASSI%DIB_MPASO%IBPISMF_MOSART_SGLC_SWAV
+ SSP370SOI_EAM%CMIP6_ELM%CNPRDCTCBCTOP_MPASSI%DIB_MPASO%IBPISMF_MOSART_SGLC_SWAV
diff --git a/cime_config/allactive/config_pesall.xml b/cime_config/allactive/config_pesall.xml
index 3392ad71ca1..0d5f2456596 100644
--- a/cime_config/allactive/config_pesall.xml
+++ b/cime_config/allactive/config_pesall.xml
@@ -843,6 +843,29 @@
+
+
+
+ -compset WCYCL*/CRYO* -res SOwISC12to30E3r3* on 52 nodes pure-MPI, ~8.5 sypd
+
+ 1408
+ 384
+ 384
+ 1024
+ 1920
+ 1408
+
+
+ 0
+ 1024
+ 1024
+ 0
+ 1408
+ 0
+
+
+
+
@@ -1774,6 +1797,21 @@
+
+
+ allactive+pm-cpu: default, 1 node, 96 tasks, 1 thread
+
+ 96
+ 96
+ 96
+ 96
+ 96
+ 96
+ 96
+ 96
+
+
+
diff --git a/cime_config/config_grids.xml b/cime_config/config_grids.xml
index 2d43f3390a3..b093285e59d 100755
--- a/cime_config/config_grids.xml
+++ b/cime_config/config_grids.xml
@@ -416,6 +416,16 @@
RRSwISC6to18E3r5
+
+ T62
+ T62
+ SOwISC12to30E3r3
+ rx1
+ null
+ null
+ SOwISC12to30E3r3
+
+
TL319
TL319
@@ -656,6 +666,16 @@
RRSwISC6to18E3r5
+
+ TL319
+ TL319
+ SOwISC12to30E3r3
+ JRA025
+ null
+ null
+ SOwISC12to30E3r3
+
+
TL319
TL319
@@ -1400,6 +1420,16 @@
RRSwISC6to18E3r5
+
+ ne30np4.pg2
+ ne30np4.pg2
+ SOwISC12to30E3r3
+ r05
+ null
+ null
+ SOwISC12to30E3r3
+
+
ne0np4_northamericax4v1
r0125
@@ -2009,6 +2039,26 @@
IcoswISC30E3r5
+
+ ERA5r025
+ r05
+ IcoswISC30E3r5
+ r05
+ mpas.gis1to10kmR2
+ null
+ IcoswISC30E3r5
+
+
+
+ ERA5r025
+ r025
+ IcoswISC30E3r5
+ r025
+ mpas.gis1to10kmR2
+ null
+ IcoswISC30E3r5
+
+
ne120np4.pg2
r0125
@@ -2365,6 +2415,16 @@
EC30to60E2r2
+
+ ne30np4.pg2
+ r05
+ IcoswISC30E3r5
+ r05
+ null
+ wQU225Icos30E3r5
+ IcoswISC30E3r5
+
+
ne30np4.pg2
ne30np4.pg2
@@ -2457,6 +2517,16 @@
RRSwISC6to18E3r5
+
+ ne30np4.pg2
+ r05
+ SOwISC12to30E3r3
+ r05
+ null
+ null
+ SOwISC12to30E3r3
+
+
ne30np4.pg2
r05
@@ -2768,6 +2838,7 @@
$DIN_LOC_ROOT/share/domains/domain.lnd.T62_ECwISC30to60E2r1.201007.nc
$DIN_LOC_ROOT/share/domains/domain.lnd.T62_IcoswISC30E3r5.231121.nc
$DIN_LOC_ROOT/share/domains/domain.lnd.T62_RRSwISC6to18E3r5.240328.nc
+ $DIN_LOC_ROOT/share/domains/domain.lnd.T62_SOwISC12to30E3r3.240808.nc
T62 is Gaussian grid:
@@ -2826,11 +2897,21 @@
$DIN_LOC_ROOT/share/domains/domain.ocn.TL319_IcosXISC30E3r7.240326.nc
$DIN_LOC_ROOT/share/domains/domain.lnd.TL319_RRSwISC6to18E3r5.240328.nc
$DIN_LOC_ROOT/share/domains/domain.ocn.TL319_RRSwISC6to18E3r5.240328.nc
+ $DIN_LOC_ROOT/share/domains/domain.lnd.TL319_SOwISC12to30E3r3.240808.nc
+ $DIN_LOC_ROOT/share/domains/domain.ocn.TL319_SOwISC12to30E3r3.240808.nc
$DIN_LOC_ROOT/share/domains/domain.lnd.TL319_oRRS18to6v3.220124.nc
$DIN_LOC_ROOT/share/domains/domain.ocn.TL319_oRRS18to6v3.220124.nc
TL319 is JRA lat/lon grid:
+
+ 1440
+ 721
+ $DIN_LOC_ROOT/share/domains/domain.lnd.ERA5r025_IcoswISC30E3r5.240903.nc
+ $DIN_LOC_ROOT/share/domains/domain.ocn.ERA5r025_IcoswISC30E3r5.240903.nc
+ ERA5r025 is the lat/lon cap grid used by ERA5 data:
+
+
@@ -2939,6 +3020,8 @@
$DIN_LOC_ROOT/share/domains/domain.ocn.ne30pg2_IcosXISC30E3r7.240326.nc
$DIN_LOC_ROOT/share/domains/domain.lnd.ne30pg2_RRSwISC6to18E3r5.240328.nc
$DIN_LOC_ROOT/share/domains/domain.ocn.ne30pg2_RRSwISC6to18E3r5.240328.nc
+ $DIN_LOC_ROOT/share/domains/domain.lnd.ne30pg2_SOwISC12to30E3r3.240808.nc
+ $DIN_LOC_ROOT/share/domains/domain.ocn.ne30pg2_SOwISC12to30E3r3.240808.nc
$DIN_LOC_ROOT/share/domains/domain.lnd.ne30pg2_gx1v6.190806.nc
$DIN_LOC_ROOT/share/domains/domain.ocn.ne30pg2_gx1v6.190806.nc
ne30np4.pg2 is Spectral Elem 1-deg grid w/ 2x2 FV physics grid per element:
@@ -3259,6 +3342,13 @@
RRSwISC6to18E3r5 is a MPAS ocean grid generated with the jigsaw/compass process using a mesh density function that is roughly proportional to the Rossby radius of deformation, with 18 km gridcells at low and 6 km gridcells at high latitudes. Additionally, it has ocean in ice-shelf cavities:
+
+ 807630
+ 1
+ $DIN_LOC_ROOT/share/domains/domain.ocn.SOwISC12to30E3r3.240808.nc
+ SOwISC12to30E3r3 is a MPAS ocean grid generated with the jigsaw/compass process using a Southern Ocean regionally refined mesh with 12 to 30 km gridcells (TODO: replace with the actual mesh density description before merge). Additionally, it has ocean in ice-shelf cavities:
+
+
@@ -3295,6 +3385,8 @@
$DIN_LOC_ROOT/share/domains/domain.lnd.r05_IcosXISC30E3r7.240326.nc
$DIN_LOC_ROOT/share/domains/domain.lnd.r05_RRSwISC6to18E3r5.240328.nc
$DIN_LOC_ROOT/share/domains/domain.lnd.r05_RRSwISC6to18E3r5.240328.nc
+ $DIN_LOC_ROOT/share/domains/domain.lnd.r05_SOwISC12to30E3r3.240808.nc
+ $DIN_LOC_ROOT/share/domains/domain.lnd.r05_SOwISC12to30E3r3.240808.nc
$DIN_LOC_ROOT/share/domains/domain.lnd.r05_gx1v6.191014.nc
r05 is 1/2 degree river routing grid:
@@ -3404,6 +3496,13 @@
$DIN_LOC_ROOT/share/domains/domain.ocn.wQU225EC30to60E2r2.220224.nc
WW3 unstructured QU 225km global grid with EC30to60E2r2 coastlines
+
+
+ 97988
+ 1
+ $DIN_LOC_ROOT/share/domains/domain.ocn.wQU225Icos30E3r5.240910.nc
+ WW3 unstructured QU 225km global grid with ICOS30 coastlines
+
@@ -3573,6 +3672,7 @@
ATM2ROF_SMAPNAME
ATM2WAV_SMAPNAME
OCN2WAV_SMAPNAME
+ WAV2OCN_SMAPNAME
ICE2WAV_SMAPNAME
ROF2OCN_LIQ_RMAPNAME
@@ -3817,6 +3917,16 @@
+
+
+
+
+
+
+
+
+
+
@@ -4626,6 +4736,14 @@
+
+
+
+
+
+
+
+
@@ -4778,6 +4896,14 @@
+
+
+
+
+
+
+
+
@@ -4793,6 +4919,38 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
@@ -4927,6 +5085,10 @@
+
+
+
+
@@ -4943,6 +5105,12 @@
+
+
+
+
+
+
@@ -5172,6 +5340,10 @@
+
+
+
+
@@ -5287,6 +5459,11 @@
+
+
+
+
+
@@ -5382,6 +5559,11 @@
+
+
+
+
+
@@ -5473,10 +5655,15 @@
-
+
+
+
+
+
+
@@ -5706,6 +5893,13 @@
+
+
+
+
+
+
+
diff --git a/cime_config/machines/Depends.muller-cpu.gnu.cmake b/cime_config/machines/Depends.muller-cpu.gnu.cmake
index 5c7331f979a..53f8b536651 100644
--- a/cime_config/machines/Depends.muller-cpu.gnu.cmake
+++ b/cime_config/machines/Depends.muller-cpu.gnu.cmake
@@ -7,23 +7,3 @@ if (NOT DEBUG)
e3sm_deoptimize_file("${ITEM}")
endforeach()
endif()
-
-# On pm-cpu (and muller-cpu), with gcc-native/12.3, we see hang with DEBUG runs of certain tests.
-# https://github.com/E3SM-Project/E3SM/issues/6516
-# Currently, we have pm-cpu using gcc/12.2.0 which does not have this issue, but using muller-cpu to test 12.3
-# Turning off -O0 for these 2 files (by adding -O) at least avoids hang and will produce FPE in HOMME code
-if (CMAKE_Fortran_COMPILER_VERSION VERSION_GREATER_EQUAL 12.3)
- if (DEBUG)
-
- set(ADJUST
- eam/src/dynamics/se/inidat.F90
- eam/src/dynamics/se/dyn_comp.F90
- )
-
- foreach(ITEM IN LISTS ADJUST)
- e3sm_add_flags("${ITEM}" "-O")
- #e3sm_add_flags("${ITEM}" "-DNDEBUG -O")
- endforeach()
-
- endif()
-endif()
diff --git a/cime_config/machines/cmake_macros/amdclanggpu_frontier.cmake b/cime_config/machines/cmake_macros/amdclanggpu_frontier.cmake
index 4412ea0de7b..1deebdac85d 100644
--- a/cime_config/machines/cmake_macros/amdclanggpu_frontier.cmake
+++ b/cime_config/machines/cmake_macros/amdclanggpu_frontier.cmake
@@ -13,8 +13,6 @@ string(APPEND CMAKE_C_FLAGS_RELEASE " -O2")
string(APPEND CMAKE_CXX_FLAGS_RELEASE " -O2")
string(APPEND CMAKE_Fortran_FLAGS_RELEASE " -O2")
-string(APPEND SPIO_CMAKE_OPTS " -DPIO_ENABLE_TOOLS:BOOL=OFF")
-
string(APPEND CMAKE_CXX_FLAGS " --offload-arch=gfx90a")
string(APPEND CMAKE_EXE_LINKER_FLAGS " -L$ENV{CRAY_MPICH_ROOTDIR}/gtl/lib -lmpi_gtl_hsa")
string(APPEND CMAKE_EXE_LINKER_FLAGS " -L/opt/cray/pe/gcc/12.2.0/snos/lib64 -lgfortran -lstdc++")
diff --git a/cime_config/machines/cmake_macros/crayclanggpu_frontier.cmake b/cime_config/machines/cmake_macros/crayclanggpu_frontier.cmake
index a37ccde439e..92567416c56 100644
--- a/cime_config/machines/cmake_macros/crayclanggpu_frontier.cmake
+++ b/cime_config/machines/cmake_macros/crayclanggpu_frontier.cmake
@@ -52,8 +52,6 @@ endif()
# https://github.com/E3SM-Project/E3SM/pull/5208
string(APPEND CMAKE_Fortran_FLAGS " -hipa0 -hzero -em -ef -hnoacc")
-string(APPEND SPIO_CMAKE_OPTS " -DPIO_ENABLE_TOOLS:BOOL=OFF")
-
string(APPEND CMAKE_CXX_FLAGS " --offload-arch=gfx90a")
string(APPEND CMAKE_EXE_LINKER_FLAGS " -L$ENV{CRAY_MPICH_ROOTDIR}/gtl/lib -lmpi_gtl_hsa")
string(APPEND CMAKE_EXE_LINKER_FLAGS " -L$ENV{ROCM_PATH}/lib -lamdhip64")
diff --git a/cime_config/machines/cmake_macros/gnu_chicoma-cpu.cmake b/cime_config/machines/cmake_macros/gnu_chicoma-cpu.cmake
index a6148451eb7..a6c13942620 100644
--- a/cime_config/machines/cmake_macros/gnu_chicoma-cpu.cmake
+++ b/cime_config/machines/cmake_macros/gnu_chicoma-cpu.cmake
@@ -5,6 +5,7 @@ endif()
set(PIO_FILESYSTEM_HINTS "lustre")
string(APPEND CMAKE_C_FLAGS_RELEASE " -O2 -g")
string(APPEND CMAKE_Fortran_FLAGS_RELEASE " -O2 -g")
+string(APPEND CMAKE_EXE_LINKER_FLAGS " -Wl,--enable-new-dtags")
set(MPICC "cc")
set(MPICXX "CC")
set(MPIFC "ftn")
diff --git a/cime_config/machines/cmake_macros/gnugpu_frontier.cmake b/cime_config/machines/cmake_macros/gnugpu_frontier.cmake
index 6ca4b83d9c2..7a29a5ca154 100644
--- a/cime_config/machines/cmake_macros/gnugpu_frontier.cmake
+++ b/cime_config/machines/cmake_macros/gnugpu_frontier.cmake
@@ -14,7 +14,6 @@ string(APPEND CMAKE_Fortran_FLAGS " -Wno-implicit-interface")
string(APPEND CMAKE_C_FLAGS_RELEASE " -O2")
string(APPEND CMAKE_CXX_FLAGS_RELEASE " -O2")
string(APPEND CMAKE_Fortran_FLAGS_RELEASE " -O2")
-string(APPEND SPIO_CMAKE_OPTS " -DPIO_ENABLE_TOOLS:BOOL=OFF")
string(APPEND CMAKE_CXX_FLAGS " --offload-arch=gfx90a")
string(APPEND CMAKE_EXE_LINKER_FLAGS " -L$ENV{CRAY_MPICH_ROOTDIR}/gtl/lib -lmpi_gtl_hsa")
diff --git a/cime_config/machines/cmake_macros/intel_dane.cmake b/cime_config/machines/cmake_macros/intel_dane.cmake
index 8091325c6ce..ef25a97b300 100644
--- a/cime_config/machines/cmake_macros/intel_dane.cmake
+++ b/cime_config/machines/cmake_macros/intel_dane.cmake
@@ -1,4 +1,10 @@
string(APPEND CPPDEFS " -DNO_SHR_VMATH -DCNL")
string(APPEND CMAKE_Fortran_FLAGS_DEBUG " -check all -ftrapuv")
string(APPEND CMAKE_EXE_LINKER_FLAGS " -L/usr/tce/packages/gcc/gcc-10.3.1-magic/lib/gcc/x86_64-redhat-linux/10/")
+
+list(APPEND CMAKE_BUILD_RPATH "/usr/workspace/e3sm/spack/libs/linux-rhel8-sapphirerapids/intel-2021.6.0/hdf5-1.10.7-766kapalbrdntu2pcgdgbhg2ch26gsuv/lib")
+list(APPEND CMAKE_BUILD_RPATH "/usr/workspace/e3sm/spack/libs/linux-rhel8-sapphirerapids/intel-2021.6.0/netcdf-c-4.4.1.1-2uznnlwgiezxute6iyqzqjrpolokeaib/lib")
+list(APPEND CMAKE_BUILD_RPATH "/usr/workspace/e3sm/spack/libs/linux-rhel8-sapphirerapids/intel-2021.6.0/netcdf-fortran-4.4.4-itpstyordbern7vlulmlnt47eeeokzfp/lib")
+list(APPEND CMAKE_BUILD_RPATH "/usr/workspace/e3sm/spack/libs/linux-rhel8-sapphirerapids/intel-2021.6.0/parallel-netcdf-1.11.0-26sxm4mormsglmhi24poix7sugbigkck/lib")
+
set(KOKKOS_OPTIONS "--with-serial --ldflags='-L/usr/tce/packages/gcc/gcc-10.3.1-magic/lib/gcc/x86_64-redhat-linux/10/'")
diff --git a/cime_config/machines/cmake_macros/intel_ruby.cmake b/cime_config/machines/cmake_macros/intel_ruby.cmake
index 8091325c6ce..e874bfb7eaf 100644
--- a/cime_config/machines/cmake_macros/intel_ruby.cmake
+++ b/cime_config/machines/cmake_macros/intel_ruby.cmake
@@ -1,4 +1,10 @@
string(APPEND CPPDEFS " -DNO_SHR_VMATH -DCNL")
string(APPEND CMAKE_Fortran_FLAGS_DEBUG " -check all -ftrapuv")
string(APPEND CMAKE_EXE_LINKER_FLAGS " -L/usr/tce/packages/gcc/gcc-10.3.1-magic/lib/gcc/x86_64-redhat-linux/10/")
+
+list(APPEND CMAKE_BUILD_RPATH "/usr/workspace/e3sm/spack/libs/linux-rhel8-cascadelake/intel-2021.6.0/hdf5-1.10.7-ewjpbjdhjgjzrzjcvwyjyuulaesbsjhg/lib")
+list(APPEND CMAKE_BUILD_RPATH "/usr/workspace/e3sm/spack/libs/linux-rhel8-cascadelake/intel-2021.6.0/netcdf-c-4.4.1.1-vaxofekwvnvngh7wptmzkwdb7tkzvesn/lib")
+list(APPEND CMAKE_BUILD_RPATH "/usr/workspace/e3sm/spack/libs/linux-rhel8-cascadelake/intel-2021.6.0/netcdf-fortran-4.4.4-3pzbx2unddhladhubaahhhysjmprzqi2/lib")
+list(APPEND CMAKE_BUILD_RPATH "/usr/workspace/e3sm/spack/libs/linux-rhel8-cascadelake/intel-2021.6.0/parallel-netcdf-1.11.0-tzgdalakmem7tod6cruhqyeackeix5q5/lib")
+
set(KOKKOS_OPTIONS "--with-serial --ldflags='-L/usr/tce/packages/gcc/gcc-10.3.1-magic/lib/gcc/x86_64-redhat-linux/10/'")
diff --git a/cime_config/machines/config_batch.xml b/cime_config/machines/config_batch.xml
index 13cfdfb337b..ee271d1f9dc 100644
--- a/cime_config/machines/config_batch.xml
+++ b/cime_config/machines/config_batch.xml
@@ -237,33 +237,17 @@
-
- squeue
- sbatch
- scancel
- #SBATCH
- (\d+)$
- --dependency=afterok:jobid
- --dependency=afterany:jobid
- :
- %H:%M:%S
- --mail-user
- --mail-type
- none, all, begin, end, fail
-
- --export=ALL
- -p {{ job_queue }}
- -J {{ job_id }}
- -N {{ num_nodes }}
- -n {{ total_tasks }}
- -t {{ job_wallclock_time }}
- -o {{ job_id }}.out
- -e {{ job_id }}.err
- -A {{ project }}
-
+
- pbatch
- pdebug
+ pbatch
+ pdebug
+
+
+
+
+
+ pbatch
+ pdebug
@@ -464,6 +448,7 @@
regular
preempt
shared
+ overrun
debug
@@ -518,10 +503,11 @@
regular
preempt
shared
+ overrun
debug
-
+
--constraint=cpu
diff --git a/cime_config/machines/config_machines.xml b/cime_config/machines/config_machines.xml
index 5bed00ea5d5..fce20fca185 100644
--- a/cime_config/machines/config_machines.xml
+++ b/cime_config/machines/config_machines.xml
@@ -190,6 +190,7 @@
module
+ cpe
cray-hdf5-parallel
cray-netcdf-hdf5parallel
cray-parallel-netcdf
@@ -218,34 +219,34 @@
PrgEnv-gnu/8.5.0
- gcc/12.2.0
- cray-libsci/23.02.1.1
+ gcc-native/12.3
+ cray-libsci/23.12.5
- PrgEnv-intel/8.3.3
- intel/2023.1.0
+ PrgEnv-intel/8.5.0
+ intel/2023.2.0
PrgEnv-nvidia
- nvidia/22.7
- cray-libsci/23.02.1.1
+ nvidia/24.5
+ cray-libsci/23.12.5
PrgEnv-aocc
- aocc/4.0.0
- cray-libsci/23.02.1.1
+ aocc/4.1.0
+ cray-libsci/23.12.5
craype-accel-host
- craype/2.7.20
- cray-mpich/8.1.25
- cray-hdf5-parallel/1.12.2.3
- cray-netcdf-hdf5parallel/4.9.0.3
- cray-parallel-netcdf/1.12.3.3
+ craype/2.7.30
+ cray-mpich/8.1.28
+ cray-hdf5-parallel/1.12.2.9
+ cray-netcdf-hdf5parallel/4.9.0.9
+ cray-parallel-netcdf/1.12.3.9
cmake/3.24.3
evp-patch
@@ -265,7 +266,7 @@
threads
FALSE
/global/cfs/cdirs/e3sm/perl/lib/perl5-only-switch
- software
+ kdreg2
MPI_Bcast
$ENV{CRAY_NETCDF_HDF5PARALLEL_PREFIX}
$ENV{CRAY_PARALLEL_NETCDF_PREFIX}
@@ -355,6 +356,7 @@
module
+ cpe
cray-hdf5-parallel
cray-netcdf-hdf5parallel
cray-parallel-netcdf
@@ -365,13 +367,14 @@
PrgEnv-nvidia
PrgEnv-cray
PrgEnv-aocc
+ gcc-native
intel
intel-oneapi
nvidia
aocc
cudatoolkit
- cray-libsci
climate-utils
+ cray-libsci
matlab
craype-accel-nvidia80
craype-accel-host
@@ -423,6 +426,7 @@
$CIME_OUTPUT_ROOT/$CASE/run
$CIME_OUTPUT_ROOT/$CASE/bld
0.1
+ 0.20
1
@@ -433,6 +437,7 @@
threads
FALSE
/global/cfs/cdirs/e3sm/perl/lib/perl5-only-switch
+ kdreg2
MPI_Bcast
$ENV{CRAY_NETCDF_HDF5PARALLEL_PREFIX}
$ENV{CRAY_PARALLEL_NETCDF_PREFIX}
@@ -578,12 +583,11 @@
threads
FALSE
/global/cfs/cdirs/e3sm/perl/lib/perl5-only-switch
- software
+ kdreg2
MPI_Bcast
- $SHELL{if [ -z "$Albany_ROOT" ]; then echo /global/common/software/e3sm/mali_tpls/albany-e3sm-serial-release-gcc; else echo "$Albany_ROOT"; fi}
- $SHELL{if [ -z "$Trilinos_ROOT" ]; then echo /global/common/software/e3sm/mali_tpls/trilinos-e3sm-serial-release-gcc; else echo "$Trilinos_ROOT"; fi}
$ENV{CRAY_NETCDF_HDF5PARALLEL_PREFIX}
$ENV{CRAY_PARALLEL_NETCDF_PREFIX}
+ 4000MB
$SHELL{if [ -z "$ADIOS2_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/adios2/2.9.1/cray-mpich-8.1.25/intel-2023.1.0; else echo "$ADIOS2_ROOT"; fi}
@@ -591,6 +595,8 @@
$SHELL{if [ -z "$ADIOS2_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/adios2/2.9.1/cray-mpich-8.1.25/gcc-11.2.0; else echo "$ADIOS2_ROOT"; fi}
Generic
+ $SHELL{if [ -z "$Albany_ROOT" ]; then echo /global/common/software/e3sm/albany/2024.03.26/gcc/11.2.0; else echo "$Albany_ROOT"; fi}
+ $SHELL{if [ -z "$Trilinos_ROOT" ]; then echo /global/common/software/e3sm/trilinos/15.1.1/gcc/11.2.0; else echo "$Trilinos_ROOT"; fi}
$SHELL{if [ -z "$ADIOS2_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/adios2/2.9.1/cray-mpich-8.1.25/nvidia-22.7; else echo "$ADIOS2_ROOT"; fi}
@@ -606,6 +612,13 @@
$SHELL{if [ -z "$ADIOS2_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/adios2/2.9.1/cray-mpich-8.1.25/aocc-4.0.0; else echo "$ADIOS2_ROOT"; fi}
+
+ $SHELL{if [ -z "$MOAB_ROOT" ]; then echo /global/cfs/cdirs/e3sm/software/moab/intel; else echo "$MOAB_ROOT"; fi}
+
+
+ $SHELL{if [ -z "$MOAB_ROOT" ]; then echo /global/cfs/cdirs/e3sm/software/moab/gnu; else echo "$MOAB_ROOT"; fi}
+
+
-1
@@ -672,13 +685,14 @@
PrgEnv-nvidia
PrgEnv-cray
PrgEnv-aocc
+ gcc-native
intel
intel-oneapi
nvidia
aocc
cudatoolkit
- cray-libsci
climate-utils
+ cray-libsci
matlab
craype-accel-nvidia80
craype-accel-host
@@ -730,6 +744,7 @@
$CIME_OUTPUT_ROOT/$CASE/run
$CIME_OUTPUT_ROOT/$CASE/bld
0.1
+ 0.20
1
@@ -740,6 +755,7 @@
threads
FALSE
/global/cfs/cdirs/e3sm/perl/lib/perl5-only-switch
+ kdreg2
MPI_Bcast
$ENV{CRAY_NETCDF_HDF5PARALLEL_PREFIX}
$ENV{CRAY_PARALLEL_NETCDF_PREFIX}
@@ -750,6 +766,9 @@
1
+
+ $SHELL{if [ -z "$MOAB_ROOT" ]; then echo /global/cfs/cdirs/e3sm/software/moab/gnugpu ; else echo "$MOAB_ROOT"; fi}
+
$SHELL{if [ -z "$ADIOS2_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/adios2/2.9.1/cray-mpich-8.1.25/gcc-11.2.0; else echo "$ADIOS2_ROOT"; fi}
@@ -807,6 +826,7 @@
module
+ cpe
cray-hdf5-parallel
cray-netcdf-hdf5parallel
cray-parallel-netcdf
@@ -834,36 +854,35 @@
- PrgEnv-gnu
- gcc-native
- cray-libsci
+ PrgEnv-gnu/8.5.0
+ gcc-native/13.2
+ cray-libsci/24.03.0
- PrgEnv-intel
- intel
+ PrgEnv-intel/8.5.0
+ intel/2024.1.0
PrgEnv-nvidia
nvidia/24.5
- cray-libsci
+ cray-libsci/24.03.0
PrgEnv-aocc
- aocc/4.0.1
- cray-libsci
+ aocc/4.1.0
+ cray-libsci/24.03.0
craype-accel-host
- cray-libsci
- craype/2.7.30
- cray-mpich/8.1.28
- cray-hdf5-parallel/1.12.2.9
- cray-netcdf-hdf5parallel/4.9.0.9
- cray-parallel-netcdf/1.12.3.9
+ craype/2.7.31.11
+ cray-mpich/8.1.29
+ cray-hdf5-parallel/1.12.2.11
+ cray-netcdf-hdf5parallel/4.9.0.11
+ cray-parallel-netcdf/1.12.3.11
cmake/3.24.3
@@ -871,6 +890,7 @@
$CIME_OUTPUT_ROOT/$CASE/run
$CIME_OUTPUT_ROOT/$CASE/bld
0.1
+ 0.20
1
@@ -881,16 +901,48 @@
threads
FALSE
/global/cfs/cdirs/e3sm/perl/lib/perl5-only-switch
- software
+ kdreg2
MPI_Bcast
$ENV{CRAY_NETCDF_HDF5PARALLEL_PREFIX}
$ENV{CRAY_PARALLEL_NETCDF_PREFIX}
+ 4000MB
+
+
+ $SHELL{if [ -z "$ADIOS2_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/adios2/2.9.1/cray-mpich-8.1.25/intel-2023.1.0; else echo "$ADIOS2_ROOT"; fi}
+
+
+ $SHELL{if [ -z "$ADIOS2_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/adios2/2.9.1/cray-mpich-8.1.25/gcc-11.2.0; else echo "$ADIOS2_ROOT"; fi}
+ Generic
+ $SHELL{if [ -z "$Albany_ROOT" ]; then echo /global/common/software/e3sm/albany/2024.03.26/gcc/11.2.0; else echo "$Albany_ROOT"; fi}
+ $SHELL{if [ -z "$Trilinos_ROOT" ]; then echo /global/common/software/e3sm/trilinos/15.1.1/gcc/11.2.0; else echo "$Trilinos_ROOT"; fi}
+
+
+ $SHELL{if [ -z "$ADIOS2_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/adios2/2.9.1/cray-mpich-8.1.25/nvidia-22.7; else echo "$ADIOS2_ROOT"; fi}
+
+
+ $SHELL{if [ -z "$BLAS_ROOT" ]; then echo $NVIDIA_PATH/compilers; else echo "$BLAS_ROOT"; fi}
+ $SHELL{if [ -z "$LAPACK_ROOT" ]; then echo $NVIDIA_PATH/compilers; else echo "$LAPACK_ROOT"; fi}
+ NVHPC
+
+
+ Intel10_64_dyn
+
+
+ $SHELL{if [ -z "$ADIOS2_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/adios2/2.9.1/cray-mpich-8.1.25/aocc-4.0.0; else echo "$ADIOS2_ROOT"; fi}
+
+ $SHELL{if [ -z "$MOAB_ROOT" ]; then echo /global/cfs/cdirs/e3sm/software/moab/intel; else echo "$MOAB_ROOT"; fi}
+
+
+ $SHELL{if [ -z "$MOAB_ROOT" ]; then echo /global/cfs/cdirs/e3sm/software/moab/gnu; else echo "$MOAB_ROOT"; fi}
+
+
-1
+
Spock. NCCS moderate-security system that contains similar hardware and software as the upcoming Frontier system at ORNL.
.*spock.*
@@ -1017,6 +1069,7 @@
/usr/share/lmod/lmod/libexec/lmod python
+ Core Core/24.07
PrgEnv-cray PrgEnv-cray/8.3.3
cce cce/15.0.1
@@ -1029,6 +1082,7 @@
+ Core Core/24.07
PrgEnv-cray PrgEnv-amd/8.3.3
amd amd/5.4.0
@@ -1037,6 +1091,7 @@
+ Core Core/24.07
PrgEnv-cray PrgEnv-gnu/8.3.3
gcc gcc/12.2.0
@@ -1045,12 +1100,13 @@
rocm/5.4.0
- cray-python/3.9.13.1
+ cray-python/3.11.5
cray-libsci
- subversion/1.14.1
- git/2.36.1
- cmake/3.21.3
- zlib/1.2.11
+ cmake/3.27.9
+ subversion
+ git
+ zlib
+ libfabric/1.15.2.0
cray-hdf5-parallel/1.12.2.1
cray-netcdf-hdf5parallel/4.9.0.1
cray-parallel-netcdf/1.12.3.1
@@ -1084,9 +1140,6 @@
spread
threads
-
@@ -1976,7 +2028,8 @@ commented out until "*** No rule to make target '.../libadios2pio-nm-lib.a'" iss
/nfs/gce/projects/climate/software/perl5/lib/perl5
- $SHELL{if [ -z "$ADIOS2_ROOT" ]; then echo /nfs/gce/projects/climate/software/linux-ubuntu22.04-x86_64/adios2/2.9.1/mpich-4.1.2/gcc-12.1.0; else echo "$ADIOS2_ROOT"; fi}
+ $SHELL{if [ -z "$ADIOS2_ROOT" ]; then echo /nfs/gce/projects/climate/software/linux-ubuntu22.04-x86_64/adios2/2.10.1/mpich-4.1.2/gcc-12.1.0; else echo "$ADIOS2_ROOT"; fi}
+ $SHELL{if [ -z "$BLOSC2_ROOT" ]; then echo /nfs/gce/projects/climate/software/linux-ubuntu22.04-x86_64/c-blosc2/2.15.1/gcc-12.1.0; else echo "$BLOSC2_ROOT"; fi}
@@ -2398,17 +2451,15 @@ commented out until "*** No rule to make target '.../libadios2pio-nm-lib.a'" iss
openmpi/4.1.6-2mm63n2
hdf5/1.10.7-4cghwvq
- netcdf-c/4.4.1-a4hji6e
- netcdf-cxx/4.2-ldoxr43
- netcdf-fortran/4.4.4-husened
+ netcdf-c/4.7.4-4qjdadt
+ netcdf-fortran/4.5.3-qozrykr
parallel-netcdf/1.11.0-icrpxty
intel-mpi/2019.9.304-tkzvizk
- hdf5/1.8.16-se4xyo7
- netcdf-c/4.4.1-qvxyzq2
- netcdf-cxx/4.2-binixgj
- netcdf-fortran/4.4.4-rdxohvp
+ hdf5/1.10.7-wczt56s
+ netcdf-c/4.7.4-ba6agmb
+ netcdf-fortran/4.5.3-5lvy5p4
parallel-netcdf/1.11.0-b74wv4m
@@ -2418,17 +2469,19 @@ commented out until "*** No rule to make target '.../libadios2pio-nm-lib.a'" iss
openmpi/4.1.6-ggebj5o
hdf5/1.10.7-ol6xuae
- netcdf-c/4.4.1-2njo6xx
- netcdf-cxx/4.2-7pdzqua
- netcdf-fortran/4.4.4-52c6oqi
+ netcdf-c/4.7.4-pfocec2
+ netcdf-fortran/4.5.3-va3hoor
parallel-netcdf/1.11.0-d7h4ysd
+ gcc/11.2.0-bgddrif
+ intel-oneapi-mkl/2022.1.0-w4kgsn4
+ gcc/9.2.0-ugetvbp
+ intel-mkl/2020.4.304-n3b5fye
intel-mpi/2019.9.304-jdih7h5
hdf5/1.8.16-dtbpce3
- netcdf-c/4.4.1-zcoa44z
- netcdf-cxx/4.2-ayxg4c7
- netcdf-fortran/4.4.4-2lfr2lr
+ netcdf-c/4.7.4-seagl7g
+ netcdf-fortran/4.5.3-ova6t37
parallel-netcdf/1.11.0-ifdodru
@@ -2798,6 +2851,7 @@ commented out until "*** No rule to make target '.../libadios2pio-nm-lib.a'" iss
/lcrc/group/e3sm/soft/improv/pnetcdf/1.12.3/gcc-12.3.0/openmpi-4.1.6
/lcrc/group/e3sm/soft/improv/pnetcdf/1.12.3/gcc-12.3.0/openmpi-4.1.6/bin:/lcrc/group/e3sm/soft/improv/netcdf-fortran/4.6.1b/gcc-12.3.0/openmpi-4.1.6/bin:/lcrc/group/e3sm/soft/improv/netcdf-c/4.9.2b/gcc-12.3.0/openmpi-4.1.6/bin:/lcrc/group/e3sm/soft/improv/openmpi/4.1.6/gcc-12.3.0/bin:/lcrc/group/e3sm/soft/perl/improv/bin:$ENV{PATH}
$SHELL{lp=/lcrc/group/e3sm/soft/improv/netlib-lapack/3.12.0/gcc-12.3.0:/lcrc/group/e3sm/soft/improv/pnetcdf/1.12.3/gcc-12.3.0/openmpi-4.1.6/lib:/lcrc/group/e3sm/soft/improv/netcdf-fortran/4.6.1b/gcc-12.3.0/openmpi-4.1.6/lib:/lcrc/group/e3sm/soft/improv/netcdf-c/4.9.2b/gcc-12.3.0/openmpi-4.1.6/lib:/opt/pbs/lib:/lcrc/group/e3sm/soft/improv/openmpi/4.1.6/gcc-12.3.0/lib; if [ -z "$LD_LIBRARY_PATH" ]; then echo $lp; else echo "$lp:$LD_LIBRARY_PATH"; fi}
+ $SHELL{if [ -z "$MOAB_ROOT" ]; then echo /lcrc/soft/climate/moab/improv/gnu; else echo "$MOAB_ROOT"; fi}
^lockedfile
@@ -2819,9 +2873,9 @@ commented out until "*** No rule to make target '.../libadios2pio-nm-lib.a'" iss
/usr/workspace/e3sm/ccsm3data/inputdata/atm/datm7
/p/lustre2/$USER/archive/$CASE
/p/lustre2/$USER/ccsm_baselines/$COMPILER
- /usr/workspace/e3sm/tools/cprnc
+ /usr/workspace/e3sm/apps/cprnc
8
- lc_slurm
+ slurm
boutte3 -at- llnl.gov
56
56
@@ -2830,8 +2884,16 @@ commented out until "*** No rule to make target '.../libadios2pio-nm-lib.a'" iss
srun
+
+ --mpi=pmi2
+ --export=ALL
+ -n {{ total_tasks }} -N {{ num_nodes }}
+ -c 1
+ --cpu_bind=cores
+ -m plane={{ tasks_per_node }}
+
-
+
/usr/share/lmod/lmod/init/env_modules_python.py
/usr/share/lmod/lmod/init/perl
/usr/share/lmod/lmod/init/sh
@@ -2843,24 +2905,27 @@ commented out until "*** No rule to make target '.../libadios2pio-nm-lib.a'" iss
python/3.9.12
git
+ subversion
+ cmake/3.19.2
mkl/2022.1.0
intel-classic/2021.6.0-magic
- mvapich2/2.3.7
- cmake/3.19.2
- /usr/workspace/e3sm/install/quartz/modulefiles
- hdf5/1.12.2
- netcdf-c/4.9.0
- netcdf-fortran/4.6.0
- parallel-netcdf/1.12.3
- screamML-venv/0.0.1
- subversion
+ /usr/workspace/e3sm/spack/modules/ruby/linux-rhel8-x86_64/Core
+ mvapich2/2.3.7-ll7cmqm
+ hdf5/1.10.7-ewjpbjd
+ netcdf-c/4.4.1.1-vaxofek
+ netcdf-fortran/4.4.4-3pzbx2u
+ parallel-netcdf/1.11.0-tzgdala
$CIME_OUTPUT_ROOT/$CASE/run
$CIME_OUTPUT_ROOT/$CASE/bld
- /usr/workspace/e3sm/install/quartz/netcdf-fortran/
- /usr/tce/packages/parallel-netcdf/parallel-netcdf-1.12.3-mvapich2-2.3.7-intel-classic-2021.6.0
+ 128M
+ FALSE
+ /usr/workspace/e3sm/spack/libs/linux-rhel8-cascadelake/intel-2021.6.0/hdf5-1.10.7-ewjpbjdhjgjzrzjcvwyjyuulaesbsjhg
+ /usr/workspace/e3sm/spack/libs/linux-rhel8-cascadelake/intel-2021.6.0/netcdf-c-4.4.1.1-vaxofekwvnvngh7wptmzkwdb7tkzvesn
+ /usr/workspace/e3sm/spack/libs/linux-rhel8-cascadelake/intel-2021.6.0/netcdf-fortran-4.4.4-3pzbx2unddhladhubaahhhysjmprzqi2
+ /usr/workspace/e3sm/spack/libs/linux-rhel8-cascadelake/intel-2021.6.0/parallel-netcdf-1.11.0-tzgdalakmem7tod6cruhqyeackeix5q5
@@ -2875,9 +2940,9 @@ commented out until "*** No rule to make target '.../libadios2pio-nm-lib.a'" iss
/usr/workspace/e3sm/ccsm3data/inputdata/atm/datm7
/p/lustre2/$USER/archive/$CASE
/p/lustre2/$USER/ccsm_baselines/$COMPILER
- /usr/workspace/e3sm/tools/cprnc
+ /usr/workspace/e3sm/apps/cprnc
8
- lc_slurm
+ slurm
boutte3 -at- llnl.gov
224
112
@@ -2886,8 +2951,16 @@ commented out until "*** No rule to make target '.../libadios2pio-nm-lib.a'" iss
srun
+
+ --mpi=pmi2
+ --export=ALL
+ -n {{ total_tasks }} -N {{ num_nodes }}
+ -c 1
+ --cpu_bind=cores
+ -m plane={{ tasks_per_node }}
+
-
+
/usr/share/lmod/lmod/init/env_modules_python.py
/usr/share/lmod/lmod/init/perl
/usr/share/lmod/lmod/init/sh
@@ -2899,24 +2972,27 @@ commented out until "*** No rule to make target '.../libadios2pio-nm-lib.a'" iss
python/3.9.12
git
+ subversion
mkl/2022.1.0
intel-classic/2021.6.0-magic
- mvapich2/2.3.7
cmake/3.19.2
- /usr/workspace/e3sm/install/quartz/modulefiles
- hdf5/1.12.2
- netcdf-c/4.9.0
- netcdf-fortran/4.6.0
- parallel-netcdf/1.12.3
- screamML-venv/0.0.1
- subversion
+ /usr/workspace/e3sm/spack/modules/dane/linux-rhel8-x86_64/Core
+ mvapich2/2.3.7-27jao34
+ hdf5/1.10.7-766kapa
+ netcdf-c/4.4.1.1-2uznnlw
+ netcdf-fortran/4.4.4-itpstyo
+ parallel-netcdf/1.11.0-26sxm4m
$CIME_OUTPUT_ROOT/$CASE/run
$CIME_OUTPUT_ROOT/$CASE/bld
- /usr/workspace/e3sm/install/quartz/netcdf-fortran/
- /usr/tce/packages/parallel-netcdf/parallel-netcdf-1.12.3-mvapich2-2.3.7-intel-classic-2021.6.0
+ 128M
+ FALSE
+ /usr/workspace/e3sm/spack/libs/linux-rhel8-sapphirerapids/intel-2021.6.0/hdf5-1.10.7-766kapalbrdntu2pcgdgbhg2ch26gsuv
+ /usr/workspace/e3sm/spack/libs/linux-rhel8-sapphirerapids/intel-2021.6.0/netcdf-c-4.4.1.1-2uznnlwgiezxute6iyqzqjrpolokeaib
+ /usr/workspace/e3sm/spack/libs/linux-rhel8-sapphirerapids/intel-2021.6.0/netcdf-fortran-4.4.4-itpstyordbern7vlulmlnt47eeeokzfp
+ /usr/workspace/e3sm/spack/libs/linux-rhel8-sapphirerapids/intel-2021.6.0/parallel-netcdf-1.11.0-26sxm4mormsglmhi24poix7sugbigkck
@@ -3946,7 +4022,7 @@ commented out until "*** No rule to make target '.../libadios2pio-nm-lib.a'" iss
/usr/projects/e3sm/inputdata/atm/datm7
/lustre/scratch5/$ENV{USER}/E3SM/archive/$CASE
/lustre/scratch5/$ENV{USER}/E3SM/input_data/ccsm_baselines/$COMPILER
- /usr/projects/climate/SHARED_CLIMATE/software/badger/cprnc
+ /usr/projects/e3sm/software/chicoma-cpu/cprnc
10
e3sm_developer
4
@@ -3966,11 +4042,11 @@ commented out until "*** No rule to make target '.../libadios2pio-nm-lib.a'" iss
- /usr/share/lmod/8.3.1/init/perl
+ /usr/share/lmod/lmod/init/perl
- /usr/share/lmod/8.3.1/init/python
- /usr/share/lmod/8.3.1/init/sh
- /usr/share/lmod/8.3.1/init/csh
+ /usr/share/lmod/lmod/init/python
+ /usr/share/lmod/lmod/init/sh
+ /usr/share/lmod/lmod/init/csh
/usr/share/lmod/lmod/libexec/lmod perl
/usr/share/lmod/lmod/libexec/lmod python
module
@@ -3982,39 +4058,42 @@ commented out until "*** No rule to make target '.../libadios2pio-nm-lib.a'" iss
cray-parallel-netcdf
cray-netcdf
cray-hdf5
- PrgEnv-gnu
- PrgEnv-intel
- PrgEnv-nvidia
- PrgEnv-cray
- PrgEnv-aocc
+ gcc
+ gcc-native
intel
intel-oneapi
nvidia
aocc
cudatoolkit
climate-utils
+ cray-libsci
craype-accel-nvidia80
craype-accel-host
perftools-base
perftools
darshan
+ PrgEnv-gnu
+ PrgEnv-intel
+ PrgEnv-nvidia
+ PrgEnv-cray
+ PrgEnv-aocc
- PrgEnv-gnu/8.4.0
- gcc/12.2.0
- cray-libsci/23.05.1.4
+ PrgEnv-gnu/8.5.0
+ gcc-native/12.3
+ cray-libsci/23.12.5
- PrgEnv-nvidia/8.4.0
- nvidia/22.7
- cray-libsci/23.05.1.4
+ PrgEnv-nvidia/8.5.0
+ nvidia/24.7
+ cray-libsci/23.12.5
- PrgEnv-intel/8.4.0
- intel-classic/2023.2.0
+ PrgEnv-intel/8.5.0
+ intel/2023.2.0
@@ -4025,13 +4104,12 @@ commented out until "*** No rule to make target '.../libadios2pio-nm-lib.a'" iss
craype-accel-host
- craype/2.7.21
- cray-mpich/8.1.26
- libfabric/1.15.2.0
- cray-hdf5-parallel/1.12.2.3
- cray-netcdf-hdf5parallel/4.9.0.3
- cray-parallel-netcdf/1.12.3.3
- cmake/3.25.1
+ craype/2.7.30
+ cray-mpich/8.1.28
+ cray-hdf5-parallel/1.12.2.9
+ cray-netcdf-hdf5parallel/4.9.0.9
+ cray-parallel-netcdf/1.12.3.9
+ cmake/3.27.7
@@ -4053,6 +4131,9 @@ commented out until "*** No rule to make target '.../libadios2pio-nm-lib.a'" iss
$ENV{CRAY_NETCDF_HDF5PARALLEL_PREFIX}
$ENV{CRAY_PARALLEL_NETCDF_PREFIX}
+
+ /usr/lib64/gcc/x86_64-suse-linux/12:$ENV{LD_LIBRARY_PATH}
+
-1
diff --git a/cime_config/testmods_dirs/io/force_adiosc/shell_commands b/cime_config/testmods_dirs/io/force_adiosc/shell_commands
new file mode 100644
index 00000000000..543d5dbfef0
--- /dev/null
+++ b/cime_config/testmods_dirs/io/force_adiosc/shell_commands
@@ -0,0 +1,2 @@
+#!/bin/bash
+./xmlchange PIO_TYPENAME="adiosc"
diff --git a/cime_config/tests.py b/cime_config/tests.py
index 3c89e8e9099..46894e6ea96 100644
--- a/cime_config/tests.py
+++ b/cime_config/tests.py
@@ -49,6 +49,9 @@
"ERS.f19_g16.I20TRGSWCNPECACNTBC.elm-eca_f19_g16_I20TRGSWCNPECACNTBC",
"ERS.f19_g16.I20TRGSWCNPRDCTCBC.elm-ctc_f19_g16_I20TRGSWCNPRDCTCBC",
"ERS.r05_r05.ICNPRDCTCBC.elm-cbudget",
+ "ERS.ELM_USRDAT.I1850CNPRDCTCBC.elm-snowveg_arctic",
+ "ERS.ELM_USRDAT.I1850CNPRDCTCBC.elm-usrpft_default_I1850CNPRDCTCBC",
+ "ERS.ELM_USRDAT.I1850CNPRDCTCBC.elm-usrpft_codetest_I1850CNPRDCTCBC",
)
},
@@ -94,8 +97,6 @@
"SMS.r05_r05.IELM.elm-topounit",
"ERS.ELM_USRDAT.I1850ELM.elm-usrdat",
"ERS.r05_r05.IELM.elm-lnd_rof_2way",
- "ERS.ELM_USRDAT.I1850CNPRDCTCBC.elm-usrpft_default_I1850CNPRDCTCBC",
- "ERS.ELM_USRDAT.I1850CNPRDCTCBC.elm-usrpft_codetest_I1850CNPRDCTCBC",
"ERS.r05_r05.IELM.elm-V2_ELM_MOSART_features",
"ERS.ELM_USRDAT.IELM.elm-surface_water_dynamics"
)
@@ -207,6 +208,7 @@
"REP_Ln5.ne4pg2_oQU480.F2010",
"SMS_Ld3.ne4pg2_oQU480.F2010.eam-thetahy_sl_pg2_mass",
"ERP_Ld3.ne4pg2_ne4pg2.FIDEAL.allactive-pioroot1",
+ "ERS_Ld5.ne4pg2_oQU480.F2010.eam-sathist_F2010",
)
},
@@ -266,6 +268,7 @@
"SMS_D_Ld1.T62_oQU240wLI.GMPAS-IAF-PISMF.mpaso-impl_top_drag",
"SMS_D_Ld1.T62_oQU240.GMPAS-IAF.mpaso-harmonic_mean_drag",
"SMS_D_Ld1.T62_oQU240.GMPAS-IAF.mpaso-upwind_advection",
+ "ERS_Ld5_D.T62_oQU240.GMPAS-IAF.mpaso-conservation_check",
)
},
@@ -275,7 +278,7 @@
"ERS_P480_Ld5.TL319_IcoswISC30E3r5.GMPAS-JRA1p5-DIB-PISMF.mpaso-jra_1958",
"PEM_P480_Ld5.TL319_IcoswISC30E3r5.GMPAS-JRA1p5-DIB-PISMF.mpaso-jra_1958",
"SMS_P480_Ld5.TL319_IcoswISC30E3r5.GMPAS-JRA1p5-DIB-PISMF-TMIX.mpaso-jra_1958",
- "SMS_P480_Ld5.TL319_IcoswISC30E3r5.GMPAS-JRA1p5-DIB-PISMF-DSGR.mpaso-jra_1958",
+ "PET_P480_Ld2.TL319_IcoswISC30E3r5.GMPAS-JRA1p5-DIB-PISMF-DSGR.mpaso-jra_1958",
)
},
@@ -706,7 +709,8 @@
"PEM_Ln90.ne30pg2_ne30pg2.F2010-SCREAMv1.scream-spa_remap--scream-output-preset-4",
"ERS_Ln90.ne30pg2_ne30pg2.F2010-SCREAMv1.scream-small_kernels--scream-output-preset-5",
"ERP_Ln22.conusx4v1pg2_r05_oECv3.F2010-SCREAMv1-noAero.scream-bfbhash--scream-output-preset-6",
- "ERS_Ln22.ne30pg2_ne30pg2.F2010-SCREAMv1.scream-L128--scream-output-preset-4"
+ "ERS_Ln22.ne30pg2_ne30pg2.F2010-SCREAMv1.scream-L128--scream-output-preset-4",
+ "REP_Ld5.ne30pg2_ne30pg2.F2010-SCREAMv1.scream-L128--scream-output-preset-6"
)
},
@@ -757,6 +761,7 @@
"SMS_D_Ln5.ne4pg2_oQU480.F2010-SCREAMv1-MPASSI.scream-mam4xx-aci",
"SMS_D_Ln5.ne4pg2_oQU480.F2010-SCREAMv1-MPASSI.scream-mam4xx-wetscav",
"SMS_D_Ln5.ne4pg2_oQU480.F2010-SCREAMv1-MPASSI.scream-mam4xx-drydep",
+ "SMS_D_Ln5.ne30pg2_oECv3.F2010-SCREAMv1-MPASSI.scream-mam4xx-remap_emiss_ne4_ne30"
)
},
@@ -819,9 +824,13 @@
},
"e3sm_wav_developer" : {
- "time" : "0:45:00",
+ "time" : "1:00:00",
"tests" : (
- "ERS.T62_oEC60to30v3_wQU225EC60to30.GMPAS-IAF-WW3",
+ "SMS_D_Ln3.TL319_EC30to60E2r2_wQU225EC30to60E2r2.GMPAS-JRA1p5-WW3.ww3-jra_1958",
+ "ERS.ne30pg2_IcoswISC30E3r5_wQU225Icos30E3r5.WCYCL1850-WW3",
+ "PEM_P480.ne30pg2_IcoswISC30E3r5_wQU225Icos30E3r5.WCYCL1850-WW3",
+ "PET.ne30pg2_IcoswISC30E3r5_wQU225Icos30E3r5.WCYCL1850-WW3",
+ "SMS_D_Ln3.ne30pg2_IcoswISC30E3r5_wQU225Icos30E3r5.WCYCL1850-WW3",
)
},
diff --git a/components/cmake/modules/FindPIO.cmake b/components/cmake/modules/FindPIO.cmake
index 0277918ac8e..5589dff0ed0 100644
--- a/components/cmake/modules/FindPIO.cmake
+++ b/components/cmake/modules/FindPIO.cmake
@@ -42,6 +42,9 @@ endif()
# we can assume that an MPI case with ADIOS2_ROOT set is probably
# using adios.
if (NOT MPILIB STREQUAL "mpi-serial" AND DEFINED ENV{ADIOS2_ROOT})
+ if(DEFINED ENV{BLOSC2_ROOT})
+ set(ENV{Blosc2_DIR} "$ENV{BLOSC2_ROOT}")
+ endif()
find_package(MPI REQUIRED COMPONENTS C)
find_package(ADIOS2 REQUIRED COMPONENTS C)
list(APPEND PIOLIBS adios2::adios2)
diff --git a/components/data_comps/datm/cime_config/config_component.xml b/components/data_comps/datm/cime_config/config_component.xml
index 9145bdf4b16..5b884ecd1ad 100644
--- a/components/data_comps/datm/cime_config/config_component.xml
+++ b/components/data_comps/datm/cime_config/config_component.xml
@@ -10,7 +10,7 @@
This file may have atm desc entries.
-->
- Data driven ATM
+ Data driven ATM
QIAN data set
QIAN with water isotopes
CRUNCEP data set
@@ -28,8 +28,9 @@
COREv2 normal year forcing
COREv2 interannual forcing
interannual JRA55 forcing
- interannual JRA55 forcing, v1.5, through 2020
interannual JRA55 forcing, v1.4, through 2018
+ interannual JRA55 forcing, v1.5, through 2020
+ interannual JRA55 forcing, v1.5, through 2023
JRA55 Repeat Year Forcing v1.3 1984-1985
JRA55 Repeat Year Forcing v1.3 1990-1991
JRA55 Repeat Year Forcing v1.3 2003-2004
@@ -63,8 +64,9 @@ data (see cime issue #3653 -- https://github.com/ESMCI/cime/issues/3653).
CORE2_NYF
CORE2_IAF
CORE_IAF_JRA
- IAF_JRA_1p5
CORE_IAF_JRA_1p4_2018
+ IAF_JRA_1p5
+ IAF_JRA_1p5
CORE_RYF8485_JRA
CORE_RYF9091_JRA
CORE_RYF0304_JRA
@@ -391,6 +393,7 @@ data (see cime issue #3653 -- https://github.com/ESMCI/cime/issues/3653).
$DATM_CLMNCEP_YR_START
1
1
+ 1
$DATM_CLMNCEP_YR_START
$DATM_CLMNCEP_YR_START
$DATM_CLMNCEP_YR_START
@@ -488,6 +491,7 @@ data (see cime issue #3653 -- https://github.com/ESMCI/cime/issues/3653).
2003
2016
2020
+ 2023
1979
1979
diff --git a/components/data_comps/dlnd/src/dlnd_comp_mod.F90 b/components/data_comps/dlnd/src/dlnd_comp_mod.F90
index bf723259be4..fbd6e35cf87 100644
--- a/components/data_comps/dlnd/src/dlnd_comp_mod.F90
+++ b/components/data_comps/dlnd/src/dlnd_comp_mod.F90
@@ -29,6 +29,10 @@ module dlnd_comp_mod
use dlnd_shr_mod , only: domain_fracname ! namelist input
use dlnd_shr_mod , only: nullstr
+#ifdef HAVE_MOAB
+ use seq_comm_mct, only : mlnid ! id of moab lnd app
+ use iso_c_binding
+#endif
! !PUBLIC TYPES:
implicit none
private ! except
@@ -100,6 +104,15 @@ subroutine dlnd_comp_init(Eclock, x2l, l2x, &
scmMode, scmlat, scmlon)
! !DESCRIPTION: initialize dlnd model
+#ifdef HAVE_MOAB
+ use iMOAB, only: iMOAB_DefineTagStorage, &
+ iMOAB_SetIntTagStorage, iMOAB_SetDoubleTagStorage, &
+ iMOAB_ResolveSharedEntities, iMOAB_CreateVertices, &
+ iMOAB_UpdateMeshInfo
+#ifdef MOABDEBUG
+ use iMOAB, only: iMOAB_WriteMesh
+#endif
+#endif
implicit none
! !INPUT/OUTPUT PARAMETERS:
@@ -135,6 +148,18 @@ subroutine dlnd_comp_init(Eclock, x2l, l2x, &
character(nec_len) :: nec_str ! elevation class, as character string
character(*), parameter :: domain_fracname_unset = 'null'
+#ifdef HAVE_MOAB
+ character*400 tagname
+ real(R8) latv, lonv
+ integer iv, tagindex, ilat, ilon
+ real(R8), allocatable, target :: data(:)
+ integer(IN), pointer :: idata(:) ! temporary
+ real(R8), dimension(:), allocatable :: moab_vert_coords ! temporary
+#ifdef MOABDEBUG
+ character*100 outfile, wopts
+#endif
+#endif
+
!--- formats ---
character(*), parameter :: F00 = "('(dlnd_comp_init) ',8a)"
character(*), parameter :: F0L = "('(dlnd_comp_init) ',a, l2)"
@@ -256,6 +281,119 @@ subroutine dlnd_comp_init(Eclock, x2l, l2x, &
call t_stopf('dlnd_initmctdom')
+#ifdef HAVE_MOAB
+ ilat = mct_aVect_indexRA(ggrid%data,'lat')
+ ilon = mct_aVect_indexRA(ggrid%data,'lon')
+ allocate(moab_vert_coords(lsize*3))
+ do iv = 1, lsize
+ lonv = ggrid%data%rAttr(ilon, iv) * SHR_CONST_PI/180.
+ latv = ggrid%data%rAttr(ilat, iv) * SHR_CONST_PI/180.
+ moab_vert_coords(3*iv-2)=COS(latv)*COS(lonv)
+ moab_vert_coords(3*iv-1)=COS(latv)*SIN(lonv)
+ moab_vert_coords(3*iv )=SIN(latv)
+ enddo
+
+ ! create the vertices with coordinates from MCT domain
+ ierr = iMOAB_CreateVertices(mlnid, lsize*3, 3, moab_vert_coords)
+ if (ierr .ne. 0) &
+ call shr_sys_abort('Error: fail to create MOAB vertices in data lnd model')
+
+ tagname='GLOBAL_ID'//C_NULL_CHAR
+ ierr = iMOAB_DefineTagStorage(mlnid, tagname, &
+ 0, & ! dense, integer
+ 1, & ! number of components
+ tagindex )
+ if (ierr .ne. 0) &
+ call shr_sys_abort('Error: fail to retrieve GLOBAL_ID tag ')
+
+ ! get list of global IDs for Dofs
+ call mct_gsMap_orderedPoints(gsMap, my_task, idata)
+
+ ierr = iMOAB_SetIntTagStorage ( mlnid, tagname, lsize, &
+ 0, & ! vertex type
+ idata)
+ if (ierr .ne. 0) &
+ call shr_sys_abort('Error: fail to set GLOBAL_ID tag ')
+
+ ierr = iMOAB_ResolveSharedEntities( mlnid, lsize, idata );
+ if (ierr .ne. 0) &
+ call shr_sys_abort('Error: fail to resolve shared entities')
+
+ deallocate(moab_vert_coords)
+ deallocate(idata)
+
+ ierr = iMOAB_UpdateMeshInfo( mlnid )
+ if (ierr .ne. 0) &
+ call shr_sys_abort('Error: fail to update mesh info ')
+
+ allocate(data(lsize))
+ ierr = iMOAB_DefineTagStorage( mlnid, "area:aream:frac:mask"//C_NULL_CHAR, &
+ 1, & ! dense, double
+ 1, & ! number of components
+ tagindex )
+ if (ierr > 0 ) &
+ call shr_sys_abort('Error: fail to create tag: area:aream:frac:mask' )
+
+ data(:) = ggrid%data%rAttr(mct_aVect_indexRA(ggrid%data,'area'),:)
+ tagname='area'//C_NULL_CHAR
+ ierr = iMOAB_SetDoubleTagStorage ( mlnid, tagname, lsize, &
+ 0, & ! set data on vertices
+ data)
+ if (ierr > 0 ) &
+ call shr_sys_abort('Error: fail to get area tag ')
+
+ ! set the same data for aream (model area) as area
+ ! data(:) = ggrid%data%rAttr(mct_aVect_indexRA(ggrid%data,'aream'),:)
+ tagname='aream'//C_NULL_CHAR
+ ierr = iMOAB_SetDoubleTagStorage ( mlnid, tagname, lsize, &
+ 0, & ! set data on vertices
+ data)
+ if (ierr > 0 ) &
+ call shr_sys_abort('Error: fail to set aream tag ')
+
+ data(:) = ggrid%data%rAttr(mct_aVect_indexRA(ggrid%data,'mask'),:)
+ tagname='mask'//C_NULL_CHAR
+ ierr = iMOAB_SetDoubleTagStorage ( mlnid, tagname, lsize, &
+ 0, & ! set data on vertices
+ data)
+ if (ierr > 0 ) &
+ call shr_sys_abort('Error: fail to set mask tag ')
+
+ data(:) = ggrid%data%rAttr(mct_aVect_indexRA(ggrid%data,'frac'),:)
+ tagname='frac'//C_NULL_CHAR
+ ierr = iMOAB_SetDoubleTagStorage ( mlnid, tagname, lsize, &
+ 0, & ! set data on vertices
+ data)
+ if (ierr > 0 ) &
+ call shr_sys_abort('Error: fail to set frac tag ')
+
+ deallocate(data)
+
+ ! define tags
+ ierr = iMOAB_DefineTagStorage( mlnid, trim(seq_flds_x2l_fields)//C_NULL_CHAR, &
+ 1, & ! dense, double
+ 1, & ! number of components
+ tagindex )
+ if (ierr > 0 ) &
+ call shr_sys_abort('Error: fail to create seq_flds_x2l_fields tags ')
+
+ ierr = iMOAB_DefineTagStorage( mlnid, trim(seq_flds_l2x_fields)//C_NULL_CHAR, &
+ 1, & ! dense, double
+ 1, & ! number of components
+ tagindex )
+ if (ierr > 0 ) &
+ call shr_sys_abort('Error: fail to create seq_flds_l2x_fields tags ')
+#ifdef MOABDEBUG
+ ! debug test
+ outfile = 'LndDataMesh.h5m'//C_NULL_CHAR
+ wopts = ';PARALLEL=WRITE_PART'//C_NULL_CHAR !
+ ! write out the mesh file to disk
+ ierr = iMOAB_WriteMesh(mlnid, trim(outfile), trim(wopts))
+ if (ierr .ne. 0) then
+ call shr_sys_abort(subname//' ERROR in writing data mesh lnd ')
+ endif
+#endif
+#endif
!----------------------------------------------------------------------------
! Initialize MCT attribute vectors
!----------------------------------------------------------------------------
@@ -339,8 +477,15 @@ subroutine dlnd_comp_run(EClock, x2l, l2x, &
inst_suffix, logunit, case_name)
! !DESCRIPTION: run method for dlnd model
- implicit none
+#ifdef HAVE_MOAB
+#ifdef MOABDEBUG
+ use iMOAB, only: iMOAB_WriteMesh
+#endif
+ use seq_flds_mod , only: seq_flds_l2x_fields
+ use seq_flds_mod , only: moab_set_tag_from_av
+#endif
+ implicit none
! !INPUT/OUTPUT PARAMETERS:
type(ESMF_Clock) , intent(in) :: EClock
type(mct_aVect) , intent(inout) :: x2l
@@ -366,6 +511,17 @@ subroutine dlnd_comp_run(EClock, x2l, l2x, &
integer(IN) :: nu ! unit number
logical :: write_restart ! restart now
character(len=18) :: date_str
+#ifdef HAVE_MOAB
+ real(R8), allocatable, target :: datam(:)
+ type(mct_list) :: temp_list
+ integer :: size_list, index_list, lsize
+ type(mct_string) :: mctOStr !
+ character*400 tagname, mct_field
+#ifdef MOABDEBUG
+ integer :: cur_dlnd_stepno, ierr
+ character*100 outfile, wopts, lnum
+#endif
+#endif
character(*), parameter :: F00 = "('(dlnd_comp_run) ',8a)"
character(*), parameter :: F04 = "('(dlnd_comp_run) ',2a,2i8,'s')"
@@ -464,6 +620,32 @@ subroutine dlnd_comp_run(EClock, x2l, l2x, &
call t_stopf('DLND_RUN')
+#ifdef HAVE_MOAB
+ lsize = mct_avect_lsize(l2x) ! is it the same as mct_avect_lsize(avstrm) ?
+ allocate(datam(lsize)) !
+ call mct_list_init(temp_list ,seq_flds_l2x_fields)
+ size_list=mct_list_nitem (temp_list)
+ do index_list = 1, size_list
+ call mct_list_get(mctOStr,index_list,temp_list)
+ mct_field = mct_string_toChar(mctOStr)
+ tagname= trim(mct_field)//C_NULL_CHAR
+ call moab_set_tag_from_av(tagname, l2x, index_list, mlnid, datam, lsize) ! loop over all a2x fields, not just a few
+ enddo
+ call mct_list_clean(temp_list)
+ deallocate(datam) ! maybe we should keep it around, deallocate at the final only?
+
+#ifdef MOABDEBUG
+ call seq_timemgr_EClockGetData( EClock, stepno=cur_dlnd_stepno )
+ write(lnum,"(I0.2)")cur_dlnd_stepno
+ outfile = 'dlnd_comp_run_'//trim(lnum)//'.h5m'//C_NULL_CHAR
+ wopts = 'PARALLEL=WRITE_PART'//C_NULL_CHAR
+ ierr = iMOAB_WriteMesh(mlnid, outfile, wopts)
+ if (ierr > 0 ) then
+ write(logunit,*) 'Failed to write data lnd component state '
+ endif
+#endif
+#endif
+
end subroutine dlnd_comp_run
!===============================================================================
diff --git a/components/data_comps/dlnd/src/lnd_comp_mct.F90 b/components/data_comps/dlnd/src/lnd_comp_mct.F90
index f5193ca8458..b699ec217f0 100644
--- a/components/data_comps/dlnd/src/lnd_comp_mct.F90
+++ b/components/data_comps/dlnd/src/lnd_comp_mct.F90
@@ -16,7 +16,11 @@ module lnd_comp_mct
use dlnd_comp_mod , only: dlnd_comp_init, dlnd_comp_run, dlnd_comp_final
use dlnd_shr_mod , only: dlnd_shr_read_namelists
use seq_flds_mod , only: seq_flds_x2l_fields, seq_flds_l2x_fields
-
+#ifdef HAVE_MOAB
+ use seq_comm_mct, only : mlnid ! iMOAB app id for lnd
+ use iso_c_binding
+ use iMOAB , only: iMOAB_RegisterApplication
+#endif
! !PUBLIC TYPES:
implicit none
private ! except
@@ -52,7 +56,9 @@ module lnd_comp_mct
!===============================================================================
subroutine lnd_init_mct( EClock, cdata, x2l, l2x, NLFilename )
-
+#ifdef HAVE_MOAB
+ use shr_stream_mod, only: shr_stream_getDomainInfo, shr_stream_getFile
+#endif
! !DESCRIPTION: initialize dlnd model
implicit none
@@ -146,13 +152,25 @@ subroutine lnd_init_mct( EClock, cdata, x2l, l2x, NLFilename )
!----------------------------------------------------------------------------
! Initialize dlnd
!----------------------------------------------------------------------------
-
+#ifdef HAVE_MOAB
+ ierr = iMOAB_RegisterApplication(trim("DLND")//C_NULL_CHAR, mpicom, compid, mlnid)
+ if (ierr .ne. 0) then
+ write(logunit,*) subname,' error in registering data lnd comp'
+ call shr_sys_abort(subname//' ERROR in registering data lnd comp')
+ endif
+#endif
call dlnd_comp_init(Eclock, x2l, l2x, &
seq_flds_x2l_fields, seq_flds_l2x_fields, &
SDLND, gsmap, ggrid, mpicom, compid, my_task, master_task, &
inst_suffix, inst_name, logunit, read_restart, &
scmMode, scmlat, scmlon)
-
+#ifdef HAVE_MOAB
+ if (my_task == master_task) then
+ call seq_infodata_PutData( infodata, lnd_domain=SDLND%domainFile) ! we use the same one for regular case
+ ! in regular case, it is copied from fatmlndfrc ; so we don't know if it is data land or not
+ write(logunit,*), ' use this land domain file: ', SDLND%domainFile
+ endif
+#endif
!----------------------------------------------------------------------------
! Fill infodata that needs to be returned from dlnd
!----------------------------------------------------------------------------
diff --git a/components/data_comps/docn/cime_config/config_component.xml b/components/data_comps/docn/cime_config/config_component.xml
index 5a30c69df6c..431d358f995 100644
--- a/components/data_comps/docn/cime_config/config_component.xml
+++ b/components/data_comps/docn/cime_config/config_component.xml
@@ -13,7 +13,7 @@
This file may have ocn desc entries.
-->
- DOCN
+ DOCN
null mode
prescribed ocean mode
slab ocean mode
@@ -45,7 +45,7 @@
char
- prescribed,sst_aquap1,sst_aquap2,sst_aquap3,sst_aquap4,sst_aquap5,sst_aquap6,sst_aquap7,sst_aquap8,sst_aquap9,sst_aquap10,sst_aquapfile,som,som_aquap,sst_aquap_constant,interannual,null
+ prescribed,sst_aquap1,sst_aquap2,sst_aquap3,sst_aquap4,sst_aquap5,sst_aquap6,sst_aquap7,sst_aquap8,sst_aquap9,sst_aquap10,sst_aquap11,sst_aquap12,sst_aquap13,sst_aquap14,sst_aquap15,sst_aquapfile,som,som_aquap,sst_aquap_constant,interannual,null
prescribed
null
@@ -63,6 +63,12 @@
sst_aquap8
sst_aquap9
sst_aquap10
+
+ sst_aquap11
+ sst_aquap12
+ sst_aquap13
+ sst_aquap14
+ sst_aquap15
sst_aquapfile
sst_aquap_constant
diff --git a/components/data_comps/docn/cime_config/namelist_definition_docn.xml b/components/data_comps/docn/cime_config/namelist_definition_docn.xml
index 948902e3732..a191d088d7f 100644
--- a/components/data_comps/docn/cime_config/namelist_definition_docn.xml
+++ b/components/data_comps/docn/cime_config/namelist_definition_docn.xml
@@ -257,7 +257,7 @@
char
streams
shr_strdata_nml
- SSTDATA,SST_AQUAP1,SST_AQUAP2,SST_AQUAP3,SST_AQUAP4,SST_AQUAP5,SST_AQUAP6,SST_AQUAP7,SST_AQUAP8,SST_AQUAP9,SST_AQUAP10,SST_AQUAPFILE,SST_AQUAP_CONSTANT,SOM,SOM_AQUAP,IAF,NULL,COPYALL
+ SSTDATA,SST_AQUAP1,SST_AQUAP2,SST_AQUAP3,SST_AQUAP4,SST_AQUAP5,SST_AQUAP6,SST_AQUAP7,SST_AQUAP8,SST_AQUAP9,SST_AQUAP10,SST_AQUAP11,SST_AQUAP12,SST_AQUAP13,SST_AQUAP14,SST_AQUAP15,SST_AQUAPFILE,SST_AQUAP_CONSTANT,SOM,SOM_AQUAP,IAF,NULL,COPYALL
General method that operates on the data. This is generally
implemented in the data models but is set in the strdata method for
@@ -323,6 +323,11 @@
SST_AQUAP8
SST_AQUAP9
SST_AQUAP10
+ SST_AQUAP11
+ SST_AQUAP12
+ SST_AQUAP13
+ SST_AQUAP14
+ SST_AQUAP15
SST_AQUAPFILE
SST_AQUAP_CONSTANT
SOM
diff --git a/components/data_comps/docn/src/docn_comp_mod.F90 b/components/data_comps/docn/src/docn_comp_mod.F90
index e692882c9db..43bac32bff7 100644
--- a/components/data_comps/docn/src/docn_comp_mod.F90
+++ b/components/data_comps/docn/src/docn_comp_mod.F90
@@ -984,6 +984,7 @@ subroutine prescribed_sst(xc, yc, lsize, sst_option, sst)
integer :: i
real(r8) :: tmp, tmp1, pi
real(r8) :: rlon(lsize), rlat(lsize)
+ real(r8) :: mean_SST, delta_SST
real(r8), parameter :: pio180 = SHR_CONST_PI/180._r8
@@ -1013,8 +1014,8 @@ subroutine prescribed_sst(xc, yc, lsize, sst_option, sst)
! Control
- if (sst_option < 1 .or. sst_option > 10) then
- call shr_sys_abort ('prescribed_sst: ERROR: sst_option must be between 1 and 10')
+ if (sst_option < 1 .or. sst_option > 15) then
+ call shr_sys_abort ('prescribed_sst: ERROR: sst_option must be between 1 and 15')
end if
if (sst_option == 1 .or. sst_option == 6 .or. sst_option == 7 .or. sst_option == 8) then
@@ -1174,6 +1175,20 @@ subroutine prescribed_sst(xc, yc, lsize, sst_option, sst)
end do
end if
+ !-------------------------------------------------------------------------------
+ ! RCEMIP phase 2 - Mock-Walker
+ if (sst_option>=11 .and. sst_option<=15) then
+ if (sst_option==11) then; mean_SST = 295 - TkFrz; delta_SST = 1.250; end if ! MW_295dT1p25
+ if (sst_option==12) then; mean_SST = 300 - TkFrz; delta_SST = 0.625; end if ! MW_300dT0p625
+ if (sst_option==13) then; mean_SST = 300 - TkFrz; delta_SST = 1.250; end if ! MW_300dT1p25
+ if (sst_option==14) then; mean_SST = 300 - TkFrz; delta_SST = 2.500; end if ! MW_300dT2p5
+ if (sst_option==15) then; mean_SST = 305 - TkFrz; delta_SST = 1.250; end if ! MW_305dT1p25
+ do i = 1, lsize
+ sst(i) = mean_SST + (delta_SST/2) * cos( rlat(i) * 360/54 )
+ end do
+ end if
+ !-------------------------------------------------------------------------------
+
end subroutine prescribed_sst
end module docn_comp_mod
diff --git a/components/data_comps/drof/cime_config/config_component.xml b/components/data_comps/drof/cime_config/config_component.xml
index b5e0a8071fa..1c0052d9364 100644
--- a/components/data_comps/drof/cime_config/config_component.xml
+++ b/components/data_comps/drof/cime_config/config_component.xml
@@ -13,23 +13,14 @@
-->
- Data runoff model
+ Data runoff model
NULL mode
COREv2 normal year forcing:
- COREv2 normal year forcing:
- COREv2 normal year forcing:
- COREv2 normal year forcing:
COREv2 interannual year forcing:
- COREv2 interannual year forcing:
- COREv2 interannual year forcing:
- COREv2 interannual year forcing:
CPLHIST mode:
+ JRA55 interannual forcing, v1.5, through 2023
JRA55 interannual forcing, v1.5, through 2020
- JRA55 interannual forcing, v1.5, through 2020, no rofi or rofl around AIS
JRA55 interannual forcing, v1.4, through 2018
- JRA55 interannual forcing, v1.4, through 2018, no rofi around AIS
- JRA55 interannual forcing, v1.4, through 2018, no rofl around AIS
- JRA55 interannual forcing, v1.4, through 2018, no rofi or rofl around AIS
JRA55 interannual forcing
JRA55 Repeat Year Forcing v1.3 1984-1985
JRA55 Repeat Year Forcing v1.3 1990-1991
@@ -47,26 +38,17 @@
char
- CPLHIST,DIATREN_ANN_RX1,DIATREN_ANN_AIS00_RX1,DIATREN_ANN_AIS45_RX1,DIATREN_ANN_AIS55_RX1,DIATREN_IAF_RX1,DIATREN_IAF_AIS00_RX1,DIATREN_IAF_AIS45_RX1,DIATREN_IAF_AIS55_RX1,IAF_JRA,IAF_JRA_1p5,IAF_JRA_1p5_AIS0ROF,IAF_JRA_1p4_2018,IAF_JRA_1p4_2018_AIS0ICE,IAF_JRA_1p4_2018_AIS0LIQ,IAF_JRA_1p4_2018_AIS0ROF,RYF8485_JRA,RYF9091_JRA,RYF0304_JRA,NULL
+ CPLHIST,DIATREN_ANN_RX1,DIATREN_IAF_RX1,IAF_JRA,IAF_JRA_1p5,IAF_JRA_1p4_2018,RYF8485_JRA,RYF9091_JRA,RYF0304_JRA,NULL
DIATREN_ANN_RX1
NULL
DIATREN_ANN_RX1
- DIATREN_ANN_AIS00_RX1
- DIATREN_ANN_AIS45_RX1
- DIATREN_ANN_AIS55_RX1
DIATREN_IAF_RX1
- DIATREN_IAF_AIS00_RX1
- DIATREN_IAF_AIS45_RX1
- DIATREN_IAF_AIS55_RX1
CPLHIST
IAF_JRA
+ IAF_JRA_1p5
IAF_JRA_1p5
- IAF_JRA_1p5_AIS0ROF
IAF_JRA_1p4_2018
- IAF_JRA_1p4_2018_AIS0ICE
- IAF_JRA_1p4_2018_AIS0LIQ
- IAF_JRA_1p4_2018_AIS0ROF
RYF8485_JRA
RYF9091_JRA
RYF0304_JRA
@@ -165,6 +147,7 @@
1
1
1
+ 1
run_component_drof
env_run.xml
@@ -179,6 +162,7 @@
1958
1958
1958
+ 1958
run_component_drof
env_run.xml
@@ -193,6 +177,7 @@
2016
2018
2020
+ 2023
run_component_drof
env_run.xml
diff --git a/components/data_comps/drof/cime_config/namelist_definition_drof.xml b/components/data_comps/drof/cime_config/namelist_definition_drof.xml
index c4139552c70..d4d70074596 100644
--- a/components/data_comps/drof/cime_config/namelist_definition_drof.xml
+++ b/components/data_comps/drof/cime_config/namelist_definition_drof.xml
@@ -52,18 +52,8 @@
NULL
rof.cplhist
rof.diatren_ann_rx1
- rof.diatren_ann_ais00_rx1
- rof.diatren_ann_ais45_rx1
- rof.diatren_ann_ais55_rx1
rof.diatren_iaf_rx1
- rof.diatren_iaf_ais00_rx1
- rof.diatren_iaf_ais45_rx1
- rof.diatren_iaf_ais55_rx1
- rof.iaf_jra_1p4_2018_ais0ice
- rof.iaf_jra_1p4_2018_ais0liq
- rof.iaf_jra_1p4_2018_ais0rof
rof.iaf_jra_1p4_2018
- rof.iaf_jra_1p5_ais0rof
rof.iaf_jra_1p5
rof.iaf_jra
rof.ryf8485_jra
@@ -80,13 +70,7 @@
$DIN_LOC_ROOT/lnd/dlnd7/RX1
$DIN_LOC_ROOT/lnd/dlnd7/RX1
- $DIN_LOC_ROOT/lnd/dlnd7/RX1
- $DIN_LOC_ROOT/lnd/dlnd7/RX1
- $DIN_LOC_ROOT/lnd/dlnd7/RX1
$DIN_LOC_ROOT/lnd/dlnd7/RX1
- $DIN_LOC_ROOT/lnd/dlnd7/RX1
- $DIN_LOC_ROOT/lnd/dlnd7/RX1
- $DIN_LOC_ROOT/lnd/dlnd7/RX1
$DIN_LOC_ROOT/lnd/dlnd7/JRA55
$DIN_LOC_ROOT/lnd/dlnd7/JRA55
$DIN_LOC_ROOT/lnd/dlnd7/JRA55
@@ -101,13 +85,7 @@
Stream domain file path(s).
runoff.daitren.annual.20190226.nc
- runoff.daitren.annual-AISx00.20190226.nc
- runoff.daitren.annual-AISx45.20190226.nc
- runoff.daitren.annual-AISx55.20190226.nc
runoff.daitren.iaf.20120419.nc
- runoff.daitren.iaf-AISx00.20120419.nc
- runoff.daitren.iaf-AISx45.20120419.nc
- runoff.daitren.iaf-AISx55.20120419.nc
domain.roff.JRA025.170111.nc
domain.roff.JRA025.170111.nc
null
@@ -134,27 +112,6 @@
arear area
mask mask
-
- time time
- xc lon
- yc lat
- arear area
- mask mask
-
-
- time time
- xc lon
- yc lat
- arear area
- mask mask
-
-
- time time
- xc lon
- yc lat
- arear area
- mask mask
-
time time
domrb_lon lon
@@ -173,13 +130,7 @@
$DIN_LOC_ROOT/lnd/dlnd7/RX1
$DIN_LOC_ROOT/lnd/dlnd7/RX1
- $DIN_LOC_ROOT/lnd/dlnd7/RX1
- $DIN_LOC_ROOT/lnd/dlnd7/RX1
- $DIN_LOC_ROOT/lnd/dlnd7/RX1
$DIN_LOC_ROOT/lnd/dlnd7/RX1
- $DIN_LOC_ROOT/lnd/dlnd7/RX1
- $DIN_LOC_ROOT/lnd/dlnd7/RX1
- $DIN_LOC_ROOT/lnd/dlnd7/RX1
$DIN_LOC_ROOT/ocn/jra55/v1.5_noleap
$DIN_LOC_ROOT/lnd/dlnd7/JRA55
$DIN_LOC_ROOT/lnd/dlnd7/JRA55
@@ -194,31 +145,13 @@
Stream data file path(s).
runoff.daitren.annual.20190226.nc
- runoff.daitren.annual.20190226.nc
- runoff.daitren.annual.20190226.nc
- runoff.daitren.annual.20190226.nc
runoff.daitren.iaf.20120419.nc
- runoff.daitren.iaf-AISx00.20120419.nc
- runoff.daitren.iaf-AISx45.20120419.nc
- runoff.daitren.iaf-AISx55.20120419.nc
RAF_8485.JRA.v1.3.runoff.180404.nc
RAF_9091.JRA.v1.3.runoff.180404.nc
RAF_0304.JRA.v1.3.runoff.180404.nc
-
- JRA.v1.5.runoff.%y.no_rofi_no_rofl.240411.nc
-
JRA.v1.5.runoff.%y.240411.nc
-
- JRA.v1.4.runoff.%y.no_rofi.190214.nc
-
-
- JRA.v1.4.runoff.%y.no_rofl.190214.nc
-
-
- JRA.v1.4.runoff.%y.no_rofi_no_rofl.190214.nc
-
JRA.v1.4.runoff.%y.190214.nc
@@ -244,9 +177,6 @@
runoff rofl
-
- runoff rofl
-
rofl rofl
rofi rofi
@@ -282,7 +212,6 @@
1
1
1
- 1
$DROF_STRM_YR_ALIGN
$DROF_STRM_YR_ALIGN
$DROF_CPLHIST_YR_ALIGN
@@ -296,13 +225,7 @@
First year of stream.
1
- 1
- 1
- 1
1948
- 1948
- 1948
- 1948
$DROF_STRM_YR_START
1984
1990
@@ -318,18 +241,9 @@
Last year of stream.
1
- 1
- 1
- 1
2009
- 2009
- 2009
- 2009
$DROF_STRM_YR_END
$DROF_STRM_YR_END
- $DROF_STRM_YR_END
- $DROF_STRM_YR_END
- $DROF_STRM_YR_END
$DROF_STRM_YR_END
1984
1990
diff --git a/components/eam/bld/namelist_files/use_cases/RCEMIP_EAMv1.xml b/components/eam/bld/namelist_files/use_cases/RCEMIP_EAMv1.xml
index c1c8e9e027a..146a9483b36 100644
--- a/components/eam/bld/namelist_files/use_cases/RCEMIP_EAMv1.xml
+++ b/components/eam/bld/namelist_files/use_cases/RCEMIP_EAMv1.xml
@@ -35,7 +35,7 @@
0.0
-
+
diff --git a/components/eam/cime_config/config_compsets.xml b/components/eam/cime_config/config_compsets.xml
index 7050bd0c8f9..6b3c41321ee 100644
--- a/components/eam/cime_config/config_compsets.xml
+++ b/components/eam/cime_config/config_compsets.xml
@@ -229,6 +229,50 @@
2000_EAM%RCE-MMF2_SLND_SICE_DOCN%AQPCONST_SROF_SGLC_SWAV
+
+
+ FRCE-MW_295dT1p25
+ 2000_EAM%RCE_SLND_SICE_DOCN%AQP11_SROF_SGLC_SWAV
+
+
+ FRCE-MW_300dT0p625
+ 2000_EAM%RCE_SLND_SICE_DOCN%AQP12_SROF_SGLC_SWAV
+
+
+ FRCE-MW_300dT1p25
+ 2000_EAM%RCE_SLND_SICE_DOCN%AQP13_SROF_SGLC_SWAV
+
+
+ FRCE-MW_300dT2p5
+ 2000_EAM%RCE_SLND_SICE_DOCN%AQP14_SROF_SGLC_SWAV
+
+
+ FRCE-MW_305dT1p25
+ 2000_EAM%RCE_SLND_SICE_DOCN%AQP15_SROF_SGLC_SWAV
+
+
+
+ FRCE-MW-MMF1_295dT1p25
+ 2000_EAM%RCE-MMF1_SLND_SICE_DOCN%AQP11_SROF_SGLC_SWAV
+
+
+ FRCE-MW-MMF1_300dT0p625
+ 2000_EAM%RCE-MMF1_SLND_SICE_DOCN%AQP12_SROF_SGLC_SWAV
+
+
+ FRCE-MW-MMF1_300dT1p25
+ 2000_EAM%RCE-MMF1_SLND_SICE_DOCN%AQP13_SROF_SGLC_SWAV
+
+
+ FRCE-MW-MMF1_300dT2p5
+ 2000_EAM%RCE-MMF1_SLND_SICE_DOCN%AQP14_SROF_SGLC_SWAV
+
+
+ FRCE-MW-MMF1_305dT1p25
+ 2000_EAM%RCE-MMF1_SLND_SICE_DOCN%AQP15_SROF_SGLC_SWAV
+
+
+
diff --git a/components/eam/cime_config/testdefs/testmods_dirs/eam/sathist_F2010/readme b/components/eam/cime_config/testdefs/testmods_dirs/eam/sathist_F2010/readme
new file mode 100644
index 00000000000..35f64eb4b9a
--- /dev/null
+++ b/components/eam/cime_config/testdefs/testmods_dirs/eam/sathist_F2010/readme
@@ -0,0 +1 @@
+test for sat hist capability components/eam/src/control/sat_hist.F90
diff --git a/components/eam/cime_config/testdefs/testmods_dirs/eam/sathist_F2010/shell_commands b/components/eam/cime_config/testdefs/testmods_dirs/eam/sathist_F2010/shell_commands
new file mode 100644
index 00000000000..92cb057059a
--- /dev/null
+++ b/components/eam/cime_config/testdefs/testmods_dirs/eam/sathist_F2010/shell_commands
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+./xmlchange RUN_STARTDATE=2018-01-01
diff --git a/components/eam/cime_config/testdefs/testmods_dirs/eam/sathist_F2010/user_nl_eam b/components/eam/cime_config/testdefs/testmods_dirs/eam/sathist_F2010/user_nl_eam
new file mode 100644
index 00000000000..e9b723f13b8
--- /dev/null
+++ b/components/eam/cime_config/testdefs/testmods_dirs/eam/sathist_F2010/user_nl_eam
@@ -0,0 +1,7 @@
+&satellite_options_nl
+ sathist_mfilt = 10000,
+ sathist_track_infile = '$DIN_LOC_ROOT/atm/waccm/sat/satellite_profilelist_orcas_to_socrates_c190208.nc'
+ sathist_hfilename_spec = '%c.eam.h9.sathist.%y-%m-%d-%s.nc'
+ sathist_nclosest = 1
+ sathist_ntimestep = 1
+ sathist_fincl = 'T', 'PS'
diff --git a/components/eam/cime_config/usermods_dirs/rcemip/user_nl_cpl b/components/eam/cime_config/usermods_dirs/rcemip/user_nl_cpl
index 3ecd465f7a1..3a47a8bb2ff 100644
--- a/components/eam/cime_config/usermods_dirs/rcemip/user_nl_cpl
+++ b/components/eam/cime_config/usermods_dirs/rcemip/user_nl_cpl
@@ -31,4 +31,5 @@ seq_flux_mct_albdif = 0.07
seq_flux_mct_albdir = 0.07
seq_flux_atmocn_minwind = 1
-constant_zenith_deg = 42.05
\ No newline at end of file
+constant_zenith_deg = 42.04
+
diff --git a/components/eam/src/chemistry/modal_aero/modal_aero_amicphys.F90 b/components/eam/src/chemistry/modal_aero/modal_aero_amicphys.F90
index 6546677cac3..5f7f690cf62 100644
--- a/components/eam/src/chemistry/modal_aero/modal_aero_amicphys.F90
+++ b/components/eam/src/chemistry/modal_aero/modal_aero_amicphys.F90
@@ -3530,7 +3530,7 @@ subroutine mam_soaexch_vbs_1subarea( &
! convert sat vapor conc from ug/m^3 to mol/m^3 then to mol/liter
tmpa = (c0_soa_298(ll)*1.0e-6_r8/mw_gas(igas)) * 1.0e-3_r8
! calc sat vapor pressure (atm) from molar-conc and temp [ 0.082056 = gas constant in (atm/deg-K/(mol/liter)) ]
- p0_soa_298(ll) = 0.082056_r8*tmpa*temp
+ p0_soa_298(ll) = 0.082056_r8*tmpa*298.0_r8
end do
! calc soa gas saturation molar-mixing-ratio at local temp and air-pressure
diff --git a/components/eam/src/control/sat_hist.F90 b/components/eam/src/control/sat_hist.F90
index 64b978ac0f1..17d2a92bc7b 100644
--- a/components/eam/src/control/sat_hist.F90
+++ b/components/eam/src/control/sat_hist.F90
@@ -6,18 +6,20 @@
module sat_hist
use perf_mod, only: t_startf, t_stopf
- use shr_kind_mod, only: r8 => shr_kind_r8
+ use shr_kind_mod, only: r4 => shr_kind_r4
+ use shr_kind_mod, only: r8 => shr_kind_r8, cl=>shr_kind_cl
use cam_logfile, only: iulog
- use ppgrid, only: pcols, pver, begchunk, endchunk
+ use ppgrid, only: pcols, pver, pverp, begchunk, endchunk
use cam_history_support, only: fieldname_lenp2, max_string_len, ptapes
use spmd_utils, only: masterproc, iam
use cam_abortutils, only: endrun
- use pio, only: file_desc_t, iosystem_desc_t, iosystem_desc_t, var_desc_t, io_desc_t
- use pio, only: pio_openfile, pio_redef, pio_enddef, pio_inq_dimid, pio_inq_varid, pio_seterrorhandling, pio_def_var
+ use pio, only: file_desc_t,iosystem_desc_t, var_desc_t, io_desc_t
+ use pio, only: pio_inq_dimid, pio_inq_varid
+ use pio, only: pio_seterrorhandling, pio_def_var
use pio, only: pio_inq_dimlen, pio_get_att, pio_put_att, pio_get_var, pio_put_var, pio_write_darray
- use pio, only: pio_real, pio_int, pio_double
- use pio, only: PIO_WRITE,PIO_NOWRITE, PIO_NOERR, PIO_BCAST_ERROR, PIO_INTERNAL_ERROR, PIO_Rearr_box, PIO_GLOBAL
+ use pio, only: pio_real,pio_double
+ use pio, only: PIO_NOWRITE, PIO_NOERR, PIO_BCAST_ERROR, PIO_INTERNAL_ERROR, PIO_GLOBAL
use spmd_utils, only: mpicom
#ifdef SPMD
use mpishorthand, only: mpichar, mpiint
@@ -82,6 +84,7 @@ module sat_hist
real(r8), parameter :: rad2deg = 180._r8/pi ! degrees per radian
+
contains
!-------------------------------------------------------------------------------
@@ -122,7 +125,7 @@ subroutine sat_hist_readnl(nlfile, hfilename_spec, mfilt, fincl, nhtfrq, avgflag
! set defaults
sathist_track_infile = ' '
- sathist_hfilename_spec = '%c.cam' // trim(inst_suffix) // '.hs.%y-%m-%d-%s.nc'
+ sathist_hfilename_spec = '%c.eam.hs.' // trim(inst_suffix) // '.%y-%m-%d-%s.nc'
sathist_fincl(:) = ' '
sathist_mfilt = 100000
sathist_nclosest = 1
@@ -189,14 +192,13 @@ end subroutine sat_hist_readnl
subroutine sat_hist_init
use cam_pio_utils, only: cam_pio_openfile
use ioFileMod, only: getfil
- use spmd_utils, only: npes
use time_manager, only: get_step_size
use string_utils, only: to_lower, GLC
implicit none
character(len=max_string_len) :: locfn ! Local filename
- integer :: ierr, dimid, i
+ integer :: ierr, dimid
character(len=128) :: date_format
@@ -406,17 +408,15 @@ end subroutine sat_hist_define
!-------------------------------------------------------------------------------
subroutine sat_hist_write( tape , nflds, nfils)
- use ppgrid, only : pcols, begchunk, endchunk
use phys_grid, only: phys_decomp
use dyn_grid, only: dyn_decomp
use cam_history_support, only : active_entry
- use pio, only : pio_file_is_open
- implicit none
+ use pio, only : pio_file_is_open, pio_syncfile
type(active_entry) :: tape
integer, intent(in) :: nflds
integer, intent(inout) :: nfils
- integer :: t, f, i, ncols, nocols
+ integer :: ncols, nocols
integer :: ierr
integer, allocatable :: col_ndxs(:)
@@ -430,9 +430,13 @@ subroutine sat_hist_write( tape , nflds, nfils)
real(r8),allocatable :: phs_dists(:)
integer :: coldim
-
- integer :: io_type
- logical :: has_dyn_flds
+ logical :: has_dyn_flds = .false.
+ logical :: has_phys_srf_flds = .false.
+ logical :: has_phys_lev_flds = .false.
+ logical :: has_phys_ilev_flds = .false.
+ logical :: has_dyn_srf_flds = .false.
+ logical :: has_dyn_lev_flds = .false.
+ logical :: has_dyn_ilev_flds = .false.
if (.not.has_sat_hist) return
@@ -456,13 +460,11 @@ subroutine sat_hist_write( tape , nflds, nfils)
allocate( mlons(nocols) )
allocate( phs_dists(nocols) )
- has_dyn_flds = .false.
- dyn_flds_loop: do f=1,nflds
- if ( tape%hlist(f)%field%decomp_type == dyn_decomp ) then
- has_dyn_flds = .true.
- exit dyn_flds_loop
- endif
- enddo dyn_flds_loop
+ call scan_flds( tape, nflds &
+ , has_phys_srf_flds, has_phys_lev_flds, has_phys_ilev_flds &
+ , has_dyn_srf_flds, has_dyn_lev_flds, has_dyn_ilev_flds )
+
+ has_dyn_flds = has_dyn_srf_flds .or. has_dyn_lev_flds .or. has_dyn_ilev_flds
call get_indices( obs_lats, obs_lons, ncols, nocols, has_dyn_flds, col_ndxs, chk_ndxs, &
fdyn_ndxs, ldyn_ndxs, phs_owners, dyn_owners, mlats, mlons, phs_dists )
@@ -479,16 +481,35 @@ subroutine sat_hist_write( tape , nflds, nfils)
call write_record_coord( tape, mlats(:), mlons(:), phs_dists(:), ncols, nfils )
- do f=1,nflds
+ ! dump columns of 2D fields
+ if (has_phys_srf_flds) then
+ call dump_columns( tape%File, tape%hlist, nflds, nocols, 1, nfils, &
+ col_ndxs, chk_ndxs, phs_owners, phys_decomp )
+ endif
+ if (has_dyn_srf_flds) then
+ call dump_columns( tape%File, tape%hlist, nflds, nocols, 1, nfils, &
+ fdyn_ndxs, ldyn_ndxs, dyn_owners, dyn_decomp )
+ endif
- select case (tape%hlist(f)%field%decomp_type)
- case (phys_decomp)
- call dump_columns(tape%File, tape%hlist(f), nocols, nfils, col_ndxs(:), chk_ndxs(:), phs_owners(:) )
- case (dyn_decomp)
- call dump_columns(tape%File, tape%hlist(f), nocols, nfils, fdyn_ndxs(:), ldyn_ndxs(:), dyn_owners(:) )
- end select
+ ! dump columns of 3D fields defined on mid pres levels
+ if (has_phys_lev_flds) then
+ call dump_columns( tape%File, tape%hlist, nflds, nocols, pver, nfils, &
+ col_ndxs, chk_ndxs, phs_owners, phys_decomp )
+ endif
+ if (has_dyn_lev_flds) then
+ call dump_columns( tape%File, tape%hlist, nflds, nocols, pver, nfils, &
+ fdyn_ndxs, ldyn_ndxs, dyn_owners, dyn_decomp )
+ endif
- enddo
+ ! dump columns of 3D fields defined on interface pres levels
+ if (has_phys_ilev_flds) then
+ call dump_columns( tape%File, tape%hlist, nflds, nocols, pverp, nfils, &
+ col_ndxs, chk_ndxs, phs_owners, phys_decomp )
+ endif
+ if (has_dyn_ilev_flds) then
+ call dump_columns( tape%File, tape%hlist, nflds, nocols, pverp, nfils, &
+ fdyn_ndxs, ldyn_ndxs, dyn_owners, dyn_decomp )
+ endif
deallocate( col_ndxs, chk_ndxs, fdyn_ndxs, ldyn_ndxs, phs_owners, dyn_owners )
deallocate( mlons, mlats, phs_dists )
@@ -501,92 +522,167 @@ subroutine sat_hist_write( tape , nflds, nfils)
end subroutine sat_hist_write
!-------------------------------------------------------------------------------
- subroutine dump_columns( File, hitem, ncols, nfils, fdims, ldims, owners )
- use cam_history_support, only: field_info, hentry, hist_coords, fillvalue
- use pio, only: pio_initdecomp, pio_freedecomp, pio_setframe, pio_iam_iotask, pio_setdebuglevel, pio_offset_kind
+! FIXME extra work >
+! dump_columns routine is doing unnecessary extra work serially
+! this happens because there is an unneeded mpi_allreduce call
+! and then the gathered data is written in a serial manner; this
+! could be improved by avoiding the mpi_allreduce call, and then
+! writing local data out using pio_write_darray, which is parallel
+! FIXME extra work <
+ subroutine dump_columns( File, hitems, nflds, ncols, nlevs, nfils, fdims, ldims, owners, decomp )
+ use cam_history_support, only: field_info, hentry, fillvalue
+ use pio, only: pio_setframe, pio_offset_kind
+ use spmd_utils, only: mpi_real4, mpi_real8, mpicom, mpi_sum
type(File_desc_t),intent(inout) :: File
- type(hentry), intent(in), target :: hitem
+ type(hentry), intent(in), target :: hitems(:)
+ integer, intent(in) :: nflds
integer, intent(in) :: ncols
+ integer, intent(in) :: nlevs
integer, intent(in) :: nfils
integer, intent(in) :: fdims(:)
integer, intent(in) :: ldims(:)
integer, intent(in) :: owners(:)
+ integer, intent(in) :: decomp
+
type(field_info), pointer :: field
type(var_desc_t) :: vardesc
- type(iosystem_desc_t), pointer :: sat_iosystem
- type(io_desc_t) :: iodesc
- integer :: t, ierr, ndims
- integer, allocatable :: dimlens(:)
+ integer :: ierr
- real(r8), allocatable :: buf(:)
- integer, allocatable :: dof(:)
- integer :: i,k, cnt
+ real(r8) :: sbuf1d(ncols),rbuf1d(ncols)
+ real(r4) :: buf1d(ncols)
+ real(r8) :: sbuf2d(nlevs,ncols), rbuf2d(nlevs,ncols)
+ real(r4) :: buf2d(nlevs,ncols)
+ integer :: i,k,f, cnt
call t_startf ('sat_hist::dump_columns')
- sat_iosystem => File%iosystem
- field => hitem%field
- vardesc = hitem%varid(1)
-
-
- ndims=1
- if(associated(field%mdims)) then
- ndims = size(field%mdims)+1
- else if(field%numlev>1) then
- ndims=2
- end if
- allocate(dimlens(ndims))
- dimlens(ndims)=ncols
- if(ndims>2) then
- do i=1,ndims-1
- dimlens(i)=hist_coords(field%mdims(i))%dimsize
- enddo
- else if(field%numlev>1) then
- dimlens(1) = field%numlev
- end if
-
-
- allocate( buf( product(dimlens) ) )
- allocate( dof( product(dimlens) ) )
+ do f = 1,nflds
+ field => hitems(f)%field
+
+ if (field%numlev==nlevs .and. field%decomp_type==decomp) then
+ vardesc = hitems(f)%varid(1)
+
+ if (nlevs==1) then
+ sbuf1d = 0.0_r8
+ rbuf1d = 0.0_r8
+ do i=1,ncols
+ if ( iam == owners(i) ) then
+ sbuf1d(i) = hitems(f)%hbuf( fdims(i), 1, ldims(i) )
+ endif
+ enddo
+ ! FIXME extra work: unnecessary mpi call, then serial write
+ ! FIXME extra work: can use pio_write_darray on local data instead
+ call mpi_allreduce(sbuf1d,rbuf1d,ncols,mpi_real8, mpi_sum, mpicom, ierr)
+ buf1d(:) = real(rbuf1d(:),r4)
+ ierr = pio_put_var(File, vardesc, (/nfils/),(/ncols/), buf1d(:))
+ if ( ierr /= PIO_NOERR ) then
+ call endrun('sat_hist::dump_columns: pio_put_var error')
+ endif
+ else
+ sbuf2d = 0.0_r8
+ rbuf2d = 0.0_r8
+ do i=1,ncols
+ if ( iam == owners(i) ) then
+ do k = 1,nlevs
+ sbuf2d(k,i) = hitems(f)%hbuf( fdims(i), k, ldims(i) )
+ enddo
+ endif
+ enddo
+ ! FIXME extra work: unnecessary mpi call, then serial write
+ ! FIXME extra work: can use pio_write_darray on local data instead
+ call mpi_allreduce(sbuf2d,rbuf2d,ncols*nlevs,mpi_real8, mpi_sum, mpicom, ierr)
+ buf2d(:,:) = real(rbuf2d(:,:),r4)
+ ierr = pio_put_var(File, vardesc, (/1,nfils/),(/nlevs,ncols/), buf2d(:,:))
+ if ( ierr /= PIO_NOERR ) then
+ call endrun('sat_hist::dump_columns: pio_put_var error')
+ endif
+ endif
- cnt = 0
- buf = fillvalue
- dof = 0
+ endif
- do i = 1,ncols
- do k = 1,field%numlev
- cnt = cnt+1
- if ( iam == owners(i) ) then
- buf(cnt) = hitem%hbuf( fdims(i), k, ldims(i) )
- dof(cnt) = cnt
- endif
- enddo
enddo
- call pio_setframe(File, vardesc, int(-1,kind=PIO_OFFSET_KIND))
-
- call pio_initdecomp(sat_iosystem, pio_double, dimlens, dof, iodesc )
+ call t_stopf ('sat_hist::dump_columns')
- call pio_setframe(File, vardesc, int(nfils,kind=PIO_OFFSET_KIND))
+ end subroutine dump_columns
- call pio_write_darray(File, vardesc, iodesc, buf, ierr, fillval=fillvalue)
+!-------------------------------------------------------------------------------
+! scan the fields for possible different decompositions
+!-------------------------------------------------------------------------------
+ subroutine scan_flds( tape, nflds &
+ , has_phys_srf_flds, has_phys_lev_flds, has_phys_ilev_flds &
+ , has_dyn_srf_flds, has_dyn_lev_flds, has_dyn_ilev_flds )
+ use cam_history_support, only : active_entry
+ use phys_grid, only: phys_decomp
+ use dyn_grid, only: dyn_decomp
- call pio_freedecomp(sat_iosystem, iodesc)
+ type(active_entry), intent(in) :: tape
+ integer, intent(in) :: nflds
+ logical, save :: flds_scanned
+ logical, intent(out) :: has_phys_srf_flds
+ logical, intent(out) :: has_phys_lev_flds
+ logical, intent(out) :: has_phys_ilev_flds
+ logical, intent(out) :: has_dyn_srf_flds
+ logical, intent(out) :: has_dyn_lev_flds
+ logical, intent(out) :: has_dyn_ilev_flds
+
+ integer :: f
+ character(len=cl) :: msg1, msg2
+
+ if (flds_scanned) return
+
+ do f = 1,nflds
+ if ( tape%hlist(f)%field%decomp_type == phys_decomp ) then
+ if ( tape%hlist(f)%field%numlev == 1 ) then
+ has_phys_srf_flds = .true.
+ elseif ( tape%hlist(f)%field%numlev == pver ) then
+ has_phys_lev_flds = .true.
+ elseif ( tape%hlist(f)%field%numlev == pverp ) then
+ has_phys_ilev_flds = .true.
+ else
+ call endrun('sat_hist::scan_flds numlev error : '//tape%hlist(f)%field%name)
+ endif
+ elseif ( tape%hlist(f)%field%decomp_type == dyn_decomp ) then
+ if ( tape%hlist(f)%field%numlev == 1 ) then
+ has_dyn_srf_flds = .true.
+ elseif ( tape%hlist(f)%field%numlev == pver ) then
+ has_dyn_lev_flds = .true.
+ elseif ( tape%hlist(f)%field%numlev == pverp ) then
+ has_dyn_ilev_flds = .true.
+ else
+ call endrun('sat_hist::scan_flds numlev error : '//tape%hlist(f)%field%name)
+ endif
+ else
+ call endrun('sat_hist::scan_flds decomp_type error : '//tape%hlist(f)%field%name)
+ endif
- deallocate( buf )
- deallocate( dof )
- deallocate( dimlens )
+ ! Check that the only "mdim" is the vertical coordinate.
+ if (has_phys_srf_flds .or. has_phys_lev_flds .or. has_phys_ilev_flds .or. &
+ has_dyn_srf_flds .or. has_dyn_lev_flds .or. has_dyn_ilev_flds) then
+ ! The mdims pointer is unassociated on a restart. The restart initialization
+ ! should be fixed rather than requiring the check to make sure it is associated.
+ if (associated(tape%hlist(f)%field%mdims)) then
+ if (size(tape%hlist(f)%field%mdims) > 1) then
+ msg1 = 'sat_hist::scan_flds mdims error :'//tape%hlist(f)%field%name
+ msg2 = trim(msg1)//' has mdims in addition to the vertical coordinate.'//&
+ new_line('a')//' This is not currently supported.'
+ write(iulog,*) msg2
+ call endrun(msg1)
+ end if
+ end if
+ end if
- call t_stopf ('sat_hist::dump_columns')
+ enddo
- end subroutine dump_columns
+ flds_scanned = .true.
+ end subroutine scan_flds
!-------------------------------------------------------------------------------
!-------------------------------------------------------------------------------
subroutine read_next_position( ncols )
- use time_manager, only: get_curr_date, get_prev_date
+ use time_manager, only: get_curr_date
use time_manager, only: set_time_float_from_date
implicit none
@@ -626,8 +722,14 @@ subroutine read_next_position( ncols )
call read_buffered_datetime( datetime, i )
- if ( datetime>begdatetime .and. beg_ndx<0 ) beg_ndx = i
- if ( datetime>enddatetime ) exit bnds_loop
+ if (datetime > begdatetime .and. beg_ndx < 0) then
+ beg_ndx = i
+ end if
+
+ if (datetime > enddatetime) then
+ exit bnds_loop
+ end if
+
end_ndx = i
enddo bnds_loop
@@ -660,7 +762,7 @@ end subroutine read_next_position
!-------------------------------------------------------------------------------
subroutine write_record_coord( tape, mod_lats, mod_lons, mod_dists, ncols, nfils )
- use time_manager, only: get_nstep, get_curr_date, get_curr_time
+ use time_manager, only: get_curr_date, get_curr_time
use cam_history_support, only : active_entry
implicit none
type(active_entry), intent(inout) :: tape
@@ -671,9 +773,8 @@ subroutine write_record_coord( tape, mod_lats, mod_lons, mod_dists, ncols, nfils
real(r8), intent(in) :: mod_dists(ncols * sathist_nclosest)
integer, intent(in) :: nfils
- integer :: t, ierr, i
+ integer :: ierr, i
integer :: yr, mon, day ! year, month, and day components of a date
- integer :: nstep ! current timestep number
integer :: ncdate ! current date in integer format [yyyymmdd]
integer :: ncsec ! current time of day [seconds]
integer :: ndcur ! day component of current time
@@ -686,7 +787,6 @@ subroutine write_record_coord( tape, mod_lats, mod_lons, mod_dists, ncols, nfils
call t_startf ('sat_hist::write_record_coord')
- nstep = get_nstep()
call get_curr_date(yr, mon, day, ncsec)
ncdate = yr*10000 + mon*100 + day
call get_curr_time(ndcur, nscur)
diff --git a/components/eam/src/physics/cam/cam_diagnostics.F90 b/components/eam/src/physics/cam/cam_diagnostics.F90
index ead4f558f05..5dca258775d 100644
--- a/components/eam/src/physics/cam/cam_diagnostics.F90
+++ b/components/eam/src/physics/cam/cam_diagnostics.F90
@@ -306,6 +306,7 @@ subroutine diag_init()
call addfld ('MQ',(/ 'lev' /), 'A','kg/m2','Water vapor mass in layer')
call addfld ('TMQ',horiz_only, 'A','kg/m2','Total (vertically integrated) precipitable water', &
standard_name='atmosphere_mass_content_of_water_vapor')
+ call addfld ('TMQS',horiz_only, 'A','kg/m2','Total (vertically integrated) saturated precipitable water')
call addfld ('TTQ',horiz_only, 'A', 'kg/m/s','Total (vertically integrated) vapor transport')
call addfld ('TUQ',horiz_only, 'A','kg/m/s','Total (vertically integrated) zonal water flux')
call addfld ('TVQ',horiz_only, 'A','kg/m/s','Total (vertically integrated) meridional water flux')
@@ -1368,6 +1369,14 @@ subroutine diag_phys_writeout(state, psl)
if (moist_physics) then
+ ! Mass of saturated q vertically integrated
+ call qsat(state%t(:ncol,:), state%pmid(:ncol,:), tem2(:ncol,:), ftem(:ncol,:))
+ ftem(:ncol,:) = ftem(:ncol,:) * state%pdel(:ncol,:) * rga
+ do k=2,pver
+ ftem(:ncol,1) = ftem(:ncol,1) + ftem(:ncol,k)
+ end do
+ call outfld ('TMQS ',ftem, pcols ,lchnk )
+
! Relative humidity
call qsat(state%t(:ncol,:), state%pmid(:ncol,:), &
tem2(:ncol,:), ftem(:ncol,:))
diff --git a/components/eam/src/physics/crm/pam/CMakeLists.txt b/components/eam/src/physics/crm/pam/CMakeLists.txt
index 4a8f88f669b..ac629fa89fd 100644
--- a/components/eam/src/physics/crm/pam/CMakeLists.txt
+++ b/components/eam/src/physics/crm/pam/CMakeLists.txt
@@ -9,8 +9,9 @@ set(PAM_DRIVER_SRC
params.F90)
add_library(pam_driver
- ${PAM_DRIVER_SRC})
+ ${PAM_DRIVER_SRC})
+set(SCREAM_LIBS_ONLY TRUE)
set(SCREAM_HOME ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../..)
add_library(eamxx_physics INTERFACE ${SCREAM_HOME}/components/eamxx/src/physics/)
diff --git a/components/eam/src/physics/crm/pam/external b/components/eam/src/physics/crm/pam/external
index 3ea20ad38f2..1c37054d1ff 160000
--- a/components/eam/src/physics/crm/pam/external
+++ b/components/eam/src/physics/crm/pam/external
@@ -1 +1 @@
-Subproject commit 3ea20ad38f286730973429e0d491420a6f599f11
+Subproject commit 1c37054d1ff9b160290cc286dcbd3cdc6cd7e7f6
diff --git a/components/eam/src/physics/crm/pam/pam_statistics.h b/components/eam/src/physics/crm/pam/pam_statistics.h
index bb277600dfa..8166b9912d2 100644
--- a/components/eam/src/physics/crm/pam/pam_statistics.h
+++ b/components/eam/src/physics/crm/pam/pam_statistics.h
@@ -458,29 +458,34 @@ inline void pam_statistics_compute_means( pam::PamCoupler &coupler ) {
if (clear_rh_cnt(k,iens)>0) {
clear_rh(k,iens) = clear_rh(k,iens) / clear_rh_cnt(k,iens);
}
- phys_tend_sgs_temp (k,iens) = phys_tend_sgs_temp (k,iens) / phys_tend_sgs_cnt (iens);
- phys_tend_sgs_qv (k,iens) = phys_tend_sgs_qv (k,iens) / phys_tend_sgs_cnt (iens);
- phys_tend_sgs_qc (k,iens) = phys_tend_sgs_qc (k,iens) / phys_tend_sgs_cnt (iens);
- phys_tend_sgs_qi (k,iens) = phys_tend_sgs_qi (k,iens) / phys_tend_sgs_cnt (iens);
- phys_tend_sgs_qr (k,iens) = phys_tend_sgs_qr (k,iens) / phys_tend_sgs_cnt (iens);
-
- phys_tend_micro_temp(k,iens) = phys_tend_micro_temp(k,iens) / phys_tend_micro_cnt(iens);
- phys_tend_micro_qv (k,iens) = phys_tend_micro_qv (k,iens) / phys_tend_micro_cnt(iens);
- phys_tend_micro_qc (k,iens) = phys_tend_micro_qc (k,iens) / phys_tend_micro_cnt(iens);
- phys_tend_micro_qi (k,iens) = phys_tend_micro_qi (k,iens) / phys_tend_micro_cnt(iens);
- phys_tend_micro_qr (k,iens) = phys_tend_micro_qr (k,iens) / phys_tend_micro_cnt(iens);
-
- phys_tend_dycor_temp (k,iens) = phys_tend_dycor_temp (k,iens) / phys_tend_dycor_cnt(iens);
- phys_tend_dycor_qv (k,iens) = phys_tend_dycor_qv (k,iens) / phys_tend_dycor_cnt(iens);
- phys_tend_dycor_qc (k,iens) = phys_tend_dycor_qc (k,iens) / phys_tend_dycor_cnt(iens);
- phys_tend_dycor_qi (k,iens) = phys_tend_dycor_qi (k,iens) / phys_tend_dycor_cnt(iens);
- phys_tend_dycor_qr (k,iens) = phys_tend_dycor_qr (k,iens) / phys_tend_dycor_cnt(iens);
-
- phys_tend_sponge_temp(k,iens) = phys_tend_sponge_temp(k,iens) / phys_tend_sponge_cnt(iens);
- phys_tend_sponge_qv (k,iens) = phys_tend_sponge_qv (k,iens) / phys_tend_sponge_cnt(iens);
- phys_tend_sponge_qc (k,iens) = phys_tend_sponge_qc (k,iens) / phys_tend_sponge_cnt(iens);
- phys_tend_sponge_qi (k,iens) = phys_tend_sponge_qi (k,iens) / phys_tend_sponge_cnt(iens);
- phys_tend_sponge_qr (k,iens) = phys_tend_sponge_qr (k,iens) / phys_tend_sponge_cnt(iens);
+ if (phys_tend_sgs_cnt(iens)>0) {
+ phys_tend_sgs_temp (k,iens) = phys_tend_sgs_temp (k,iens) / phys_tend_sgs_cnt (iens);
+ phys_tend_sgs_qv (k,iens) = phys_tend_sgs_qv (k,iens) / phys_tend_sgs_cnt (iens);
+ phys_tend_sgs_qc (k,iens) = phys_tend_sgs_qc (k,iens) / phys_tend_sgs_cnt (iens);
+ phys_tend_sgs_qi (k,iens) = phys_tend_sgs_qi (k,iens) / phys_tend_sgs_cnt (iens);
+ phys_tend_sgs_qr (k,iens) = phys_tend_sgs_qr (k,iens) / phys_tend_sgs_cnt (iens);
+ }
+ if (phys_tend_micro_cnt(iens)>0) {
+ phys_tend_micro_temp(k,iens) = phys_tend_micro_temp(k,iens) / phys_tend_micro_cnt(iens);
+ phys_tend_micro_qv (k,iens) = phys_tend_micro_qv (k,iens) / phys_tend_micro_cnt(iens);
+ phys_tend_micro_qc (k,iens) = phys_tend_micro_qc (k,iens) / phys_tend_micro_cnt(iens);
+ phys_tend_micro_qi (k,iens) = phys_tend_micro_qi (k,iens) / phys_tend_micro_cnt(iens);
+ phys_tend_micro_qr (k,iens) = phys_tend_micro_qr (k,iens) / phys_tend_micro_cnt(iens);
+ }
+ if (phys_tend_dycor_cnt(iens)>0) {
+ phys_tend_dycor_temp (k,iens) = phys_tend_dycor_temp (k,iens) / phys_tend_dycor_cnt(iens);
+ phys_tend_dycor_qv (k,iens) = phys_tend_dycor_qv (k,iens) / phys_tend_dycor_cnt(iens);
+ phys_tend_dycor_qc (k,iens) = phys_tend_dycor_qc (k,iens) / phys_tend_dycor_cnt(iens);
+ phys_tend_dycor_qi (k,iens) = phys_tend_dycor_qi (k,iens) / phys_tend_dycor_cnt(iens);
+ phys_tend_dycor_qr (k,iens) = phys_tend_dycor_qr (k,iens) / phys_tend_dycor_cnt(iens);
+ }
+ if (phys_tend_sponge_cnt(iens)>0) {
+ phys_tend_sponge_temp(k,iens) = phys_tend_sponge_temp(k,iens) / phys_tend_sponge_cnt(iens);
+ phys_tend_sponge_qv (k,iens) = phys_tend_sponge_qv (k,iens) / phys_tend_sponge_cnt(iens);
+ phys_tend_sponge_qc (k,iens) = phys_tend_sponge_qc (k,iens) / phys_tend_sponge_cnt(iens);
+ phys_tend_sponge_qi (k,iens) = phys_tend_sponge_qi (k,iens) / phys_tend_sponge_cnt(iens);
+ phys_tend_sponge_qr (k,iens) = phys_tend_sponge_qr (k,iens) / phys_tend_sponge_cnt(iens);
+ }
});
//------------------------------------------------------------------------------------------------
}
diff --git a/components/eam/src/physics/crm/rrtmgp/radiation.F90 b/components/eam/src/physics/crm/rrtmgp/radiation.F90
index b7b253d1b6c..b80ea0c2fe1 100644
--- a/components/eam/src/physics/crm/rrtmgp/radiation.F90
+++ b/components/eam/src/physics/crm/rrtmgp/radiation.F90
@@ -768,6 +768,18 @@ subroutine radiation_init(state)
call addfld('FLNTC'//diag(icall), horiz_only, 'A', 'W/m2', &
'Clearsky net longwave flux at top of model', &
sampling_seq='rad_lwsw', flag_xyfill=.true.)
+ call addfld('FLUTOA'//diag(icall), horiz_only, 'A', 'W/m2', &
+ 'Upwelling longwave flux at top of atmosphere', &
+ sampling_seq='rad_lwsw', flag_xyfill=.true.)
+ call addfld('FLNTOA'//diag(icall), horiz_only, 'A', 'W/m2', &
+ 'Net longwave flux at top of atmosphere', &
+ sampling_seq='rad_lwsw', flag_xyfill=.true.)
+ call addfld('FLUTOAC'//diag(icall), horiz_only, 'A', 'W/m2', &
+ 'Clearsky upwelling longwave flux at top of atmosphere', &
+ sampling_seq='rad_lwsw', flag_xyfill=.true.)
+ call addfld('FLNTOAC'//diag(icall), horiz_only, 'A', 'W/m2', &
+ 'Clearsky net longwave flux at top of atmosphere', &
+ sampling_seq='rad_lwsw', flag_xyfill=.true.)
call addfld('LWCF'//diag(icall), horiz_only, 'A', 'W/m2', &
'Longwave cloud forcing', &
sampling_seq='rad_lwsw', flag_xyfill=.true.)
@@ -2494,6 +2506,7 @@ subroutine output_fluxes_lw(icall, state, flux_all, flux_clr, qrl, qrlc)
! Working arrays
real(r8), dimension(pcols,pver+1) :: flux_up, flux_dn, flux_net
integer :: ncol
+ integer :: ktop_rad = 1
ncol = state%ncol
@@ -2531,6 +2544,12 @@ subroutine output_fluxes_lw(icall, state, flux_all, flux_clr, qrl, qrlc)
call outfld('FLUTC'//diag(icall), flux_clr%flux_up(1:ncol,ktop), ncol, state%lchnk)
call outfld('FLDSC'//diag(icall), flux_clr%flux_dn(1:ncol,kbot+1), ncol, state%lchnk)
+ ! TOA fluxes (above model top, use index to rad top)
+ call outfld('FLUTOA'//diag(icall), flux_all%flux_up(1:ncol,ktop_rad), ncol, state%lchnk)
+ call outfld('FLNTOA'//diag(icall), flux_all%flux_net(1:ncol,ktop_rad), ncol, state%lchnk)
+ call outfld('FLUTOAC'//diag(icall), flux_clr%flux_up(1:ncol,ktop_rad), ncol, state%lchnk)
+ call outfld('FLNTOAC'//diag(icall), flux_clr%flux_net(1:ncol,ktop_rad), ncol, state%lchnk)
+
! Calculate and output the cloud radiative effect (LWCF in history)
cloud_radiative_effect(1:ncol) = flux_all%flux_net(1:ncol,ktop) - flux_clr%flux_net(1:ncol,ktop)
call outfld('LWCF'//diag(icall), cloud_radiative_effect, ncol, state%lchnk)
diff --git a/components/eam/src/physics/rrtmgp/external b/components/eam/src/physics/rrtmgp/external
index e64b99cce24..b24ca1f616e 160000
--- a/components/eam/src/physics/rrtmgp/external
+++ b/components/eam/src/physics/rrtmgp/external
@@ -1 +1 @@
-Subproject commit e64b99cce24eb31bb6f317bddb6f0ffbdfaf8bb7
+Subproject commit b24ca1f616e45659b334dbd7297017cb7927367e
diff --git a/components/eam/src/physics/rrtmgp/radiation.F90 b/components/eam/src/physics/rrtmgp/radiation.F90
index 0c715000951..5c87c3376d9 100644
--- a/components/eam/src/physics/rrtmgp/radiation.F90
+++ b/components/eam/src/physics/rrtmgp/radiation.F90
@@ -767,6 +767,18 @@ subroutine radiation_init(state,pbuf)
call addfld('FLNTC'//diag(icall), horiz_only, 'A', 'W/m2', &
'Clearsky net longwave flux at top of model', &
sampling_seq='rad_lwsw', flag_xyfill=.true.)
+ call addfld('FLUTOA'//diag(icall), horiz_only, 'A', 'W/m2', &
+ 'Upwelling longwave flux at top of atmosphere', &
+ sampling_seq='rad_lwsw', flag_xyfill=.true.)
+ call addfld('FLNTOA'//diag(icall), horiz_only, 'A', 'W/m2', &
+ 'Net longwave flux at top of atmosphere', &
+ sampling_seq='rad_lwsw', flag_xyfill=.true.)
+ call addfld('FLUTOAC'//diag(icall), horiz_only, 'A', 'W/m2', &
+ 'Clearsky upwelling longwave flux at top of atmosphere', &
+ sampling_seq='rad_lwsw', flag_xyfill=.true.)
+ call addfld('FLNTOAC'//diag(icall), horiz_only, 'A', 'W/m2', &
+ 'Clearsky net longwave flux at top of atmosphere', &
+ sampling_seq='rad_lwsw', flag_xyfill=.true.)
call addfld('LWCF'//diag(icall), horiz_only, 'A', 'W/m2', &
'Longwave cloud forcing', &
sampling_seq='rad_lwsw', flag_xyfill=.true.)
@@ -2375,6 +2387,7 @@ subroutine output_fluxes_lw(icall, state, flux_all, flux_clr, qrl, qrlc)
! Working arrays
real(r8), dimension(pcols,pver+1) :: flux_up, flux_dn, flux_net
integer :: ncol
+ integer :: ktop_rad = 1
ncol = state%ncol
@@ -2412,6 +2425,12 @@ subroutine output_fluxes_lw(icall, state, flux_all, flux_clr, qrl, qrlc)
call outfld('FLUTC'//diag(icall), flux_clr%flux_up(1:ncol,ktop), ncol, state%lchnk)
call outfld('FLDSC'//diag(icall), flux_clr%flux_dn(1:ncol,kbot+1), ncol, state%lchnk)
+ ! TOA fluxes (above model top, use index to rad top)
+ call outfld('FLUTOA'//diag(icall), flux_all%flux_up(1:ncol,ktop_rad), ncol, state%lchnk)
+ call outfld('FLNTOA'//diag(icall), flux_all%flux_net(1:ncol,ktop_rad), ncol, state%lchnk)
+ call outfld('FLUTOAC'//diag(icall), flux_clr%flux_up(1:ncol,ktop_rad), ncol, state%lchnk)
+ call outfld('FLNTOAC'//diag(icall), flux_clr%flux_net(1:ncol,ktop_rad), ncol, state%lchnk)
+
! Calculate and output the cloud radiative effect (LWCF in history)
cloud_radiative_effect(1:ncol) = flux_all%flux_net(1:ncol,ktop) - flux_clr%flux_net(1:ncol,ktop)
call outfld('LWCF'//diag(icall), cloud_radiative_effect, ncol, state%lchnk)
diff --git a/components/eamxx/cime_config/namelist_defaults_scream.xml b/components/eamxx/cime_config/namelist_defaults_scream.xml
index a21a15cf6ef..5b50f703109 100644
--- a/components/eamxx/cime_config/namelist_defaults_scream.xml
+++ b/components/eamxx/cime_config/namelist_defaults_scream.xml
@@ -293,29 +293,31 @@ be lost if SCREAM_HACK_XML is not enabled.
${DIN_LOC_ROOT}/atm/scream/mam4xx/photolysis/RSF_GT200nm_v3.0_c080811.nc
${DIN_LOC_ROOT}/atm/scream/mam4xx/photolysis/temp_prs_GT200nm_JPL10_c130206.nc
-
- 20100101
+
+ 20100101
+
+ ${DIN_LOC_ROOT}/atm/scream/mam4xx/drydep/season_wes.nc
- ${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne30pg2/elevated/cmip6_mam4_so2_elev_1x1_2010_clim_ne30pg2_c20241008.nc
- ${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne30pg2/elevated/cmip6_mam4_so4_a1_elev_1x1_2010_clim_ne30pg2_c20241008.nc
- ${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne30pg2/elevated/cmip6_mam4_so4_a2_elev_1x1_2010_clim_ne30pg2_c20241008.nc
- ${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne30pg2/elevated/cmip6_mam4_pom_a4_elev_1x1_2010_clim_ne30pg2_c20241008.nc
- ${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne30pg2/elevated/cmip6_mam4_bc_a4_elev_1x1_2010_clim_ne30pg2_c20241008.nc
- ${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne30pg2/elevated/cmip6_mam4_num_a1_elev_1x1_2010_clim_ne30pg2_c20241008.nc
- ${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne30pg2/elevated/cmip6_mam4_num_a2_elev_1x1_2010_clim_ne30pg2_c20241008.nc
- ${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne30pg2/elevated/cmip6_mam4_num_a4_elev_1x1_2010_clim_ne30pg2_c20241008.nc
- ${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne30pg2/elevated/cmip6_mam4_soag_elev_1x1_2010_clim_ne30pg2_c20241008.nc
+ ${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne30pg2/elevated/cmip6_mam4_so2_elev_1x1_2010_clim_ne30pg2_c20241008.nc
+ ${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne30pg2/elevated/cmip6_mam4_so4_a1_elev_1x1_2010_clim_ne30pg2_c20241008.nc
+ ${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne30pg2/elevated/cmip6_mam4_so4_a2_elev_1x1_2010_clim_ne30pg2_c20241008.nc
+ ${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne30pg2/elevated/cmip6_mam4_pom_a4_elev_1x1_2010_clim_ne30pg2_c20241008.nc
+ ${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne30pg2/elevated/cmip6_mam4_bc_a4_elev_1x1_2010_clim_ne30pg2_c20241008.nc
+ ${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne30pg2/elevated/cmip6_mam4_num_a1_elev_1x1_2010_clim_ne30pg2_c20241008.nc
+ ${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne30pg2/elevated/cmip6_mam4_num_a2_elev_1x1_2010_clim_ne30pg2_c20241008.nc
+ ${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne30pg2/elevated/cmip6_mam4_num_a4_elev_1x1_2010_clim_ne30pg2_c20241008.nc
+ ${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne30pg2/elevated/cmip6_mam4_soag_elev_1x1_2010_clim_ne30pg2_c20241008.nc
- ${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne4pg2/elevated/cmip6_mam4_so2_elev_1x1_2010_clim_ne4pg2_c20241008.nc
- ${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne4pg2/elevated/cmip6_mam4_so4_a1_elev_1x1_2010_clim_ne4pg2_c20241008.nc
- ${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne4pg2/elevated/cmip6_mam4_so4_a2_elev_1x1_2010_clim_ne4pg2_c20241008.nc
- ${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne4pg2/elevated/cmip6_mam4_pom_a4_elev_1x1_2010_clim_ne4pg2_c20241008.nc
- ${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne4pg2/elevated/cmip6_mam4_bc_a4_elev_1x1_2010_clim_ne4pg2_c20241008.nc
- ${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne4pg2/elevated/cmip6_mam4_num_a1_elev_1x1_2010_clim_ne4pg2_c20241008.nc
- ${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne4pg2/elevated/cmip6_mam4_num_a2_elev_1x1_2010_clim_ne4pg2_c20241008.nc
- ${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne4pg2/elevated/cmip6_mam4_num_a4_elev_1x1_2010_clim_ne4pg2_c20241008.nc
- ${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne4pg2/elevated/cmip6_mam4_soag_elev_1x1_2010_clim_ne4pg2_c20241008.nc
+ ${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne4pg2/elevated/cmip6_mam4_so2_elev_1x1_2010_clim_ne4pg2_c20241008.nc
+ ${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne4pg2/elevated/cmip6_mam4_so4_a1_elev_1x1_2010_clim_ne4pg2_c20241008.nc
+ ${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne4pg2/elevated/cmip6_mam4_so4_a2_elev_1x1_2010_clim_ne4pg2_c20241008.nc
+ ${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne4pg2/elevated/cmip6_mam4_pom_a4_elev_1x1_2010_clim_ne4pg2_c20241008.nc
+ ${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne4pg2/elevated/cmip6_mam4_bc_a4_elev_1x1_2010_clim_ne4pg2_c20241008.nc
+ ${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne4pg2/elevated/cmip6_mam4_num_a1_elev_1x1_2010_clim_ne4pg2_c20241008.nc
+ ${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne4pg2/elevated/cmip6_mam4_num_a2_elev_1x1_2010_clim_ne4pg2_c20241008.nc
+ ${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne4pg2/elevated/cmip6_mam4_num_a4_elev_1x1_2010_clim_ne4pg2_c20241008.nc
+ ${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne4pg2/elevated/cmip6_mam4_soag_elev_1x1_2010_clim_ne4pg2_c20241008.nc
@@ -370,17 +372,11 @@ be lost if SCREAM_HACK_XML is not enabled.
${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne4pg2/surface/cmip6_mam4_so4_a1_surf_ne4pg2_2010_clim_c20240815.nc
${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne4pg2/surface/cmip6_mam4_so4_a2_surf_ne4pg2_2010_clim_c20240815.nc
+ ${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne30pg2/dst_ne30pg2_c20241028.nc
+ ${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne4pg2/dst_ne4pg2_c20241028.nc
-
- ${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne4pg2/surface/DMSflux.2010.ne4pg2_conserv.POPmonthlyClimFromACES4BGC_c20240814.nc
- ${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne4pg2/surface/cmip6_mam4_so2_surf_ne4pg2_2010_clim_c20240815.nc
- ${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne4pg2/surface/cmip6_mam4_bc_a4_surf_ne4pg2_2010_clim_c20240815.nc
- ${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne4pg2/surface/cmip6_mam4_num_a1_surf_ne4pg2_2010_clim_c20240815.nc
- ${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne4pg2/surface/cmip6_mam4_num_a2_surf_ne4pg2_2010_clim_c20240815.nc
- ${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne4pg2/surface/cmip6_mam4_num_a4_surf_ne4pg2_2010_clim_c20240815.nc
- ${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne4pg2/surface/cmip6_mam4_pom_a4_surf_ne4pg2_2010_clim_c20240815.nc
- ${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne4pg2/surface/cmip6_mam4_so4_a1_surf_ne4pg2_2010_clim_c20240815.nc
- ${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne4pg2/surface/cmip6_mam4_so4_a2_surf_ne4pg2_2010_clim_c20240815.nc
+ ${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne30pg2/monthly_macromolecules_0.1deg_bilinear_year01_merge_ne30pg2_c20241030.nc
+ ${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne4pg2/monthly_macromolecules_0.1deg_bilinear_year01_merge_ne4pg2_c20241030.nc
@@ -498,6 +494,11 @@ be lost if SCREAM_HACK_XML is not enabled.
0.0
-9999.0
0.0
+
+
+ -9999.0
+ 551.58
+
1
2
4
@@ -641,6 +642,7 @@ be lost if SCREAM_HACK_XML is not enabled.
1.37146e-07 ,3.45899e-08 ,1.00000e-06 ,9.99601e-08
1.37452e-07 ,3.46684e-08 ,1.00900e-06 ,9.99601e-08
5.08262e-12 ,1.54035e-13 ,3.09018e-13 ,9.14710e-22
+ 0.0
0.0
0.0
0.0
diff --git a/components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/shell_commands b/components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/shell_commands
index 435556401a2..2520f7b6b12 100644
--- a/components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/shell_commands
+++ b/components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/shell_commands
@@ -3,6 +3,7 @@
cime_root=$(./xmlquery --value CIMEROOT)
input_data_dir=$(./xmlquery --value DIN_LOC_ROOT)
atmchange=$cime_root/../components/eamxx/scripts/atmchange
+case_name=$(./xmlquery --value CASE)
# Change run length
./xmlchange RUN_STARTDATE="1994-10-01"
@@ -61,7 +62,7 @@ else
fi
# set the output yaml files
-output_yaml_files=$(find ${cime_root}/../components/eamxx/cime_config/testdefs/testmods_dirs/scream/v1prod/yaml_outs/ -maxdepth 1 -type f)
+output_yaml_files=$(find ${cime_root}/../components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/ -maxdepth 1 -type f)
for file in ${output_yaml_files[@]}; do
# if the word "coarse" is in the file name, do nothing
if [[ "${file}" == *"_coarse.yaml" && "${hmapfile}" == "not-supported-yet" ]]; then
@@ -82,6 +83,8 @@ for file in ${output_yaml_files[@]}; do
sed -i "s|horiz_remap_file:.*_to_ne30.*|horiz_remap_file: ${hmapfile}|" ./$(basename ${file})
sed -i "s|horiz_remap_file:.*_to_DecadalSites.*|horiz_remap_file: ${armmapfile}|" ./$(basename ${file})
fi
+ # replace all filename prefixes so that st_archive works...
+ sed -i "s|eamxx_output.decadal|${case_name}.scream|" ./$(basename ${file})
done
# TODO:
diff --git a/components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/scream_output.decadal.1dailyAVG_native.yaml b/components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/eamxx_output.decadal.1dailyAVG_native.yaml
similarity index 81%
rename from components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/scream_output.decadal.1dailyAVG_native.yaml
rename to components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/eamxx_output.decadal.1dailyAVG_native.yaml
index 4e1239b5c33..181f841eeb8 100644
--- a/components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/scream_output.decadal.1dailyAVG_native.yaml
+++ b/components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/eamxx_output.decadal.1dailyAVG_native.yaml
@@ -1,6 +1,6 @@
%YAML 1.1
---
-filename_prefix: scream_output.decadal.1dailyAVG_native.h
+filename_prefix: eamxx_output.decadal.1dailyAVG_native.h
iotype: pnetcdf
Averaging Type: Average
Max Snapshots Per File: 1
diff --git a/components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/scream_output.decadal.1dailyMAX_native.yaml b/components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/eamxx_output.decadal.1dailyMAX_native.yaml
similarity index 82%
rename from components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/scream_output.decadal.1dailyMAX_native.yaml
rename to components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/eamxx_output.decadal.1dailyMAX_native.yaml
index a7d10efe070..a8974c75b35 100644
--- a/components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/scream_output.decadal.1dailyMAX_native.yaml
+++ b/components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/eamxx_output.decadal.1dailyMAX_native.yaml
@@ -1,6 +1,6 @@
%YAML 1.1
---
-filename_prefix: scream_output.decadal.1dailyMAX_native.h
+filename_prefix: eamxx_output.decadal.1dailyMAX_native.h
iotype: pnetcdf
Averaging Type: Max
Max Snapshots Per File: 1
diff --git a/components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/scream_output.decadal.1dailyMIN_native.yaml b/components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/eamxx_output.decadal.1dailyMIN_native.yaml
similarity index 80%
rename from components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/scream_output.decadal.1dailyMIN_native.yaml
rename to components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/eamxx_output.decadal.1dailyMIN_native.yaml
index 653e194d278..8d48a1bedf6 100644
--- a/components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/scream_output.decadal.1dailyMIN_native.yaml
+++ b/components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/eamxx_output.decadal.1dailyMIN_native.yaml
@@ -1,6 +1,6 @@
%YAML 1.1
---
-filename_prefix: scream_output.decadal.1dailyMIN_native.h
+filename_prefix: eamxx_output.decadal.1dailyMIN_native.h
iotype: pnetcdf
Averaging Type: Min
Max Snapshots Per File: 1
diff --git a/components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/scream_output.decadal.1hourlyINST_arm.yaml b/components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/eamxx_output.decadal.1hourlyINST_arm.yaml
similarity index 92%
rename from components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/scream_output.decadal.1hourlyINST_arm.yaml
rename to components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/eamxx_output.decadal.1hourlyINST_arm.yaml
index 5bb07048aed..52fc391ca4d 100644
--- a/components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/scream_output.decadal.1hourlyINST_arm.yaml
+++ b/components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/eamxx_output.decadal.1hourlyINST_arm.yaml
@@ -1,6 +1,6 @@
%YAML 1.1
---
-filename_prefix: scream_output.decadal.1hourlyINST_arm.h
+filename_prefix: eamxx_output.decadal.1hourlyINST_arm.h
iotype: pnetcdf
Averaging Type: Instant
Max Snapshots Per File: 24 # one file per day
diff --git a/components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/scream_output.decadal.1hourlyINST_native.yaml b/components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/eamxx_output.decadal.1hourlyINST_native.yaml
similarity index 85%
rename from components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/scream_output.decadal.1hourlyINST_native.yaml
rename to components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/eamxx_output.decadal.1hourlyINST_native.yaml
index 7a221e89f1c..0aba4827ead 100644
--- a/components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/scream_output.decadal.1hourlyINST_native.yaml
+++ b/components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/eamxx_output.decadal.1hourlyINST_native.yaml
@@ -1,6 +1,6 @@
%YAML 1.1
---
-filename_prefix: scream_output.decadal.1hourlyINST_native.h
+filename_prefix: eamxx_output.decadal.1hourlyINST_native.h
iotype: pnetcdf
Averaging Type: Instant
Max Snapshots Per File: 24
diff --git a/components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/scream_output.decadal.3hourlyAVG_coarse.yaml b/components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/eamxx_output.decadal.3hourlyAVG_coarse.yaml
similarity index 96%
rename from components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/scream_output.decadal.3hourlyAVG_coarse.yaml
rename to components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/eamxx_output.decadal.3hourlyAVG_coarse.yaml
index 665294c6227..d429b11ebd1 100644
--- a/components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/scream_output.decadal.3hourlyAVG_coarse.yaml
+++ b/components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/eamxx_output.decadal.3hourlyAVG_coarse.yaml
@@ -1,6 +1,6 @@
%YAML 1.1
---
-filename_prefix: scream_output.decadal.3hourlyAVG_coarse.h
+filename_prefix: eamxx_output.decadal.3hourlyAVG_coarse.h
iotype: pnetcdf
Averaging Type: Average
Max Snapshots Per File: 8 # one file per day
diff --git a/components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/scream_output.decadal.3hourlyINST_coarse.yaml b/components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/eamxx_output.decadal.3hourlyINST_coarse.yaml
similarity index 96%
rename from components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/scream_output.decadal.3hourlyINST_coarse.yaml
rename to components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/eamxx_output.decadal.3hourlyINST_coarse.yaml
index 42c64954508..a2faa1b971c 100644
--- a/components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/scream_output.decadal.3hourlyINST_coarse.yaml
+++ b/components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/eamxx_output.decadal.3hourlyINST_coarse.yaml
@@ -1,6 +1,6 @@
%YAML 1.1
---
-filename_prefix: scream_output.decadal.3hourlyINST_coarse.h
+filename_prefix: eamxx_output.decadal.3hourlyINST_coarse.h
iotype: pnetcdf
Averaging Type: Instant
Max Snapshots Per File: 8 # one file per day
diff --git a/components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/scream_output.decadal.6hourlyAVG_coarse.yaml b/components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/eamxx_output.decadal.6hourlyAVG_coarse.yaml
similarity index 94%
rename from components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/scream_output.decadal.6hourlyAVG_coarse.yaml
rename to components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/eamxx_output.decadal.6hourlyAVG_coarse.yaml
index 5e4aaed0738..437142ba559 100644
--- a/components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/scream_output.decadal.6hourlyAVG_coarse.yaml
+++ b/components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/eamxx_output.decadal.6hourlyAVG_coarse.yaml
@@ -1,6 +1,6 @@
%YAML 1.1
---
-filename_prefix: scream_output.decadal.6hourlyAVG_coarse.h
+filename_prefix: eamxx_output.decadal.6hourlyAVG_coarse.h
iotype: pnetcdf
Averaging Type: Average
Max Snapshots Per File: 4 # one file per day
diff --git a/components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/scream_output.decadal.6hourlyINST_coarse.yaml b/components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/eamxx_output.decadal.6hourlyINST_coarse.yaml
similarity index 91%
rename from components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/scream_output.decadal.6hourlyINST_coarse.yaml
rename to components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/eamxx_output.decadal.6hourlyINST_coarse.yaml
index e9e0f34d5e0..bb83718a8eb 100644
--- a/components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/scream_output.decadal.6hourlyINST_coarse.yaml
+++ b/components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/eamxx_output.decadal.6hourlyINST_coarse.yaml
@@ -1,6 +1,6 @@
%YAML 1.1
---
-filename_prefix: scream_output.decadal.6hourlyINST_coarse.h
+filename_prefix: eamxx_output.decadal.6hourlyINST_coarse.h
iotype: pnetcdf
Averaging Type: Instant
Max Snapshots Per File: 4 # one file per day
diff --git a/components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/scream_output.decadal.6hourlyINST_native.yaml b/components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/eamxx_output.decadal.6hourlyINST_native.yaml
similarity index 90%
rename from components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/scream_output.decadal.6hourlyINST_native.yaml
rename to components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/eamxx_output.decadal.6hourlyINST_native.yaml
index bb7fd275abf..c69dc4b2212 100644
--- a/components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/scream_output.decadal.6hourlyINST_native.yaml
+++ b/components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/eamxx_output.decadal.6hourlyINST_native.yaml
@@ -1,6 +1,6 @@
%YAML 1.1
---
-filename_prefix: scream_output.decadal.6hourlyINST_native.h
+filename_prefix: eamxx_output.decadal.6hourlyINST_native.h
iotype: pnetcdf
Averaging Type: Instant
Max Snapshots Per File: 4 # one file per day
diff --git a/components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/scream_output.decadal.dailyAVG_coarse.yaml b/components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/eamxx_output.decadal.dailyAVG_coarse.yaml
similarity index 97%
rename from components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/scream_output.decadal.dailyAVG_coarse.yaml
rename to components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/eamxx_output.decadal.dailyAVG_coarse.yaml
index 7c1990a7b56..2d1e6e7221e 100644
--- a/components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/scream_output.decadal.dailyAVG_coarse.yaml
+++ b/components/eamxx/cime_config/testdefs/testmods_dirs/eamxx/prod/yaml_outs/eamxx_output.decadal.dailyAVG_coarse.yaml
@@ -1,6 +1,6 @@
%YAML 1.1
---
-filename_prefix: scream_output.decadal.dailyAVG_coarse.h
+filename_prefix: eamxx_output.decadal.dailyAVG_coarse.h
iotype: pnetcdf
Averaging Type: Average
Max Snapshots Per File: 1
diff --git a/components/eamxx/cime_config/testdefs/testmods_dirs/scream/mam4xx/aero_microphysics/shell_commands b/components/eamxx/cime_config/testdefs/testmods_dirs/scream/mam4xx/aero_microphysics/shell_commands
index 1d6757a5bd9..ac1709f7dca 100644
--- a/components/eamxx/cime_config/testdefs/testmods_dirs/scream/mam4xx/aero_microphysics/shell_commands
+++ b/components/eamxx/cime_config/testdefs/testmods_dirs/scream/mam4xx/aero_microphysics/shell_commands
@@ -8,9 +8,8 @@
$CIMEROOT/../components/eamxx/cime_config/testdefs/testmods_dirs/scream/mam4xx/update_eamxx_num_tracers.sh -b
#------------------------------------------------------
-#Update IC file and add drydep process
+# Add microphysics process
#------------------------------------------------------
-$CIMEROOT/../components/eamxx/scripts/atmchange initial_conditions::Filename='$DIN_LOC_ROOT/atm/scream/init/screami_mam4xx_ne4np4L72_c20240208.nc' -b
$CIMEROOT/../components/eamxx/scripts/atmchange physics::atm_procs_list="mac_aero_mic,rrtmgp,mam4_aero_microphys" -b
diff --git a/components/eamxx/cime_config/testdefs/testmods_dirs/scream/mam4xx/remap_emiss_ne4_ne30/shell_commands b/components/eamxx/cime_config/testdefs/testmods_dirs/scream/mam4xx/remap_emiss_ne4_ne30/shell_commands
new file mode 100644
index 00000000000..b2d0286b870
--- /dev/null
+++ b/components/eamxx/cime_config/testdefs/testmods_dirs/scream/mam4xx/remap_emiss_ne4_ne30/shell_commands
@@ -0,0 +1,28 @@
+#!/bin/sh
+
+#------------------------------------------------------
+# MAM4xx adds additional tracers to the simulation
+# Increase number of tracers for MAM4xx simulations
+#------------------------------------------------------
+
+$CIMEROOT/../components/eamxx/cime_config/testdefs/testmods_dirs/scream/mam4xx/update_eamxx_num_tracers.sh -b
+
+#------------------------------------------------------
+# Add aerosol microphysics process, force ne4pg2
+# emission files and provide a ne4pg2->ne30pg2 mapping
+# file
+#------------------------------------------------------
+alias ATMCHANGE='$CIMEROOT/../components/eamxx/scripts/atmchange'
+
+ATMCHANGE physics::atm_procs_list="mac_aero_mic,rrtmgp,mam4_aero_microphys" -b
+
+ATMCHANGE mam4_aero_microphys::mam4_so2_elevated_emiss_file_name='${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne4pg2/elevated/cmip6_mam4_so2_elev_1x1_2010_clim_ne4pg2_c20241008.nc' -b
+ATMCHANGE mam4_aero_microphys::mam4_so4_a1_elevated_emiss_file_name='${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne4pg2/elevated/cmip6_mam4_so4_a1_elev_1x1_2010_clim_ne4pg2_c20241008.nc' -b
+ATMCHANGE mam4_aero_microphys::mam4_so4_a2_elevated_emiss_file_name='${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne4pg2/elevated/cmip6_mam4_so4_a2_elev_1x1_2010_clim_ne4pg2_c20241008.nc' -b
+ATMCHANGE mam4_aero_microphys::mam4_pom_a4_elevated_emiss_file_name='${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne4pg2/elevated/cmip6_mam4_pom_a4_elev_1x1_2010_clim_ne4pg2_c20241008.nc' -b
+ATMCHANGE mam4_aero_microphys::mam4_bc_a4_elevated_emiss_file_name='${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne4pg2/elevated/cmip6_mam4_bc_a4_elev_1x1_2010_clim_ne4pg2_c20241008.nc' -b
+ATMCHANGE mam4_aero_microphys::mam4_num_a1_elevated_emiss_file_name='${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne4pg2/elevated/cmip6_mam4_num_a1_elev_1x1_2010_clim_ne4pg2_c20241008.nc' -b
+ATMCHANGE mam4_aero_microphys::mam4_num_a2_elevated_emiss_file_name='${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne4pg2/elevated/cmip6_mam4_num_a2_elev_1x1_2010_clim_ne4pg2_c20241008.nc' -b
+ATMCHANGE mam4_aero_microphys::mam4_num_a4_elevated_emiss_file_name='${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne4pg2/elevated/cmip6_mam4_num_a4_elev_1x1_2010_clim_ne4pg2_c20241008.nc' -b
+ATMCHANGE mam4_aero_microphys::mam4_soag_elevated_emiss_file_name='${DIN_LOC_ROOT}/atm/scream/mam4xx/emissions/ne4pg2/elevated/cmip6_mam4_soag_elev_1x1_2010_clim_ne4pg2_c20241008.nc' -b
+ATMCHANGE mam4_aero_microphys::aero_microphys_remap_file='${DIN_LOC_ROOT}/atm/scream/maps/map_ne4pg2_to_ne30pg2_nco_c20241108.nc' -b
diff --git a/components/eamxx/cmake/BuildCprnc.cmake b/components/eamxx/cmake/BuildCprnc.cmake
index 2f4f1f00a36..287956c5a9d 100644
--- a/components/eamxx/cmake/BuildCprnc.cmake
+++ b/components/eamxx/cmake/BuildCprnc.cmake
@@ -8,32 +8,44 @@
include (EkatUtils)
macro(BuildCprnc)
- # Make sure this is built only once
- if (NOT TARGET cprnc)
- if (SCREAM_CIME_BUILD)
- string (CONCAT MSG
- "WARNING! By default, scream should not build tests in a CIME build,\n"
- "and cprnc should only be built by scream in case tests are enabled.\n"
- "If you explicitly requested tests to be on in a CIME build,\n"
- "then you can discard this warning. Otherwise, please, contact developers.\n")
- message("${MSG}")
- endif()
- set(BLDROOT ${PROJECT_BINARY_DIR}/externals/cprnc)
- file(WRITE ${BLDROOT}/Macros.cmake
- "
- set(SCC ${CMAKE_C_COMPILER})
- set(SFC ${CMAKE_Fortran_COMPILER})
- set(FFLAGS \"${CMAKE_Fortran_FLAGS}\")
- set(NETCDF_PATH ${NetCDF_Fortran_PATH})
- "
- )
- set(SRC_ROOT ${SCREAM_BASE_DIR}/../..)
- add_subdirectory(${SRC_ROOT}/cime/CIME/non_py/cprnc ${BLDROOT})
- EkatDisableAllWarning(cprnc)
-
- set(CPRNC_BINARY ${BLDROOT}/cprnc CACHE INTERNAL "")
-
+ # TODO: handle this more carefully and more gracefully in the future
+ # TODO: For now, it is just a hack to get going...
+ # find cprnc defined in machine entries
+ set(CCSM_CPRNC $ENV{CCSM_CPRNC})
+ if(EXISTS "${CCSM_CPRNC}")
+ message(STATUS "Path ${CCSM_CPRNC} exists, so we will use it")
+ set(CPRNC_BINARY ${CCSM_CPRNC} CACHE INTERNAL "")
configure_file (${SCREAM_BASE_DIR}/cmake/CprncTest.cmake.in
${CMAKE_BINARY_DIR}/bin/CprncTest.cmake @ONLY)
+ else()
+ message(WARNING "Path ${CCSM_CPRNC} does not exist, so we will try to build it")
+ # Make sure this is built only once
+ if (NOT TARGET cprnc)
+ if (SCREAM_CIME_BUILD)
+ string (CONCAT MSG
+ "WARNING! By default, scream should not build tests in a CIME build,\n"
+ "and cprnc should only be built by scream in case tests are enabled.\n"
+ "If you explicitly requested tests to be on in a CIME build,\n"
+ "then you can discard this warning. Otherwise, please, contact developers.\n")
+ message("${MSG}")
+ endif()
+ set(BLDROOT ${PROJECT_BINARY_DIR}/externals/cprnc)
+ file(WRITE ${BLDROOT}/Macros.cmake
+ "
+ set(SCC ${CMAKE_C_COMPILER})
+ set(SFC ${CMAKE_Fortran_COMPILER})
+ set(FFLAGS \"${CMAKE_Fortran_FLAGS}\")
+ set(NETCDF_PATH ${NetCDF_Fortran_PATH})
+ "
+ )
+ set(SRC_ROOT ${SCREAM_BASE_DIR}/../..)
+ add_subdirectory(${SRC_ROOT}/cime/CIME/non_py/cprnc ${BLDROOT})
+ EkatDisableAllWarning(cprnc)
+
+ set(CPRNC_BINARY ${BLDROOT}/cprnc CACHE INTERNAL "")
+
+ configure_file (${SCREAM_BASE_DIR}/cmake/CprncTest.cmake.in
+ ${CMAKE_BINARY_DIR}/bin/CprncTest.cmake @ONLY)
+ endif()
endif()
endmacro()
diff --git a/components/eamxx/cmake/machine-files/gcp.cmake b/components/eamxx/cmake/machine-files/gcp12.cmake
similarity index 100%
rename from components/eamxx/cmake/machine-files/gcp.cmake
rename to components/eamxx/cmake/machine-files/gcp12.cmake
diff --git a/components/eamxx/cmake/machine-files/ghci-oci.cmake b/components/eamxx/cmake/machine-files/ghci-oci.cmake
index 86a2fb1d530..85eabbaa848 100644
--- a/components/eamxx/cmake/machine-files/ghci-oci.cmake
+++ b/components/eamxx/cmake/machine-files/ghci-oci.cmake
@@ -1,2 +1,15 @@
include(${CMAKE_CURRENT_LIST_DIR}/common.cmake)
common_setup()
+
+set(CMAKE_Fortran_FLAGS "-Wno-maybe-uninitialized -Wno-unused-dummy-argument -fallow-argument-mismatch" CACHE STRING "" FORCE)
+set(CMAKE_CXX_FLAGS "-fvisibility-inlines-hidden -fmessage-length=0 -Wno-use-after-free -Wno-unused-variable -Wno-maybe-uninitialized" CACHE STRING "" FORCE)
+
+# TODO: figure out a better way to handle this, e.g.,
+# TODO: --map-by ppr:1:node:pe=1 doesn't work with mpich,
+# TODO: but -map-by core:1:numa:hwthread=1 may work well?
+# TODO: this will need to be handled in EKAT at some point
+set(EKAT_MPI_NP_FLAG "-np" CACHE STRING "-np")
+
+# TODO: hack in place to get eamxx to recognize CPRNC
+# TODO: See note in BuildCprnc.cmake...
+set(ENV{CCSM_CPRNC} "/usr/local/packages/bin/cprnc")
diff --git a/components/eamxx/docs/developer/ci_nightly.md b/components/eamxx/docs/developer/ci_nightly.md
index b222139dd55..0716ce4c9f3 100644
--- a/components/eamxx/docs/developer/ci_nightly.md
+++ b/components/eamxx/docs/developer/ci_nightly.md
@@ -1,17 +1,17 @@
# Continuous Integration and Nightly Testing
-## Autotester ##
+## Autotester
EAMxx using github actions and a Sandia product called Autotester 2
to run CI testing on a CPU and GPU machine for every github pull
request. By default, we run the e3sm_scream_v1_at suite and the
standalone eamxx tests (test-all-scream).
-## Nightly overview, CDash ##
+## Nightly overview, CDash
Our nightly testing is much more extensive than the CI testing. You
can see our dashboard here under the section "E3SM_SCREAM":
-https://my.cdash.org/index.php?project=E3SM
+
We run a variety of CIME test suites and standalone testing on a number
of platforms. We even do some performance testing on frontier.
diff --git a/components/eamxx/docs/developer/cime_testing.md b/components/eamxx/docs/developer/cime_testing.md
index 71233a245b4..667488960f6 100644
--- a/components/eamxx/docs/developer/cime_testing.md
+++ b/components/eamxx/docs/developer/cime_testing.md
@@ -4,33 +4,37 @@ Full model system testing of eamxx is done through CIME test cases
(much like the rest of E3SM).
We offer a number of test suites, including:
+
* e3sm_scream_v0: Test the full set of V0 (pre-C++) tests
* e3sm_scream_v1: Test the full set of V1 (C++) tests
* e3sm_scream_v1_at: A smaller and quicker set of tests for autotesting
* e3sm_scream_hires: A small number of bigger, longer-running tests to measure performance
Example for running a suite:
-```
-% cd $repo/cime/scripts
-% ./create_test e3sm_scream_v1_at --wait
+
+```shell
+cd $repo/cime/scripts
+./create_test e3sm_scream_v1_at --wait
```
Example for running a single test case:
-```
-% cd $repo/cime/scripts
-% ./create_test SMS.ne4_ne4.F2010-SCREAMv1 --wait
+
+```shell
+cd $repo/cime/scripts
+./create_test SMS.ne4_ne4.F2010-SCREAMv1 --wait
```
There are many behavioral tweaks you can make to a test case, like
changing the run length, test type, etc. Most of this is not specific
to eamxx and works for any CIME case. This generic stuff
is well-documentated here:
-http://esmci.github.io/cime/versions/master/html/users_guide/testing.html
+
When it comes to things specific to eamxx, you have grids, compsets, and
testmods.
Common EAMxx grids are:
+
* ne4_ne4 (low resolution)
* ne4pg2_ne4pg2 (low resolution with phys grid)
* ne30_ne30 (med resolution)
@@ -38,9 +42,10 @@ Common EAMxx grids are:
* ne1024pg2_ne1024pg2 (ultra high with phys grid)
More grid info can be found here:
-https://acme-climate.atlassian.net/wiki/spaces/DOC/pages/933986549/ATM+Grid+Resolution+Summary
+
Common EAMxx compsets are:
+
* F2010-SCREAM-LR: V0 low res compset with eamxx V0 atmosphere
* F2010-SCREAMv1: V1 standard compset with eamxx V1 atmosphere
* FIOP-SCREAMv1-DP: V1 with dpxx (doubly-periodic lateral boundary condition in C++)
@@ -50,10 +55,14 @@ Full info on supported compsets can be found by looking at this file:
`$scream_repo/components/eamxx/cime_config/config_compsets.xml`
Common EAMxx testmods are:
-* small_kernels: Enable smaller-granularity kernels, can improve performance on some systems
-* scream-output-preset-[1-6]: Our 6 output presets. These turn some combination of our three output streams (phys_dyn, phys, and diags),
+
+* small_kernels: Enable smaller-granularity kernels,
+ can improve performance on some systems
+* scream-output-preset-[1-6]: Our 6 output presets.
+ These turn some combination of our three output streams
+ (phys_dyn, phys, and diags),
various remaps, etc.
-* bfbhash: Turns on bit-for-bit hash output: https://acme-climate.atlassian.net/wiki/spaces/NGDNA/pages/3831923056/EAMxx+BFB+hashing
+* bfbhash: Turns on bit-for-bit hash output:
More info on running EAMxx can be found here:
-https://acme-climate.atlassian.net/wiki/spaces/DOC/pages/3386015745/How+To+Run+EAMxx+SCREAMv1
+
diff --git a/components/eamxx/docs/developer/field.md b/components/eamxx/docs/developer/field.md
index 4170b28ac2b..8df83440a2f 100644
--- a/components/eamxx/docs/developer/field.md
+++ b/components/eamxx/docs/developer/field.md
@@ -1,45 +1,58 @@
-## Field
+# Field
-In EAMxx, a `Field` is a data structure holding two things: pointers to the data and pointers to metadata.
-Both the data and metadata are stored in `std::shared_ptr` instances, to ensure consistency across all copies
-of the field. This allows for fast shallow copy semantic for this class.
+In EAMxx, a `Field` is a data structure holding two things: pointers to the
+data and pointers to metadata. Both the data and metadata are stored in
+`std::shared_ptr` instances, to ensure consistency across all copies of
+the field. This allows for fast shallow copy semantic for this class.
-The data is stored on both CPU and device memory (these may be the same, depending on the Kokkos
-backend). In EAMxx, we always assume and guarantee that the device data is up to date. That implies that the data
-must be explicitly synced to host before using it on host, and explicitly synced to device after host manipulation,
-in order to ensure correctness. In order to access the data, users must use the `get_view`/'get_strided_view' methods,
-which takes two template arguments: the data type, and an enum specifying whether CPU or device data is needed.
-The data type is used to reinterpret the generic pointer stored inside to a view of the correct scalar type and layout.
-It is a possibly const-qualified type, and if the field was marked as "read-only", the method ensures that the
-provided data type is const. A read-only field can be created via the `getConst` method, which returns a shallow
-copy of the field, but marked as read-only. The enum specifying host or device data is optional, with device being the default.
+The data is stored on both CPU and device memory (these may be the same,
+depending on the Kokkos backend). In EAMxx, we always assume and guarantee
+that the device data is up to date. That implies that the data must be
+explicitly synced to host before using it on host, and explicitly synced
+to device after host manipulation, in order to ensure correctness.
+In order to access the data, users must use the `get_view`/
+`get_strided_view` methods, which take two template arguments:
+the data type, and an enum specifying whether CPU or device data is needed.
+The data type is used to reinterpret the generic pointer stored inside
+to a view of the correct scalar type and layout. It is a possibly
+const-qualified type, and if the field was marked as "read-only",
+the method ensures that the provided data type is const. A read-only field
+can be created via the `getConst` method, which returns a shallow copy of
+the field, but marked as read-only. The enum specifying host or device data
+is optional, with device being the default.
-The metadata is a collection of information on the field, such as name, layout, units, allocation size, and more.
-Part of the metadata is immutable after creation (e.g., name, units, or layout), while some metadata can be
-partially or completely modified. The metadata is contained in the `FieldHeader` data structure, which contains
-four parts:
+The metadata is a collection of information on the field, such as name, layout, units,
+allocation size, and more. Part of the metadata is immutable after creation (e.g.,
+name, units, or layout), while some metadata can be partially or completely modified.
+The metadata is contained in the `FieldHeader` data structure, which contains four
+parts:
-* `FieldIdentifier`: stores the field's name, layout, units, data type, and name of the grid where it's defined.
- These information are condensed in a single string, that can be used to uniquely identify a field,
- allowing to distinguish between different version of the same field. The layout is stored in the `FieldLayout`
- data structure, which includes:
- * the field tags: stored as a `std::vector`, they give context to the field's extents.
- * the field dims: stored both as a `std::vector`, as well as a 1d `Kokkos::View`.
-* `FieldTracking`: stores information on the usage of the field, as well as its possible connections to other
- fields. In particular, the tracked items are:
- * the field time stamp: the time stamp when the field was last updated.
- * the field accumulation start time: used for fields that are accumulated over several time steps
- (or time step subcycles). For instance, it allows to reconstruct fluxes from raw accumulations.
- * the providers/customers: lists of atmosphere processes (see below) that respectively require/compute
- the field in their calculations.
- * the field groups: a list of field groups that this field belongs too. Field groups are used to access
- a group of fields without explicit prior knowledge about the number and/or names of the fields.
-* `FieldAllocProp`: stores information about the allocation. While the field is not yet allocated, users can
- request special allocations for the field, for instance to accommodate packing (for SIMD), which may
- require padding. Upon allocation, this information is then used by the Field structure to extract the
- actual data, wrapped in a properly shaped `Kokkos::View`. The alloc props are also responsible of tracking
- additional information in case the field is a "slice" of a higher-dimensional one, a fact that can affect
- how the data is accessed.
-* Extra data: stored as a `std::map`, allows to catch any metadata that does not fit
- in the above structures. This is a last resort structure, intended to accommodate the most peculiar
- corner cases, and should be used sparingly.
+* `FieldIdentifier`: stores the field's name, layout, units, data type,
+  and name of the grid where it's defined. This information is condensed
+  in a single string that can be used to uniquely identify a field, allowing
+  one to distinguish between different versions of the same field.
+ The layout is stored in the `FieldLayout` data structure, which includes:
+ * the field tags: stored as a `std::vector`, they give context to the
+ field's extents.
+ * the field dims: stored both as a `std::vector`, as well as a 1d `Kokkos::View`.
+* `FieldTracking`: stores information on the usage of the field, as well as its
+ possible connections to other fields. In particular, the tracked items are:
+ * the field time stamp: the time stamp when the field was last updated.
+ * the field accumulation start time: used for fields that are accumulated over
+    several time steps (or time step subcycles). For instance, it allows one to
+ reconstruct fluxes from raw accumulations.
+ * the providers/customers: lists of atmosphere processes (see below) that
+ respectively require/compute the field in their calculations.
+  * the field groups: a list of field groups that this field belongs to. Field groups
+ are used to access a group of fields without explicit prior knowledge about the
+ number and/or names of the fields.
+* `FieldAllocProp`: stores information about the allocation. While the field is not
+ yet allocated, users can request special allocations for the field, for instance
+ to accommodate packing (for SIMD), which may require padding. Upon allocation,
+ this information is then used by the Field structure to extract the actual data,
+ wrapped in a properly shaped `Kokkos::View`. The alloc props are also
+  responsible for tracking additional information in case the field is a "slice" of
+ a higher-dimensional one, a fact that can affect how the data is accessed.
+* Extra data: stored as a `std::map`, allows one to catch any
+ metadata that does not fit in the above structures. This is a last resort structure,
+ intended to accommodate the most peculiar corner cases, and should be used sparingly.
diff --git a/components/eamxx/docs/developer/grid.md b/components/eamxx/docs/developer/grid.md
index 8a61b97e079..b4e1a1c8c03 100644
--- a/components/eamxx/docs/developer/grid.md
+++ b/components/eamxx/docs/developer/grid.md
@@ -1,22 +1,29 @@
-## Grids and Remappers
+# Grids and Remappers
-In EAMxx, the `AbstractGrid` is an interface used to access information regarding the horizontal and vertical
-discretization. The most important information that the grid stores is:
+In EAMxx, the `AbstractGrid` is an interface used to access information regarding
+the horizontal and vertical discretization. The most important information that
+the grid stores is:
-* the number of local/global DOFs: these are the degrees of freedom of the horizontal grid only. Here,
- local/global refers to the MPI partitioning.
-* the DOFs global IDs (GIDs): a list of GIDs of the DOFs on the current MPI rank, stored as a Field
-* the local IDs (LIDs) to index list: this list maps the LID of a DOF (that is, the position of the DOF
- in the GID list) to a "native" indexing system for that DOF. For instance, a `PointGrid` (a class derived from
- `AbstractGrid`) is a simple collection of points, so the "native" indexing system coincides with the LIDs.
- However, for a `SEGrid` (a derived class, for spectral element grids), the "native" indexing is a triplet
- `(ielem,igp,jgp)`, specifying the element index, and the two indices of the Gauss point within the element.
-* geometry data: stored as a `std::map`, this represent any data that is intrinsically
- linked to the grid (either along the horizontal or vertical direction), such as lat/lon coordinates,
- vertical coordinates, area associated with the DOF.
+* the number of local/global DOFs: these are the degrees of freedom of the
+ horizontal grid only. Here, local/global refers to the MPI partitioning.
+* the DOFs global IDs (GIDs): a list of GIDs of the DOFs on the current MPI rank,
+ stored as a Field
+* the local IDs (LIDs) to index list: this list maps the LID of a DOF (that is,
+ the position of the DOF in the GID list) to a "native" indexing system for that
+ DOF. For instance, a `PointGrid` (a class derived from `AbstractGrid`) is a
+ simple collection of points, so the "native" indexing system coincides with the
+ LIDs. However, for a `SEGrid` (a derived class, for spectral element grids),
+ the "native" indexing is a triplet `(ielem,igp,jgp)`, specifying the element
+ index, and the two indices of the Gauss point within the element.
+* geometry data: stored as a `std::map`, this represents any
+ data that is intrinsically linked to the grid (either along the horizontal or
+ vertical direction), such as lat/lon coordinates, vertical coordinates, area
+ associated with the DOF.
-Grids can also be used to retrieve the layout of a 2d/3d scalar/vector field, which allows certain downstream
-classes to perform certain operations without assuming anything on the horizontal grid.
+Grids can also be used to retrieve the layout of a 2d/3d scalar/vector field,
+which allows certain downstream classes to perform certain operations without
+assuming anything on the horizontal grid.
-In general, grid objects are passed around the different parts of EAMxx as const objects (read-only).
-The internal data can only be modified during construction, which usually is handled by a `GridsManager` object.
+In general, grid objects are passed around the different parts of EAMxx as const
+objects (read-only). The internal data can only be modified during construction,
+which usually is handled by a `GridsManager` object.
diff --git a/components/eamxx/docs/developer/index.md b/components/eamxx/docs/developer/index.md
index 2d47bab65fe..69673b12ebd 100644
--- a/components/eamxx/docs/developer/index.md
+++ b/components/eamxx/docs/developer/index.md
@@ -1,3 +1 @@
# SCREAM Developer Guide
-
-
diff --git a/components/eamxx/docs/developer/io.md b/components/eamxx/docs/developer/io.md
index caf237010a3..0a4c7b2d832 100644
--- a/components/eamxx/docs/developer/io.md
+++ b/components/eamxx/docs/developer/io.md
@@ -1,5 +1,5 @@
# Input-Output
-In EAMxx, I/O is handled through the SCORPIO library, currently a submodule of E3SM.
-The `scream_io` library within eamxx allows to interface the EAMxx infrastructure classes
-with the SCORPIO library.
+In EAMxx, I/O is handled through the SCORPIO library, currently a submodule of
+E3SM. The `scream_io` library within eamxx allows to interface the EAMxx
+infrastructure classes with the SCORPIO library.
diff --git a/components/eamxx/docs/developer/kokkos_ekat.md b/components/eamxx/docs/developer/kokkos_ekat.md
index 45827a11f83..a736384b2c6 100644
--- a/components/eamxx/docs/developer/kokkos_ekat.md
+++ b/components/eamxx/docs/developer/kokkos_ekat.md
@@ -2,99 +2,164 @@
## Kokkos
-EAMxx uses Kokkos for performance portable abstractions for parallel execution of code and data management to various HPC platforms, including OpenMP, Cuda, HIP, and SYCL. Here we give a brief overview of the important concepts for understanding Kokkos in EAMxx. For a more in depth description, see the [Kokkos wiki](https://kokkos.org/kokkos-core-wiki).
+EAMxx uses Kokkos for performance portable abstractions for parallel execution
+of code and data management to various HPC platforms, including OpenMP, Cuda,
+HIP, and SYCL. Here we give a brief overview of the important concepts for
+understanding Kokkos in EAMxx. For a more in depth description, see the
+[Kokkos wiki](https://kokkos.org/kokkos-core-wiki).
### Kokkos::Device
-`Kokkos::Device` is a struct which contain the type definitions for two main Kokkos concepts: execution space (`Kokkos::Device::execution_space`), the place on-node where parallel operations (like for-loops, reductions, etc.) are executed, and the memory space (`Kokkos::Device::memory_space`), the memory location on-node where data is stored. Given your machine architecture, Kokkos defines a default "device" space, given by
-```
-Kokkos::Device
-```
-where all performance critical code should be executed (e.g., on an NVIDIA machine, this device would be the GPU accelerators) and a default "host" space, given by
+`Kokkos::Device` is a struct which contains the type definitions for two main
+Kokkos concepts: execution space (`Kokkos::Device::execution_space`), the place
+on-node where parallel operations (like for-loops, reductions, etc.) are
+executed, and the memory space (`Kokkos::Device::memory_space`), the memory
+location on-node where data is stored. Given your machine architecture, Kokkos
+defines a default "device" space, given by
+
+```cpp
+Kokkos::Device
```
-Kokkos::Device
+
+where all performance critical code should be executed (e.g., on an NVIDIA
+machine, this device would be the GPU accelerators) and a default "host" space,
+given by
+
+```cpp
+Kokkos::Device
```
-where data can be accessed by the CPU cores and is necessary for I/O interfacing, for example. Currently, these default spaces are the ones used by EAMxx. On CPU-only machines, host and device represent the same space.
+
+where data can be accessed by the CPU cores and is necessary for I/O
+interfacing, for example. Currently, these default spaces are the ones used by
+EAMxx. On CPU-only machines, host and device represent the same space.
### Kokkos Views
-The main data struct provided by Kokkos used in EAMxx in the `Kokkos::View`. This is a multi-dimensional data array that can live on either device or host memory space. These Views are necessary when running on GPU architectures as data structures like `std::vector` and `std::array` will be unavailable on device.
+The main data struct provided by Kokkos used in EAMxx is the `Kokkos::View`.
+This is a multi-dimensional data array that can live on either device or host
+memory space. These Views are necessary when running on GPU architectures as
+data structures like `std::vector` and `std::array` will be unavailable on
+device.
-Views are constructed in EAMxx most commonly with the following template and input arguments
-```
-Kokkos::View(const std::string& label, int dim0, int dim1, ...)
+Views are constructed in EAMxx most commonly with the following template and
+input arguments
+
+```cpp
+Kokkos::View<DataType, LayoutType, DeviceType>(const std::string& label,
+                                               int dim0, int dim1, ...)
```
-where
- - `DataType`: scalar type of the view, given as `ScalarType`+`*`(x's number of run-time dimensions). E.g., a 2D view of doubles will have `DataType = double**`. There is also an ability to define compile-time dimensions by using `[]`, see [Kokkos wiki section on views](https://kokkos.org/kokkos-core-wiki/API/core/view/view.html).
- - `LayoutType`: mapping of indices into the underlying 1D memory storage. Types are:
- - `LayoutRight` (used in EAMxx): strides increase from the right most to the left most dimension, right-most dimension is contiguous
- - `LayoutLeft`: strides increase from the left most to the right most dimension, left-most dimension is contiguous
- - `LayoutStride`: strides can be arbitrary for each dimension
- - `DeviceType`: provides space where data live, defaults to the default device
+where
-The following example defines a view "temperature" which has dimensions columns and levels:
-```
-Kokkos::View temperature("temperature", ncols, nlevs);
+- `DataType`: scalar type of the view, given as `ScalarType`+`*`(x's number of
+ run-time dimensions). E.g., a 2D view of doubles will have `DataType =
+ double**`. There is also an ability to define compile-time dimensions by
+ using `[]`, see
+  [Kokkos wiki section on
+  views](https://kokkos.org/kokkos-core-wiki/API/core/view/view.html).
+- `LayoutType`: mapping of indices into the underlying 1D memory storage. Types
+ are:
+ - `LayoutRight` (used in EAMxx): strides increase from the right most to the
+ left most dimension, right-most dimension is contiguous
+ - `LayoutLeft`: strides increase from the left most to the right most
+ dimension, left-most dimension is contiguous
+ - `LayoutStride`: strides can be arbitrary for each dimension
+- `DeviceType`: provides space where data live, defaults to the default device
+
+The following example defines a view "temperature" which has dimensions columns
+and levels:
+
+```cpp
+Kokkos::View<double**> temperature(
+    "temperature", ncols, nlevs);
```
### Deep Copy
-Kokkos provides `Kokkos::deep_copy(dst, src)` which copies data between views of the same dimensions, or a scalar values into a view. Common uses
-```
+Kokkos provides `Kokkos::deep_copy(dst, src)` which copies data between views
+of the same dimensions, or a scalar value into a view. Common uses
+
+```cpp
Kokkos::deep_copy(view0, view1); // Copy all data from view1 into view0
Kokkos::deep_copy(view0, 5); // Set all values of view0 to 5
```
-As seen in the next section, we can use `deep_copy()` to copy data between host and device.
+
+As seen in the next section, we can use `deep_copy()` to copy data between host
+and device.
### Mirror Views
-We will often need to have memory allocation the resides on device (for computation), and then need that identical data on host (say, for output). Kokkos has a concept of mirror views, where data can be copied from host to device and vice versa.
+We will often need to have memory allocation that resides on device (for
+computation), and then need that identical data on host (say, for output).
+Kokkos has a concept of mirror views, where data can be copied from host to
+device and vice versa.
Here is an example using the device view `temperature` from above
-```
-// Allocate view on host that exactly mirrors the size of layout of the device view
+
+```cpp
+// Allocate view on host that exactly mirrors the size and layout of the
+// device view
auto host_temperature = Kokkos::create_mirror_view(temperature);
// Copy all data from device to host
Kokkos::deep_copy(host_temperature, temperature);
```
+
Kokkos also offers an all-in-one option
-```
+
+```cpp
// Note: must hand the host device instance as first argument
-auto host_temperature = Kokkos::create_mirror_view_and_copy(Kokkos::DefaultHostDevice(), temperature);
+auto host_temperature = Kokkos::create_mirror_view_and_copy(
+ Kokkos::DefaultHostDevice(), temperature);
```
### Parallel Execution
-The most basic parallel execution pattern used by EAMxx is the `Kokkos::parallel_for` which defines a for-loop with completely independent iterations. The `parallel_for` takes in an optional label for debugging, an execution policy, which defines a range and location (host or device) for the code to be run, and a lambda describing the body of code to be executed. The following are execution policies used in EAMxx
-
- - `int count`: 1D iteration range `[0, count)`
- - `RangePolicy(int beg, int end)`: 1D iteration range for indices `[beg, end)`
- - `MDRangePolicy>(int[N] beg, int[N] end)`: multi-dimensional iteration range `[beg, end)`
- - `TeamPolicy(int league_size, int team_size, int vector_size)`: 1D iteration over `league_size`, assigned to thread teams of size `team_size`, each with `vector_size` vector lanes. Both `team_size` and `vector_size` can be given `Kokkos::AUTO` as input for Kokkos to automatically compute.
+The most basic parallel execution pattern used by EAMxx is the
+`Kokkos::parallel_for` which defines a for-loop with completely independent
+iterations. The `parallel_for` takes in an optional label for debugging, an
+execution policy, which defines a range and location (host or device) for the
+code to be run, and a lambda describing the body of code to be executed. The
+following are execution policies used in EAMxx
+
+- `int count`: 1D iteration range `[0, count)`
+- `RangePolicy<ExecSpace>(int beg, int end)`: 1D iteration range for indices
+  `[beg, end)`
+- `MDRangePolicy<ExecSpace, Rank<N>>(int[N] beg, int[N] end)`:
+  multi-dimensional iteration range `[beg, end)`
+- `TeamPolicy<ExecSpace>(int league_size, int team_size, int vector_size)`: 1D
+  iteration over `league_size`, assigned to thread teams of size `team_size`,
+  each with `vector_size` vector lanes. Both `team_size` and `vector_size` can
+  be given `Kokkos::AUTO` as input for Kokkos to automatically compute.
If no `ExecSpace` template is given, the default execution space is used.
-For lambda capture, use `KOKKOS_LAMBDA` macro which sets capture automatically based on architecture.
+For lambda capture, use `KOKKOS_LAMBDA` macro which sets capture automatically
+based on architecture.
Example using `RangePolicy` to initialize a view
-```
-Kokkos::View temperature("temperature", ncols, nlevs);
+
+```cpp
+Kokkos::View<double**> temperature("temperature", ncols,
+                                   nlevs);
Kokkos::parallel_for("Init_temp",
- Kokkos::RangePolicy(0, ncols*nlevs),
- KOKKOS_LAMBDA (const int idx) {
+ Kokkos::RangePolicy(0, ncols*nlevs),
+ KOKKOS_LAMBDA (const int idx) {
int icol = idx/nlevs;
int ilev = idx%nlevs;
temperature(icol, ilev) = 0;
});
```
+
Same example with `TeamPolicy`
-```
+
+```cpp
Kokkos::parallel_for("Init_temp",
- Kokkos::TeamPolicy(ncols*nlevs, Kokkos::AUTO, Kokkos::AUTO),
- KOKKOS_LAMBDA (const TeamPolicy::member_type& team) {
+ Kokkos::TeamPolicy(ncols*nlevs, Kokkos::AUTO, Kokkos::AUTO),
+ KOKKOS_LAMBDA (const TeamPolicy::member_type& team) {
// league_rank() gives the index for this team
int icol = team.league_rank()/nlevs;
int ilev = team.league_rank()%nlevs;
@@ -105,32 +170,39 @@ Kokkos::parallel_for("Init_temp",
### Hierarchical Parallelism
-Using `TeamPolicy`, we can have up to three nested levels of parallelism: team parallelism, thread parallelism, vector parallelism. These nested policies can be called within the lambda body using the following execution policies
+Using `TeamPolicy`, we can have up to three nested levels of parallelism: team
+parallelism, thread parallelism, vector parallelism. These nested policies can
+be called within the lambda body using the following execution policies
- - `TeamThreadRange(team, begin, end)`: execute over threads of a team
- - `TeamVectorRange(team, begin, end)`: execute over threads and vector lanes of a team
- - `ThreadVectorRange(team, begin, end)`: execute over vector lanes of a thread
+- `TeamThreadRange(team, begin, end)`: execute over threads of a team
+- `TeamVectorRange(team, begin, end)`: execute over threads and vector lanes of
+ a team
+- `ThreadVectorRange(team, begin, end)`: execute over vector lanes of a thread
An example of using these policies
-```
+
+```cpp
Kokkos::View Q("tracers", ncols, ntracers, nlevs);
Kokkos::parallel_for(Kokkos::TeamPolicy(ncols, Kokkos::AUTO),
- KOKKOS_LAMBDA (TeamPolicy::member_type& team) {
+ KOKKOS_LAMBDA (TeamPolicy::member_type& team) {
int icol = team.league_rank();
Kokkos::parallel_for(Kokkos::TeamVectorRange(team, nlevs), [&](int ilev) {
- temperature(icol, ilev) = 0;
+ temperature(icol, ilev) = 0;
});
Kokkos::parallel_for(Kokkos::TeamThreadRange(team, nlevs), [&](int ilev) {
- Kokkos::parallel_for(Kokkos::ThreadVectorRange(team, ntracers), [&](int iq) {
- Q(icol, iq, ilev) = 0;
- });
+ Kokkos::parallel_for(Kokkos::ThreadVectorRange(team, ntracers), [&](int iq) {
+ Q(icol, iq, ilev) = 0;
+ });
});
});
```
-IMPORTANT! Nested policies cannot be used in arbitrary order. `ThreadVectorRange` must be used inside a `TeamThreadRange`, and `TeamVectorRange` must be the only level of nested parallelism.
-```
+IMPORTANT! Nested policies cannot be used in arbitrary order. `ThreadVectorRange`
+must be used inside a `TeamThreadRange`, and `TeamVectorRange` must be the only
+level of nested parallelism.
+
+```cpp
Kokkos::parallel_for(TeamPolicy(...), ... {
// OK
Kokkos::parallel_for(TeamThreadRange, ... {
@@ -139,9 +211,9 @@ Kokkos::parallel_for(TeamPolicy(...), ... {
// OK
Kokkos::parallel_for(TeamThreadRange, ... {
- Kokkos::parallel_for(ThreadVectorRange, ... {
+ Kokkos::parallel_for(ThreadVectorRange, ... {
- });
+ });
});
// OK
@@ -156,13 +228,15 @@ Kokkos::parallel_for(TeamPolicy(...), ... {
// WRONG, a TeamVectorRange must be the only nested level
Kokkos::parallel_for(TeamVectorRange, ...{
- Kokkos::parallel_for(ThreadVectorRange, ... {
+ Kokkos::parallel_for(ThreadVectorRange, ... {
- });
+ });
});
});
```
-Using these incorrectly can be very tricky to debug as the code almost certainly will _not_ error out, but race conditions will exist among threads.
+
+Using these incorrectly can be very tricky to debug as the code almost certainly
+will _not_ error out, but race conditions will exist among threads.
## EKAT
@@ -175,6 +249,3 @@ Using these incorrectly can be very tricky to debug as the code almost certainly
### Scratch Memory: WorspaceManager
### Algorithms
-
-
-
diff --git a/components/eamxx/docs/developer/managers.md b/components/eamxx/docs/developer/managers.md
index 676449a2184..fa98c8b1d72 100644
--- a/components/eamxx/docs/developer/managers.md
+++ b/components/eamxx/docs/developer/managers.md
@@ -1 +1 @@
-## FieldManager and GridsManager
+# FieldManager and GridsManager
diff --git a/components/eamxx/docs/developer/processes.md b/components/eamxx/docs/developer/processes.md
index 9ad556a3183..adb90e2dfbc 100644
--- a/components/eamxx/docs/developer/processes.md
+++ b/components/eamxx/docs/developer/processes.md
@@ -1,59 +1,77 @@
# Atmospheric Processes
-In EAMxx, `AtmosphereProcess` (AP) is an abstract class representing a portion of the atmosphere timestep algorithm.
-In simple terms, an AP is an object that given certain input fields performs some calculations to compute
-some output fields. The concrete AP classes allow to create a buffer layer between particular packages (e.g.,
-dynamics dycore, physics parametrizations) and the atmosphere driver (AD), allowing separation of concerns,
-so that the AD does not need to know details about the package, and the package does not need to know about
-the EAMxx infrastructure.
-
-To enhance this separation of concerns, EAMxx implements two more classes for handling APs:
-
-- the concrete class `AtmosphereProcessGroup` (APG), which allows to group together a set of AP's, which can be seen from outside as a single process;
-- the `AtmosphereProcessFactory` class, which allows an APG to create its internal processes without any knowledge of
-what they are.
-
-This infrastructure allows the AD to view the whole atmosphere as a single APG, and to be completely agnostic to
-what processes are run, and in which order. This design allows to have a code base that is cleaner, self-container,
-and easy to test via a battery of targeted unit tests.
-
-In EAMxx, we already have a few concrete AP's, interfacing the AD to the Hommexx non-hydrostatic dycore as well as
-some physics parametrizations (P3, SHOC, RRMTPG, etc). In the next section we describe the interfaces of an AP class,
-and we show an example of how to write a new concrete AP class.
+In EAMxx, `AtmosphereProcess` (AP) is an abstract class representing a portion
+of the atmosphere timestep algorithm. In simple terms, an AP is an object that
+given certain input fields performs some calculations to compute some output
+fields. The concrete AP classes allow to create a buffer layer between
+particular packages (e.g., dynamics dycore, physics parametrizations) and the
+atmosphere driver (AD), allowing separation of concerns, so that the AD does
+not need to know details about the package, and the package does not need to
+know about the EAMxx infrastructure.
+
+To enhance this separation of concerns, EAMxx implements two more classes for
+handling APs:
+
+- the concrete class `AtmosphereProcessGroup` (APG), which allows to group
+ together a set of AP's, which can be seen from outside as a single process;
+- the `AtmosphereProcessFactory` class, which allows an APG to create its
+ internal processes without any knowledge of what they are.
+
+This infrastructure allows the AD to view the whole atmosphere as a single APG,
+and to be completely agnostic to what processes are run, and in which order.
+This design allows to have a code base that is cleaner, self-contained, and
+easy to test via a battery of targeted unit tests.
+
+In EAMxx, we already have a few concrete AP's, interfacing the AD to the
+Hommexx non-hydrostatic dycore as well as some physics parametrizations (P3,
+SHOC, RRTMGP, etc). In the next section we describe the interfaces of an AP
+class, and we show an example of how to write a new concrete AP class.
## Atmosphere process interfaces
An AP has several interfaces, which can be grouped into three categories:
- - initialization: these interfaces are used to create the AP, as well as to initialize internal data structures;
- - run: these interfaces are used to make the AP compute its output fields from its input fields;
- - finalization: these interfaces are used to perform any clean up operation (e.g., release files) before the AP is
- destroyed.
-
-Among the above, the initialization sequence is the most complex, and conists of several steps:
-
- - The AD creates the APG corresponding to the whole atmosphere. As mentioned above, this phase will make use of a factory,
- which allows the AD to be agnostic to what is actually in the group. All AP's can start performing any initialization
- work that they can, but at this point they are limited to use only an MPI communicator as well as a list of runtime
- parameters (which were previously read from an input file).
- - The AD passes a `GridsManager` to the AP's, so that they can get information about the grids they need. At this point,
- all AP's have all the information they need to establish the layout of the input and output fields they need,
- and can store a list of these "requests"
- - After creating all fields (based on AP's requests), the AD passes a copy of each input and output field to
- the AP's. These fields will be divided in "required" and "computed", which differ in that the former are only
- passed to the AP's as 'read-only' fields (see the [field](field.md#Field) documentation for more details)
- - The AP's are queried for how much scratch memory they may need at run time. After all AP's communicate their needs,
- the AD will provide a pointer to scratch memory to the AP's. This is memory that can be used to initialize
- temporary views/fields or other internal data structures. All AP's are given the same pointer, which means no
- data persistence should be expected at run time between one timestep and the next.
- - The AD calls the 'initialize' method on each AP. At this point, all fields are set, and AP's can complete any
- remaining initialization task
-
-While the base AP class provides an (empty) implementation for some methods, in case derived classes do not need a
-feature, some methods are purely virtual, and concrete classes will have to override them. Looking at existing
-concrete AP implementations is a good way to have a first idea of what a new AP class needs to implement. Here,
-we show go over the possible implementation of these methods in a hypothetical AP class. The header file may
-look something like this
+- initialization: these interfaces are used to create the AP, as well as to
+ initialize internal data structures;
+- run: these interfaces are used to make the AP compute its output fields from
+ its input fields;
+- finalization: these interfaces are used to perform any clean up operation
+ (e.g., release files) before the AP is destroyed.
+
+Among the above, the initialization sequence is the most complex, and consists
+of several steps:
+
+- The AD creates the APG corresponding to the whole atmosphere. As mentioned
+ above, this phase will make use of a factory, which allows the AD to be
+ agnostic to what is actually in the group. All AP's can start performing any
+ initialization work that they can, but at this point they are limited to use
+ only an MPI communicator as well as a list of runtime parameters (which were
+ previously read from an input file).
+- The AD passes a `GridsManager` to the AP's, so that they can get information
+ about the grids they need. At this point, all AP's have all the information
+ they need to establish the layout of the input and output fields they need,
+ and can store a list of these "requests"
+- After creating all fields (based on AP's requests), the AD passes a copy of
+ each input and output field to the AP's. These fields will be divided in
+ "required" and "computed", which differ in that the former are only passed
+ to the AP's as 'read-only' fields (see the [field](field.md#Field)
+ documentation for more details)
+- The AP's are queried for how much scratch memory they may need at run time.
+ After all AP's communicate their needs, the AD will provide a pointer to
+ scratch memory to the AP's. This is memory that can be used to initialize
+ temporary views/fields or other internal data structures. All AP's are given
+ the same pointer, which means no data persistence should be expected at run
+ time between one timestep and the next.
+- The AD calls the 'initialize' method on each AP. At this point, all fields
+ are set, and AP's can complete any remaining initialization task
+
+While the base AP class provides an (empty) implementation for some methods, in
+case derived classes do not need a feature, some methods are purely virtual,
+and concrete classes will have to override them. Looking at existing concrete
+AP implementations is a good way to have a first idea of what a new AP class
+needs to implement. Here, we go over the possible implementation of these
+methods in a hypothetical AP class. The header file may look something like
+this
```c++
#include
@@ -86,21 +104,26 @@ protected:
bool m_has_blah;
};
```
+
A few comments:
- - we added two views to the class, which are meant to be used to store intermediate results during calculations at
-runtime;
- - there are other methods that the class can override (such as additional operations when the AD sets a field in the
- AP), but most AP's only need to override only these;
- - we strongly encourage to add the keyword `override` when overriding a method; in case of small typos (e.g., missing
- a `&` or a `const`, the compiler will be erroring out, since the signature will not match any virtual method in the
- base class;
- - `findalize_impl` is often empty; unless the AP is managing external resources, everything should be correctly released
- during destruction;
- - the two methods for buffers can be omitted if the AP does not need any scratch memory (and the default implementation
- from the base class will be used).
-
-Here is a possible implementation of the methods, with some inline comments to explain
+- we added two views to the class, which are meant to be used to store
+ intermediate results during calculations at runtime;
+- there are other methods that the class can override (such as additional
+ operations when the AD sets a field in the AP), but most AP's only need to
+ override only these;
+- we strongly encourage to add the keyword `override` when overriding a method;
+  in case of small typos (e.g., missing a `&` or a `const`), the compiler will
+  error out, since the signature will not match any virtual method in the
+ base class;
+- `finalize_impl` is often empty; unless the AP is managing external resources,
+ everything should be correctly released during destruction;
+- the two methods for buffers can be omitted if the AP does not need any
+ scratch memory (and the default implementation from the base class will be
+ used).
+
+Here is a possible implementation of the methods, with some inline comments to
+explain
```c++
MyProcess::MyProcess (const ekat::Comm& comm, const ekat::ParameterList& pl)
diff --git a/components/eamxx/docs/developer/source_tree.md b/components/eamxx/docs/developer/source_tree.md
index 15c018cc885..ed8270db635 100644
--- a/components/eamxx/docs/developer/source_tree.md
+++ b/components/eamxx/docs/developer/source_tree.md
@@ -56,4 +56,3 @@ You'll also see some other files in the `src/` directory itself, such as
+ `scream_config.h.in`: A template for generating a C++ header file with
EAMxx configuration information.
-
diff --git a/components/eamxx/docs/developer/standalone_testing.md b/components/eamxx/docs/developer/standalone_testing.md
index e2bb5d62556..633dcc34dc1 100644
--- a/components/eamxx/docs/developer/standalone_testing.md
+++ b/components/eamxx/docs/developer/standalone_testing.md
@@ -27,26 +27,30 @@ be made known to EAMxx by editing the eamxx/scripts/machines_specs.py files.
There are some instructions on what to do at the top of this file.
`test-all-scream` has a good help dump
-```
-% cd $scream_repo/components/eamxx
-% ./scripts/test-all-scream -h
+
+```shell
+cd $scream_repo/components/eamxx
+./scripts/test-all-scream -h
```
If you are unsure of the cmake configuration for you development cycle, one
trick you can use is to run `test-all-scream` for the `dbg` test and just
copy the cmake command it prints (then ctrl-C the process).
-```
-% cd $scream_repo/components/eamxx
-% ./scripts/test-all-scream -t dbg -m $machine
-* wait for a few seconds*
-* Ctrl-C *
-* Copy the contents of DCMAKE_COMMAND that was passed to ctest *
-* Add "cmake" to beginning of contents and path to eamxx at the end. *
+
+```shell
+cd $scream_repo/components/eamxx
+./scripts/test-all-scream -t dbg -m $machine
+# wait for a few seconds
+# Ctrl-C
+# Copy the contents of DCMAKE_COMMAND that was passed to ctest
+# Add "cmake" to beginning of contents and path to eamxx at the end.
```
Considerations for using `test-all-scream`:
+
* Your machine must be known to our scripts, see above.
-* If you try to run commands by-hand (outside of test-all-scream; cmake, make, ctest, etc), you'll need to remember to
+* If you try to run commands by-hand (outside of test-all-scream;
+ cmake, make, ctest, etc), you'll need to remember to
load the scream-env into your shell, which can be done like this:
`cd eamxx/scripts; eval $(./scripts/scream-env-cmd $machine)`
* test-all-scream expects to be run from a compute node if you
@@ -63,7 +67,7 @@ Considerations for using `test-all-scream`:
Before running the tests, generate a baseline file:
-```
+```shell
cd $RUN_ROOT_DIR
make baseline
```
@@ -75,7 +79,7 @@ path has been provided.
To run all of SCREAM's tests, make sure you're in `$RUN_ROOT_DIR` and type
-```
+```shell
ctest -VV
```
@@ -84,7 +88,7 @@ This runs everything and reports results in an extra-verbose (`-VV`) manner.
You can also run subsets of the SCREAM tests. For example, to run only the
P3 regression tests (again, from the `$RUN_ROOT_DIR` directory), use
-```
+```shell
ctest -R p3_regression
```
@@ -94,13 +98,13 @@ We can create groupings of tests by using **labels**. For example, we have a
`driver` label that runs tests for SCREAM's standalone driver. You can see a
list of available labels by typing
-```
+```shell
ctest --print-labels
```
To see which tests are associated with a given label (e.g. `driver`), use
-```
+```shell
ctest -L driver -N
```
@@ -117,4 +121,3 @@ on the C++/Kokkos implementation, you can invoke any new tests to the function
If the reference Fortran implementation changes enough that a new baseline file
is required, make sure to let other SCREAM team members know, in order to
minimize disruptions.
-
diff --git a/components/eamxx/docs/developer/style_guide.md b/components/eamxx/docs/developer/style_guide.md
index f4367833009..4f6f340cb66 100644
--- a/components/eamxx/docs/developer/style_guide.md
+++ b/components/eamxx/docs/developer/style_guide.md
@@ -7,4 +7,3 @@ Here's our style guide. Let the holy wars begin!
## Functions and Methods
## Variables
-
diff --git a/components/eamxx/scripts/machines_specs.py b/components/eamxx/scripts/machines_specs.py
index 41d09f4683e..c9c9f973022 100644
--- a/components/eamxx/scripts/machines_specs.py
+++ b/components/eamxx/scripts/machines_specs.py
@@ -221,9 +221,19 @@ class GHCISNLCuda(Machine):
concrete = True
@classmethod
def setup(cls):
- super().setup_base(name="ghci-snl-cuda",num_bld_res=16,num_run_res=1)
+ super().setup_base(name="ghci-snl-cuda")
cls.baselines_dir = "/projects/e3sm/baselines/scream/ghci-snl-cuda"
cls.gpu_arch = "cuda"
+ cls.num_run_res = int(run_cmd_no_fail("nvidia-smi --query-gpu=name --format=csv,noheader | wc -l"))
+
+###############################################################################
+class GHCIOCI(Machine):
+###############################################################################
+ concrete = True
+ @classmethod
+ def setup(cls):
+ super().setup_base(name="ghci-oci")
+ cls.env_setup = [f"eval $({CIMEROOT}/CIME/Tools/get_case_env -c SMS.ne4pg2_ne4pg2.F2010-SCREAMv1.ghci-oci_gnu)"]
###############################################################################
class Lassen(Machine):
diff --git a/components/eamxx/scripts/query_scream.py b/components/eamxx/scripts/query_scream.py
index 4b26451c7cb..a6fe5096bfb 100644
--- a/components/eamxx/scripts/query_scream.py
+++ b/components/eamxx/scripts/query_scream.py
@@ -1,9 +1,5 @@
-from machines_specs import assert_machine_supported, \
- get_mach_cxx_compiler, get_mach_c_compiler, get_mach_f90_compiler, \
- get_mach_batch_command, get_mach_env_setup_command, \
- get_mach_baseline_root_dir, is_cuda_machine, \
- get_mach_compilation_resources, get_mach_testing_resources
+from machines_specs import assert_machine_supported, get_machine, get_mach_env_setup_command
from utils import expect
CHOICES = (
@@ -24,23 +20,24 @@ def query_scream(machine, param):
assert_machine_supported(machine)
expect(param in CHOICES, f"Unknown param {param}")
+ mach = get_machine(machine)
if param == "cxx_compiler":
- return get_mach_cxx_compiler(machine)
+ return mach.cxx_compiler
elif param == "c_compiler":
- return get_mach_c_compiler(machine)
+ return mach.c_compiler
elif param == "f90_compiler":
- return get_mach_f90_compiler(machine)
+ return mach.ftn_compiler
elif param == "batch":
- return get_mach_batch_command(machine)
+ return mach.batch
elif param == "env":
return get_mach_env_setup_command(machine)
elif param == "baseline_root":
- return get_mach_baseline_root_dir(machine)
+ return mach.baselines_dir
elif param == "cuda":
- return str(is_cuda_machine(machine))
+ return str(mach.gpu_arch == "cuda")
elif param == "comp_j":
- return get_mach_compilation_resources()
+ return mach.num_bld_res
elif param == "test_j":
- return get_mach_testing_resources(machine)
+ return mach.num_run_res
else:
expect(False, f"Unhandled param {param}")
diff --git a/components/eamxx/src/control/atmosphere_driver.cpp b/components/eamxx/src/control/atmosphere_driver.cpp
index 252a3fe53b7..e935cff5cfc 100644
--- a/components/eamxx/src/control/atmosphere_driver.cpp
+++ b/components/eamxx/src/control/atmosphere_driver.cpp
@@ -807,7 +807,8 @@ void AtmosphereDriver::
set_provenance_data (std::string caseid,
std::string rest_caseid,
std::string hostname,
- std::string username)
+ std::string username,
+ std::string versionid)
{
#ifdef SCREAM_CIME_BUILD
// Check the inputs are valid
@@ -816,6 +817,7 @@ set_provenance_data (std::string caseid,
"Error! Invalid restart case id: " + rest_caseid + "\n");
EKAT_REQUIRE_MSG (hostname!="", "Error! Invalid hostname: " + hostname + "\n");
EKAT_REQUIRE_MSG (username!="", "Error! Invalid username: " + username + "\n");
+ EKAT_REQUIRE_MSG (versionid!="", "Error! Invalid version: " + versionid + "\n");
#else
caseid = rest_caseid = m_casename;
char* user = new char[32];
@@ -835,13 +837,14 @@ set_provenance_data (std::string caseid,
}
delete[] user;
delete[] host;
+ versionid = EAMXX_GIT_VERSION;
#endif
auto& provenance = m_atm_params.sublist("provenance");
provenance.set("caseid",caseid);
provenance.set("rest_caseid",rest_caseid);
provenance.set("hostname",hostname);
provenance.set("username",username);
- provenance.set("version",std::string(EAMXX_GIT_VERSION));
+ provenance.set("git_version",versionid);
}
void AtmosphereDriver::
diff --git a/components/eamxx/src/control/atmosphere_driver.hpp b/components/eamxx/src/control/atmosphere_driver.hpp
index f53cd18302d..9b191371b36 100644
--- a/components/eamxx/src/control/atmosphere_driver.hpp
+++ b/components/eamxx/src/control/atmosphere_driver.hpp
@@ -116,7 +116,8 @@ class AtmosphereDriver
void set_provenance_data (std::string caseid = "",
std::string rest_caseid = "",
std::string hostname = "",
- std::string username = "");
+ std::string username = "",
+ std::string versionid = "");
// Load initial conditions for atm inputs
void initialize_fields ();
diff --git a/components/eamxx/src/control/atmosphere_surface_coupling_importer.cpp b/components/eamxx/src/control/atmosphere_surface_coupling_importer.cpp
index 790b60c449c..385e57cae55 100644
--- a/components/eamxx/src/control/atmosphere_surface_coupling_importer.cpp
+++ b/components/eamxx/src/control/atmosphere_surface_coupling_importer.cpp
@@ -28,35 +28,41 @@ void SurfaceCouplingImporter::set_grids(const std::shared_ptr("sfc_alb_dir_vis", scalar2d_layout, nondim, grid_name);
- add_field("sfc_alb_dir_nir", scalar2d_layout, nondim, grid_name);
- add_field("sfc_alb_dif_vis", scalar2d_layout, nondim, grid_name);
- add_field("sfc_alb_dif_nir", scalar2d_layout, nondim, grid_name);
- add_field("surf_lw_flux_up", scalar2d_layout, W/m2, grid_name);
- add_field("surf_sens_flux", scalar2d_layout, W/m2, grid_name);
- add_field("surf_evap", scalar2d_layout, kg/m2/s, grid_name);
- add_field("surf_mom_flux", vector2d_layout, N/m2, grid_name);
- add_field("surf_radiative_T", scalar2d_layout, K, grid_name);
- add_field("T_2m", scalar2d_layout, K, grid_name);
- add_field("qv_2m", scalar2d_layout, kg/kg, grid_name);
- add_field("wind_speed_10m", scalar2d_layout, m/s, grid_name);
- add_field("snow_depth_land", scalar2d_layout, m, grid_name);
- add_field("ocnfrac", scalar2d_layout, nondim, grid_name);
- add_field("landfrac", scalar2d_layout, nondim, grid_name);
- add_field("icefrac", scalar2d_layout, nondim, grid_name);
+ const FieldLayout scalar2d = m_grid->get_2d_scalar_layout();
+ const FieldLayout vector2d = m_grid->get_2d_vector_layout(2);
+ const FieldLayout vector4d = m_grid->get_2d_vector_layout(4);
+
+ add_field("sfc_alb_dir_vis", scalar2d, nondim, grid_name);
+ add_field("sfc_alb_dir_nir", scalar2d, nondim, grid_name);
+ add_field("sfc_alb_dif_vis", scalar2d, nondim, grid_name);
+ add_field("sfc_alb_dif_nir", scalar2d, nondim, grid_name);
+ add_field("surf_lw_flux_up", scalar2d, W/m2, grid_name);
+ add_field("surf_sens_flux", scalar2d, W/m2, grid_name);
+ add_field("surf_evap", scalar2d, kg/m2/s, grid_name);
+ add_field("surf_mom_flux", vector2d, N/m2, grid_name);
+ add_field("surf_radiative_T", scalar2d, K, grid_name);
+ add_field("T_2m", scalar2d, K, grid_name);
+ add_field("qv_2m", scalar2d, kg/kg, grid_name);
+ add_field("wind_speed_10m", scalar2d, m/s, grid_name);
+ add_field("snow_depth_land", scalar2d, m, grid_name);
+ add_field("ocnfrac", scalar2d, nondim, grid_name);
+ add_field("landfrac", scalar2d, nondim, grid_name);
+ add_field("icefrac", scalar2d, nondim, grid_name);
// Friction velocity [m/s]
- add_field("fv", scalar2d_layout, m/s, grid_name);
+ add_field("fv", scalar2d, m/s, grid_name);
// Aerodynamical resistance
- add_field("ram1", scalar2d_layout, s/m, grid_name);
+ add_field("ram1", scalar2d, s/m, grid_name);
+ // Sea surface temperature [K]
+ add_field("sst", scalar2d, K, grid_name);
+ // dust fluxes [kg/m^2/s]: Four flux values for each column
+ add_field("dstflx", vector4d, kg/m2/s, grid_name);
+
}
// =========================================================================================
void SurfaceCouplingImporter::setup_surface_coupling_data(const SCDataManager &sc_data_manager)
diff --git a/components/eamxx/src/dynamics/homme/homme_grids_manager.cpp b/components/eamxx/src/dynamics/homme/homme_grids_manager.cpp
index 87009c7d074..df5de6827f6 100644
--- a/components/eamxx/src/dynamics/homme/homme_grids_manager.cpp
+++ b/components/eamxx/src/dynamics/homme/homme_grids_manager.cpp
@@ -271,6 +271,7 @@ build_physics_grid (const ci_string& type, const ci_string& rebalance) {
auto hyam = phys_grid->create_geometry_data("hyam",layout_mid,nondim);
auto hybm = phys_grid->create_geometry_data("hybm",layout_mid,nondim);
auto lev = phys_grid->create_geometry_data("lev", layout_mid,mbar);
+ auto ilev = phys_grid->create_geometry_data("ilev",layout_int,mbar);
for (auto f : {hyai, hybi, hyam, hybm}) {
auto f_d = get_grid("Dynamics")->get_geometry_data(f.name());
@@ -281,13 +282,20 @@ build_physics_grid (const ci_string& type, const ci_string& rebalance) {
// Build lev from hyam and hybm
const Real ps0 = 100000.0;
- auto hya_v = hyam.get_view();
- auto hyb_v = hybm.get_view();
- auto lev_v = lev.get_view();
- for (int ii=0;iiget_num_vertical_levels();ii++) {
- lev_v(ii) = 0.01*ps0*(hya_v(ii)+hyb_v(ii));
+ auto hyam_v = hyam.get_view();
+ auto hybm_v = hybm.get_view();
+ auto hyai_v = hyai.get_view();
+ auto hybi_v = hybi.get_view();
+ auto lev_v = lev.get_view();
+ auto ilev_v = ilev.get_view();
+ auto num_v_levs = phys_grid->get_num_vertical_levels();
+ for (int ii=0;ii("sfc_alb_dir_vis", scalar2d, nondim, grid_name);
+ //----------- Variables from microphysics scheme -------------
+
+ // Evaporation from stratiform rain [kg/kg/s]
+ add_field("nevapr", scalar3d_mid, kg / kg / s, grid_name);
+
+ // Stratiform rain production rate [kg/kg/s]
+ add_field("precip_total_tend", scalar3d_mid, kg / kg / s,
+ grid_name);
+
// ---------------------------------------------------------------------
// These variables are "updated" or inputs/outputs for the process
// ---------------------------------------------------------------------
@@ -205,7 +215,7 @@ void MAMMicrophysics::set_grids(
LinozHorizInterp_, linoz_file_name_);
// linoz reader
- const auto io_grid_linoz = LinozHorizInterp_->get_src_grid();
+ const auto io_grid_linoz = LinozHorizInterp_->get_tgt_grid();
const int num_cols_io_linoz =
io_grid_linoz->get_num_local_dofs(); // Number of columns on this rank
const int num_levs_io_linoz =
@@ -233,7 +243,7 @@ void MAMMicrophysics::set_grids(
TracerHorizInterp_, oxid_file_name_);
const int nvars = int(var_names.size());
- const auto io_grid = TracerHorizInterp_->get_src_grid();
+ const auto io_grid = TracerHorizInterp_->get_tgt_grid();
const int num_cols_io =
io_grid->get_num_local_dofs(); // Number of columns on this rank
const int num_levs_io =
@@ -258,78 +268,88 @@ void MAMMicrophysics::set_grids(
"num_a1", "num_a2", "num_a4", "soag"};
for(const auto &var_name : extfrc_lst_) {
- std::string item_name = "mam4_" + var_name + "_verti_emiss_file_name";
+ std::string item_name = "mam4_" + var_name + "_elevated_emiss_file_name";
const auto file_name = m_params.get(item_name);
- vert_emis_file_name_[var_name] = file_name;
+ elevated_emis_file_name_[var_name] = file_name;
}
- vert_emis_var_names_["so2"] = {"BB", "ENE_ELEV", "IND_ELEV", "contvolc"};
- vert_emis_var_names_["so4_a1"] = {"BB", "ENE_ELEV", "IND_ELEV", "contvolc"};
- vert_emis_var_names_["so4_a2"] = {"contvolc"};
- vert_emis_var_names_["pom_a4"] = {"BB"};
- vert_emis_var_names_["bc_a4"] = {"BB"};
- vert_emis_var_names_["num_a1"] = {
+ elevated_emis_var_names_["so2"] = {"BB", "ENE_ELEV", "IND_ELEV", "contvolc"};
+ elevated_emis_var_names_["so4_a1"] = {"BB", "ENE_ELEV", "IND_ELEV", "contvolc"};
+ elevated_emis_var_names_["so4_a2"] = {"contvolc"};
+ elevated_emis_var_names_["pom_a4"] = {"BB"};
+ elevated_emis_var_names_["bc_a4"] = {"BB"};
+ elevated_emis_var_names_["num_a1"] = {
"num_a1_SO4_ELEV_BB", "num_a1_SO4_ELEV_ENE", "num_a1_SO4_ELEV_IND",
"num_a1_SO4_ELEV_contvolc"};
- vert_emis_var_names_["num_a2"] = {"num_a2_SO4_ELEV_contvolc"};
+ elevated_emis_var_names_["num_a2"] = {"num_a2_SO4_ELEV_contvolc"};
// num_a4
// FIXME: why the sectors in this files are num_a1;
// I guess this should be num_a4? Is this a bug in the orginal nc files?
- vert_emis_var_names_["num_a4"] = {"num_a1_BC_ELEV_BB",
+ elevated_emis_var_names_["num_a4"] = {"num_a1_BC_ELEV_BB",
"num_a1_POM_ELEV_BB"};
- vert_emis_var_names_["soag"] = {"SOAbb_src", "SOAbg_src", "SOAff_src"};
+ elevated_emis_var_names_["soag"] = {"SOAbb_src", "SOAbg_src", "SOAff_src"};
- int verti_emiss_cyclical_ymd = m_params.get("verti_emiss_ymd");
+ int elevated_emiss_cyclical_ymd = m_params.get("elevated_emiss_ymd");
for(const auto &var_name : extfrc_lst_) {
- const auto file_name = vert_emis_file_name_[var_name];
- const auto var_names = vert_emis_var_names_[var_name];
+ const auto file_name = elevated_emis_file_name_[var_name];
+ const auto var_names = elevated_emis_var_names_[var_name];
scream::mam_coupling::TracerData data_tracer;
scream::mam_coupling::setup_tracer_data(data_tracer, file_name,
- verti_emiss_cyclical_ymd);
+ elevated_emiss_cyclical_ymd);
auto hor_rem = scream::mam_coupling::create_horiz_remapper(
grid_, file_name, extfrc_map_file, var_names, data_tracer);
+
auto file_reader =
- scream::mam_coupling::create_tracer_data_reader(hor_rem, file_name);
- VertEmissionsHorizInterp_.push_back(hor_rem);
- VertEmissionsDataReader_.push_back(file_reader);
- vert_emis_data_.push_back(data_tracer);
- } // var_name vert emissions
+ scream::mam_coupling::create_tracer_data_reader(hor_rem, file_name,
+ data_tracer.file_type);
+ ElevatedEmissionsHorizInterp_.push_back(hor_rem);
+ ElevatedEmissionsDataReader_.push_back(file_reader);
+ elevated_emis_data_.push_back(data_tracer);
+ } // var_name elevated emissions
int i = 0;
int offset_emis_ver = 0;
for(const auto &var_name : extfrc_lst_) {
- const auto file_name = vert_emis_file_name_[var_name];
- const auto var_names = vert_emis_var_names_[var_name];
+ const auto file_name = elevated_emis_file_name_[var_name];
+ const auto var_names = elevated_emis_var_names_[var_name];
const int nvars = static_cast(var_names.size());
forcings_[i].nsectors = nvars;
// I am assuming the order of species in extfrc_lst_.
// Indexing in mam4xx is fortran.
forcings_[i].frc_ndx = i + 1;
- const auto io_grid_emis = VertEmissionsHorizInterp_[i]->get_src_grid();
+ const auto io_grid_emis = ElevatedEmissionsHorizInterp_[i]->get_tgt_grid();
const int num_cols_io_emis =
io_grid_emis->get_num_local_dofs(); // Number of columns on this rank
const int num_levs_io_emis =
io_grid_emis
->get_num_vertical_levels(); // Number of levels per column
- vert_emis_data_[i].init(num_cols_io_emis, num_levs_io_emis, nvars);
- vert_emis_data_[i].allocate_temporal_views();
- forcings_[i].file_alt_data = vert_emis_data_[i].has_altitude_;
+ elevated_emis_data_[i].init(num_cols_io_emis, num_levs_io_emis, nvars);
+ elevated_emis_data_[i].allocate_temporal_views();
+ forcings_[i].file_alt_data = elevated_emis_data_[i].has_altitude_;
for(int isp = 0; isp < nvars; ++isp) {
forcings_[i].offset = offset_emis_ver;
- vert_emis_output_[isp + offset_emis_ver] =
- view_2d("vert_emis_output_", ncol_, nlev_);
+ elevated_emis_output_[isp + offset_emis_ver] =
+ view_2d("elevated_emis_output_", ncol_, nlev_);
}
offset_emis_ver += nvars;
++i;
} // end i
EKAT_REQUIRE_MSG(
- offset_emis_ver <= int(mam_coupling::MAX_NUM_VERT_EMISSION_FIELDS),
+ offset_emis_ver <= int(mam_coupling::MAX_NUM_ELEVATED_EMISSIONS_FIELDS),
"Error! Number of fields is bigger than "
- "MAX_NUM_VERT_EMISSION_FIELDS. Increase the "
- "MAX_NUM_VERT_EMISSION_FIELDS in tracer_reader_utils.hpp \n");
+ "MAX_NUM_ELEVATED_EMISSIONS_FIELDS. Increase the "
+ "MAX_NUM_ELEVATED_EMISSIONS_FIELDS in tracer_reader_utils.hpp \n");
} // Tracer external forcing data
+
+ {
+ const std::string season_wes_file = m_params.get("mam4_season_wes_file");
+ const auto& clat = col_latitudes_;
+ mam_coupling::find_season_index_reader(season_wes_file,
+ clat,
+ index_season_lai_);
+ }
} // set_grids
// ================================================================
@@ -501,6 +521,9 @@ void MAMMicrophysics::initialize_impl(const RunType run_type) {
const int photo_table_len = get_photo_table_work_len(photo_table_);
work_photo_table_ = view_2d("work_photo_table", ncol_, photo_table_len);
+ const int sethet_work_len = mam4::mo_sethet::get_total_work_len_sethet();
+ work_set_het_ = view_2d("work_set_het_array", ncol_, sethet_work_len);
+ cmfdqr_ = view_1d("cmfdqr_", nlev_);
// here's where we store per-column photolysis rates
photo_rates_ = view_3d("photo_rates", ncol_, nlev_, mam4::mo_photo::phtcnt);
@@ -518,8 +541,8 @@ void MAMMicrophysics::initialize_impl(const RunType run_type) {
for(int i = 0; i < static_cast(extfrc_lst_.size()); ++i) {
scream::mam_coupling::update_tracer_data_from_file(
- VertEmissionsDataReader_[i], curr_month, *VertEmissionsHorizInterp_[i],
- vert_emis_data_[i]);
+ ElevatedEmissionsDataReader_[i], curr_month, *ElevatedEmissionsHorizInterp_[i],
+ elevated_emis_data_[i]);
}
invariants_ = view_3d("invarians", ncol_, nlev_, mam4::gas_chemistry::nfs);
@@ -554,6 +577,14 @@ void MAMMicrophysics::run_impl(const double dt) {
Kokkos::parallel_for("preprocess", scan_policy, preprocess_);
Kokkos::fence();
+ //----------- Variables from microphysics scheme -------------
+
+ // Evaporation from stratiform rain [kg/kg/s]
+ const auto& nevapr = get_field_in("nevapr").get_view();
+
+ // Stratiform rain production rate [kg/kg/s]
+ const auto& prain = get_field_in("precip_total_tend").get_view();
+
const auto wet_geometric_mean_diameter_i =
get_field_in("dgnumwet").get_view();
const auto dry_geometric_mean_diameter_i =
@@ -616,20 +647,20 @@ void MAMMicrophysics::run_impl(const double dt) {
linoz_output); // out
Kokkos::fence();
- vert_emiss_time_state_.t_now = ts.frac_of_year_in_days();
+ elevated_emiss_time_state_.t_now = ts.frac_of_year_in_days();
int i = 0;
for(const auto &var_name : extfrc_lst_) {
- const auto file_name = vert_emis_file_name_[var_name];
- const auto var_names = vert_emis_var_names_[var_name];
+ const auto file_name = elevated_emis_file_name_[var_name];
+ const auto var_names = elevated_emis_var_names_[var_name];
const int nsectors = int(var_names.size());
- view_2d vert_emis_output[nsectors];
+ view_2d elevated_emis_output[nsectors];
for(int isp = 0; isp < nsectors; ++isp) {
- vert_emis_output[isp] = vert_emis_output_[isp + forcings_[i].offset];
+ elevated_emis_output[isp] = elevated_emis_output_[isp + forcings_[i].offset];
}
scream::mam_coupling::advance_tracer_data(
- VertEmissionsDataReader_[i], *VertEmissionsHorizInterp_[i], ts,
- vert_emiss_time_state_, vert_emis_data_[i], dry_atm_.p_mid,
- dry_atm_.z_iface, vert_emis_output);
+ ElevatedEmissionsDataReader_[i], *ElevatedEmissionsHorizInterp_[i], ts,
+ elevated_emiss_time_state_, elevated_emis_data_[i], dry_atm_.p_mid,
+ dry_atm_.z_iface, elevated_emis_output);
i++;
Kokkos::fence();
}
@@ -704,7 +735,7 @@ void MAMMicrophysics::run_impl(const double dt) {
const auto zenith_angle = acos_cosine_zenith_;
constexpr int gas_pcnst = mam_coupling::gas_pcnst();
- const auto& vert_emis_output = vert_emis_output_;
+ const auto& elevated_emis_output = elevated_emis_output_;
const auto& extfrc = extfrc_;
const auto& forcings = forcings_;
constexpr int extcnt = mam4::gas_chemistry::extcnt;
@@ -722,6 +753,8 @@ void MAMMicrophysics::run_impl(const double dt) {
clsmap_4[i] = mam4::gas_chemistry::clsmap_4[i];
permute_4[i] = mam4::gas_chemistry::permute_4[i];
}
+ const auto& cmfdqr = cmfdqr_;
+ const auto& work_set_het = work_set_het_;
// loop over atmosphere columns and compute aerosol microphyscs
Kokkos::parallel_for(
policy, KOKKOS_LAMBDA(const ThreadTeam &team) {
@@ -756,7 +789,7 @@ void MAMMicrophysics::run_impl(const double dt) {
// We may need to move this line where we read files.
forcings_in[i].file_alt_data = file_alt_data;
for(int isec = 0; isec < forcings[i].nsectors; ++isec) {
- const auto field = vert_emis_output[isec + forcings[i].offset];
+ const auto field = elevated_emis_output[isec + forcings[i].offset];
forcings_in[i].fields_data[isec] = ekat::subview(field, icol);
}
} // extcnt for loop
@@ -787,6 +820,9 @@ void MAMMicrophysics::run_impl(const double dt) {
ekat::subview(linoz_dPmL_dO3col, icol);
const auto linoz_cariolle_pscs_icol =
ekat::subview(linoz_cariolle_pscs, icol);
+ const auto nevapr_icol = ekat::subview(nevapr, icol);
+ const auto prain_icol = ekat::subview(prain, icol);
+ const auto work_set_het_icol = ekat::subview(work_set_het, icol);
// Note: All variables are inputs, except for progs, which is an
// input/output variable.
mam4::microphysics::perform_atmospheric_chemistry_and_microphysics(
@@ -800,7 +836,12 @@ void MAMMicrophysics::run_impl(const double dt) {
linoz_cariolle_pscs_icol, eccf, adv_mass_kg_per_moles, clsmap_4,
permute_4, offset_aerosol,
config.linoz.o3_sfc, config.linoz.o3_tau, config.linoz.o3_lbl,
- dry_diameter_icol, wet_diameter_icol, wetdens_icol);
+ dry_diameter_icol, wet_diameter_icol, wetdens_icol,
+ dry_atm.phis(icol),
+ cmfdqr,
+ prain_icol,
+ nevapr_icol,
+ work_set_het_icol);
}); // parallel_for for the column loop
Kokkos::fence();
diff --git a/components/eamxx/src/physics/mam/eamxx_mam_microphysics_process_interface.hpp b/components/eamxx/src/physics/mam/eamxx_mam_microphysics_process_interface.hpp
index 6b1dd33dfaa..6ff846d0d0c 100644
--- a/components/eamxx/src/physics/mam/eamxx_mam_microphysics_process_interface.hpp
+++ b/components/eamxx/src/physics/mam/eamxx_mam_microphysics_process_interface.hpp
@@ -25,6 +25,8 @@ class MAMMicrophysics final : public scream::AtmosphereProcess {
using view_1d_host = typename KT::view_1d::HostMirror;
+ using view_int_2d = typename KT::template view_2d;
+
// a thread team dispatched to a single vertical column
using ThreadTeam = mam4::ThreadTeam;
@@ -225,20 +227,25 @@ class MAMMicrophysics final : public scream::AtmosphereProcess {
// Vertical emission uses 9 files, here I am using std::vector to stote
// instance of each file.
- mam_coupling::TracerTimeState vert_emiss_time_state_;
- std::vector> VertEmissionsDataReader_;
- std::vector> VertEmissionsHorizInterp_;
+ mam_coupling::TracerTimeState elevated_emiss_time_state_;
+ std::vector> ElevatedEmissionsDataReader_;
+ std::vector> ElevatedEmissionsHorizInterp_;
std::vector extfrc_lst_;
- std::vector vert_emis_data_;
- std::map vert_emis_file_name_;
- std::map> vert_emis_var_names_;
- view_2d vert_emis_output_[mam_coupling::MAX_NUM_VERT_EMISSION_FIELDS];
+ std::vector elevated_emis_data_;
+ std::map elevated_emis_file_name_;
+ std::map> elevated_emis_var_names_;
+ view_2d elevated_emis_output_[mam_coupling::MAX_NUM_ELEVATED_EMISSIONS_FIELDS];
view_3d extfrc_;
mam_coupling::ForcingHelper forcings_[mam4::gas_chemistry::extcnt];
view_1d_host acos_cosine_zenith_host_;
view_1d acos_cosine_zenith_;
+ view_int_2d index_season_lai_;
+ // // dq/dt for convection [kg/kg/s]
+ view_1d cmfdqr_;
+ view_2d work_set_het_;
+
}; // MAMMicrophysics
} // namespace scream
diff --git a/components/eamxx/src/physics/mam/eamxx_mam_srf_and_online_emissions_functions.hpp b/components/eamxx/src/physics/mam/eamxx_mam_srf_and_online_emissions_functions.hpp
new file mode 100644
index 00000000000..9c01daf8223
--- /dev/null
+++ b/components/eamxx/src/physics/mam/eamxx_mam_srf_and_online_emissions_functions.hpp
@@ -0,0 +1,73 @@
+#ifndef EAMXX_MAM_SRF_AND_ONLINE_EMISSIONS_FUNCTIONS_HPP
+#define EAMXX_MAM_SRF_AND_ONLINE_EMISSIONS_FUNCTIONS_HPP
+
+namespace scream {
+
+namespace {
+
+using KT = ekat::KokkosTypes;
+using view_1d = typename KT::template view_1d;
+using view_2d = typename KT::template view_2d;
+using const_view_1d = typename KT::template view_1d;
+using const_view_2d = typename KT::template view_2d;
+
+//-------- Initialize gas and aerosol fluxes ------
+void init_fluxes(const int &ncol,
+ view_2d &constituent_fluxes) { // input-output
+
+ constexpr int pcnst = mam4::aero_model::pcnst;
+ const int gas_start_ind = mam4::utils::gasses_start_ind();
+
+ const auto policy =
+ ekat::ExeSpaceUtils::get_default_team_policy(
+ ncol, pcnst - gas_start_ind);
+
+ // Parallel loop over all the columns
+ Kokkos::parallel_for(
+ policy, KOKKOS_LAMBDA(const KT::MemberType &team) {
+ const int icol = team.league_rank();
+ view_1d flux_col = ekat::subview(constituent_fluxes, icol);
+
+ // Zero out constituent fluxes only for gasses and aerosols
+ Kokkos::parallel_for(
+ Kokkos::TeamVectorRange(team, gas_start_ind, pcnst),
+ [&](int icnst) { flux_col(icnst) = 0; });
+ });
+} // init_fluxes ends
+
+//-------- compute online emissions for dust, sea salt and marine organics -----
+void compute_online_dust_nacl_emiss(
+ const int &ncol, const int &nlev, const const_view_1d &ocnfrac,
+ const const_view_1d &sst, const const_view_2d &u_wind,
+ const const_view_2d &v_wind, const const_view_2d &dstflx,
+ const const_view_1d &mpoly, const const_view_1d &mprot,
+ const const_view_1d &mlip, const const_view_1d &soil_erodibility,
+ const const_view_2d &z_mid,
+ // output
+ view_2d &constituent_fluxes) {
+ const int surf_lev = nlev - 1; // surface level
+
+ Kokkos::parallel_for(
+ "online_emis_fluxes", ncol, KOKKOS_LAMBDA(int icol) {
+ // Input
+ const const_view_1d dstflx_icol = ekat::subview(dstflx, icol);
+
+ // Output
+ view_1d fluxes_col = ekat::subview(constituent_fluxes, icol);
+
+ // Compute online emissions
+ // NOTE: mam4::aero_model_emissions calculates mass and number emission
+ // fluxes in units of [kg/m2/s or #/m2/s] (MKS), so no need to convert
+ mam4::aero_model_emissions::aero_model_emissions(
+ sst(icol), ocnfrac(icol), u_wind(icol, surf_lev),
+ v_wind(icol, surf_lev), z_mid(icol, surf_lev), dstflx_icol,
+ soil_erodibility(icol), mpoly(icol), mprot(icol), mlip(icol),
+ // out
+ fluxes_col);
+ });
+} // compute_online_dust_nacl_emiss ends
+
+} // namespace
+} // namespace scream
+
+#endif // EAMXX_MAM_SRF_AND_ONLINE_EMISSIONS_FUNCTIONS_HPP
diff --git a/components/eamxx/src/physics/mam/eamxx_mam_srf_and_online_emissions_process_interface.cpp b/components/eamxx/src/physics/mam/eamxx_mam_srf_and_online_emissions_process_interface.cpp
index 850a82d0896..fd3ebf79700 100644
--- a/components/eamxx/src/physics/mam/eamxx_mam_srf_and_online_emissions_process_interface.cpp
+++ b/components/eamxx/src/physics/mam/eamxx_mam_srf_and_online_emissions_process_interface.cpp
@@ -1,14 +1,24 @@
-//#include
#include
+// For surface and online emission functions
+#include
+
+// For reading soil erodibility file
+#include
+
namespace scream {
+// For reading soil erodibility file
+using soilErodibilityFunc =
+ soil_erodibility::soilErodibilityFunctions;
+
// ================================================================
// Constructor
// ================================================================
MAMSrfOnlineEmiss::MAMSrfOnlineEmiss(const ekat::Comm &comm,
const ekat::ParameterList ¶ms)
: AtmosphereProcess(comm, params) {
+ // FIXME: Do we want to read dust emiss factor from the namelist??
/* Anything that can be initialized without grid information can be
* initialized here. Like universal constants.
*/
@@ -26,18 +36,98 @@ void MAMSrfOnlineEmiss::set_grids(
nlev_ = grid_->get_num_vertical_levels(); // Number of levels per column
using namespace ekat::units;
+ constexpr auto m2 = pow(m, 2);
+ constexpr auto s2 = pow(s, 2);
+ constexpr auto q_unit = kg / kg; // units of mass mixing ratios of tracers
+ constexpr auto n_unit = 1 / kg; // units of number mixing ratios of tracers
+ constexpr auto nondim = ekat::units::Units::nondimensional();
- static constexpr int pcnst = mam4::aero_model::pcnst;
- const FieldLayout scalar2d_pcnct =
- grid_->get_2d_vector_layout(pcnst, "num_phys_constituents");
+ const FieldLayout scalar2d = grid_->get_2d_scalar_layout();
+ const FieldLayout scalar3d_m = grid_->get_3d_scalar_layout(true); // mid
+ const FieldLayout scalar3d_i = grid_->get_3d_scalar_layout(false); // int
+
+ // For U and V components of wind
+ const FieldLayout vector3d = grid_->get_3d_vector_layout(true, 2);
+
+ // For components of dust flux
+ const FieldLayout vector4d = grid_->get_2d_vector_layout(4);
+
+ // --------------------------------------------------------------------------
+ // These variables are "Required" or pure inputs for the process
+ // --------------------------------------------------------------------------
+
+ // ----------- Atmospheric quantities -------------
+
+ // -- Variables required for building DS to compute z_mid --
+ // Specific humidity [kg/kg]
+ // FIXME: Comply with add_tracer calls
+ add_field("qv", scalar3d_m, q_unit, grid_name, "tracers");
+
+ // Cloud liquid mass mixing ratio [kg/kg]
+ add_field("qc", scalar3d_m, q_unit, grid_name, "tracers");
+
+ // Cloud ice mass mixing ratio [kg/kg]
+ add_field("qi", scalar3d_m, q_unit, grid_name, "tracers");
+
+ // Cloud liquid number mixing ratio [1/kg]
+ add_field("nc", scalar3d_m, n_unit, grid_name, "tracers");
+
+ // Cloud ice number mixing ratio [1/kg]
+ add_field("ni", scalar3d_m, n_unit, grid_name, "tracers");
+
+ // Temperature[K] at midpoints
+ add_field("T_mid", scalar3d_m, K, grid_name);
+
+ // Vertical pressure velocity [Pa/s] at midpoints
+ add_field("omega", scalar3d_m, Pa / s, grid_name);
+
+ // Total pressure [Pa] at midpoints
+ add_field("p_mid", scalar3d_m, Pa, grid_name);
+
+ // Total pressure [Pa] at interfaces
+ add_field("p_int", scalar3d_i, Pa, grid_name);
+
+ // Layer thickness(pdel) [Pa] at midpoints
+ add_field("pseudo_density", scalar3d_m, Pa, grid_name);
+
+ // Planetary boundary layer height [m]
+ add_field("pbl_height", scalar2d, m, grid_name);
+
+ // Surface geopotential [m2/s2]
+ add_field("phis", scalar2d, m2 / s2, grid_name);
+
+ //----------- Variables from microphysics scheme -------------
+
+ // Total cloud fraction [fraction] (Require only for building DS)
+ add_field("cldfrac_tot", scalar3d_m, nondim, grid_name);
+
+ // -- Variables required for online dust and sea salt emissions --
+
+ // U and V components of the wind[m/s]
+ add_field("horiz_winds", vector3d, m / s, grid_name);
+
+ //----------- Variables from coupler (ocean component)---------
+ // Ocean fraction [unitless]
+ add_field("ocnfrac", scalar2d, nondim, grid_name);
+
+ // Sea surface temperature [K]
+ add_field("sst", scalar2d, K, grid_name);
+
+ // dust fluxes [kg/m^2/s]: Four flux values for each column
+ add_field("dstflx", vector4d, kg / m2 / s, grid_name);
// -------------------------------------------------------------
- // These variables are "Computed" or outputs for the process
+ // These variables are "Updated" or input-outputs for the process
// -------------------------------------------------------------
- static constexpr Units m2(m * m, "m2");
+
+ constexpr int pcnst = mam4::aero_model::pcnst;
+ const FieldLayout vector2d_pcnst =
+ grid_->get_2d_vector_layout(pcnst, "num_phys_constituents");
+
// Constituent fluxes of species in [kg/m2/s]
- add_field("constituent_fluxes", scalar2d_pcnct, kg / m2 / s,
- grid_name);
+ // FIXME: confirm if it is Updated or Computed
+ add_field("constituent_fluxes", vector2d_pcnst, kg / m2 / s,
+ grid_name);
// Surface emissions remapping file
auto srf_map_file = m_params.get("srf_remap_file", "");
@@ -63,6 +153,7 @@ void MAMSrfOnlineEmiss::set_grids(
so2.species_name = "so2";
so2.sectors = {"AGR", "RCO", "SHP", "SLV", "TRA", "WST"};
srf_emiss_species_.push_back(so2); // add to the vector
+
//--------------------------------------------------------------------
// Init bc_a4 srf emiss data structures
//--------------------------------------------------------------------
@@ -147,7 +238,48 @@ void MAMSrfOnlineEmiss::set_grids(
// output
ispec_srf.horizInterp_, ispec_srf.data_start_, ispec_srf.data_end_,
ispec_srf.data_out_, ispec_srf.dataReader_);
- }
+ } // srf emissions file read init
+
+ // -------------------------------------------------------------
+ // Setup to enable reading soil erodibility file
+ // -------------------------------------------------------------
+
+ const std::string soil_erodibility_data_file =
+ m_params.get("soil_erodibility_file");
+
+ // Field to be read from file
+ const std::string soil_erod_fld_name = "mbl_bsn_fct_geo";
+
+ // Dimensions of the field
+ const std::string soil_erod_dname = "ncol";
+
+ // initialize the file read
+ soilErodibilityFunc::init_soil_erodibility_file_read(
+ ncol_, soil_erod_fld_name, soil_erod_dname, grid_,
+ soil_erodibility_data_file, srf_map_file, serod_horizInterp_,
+ serod_dataReader_); // output
+
+ // -------------------------------------------------------------
+ // Setup to enable reading marine organics file
+ // -------------------------------------------------------------
+ const std::string marine_organics_data_file =
+ m_params.get("marine_organics_file");
+
+ // Fields to be read from file (order matters as they are read in the same
+ // order)
+ const std::vector marine_org_fld_name = {
+ "TRUEPOLYC", "TRUEPROTC", "TRUELIPC"};
+
+ // Dimensions of the field
+ const std::string marine_org_dname = "ncol";
+
+ // initialize the file read
+ marineOrganicsFunc::init_marine_organics_file_read(
+ ncol_, marine_org_fld_name, marine_org_dname, grid_,
+ marine_organics_data_file, srf_map_file,
+ // output
+ morg_horizInterp_, morg_data_start_, morg_data_end_, morg_data_out_,
+ morg_dataReader_);
} // set_grid ends
@@ -182,6 +314,42 @@ void MAMSrfOnlineEmiss::init_buffers(const ATMBufferManager &buffer_manager) {
// INITIALIZE_IMPL
// ================================================================
void MAMSrfOnlineEmiss::initialize_impl(const RunType run_type) {
+ // ---------------------------------------------------------------
+ // Input fields read in from IC file, namelist or other processes
+ // ---------------------------------------------------------------
+
+ // Populate the wet atmosphere state with views from fields
+ wet_atm_.qv = get_field_in("qv").get_view();
+
+ // Following wet_atm vars are required only for building DS
+ wet_atm_.qc = get_field_in("qc").get_view();
+ wet_atm_.nc = get_field_in("nc").get_view();
+ wet_atm_.qi = get_field_in("qi").get_view();
+ wet_atm_.ni = get_field_in("ni").get_view();
+
+ // Populate the dry atmosphere state with views from fields
+ dry_atm_.T_mid = get_field_in("T_mid").get_view();
+ dry_atm_.p_mid = get_field_in("p_mid").get_view();
+ dry_atm_.p_del = get_field_in("pseudo_density").get_view();
+ dry_atm_.p_int = get_field_in("p_int").get_view();
+
+ // Following dry_atm vars are required only for building DS
+ dry_atm_.cldfrac = get_field_in("cldfrac_tot").get_view();
+ dry_atm_.pblh = get_field_in("pbl_height").get_view();
+ dry_atm_.omega = get_field_in("omega").get_view();
+
+ // store fields converted to dry mmr from wet mmr in dry_atm_
+ dry_atm_.z_mid = buffer_.z_mid;
+ dry_atm_.z_iface = buffer_.z_iface;
+ dry_atm_.dz = buffer_.dz;
+ dry_atm_.qv = buffer_.qv_dry;
+ dry_atm_.qc = buffer_.qc_dry;
+ dry_atm_.nc = buffer_.nc_dry;
+ dry_atm_.qi = buffer_.qi_dry;
+ dry_atm_.ni = buffer_.ni_dry;
+ dry_atm_.w_updraft = buffer_.w_updraft;
+ dry_atm_.z_surf = 0.0; // FIXME: for now
+
// ---------------------------------------------------------------
// Output fields
// ---------------------------------------------------------------
@@ -212,10 +380,27 @@ void MAMSrfOnlineEmiss::initialize_impl(const RunType run_type) {
ispec_srf.data_end_); // output
}
+ //-----------------------------------------------------------------
+ // Read Soil erodibility data
+ //-----------------------------------------------------------------
+ // This data is time-independent, we read all data here for the
+ // entire simulation
+ soilErodibilityFunc::update_soil_erodibility_data_from_file(
+ serod_dataReader_, *serod_horizInterp_,
+ soil_erodibility_); // output
+
+ //--------------------------------------------------------------------
+  // Update marine organics from file
+ //--------------------------------------------------------------------
+ // Time dependent data
+ marineOrganicsFunc::update_marine_organics_data_from_file(
+ morg_dataReader_, timestamp(), curr_month, *morg_horizInterp_,
+ morg_data_end_); // output
+
//-----------------------------------------------------------------
// Setup preprocessing and post processing
//-----------------------------------------------------------------
- preprocess_.initialize(constituent_fluxes_);
+ preprocess_.initialize(ncol_, nlev_, wet_atm_, dry_atm_);
} // end initialize_impl()
@@ -223,14 +408,79 @@ void MAMSrfOnlineEmiss::initialize_impl(const RunType run_type) {
// RUN_IMPL
// ================================================================
void MAMSrfOnlineEmiss::run_impl(const double dt) {
- // Zero-out output
- Kokkos::deep_copy(preprocess_.constituent_fluxes_pre_, 0);
+ const auto scan_policy = ekat::ExeSpaceUtils<
+ KT::ExeSpace>::get_thread_range_parallel_scan_team_policy(ncol_, nlev_);
+
+ // preprocess input -- needs a scan for the calculation of atm height
+ Kokkos::parallel_for("preprocess", scan_policy, preprocess_);
+ Kokkos::fence();
+
+ // Constituent fluxes [kg/m^2/s]
+ auto constituent_fluxes = this->constituent_fluxes_;
+  // Zero out constituent fluxes only for gases and aerosols
+ init_fluxes(ncol_, // in
+ constituent_fluxes); // in-out
+ Kokkos::fence();
// Gather time and state information for interpolation
- auto ts = timestamp() + dt;
+ const auto ts = timestamp() + dt;
//--------------------------------------------------------------------
- // Interpolate srf emiss data
+ // Online emissions from dust and sea salt
+ //--------------------------------------------------------------------
+
+ // --- Interpolate marine organics data --
+
+ // Update TimeState, note the addition of dt
+ morg_timeState_.t_now = ts.frac_of_year_in_days();
+
+ // Update time state and if the month has changed, update the data.
+ marineOrganicsFunc::update_marine_organics_timestate(
+ morg_dataReader_, ts, *morg_horizInterp_,
+ // output
+ morg_timeState_, morg_data_start_, morg_data_end_);
+
+ // Call the main marine organics routine to get interpolated forcings.
+ marineOrganicsFunc::marineOrganics_main(morg_timeState_, morg_data_start_,
+ morg_data_end_, morg_data_out_);
+
+ // Marine organics emission data read from the file (order is important here)
+ const const_view_1d mpoly = ekat::subview(morg_data_out_.emiss_sectors, 0);
+ const const_view_1d mprot = ekat::subview(morg_data_out_.emiss_sectors, 1);
+ const const_view_1d mlip = ekat::subview(morg_data_out_.emiss_sectors, 2);
+
+ // Ocean fraction [unitless]
+ const const_view_1d ocnfrac =
+ get_field_in("ocnfrac").get_view();
+
+ // Sea surface temperature [K]
+ const const_view_1d sst = get_field_in("sst").get_view();
+
+ // U wind component [m/s]
+ const const_view_2d u_wind =
+ get_field_in("horiz_winds").get_component(0).get_view();
+
+ // V wind component [m/s]
+ const const_view_2d v_wind =
+ get_field_in("horiz_winds").get_component(1).get_view();
+
+ // Dust fluxes [kg/m^2/s]: Four flux values for each column
+ const const_view_2d dstflx = get_field_in("dstflx").get_view();
+
+  // Soil erodibility [fraction]
+ const const_view_1d soil_erodibility = this->soil_erodibility_;
+
+ // Vertical layer height at midpoints
+ const const_view_2d z_mid = dry_atm_.z_mid;
+
+ compute_online_dust_nacl_emiss(ncol_, nlev_, ocnfrac, sst, u_wind, v_wind,
+ dstflx, mpoly, mprot, mlip, soil_erodibility,
+ z_mid,
+ // output
+ constituent_fluxes);
+ Kokkos::fence();
+ //--------------------------------------------------------------------
+ // Interpolate srf emiss data read in from emissions files
//--------------------------------------------------------------------
for(srf_emiss_ &ispec_srf : srf_emiss_species_) {
@@ -256,20 +506,18 @@ void MAMSrfOnlineEmiss::run_impl(const double dt) {
// modify units from molecules/cm2/s to kg/m2/s
auto fluxes_in_mks_units = this->fluxes_in_mks_units_;
- auto constituent_fluxes = this->constituent_fluxes_;
const Real mfactor =
amufac * mam4::gas_chemistry::adv_mass[species_index - offset_];
+ const view_1d ispec_outdata0 =
+ ekat::subview(ispec_srf.data_out_.emiss_sectors, 0);
// Parallel loop over all the columns to update units
Kokkos::parallel_for(
- "fluxes", ncol_, KOKKOS_LAMBDA(int icol) {
- fluxes_in_mks_units(icol) =
- ispec_srf.data_out_.emiss_sectors(0, icol) * mfactor;
+ "srf_emis_fluxes", ncol_, KOKKOS_LAMBDA(int icol) {
+ fluxes_in_mks_units(icol) = ispec_outdata0(icol) * mfactor;
constituent_fluxes(icol, species_index) = fluxes_in_mks_units(icol);
});
-
} // for loop for species
Kokkos::fence();
-} // run_imple ends
-
+} // run_impl ends
// =============================================================================
} // namespace scream
diff --git a/components/eamxx/src/physics/mam/eamxx_mam_srf_and_online_emissions_process_interface.hpp b/components/eamxx/src/physics/mam/eamxx_mam_srf_and_online_emissions_process_interface.hpp
index 031fb62d8b7..1a3bb4f36e3 100644
--- a/components/eamxx/src/physics/mam/eamxx_mam_srf_and_online_emissions_process_interface.hpp
+++ b/components/eamxx/src/physics/mam/eamxx_mam_srf_and_online_emissions_process_interface.hpp
@@ -8,11 +8,12 @@
#include
#include
+// For reading marine organics file
+#include
+
// For declaring surface and online emission class derived from atm process
// class
#include
-
-// #include
#include
namespace scream {
@@ -20,19 +21,31 @@ namespace scream {
// The process responsible for handling MAM4 surface and online emissions. The
// AD stores exactly ONE instance of this class in its list of subcomponents.
class MAMSrfOnlineEmiss final : public scream::AtmosphereProcess {
- using KT = ekat::KokkosTypes;
- using view_1d = typename KT::template view_1d;
- using view_2d = typename KT::template view_2d;
+ using KT = ekat::KokkosTypes;
+ using view_1d = typename KT::template view_1d;
+ using view_2d = typename KT::template view_2d;
+ using const_view_1d = typename KT::template view_1d;
+ using const_view_2d = typename KT::template view_2d;
// number of horizontal columns and vertical levels
int ncol_, nlev_;
+ // Wet and dry states of atmosphere
+ mam_coupling::WetAtmosphere wet_atm_;
+ mam_coupling::DryAtmosphere dry_atm_;
+
  // buffer for storing temporary variables
mam_coupling::Buffer buffer_;
// physics grid for column information
std::shared_ptr grid_;
+  // Sea surface temperature [K]
+ const_view_1d sst_;
+
+ // Dust fluxes (four values for each col) [kg/m2/s]
+ const_view_2d dust_fluxes_;
+
// Constituent fluxes of species in [kg/m2/s]
view_2d constituent_fluxes_;
@@ -42,8 +55,16 @@ class MAMSrfOnlineEmiss final : public scream::AtmosphereProcess {
// Unified atomic mass unit used for unit conversion (BAD constant)
static constexpr Real amufac = 1.65979e-23; // 1.e4* kg / amu
+ // For reading soil erodibility file
+ std::shared_ptr serod_horizInterp_;
+ std::shared_ptr serod_dataReader_;
+ const_view_1d soil_erodibility_;
+
public:
+ // For reading surface emissions and marine organics file
using srfEmissFunc = mam_coupling::srfEmissFunctions;
+ using marineOrganicsFunc =
+ marine_organics::marineOrganicsFunctions;
// Constructor
MAMSrfOnlineEmiss(const ekat::Comm &comm, const ekat::ParameterList ¶ms);
@@ -80,13 +101,35 @@ class MAMSrfOnlineEmiss final : public scream::AtmosphereProcess {
struct Preprocess {
Preprocess() = default;
// on host: initializes preprocess functor with necessary state data
- void initialize(const view_2d &constituent_fluxes) {
- constituent_fluxes_pre_ = constituent_fluxes;
+ void initialize(const int &ncol, const int &nlev,
+ const mam_coupling::WetAtmosphere &wet_atm,
+ const mam_coupling::DryAtmosphere &dry_atm) {
+ ncol_pre_ = ncol;
+ nlev_pre_ = nlev;
+ wet_atm_pre_ = wet_atm;
+ dry_atm_pre_ = dry_atm;
}
+ KOKKOS_INLINE_FUNCTION
+ void operator()(
+ const Kokkos::TeamPolicy::member_type &team) const {
+ const int icol = team.league_rank(); // column index
+
+ compute_dry_mixing_ratios(team, wet_atm_pre_, dry_atm_pre_, icol);
+ team.team_barrier();
+ // vertical heights has to be computed after computing dry mixing ratios
+ // for atmosphere
+ compute_vertical_layer_heights(team, dry_atm_pre_, icol);
+ compute_updraft_velocities(team, wet_atm_pre_, dry_atm_pre_, icol);
+ } // Preprocess operator()
+
// local variables for preprocess struct
- view_2d constituent_fluxes_pre_;
- }; // MAMSrfOnlineEmiss::Preprocess
+ // number of horizontal columns and vertical levels
+ int ncol_pre_, nlev_pre_;
+ // local atmospheric and aerosol state data
+ mam_coupling::WetAtmosphere wet_atm_pre_;
+ mam_coupling::DryAtmosphere dry_atm_pre_;
+ }; // MAMSrfOnlineEmiss::Preprocess
private:
// preprocessing scratch pad
Preprocess preprocess_;
@@ -95,9 +138,11 @@ class MAMSrfOnlineEmiss final : public scream::AtmosphereProcess {
// FIXME: Remove the hardwired indices and use a function
// to find them from an array.
const std::map spcIndex_in_pcnst_ = {
- {"so2", 12}, {"dms", 13}, {"so4_a1", 15},
- {"num_a1", 22}, {"so4_a2", 23}, {"num_a2", 27},
- {"pom_a4", 36}, {"bc_a4", 37}, {"num_a4", 39}};
+ {"so2", 12}, {"dms", 13}, {"so4_a1", 15}, {"dst_a1", 19},
+ {"ncl_a1", 20}, {"mom_a1", 21}, {"num_a1", 22}, {"so4_a2", 23},
+ {"ncl_a2", 25}, {"mom_a2", 26}, {"num_a2", 27}, {"dst_a3", 28},
+ {"ncl_a3", 29}, {"num_a3", 35}, {"pom_a4", 36}, {"bc_a4", 37},
+ {"mom_a4", 38}, {"num_a4", 39}};
// A struct carrying all the fields needed to read
// surface emissions of a species
@@ -122,6 +167,13 @@ class MAMSrfOnlineEmiss final : public scream::AtmosphereProcess {
// A vector for carrying emissions for all the species
std::vector srf_emiss_species_;
+ // For reading marine organics file
+ std::shared_ptr morg_horizInterp_;
+ std::shared_ptr morg_dataReader_;
+ marineOrganicsFunc::marineOrganicsTimeState morg_timeState_;
+ marineOrganicsFunc::marineOrganicsInput morg_data_start_, morg_data_end_;
+ marineOrganicsFunc::marineOrganicsOutput morg_data_out_;
+
// offset for converting pcnst index to gas_pcnst index
static constexpr int offset_ =
mam4::aero_model::pcnst - mam4::gas_chemistry::gas_pcnst;
diff --git a/components/eamxx/src/physics/mam/readfiles/find_season_index_utils.hpp b/components/eamxx/src/physics/mam/readfiles/find_season_index_utils.hpp
new file mode 100644
index 00000000000..2e930cc65cb
--- /dev/null
+++ b/components/eamxx/src/physics/mam/readfiles/find_season_index_utils.hpp
@@ -0,0 +1,75 @@
+#ifndef EAMXX_MAM_FIND_SEASON_INDEX_UTILS
+#define EAMXX_MAM_FIND_SEASON_INDEX_UTILS
+
+#include
+#include
+
+#include "share/io/scorpio_input.hpp"
+#include "share/io/scream_scorpio_interface.hpp"
+
+namespace scream::mam_coupling {
+
+// views for single- and multi-column data
+
+using const_view_1d = typename KT::template view_1d;
+using view_int_2d = typename KT::template view_2d;
+
+using view_1d_host = typename KT::view_1d::HostMirror;
+using view_int_3d_host = typename KT::view_3d::HostMirror;
+using view_int_2d_host = typename KT::view_2d::HostMirror;
+
+/**
+ * @brief Reads the season index from the given file and computes the season
+ * indices based on latitudes.
+ *
+ * @param[in] season_wes_file The path to the season_wes.nc file.
+ * @param[in] clat A 1D view of latitude values in degrees.
+ * @param[out] index_season_lai A 2D view to store the computed season indices.
+ * Note that indices are in C++ (starting from zero).
+ */
+
+inline void find_season_index_reader(const std::string &season_wes_file,
+ const const_view_1d &clat,
+ view_int_2d &index_season_lai) {
+ const int plon = clat.extent(0);
+ scorpio::register_file(season_wes_file, scorpio::Read);
+
+ const int nlat_lai = scorpio::get_dimlen(season_wes_file, "lat");
+ const int npft_lai = scorpio::get_dimlen(season_wes_file, "pft");
+
+ view_1d_host lat_lai("lat_lai", nlat_lai);
+ view_int_2d_host wk_lai_temp("wk_lai", npft_lai, nlat_lai);
+ view_int_3d_host wk_lai("wk_lai", nlat_lai, npft_lai, 12);
+
+ scorpio::read_var(season_wes_file, "lat", lat_lai.data());
+
+ Kokkos::MDRangePolicy>
+ policy_wk_lai({0, 0}, {nlat_lai, npft_lai});
+
+  // loop over time to get all 12 instances of season_wes
+ for(int itime = 0; itime < 12; ++itime) {
+ scorpio::read_var(season_wes_file, "season_wes", wk_lai_temp.data(), itime);
+ // copy data from wk_lai_temp to wk_lai.
+    // NOTE: season_wes has different layout than wk_lai
+ Kokkos::parallel_for("copy_to_wk_lai", policy_wk_lai,
+ [&](const int j, const int k) {
+ wk_lai(j, k, itime) = wk_lai_temp(k, j);
+ });
+ Kokkos::fence();
+ }
+ scorpio::release_file(season_wes_file);
+
+ index_season_lai = view_int_2d("index_season_lai", plon, 12);
+ const view_int_2d_host index_season_lai_host =
+ Kokkos::create_mirror_view(index_season_lai);
+
+ const view_1d_host clat_host = Kokkos::create_mirror_view(clat);
+ Kokkos::deep_copy(clat_host, clat);
+
+ // Computation is performed on the host
+ mam4::mo_drydep::find_season_index(clat_host, lat_lai, nlat_lai, wk_lai,
+ index_season_lai_host);
+ Kokkos::deep_copy(index_season_lai, index_season_lai_host);
+}
+} // namespace scream::mam_coupling
+#endif //
diff --git a/components/eamxx/src/physics/mam/readfiles/marine_organics.hpp b/components/eamxx/src/physics/mam/readfiles/marine_organics.hpp
new file mode 100644
index 00000000000..a04dff129f4
--- /dev/null
+++ b/components/eamxx/src/physics/mam/readfiles/marine_organics.hpp
@@ -0,0 +1,133 @@
+#ifndef MARINE_ORGANICS_HPP
+#define MARINE_ORGANICS_HPP
+
+// For AtmosphereInput
+#include "share/io/scorpio_input.hpp"
+
+namespace scream {
+namespace marine_organics {
+
+template
+struct marineOrganicsFunctions {
+ using Device = DeviceType;
+
+ using KT = KokkosTypes;
+ using MemberType = typename KT::MemberType;
+ using view_2d = typename KT::template view_2d;
+
+ // -------------------------------------------------------------------------------------------
+ struct marineOrganicsTimeState {
+ marineOrganicsTimeState() = default;
+ // Whether the timestate has been initialized.
+ // The current month
+ int current_month = -1;
+ // Julian Date for the beginning of the month, as defined in
+ // /src/share/util/scream_time_stamp.hpp
+ // See this file for definition of Julian Date.
+ Real t_beg_month;
+ // Current simulation Julian Date
+ Real t_now;
+ // Number of days in the current month, cast as a Real
+ Real days_this_month;
+ }; // marineOrganicsTimeState
+
+ struct marineOrganicsData {
+ marineOrganicsData() = default;
+ marineOrganicsData(const int &ncol_, const int &nfields_)
+ : ncols(ncol_), nsectors(nfields_) {
+ init(ncols, nsectors, true);
+ }
+
+ void init(const int &ncol, const int &nsector, const bool allocate) {
+ ncols = ncol;
+ nsectors = nsector;
+ if(allocate) emiss_sectors = view_2d("morgAllSectors", nsectors, ncols);
+ } // marineOrganicsData init
+
+ // Basic spatial dimensions of the data
+ int ncols, nsectors;
+ view_2d emiss_sectors;
+ }; // marineOrganicsData
+
+ // -------------------------------------------------------------------------------------------
+ struct marineOrganicsInput {
+ marineOrganicsInput() = default;
+ marineOrganicsInput(const int &ncols_, const int &nfields_) {
+ init(ncols_, nfields_);
+ }
+
+ void init(const int &ncols_, const int &nfields_) {
+ data.init(ncols_, nfields_, true);
+ }
+ marineOrganicsData data; // All marineOrganics fields
+ }; // marineOrganicsInput
+
+ // The output is really just marineOrganicsData, but for clarity it might
+ // help to see a marineOrganicsOutput along a marineOrganicsInput in functions
+ // signatures
+ using marineOrganicsOutput = marineOrganicsData;
+
+ // -------------------------------------------------------------------------------------------
+ static std::shared_ptr create_horiz_remapper(
+ const std::shared_ptr &model_grid,
+ const std::string &marineOrganics_data_file, const std::string &map_file,
+ const std::vector &field_name, const std::string &dim_name1);
+
+ // -------------------------------------------------------------------------------------------
+ static std::shared_ptr create_data_reader(
+ const std::shared_ptr &horiz_remapper,
+ const std::string &data_file);
+
+ // -------------------------------------------------------------------------------------------
+ static void update_marine_organics_data_from_file(
+ std::shared_ptr &scorpio_reader,
+ const util::TimeStamp &ts,
+ const int &time_index, // zero-based
+ AbstractRemapper &horiz_interp,
+ marineOrganicsInput &marineOrganics_input);
+
+ // -------------------------------------------------------------------------------------------
+ static void update_marine_organics_timestate(
+ std::shared_ptr &scorpio_reader,
+ const util::TimeStamp &ts, AbstractRemapper &horiz_interp,
+ marineOrganicsTimeState &time_state, marineOrganicsInput &beg,
+ marineOrganicsInput &end);
+
+ // -------------------------------------------------------------------------------------------
+ static void marineOrganics_main(const marineOrganicsTimeState &time_state,
+ const marineOrganicsInput &data_beg,
+ const marineOrganicsInput &data_end,
+ const marineOrganicsOutput &data_out);
+
+ // -------------------------------------------------------------------------------------------
+ static void perform_time_interpolation(
+ const marineOrganicsTimeState &time_state,
+ const marineOrganicsInput &data_beg, const marineOrganicsInput &data_end,
+ const marineOrganicsOutput &data_out);
+
+ // -------------------------------------------------------------------------------------------
+ // Performs convex interpolation of x0 and x1 at point t
+ template
+ KOKKOS_INLINE_FUNCTION static ScalarX linear_interp(const ScalarX &x0,
+ const ScalarX &x1,
+ const ScalarT &t);
+
+ // -------------------------------------------------------------------------------------------
+ static void init_marine_organics_file_read(
+ const int &ncol, const std::vector &field_name,
+ const std::string &dim_name1,
+ const std::shared_ptr &grid,
+ const std::string &data_file, const std::string &mapping_file,
+ // output
+ std::shared_ptr &marineOrganicsHorizInterp,
+ marineOrganicsInput &morg_data_start_,
+ marineOrganicsInput &morg_data_end_, marineOrganicsData &morg_data_out_,
+ std::shared_ptr &marineOrganicsDataReader);
+
+}; // struct marineOrganicsFunctions
+
+} // namespace marine_organics
+} // namespace scream
+#endif // MARINE_ORGANICS_HPP
+
+#include "marine_organics_impl.hpp"
\ No newline at end of file
diff --git a/components/eamxx/src/physics/mam/readfiles/marine_organics_impl.hpp b/components/eamxx/src/physics/mam/readfiles/marine_organics_impl.hpp
new file mode 100644
index 00000000000..389445fa024
--- /dev/null
+++ b/components/eamxx/src/physics/mam/readfiles/marine_organics_impl.hpp
@@ -0,0 +1,296 @@
+#ifndef MARINE_ORGANICS_IMPL_HPP
+#define MARINE_ORGANICS_IMPL_HPP
+
+#include "share/grid/remap/identity_remapper.hpp"
+#include "share/grid/remap/refining_remapper_p2p.hpp"
+#include "share/io/scream_scorpio_interface.hpp"
+#include "share/util/scream_timing.hpp"
+
+namespace scream {
+namespace marine_organics {
+
+template
+std::shared_ptr
+marineOrganicsFunctions::create_horiz_remapper(
+ const std::shared_ptr &model_grid,
+ const std::string &data_file, const std::string &map_file,
+ const std::vector &field_name, const std::string &dim_name1) {
+ using namespace ShortFieldTagsNames;
+
+ scorpio::register_file(data_file, scorpio::Read);
+ const int ncols_data = scorpio::get_dimlen(data_file, dim_name1);
+
+ scorpio::release_file(data_file);
+
+ // Since shallow clones are cheap, we may as well do it (less lines of
+ // code)
+ auto horiz_interp_tgt_grid =
+ model_grid->clone("marine_organics_horiz_interp_tgt_grid", true);
+
+ const int ncols_model = model_grid->get_num_global_dofs();
+ std::shared_ptr remapper;
+ if(ncols_data == ncols_model) {
+ remapper = std::make_shared(
+ horiz_interp_tgt_grid, IdentityRemapper::SrcAliasTgt);
+ } else {
+ EKAT_REQUIRE_MSG(ncols_data <= ncols_model,
+ "Error! We do not allow to coarsen marine organics "
+ "data to fit the model. We only allow\n"
+ " marine organics data to be at the same or "
+ "coarser resolution as the model.\n");
+ // We must have a valid map file
+ EKAT_REQUIRE_MSG(map_file != "",
+ "ERROR: marine organics data is on a different grid "
+ "than the model one,\n"
+ " but remap file is missing from marine organics "
+ "parameter list.");
+
+ remapper =
+ std::make_shared(horiz_interp_tgt_grid, map_file);
+ }
+
+ remapper->registration_begins();
+
+ const auto tgt_grid = remapper->get_tgt_grid();
+
+ const auto layout_2d = tgt_grid->get_2d_scalar_layout();
+ using namespace ekat::units;
+ using namespace ekat::prefixes;
+ Units umolC(micro * mol, "umol C");
+
+ std::vector fields_vector;
+
+ const int field_size = field_name.size();
+ for(int icomp = 0; icomp < field_size; ++icomp) {
+ auto comp_name = field_name[icomp];
+ // set and allocate fields
+ Field f(FieldIdentifier(comp_name, layout_2d, umolC, tgt_grid->name()));
+ f.allocate_view();
+ fields_vector.push_back(f);
+ remapper->register_field_from_tgt(f);
+ }
+
+ remapper->registration_ends();
+
+ return remapper;
+
+} // create_horiz_remapper
+
+// -------------------------------------------------------------------------------------------
+template
+std::shared_ptr
+marineOrganicsFunctions::create_data_reader(
+ const std::shared_ptr &horiz_remapper,
+ const std::string &data_file) {
+ std::vector io_fields;
+ for(int ifld = 0; ifld < horiz_remapper->get_num_fields(); ++ifld) {
+ io_fields.push_back(horiz_remapper->get_src_field(ifld));
+ }
+ const auto io_grid = horiz_remapper->get_src_grid();
+ return std::make_shared(data_file, io_grid, io_fields, true);
+} // create_data_reader
+
+// -------------------------------------------------------------------------------------------
+template
+void marineOrganicsFunctions::update_marine_organics_data_from_file(
+ std::shared_ptr &scorpio_reader, const util::TimeStamp &ts,
+ const int &time_index, // zero-based
+ AbstractRemapper &horiz_interp, marineOrganicsInput &marineOrganics_input) {
+ start_timer("EAMxx::marineOrganics::update_marine_organics_data_from_file");
+
+ // 1. Read from file
+ start_timer(
+ "EAMxx::marineOrganics::update_marine_organics_data_from_file::read_"
+ "data");
+ scorpio_reader->read_variables();
+ stop_timer(
+ "EAMxx::marineOrganics::update_marine_organics_data_from_file::read_"
+ "data");
+
+ // 2. Run the horiz remapper (it is a do-nothing op if marineOrganics data is
+ // on same grid as model)
+ start_timer(
+ "EAMxx::marineOrganics::update_marine_organics_data_from_file::horiz_"
+ "remap");
+ horiz_interp.remap(/*forward = */ true);
+ stop_timer(
+ "EAMxx::marineOrganics::update_marine_organics_data_from_file::horiz_"
+ "remap");
+
+ // 3. Get the tgt field of the remapper
+ start_timer(
+ "EAMxx::marineOrganics::update_marine_organics_data_from_file::get_"
+ "field");
+ // Recall, the fields are registered in the order:
+ // Read the field from the file
+
+ for(int ifld = 0; ifld < horiz_interp.get_num_fields(); ++ifld) {
+ auto sector = horiz_interp.get_tgt_field(ifld).get_view();
+ const auto emiss = Kokkos::subview(marineOrganics_input.data.emiss_sectors,
+ ifld, Kokkos::ALL());
+ Kokkos::deep_copy(emiss, sector);
+ }
+
+ Kokkos::fence();
+
+ stop_timer(
+ "EAMxx::marineOrganics::update_marine_organics_data_from_file::get_"
+ "field");
+
+ stop_timer("EAMxx::marineOrganics::update_marine_organics_data_from_file");
+
+} // END update_marine_organics_data_from_file
+
+// -------------------------------------------------------------------------------------------
+template
+void marineOrganicsFunctions::update_marine_organics_timestate(
+ std::shared_ptr &scorpio_reader, const util::TimeStamp &ts,
+ AbstractRemapper &horiz_interp, marineOrganicsTimeState &time_state,
+ marineOrganicsInput &beg, marineOrganicsInput &end) {
+ // Now we check if we have to update the data that changes monthly
+ // NOTE: This means that marineOrganics assumes monthly data to update. Not
+ // any other frequency.
+ const auto month = ts.get_month() - 1; // Make it 0-based
+ if(month != time_state.current_month) {
+ // Update the marineOrganics time state information
+ time_state.current_month = month;
+ time_state.t_beg_month =
+ util::TimeStamp({ts.get_year(), month + 1, 1}, {0, 0, 0})
+ .frac_of_year_in_days();
+ time_state.days_this_month = util::days_in_month(ts.get_year(), month + 1);
+
+    // Copy end's data into beg's data, and read in the new
+ // end
+ std::swap(beg, end);
+
+ // Update the marineOrganics forcing data for this month and next month
+ // Start by copying next months data to this months data structure.
+ // NOTE: If the timestep is bigger than monthly this could cause the wrong
+ // values
+ // to be assigned. A timestep greater than a month is very unlikely
+ // so we will proceed.
+ int next_month = (time_state.current_month + 1) % 12;
+ update_marine_organics_data_from_file(scorpio_reader, ts, next_month,
+ horiz_interp, end);
+ }
+
+}  // END update_marine_organics_timestate
+
+// -------------------------------------------------------------------------------------------
+template
+template
+KOKKOS_INLINE_FUNCTION ScalarX marineOrganicsFunctions::linear_interp(
+ const ScalarX &x0, const ScalarX &x1, const ScalarT &t) {
+ return (1 - t) * x0 + t * x1;
+} // linear_interp
+
+// -------------------------------------------------------------------------------------------
+template
+void marineOrganicsFunctions::perform_time_interpolation(
+ const marineOrganicsTimeState &time_state,
+ const marineOrganicsInput &data_beg, const marineOrganicsInput &data_end,
+ const marineOrganicsOutput &data_out) {
+ using ExeSpace = typename KT::ExeSpace;
+ using ESU = ekat::ExeSpaceUtils;
+
+ // Gather time stamp info
+ auto &t_now = time_state.t_now;
+ auto &t_beg = time_state.t_beg_month;
+ auto &delta_t = time_state.days_this_month;
+
+ // At this stage, begin/end must have the same dimensions
+ EKAT_REQUIRE(data_end.data.ncols == data_beg.data.ncols);
+
+ auto delta_t_fraction = (t_now - t_beg) / delta_t;
+
+ EKAT_REQUIRE_MSG(delta_t_fraction >= 0 && delta_t_fraction <= 1,
+ "Error! Convex interpolation with coefficient out of "
+ "[0,1].\n t_now : " +
+ std::to_string(t_now) +
+ "\n"
+ " t_beg : " +
+ std::to_string(t_beg) +
+ "\n delta_t: " + std::to_string(delta_t) + "\n");
+
+ const int nsectors = data_beg.data.nsectors;
+ const int ncols = data_beg.data.ncols;
+ using ExeSpace = typename KT::ExeSpace;
+ using ESU = ekat::ExeSpaceUtils;
+ const auto policy = ESU::get_default_team_policy(ncols, nsectors);
+
+ Kokkos::parallel_for(
+ policy, KOKKOS_LAMBDA(const MemberType &team) {
+ const int icol = team.league_rank(); // column index
+ Kokkos::parallel_for(
+ Kokkos::TeamVectorRange(team, 0u, nsectors), [&](int isec) {
+ const auto beg = data_beg.data.emiss_sectors(isec, icol);
+ const auto end = data_end.data.emiss_sectors(isec, icol);
+ data_out.emiss_sectors(isec, icol) =
+ linear_interp(beg, end, delta_t_fraction);
+ });
+ });
+ Kokkos::fence();
+
+} // perform_time_interpolation
+
+// -------------------------------------------------------------------------------------------
+template
+void marineOrganicsFunctions::marineOrganics_main(
+ const marineOrganicsTimeState &time_state,
+ const marineOrganicsInput &data_beg, const marineOrganicsInput &data_end,
+ const marineOrganicsOutput &data_out) {
+ // Beg/End/Tmp month must have all sizes matching
+
+ EKAT_REQUIRE_MSG(
+ data_end.data.ncols == data_beg.data.ncols,
+ "Error! marineOrganicsInput data structs must have the same number of "
+ "columns.\n");
+
+ // Horiz interpolation can be expensive, and does not depend on the particular
+ // time of the month, so it can be done ONCE per month, *outside*
+ // marineOrganics_main (when updating the beg/end states, reading them from
+ // file).
+ EKAT_REQUIRE_MSG(data_end.data.ncols == data_out.ncols,
+ "Error! Horizontal interpolation is performed *before* "
+ "calling marineOrganics_main,\n"
+ " marineOrganicsInput and marineOrganicsOutput data "
+ "structs must have the "
+ "same number columns "
+ << data_end.data.ncols << " " << data_out.ncols
+ << ".\n");
+
+ // Step 1. Perform time interpolation
+ perform_time_interpolation(time_state, data_beg, data_end, data_out);
+} // marineOrganics_main
+
+// -------------------------------------------------------------------------------------------
+template
+void marineOrganicsFunctions::init_marine_organics_file_read(
+ const int &ncol, const std::vector &field_name,
+ const std::string &dim_name1,
+ const std::shared_ptr &grid,
+ const std::string &data_file, const std::string &mapping_file,
+ // output
+ std::shared_ptr &marineOrganicsHorizInterp,
+ marineOrganicsInput &data_start_, marineOrganicsInput &data_end_,
+ marineOrganicsData &data_out_,
+ std::shared_ptr &marineOrganicsDataReader) {
+ // Init horizontal remap
+
+ marineOrganicsHorizInterp = create_horiz_remapper(
+ grid, data_file, mapping_file, field_name, dim_name1);
+
+ // Initialize the size of start/end/out data structures
+ data_start_ = marineOrganicsInput(ncol, field_name.size());
+ data_end_ = marineOrganicsInput(ncol, field_name.size());
+ data_out_.init(ncol, field_name.size(), true);
+
+ // Create reader (an AtmosphereInput object)
+ marineOrganicsDataReader =
+ create_data_reader(marineOrganicsHorizInterp, data_file);
+
+} // init_marine_organics_file_read
+} // namespace marine_organics
+} // namespace scream
+
+#endif // MARINE_ORGANICS_IMPL_HPP
\ No newline at end of file
diff --git a/components/eamxx/src/physics/mam/readfiles/soil_erodibility.hpp b/components/eamxx/src/physics/mam/readfiles/soil_erodibility.hpp
new file mode 100644
index 00000000000..8b47c81d907
--- /dev/null
+++ b/components/eamxx/src/physics/mam/readfiles/soil_erodibility.hpp
@@ -0,0 +1,44 @@
+#ifndef SOIL_ERODIBILITY_HPP
+#define SOIL_ERODIBILITY_HPP
+
+// For AtmosphereInput
+#include "share/io/scorpio_input.hpp"
+
+namespace scream {
+namespace soil_erodibility {
+
+template <typename DeviceType>  // NOTE(review): angle-bracket args lost in extraction; reconstructed from 'using Device = DeviceType' below
+struct soilErodibilityFunctions {
+  using Device = DeviceType;
+
+  using KT = KokkosTypes<Device>;
+  using const_view_1d = typename KT::template view_1d<const Real>;  // element type presumed Real -- confirm
+
+  static std::shared_ptr<AbstractRemapper> create_horiz_remapper(  // remap data-file grid onto model grid
+      const std::shared_ptr<const AbstractGrid> &model_grid,
+      const std::string &soilErodibility_data_file, const std::string &map_file,
+      const std::string &field_name, const std::string &dim_name1);
+
+  static std::shared_ptr<AtmosphereInput> create_data_reader(  // scorpio reader for data_file
+      const std::shared_ptr<AbstractRemapper> &horiz_remapper,
+      const std::string &data_file);
+
+  static void update_soil_erodibility_data_from_file(
+      std::shared_ptr<AtmosphereInput> &scorpio_reader,
+      AbstractRemapper &horiz_interp, const_view_1d &input);  // NOTE(review): 'input' appears to receive the read data -- confirm
+
+  static void init_soil_erodibility_file_read(
+      const int ncol, const std::string field_name, const std::string dim_name1,
+      const std::shared_ptr<const AbstractGrid> &grid,
+      const std::string &data_file, const std::string &mapping_file,
+      // output
+      std::shared_ptr<AbstractRemapper> &SoilErodibilityHorizInterp,
+      std::shared_ptr<AtmosphereInput> &SoilErodibilityDataReader);
+
+}; // struct soilErodibilityFunctions
+
+} // namespace soil_erodibility
+} // namespace scream
+#endif // SOIL_ERODIBILITY_HPP
+
+#include "soil_erodibility_impl.hpp"
\ No newline at end of file
diff --git a/components/eamxx/src/physics/mam/readfiles/soil_erodibility_impl.hpp b/components/eamxx/src/physics/mam/readfiles/soil_erodibility_impl.hpp
new file mode 100644
index 00000000000..af0c4d73c17
--- /dev/null
+++ b/components/eamxx/src/physics/mam/readfiles/soil_erodibility_impl.hpp
@@ -0,0 +1,147 @@
+#ifndef SOIL_ERODIBILITY_IMPL_HPP
+#define SOIL_ERODIBILITY_IMPL_HPP
+
+#include "share/grid/remap/identity_remapper.hpp"
+#include "share/grid/remap/refining_remapper_p2p.hpp"
+#include "share/io/scream_scorpio_interface.hpp"
+#include "share/util/scream_timing.hpp"
+
+namespace scream {
+namespace soil_erodibility {
+
+template
+std::shared_ptr
+soilErodibilityFunctions::create_horiz_remapper(
+ const std::shared_ptr