diff --git a/.github/scripts/ghcr-prune.py b/.github/scripts/ghcr-prune.py new file mode 100644 index 00000000000..87b5db7e112 --- /dev/null +++ b/.github/scripts/ghcr-prune.py @@ -0,0 +1,187 @@ +import argparse +import logging +import requests +import re +import json +from datetime import datetime +from datetime import timedelta + +description = """ +This script can be used to prune container images hosted on ghcr.io.\n + +Our testing workflow will build and push container images to ghcr.io +that are only used for testing. This script is used to cleanup these +temporary images. + +You can filter containers by any combination of name, age, and untagged. +""" + +parser = argparse.ArgumentParser(description=description, formatter_class=argparse.RawTextHelpFormatter) + +parser.add_argument("--token", required=True, help='GitHub token with "repo" scope') +parser.add_argument("--org", required=True, help="Organization name") +parser.add_argument("--name", required=True, help="Package name") +parser.add_argument( + "--age", type=int, help="Filter versions by age, removing anything older than" +) +parser.add_argument( + "--filter", help="Filter which versions are consider for pruning", default=".*" +) +parser.add_argument("--untagged", action="store_true", help="Prune untagged versions") +parser.add_argument( + "--dry-run", action="store_true", help="Does not actually delete anything" +) + +logging_group = parser.add_argument_group("logging") +logging_group.add_argument( + "--log-level", choices=("DEBUG", "INFO", "WARNING", "ERROR"), default="INFO" +) + +kwargs = vars(parser.parse_args()) + +logging.basicConfig(level=kwargs["log_level"]) + +logger = logging.getLogger("ghcr-prune") + + +class GitHubPaginate: + """Iterator for GitHub API. + + Provides small wrapper for GitHub API to utilize paging in API calls. 
+ + https://docs.github.com/en/rest/using-the-rest-api/using-pagination-in-the-rest-api?apiVersion=2022-11-28 + """ + def __init__(self, token, org, name, age, filter, untagged, **_): + self.token = token + self.session = None + self.url = ( + f"https://api.github.com/orgs/{org}/packages/container/{name}/versions" + ) + self.expired = datetime.now() - timedelta(days=age) + self.filter = re.compile(filter) + self.page = None + self.untagged = untagged + + def create_session(self): + self.session = requests.Session() + self.session.headers.update( + { + "Accept": "application/vnd.github+json", + "Authorization": f"Bearer {self.token}", + "X-GitHub-Api-Version": "2022-11-28", + } + ) + + def grab_page(self): + if self.session is None: + raise Exception("Must create session first") + + if self.url is None: + raise Exception("No more pages") + + response = self.session.get(self.url) + + response.raise_for_status() + + remaining = int(response.headers["X-RateLimit-Remaining"]) + + logger.debug(f"Remaining api limit {remaining}") + + if remaining <= 0: + reset = response.headers["X-RateLimit-Reset"] + + raise Exception(f"Hit ratelimit will reset at {reset}") + + try: + self.url = self.get_next_url(response.headers["Link"]) + except Exception as e: + logger.debug(f"No Link header found {e}") + + self.url = None + + return self.filter_results(response.json()) + + def get_next_url(self, link): + match = re.match("<([^>]*)>.*", link) + + if match is None: + raise Exception("Could not determine next link") + + return match.group(1) + + def filter_results(self, data): + results = [] + + logger.info(f"Processing {len(data)} containers") + + for x in data: + url = x["url"] + updated_at = datetime.strptime(x["updated_at"], "%Y-%m-%dT%H:%M:%SZ") + + logger.debug(f"Processing\n{json.dumps(x, indent=2)}") + + try: + tag = x["metadata"]["container"]["tags"][0] + except IndexError: + logger.info(f'Found untagged version {x["id"]}') + + if self.untagged: + results.append(url) + + 
continue + + if not self.filter.match(tag): + logger.info(f"Skipping {tag}, did not match filter") + + continue + + if updated_at < self.expired: + logger.info( + f"Pruning {tag}, updated at {updated_at}, expiration {self.expired}" + ) + + results.append(url) + else: + logger.info(f"Skipping {tag}, more recent than {self.expired}") + + return results + + def __iter__(self): + self.create_session() + + return self + + def __next__(self): + if self.page is None or len(self.page) == 0: + try: + self.page = self.grab_page() + except Exception as e: + logger.debug(f"StopIteration condition {e!r}") + + raise StopIteration from None + + try: + item = self.page.pop(0) + except IndexError: + raise StopIteration from None + + return item + + def remove_container(self, url): + if self.session is None: + raise Exception("Must create session first") + + response = self.session.delete(url) + + response.raise_for_status() + + logger.debug(f"{response.headers}") + + +pager = GitHubPaginate(**kwargs) + +for url in pager: + if kwargs["dry_run"]: + logger.info(f"Pruning {url}") + else: + pager.remove_container(url) diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 0b855e6ab76..70899438082 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -4,36 +4,55 @@ on: push: branches: - master + paths: + - 'doc/**' pull_request: branches: - master + paths: + - 'doc/**' + + workflow_dispatch: permissions: contents: read jobs: - check-changes: - name: Check for changes to documentation + cleanup: + permissions: + contents: write # for git push + name: Cleanup branch previews runs-on: ubuntu-latest - outputs: - any_changed: ${{ steps.changed-check.outputs.any_changed }} + if: ${{ github.event_name == 'push' }} steps: - uses: actions/checkout@v3 with: + ref: 'gh-pages' fetch-depth: 0 lfs: true - - uses: tj-actions/changed-files@v32 - id: changed-check - with: - files: doc + path: gh-pages + - name: Remove branch previews + run: | + pushd 
$GITHUB_WORKSPACE/gh-pages + + for name in `ls branch/` + do + if [[ -z "$(git show-ref --quiet ${name})" ]] + then + git rm -rf branch/${name} + fi + done + + git config user.name github-actions[bot] + git config user.email github-actions[bot]@users.noreply.github.com + git commit -m "Clean up branch previews" + git push build-and-deploy: permissions: contents: write # for peaceiris/actions-gh-pages to push pull-requests: write # to comment on pull requests - needs: check-changes - if: | - needs.check-changes.outputs.any_changed == 'true' && - github.event.pull_request.head.repo.full_name == github.repository + needs: cleanup + if: ${{ always() }} name: Build and deploy documentation runs-on: ubuntu-latest steps: @@ -61,7 +80,9 @@ jobs: run: | make BUILDDIR=${PWD}/_build -C doc/ html - name: Push PR preview - if: ${{ github.event_name == 'pull_request' }} + if: | + github.event_name == 'pull_request' && + github.event.pull_request.head.repo.full_name == github.repository uses: peaceiris/actions-gh-pages@v3 with: github_token: ${{secrets.GITHUB_TOKEN}} @@ -70,7 +91,9 @@ jobs: user_name: 'github-actions[bot]' user_email: 'github-actions[bot]@users.noreply.github.com' - name: Comment about previewing documentation - if: ${{ github.event_name == 'pull_request' }} + if: | + github.event_name == 'pull_request' && + github.event.pull_request.head.repo.full_name == github.repository uses: actions/github-script@v6 with: script: | @@ -99,31 +122,3 @@ jobs: destination_dir: './versions/master/html' user_name: 'github-actions[bot]' user_email: 'github-actions[bot]@users.noreply.github.com' - cleanup: - permissions: - contents: write # for git push - needs: build-and-deploy - name: Cleanup branch previews - runs-on: ubuntu-latest - if: ${{ github.event_name == 'push' }} - steps: - - uses: actions/checkout@v3 - with: - ref: 'gh-pages' - fetch-depth: 0 - lfs: true - - name: Remove branch previews - run: | - for name in `ls branch/` - do - if [[ -z "$(git show-ref --quiet 
${name})" ]] - then - git rm -rf branch/${name} - fi - done - - name: Commit and push local changes to gh-pages - run: | - git config user.name github-actions[bot] - git config user.email github-actions[bot]@users.noreply.github.com - git commit -m "Clean up branch previews" - git push diff --git a/.github/workflows/ghcr-prune.yml b/.github/workflows/ghcr-prune.yml new file mode 100644 index 00000000000..5e26f83e3ce --- /dev/null +++ b/.github/workflows/ghcr-prune.yml @@ -0,0 +1,24 @@ +name: Prune ghcr.io container images +on: + schedule: + # run once a day + - cron: '0 2 * * *' + + # Temporary to test + pull_request: + +permissions: {} + +jobs: + prune: + permissions: + packages: write + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + - run: | + pip install requests + + # remove containers older than 14 days and only generated by testing workflow + python .github/scripts/ghcr-prune.py --token ${{ secrets.GITHUB_TOKEN }} --org esmci --name cime --age 14 --filter sha- --untagged diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml new file mode 100644 index 00000000000..ce0933e7515 --- /dev/null +++ b/.github/workflows/stale.yml @@ -0,0 +1,24 @@ +name: 'Close stale issues and PRs' +on: + schedule: + # Run every day at 1:30AM + - cron: '30 1 * * *' +jobs: + stale: + permissions: + issues: write + runs-on: ubuntu-latest + steps: + - uses: actions/stale@v8 + with: + stale-issue-message: 'This issue is stale because it has been open 90 days with no activity. Remove stale label or comment or this will be closed in 5 days.' + close-issue-message: 'This issue was closed because it has been stalled for 5 days with no activity.' + days-before-stale: 90 + days-before-close: 5 + days-before-pr-close: -1 + # Issues with this label are exempt from being checked if they are stale... 
+ exempt-issue-labels: Low Priority + # Below are currently defaults, but given in case we decide to change + operations-per-run: 30 + stale-issue-label: Stale + close-issue-reason: not_planned diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml index 8ffa63c038b..530c00db89e 100644 --- a/.github/workflows/testing.yml +++ b/.github/workflows/testing.yml @@ -4,10 +4,24 @@ on: push: branches: - master + paths: + - 'CIME/**' + - 'scripts/**' + - 'tools/**' + - 'utils/**' + - 'docker/**' pull_request: branches: - master + paths: + - 'CIME/**' + - 'scripts/**' + - 'tools/**' + - 'utils/**' + - 'docker/**' + + workflow_dispatch: concurrency: group: ${{ github.ref }} @@ -15,51 +29,13 @@ concurrency: permissions: contents: read # to fetch code (actions/checkout) + packages: read jobs: - pre-commit: - runs-on: ubuntu-latest - timeout-minutes: 2 - steps: - - name: Checkout code - uses: actions/checkout@v2 - - name: Set up python - uses: actions/setup-python@v2 - with: - python-version: 3.9 - # Offical action is deprecated in favor of pre-commit.ci - # Should evaulate switching or just running manually. - # - name: Runs pre-commit action - # # Do not run if using act tooling (https://github.com/nektos/act) - # if: ${{ !env.ACT }} - # uses: pre-commit/action@v2.0.3 - - name: Runs pre-commit - run: | - pip install pre-commit - - pre-commit run -a - - # Check if there has been a change to any file under docker/ - get-docker-changes: - runs-on: ubuntu-latest - outputs: - any_changed: ${{ steps.get-changed-files.outputs.any_changed }} - steps: - - name: Checkout code - uses: actions/checkout@v3 - with: - fetch-depth: 2 - - name: Get changed files - id: get-changed-files - uses: tj-actions/changed-files@v29 - with: - files: docker - - # Only build container if there has been a change. 
build-containers: runs-on: ubuntu-latest - needs: get-docker-changes - if: ${{ needs.get-docker-changes.outputs.any_changed == 'true' }} + permissions: + packages: write steps: - name: Checkout code uses: actions/checkout@v3 @@ -70,16 +46,17 @@ jobs: - name: Login to DockerHub uses: docker/login-action@v2 with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} - name: Docker meta id: meta uses: docker/metadata-action@v4 with: - images: jasonb87/cime + images: ghcr.io/ESMCI/cime tags: | - type=raw,value=latest - type=sha,prefix={{ date 'YYYYMMDD' }}_,format=short + type=raw,value=latest,enable=${{ github.event_name == 'push' }} + type=sha,format=long - name: Build and push uses: docker/build-push-action@v3 with: @@ -88,52 +65,76 @@ jobs: push: true tags: ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} - cache-from: type=registry,ref=jasonb87/cime:buildcache - cache-to: type=registry,ref=jasonb87/cime:buildcache,mode=max + cache-from: type=gha + cache-to: type=gha,mode=max + + pre-commit: + runs-on: ubuntu-latest + if: ${{ github.event_name == 'pull_request' && ! cancelled() }} + timeout-minutes: 2 + steps: + - name: Checkout code + uses: actions/checkout@v2 + - name: Set up python + uses: actions/setup-python@v2 + with: + python-version: 3.9 + - name: Runs pre-commit + run: | + pip install pre-commit + + pre-commit run -a # Runs unit testing under different python versions. unit-testing: runs-on: ubuntu-latest + if: ${{ github.event_name == 'pull_request' && always() && ! cancelled() }} needs: build-containers - if: ${{ always() && ! 
cancelled() }} - container: jasonb87/cime:latest + container: + image: ghcr.io/esmci/cime:sha-${{ github.sha }} + credentials: + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} strategy: matrix: - python-version: [3.7, 3.8, 3.9] + python-version: ['3.8', '3.9', '3.10'] steps: - name: Checkout code uses: actions/checkout@v2 - name: Run tests shell: bash env: - INIT: "false" CIME_MODEL: "cesm" - CIME_DRIVER: "mct" - UPDATE_CIME: "true" - GIT_SHALLOW: "true" + CIME_DRIVER: "nuopc" CIME_TEST_PLATFORM: ubuntu-latest run: | - export INSTALL_PATH="${PWD}" - export CIME_REPO=https://github.com/${{ github.event.pull_request.head.repo.full_name || github.repository }} - export CIME_BRANCH=${GITHUB_HEAD_REF:-${GITHUB_REF##*/}} + export SRC_PATH="${GITHUB_WORKSPACE}" mamba install -y python=${{ matrix.python-version }} source /entrypoint.sh - git config --global --add safe.directory /__w/cime/cime + # GitHub runner home is different than container + cp -rf /root/.cime /github/home/ - init_cime + git status - pytest -vvv --machine docker --no-fortran-run CIME/tests/test_unit* + pytest -vvv --cov=CIME --machine docker --no-fortran-run CIME/tests/test_unit* # Run system tests system-testing: runs-on: ubuntu-latest + if: ${{ github.event_name == 'pull_request' && always() && ! cancelled() }} needs: build-containers - if: ${{ always() && ! 
cancelled() }} - container: jasonb87/cime:latest + container: + image: ghcr.io/esmci/cime:sha-${{ github.sha }} + credentials: + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + options: --hostname docker strategy: + # allow all jobs to finish + fail-fast: false matrix: model: ["e3sm", "cesm"] driver: ["mct", "nuopc"] @@ -141,6 +142,9 @@ jobs: # exclude nuopc driver when running e3sm tests - model: "e3sm" driver: "nuopc" + # exclude mct driver when running cesm tests + - model: "cesm" + driver: "mct" steps: - name: Checkout code uses: actions/checkout@v2 @@ -152,41 +156,48 @@ jobs: - name: Run tests shell: bash env: - INIT: "false" CIME_MODEL: ${{ matrix.model }} CIME_DRIVER: ${{ matrix.driver }} - UPDATE_CIME: "true" - GIT_SHALLOW: "true" CIME_TEST_PLATFORM: ubuntu-latest run: | - export INSTALL_PATH="${PWD}/cime" - export CIME_REPO=https://github.com/${{ github.event.pull_request.head.repo.full_name || github.repository }} - export CIME_BRANCH=${GITHUB_HEAD_REF:-${GITHUB_REF##*/}} + export SRC_PATH="${GITHUB_WORKSPACE}" source /entrypoint.sh - git config --global --add safe.directory /__w/cime/cime + # GitHub runner home is different than container + cp -rf /root/.cime /github/home/ - if [[ "${CIME_MODEL}" == "e3sm" ]] - then - init_e3sm - else - init_cime - fi + if [[ "${CIME_MODEL}" == "e3sm" ]]; then + git remote set-url origin https://github.com/${{ github.event.pull_request.head.repo.full_name || github.repository }} + git remote set-branches origin "*" + git fetch origin + git checkout ${GITHUB_HEAD_REF:-${GITHUB_REF##*/}} + + # sync correct submodules + git submodule update - source /opt/conda/etc/profile.d/conda.sh + source /opt/conda/etc/profile.d/conda.sh - conda activate base + conda activate base + fi - pytest -vvv --machine docker --no-fortran-run --no-teardown CIME/tests/test_sys* + git status + + pytest -vvv --cov=CIME --machine docker --no-fortran-run --no-teardown CIME/tests/test_sys* + - uses: mxschmitt/action-tmate@v3 + 
if: ${{ !always() }} + with: + limit-access-to-actor: true - name: Create testing log archive if: ${{ failure() }} shell: bash - run: tar -czvf /testing-logs-${GITHUB_RUN_NUMBER}.tar.gz /storage/cases/ + run: tar -czvf /testing-logs-${GITHUB_RUN_NUMBER}-${{ matrix.model }}-${{ matrix.driver }}.tar.gz /storage/cases/ + # How to download artifacts: + # https://docs.github.com/en/actions/managing-workflow-runs/downloading-workflow-artifacts - name: Upload testing logs if: ${{ failure() }} uses: actions/upload-artifact@v3 with: - name: testing-logs-${{ github.run_number }} - path: /testing-logs-${{ github.run_number}}.tar.gz + name: testing-logs-${{ github.run_number }}-${{ matrix.model }}-${{ matrix.driver }} + path: /testing-logs-${{ github.run_number}}-${{ matrix.model }}-${{ matrix.driver }}.tar.gz retention-days: 4 diff --git a/.gitignore b/.gitignore index 58e9dd92b66..0f54f714f75 100644 --- a/.gitignore +++ b/.gitignore @@ -26,3 +26,5 @@ scripts/Tools/JENKINS* components libraries share +test_coverage/** +*.bak diff --git a/.gitmodules b/.gitmodules index e69de29bb2d..13f9ecb952f 100644 --- a/.gitmodules +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "CIME/non_py/cprnc"] + path = CIME/non_py/cprnc + url = git@github.com:ESMCI/cprnc diff --git a/CIME/BuildTools/configure.py b/CIME/BuildTools/configure.py index 4cd9059e5db..7cb884a625f 100755 --- a/CIME/BuildTools/configure.py +++ b/CIME/BuildTools/configure.py @@ -145,13 +145,13 @@ def __init__(self, compiler, mpilib, debug, comp_interface, threading=False): "DEBUG": debug, "COMP_INTERFACE": comp_interface, "PIO_VERSION": 2, - "SMP_PRESENT": threading, + "BUILD_THREADED": threading, "MODEL": get_model(), "SRCROOT": get_src_root(), } def get_build_threaded(self): - return self.get_value("SMP_PRESENT") + return self.get_value("BUILD_THREADED") def get_case_root(self): """Returns the root directory for this case.""" diff --git a/CIME/ParamGen/paramgen.py b/CIME/ParamGen/paramgen.py index 4fa6221f06e..188b954bd31 
100644 --- a/CIME/ParamGen/paramgen.py +++ b/CIME/ParamGen/paramgen.py @@ -4,6 +4,7 @@ from copy import deepcopy import logging import subprocess +import shutil try: from paramgen_utils import is_logical_expr, is_formula, has_unexpanded_var @@ -136,9 +137,7 @@ def from_xml_nml(cls, input_path, match="last", no_duplicates=False): """ # First check whether the given xml file conforms to the entry_id_pg.xsd schema - from distutils.spawn import find_executable - - xmllint = find_executable("xmllint") + xmllint = shutil.which("xmllint") if xmllint is None: logger.warning("Couldn't find xmllint. Skipping schema check") else: diff --git a/CIME/SystemTests/README b/CIME/SystemTests/README index 31ee7c4f3e3..61d0eec7f40 100644 --- a/CIME/SystemTests/README +++ b/CIME/SystemTests/README @@ -47,7 +47,7 @@ ERP pes counts hybrid (open-MP/MPI) restart bfb test from startup, default 6 do an 11 day initial test - write a restart at day 6 (suffix base) half the number of tasks and threads for each component do a 5 day restart test starting from restart at day 6 (suffix rest) - this is just like an ERS test but the pe-counts/threading count are modified on retart + this is just like an ERS test but the pe-counts/threading count are modified on restart ERI hybrid/branch/exact restart test, default (by default STOP_N is 22 days) (1) ref1case diff --git a/CIME/SystemTests/dae.py b/CIME/SystemTests/dae.py index 2b0b58c4b4e..175254d2d1b 100644 --- a/CIME/SystemTests/dae.py +++ b/CIME/SystemTests/dae.py @@ -27,7 +27,7 @@ class DAE(SystemTestsCompareTwo): """ ########################################################################### - def __init__(self, case): + def __init__(self, case, **kwargs): ########################################################################### SystemTestsCompareTwo.__init__( self, @@ -36,6 +36,7 @@ def __init__(self, case): run_two_suffix="da", run_one_description="no data assimilation", run_two_description="data assimilation", + **kwargs, ) 
########################################################################### diff --git a/CIME/SystemTests/eri.py b/CIME/SystemTests/eri.py index 0bcf4466646..272a3881add 100644 --- a/CIME/SystemTests/eri.py +++ b/CIME/SystemTests/eri.py @@ -38,11 +38,11 @@ def _helper(dout_sr, refdate, refsec, rundir): class ERI(SystemTestsCommon): - def __init__(self, case): + def __init__(self, case, **kwargs): """ initialize an object interface to the ERI system test """ - SystemTestsCommon.__init__(self, case) + SystemTestsCommon.__init__(self, case, **kwargs) self._testname = "ERI" def run_phase(self): diff --git a/CIME/SystemTests/erio.py b/CIME/SystemTests/erio.py index f9de01a8b27..a1e7b041cc6 100644 --- a/CIME/SystemTests/erio.py +++ b/CIME/SystemTests/erio.py @@ -10,11 +10,11 @@ class ERIO(SystemTestsCommon): - def __init__(self, case): + def __init__(self, case, **kwargs): """ initialize an object interface to file env_test.xml in the case directory """ - SystemTestsCommon.__init__(self, case, expected=["TEST"]) + SystemTestsCommon.__init__(self, case, expected=["TEST"], **kwargs) self._pio_types = self._case.get_env("run").get_valid_values("PIO_TYPENAME") self._stop_n = self._case.get_value("STOP_N") diff --git a/CIME/SystemTests/erp.py b/CIME/SystemTests/erp.py index 8f347fe6eee..6d58248c138 100644 --- a/CIME/SystemTests/erp.py +++ b/CIME/SystemTests/erp.py @@ -15,7 +15,7 @@ class ERP(RestartTest): - def __init__(self, case): + def __init__(self, case, **kwargs): """ initialize a test object """ @@ -26,6 +26,7 @@ def __init__(self, case): run_two_suffix="rest", run_one_description="initial", run_two_description="restart", + **kwargs ) def _case_two_setup(self): @@ -41,10 +42,6 @@ def _case_two_setup(self): self._case.set_value("ROOTPE_{}".format(comp), int(rootpe / 2)) RestartTest._case_two_setup(self) - self._case.case_setup(test_mode=True, reset=True) - # Note, some components, like CESM-CICE, have - # decomposition information in env_build.xml that - # needs to be 
regenerated for the above new tasks and thread counts def _case_one_custom_postrun_action(self): self.copy_case1_restarts_to_case2() diff --git a/CIME/SystemTests/err.py b/CIME/SystemTests/err.py index 4dd79a85aae..355ddd5d390 100644 --- a/CIME/SystemTests/err.py +++ b/CIME/SystemTests/err.py @@ -11,7 +11,7 @@ class ERR(RestartTest): - def __init__(self, case): # pylint: disable=super-init-not-called + def __init__(self, case, **kwargs): # pylint: disable=super-init-not-called """ initialize an object interface to the ERR system test """ @@ -22,6 +22,7 @@ def __init__(self, case): # pylint: disable=super-init-not-called run_one_description="initial", run_two_description="restart", multisubmit=True, + **kwargs ) def _case_one_setup(self): diff --git a/CIME/SystemTests/erri.py b/CIME/SystemTests/erri.py index 8cec2b149ce..7851bd4bb66 100644 --- a/CIME/SystemTests/erri.py +++ b/CIME/SystemTests/erri.py @@ -12,11 +12,11 @@ class ERRI(ERR): - def __init__(self, case): + def __init__(self, case, **kwargs): """ initialize an object interface to the ERU system test """ - ERR.__init__(self, case) + ERR.__init__(self, case, **kwargs) def _case_two_custom_postrun_action(self): rundir = self._case.get_value("RUNDIR") diff --git a/CIME/SystemTests/ers.py b/CIME/SystemTests/ers.py index df5daea488c..bebed8f04c4 100644 --- a/CIME/SystemTests/ers.py +++ b/CIME/SystemTests/ers.py @@ -9,11 +9,11 @@ class ERS(SystemTestsCommon): - def __init__(self, case): + def __init__(self, case, **kwargs): """ initialize an object interface to the ERS system test """ - SystemTestsCommon.__init__(self, case) + SystemTestsCommon.__init__(self, case, **kwargs) def _ers_first_phase(self): stop_n = self._case.get_value("STOP_N") diff --git a/CIME/SystemTests/ers2.py b/CIME/SystemTests/ers2.py index e65f703e36e..63a10399b49 100644 --- a/CIME/SystemTests/ers2.py +++ b/CIME/SystemTests/ers2.py @@ -8,11 +8,11 @@ class ERS2(SystemTestsCommon): - def __init__(self, case): + def __init__(self, case, 
**kwargs): """ initialize an object interface to the ERS2 system test """ - SystemTestsCommon.__init__(self, case) + SystemTestsCommon.__init__(self, case, **kwargs) def _ers2_first_phase(self): stop_n = self._case.get_value("STOP_N") diff --git a/CIME/SystemTests/ert.py b/CIME/SystemTests/ert.py index 36366395190..b912f7248b7 100644 --- a/CIME/SystemTests/ert.py +++ b/CIME/SystemTests/ert.py @@ -10,11 +10,11 @@ class ERT(SystemTestsCommon): - def __init__(self, case): + def __init__(self, case, **kwargs): """ initialize an object interface to the ERT system test """ - SystemTestsCommon.__init__(self, case) + SystemTestsCommon.__init__(self, case, **kwargs) def _ert_first_phase(self): diff --git a/CIME/SystemTests/funit.py b/CIME/SystemTests/funit.py index 193c485433e..1ebaf720604 100644 --- a/CIME/SystemTests/funit.py +++ b/CIME/SystemTests/funit.py @@ -12,11 +12,11 @@ class FUNIT(SystemTestsCommon): - def __init__(self, case): + def __init__(self, case, **kwargs): """ initialize an object interface to the FUNIT system test """ - SystemTestsCommon.__init__(self, case) + SystemTestsCommon.__init__(self, case, **kwargs) case.load_env() def build_phase(self, sharedlib_only=False, model_only=False): diff --git a/CIME/SystemTests/homme.py b/CIME/SystemTests/homme.py index 6161c2e46be..597be0b9a09 100644 --- a/CIME/SystemTests/homme.py +++ b/CIME/SystemTests/homme.py @@ -2,6 +2,6 @@ class HOMME(HommeBase): - def __init__(self, case): - HommeBase.__init__(self, case) + def __init__(self, case, **kwargs): + HommeBase.__init__(self, case, **kwargs) self.cmakesuffix = "" diff --git a/CIME/SystemTests/hommebaseclass.py b/CIME/SystemTests/hommebaseclass.py index 5c29fce7533..bad27d4aa56 100644 --- a/CIME/SystemTests/hommebaseclass.py +++ b/CIME/SystemTests/hommebaseclass.py @@ -14,11 +14,11 @@ class HommeBase(SystemTestsCommon): - def __init__(self, case): + def __init__(self, case, **kwargs): """ initialize an object interface to the SMS system test """ - 
SystemTestsCommon.__init__(self, case) + SystemTestsCommon.__init__(self, case, **kwargs) case.load_env() self.csnd = "not defined" self.cmakesuffix = self.csnd diff --git a/CIME/SystemTests/hommebfb.py b/CIME/SystemTests/hommebfb.py index 7cd6b370222..87e566bf918 100644 --- a/CIME/SystemTests/hommebfb.py +++ b/CIME/SystemTests/hommebfb.py @@ -2,6 +2,6 @@ class HOMMEBFB(HommeBase): - def __init__(self, case): - HommeBase.__init__(self, case) + def __init__(self, case, **kwargs): + HommeBase.__init__(self, case, **kwargs) self.cmakesuffix = "-bfb" diff --git a/CIME/SystemTests/icp.py b/CIME/SystemTests/icp.py index f0e3988774c..8d8c5e0ea59 100644 --- a/CIME/SystemTests/icp.py +++ b/CIME/SystemTests/icp.py @@ -6,11 +6,11 @@ class ICP(SystemTestsCommon): - def __init__(self, case): + def __init__(self, case, **kwargs): """ initialize an object interface to file env_test.xml in the case directory """ - SystemTestsCommon.__init__(self, case) + SystemTestsCommon.__init__(self, case, **kwargs) def build_phase(self, sharedlib_only=False, model_only=False): self._case.set_value("CICE_AUTO_DECOMP", "false") diff --git a/CIME/SystemTests/irt.py b/CIME/SystemTests/irt.py index adda8b235ff..1f3637eb5a0 100644 --- a/CIME/SystemTests/irt.py +++ b/CIME/SystemTests/irt.py @@ -19,7 +19,7 @@ class IRT(RestartTest): - def __init__(self, case): + def __init__(self, case, **kwargs): RestartTest.__init__( self, case, @@ -28,6 +28,7 @@ def __init__(self, case): run_one_description="initial", run_two_description="restart", multisubmit=False, + **kwargs ) self._skip_pnl = False diff --git a/CIME/SystemTests/ldsta.py b/CIME/SystemTests/ldsta.py index f7a4a2b4729..a5f7c9196d5 100644 --- a/CIME/SystemTests/ldsta.py +++ b/CIME/SystemTests/ldsta.py @@ -30,11 +30,11 @@ def _date_to_datetime(date_obj): class LDSTA(SystemTestsCommon): - def __init__(self, case): + def __init__(self, case, **kwargs): """ initialize an object interface to the SMS system test """ - SystemTestsCommon.__init__(self, 
case) + SystemTestsCommon.__init__(self, case, **kwargs) def run_phase(self): archive_dir = self._case.get_value("DOUT_S_ROOT") diff --git a/CIME/SystemTests/mcc.py b/CIME/SystemTests/mcc.py index 4d47bf2c318..a4b839cf1e9 100644 --- a/CIME/SystemTests/mcc.py +++ b/CIME/SystemTests/mcc.py @@ -11,7 +11,7 @@ class MCC(SystemTestsCompareTwo): - def __init__(self, case): + def __init__(self, case, **kwargs): self._comp_classes = [] self._test_instances = 3 SystemTestsCompareTwo.__init__( @@ -21,6 +21,7 @@ def __init__(self, case): run_two_suffix="single_instance", run_two_description="single instance", run_one_description="multi driver", + **kwargs ) def _case_one_setup(self): diff --git a/CIME/SystemTests/mvk.py b/CIME/SystemTests/mvk.py index 39b4fcb6539..2ab2f72cd33 100644 --- a/CIME/SystemTests/mvk.py +++ b/CIME/SystemTests/mvk.py @@ -28,11 +28,11 @@ class MVK(SystemTestsCommon): - def __init__(self, case): + def __init__(self, case, **kwargs): """ initialize an object interface to the MVK test """ - SystemTestsCommon.__init__(self, case) + SystemTestsCommon.__init__(self, case, **kwargs) if self._case.get_value("MODEL") == "e3sm": self.component = "eam" diff --git a/CIME/SystemTests/nck.py b/CIME/SystemTests/nck.py index af0a2d0c5e6..f75a2914215 100644 --- a/CIME/SystemTests/nck.py +++ b/CIME/SystemTests/nck.py @@ -15,7 +15,7 @@ class NCK(SystemTestsCompareTwo): - def __init__(self, case): + def __init__(self, case, **kwargs): self._comp_classes = [] SystemTestsCompareTwo.__init__( self, @@ -24,6 +24,7 @@ def __init__(self, case): run_two_suffix="multiinst", run_one_description="one instance", run_two_description="two instances", + **kwargs, ) def _common_setup(self): @@ -60,4 +61,3 @@ def _case_two_setup(self): if rootpe > 1: self._case.set_value("ROOTPE_{}".format(comp), int(rootpe - ntasks)) self._case.set_value("NTASKS_{}".format(comp), ntasks * 2) - self._case.case_setup(test_mode=True, reset=True) diff --git a/CIME/SystemTests/ncr.py b/CIME/SystemTests/ncr.py 
index a1cc7d3bad5..f0de168ac13 100644 --- a/CIME/SystemTests/ncr.py +++ b/CIME/SystemTests/ncr.py @@ -15,7 +15,7 @@ class NCR(SystemTestsCompareTwo): - def __init__(self, case): + def __init__(self, case, **kwargs): """ initialize an NCR test """ @@ -26,6 +26,7 @@ def __init__(self, case): run_two_suffix="singleinst", run_one_description="two instances, each with the same number of tasks", run_two_description="default build", + **kwargs ) def _comp_classes(self): diff --git a/CIME/SystemTests/nodefail.py b/CIME/SystemTests/nodefail.py index c770fc292bc..d975cfc5bfd 100644 --- a/CIME/SystemTests/nodefail.py +++ b/CIME/SystemTests/nodefail.py @@ -9,11 +9,11 @@ class NODEFAIL(ERS): - def __init__(self, case): + def __init__(self, case, **kwargs): """ initialize an object interface to the ERS system test """ - ERS.__init__(self, case) + ERS.__init__(self, case, **kwargs) self._fail_sentinel = os.path.join(case.get_value("RUNDIR"), "FAIL_SENTINEL") self._fail_str = case.get_value("NODE_FAIL_REGEX") @@ -24,7 +24,7 @@ def _restart_fake_phase(self): exeroot = self._case.get_value("EXEROOT") driver = self._case.get_value("COMP_INTERFACE") if driver == "nuopc": - logname = "drv" + logname = "med" else: logname = "cpl" fake_exe = """#!/bin/bash diff --git a/CIME/SystemTests/pea.py b/CIME/SystemTests/pea.py index b20c3abd4e7..4fb3a4569ca 100644 --- a/CIME/SystemTests/pea.py +++ b/CIME/SystemTests/pea.py @@ -14,7 +14,7 @@ class PEA(SystemTestsCompareTwo): - def __init__(self, case): + def __init__(self, case, **kwargs): SystemTestsCompareTwo.__init__( self, case, @@ -22,6 +22,7 @@ def __init__(self, case): run_two_suffix="mpi-serial", run_one_description="default mpi library", run_two_description="mpi-serial", + **kwargs, ) def _common_setup(self): @@ -47,4 +48,3 @@ def _case_two_setup(self): if os.path.isfile("Macros"): os.remove("Macros") - self._case.case_setup(test_mode=True, reset=True) diff --git a/CIME/SystemTests/pem.py b/CIME/SystemTests/pem.py index 
f74f3f93e55..d98f9e4a2c7 100644 --- a/CIME/SystemTests/pem.py +++ b/CIME/SystemTests/pem.py @@ -15,14 +15,21 @@ class PEM(SystemTestsCompareTwo): - def __init__(self, case): + def __init__(self, case, **kwargs): + build_separately = False + # cice, pop require separate builds + comps = case.get_compset_components() + if "cice" in comps or "pop" in comps: + build_separately = True + SystemTestsCompareTwo.__init__( self, case, - separate_builds=True, + separate_builds=build_separately, run_two_suffix="modpes", run_one_description="default pe counts", run_two_description="halved pe counts", + **kwargs ) def _case_one_setup(self): @@ -35,4 +42,3 @@ def _case_two_setup(self): if ntasks > 1: self._case.set_value("NTASKS_{}".format(comp), int(ntasks / 2)) self._case.set_value("ROOTPE_{}".format(comp), int(rootpe / 2)) - self._case.case_setup(test_mode=True, reset=True) diff --git a/CIME/SystemTests/pet.py b/CIME/SystemTests/pet.py index fcf108bd28c..432d7c99303 100644 --- a/CIME/SystemTests/pet.py +++ b/CIME/SystemTests/pet.py @@ -13,7 +13,7 @@ class PET(SystemTestsCompareTwo): - def __init__(self, case): + def __init__(self, case, **kwargs): """ initialize a test object """ @@ -25,6 +25,7 @@ def __init__(self, case): run_two_suffix="single_thread", run_one_description="default threading", run_two_description="threads set to 1", + **kwargs ) def _case_one_setup(self): @@ -33,12 +34,7 @@ def _case_one_setup(self): if self._case.get_value("NTHRDS_{}".format(comp)) <= 1: self._case.set_value("NTHRDS_{}".format(comp), 2) - # Need to redo case_setup because we may have changed the number of threads - def _case_two_setup(self): # Do a run with all threads set to 1 for comp in self._case.get_values("COMP_CLASSES"): self._case.set_value("NTHRDS_{}".format(comp), 1) - - # Need to redo case_setup because we may have changed the number of threads - self._case.case_setup(reset=True, test_mode=True) diff --git a/CIME/SystemTests/pfs.py b/CIME/SystemTests/pfs.py index 
32bdbe08002..ed61d204e8a 100644 --- a/CIME/SystemTests/pfs.py +++ b/CIME/SystemTests/pfs.py @@ -11,11 +11,11 @@ class PFS(SystemTestsCommon): - def __init__(self, case): + def __init__(self, case, **kwargs): """ initialize an object interface to the PFS system test """ - SystemTestsCommon.__init__(self, case) + SystemTestsCommon.__init__(self, case, **kwargs) def run_phase(self): logger.info("doing an 20 day initial test, no restarts written") diff --git a/CIME/SystemTests/pgn.py b/CIME/SystemTests/pgn.py index e2cd66b7abd..ac68773aeb4 100644 --- a/CIME/SystemTests/pgn.py +++ b/CIME/SystemTests/pgn.py @@ -45,16 +45,18 @@ ] ) FCLD_NC = "cam.h0.cloud.nc" -INIT_COND_FILE_TEMPLATE = "20210915.v2.ne4_oQU240.F2010.{}.{}.0002-{:02d}-01-00000.nc" +INIT_COND_FILE_TEMPLATE = ( + "20231105.v3b01.F2010.ne4_oQU240.chrysalis.{}.{}.0002-{:02d}-01-00000.nc" +) INSTANCE_FILE_TEMPLATE = "{}{}_{:04d}.h0.0001-01-01-00000{}.nc" class PGN(SystemTestsCommon): - def __init__(self, case): + def __init__(self, case, **kwargs): """ initialize an object interface to the PGN test """ - super(PGN, self).__init__(case) + super(PGN, self).__init__(case, **kwargs) if self._case.get_value("MODEL") == "e3sm": self.atmmod = "eam" self.lndmod = "elm" @@ -95,8 +97,8 @@ def build_phase(self, sharedlib_only=False, model_only=False): logger.debug("PGN_INFO: Updating user_nl_* files") csmdata_root = self._case.get_value("DIN_LOC_ROOT") - csmdata_atm = os.path.join(csmdata_root, "atm/cam/inic/homme/ne4_v2_init") - csmdata_lnd = os.path.join(csmdata_root, "lnd/clm2/initdata/ne4_oQU240_v2_init") + csmdata_atm = os.path.join(csmdata_root, "atm/cam/inic/homme/ne4_v3_init") + csmdata_lnd = os.path.join(csmdata_root, "lnd/clm2/initdata/ne4_oQU240_v3_init") iinst = 1 for icond in range(1, NUMBER_INITIAL_CONDITIONS + 1): diff --git a/CIME/SystemTests/pre.py b/CIME/SystemTests/pre.py index 54512a00660..23547d46430 100644 --- a/CIME/SystemTests/pre.py +++ b/CIME/SystemTests/pre.py @@ -25,7 +25,7 @@ class 
PRE(SystemTestsCompareTwo): """ ########################################################################### - def __init__(self, case): + def __init__(self, case, **kwargs): ########################################################################### SystemTestsCompareTwo.__init__( self, @@ -34,6 +34,7 @@ def __init__(self, case): run_two_suffix="pr", run_one_description="no pause/resume", run_two_description="pause/resume", + **kwargs ) self._stopopt = "" self._stopn = 0 diff --git a/CIME/SystemTests/rep.py b/CIME/SystemTests/rep.py index 60f6b473ea9..367409ac3fa 100644 --- a/CIME/SystemTests/rep.py +++ b/CIME/SystemTests/rep.py @@ -8,9 +8,9 @@ class REP(SystemTestsCompareTwo): - def __init__(self, case): + def __init__(self, case, **kwargs): SystemTestsCompareTwo.__init__( - self, case, separate_builds=False, run_two_suffix="rep2" + self, case, separate_builds=False, run_two_suffix="rep2", **kwargs ) def _case_one_setup(self): diff --git a/CIME/SystemTests/restart_tests.py b/CIME/SystemTests/restart_tests.py index 31d1be32181..5faf2252d1b 100644 --- a/CIME/SystemTests/restart_tests.py +++ b/CIME/SystemTests/restart_tests.py @@ -18,6 +18,7 @@ def __init__( run_one_description="initial", run_two_description="restart", multisubmit=False, + **kwargs ): SystemTestsCompareTwo.__init__( self, @@ -27,6 +28,7 @@ def __init__( run_one_description=run_one_description, run_two_description=run_two_description, multisubmit=multisubmit, + **kwargs ) def _case_one_setup(self): diff --git a/CIME/SystemTests/reuseinitfiles.py b/CIME/SystemTests/reuseinitfiles.py index 5f2567f6c70..76d8bb0522e 100644 --- a/CIME/SystemTests/reuseinitfiles.py +++ b/CIME/SystemTests/reuseinitfiles.py @@ -20,7 +20,7 @@ class REUSEINITFILES(SystemTestsCompareTwo): - def __init__(self, case): + def __init__(self, case, **kwargs): SystemTestsCompareTwo.__init__( self, case, @@ -32,6 +32,7 @@ def __init__(self, case): # init_generated_files from case1 and then need to make sure they are NOT # deleted like 
is normally done for tests: case_two_keep_init_generated_files=True, + **kwargs ) def _case_one_setup(self): diff --git a/CIME/SystemTests/seq.py b/CIME/SystemTests/seq.py index 0a51d50d283..7413f900899 100644 --- a/CIME/SystemTests/seq.py +++ b/CIME/SystemTests/seq.py @@ -8,7 +8,7 @@ class SEQ(SystemTestsCompareTwo): - def __init__(self, case): + def __init__(self, case, **kwargs): """ initialize an object interface to file env_test.xml in the case directory """ @@ -19,6 +19,7 @@ def __init__(self, case): run_two_suffix="seq", run_one_description="base", run_two_description="sequence", + **kwargs ) def _case_one_setup(self): @@ -48,4 +49,3 @@ def _case_two_setup(self): rootpe += newntasks self._case.flush() - self._case.case_setup(test_mode=True, reset=True) diff --git a/CIME/SystemTests/sms.py b/CIME/SystemTests/sms.py index 09722caa3d5..17672b47052 100644 --- a/CIME/SystemTests/sms.py +++ b/CIME/SystemTests/sms.py @@ -10,8 +10,8 @@ class SMS(SystemTestsCommon): - def __init__(self, case): + def __init__(self, case, **kwargs): """ initialize an object interface to the SMS system test """ - SystemTestsCommon.__init__(self, case) + SystemTestsCommon.__init__(self, case, **kwargs) diff --git a/CIME/SystemTests/system_tests_common.py b/CIME/SystemTests/system_tests_common.py index 5eaa4ef02d9..236b34d0301 100644 --- a/CIME/SystemTests/system_tests_common.py +++ b/CIME/SystemTests/system_tests_common.py @@ -13,6 +13,7 @@ expect, get_current_commit, SharedArea, + is_comp_standalone, ) from CIME.test_status import * from CIME.hist_utils import ( @@ -26,9 +27,18 @@ from CIME.config import Config from CIME.provenance import save_test_time, get_test_success from CIME.locked_files import LOCKED_DIR, lock_file, is_locked +from CIME.baselines.performance import ( + get_latest_cpl_logs, + perf_get_memory_list, + perf_compare_memory_baseline, + perf_compare_throughput_baseline, + perf_write_baseline, + load_coupler_customization, +) import CIME.build as build import glob, gzip, 
time, traceback, os +from contextlib import ExitStack logger = logging.getLogger(__name__) @@ -36,8 +46,55 @@ INIT_GENERATED_FILES_DIRNAME = "init_generated_files" +def fix_single_exe_case(case): + """Fixes cases created with --single-exe. + + When tests are created using --single-exe, the test_scheduler will set + `BUILD_COMPLETE` to True, but some tests require calls to `case.case_setup` + which can resets `BUILD_COMPLETE` to false. This function will check if a + case was created with `--single-exe` and ensure `BUILD_COMPLETE` is True. + + Returns: + True when case required modification otherwise False. + """ + if is_single_exe_case(case): + with ExitStack() as stack: + # enter context if case is still read-only, entering the context + # multiple times can cause side effects for later calls to + # `set_value` when it's assumed the cause is writeable. + if case._read_only_mode: + stack.enter_context(case) + + case.set_value("BUILD_COMPLETE", True) + + return True + + return False + + +def is_single_exe_case(case): + """Determines if the case was created with the --single-exe option. + + If `CASEROOT` is not part of `EXEROOT` and the `TEST` variable is True, + then its safe to assume the case was created with `./create_test` + and the `--single-exe` option. + + Returns: + True when the case was created with `--single-exe` otherwise false. + """ + caseroot = case.get_value("CASEROOT") + + exeroot = case.get_value("EXEROOT") + + test = case.get_value("TEST") + + return caseroot not in exeroot and test + + class SystemTestsCommon(object): - def __init__(self, case, expected=None): + def __init__( + self, case, expected=None, **kwargs + ): # pylint: disable=unused-argument """ initialize a CIME system test object, if the locked env_run.orig.xml does not exist copy the current env_run.xml file. 
If it does exist restore values @@ -54,7 +111,7 @@ def __init__(self, case, expected=None): self._init_locked_files(caseroot, expected) self._skip_pnl = False self._cpllog = ( - "drv" if self._case.get_value("COMP_INTERFACE") == "nuopc" else "cpl" + "med" if self._case.get_value("COMP_INTERFACE") == "nuopc" else "cpl" ) self._ninja = False self._dry_run = False @@ -97,6 +154,7 @@ def _resetup_case(self, phase, reset=False): ) self._case.set_initial_test_values() self._case.case_setup(reset=True, test_mode=True) + fix_single_exe_case(self._case) def build( self, @@ -105,6 +163,7 @@ def build( ninja=False, dry_run=False, separate_builds=False, + skip_submit=False, ): """ Do NOT override this method, this method is the framework that @@ -115,6 +174,9 @@ def build( self._ninja = ninja self._dry_run = dry_run self._user_separate_builds = separate_builds + + was_run_pend = self._test_status.current_is(RUN_PHASE, TEST_PEND_STATUS) + for phase_name, phase_bool in [ (SHAREDLIB_BUILD_PHASE, not model_only), (MODEL_BUILD_PHASE, not sharedlib_only), @@ -153,6 +215,15 @@ def build( comments=("time={:d}".format(int(time_taken))), ) + # Building model while job is queued and awaiting run + if ( + skip_submit + and was_run_pend + and self._test_status.current_is(SUBMIT_PHASE, TEST_PEND_STATUS) + ): + with self._test_status: + self._test_status.set_status(SUBMIT_PHASE, TEST_PASS_STATUS) + return success def build_phase(self, sharedlib_only=False, model_only=False): @@ -219,10 +290,12 @@ def run(self, skip_pnl=False): if self._case.get_value("COMPARE_BASELINE"): if do_baseline_ops: self._phase_modifying_call(BASELINE_PHASE, self._compare_baseline) - self._phase_modifying_call(MEMCOMP_PHASE, self._compare_memory) - self._phase_modifying_call( - THROUGHPUT_PHASE, self._compare_throughput - ) + comp_standalone, _ = is_comp_standalone(self._case) + if not comp_standalone: + self._phase_modifying_call(MEMCOMP_PHASE, self._compare_memory) + self._phase_modifying_call( + THROUGHPUT_PHASE, 
self._compare_throughput + ) else: with self._test_status: self._test_status.set_status(BASELINE_PHASE, TEST_PEND_STATUS) @@ -379,6 +452,15 @@ def run_indv( stop_option = self._case.get_value("STOP_OPTION") run_type = self._case.get_value("RUN_TYPE") rundir = self._case.get_value("RUNDIR") + try: + self._case.check_all_input_data() + except CIMEError: + caseroot = self._case.get_value("CASEROOT") + raise CIMEError( + "Could not find all inputdata on any server, try " + "manually running `./check_input_data --download " + f"--verbose` from {caseroot!r}." + ) from None if submit_resubmits is None: do_resub = self._case.get_value("BATCH_SYSTEM") != "none" else: @@ -422,7 +504,7 @@ def run_indv( self._case.case_st_archive(resubmit=True) def _coupler_log_indicates_run_complete(self): - newestcpllogfiles = self._get_latest_cpl_logs() + newestcpllogfiles = get_latest_cpl_logs(self._case) logger.debug("Latest Coupler log file(s) {}".format(newestcpllogfiles)) # Exception is raised if the file is not compressed allgood = len(newestcpllogfiles) @@ -440,7 +522,8 @@ def _coupler_log_indicates_run_complete(self): return allgood == 0 def _component_compare_copy(self, suffix): - comments, num_copied = copy_histfiles(self._case, suffix) + # Only match .nc files + comments, num_copied = copy_histfiles(self._case, suffix, match_suffix="nc") self._expected_num_cmp = num_copied append_testlog(comments, self._orig_caseroot) @@ -526,43 +609,6 @@ def _st_archive_case_test(self): else: self._test_status.set_status(STARCHIVE_PHASE, TEST_FAIL_STATUS) - def _get_mem_usage(self, cpllog): - """ - Examine memory usage as recorded in the cpl log file and look for unexpected - increases. 
- """ - memlist = [] - meminfo = re.compile( - r".*model date =\s+(\w+).*memory =\s+(\d+\.?\d+).*highwater" - ) - if cpllog is not None and os.path.isfile(cpllog): - if ".gz" == cpllog[-3:]: - fopen = gzip.open - else: - fopen = open - with fopen(cpllog, "rb") as f: - for line in f: - m = meminfo.match(line.decode("utf-8")) - if m: - memlist.append((float(m.group(1)), float(m.group(2)))) - # Remove the last mem record, it's sometimes artificially high - if len(memlist) > 0: - memlist.pop() - return memlist - - def _get_throughput(self, cpllog): - """ - Examine memory usage as recorded in the cpl log file and look for unexpected - increases. - """ - if cpllog is not None and os.path.isfile(cpllog): - with gzip.open(cpllog, "rb") as f: - cpltext = f.read().decode("utf-8") - m = re.search(r"# simulated years / cmp-day =\s+(\d+\.\d+)\s", cpltext) - if m: - return float(m.group(1)) - return None - def _phase_modifying_call(self, phase, function): """ Ensures that unexpected exceptions from phases will result in a FAIL result @@ -589,47 +635,29 @@ def _check_for_memleak(self): Examine memory usage as recorded in the cpl log file and look for unexpected increases. 
""" + config = load_coupler_customization(self._case) + + # default to 0.1 + tolerance = self._case.get_value("TEST_MEMLEAK_TOLERANCE") or 0.1 + + expect(tolerance > 0.0, "Bad value for memleak tolerance in test") + with self._test_status: - latestcpllogs = self._get_latest_cpl_logs() - for cpllog in latestcpllogs: - memlist = self._get_mem_usage(cpllog) - - if len(memlist) < 3: - self._test_status.set_status( - MEMLEAK_PHASE, - TEST_PASS_STATUS, - comments="insuffiencient data for memleak test", - ) - else: - finaldate = int(memlist[-1][0]) - originaldate = int( - memlist[1][0] - ) # skip first day mem record, it can be too low while initializing - finalmem = float(memlist[-1][1]) - originalmem = float(memlist[1][1]) - memdiff = -1 - if originalmem > 0: - memdiff = (finalmem - originalmem) / originalmem - tolerance = self._case.get_value("TEST_MEMLEAK_TOLERANCE") - if tolerance is None: - tolerance = 0.1 - expect(tolerance > 0.0, "Bad value for memleak tolerance in test") - if memdiff < 0: - self._test_status.set_status( - MEMLEAK_PHASE, - TEST_PASS_STATUS, - comments="data for memleak test is insuffiencient", - ) - elif memdiff < tolerance: - self._test_status.set_status(MEMLEAK_PHASE, TEST_PASS_STATUS) - else: - comment = "memleak detected, memory went from {:f} to {:f} in {:d} days".format( - originalmem, finalmem, finaldate - originaldate - ) - append_testlog(comment, self._orig_caseroot) - self._test_status.set_status( - MEMLEAK_PHASE, TEST_FAIL_STATUS, comments=comment - ) + try: + memleak, comment = config.perf_check_for_memory_leak( + self._case, tolerance + ) + except AttributeError: + memleak, comment = perf_check_for_memory_leak(self._case, tolerance) + + if memleak: + append_testlog(comment, self._orig_caseroot) + + status = TEST_FAIL_STATUS + else: + status = TEST_PASS_STATUS + + self._test_status.set_status(MEMLEAK_PHASE, status, comments=comment) def compare_env_run(self, expected=None): """ @@ -656,121 +684,64 @@ def compare_env_run(self, 
expected=None): return False return True - def _get_latest_cpl_logs(self): + def _compare_memory(self): """ - find and return the latest cpl log file in the run directory + Compares current test memory usage to baseline. """ - coupler_log_path = self._case.get_value("RUNDIR") - cpllogs = glob.glob( - os.path.join(coupler_log_path, "{}*.log.*".format(self._cpllog)) - ) - lastcpllogs = [] - if cpllogs: - lastcpllogs.append(max(cpllogs, key=os.path.getctime)) - basename = os.path.basename(lastcpllogs[0]) - suffix = basename.split(".", 1)[1] - for log in cpllogs: - if log in lastcpllogs: - continue - - if log.endswith(suffix): - lastcpllogs.append(log) + with self._test_status: + try: + below_tolerance, comment = perf_compare_memory_baseline(self._case) + except Exception as e: + logger.info("Failed to compare memory usage baseline: {!s}".format(e)) - return lastcpllogs + self._test_status.set_status( + MEMCOMP_PHASE, TEST_FAIL_STATUS, comments=str(e) + ) + else: + if below_tolerance is not None: + append_testlog(comment, self._orig_caseroot) - def _compare_memory(self): - with self._test_status: - # compare memory usage to baseline - baseline_name = self._case.get_value("BASECMP_CASE") - basecmp_dir = os.path.join( - self._case.get_value("BASELINE_ROOT"), baseline_name - ) - newestcpllogfiles = self._get_latest_cpl_logs() - if len(newestcpllogfiles) > 0: - memlist = self._get_mem_usage(newestcpllogfiles[0]) - for cpllog in newestcpllogfiles: - m = re.search(r"/({}.*.log).*.gz".format(self._cpllog), cpllog) - if m is not None: - baselog = os.path.join(basecmp_dir, m.group(1)) + ".gz" - if baselog is None or not os.path.isfile(baselog): - # for backward compatibility - baselog = os.path.join(basecmp_dir, self._cpllog + ".log") - if os.path.isfile(baselog) and len(memlist) > 3: - blmem = self._get_mem_usage(baselog) - blmem = 0 if blmem == [] else blmem[-1][1] - curmem = memlist[-1][1] - diff = 0.0 if blmem == 0 else (curmem - blmem) / blmem - tolerance = 
self._case.get_value("TEST_MEMLEAK_TOLERANCE") - if tolerance is None: - tolerance = 0.1 if ( - diff < tolerance + below_tolerance and self._test_status.get_status(MEMCOMP_PHASE) is None ): self._test_status.set_status(MEMCOMP_PHASE, TEST_PASS_STATUS) elif ( self._test_status.get_status(MEMCOMP_PHASE) != TEST_FAIL_STATUS ): - comment = "Error: Memory usage increase >{:d}% from baseline's {:f} to {:f}".format( - int(tolerance * 100), blmem, curmem - ) self._test_status.set_status( MEMCOMP_PHASE, TEST_FAIL_STATUS, comments=comment ) - append_testlog(comment, self._orig_caseroot) def _compare_throughput(self): + """ + Compares current test throughput to baseline. + """ with self._test_status: - # compare memory usage to baseline - baseline_name = self._case.get_value("BASECMP_CASE") - basecmp_dir = os.path.join( - self._case.get_value("BASELINE_ROOT"), baseline_name - ) - newestcpllogfiles = self._get_latest_cpl_logs() - for cpllog in newestcpllogfiles: - m = re.search(r"/({}.*.log).*.gz".format(self._cpllog), cpllog) - if m is not None: - baselog = os.path.join(basecmp_dir, m.group(1)) + ".gz" - if baselog is None or not os.path.isfile(baselog): - # for backward compatibility - baselog = os.path.join(basecmp_dir, self._cpllog) - - if os.path.isfile(baselog): - # compare throughput to baseline - current = self._get_throughput(cpllog) - baseline = self._get_throughput(baselog) - # comparing ypd so bigger is better - if baseline is not None and current is not None: - diff = (baseline - current) / baseline - tolerance = self._case.get_value("TEST_TPUT_TOLERANCE") - if tolerance is None: - tolerance = 0.1 - expect( - tolerance > 0.0, - "Bad value for throughput tolerance in test", - ) - comment = "TPUTCOMP: Computation time changed by {:.2f}% relative to baseline".format( - diff * 100 + try: + below_tolerance, comment = perf_compare_throughput_baseline(self._case) + except Exception as e: + logger.info("Failed to compare throughput baseline: {!s}".format(e)) + + 
self._test_status.set_status( + THROUGHPUT_PHASE, TEST_FAIL_STATUS, comments=str(e) + ) + else: + if below_tolerance is not None: + append_testlog(comment, self._orig_caseroot) + + if ( + below_tolerance + and self._test_status.get_status(THROUGHPUT_PHASE) is None + ): + self._test_status.set_status(THROUGHPUT_PHASE, TEST_PASS_STATUS) + elif ( + self._test_status.get_status(THROUGHPUT_PHASE) + != TEST_FAIL_STATUS + ): + self._test_status.set_status( + THROUGHPUT_PHASE, TEST_FAIL_STATUS, comments=comment ) - append_testlog(comment, self._orig_caseroot) - if ( - diff < tolerance - and self._test_status.get_status(THROUGHPUT_PHASE) is None - ): - self._test_status.set_status( - THROUGHPUT_PHASE, TEST_PASS_STATUS - ) - elif ( - self._test_status.get_status(THROUGHPUT_PHASE) - != TEST_FAIL_STATUS - ): - comment = "Error: TPUTCOMP: Computation time increase > {:d}% from baseline".format( - int(tolerance * 100) - ) - self._test_status.set_status( - THROUGHPUT_PHASE, TEST_FAIL_STATUS, comments=comment - ) - append_testlog(comment, self._orig_caseroot) def _compare_baseline(self): """ @@ -812,18 +783,59 @@ def _generate_baseline(self): ) # copy latest cpl log to baseline # drop the date so that the name is generic - newestcpllogfiles = self._get_latest_cpl_logs() + newestcpllogfiles = get_latest_cpl_logs(self._case) with SharedArea(): + # TODO ever actually more than one cpl log? 
for cpllog in newestcpllogfiles: m = re.search(r"/({}.*.log).*.gz".format(self._cpllog), cpllog) + if m is not None: baselog = os.path.join(basegen_dir, m.group(1)) + ".gz" + safe_copy( cpllog, os.path.join(basegen_dir, baselog), preserve_meta=False, ) + perf_write_baseline(self._case, basegen_dir, cpllog) + + +def perf_check_for_memory_leak(case, tolerance): + leak = False + comment = "" + + latestcpllogs = get_latest_cpl_logs(case) + + for cpllog in latestcpllogs: + try: + memlist = perf_get_memory_list(case, cpllog) + except RuntimeError: + return False, "insufficient data for memleak test" + + # last day - second day, skip first day, can be too low while initializing + elapsed_days = int(memlist[-1][0]) - int(memlist[1][0]) + + finalmem, originalmem = float(memlist[-1][1]), float(memlist[1][1]) + + memdiff = -1 if originalmem <= 0 else (finalmem - originalmem) / originalmem + + if memdiff < 0: + leak = False + comment = "data for memleak test is insufficient" + elif memdiff < tolerance: + leak = False + comment = "" + else: + leak = True + comment = ( + "memleak detected, memory went from {:f} to {:f} in {:d} days".format( + originalmem, finalmem, elapsed_days + ) + ) + + return leak, comment + class FakeTest(SystemTestsCommon): """ @@ -834,8 +846,8 @@ class FakeTest(SystemTestsCommon): in utils.py will work with these classes. 
""" - def __init__(self, case, expected=None): - super(FakeTest, self).__init__(case, expected=expected) + def __init__(self, case, expected=None, **kwargs): + super(FakeTest, self).__init__(case, expected=expected, **kwargs) self._script = None self._requires_exe = False self._case._non_local = True @@ -1053,8 +1065,8 @@ def build_phase(self, sharedlib_only=False, model_only=False): class TESTBUILDFAILEXC(FakeTest): - def __init__(self, case): - FakeTest.__init__(self, case) + def __init__(self, case, **kwargs): + FakeTest.__init__(self, case, **kwargs) raise RuntimeError("Exception from init") diff --git a/CIME/SystemTests/system_tests_compare_n.py b/CIME/SystemTests/system_tests_compare_n.py index b9b53c8c561..5d7dc405304 100644 --- a/CIME/SystemTests/system_tests_compare_n.py +++ b/CIME/SystemTests/system_tests_compare_n.py @@ -40,7 +40,7 @@ """ from CIME.XML.standard_module_setup import * -from CIME.SystemTests.system_tests_common import SystemTestsCommon +from CIME.SystemTests.system_tests_common import SystemTestsCommon, fix_single_exe_case from CIME.case import Case from CIME.config import Config from CIME.test_status import * @@ -60,6 +60,8 @@ def __init__( run_descriptions=None, multisubmit=False, ignore_fieldlist_diffs=False, + dry_run=False, + **kwargs ): """ Initialize a SystemTestsCompareN object. Individual test cases that @@ -84,7 +86,7 @@ def __init__( the cases as identical. (This is needed for tests where one case exercises an option that produces extra diagnostic fields.) 
""" - SystemTestsCommon.__init__(self, case) + SystemTestsCommon.__init__(self, case, **kwargs) self._separate_builds = separate_builds self._ignore_fieldlist_diffs = ignore_fieldlist_diffs @@ -129,7 +131,8 @@ def __init__( self._cases[0] = self._case self._caseroots = self._get_caseroots() - self._setup_cases_if_not_yet_done() + if not dry_run: + self._setup_cases_if_not_yet_done() self._multisubmit = ( multisubmit and self._cases[0].get_value("BATCH_SYSTEM") != "none" @@ -504,6 +507,7 @@ def _setup_case(self, i): self._activate_case(i) self._common_setup() self._case_setup(i) + fix_single_exe_case(self._cases[i]) if i == 0: # Flush the case so that, if errors occur later, then at least base case is # in a correct, post-setup state. This is important because the mere @@ -516,6 +520,7 @@ def _setup_case(self, i): # This assures that case one namelists are populated # and creates the case.test script self._case.case_setup(test_mode=False, reset=True) + fix_single_exe_case(self._case) else: # Go back to base case to ensure that's where we are for any following code self._activate_case(0) diff --git a/CIME/SystemTests/system_tests_compare_two.py b/CIME/SystemTests/system_tests_compare_two.py index bdbe47ce6db..5eaac4948e1 100644 --- a/CIME/SystemTests/system_tests_compare_two.py +++ b/CIME/SystemTests/system_tests_compare_two.py @@ -24,6 +24,9 @@ (2) _case_two_setup This method will be called to set up case 2, the "test" case +Note that the base class will always call case_setup(reset=True) on +both case1 and case2 during setup. 
+ In addition, they MAY require the following methods: (1) _common_setup @@ -45,7 +48,7 @@ """ from CIME.XML.standard_module_setup import * -from CIME.SystemTests.system_tests_common import SystemTestsCommon +from CIME.SystemTests.system_tests_common import SystemTestsCommon, fix_single_exe_case from CIME.case import Case from CIME.config import Config from CIME.test_status import * @@ -66,6 +69,8 @@ def __init__( multisubmit=False, ignore_fieldlist_diffs=False, case_two_keep_init_generated_files=False, + dry_run=False, + **kwargs ): """ Initialize a SystemTestsCompareTwo object. Individual test cases that @@ -98,7 +103,7 @@ def __init__( is provided for the sake of specific tests, e.g., a test of the behavior of running with init_generated_files in place. """ - SystemTestsCommon.__init__(self, case) + SystemTestsCommon.__init__(self, case, **kwargs) self._separate_builds = separate_builds self._ignore_fieldlist_diffs = ignore_fieldlist_diffs @@ -136,7 +141,9 @@ def __init__( # _setup_cases_if_not_yet_done self._case2 = None - self._setup_cases_if_not_yet_done() + # Prevent additional setup_case calls when detecting support for `--single-exe` + if not dry_run: + self._setup_cases_if_not_yet_done() self._multisubmit = ( multisubmit and self._case1.get_value("BATCH_SYSTEM") != "none" @@ -548,12 +555,16 @@ def _setup_cases(self): # This assures that case one namelists are populated # and creates the case.test script self._case.case_setup(test_mode=False, reset=True) + fix_single_exe_case(self._case) # Set up case 2 with self._case2: self._activate_case2() self._common_setup() self._case_two_setup() + self._case2.case_setup(test_mode=True, reset=True) + + fix_single_exe_case(self._case2) # Go back to case 1 to ensure that's where we are for any following code self._activate_case1() diff --git a/CIME/SystemTests/test_utils/user_nl_utils.py b/CIME/SystemTests/test_utils/user_nl_utils.py index eab45921c95..930d683666b 100644 --- 
a/CIME/SystemTests/test_utils/user_nl_utils.py +++ b/CIME/SystemTests/test_utils/user_nl_utils.py @@ -8,7 +8,7 @@ def append_to_user_nl_files(caseroot, component, contents): """ - Append the string given by 'contents' to the end of each user_nl file for + Append the string(s) given by 'contents' to the end of each user_nl file for the given component (there may be multiple such user_nl files in the case of a multi-instance test). @@ -25,9 +25,13 @@ def append_to_user_nl_files(caseroot, component, contents): matching the pattern 'user_nl_clm*'. (We do a wildcard match to handle multi-instance tests.) - contents (str): Contents to append to the end of each user_nl file + contents (str or list-like): Contents to append to the end of each user_nl + file. If list-like, each item will be appended on its own line. """ + if isinstance(contents, str): + contents = [contents] + files = _get_list_of_user_nl_files(caseroot, component) if len(files) == 0: @@ -35,7 +39,9 @@ def append_to_user_nl_files(caseroot, component, contents): for one_file in files: with open(one_file, "a") as user_nl_file: - user_nl_file.write("\n" + contents + "\n") + user_nl_file.write("\n") + for c in contents: + user_nl_file.write(c + "\n") def _get_list_of_user_nl_files(path, component): diff --git a/CIME/SystemTests/tsc.py b/CIME/SystemTests/tsc.py index d95e8b03ea3..1a37ecaac5d 100644 --- a/CIME/SystemTests/tsc.py +++ b/CIME/SystemTests/tsc.py @@ -32,7 +32,9 @@ SIM_LENGTH = 600 # seconds OUT_FREQ = 10 # seconds INSPECT_AT = [300, 450, 600] # seconds -INIT_COND_FILE_TEMPLATE = "20210915.v2.ne4_oQU240.F2010.{}.{}.0002-{:02d}-01-00000.nc" +INIT_COND_FILE_TEMPLATE = ( + "20231105.v3b01.F2010.ne4_oQU240.chrysalis.{}.{}.0002-{:02d}-01-00000.nc" +) VAR_LIST = [ "T", "Q", @@ -49,11 +51,11 @@ class TSC(SystemTestsCommon): - def __init__(self, case): + def __init__(self, case, **kwargs): """ initialize an object interface to the TSC test """ - super(TSC, self).__init__(case) + super(TSC, self).__init__(case, 
**kwargs) if self._case.get_value("MODEL") == "e3sm": self.atmmod = "eam" self.lndmod = "elm" @@ -100,8 +102,8 @@ def _run_with_specified_dtime(self, dtime=2): self._case.set_value("STOP_OPTION", "nsteps") csmdata_root = self._case.get_value("DIN_LOC_ROOT") - csmdata_atm = os.path.join(csmdata_root, "atm/cam/inic/homme/ne4_v2_init") - csmdata_lnd = os.path.join(csmdata_root, "lnd/clm2/initdata/ne4_oQU240_v2_init") + csmdata_atm = os.path.join(csmdata_root, "atm/cam/inic/homme/ne4_v3_init") + csmdata_lnd = os.path.join(csmdata_root, "lnd/clm2/initdata/ne4_oQU240_v3_init") nstep_output = OUT_FREQ // dtime for iinst in range(1, NINST + 1): diff --git a/CIME/Tools/Makefile b/CIME/Tools/Makefile index 1e6d0f1896c..e3ddb9c2042 100644 --- a/CIME/Tools/Makefile +++ b/CIME/Tools/Makefile @@ -48,7 +48,7 @@ ifeq ($(strip $(SMP)),TRUE) THREADDIR = threads compile_threaded = TRUE else - ifeq ($(strip $(SMP_PRESENT)),TRUE) + ifeq ($(strip $(BUILD_THREADED)),TRUE) THREADDIR = threads compile_threaded = TRUE else @@ -373,7 +373,13 @@ ifeq ($(strip $(MPILIB)), mpi-serial) MPIFC := $(SFC) MPICC := $(SCC) MPICXX := $(SCXX) - CONFIG_ARGS += MCT_PATH=$(SHAREDLIBROOT)/$(SHAREDPATH)/mct/mpi-serial + ifndef MPI_SERIAL_PATH + CONFIG_ARGS += MCT_PATH=$(SHAREDLIBROOT)/$(SHAREDPATH)/mct/mpi-serial + else + CONFIG_ARGS += MCT_PATH=$(MPI_SERIAL_PATH) + INC_MPI := $(MPI_SERIAL_PATH)/include + LIB_MPI := $(MPI_SERIAL_PATH)/lib + endif else CC := $(MPICC) FC := $(MPIFC) @@ -541,7 +547,7 @@ cospsimulator_intr.o: $(COSP_LIBDIR)/libcosp.a endif ifdef FV3CORE_LIBDIR -$(FV3CORE_LIBDIR)/libfv3core.a: $(EXEROOT)/FMS/libfms.a +$(FV3CORE_LIBDIR)/libfv3core.a: $(LIBROOT)/libfms.a $(MAKE) -C $(FV3CORE_LIBDIR) complib COMPLIB='$(FV3CORE_LIBDIR)/libfv3core.a' F90='$(FC)' CC='$(CC)' FFLAGS='$(FFLAGS) $(FC_AUTO_R8)' CFLAGS='$(CFLAGS)' INCLDIR='$(INCLDIR)' FC_TYPE='$(COMPILER)' dyn_grid.o: $(FV3CORE_LIBDIR)/libfv3core.a @@ -561,8 +567,14 @@ ifdef MPAS_LIBDIR # this isn't necessary, since libmpas should never be 
an actual file (the library that is created # is named libmpas.a), but adding the PHONY declaration provides an extra bit of safety .PHONY: libmpas +# The CASEROOT, COMPILER and MACH are added so that the Depends file could be visible to +# the MPAS dycore. +# The GPUFLAGS is added so that the GPU flags defined in ccs_config_cesm could also be +# used to build the MPAS dycore if needed. libmpas: cam_abortutils.o physconst.o - $(MAKE) -C $(MPAS_LIBDIR) CC="$(CC)" FC="$(FC)" PIODEF="$(PIODEF)" FFLAGS='$(FREEFLAGS) $(FFLAGS)' \ + $(MAKE) -C $(MPAS_LIBDIR) CC="$(CC)" FC="$(FC)" PIODEF="$(PIODEF)" \ + FFLAGS='$(FREEFLAGS) $(FFLAGS)' GPUFLAGS='$(GPUFLAGS)' \ + CASEROOT='$(CASEROOT)' COMPILER='$(COMPILER)' MACH='$(MACH)' \ FCINCLUDES='$(INCLDIR) $(INCS) -I$(ABS_INSTALL_SHAREDPATH)/include -I$(ABS_ESMF_PATH)/include' dyn_comp.o: libmpas @@ -589,10 +601,12 @@ ifdef LAPACK_LIBDIR SLIBS += -L$(LAPACK_LIBDIR) -llapack -lblas endif ifdef LIB_MPI - ifndef MPI_LIB_NAME - SLIBS += -L$(LIB_MPI) -lmpi - else - SLIBS += -L$(LIB_MPI) -l$(MPI_LIB_NAME) + ifndef MPI_SERIAL_PATH + ifndef MPI_LIB_NAME + SLIBS += -L$(LIB_MPI) -lmpi + else + SLIBS += -L$(LIB_MPI) -l$(MPI_LIB_NAME) + endif endif endif @@ -613,6 +627,9 @@ endif # Remove arch flag if it exists F90_LDFLAGS := $(filter-out -arch%,$(LDFLAGS)) +ifdef GPUFLAGS + F90_LDFLAGS += $(GPUFLAGS) +endif # Machine stuff to appear last on the link step ifndef MLIBS @@ -875,7 +892,7 @@ endif ifdef FV3CORE_LIBDIR ULIBDEP += $(FV3CORE_LIBDIR)/libfv3core.a - ULIBDEP += $(EXEROOT)/FMS/libfms.a + ULIBDEP += $(LIBROOT)/libfms.a endif ifdef MPAS_LIBDIR @@ -910,12 +927,21 @@ GENF90 ?= $(CIMEROOT)/CIME/non_py/externals/genf90/genf90.pl .SUFFIXES: .F90 .F .f90 .f .c .cpp .o .in ifeq ($(MPILIB),mpi-serial) - MPISERIAL = $(INSTALL_SHAREDPATH)/lib/libmpi-serial.a - MLIBS += $(MPISERIAL) - CMAKE_OPTS += -DMPI_C_INCLUDE_PATH=$(INSTALL_SHAREDPATH)/include \ + ifdef MPI_SERIAL_PATH + MPISERIAL = $(MPI_SERIAL_PATH)/lib/libmpi-serial.a + MLIBS += 
-L$(MPI_SERIAL_PATH)/lib -lmpi-serial + CMAKE_OPTS += -DMPI_C_INCLUDE_PATH=$(MPI_SERIAL_PATH)/include \ + -DMPI_Fortran_INCLUDE_PATH=$(MPI_SERIAL_PATH)/include \ + -DMPI_C_LIBRARIES=$(MPI_SERIAL_PATH)/lib/libmpi-serial.a \ + -DMPI_Fortran_LIBRARIES=$(MPI_SERIAL_PATH)/lib/libmpi-serial.a + else + MPISERIAL = $(INSTALL_SHAREDPATH)/lib/libmpi-serial.a + MLIBS += -L$(INSTALL_SHAREDPATH)/lib -lmpi-serial + CMAKE_OPTS += -DMPI_C_INCLUDE_PATH=$(INSTALL_SHAREDPATH)/include \ -DMPI_Fortran_INCLUDE_PATH=$(INSTALL_SHAREDPATH)/include \ -DMPI_C_LIBRARIES=$(INSTALL_SHAREDPATH)/lib/libmpi-serial.a \ -DMPI_Fortran_LIBRARIES=$(INSTALL_SHAREDPATH)/lib/libmpi-serial.a + endif endif $(MCTLIBS) : $(MPISERIAL) diff --git a/CIME/Tools/bless_test_results b/CIME/Tools/bless_test_results index d630aff69bd..eb9663dcf9a 100755 --- a/CIME/Tools/bless_test_results +++ b/CIME/Tools/bless_test_results @@ -8,20 +8,21 @@ blessing of diffs. You may need to load modules for cprnc to work. """ - from standard_script_setup import * from CIME.utils import expect from CIME.XML.machines import Machines from CIME.bless_test_results import bless_test_results -import argparse, sys, os +import argparse +import sys +import os +import logging _MACHINE = Machines() -############################################################################### + def parse_command_line(args, description): - ############################################################################### parser = argparse.ArgumentParser( usage="""\n{0} [-n] [-r ] [-b ] [-c ] [ ...] [--verbose] OR @@ -45,35 +46,18 @@ OR formatter_class=argparse.ArgumentDefaultsHelpFormatter, ) - default_compiler = _MACHINE.get_default_compiler() - scratch_root = _MACHINE.get_value("CIME_OUTPUT_ROOT") - default_testroot = os.path.join(scratch_root) + create_bless_options(parser) - CIME.utils.setup_standard_logging_options(parser) + create_baseline_options(parser) - parser.add_argument( - "-n", "--namelists-only", action="store_true", help="Only analyze namelists." 
- ) + create_test_options(parser) - parser.add_argument( - "--hist-only", action="store_true", help="Only analyze history files." - ) - - parser.add_argument( - "-b", - "--baseline-name", - help="Name of baselines to use. Default will use BASELINE_NAME_CMP first if possible, otherwise branch name.", - ) - - parser.add_argument( - "--baseline-root", - help="Root of baselines. Default will use the BASELINE_ROOT from the case.", - ) + CIME.utils.setup_standard_logging_options(parser) parser.add_argument( "-c", "--compiler", - default=default_compiler, + default=_MACHINE.get_default_compiler(), help="Compiler of run you want to bless", ) @@ -85,36 +69,15 @@ OR "This option forces the bless to happen regardless.", ) - parser.add_argument( + mutual_execution = parser.add_mutually_exclusive_group() + + mutual_execution.add_argument( "--report-only", action="store_true", help="Only report what files will be overwritten and why. Caution is a good thing when updating baselines", ) - parser.add_argument( - "-r", - "--test-root", - default=default_testroot, - help="Path to test results that are being blessed", - ) - - parser.add_argument( - "--new-test-root", - help="If bless_test_results needs to create cases (for blessing namelists), use this root area", - ) - - parser.add_argument( - "--new-test-id", - help="If bless_test_results needs to create cases (for blessing namelists), use this test id", - ) - - parser.add_argument( - "-t", - "--test-id", - help="Limit processes to case dirs matching this test-id. 
Can be useful if mutiple runs dumped into the same dir.", - ) - - parser.add_argument( + mutual_execution.add_argument( "-f", "--force", action="store_true", @@ -127,6 +90,8 @@ OR "\ncan follow either the config_pes.xml or the env_mach_pes.xml format.", ) + parser.add_argument("--exclude", nargs="*", help="Exclude tests") + parser.add_argument( "bless_tests", nargs="*", @@ -135,73 +100,92 @@ OR args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) - expect( - not (args.report_only and args.force), - "Makes no sense to use -r and -f simultaneously", + return vars(args) + + +def create_bless_options(parser): + bless_group = parser.add_argument_group("Bless options") + + mutual_bless_group = bless_group.add_mutually_exclusive_group() + + mutual_bless_group.add_argument( + "-n", "--namelists-only", action="store_true", help="Only analyze namelists." ) - expect( - not (args.namelists_only and args.hist_only), - "Makes no sense to use --namelists-only and --hist-only simultaneously", + + mutual_bless_group.add_argument( + "--hist-only", action="store_true", help="Only analyze history files." 
) - return ( - args.baseline_name, - args.baseline_root, - args.test_root, - args.compiler, - args.test_id, - args.namelists_only, - args.hist_only, - args.report_only, - args.force, - args.pes_file, - args.bless_tests, - args.no_skip_pass, - args.new_test_root, - args.new_test_id, + mutual_perf_group = bless_group.add_mutually_exclusive_group() + + mutual_perf_group.add_argument( + "--bless-tput", + action="store_true", + help="Bless throughput, use `--bless-perf` to bless throughput and memory", ) + mutual_perf_group.add_argument( + "--bless-mem", + action="store_true", + help="Bless memory, use `--bless-perf` to bless throughput and memory", + ) -############################################################################### -def _main_func(description): - ############################################################################### - ( - baseline_name, - baseline_root, - test_root, - compiler, - test_id, - namelists_only, - hist_only, - report_only, - force, - pes_file, - bless_tests, - no_skip_pass, - new_test_root, - new_test_id, - ) = parse_command_line(sys.argv, description) - - success = bless_test_results( - baseline_name, - baseline_root, - test_root, - compiler, - test_id=test_id, - namelists_only=namelists_only, - hist_only=hist_only, - report_only=report_only, - force=force, - pesfile=pes_file, - bless_tests=bless_tests, - no_skip_pass=no_skip_pass, - new_test_root=new_test_root, - new_test_id=new_test_id, + bless_group.add_argument( + "--bless-perf", action="store_true", help="Bless both throughput and memory" ) - sys.exit(0 if success else 1) -############################################################################### +def create_baseline_options(parser): + baseline_group = parser.add_argument_group("Baseline options") + + baseline_group.add_argument( + "-b", + "--baseline-name", + help="Name of baselines to use. 
Default will use BASELINE_NAME_CMP first if possible, otherwise branch name.",
+    )
+
+    baseline_group.add_argument(
+        "--baseline-root",
+        help="Root of baselines. Default will use the BASELINE_ROOT from the case.",
+    )
+
+
+def create_test_options(parser):
+    default_testroot = _MACHINE.get_value("CIME_OUTPUT_ROOT")
+
+    test_group = parser.add_argument_group("Test options")
+
+    test_group.add_argument(
+        "-r",
+        "--test-root",
+        default=default_testroot,
+        help="Path to test results that are being blessed",
+    )
+
+    test_group.add_argument(
+        "--new-test-root",
+        help="If bless_test_results needs to create cases (for blessing namelists), use this root area",
+    )
+
+    test_group.add_argument(
+        "--new-test-id",
+        help="If bless_test_results needs to create cases (for blessing namelists), use this test id",
+    )
+
+    test_group.add_argument(
+        "-t",
+        "--test-id",
+        help="Limit processes to case dirs matching this test-id. Can be useful if multiple runs dumped into the same dir.",
+    )
+
+
+def _main_func(description):
+    kwargs = parse_command_line(sys.argv, description)
+
+    success = bless_test_results(**kwargs)
+
+    sys.exit(0 if success else 1)
+

 if __name__ == "__main__":
     _main_func(__doc__)
diff --git a/CIME/Tools/case.build b/CIME/Tools/case.build
index c8e4d54c467..4edf177198e 100755
--- a/CIME/Tools/case.build
+++ b/CIME/Tools/case.build
@@ -80,6 +80,14 @@ def parse_command_line(args, description):
         help="Build each component one at a time, separately, with output going to separate logs",
     )
 
+    parser.add_argument(
+        "--skip-submit",
+        action="store_true",
+        help="Sets the current test phase to RUN, skipping the SUBMIT phase. This "
+        "may be useful if rebuilding the model while this test is in the batch queue. 
" + "ONLY USE IF A TEST CASE, OTHERWISE IGNORED.", + ) + parser.add_argument( "--dry-run", action="store_true", @@ -173,6 +181,7 @@ def parse_command_line(args, description): args.separate_builds, args.ninja, args.dry_run, + args.skip_submit, ) @@ -191,6 +200,7 @@ def _main_func(description): separate_builds, ninja, dry_run, + skip_submit, ) = parse_command_line(sys.argv, description) success = True @@ -234,6 +244,7 @@ def _main_func(description): ninja=ninja, dry_run=dry_run, separate_builds=separate_builds, + skip_submit=skip_submit, ) else: diff --git a/CIME/Tools/cs.status b/CIME/Tools/cs.status index 3db5402b741..4ab7b7e8ecd 100755 --- a/CIME/Tools/cs.status +++ b/CIME/Tools/cs.status @@ -103,6 +103,13 @@ def parse_command_line(args, description): help="Test root used when --test-id is given", ) + parser.add_argument( + "--force-rebuild", + action="store_true", + help="When used with 'test-id', the" + "tests will have their 'BUILD_SHAREDLIB' phase reset to 'PEND'.", + ) + args = parser.parse_args(args[1:]) _validate_args(args) @@ -120,10 +127,17 @@ def parse_command_line(args, description): args.expected_fails_file, args.test_id, args.test_root, + args.force_rebuild, ) def _validate_args(args): + if args.force_rebuild: + expect( + args.test_id != [], + "Cannot force a rebuild without 'test-id'", + ) + expect( not (args.summary and args.count_fails), "--count-fails cannot be specified with --summary", @@ -158,6 +172,7 @@ def _main_func(description): expected_fails_file, test_ids, test_root, + force_rebuild, ) = parse_command_line(sys.argv, description) for test_id in test_ids: test_paths.extend( @@ -172,6 +187,7 @@ def _main_func(description): check_throughput=check_throughput, check_memory=check_memory, expected_fails_filepath=expected_fails_file, + force_rebuild=force_rebuild, ) diff --git a/CIME/Tools/jenkins_generic_job b/CIME/Tools/jenkins_generic_job index 210c557edc2..ec93bfca238 100755 --- a/CIME/Tools/jenkins_generic_job +++ 
b/CIME/Tools/jenkins_generic_job @@ -38,7 +38,7 @@ OR CIME.utils.setup_standard_logging_options(parser) - default_baseline = CIME.utils.get_current_branch(repo=CIME.utils.get_cime_root()) + default_baseline = CIME.utils.get_current_branch(repo=CIME.utils.get_src_root()) if default_baseline is not None: default_baseline = default_baseline.replace(".", "_").replace( "/", "_" @@ -174,6 +174,24 @@ OR help="Fail if memory check fails (fail if tests footprint grows)", ) + parser.add_argument( + "--ignore-memleak", + action="store_true", + help="Do not fail if there are memleaks", + ) + + parser.add_argument( + "--ignore-namelists", + action="store_true", + help="Do not fail if there are namelist diffs", + ) + + parser.add_argument( + "--save-timing", + action="store_true", + help="Tell create_test to save timings of tests", + ) + parser.add_argument( "--pes-file", help="Full pathname of an optional pes specification file. The file" @@ -252,6 +270,9 @@ OR args.update_success, args.check_throughput, args.check_memory, + args.ignore_memleak, + args.ignore_namelists, + args.save_timing, args.pes_file, args.jenkins_id, args.queue, @@ -281,6 +302,9 @@ def _main_func(description): update_success, check_throughput, check_memory, + ignore_memleak, + ignore_namelists, + save_timing, pes_file, jenkins_id, queue, @@ -308,6 +332,9 @@ def _main_func(description): update_success, check_throughput, check_memory, + ignore_memleak, + ignore_namelists, + save_timing, pes_file, jenkins_id, queue, diff --git a/CIME/Tools/jenkins_script b/CIME/Tools/jenkins_script deleted file mode 100755 index c1d1728c2cf..00000000000 --- a/CIME/Tools/jenkins_script +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash - -# -# Wrapper around jenkins_generic_job that will allow output -# from that script to always be printed to the screen and -# recoverable if Jenkins is forced to kill the job. This is the -# script that should be used from Jenkins. 
-# - -SCRIPT_DIR=$( cd "$( dirname "$0" )" && pwd ) -DATE_STAMP=$(date "+%Y-%m-%d_%H%M%S") -export JENKINS_START_TIME=$(date "+%s") - -umask 002 - -$SCRIPT_DIR/jenkins_generic_job --submit-to-cdash --update-success "$@" >& JENKINS_$DATE_STAMP diff --git a/CIME/Tools/standard_script_setup.py b/CIME/Tools/standard_script_setup.py index 74ad6ae39ea..1faab6f0a89 100644 --- a/CIME/Tools/standard_script_setup.py +++ b/CIME/Tools/standard_script_setup.py @@ -41,6 +41,5 @@ def check_minimum_python_version(major, minor): import CIME.utils - CIME.utils.stop_buffering_output() import logging, argparse diff --git a/CIME/XML/archive_base.py b/CIME/XML/archive_base.py index 01297da9f0e..fa42e186937 100644 --- a/CIME/XML/archive_base.py +++ b/CIME/XML/archive_base.py @@ -3,11 +3,39 @@ """ from CIME.XML.standard_module_setup import * from CIME.XML.generic_xml import GenericXML +from CIME.utils import convert_to_type logger = logging.getLogger(__name__) class ArchiveBase(GenericXML): + def exclude_testing(self, compname): + """ + Checks if component should be excluded from testing. + """ + value = self._get_attribute(compname, "exclude_testing") + + if value is None: + return False + + return convert_to_type(value, "logical") + + def _get_attribute(self, compname, attr_name): + attrib = self.get_entry_attributes(compname) + + if attrib is None: + return None + + return attrib.get(attr_name, None) + + def get_entry_attributes(self, compname): + entry = self.get_entry(compname) + + if entry is None: + return None + + return self.attrib(entry) + def get_entry(self, compname): """ Returns an xml node corresponding to compname in comp_archive_spec @@ -117,7 +145,11 @@ def get_all_hist_files(self, casename, model, from_dir, suffix="", ref_case=None ext = ext[:-1] string = model + r"\d?_?(\d{4})?\." + ext if has_suffix: - string += "." + suffix + "$" + if not suffix in string: + string += r"\." 
+ suffix + "$" + + if not string.endswith("$"): + string += "$" logger.debug("Regex is {}".format(string)) pfile = re.compile(string) diff --git a/CIME/XML/env_batch.py b/CIME/XML/env_batch.py index d20c67c5ab3..19d578a389a 100644 --- a/CIME/XML/env_batch.py +++ b/CIME/XML/env_batch.py @@ -15,6 +15,7 @@ get_batch_script_for_job, get_logging_options, format_time, + add_flag_to_cmd, ) from CIME.locked_files import lock_file, unlock_file from collections import OrderedDict @@ -555,7 +556,7 @@ def get_batch_directives(self, case, job, overrides=None, output_format="default return "\n".join(result) - def get_submit_args(self, case, job): + def get_submit_args(self, case, job, resolve=True): """ return a list of touples (flag, name) """ @@ -563,7 +564,7 @@ def get_submit_args(self, case, job): submit_arg_nodes = self._get_arg_nodes(case, bs_nodes) - submitargs = self._process_args(case, submit_arg_nodes, job) + submitargs = self._process_args(case, submit_arg_nodes, job, resolve=resolve) return submitargs @@ -597,7 +598,7 @@ def _get_arg_nodes(self, case, bs_nodes): return submit_arg_nodes - def _process_args(self, case, submit_arg_nodes, job): + def _process_args(self, case, submit_arg_nodes, job, resolve=True): submitargs = " " for arg in submit_arg_nodes: @@ -619,19 +620,25 @@ def _process_args(self, case, submit_arg_nodes, job): if " " in flag: flag, name = flag.split() if name: - if "$" in name: + if resolve and "$" in name: rflag = self._resolve_argument(case, flag, name, job) + # This is to prevent -gpu_type=none in qsub args + if rflag.endswith("=none"): + continue if len(rflag) > len(flag): submitargs += " {}".format(rflag) else: - submitargs += " {} {}".format(flag, name) + submitargs += " " + add_flag_to_cmd(flag, name) else: submitargs += " {}".format(flag) else: - try: - submitargs += self._resolve_argument(case, flag, name, job) - except ValueError: - continue + if resolve: + try: + submitargs += self._resolve_argument(case, flag, name, job) + except 
ValueError: + continue + else: + submitargs += " " + add_flag_to_cmd(flag, name) return submitargs @@ -697,13 +704,8 @@ def _resolve_argument(self, case, flag, name, job): if flag == "-q" and rval == "batch" and case.get_value("MACH") == "blues": # Special case. Do not provide '-q batch' for blues raise ValueError() - if ( - flag.rfind("=", len(flag) - 1, len(flag)) >= 0 - or flag.rfind(":", len(flag) - 1, len(flag)) >= 0 - ): - submitargs = " {}{}".format(flag, str(rval).strip()) - else: - submitargs = " {} {}".format(flag, str(rval).strip()) + + submitargs = " " + add_flag_to_cmd(flag, rval) return submitargs @@ -793,20 +795,10 @@ def submit_jobs( batch_job_id = None for _ in range(num_submit): for job, dependency in jobs: - if dependency is not None: - deps = dependency.split() - else: - deps = [] - dep_jobs = [] - if user_prereq is not None: - dep_jobs.append(user_prereq) - for dep in deps: - if dep in depid.keys() and depid[dep] is not None: - dep_jobs.append(str(depid[dep])) - if prev_job is not None: - dep_jobs.append(prev_job) + dep_jobs = get_job_deps(dependency, depid, prev_job, user_prereq) logger.debug("job {} depends on {}".format(job, dep_jobs)) + result = self._submit_single_job( case, job, @@ -821,6 +813,7 @@ def submit_jobs( dry_run=dry_run, workflow=workflow, ) + batch_job_id = str(alljobs.index(job)) if dry_run else result depid[job] = batch_job_id jobcmds.append((job, result)) @@ -929,42 +922,54 @@ def _submit_single_job( logger.info("Starting job script {}".format(job)) function_name = job.replace(".", "_") job_name = "." 
+ job - if not dry_run: - args = self._build_run_args( - job, - True, - skip_pnl=skip_pnl, - set_continue_run=resubmit_immediate, - submit_resubmits=workflow and not resubmit_immediate, - ) - try: - if hasattr(case, function_name): - getattr(case, function_name)( - **{k: v for k, (v, _) in args.items()} - ) + args = self._build_run_args( + job, + True, + skip_pnl=skip_pnl, + set_continue_run=resubmit_immediate, + submit_resubmits=workflow and not resubmit_immediate, + ) + + try: + if hasattr(case, function_name): + if dry_run: + return + + getattr(case, function_name)(**{k: v for k, (v, _) in args.items()}) + else: + expect( + os.path.isfile(job_name), + "Could not find file {}".format(job_name), + ) + if dry_run: + return os.path.join(self._caseroot, job_name) else: - expect( - os.path.isfile(job_name), - "Could not find file {}".format(job_name), - ) run_cmd_no_fail( os.path.join(self._caseroot, job_name), combine_output=True, verbose=True, from_dir=self._caseroot, ) - except Exception as e: - # We don't want exception from the run phases getting into submit phase - logger.warning( - "Exception from {}: {}".format(function_name, str(e)) - ) + except Exception as e: + # We don't want exception from the run phases getting into submit phase + logger.warning("Exception from {}: {}".format(function_name, str(e))) return - submitargs = self.get_submit_args(case, job) - args_override = self.get_value("BATCH_COMMAND_FLAGS", subgroup=job) - if args_override: - submitargs = args_override + submitargs = case.get_value("BATCH_COMMAND_FLAGS", subgroup=job, resolved=False) + + project = case.get_value("PROJECT", subgroup=job) + + if not project: + # If there is no project then we need to remove the project flag + if ( + batch_system == "pbs" or batch_system == "cobalt" + ) and " -A " in submitargs: + submitargs = submitargs.replace("-A", "") + elif batch_system == "lsf" and " -P " in submitargs: + submitargs = submitargs.replace("-P", "") + elif batch_system == "slurm" and " 
--account " in submitargs: + submitargs = submitargs.replace("--account", "") if dep_jobs is not None and len(dep_jobs) > 0: logger.debug("dependencies: {}".format(dep_jobs)) @@ -1086,10 +1091,10 @@ def _submit_single_job( # add ` before cd $CASEROOT and at end of command submitcmd = submitcmd.replace("cd $CASEROOT", "'cd $CASEROOT") + "'" + submitcmd = case.get_resolved_value(submitcmd, subgroup=job) if dry_run: return submitcmd else: - submitcmd = case.get_resolved_value(submitcmd) logger.info("Submitting job script {}".format(submitcmd)) output = run_cmd_no_fail(submitcmd, combine_output=True) jobid = self.get_job_id(output) @@ -1123,8 +1128,14 @@ def get_job_id(self, output): jobid_pattern is not None, "Could not find jobid_pattern in env_batch.xml", ) + + # If no output was provided, skip the search. This could + # be because --no-batch was provided. + if not output: + return output else: return output + search_match = re.search(jobid_pattern, output) expect( search_match is not None, @@ -1387,3 +1398,41 @@ def make_all_batch_files(self, case): input_batch_script, job ) ) + + +def get_job_deps(dependency, depid, prev_job=None, user_prereq=None): + """ + Gather list of job batch ids that a job depends on. + + Parameters + ---------- + dependency : str + List of dependent job names. + depid : dict + Lookup where keys are job names and values are the batch id. + user_prereq : str + User requested dependency. + + Returns + ------- + list + List of batch ids that job depends on. 
+ """ + deps = [] + dep_jobs = [] + + if user_prereq is not None: + dep_jobs.append(user_prereq) + + if dependency is not None: + # Match all words, excluding "and" and "or" + deps = re.findall(r"\b(?!and\b|or\b)\w+(?:\.\w+)?\b", dependency) + + for dep in deps: + if dep in depid and depid[dep] is not None: + dep_jobs.append(str(depid[dep])) + + if prev_job is not None: + dep_jobs.append(prev_job) + + return dep_jobs diff --git a/CIME/XML/env_build.py b/CIME/XML/env_build.py index 7bd805b1c0f..fe863e414ef 100644 --- a/CIME/XML/env_build.py +++ b/CIME/XML/env_build.py @@ -18,4 +18,19 @@ def __init__( initialize an object interface to file env_build.xml in the case directory """ schema = os.path.join(utils.get_schema_path(), "env_entry_id.xsd") + self._caseroot = case_root EnvBase.__init__(self, case_root, infile, schema=schema, read_only=read_only) + + def set_value(self, vid, value, subgroup=None, ignore_type=False): + """ + Set the value of an entry-id field to value + Returns the value or None if not found + subgroup is ignored in the general routine and applied in specific methods + """ + # Do not allow any of these to be the same as CASEROOT + if vid in ("EXEROOT", "OBJDIR", "LIBROOT"): + utils.expect(value != self._caseroot, f"Cannot set {vid} to CASEROOT") + + return super(EnvBuild, self).set_value( + vid, value, subgroup=subgroup, ignore_type=ignore_type + ) diff --git a/CIME/XML/env_mach_pes.py b/CIME/XML/env_mach_pes.py index c7635573f95..76c6588901b 100644 --- a/CIME/XML/env_mach_pes.py +++ b/CIME/XML/env_mach_pes.py @@ -42,6 +42,8 @@ def get_value( resolved=True, subgroup=None, max_mpitasks_per_node=None, + max_cputasks_per_gpu_node=None, + ngpus_per_node=None, ): # pylint: disable=arguments-differ # Special variable NINST_MAX is used to determine the number of # drivers in multi-driver mode. 
@@ -58,7 +60,13 @@ def get_value( if "NTASKS" in vid or "ROOTPE" in vid: if max_mpitasks_per_node is None: max_mpitasks_per_node = self.get_value("MAX_MPITASKS_PER_NODE") - if value is not None and value < 0: + if max_cputasks_per_gpu_node is None: + max_cputasks_per_gpu_node = self.get_value("MAX_CPUTASKS_PER_GPU_NODE") + if ngpus_per_node is None: + ngpus_per_node = self.get_value("NGPUS_PER_NODE") + if (ngpus_per_node and value) and value < 0: + value = -1 * value * max_cputasks_per_gpu_node + elif value and value < 0: value = -1 * value * max_mpitasks_per_node # in the nuopc driver there is only one NINST value # so that NINST_{comp} = NINST @@ -154,6 +162,7 @@ def get_total_tasks(self, comp_classes, async_interface=False): tt = rootpe + nthrds * ((ntasks - 1) * pstrid + 1) maxrootpe = max(maxrootpe, rootpe) total_tasks = max(tt, total_tasks) + if asyncio_tasks: total_tasks = total_tasks + len(asyncio_tasks) if self.get_value("MULTI_DRIVER"): @@ -167,13 +176,24 @@ def get_tasks_per_node(self, total_tasks, max_thread_count): "totaltasks > 0 expected, totaltasks = {}".format(total_tasks), ) if self._comp_interface == "nuopc" and self.get_value("ESMF_AWARE_THREADING"): - tasks_per_node = self.get_value("MAX_MPITASKS_PER_NODE") + if self.get_value("NGPUS_PER_NODE") > 0: + tasks_per_node = self.get_value("MAX_CPUTASKS_PER_GPU_NODE") + else: + tasks_per_node = self.get_value("MAX_MPITASKS_PER_NODE") else: - tasks_per_node = min( - self.get_value("MAX_TASKS_PER_NODE") // max_thread_count, - self.get_value("MAX_MPITASKS_PER_NODE"), - total_tasks, - ) + ngpus_per_node = self.get_value("NGPUS_PER_NODE") + if ngpus_per_node and ngpus_per_node > 0: + tasks_per_node = min( + self.get_value("MAX_TASKS_PER_NODE") // max_thread_count, + self.get_value("MAX_CPUTASKS_PER_GPU_NODE"), + total_tasks, + ) + else: + tasks_per_node = min( + self.get_value("MAX_TASKS_PER_NODE") // max_thread_count, + self.get_value("MAX_MPITASKS_PER_NODE"), + total_tasks, + ) return tasks_per_node if 
tasks_per_node > 0 else 1 def get_total_nodes(self, total_tasks, max_thread_count): diff --git a/CIME/XML/env_mach_specific.py b/CIME/XML/env_mach_specific.py index 03e84f0faee..0592921ab9a 100644 --- a/CIME/XML/env_mach_specific.py +++ b/CIME/XML/env_mach_specific.py @@ -277,19 +277,25 @@ def make_env_mach_specific_file(self, shell, case, output_dir=""): if env_value.startswith("sh"): lines.append("{}".format(env_name)) else: - lines.append("export {}={}".format(env_name, env_value)) + if env_value is None: + lines.append("unset {}".format(env_name)) + else: + lines.append("export {}={}".format(env_name, env_value)) elif shell == "csh": if env_name == "source": if env_value.startswith("csh"): lines.append("{}".format(env_name)) else: - lines.append("setenv {} {}".format(env_name, env_value)) + if env_value is None: + lines.append("unsetenv {}".format(env_name)) + else: + lines.append("setenv {} {}".format(env_name, env_value)) else: expect(False, "Unknown shell type: '{}'".format(shell)) with open(os.path.join(output_dir, filename), "w") as fd: - fd.write("\n".join(lines)) + fd.write("\n".join(lines) + "\n") # Private API @@ -320,7 +326,8 @@ def _compute_resource_actions(self, resource_nodes, case, job=None): def _compute_actions(self, nodes, child_tag, case, job=None): result = [] # list of tuples ("name", "argument") - compiler, mpilib = case.get_value("COMPILER"), case.get_value("MPILIB") + compiler = case.get_value("COMPILER") + mpilib = case.get_value("MPILIB") for node in nodes: if self._match_attribs(self.attrib(node), case, job=job): diff --git a/CIME/XML/env_workflow.py b/CIME/XML/env_workflow.py index 3c976693639..c59ff23aba4 100644 --- a/CIME/XML/env_workflow.py +++ b/CIME/XML/env_workflow.py @@ -112,7 +112,13 @@ def get_job_specs(self, case, job): if ngpus_per_node > max_gpus_per_node: ngpus_per_node = max_gpus_per_node - return task_count, num_nodes, tasks_per_node, thread_count, ngpus_per_node + return ( + task_count, + num_nodes, + tasks_per_node, + 
thread_count, + ngpus_per_node, + ) # pylint: disable=arguments-differ def get_value(self, item, attribute=None, resolved=True, subgroup="PRIMARY"): diff --git a/CIME/XML/files.py b/CIME/XML/files.py index 26843409ed0..c0149f9601a 100644 --- a/CIME/XML/files.py +++ b/CIME/XML/files.py @@ -136,7 +136,9 @@ def set_value(self, vid, value, subgroup=None, ignore_type=False): def get_schema(self, nodename, attributes=None): node = self.get_optional_child("entry", {"id": nodename}) + schemanode = self.get_optional_child("schema", root=node, attributes=attributes) + if schemanode is not None: logger.debug("Found schema for {}".format(nodename)) return self.get_resolved_value(self.text(schemanode)) diff --git a/CIME/XML/generic_xml.py b/CIME/XML/generic_xml.py index c2a8364e090..083743695b3 100644 --- a/CIME/XML/generic_xml.py +++ b/CIME/XML/generic_xml.py @@ -8,7 +8,7 @@ import xml.etree.ElementTree as ET # pylint: disable=import-error -from distutils.spawn import find_executable +from shutil import which import getpass from copy import deepcopy from collections import namedtuple @@ -105,7 +105,8 @@ def __init__( def read(self, infile, schema=None): """ - Read and parse an xml file into the object + Read and parse an xml file into the object. The schema variable can either be a path to an xsd schema file or + a dictionary of paths to files by version. 
""" cached_read = False if not self.DISABLE_CACHING and infile in self._FILEMAP: @@ -126,8 +127,10 @@ def read(self, infile, schema=None): logger.debug("read: {}".format(infile)) with open(infile, "r", encoding="utf-8") as fd: self.read_fd(fd) - - if schema is not None and self.get_version() > 1.0: + version = str(self.get_version()) + if type(schema) is dict: + self.validate_xml_file(infile, schema[version]) + elif schema is not None and self.get_version() > 1.0: self.validate_xml_file(infile, schema) logger.debug("File version is {}".format(str(self.get_version()))) @@ -472,9 +475,9 @@ def write(self, outfile=None, force_write=False): xmlstr = self.get_raw_record() # xmllint provides a better format option for the output file - xmllint = find_executable("xmllint") + xmllint = which("xmllint") - if xmllint is not None: + if xmllint: if isinstance(outfile, str): run_cmd_no_fail( "{} --format --output {} -".format(xmllint, outfile), @@ -609,7 +612,9 @@ def set_value( return value if valnodes else None - def get_resolved_value(self, raw_value, allow_unresolved_envvars=False): + def get_resolved_value( + self, raw_value, allow_unresolved_envvars=False, subgroup=None + ): """ A value in the xml file may contain references to other xml variables or to environment variables. 
These are refered to in @@ -659,7 +664,8 @@ def get_resolved_value(self, raw_value, allow_unresolved_envvars=False): logger.debug("find: {}".format(var)) # The overridden versions of this method do not simply return None # so the pylint should not be flagging this - ref = self.get_value(var) # pylint: disable=assignment-from-none + # pylint: disable=assignment-from-none + ref = self.get_value(var, subgroup=subgroup) if ref is not None: logger.debug("resolve: " + str(ref)) @@ -688,11 +694,17 @@ def validate_xml_file(self, filename, schema): """ validate an XML file against a provided schema file using pylint """ - expect(os.path.isfile(filename), "xml file not found {}".format(filename)) - expect(os.path.isfile(schema), "schema file not found {}".format(schema)) - xmllint = find_executable("xmllint") expect( - os.path.isfile(xmllint), + filename and os.path.isfile(filename), + "xml file not found {}".format(filename), + ) + expect( + schema and os.path.isfile(schema), "schema file not found {}".format(schema) + ) + xmllint = which("xmllint") + + expect( + xmllint and os.path.isfile(xmllint), " xmllint not found in PATH, xmllint is required for cime. 
PATH={}".format( os.environ["PATH"] ), diff --git a/CIME/XML/grids.py b/CIME/XML/grids.py index 819838edddd..e34aacf2d01 100644 --- a/CIME/XML/grids.py +++ b/CIME/XML/grids.py @@ -25,6 +25,10 @@ def __init__(self, infile=None, files=None, comp_interface=None): infile = files.get_value("GRIDS_SPEC_FILE") logger.debug(" Grid specification file is {}".format(infile)) schema = files.get_schema("GRIDS_SPEC_FILE") + expect( + os.path.isfile(infile) and os.access(infile, os.R_OK), + f" grid file not found {infile}", + ) try: GenericXML.__init__(self, infile, schema) except: diff --git a/CIME/XML/machines.py b/CIME/XML/machines.py index 25d7841a50e..e3a047d25de 100644 --- a/CIME/XML/machines.py +++ b/CIME/XML/machines.py @@ -7,12 +7,20 @@ from CIME.utils import convert_to_unknown_type, get_cime_config import socket +from pathlib import Path logger = logging.getLogger(__name__) class Machines(GenericXML): - def __init__(self, infile=None, files=None, machine=None, extra_machines_dir=None): + def __init__( + self, + infile=None, + files=None, + machine=None, + extra_machines_dir=None, + read_only=True, + ): """ initialize an object if a filename is provided it will be used, @@ -23,6 +31,9 @@ def __init__(self, infile=None, files=None, machine=None, extra_machines_dir=Non additional directory that will be searched for a config_machines.xml file; if found, the contents of this file will be appended to the standard config_machines.xml. An empty string is treated the same as None. + + The schema variable can be passed as a path to an xsd schema file or a dictionary of paths + with version number as keys. 
""" self.machine_node = None @@ -37,13 +48,28 @@ def __init__(self, infile=None, files=None, machine=None, extra_machines_dir=Non files = Files() if infile is None: infile = files.get_value("MACHINES_SPEC_FILE") - schema = files.get_schema("MACHINES_SPEC_FILE") - logger.debug("Verifying using schema {}".format(schema)) self.machines_dir = os.path.dirname(infile) + if os.path.exists(infile): + checked_files.append(infile) + else: + expect(False, f"file not found {infile}") + + schema = { + "3.0": files.get_schema( + "MACHINES_SPEC_FILE", attributes={"version": "3.0"} + ), + "2.0": files.get_schema( + "MACHINES_SPEC_FILE", attributes={"version": "2.0"} + ), + } + # Before v3 there was but one choice + if not schema["3.0"]: + schema = files.get_schema("MACHINES_SPEC_FILE") + + logger.debug("Verifying using schema {}".format(schema)) - GenericXML.__init__(self, infile, schema) - checked_files.append(infile) + GenericXML.__init__(self, infile, schema, read_only=read_only) # Append the contents of $HOME/.cime/config_machines.xml if it exists. # @@ -81,7 +107,7 @@ def __init__(self, infile=None, files=None, machine=None, extra_machines_dir=Non machine is not None, f"Could not initialize machine object from {', '.join(checked_files)}. 
This machine is not available for the target CIME_MODEL.", ) - self.set_machine(machine) + self.set_machine(machine, schema=schema) def get_child(self, name=None, attributes=None, root=None, err_msg=None): if root is None: @@ -129,6 +155,19 @@ def list_available_machines(self): for node in nodes: mach = self.get(node, "MACH") machines.append(mach) + if self.get_version() == 3.0: + machdirs = [ + os.path.basename(f.path) + for f in os.scandir(self.machines_dir) + if f.is_dir() + ] + machdirs.remove("cmake_macros") + machdirs.remove("userdefined_laptop_template") + for mach in machdirs: + if mach not in machines: + machines.append(mach) + + machines.sort() return machines def probe_machine_name(self, warn=True): @@ -140,6 +179,7 @@ def probe_machine_name(self, warn=True): names_not_found = [] nametomatch = socket.getfqdn() + machine = self._probe_machine_name_one_guess(nametomatch) if machine is None: @@ -167,10 +207,15 @@ def _probe_machine_name_one_guess(self, nametomatch): Find a matching regular expression for nametomatch in the NODENAME_REGEX field in the file. First match wins. Returns None if no match is found. 
""" + if self.get_version() < 3: + return self._probe_machine_name_one_guess_v2(nametomatch) + else: + return self._probe_machine_name_one_guess_v3(nametomatch) - machine = None - nodes = self.get_children("machine") + def _probe_machine_name_one_guess_v2(self, nametomatch): + nodes = self.get_children("machine") + machine = None for node in nodes: machtocheck = self.get(node, "MACH") logger.debug("machine is " + machtocheck) @@ -212,7 +257,55 @@ def _probe_machine_name_one_guess(self, nametomatch): return machine - def set_machine(self, machine): + def _probe_machine_name_one_guess_v3(self, nametomatch): + + nodes = self.get_children("NODENAME_REGEX", root=self.root) + + children = [y for x in nodes for y in self.get_children(root=x)] + + for child in children: + machtocheck = self.get(child, "MACH") + regex_str = self.text(child) + logger.debug( + "machine is {} regex {}, nametomatch {}".format( + machtocheck, regex_str, nametomatch + ) + ) + + if regex_str is not None: + # an environment variable can be used + if regex_str.startswith("$ENV"): + machine_value = self.get_resolved_value( + regex_str, allow_unresolved_envvars=True + ) + logger.debug("machine_value is {}".format(machine_value)) + if not machine_value.startswith("$ENV"): + try: + match, this_machine = machine_value.split(":") + except ValueError: + expect( + False, + "Bad formation of NODENAME_REGEX. 
Expected envvar:value, found {}".format( + regex_str + ), + ) + if match == this_machine: + machine = machtocheck + break + else: + regex = re.compile(regex_str) + if regex.match(nametomatch): + logger.debug( + "Found machine: {} matches {}".format( + machtocheck, nametomatch + ) + ) + machine = machtocheck + break + + return machine + + def set_machine(self, machine, schema=None): """ Sets the machine block in the Machines object @@ -225,15 +318,34 @@ def set_machine(self, machine): CIMEError: ERROR: No machine trump found """ if machine == "Query": - self.machine = machine - elif self.machine != machine or self.machine_node is None: - self.machine_node = super(Machines, self).get_child( - "machine", - {"MACH": machine}, - err_msg="No machine {} found".format(machine), - ) - self.machine = machine + return machine + elif self.get_version() == 3: + machines_file = Path.home() / ".cime" / machine / "config_machines.xml" + + if machines_file.exists(): + GenericXML.read( + self, + machines_file, + schema=schema, + ) + else: + machines_file = ( + Path(self.machines_dir) / machine / "config_machines.xml" + ) + + if machines_file.exists(): + GenericXML.read( + self, + machines_file, + schema=schema, + ) + self.machine_node = super(Machines, self).get_child( + "machine", + {"MACH": machine}, + err_msg="No machine {} found".format(machine), + ) + self.machine = machine return machine # pylint: disable=arguments-differ @@ -282,6 +394,11 @@ def get_field_from_list(self, listname, reqval=None, attributes=None): """ expect(self.machine_node is not None, "Machine object has no machine defined") supported_values = self.get_value(listname, attributes=attributes) + logger.debug( + "supported values for {} on {} is {}".format( + listname, self.machine, supported_values + ) + ) # if no match with attributes, try without if supported_values is None: supported_values = self.get_value(listname, attributes=None) @@ -326,26 +443,12 @@ def get_default_MPIlib(self, attributes=None): def 
is_valid_compiler(self, compiler): """ Check the compiler is valid for the current machine - - >>> machobj = Machines(machine="cori-knl") - >>> machobj.get_default_compiler() - 'intel' - >>> machobj.is_valid_compiler("gnu") - True - >>> machobj.is_valid_compiler("nag") - False """ return self.get_field_from_list("COMPILERS", reqval=compiler) is not None def is_valid_MPIlib(self, mpilib, attributes=None): """ Check the MPILIB is valid for the current machine - - >>> machobj = Machines(machine="cori-knl") - >>> machobj.is_valid_MPIlib("mpi-serial") - True - >>> machobj.is_valid_MPIlib("fake-mpi") - False """ return ( mpilib == "mpi-serial" @@ -356,14 +459,6 @@ def is_valid_MPIlib(self, mpilib, attributes=None): def has_batch_system(self): """ Return if this machine has a batch system - - >>> machobj = Machines(machine="cori-knl") - >>> machobj.has_batch_system() - True - >>> machobj.set_machine("melvin") - 'melvin' - >>> machobj.has_batch_system() - False """ result = False batch_system = self.get_optional_child("BATCH_SYSTEM", root=self.machine_node) diff --git a/CIME/XML/tests.py b/CIME/XML/tests.py index 297659b2c33..4a9eefc0fc4 100644 --- a/CIME/XML/tests.py +++ b/CIME/XML/tests.py @@ -5,6 +5,9 @@ from CIME.XML.generic_xml import GenericXML from CIME.XML.files import Files +from CIME.utils import find_system_test +from CIME.SystemTests.system_tests_compare_two import SystemTestsCompareTwo +from CIME.SystemTests.system_tests_compare_n import SystemTestsCompareN logger = logging.getLogger(__name__) @@ -27,6 +30,33 @@ def __init__(self, infile=None, files=None): if os.path.isfile(infile): self.read(infile) + def support_single_exe(self, case): + """Checks if case supports --single-exe. + + Raises: + Exception: If system test cannot be found. + Exception: If `case` does not support --single-exe. 
+ """ + testname = case.get_value("TESTCASE") + + try: + test = find_system_test(testname, case)(case, dry_run=True) + except Exception as e: + raise e + else: + # valid if subclass is SystemTestsCommon or _separate_builds is false + valid = ( + not issubclass(type(test), SystemTestsCompareTwo) + and not issubclass(type(test), SystemTestsCompareN) + ) or not test._separate_builds + + if not valid: + case_base_id = case.get_value("CASEBASEID") + + raise Exception( + f"{case_base_id} does not support the '--single-exe' option as it requires separate builds" + ) + def get_test_node(self, testname): logger.debug("Get settings for {}".format(testname)) node = self.get_child("test", {"NAME": testname}) diff --git a/CIME/baselines/__init__.py b/CIME/baselines/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/CIME/baselines/performance.py b/CIME/baselines/performance.py new file mode 100644 index 00000000000..55092f397e2 --- /dev/null +++ b/CIME/baselines/performance.py @@ -0,0 +1,610 @@ +import os +import glob +import re +import gzip +import logging +from CIME.config import Config +from CIME.utils import expect, get_src_root, get_current_commit, get_timestamp + +logger = logging.getLogger(__name__) + + +def perf_compare_throughput_baseline(case, baseline_dir=None): + """ + Compares model throughput. + + Parameters + ---------- + case : CIME.case.case.Case + Current case object. + baseline_dir : str + Overrides the baseline directory. + + Returns + ------- + below_tolerance : bool + Whether the comparison was below the tolerance. + comment : str + Provides explanation from comparison. 
+ """ + if baseline_dir is None: + baseline_dir = case.get_baseline_dir() + + config = load_coupler_customization(case) + + baseline_file = os.path.join(baseline_dir, "cpl-tput.log") + + baseline = read_baseline_file(baseline_file) + + tolerance = case.get_value("TEST_TPUT_TOLERANCE") + + if tolerance is None: + tolerance = 0.1 + + expect( + tolerance > 0.0, + "Bad value for throughput tolerance in test", + ) + + try: + below_tolerance, comment = config.perf_compare_throughput_baseline( + case, baseline, tolerance + ) + except AttributeError: + below_tolerance, comment = _perf_compare_throughput_baseline( + case, baseline, tolerance + ) + + return below_tolerance, comment + + +def perf_compare_memory_baseline(case, baseline_dir=None): + """ + Compares model highwater memory usage. + + Parameters + ---------- + case : CIME.case.case.Case + Current case object. + baseline_dir : str + Overrides the baseline directory. + + Returns + ------- + below_tolerance : bool + Whether the comparison was below the tolerance. + comment : str + Provides explanation from comparison. + """ + if baseline_dir is None: + baseline_dir = case.get_baseline_dir() + + config = load_coupler_customization(case) + + baseline_file = os.path.join(baseline_dir, "cpl-mem.log") + + baseline = read_baseline_file(baseline_file) + + tolerance = case.get_value("TEST_MEMLEAK_TOLERANCE") + + if tolerance is None: + tolerance = 0.1 + + try: + below_tolerance, comments = config.perf_compare_memory_baseline( + case, baseline, tolerance + ) + except AttributeError: + below_tolerance, comments = _perf_compare_memory_baseline( + case, baseline, tolerance + ) + + return below_tolerance, comments + + +def perf_write_baseline(case, basegen_dir, throughput=True, memory=True): + """ + Writes the baseline performance files. + + Parameters + ---------- + case : CIME.case.case.Case + Current case object. + basegen_dir : str + Path to baseline directory. + throughput : bool + If true, write throughput baseline. 
+ memory : bool + If true, write memory baseline. + """ + config = load_coupler_customization(case) + + if throughput: + try: + tput, mode = perf_get_throughput(case, config) + except RuntimeError as e: + logger.debug("Could not get throughput: {0!s}".format(e)) + else: + baseline_file = os.path.join(basegen_dir, "cpl-tput.log") + + write_baseline_file(baseline_file, tput, mode) + + logger.info("Updated throughput baseline to {!s}".format(tput)) + + if memory: + try: + mem, mode = perf_get_memory(case, config) + except RuntimeError as e: + logger.info("Could not get memory usage: {0!s}".format(e)) + else: + baseline_file = os.path.join(basegen_dir, "cpl-mem.log") + + write_baseline_file(baseline_file, mem, mode) + + logger.info("Updated memory usage baseline to {!s}".format(mem)) + + +def load_coupler_customization(case): + """ + Loads customizations from the coupler `cime_config` directory. + + Parameters + ---------- + case : CIME.case.case.Case + Current case object. + + Returns + ------- + CIME.config.Config + Runtime configuration. + """ + comp_root_dir_cpl = case.get_value("COMP_ROOT_DIR_CPL") + + cpl_customize = os.path.join(comp_root_dir_cpl, "cime_config", "customize") + + return Config.load(cpl_customize) + + +def perf_get_throughput(case, config): + """ + Gets the model throughput. + + First attempts to use a coupler define method to retrieve the + models throughput. If this is not defined then the default + method of parsing the coupler log is used. + + Parameters + ---------- + case : CIME.case.case.Case + Current case object. + + Returns + ------- + str or None + Model throughput. + """ + try: + tput, mode = config.perf_get_throughput(case) + except AttributeError: + tput, mode = _perf_get_throughput(case) + + return tput, mode + + +def perf_get_memory(case, config): + """ + Gets the model memory usage. + + First attempts to use a coupler defined method to retrieve the + models memory usage. 
If this is not defined then the default + method of parsing the coupler log is used. + + Parameters + ---------- + case : CIME.case.case.Case + Current case object. + + Returns + ------- + str or None + Model memory usage. + """ + try: + mem, mode = config.perf_get_memory(case) + except AttributeError: + mem, mode = _perf_get_memory(case) + + return mem, mode + + +def write_baseline_file(baseline_file, value, mode="a"): + """ + Writes value to `baseline_file`. + + Parameters + ---------- + baseline_file : str + Path to the baseline file. + value : str + Value to write. + mode : str + Mode to open file with. + """ + with open(baseline_file, mode) as fd: + fd.write(value) + + +def _perf_get_memory(case, cpllog=None): + """ + Default function to retrieve memory usage from the coupler log. + + If the usage is not available from the log then `None` is returned. + + Parameters + ---------- + case : CIME.case.case.Case + Current case object. + cpllog : str + Overrides the default coupler log. + + Returns + ------- + str or None + Model memory usage or `None`. + + Raises + ------ + RuntimeError + If not enough sample were found. + """ + memlist = perf_get_memory_list(case, cpllog) + + if memlist is None: + raise RuntimeError("Could not get default memory usage") from None + + value = _format_baseline(memlist[-1][1]) + + return value, "a" + + +def perf_get_memory_list(case, cpllog): + if cpllog is None: + cpllog = get_latest_cpl_logs(case) + else: + cpllog = [ + cpllog, + ] + + try: + memlist = get_cpl_mem_usage(cpllog[0]) + except (FileNotFoundError, IndexError): + memlist = None + + logger.debug("Could not parse memory usage from coupler log") + else: + if len(memlist) <= 3: + raise RuntimeError( + f"Found {len(memlist)} memory usage samples, need atleast 4" + ) + + return memlist + + +def _perf_get_throughput(case): + """ + Default function to retrieve throughput from the coupler log. + + If the throughput is not available from the log then `None` is returned. 
+ + Parameters + ---------- + case : CIME.case.case.Case + Current case object. + + Returns + ------- + str or None + Model throughput or `None`. + """ + cpllog = get_latest_cpl_logs(case) + + try: + tput = get_cpl_throughput(cpllog[0]) + except (FileNotFoundError, IndexError): + tput = None + + logger.debug("Could not parse throughput from coupler log") + + if tput is None: + raise RuntimeError("Could not get default throughput") from None + + value = _format_baseline(tput) + + return value, "a" + + +def get_latest_cpl_logs(case): + """ + find and return the latest cpl log file in the run directory + """ + coupler_log_path = case.get_value("RUNDIR") + + cpllog_name = "med" if case.get_value("COMP_INTERFACE") == "nuopc" else "cpl" + + cpllogs = glob.glob(os.path.join(coupler_log_path, "{}*.log.*".format(cpllog_name))) + + lastcpllogs = [] + + if cpllogs: + lastcpllogs.append(max(cpllogs, key=os.path.getctime)) + + basename = os.path.basename(lastcpllogs[0]) + + suffix = basename.split(".", 1)[1] + + for log in cpllogs: + if log in lastcpllogs: + continue + + if log.endswith(suffix): + lastcpllogs.append(log) + + return lastcpllogs + + +def get_cpl_mem_usage(cpllog): + """ + Read memory usage from coupler log. + + Parameters + ---------- + cpllog : str + Path to the coupler log. + + Returns + ------- + list + Memory usage (data, highwater) as recorded by the coupler or empty list. 
+ """ + memlist = [] + + meminfo = re.compile(r".*model date =\s+(\w+).*memory =\s+(\d+\.?\d+).*highwater") + + if cpllog is not None and os.path.isfile(cpllog): + if ".gz" == cpllog[-3:]: + fopen = gzip.open + else: + fopen = open + + with fopen(cpllog, "rb") as f: + for line in f: + m = meminfo.match(line.decode("utf-8")) + + if m: + memlist.append((float(m.group(1)), float(m.group(2)))) + + # Remove the last mem record, it's sometimes artificially high + if len(memlist) > 0: + memlist.pop() + + return memlist + + +def get_cpl_throughput(cpllog): + """ + Reads throuhgput from coupler log. + + Parameters + ---------- + cpllog : str + Path to the coupler log. + + Returns + ------- + int or None + Throughput as recorded by the coupler or None + """ + if cpllog is not None and os.path.isfile(cpllog): + with gzip.open(cpllog, "rb") as f: + cpltext = f.read().decode("utf-8") + + m = re.search(r"# simulated years / cmp-day =\s+(\d+\.\d+)\s", cpltext) + + if m: + return float(m.group(1)) + return None + + +def read_baseline_file(baseline_file): + """ + Reads value from `baseline_file`. + + Strips comments and returns the raw content to be decoded. + + Parameters + ---------- + baseline_file : str + Path to the baseline file. + + Returns + ------- + str + Value stored in baseline file without comments. + """ + with open(baseline_file) as fd: + lines = [x.strip() for x in fd.readlines() if not x.startswith("#") and x != ""] + + return "\n".join(lines) + + +def _perf_compare_throughput_baseline(case, baseline, tolerance): + """ + Default throughput baseline comparison. + + Compares the throughput from the coupler to the baseline value. + + Parameters + ---------- + case : CIME.case.case.Case + Current case object. + baseline : list + Lines contained in the baseline file. + tolerance : float + Allowed tolerance for comparison. + + Returns + ------- + below_tolerance : bool + Whether the comparison was below the tolerance. 
+ comment : str + provides explanation from comparison. + """ + current, _ = _perf_get_throughput(case) + + try: + current = float(_parse_baseline(current)) + except (ValueError, TypeError): + comment = "Could not compare throughput to baseline, as baseline had no value." + + return None, comment + + try: + # default baseline is stored as single float + baseline = float(_parse_baseline(baseline)) + except (ValueError, TypeError): + comment = "Could not compare throughput to baseline, as baseline had no value." + + return None, comment + + # comparing ypd so bigger is better + diff = (baseline - current) / baseline + + below_tolerance = None + + if diff is not None: + below_tolerance = diff < tolerance + + info = "Throughput changed by {:.2f}%: baseline={:.3f} sypd, tolerance={:d}%, current={:.3f} sypd".format( + diff * 100, baseline, int(tolerance * 100), current + ) + if below_tolerance: + comment = "TPUTCOMP: " + info + else: + comment = "Error: TPUTCOMP: " + info + + return below_tolerance, comment + + +def _perf_compare_memory_baseline(case, baseline, tolerance): + """ + Default memory usage baseline comparison. + + Compares the highwater memory usage from the coupler to the baseline value. + + Parameters + ---------- + case : CIME.case.case.Case + Current case object. + baseline : list + Lines contained in the baseline file. + tolerance : float + Allowed tolerance for comparison. + + Returns + ------- + below_tolerance : bool + Whether the comparison was below the tolerance. + comment : str + provides explanation from comparison. + """ + try: + current, _ = _perf_get_memory(case) + except RuntimeError as e: + return None, str(e) + + try: + current = float(_parse_baseline(current)) + except (ValueError, TypeError): + comment = "Could not compare throughput to baseline, as baseline had no value." 
+ + return None, comment + + try: + # default baseline is stored as single float + baseline = float(_parse_baseline(baseline)) + except (ValueError, TypeError): + baseline = 0.0 + + try: + diff = (current - baseline) / baseline + except ZeroDivisionError: + diff = 0.0 + + # Should we check if tolerance is above 0 + below_tolerance = None + comment = "" + + if diff is not None: + below_tolerance = diff < tolerance + + info = "Memory usage highwater changed by {:.2f}%: baseline={:.3f} MB, tolerance={:d}%, current={:.3f} MB".format( + diff * 100, baseline, int(tolerance * 100), current + ) + if below_tolerance: + comment = "MEMCOMP: " + info + else: + comment = "Error: MEMCOMP: " + info + + return below_tolerance, comment + + +def _format_baseline(value): + """ + Encodes value with default baseline format. + + Default format: + sha: date: + + Parameters + ---------- + value : str + Baseline value to encode. + + Returns + ------- + value : str + Baseline entry. + """ + commit_hash = get_current_commit(repo=get_src_root()) + + timestamp = get_timestamp(timestamp_format="%Y-%m-%d_%H:%M:%S") + + return f"sha:{commit_hash} date:{timestamp} {value}\n" + + +def _parse_baseline(data): + """ + Parses default baseline format. + + Default format: + sha: date: + + Parameters + ---------- + data : str + Containing contents of baseline file. + + Returns + ------- + value : str + Value of the latest blessed baseline. 
+ """ + lines = data.split("\n") + lines = [x for x in lines if x != ""] + + try: + value = lines[-1].strip().split(" ")[-1] + except IndexError: + value = None + + return value diff --git a/CIME/bless_test_results.py b/CIME/bless_test_results.py index 62637851cb4..0502c541d3a 100644 --- a/CIME/bless_test_results.py +++ b/CIME/bless_test_results.py @@ -5,22 +5,115 @@ get_scripts_root, EnvironmentContext, parse_test_name, + match_any, ) from CIME.config import Config from CIME.test_status import * from CIME.hist_utils import generate_baseline, compare_baseline from CIME.case import Case from CIME.test_utils import get_test_status_files +from CIME.baselines.performance import ( + perf_compare_throughput_baseline, + perf_compare_memory_baseline, + perf_write_baseline, +) import os, time logger = logging.getLogger(__name__) + +def _bless_throughput( + case, + test_name, + baseline_root, + baseline_name, + report_only, + force, +): + success = True + reason = None + below_threshold = False + + baseline_dir = os.path.join( + baseline_root, baseline_name, case.get_value("CASEBASEID") + ) + + try: + below_threshold, comment = perf_compare_throughput_baseline( + case, baseline_dir=baseline_dir + ) + except FileNotFoundError as e: + comment = f"Could not read throughput file: {e!s}" + except Exception as e: + comment = f"Error comparing throughput baseline: {e!s}" + + if below_threshold: + logger.info("Throughput diff appears to have been already resolved.") + else: + logger.info(comment) + + if not report_only and ( + force or input("Update this diff (y/n)? 
").upper() in ["Y", "YES"] + ): + try: + perf_write_baseline(case, baseline_dir, memory=False) + except Exception as e: + success = False + + reason = f"Failed to write baseline throughput for {test_name!r}: {e!s}" + + return success, reason + + +def _bless_memory( + case, + test_name, + baseline_root, + baseline_name, + report_only, + force, +): + success = True + reason = None + below_threshold = False + + baseline_dir = os.path.join( + baseline_root, baseline_name, case.get_value("CASEBASEID") + ) + + try: + below_threshold, comment = perf_compare_memory_baseline( + case, baseline_dir=baseline_dir + ) + except FileNotFoundError as e: + comment = f"Could not read memory usage file: {e!s}" + except Exception as e: + comment = f"Error comparing memory baseline: {e!s}" + + if below_threshold: + logger.info("Memory usage diff appears to have been already resolved.") + else: + logger.info(comment) + + if not report_only and ( + force or input("Update this diff (y/n)? ").upper() in ["Y", "YES"] + ): + try: + perf_write_baseline(case, baseline_dir, throughput=False) + except Exception as e: + success = False + + reason = f"Failed to write baseline memory usage for test {test_name!r}: {e!s}" + + return success, reason + + ############################################################################### def bless_namelists( test_name, report_only, force, - pesfile, + pes_file, baseline_name, baseline_root, new_test_root=None, @@ -38,11 +131,12 @@ def bless_namelists( ): config = Config.instance() - create_test_gen_args = " -g {} ".format( - baseline_name + create_test_gen_args = ( + " -g {} ".format(baseline_name) if config.create_test_flag_mode == "cesm" else " -g -b {} ".format(baseline_name) ) + if new_test_root is not None: create_test_gen_args += " --test-root={0} --output-root={0} ".format( new_test_root @@ -50,8 +144,8 @@ def bless_namelists( if new_test_id is not None: create_test_gen_args += " -t {}".format(new_test_id) - if pesfile is not None: - 
create_test_gen_args += " --pesfile {}".format(pesfile) + if pes_file is not None: + create_test_gen_args += " --pesfile {}".format(pes_file) stat, out, _ = run_cmd( "{}/create_test {} --namelists-only {} --baseline-root {} -o".format( @@ -67,9 +161,7 @@ def bless_namelists( return True, None -############################################################################### def bless_history(test_name, case, baseline_name, baseline_root, report_only, force): - ############################################################################### real_user = case.get_value("REALUSER") with EnvironmentContext(USER=real_user): @@ -103,7 +195,6 @@ def bless_history(test_name, case, baseline_name, baseline_root, report_only, fo return True, None -############################################################################### def bless_test_results( baseline_name, baseline_root, @@ -114,13 +205,19 @@ def bless_test_results( hist_only=False, report_only=False, force=False, - pesfile=None, + pes_file=None, bless_tests=None, no_skip_pass=False, new_test_root=None, new_test_id=None, + exclude=None, + bless_tput=False, + bless_mem=False, + bless_perf=False, + **_, # Capture all for extra ): - ############################################################################### + bless_all = not (namelists_only | hist_only | bless_tput | bless_mem | bless_perf) + test_status_files = get_test_status_files(test_root, compiler, test_id=test_id) # auto-adjust test-id if multiple rounds of tests were matched @@ -137,10 +234,14 @@ def bless_test_results( most_recent = sorted(timestamps)[-1] logger.info("Matched test batch is {}".format(most_recent)) - bless_tests_counts = None + bless_tests_counts = [] if bless_tests: bless_tests_counts = dict([(bless_test, 0) for bless_test in bless_tests]) + # compile excludes into single regex + if exclude is not None: + exclude = re.compile("|".join([f"({x})" for x in exclude])) + broken_blesses = [] for test_status_file in test_status_files: if not 
most_recent in test_status_file: @@ -153,10 +254,11 @@ def bless_test_results( testopts = parse_test_name(test_name)[1] testopts = [] if testopts is None else testopts build_only = "B" in testopts + # TODO test_name will never be None otherwise `parse_test_name` would raise an error if test_name is None: case_dir = os.path.basename(test_dir) test_name = CIME.utils.normalize_case_id(case_dir) - if not bless_tests or CIME.utils.match_any(test_name, bless_tests_counts): + if not bless_tests or match_any(test_name, bless_tests_counts): broken_blesses.append( ( "unknown", @@ -169,141 +271,172 @@ def bless_test_results( else: continue - if bless_tests in [[], None] or CIME.utils.match_any( - test_name, bless_tests_counts - ): - overall_result, phase = ts.get_overall_test_status( - ignore_namelists=True, ignore_memleak=True - ) + # Must pass tests to continue + has_no_tests = bless_tests in [[], None] + match_test_name = match_any(test_name, bless_tests_counts) + excluded = exclude.match(test_name) if exclude else False - # See if we need to bless namelist - if not hist_only: - if no_skip_pass: - nl_bless = True - else: - nl_bless = ts.get_status(NAMELIST_PHASE) != TEST_PASS_STATUS + if (not has_no_tests and not match_test_name) or excluded: + logger.debug("Skipping {!r}".format(test_name)) + + continue + + overall_result, phase = ts.get_overall_test_status( + ignore_namelists=True, + ignore_memleak=True, + check_throughput=False, + check_memory=False, + ) + + # See if we need to bless namelist + if namelists_only or bless_all: + if no_skip_pass: + nl_bless = True else: - nl_bless = False + nl_bless = ts.get_status(NAMELIST_PHASE) != TEST_PASS_STATUS + else: + nl_bless = False + + hist_bless, tput_bless, mem_bless = [False] * 3 + + # Skip if test is build only i.e. 
testopts contains "B" + if not build_only: + bless_needed = is_bless_needed( + test_name, ts, broken_blesses, overall_result, no_skip_pass, phase + ) # See if we need to bless baselines - if not namelists_only and not build_only: - run_result = ts.get_status(RUN_PHASE) - if run_result is None: - broken_blesses.append((test_name, "no run phase")) - logger.warning( - "Test '{}' did not make it to run phase".format(test_name) - ) - hist_bless = False - elif run_result != TEST_PASS_STATUS: - broken_blesses.append((test_name, "run phase did not pass")) - logger.warning( - "Test '{}' run phase did not pass, not safe to bless, test status = {}".format( - test_name, ts.phase_statuses_dump() - ) - ) - hist_bless = False - elif overall_result == TEST_FAIL_STATUS: - broken_blesses.append((test_name, "test did not pass")) - logger.warning( - "Test '{}' did not pass due to phase {}, not safe to bless, test status = {}".format( - test_name, phase, ts.phase_statuses_dump() - ) - ) - hist_bless = False + if hist_only or bless_all: + hist_bless = bless_needed - elif no_skip_pass: - hist_bless = True - else: - hist_bless = ts.get_status(BASELINE_PHASE) != TEST_PASS_STATUS - else: - hist_bless = False + if bless_tput or bless_perf: + tput_bless = bless_needed - # Now, do the bless - if not nl_bless and not hist_bless: - logger.info( - "Nothing to bless for test: {}, overall status: {}".format( - test_name, overall_result - ) - ) - else: + if not tput_bless: + tput_bless = ts.get_status(THROUGHPUT_PHASE) != TEST_PASS_STATUS - logger.info( - "###############################################################################" - ) - logger.info( - "Blessing results for test: {}, most recent result: {}".format( - test_name, overall_result - ) - ) - logger.info("Case dir: {}".format(test_dir)) - logger.info( - "###############################################################################" - ) - if not force: - time.sleep(2) - - with Case(test_dir) as case: - # Resolve baseline_name and 
baseline_root - if baseline_name is None: - baseline_name_resolved = case.get_value("BASELINE_NAME_CMP") - if not baseline_name_resolved: - baseline_name_resolved = CIME.utils.get_current_branch( - repo=CIME.utils.get_cime_root() - ) - else: - baseline_name_resolved = baseline_name + if bless_mem or bless_perf: + mem_bless = bless_needed - if baseline_root is None: - baseline_root_resolved = case.get_value("BASELINE_ROOT") - else: - baseline_root_resolved = baseline_root + if not mem_bless: + mem_bless = ts.get_status(MEMCOMP_PHASE) != TEST_PASS_STATUS - if baseline_name_resolved is None: - broken_blesses.append( - (test_name, "Could not determine baseline name") + # Now, do the bless + if not nl_bless and not hist_bless and not tput_bless and not mem_bless: + logger.info( + "Nothing to bless for test: {}, overall status: {}".format( + test_name, overall_result + ) + ) + else: + logger.debug("Determined blesses for {!r}".format(test_name)) + logger.debug("nl_bless = {}".format(nl_bless)) + logger.debug("hist_bless = {}".format(hist_bless)) + logger.debug("tput_bless = {}".format(tput_bless)) + logger.debug("mem_bless = {}".format(mem_bless)) + + logger.info( + "###############################################################################" + ) + logger.info( + "Blessing results for test: {}, most recent result: {}".format( + test_name, overall_result + ) + ) + logger.info("Case dir: {}".format(test_dir)) + logger.info( + "###############################################################################" + ) + if not force: + time.sleep(2) + + with Case(test_dir) as case: + # Resolve baseline_name and baseline_root + if baseline_name is None: + baseline_name_resolved = case.get_value("BASELINE_NAME_CMP") + if not baseline_name_resolved: + cime_root = CIME.utils.get_cime_root() + baseline_name_resolved = CIME.utils.get_current_branch( + repo=cime_root ) - continue + else: + baseline_name_resolved = baseline_name - if baseline_root_resolved is None: - 
broken_blesses.append( - (test_name, "Could not determine baseline root") - ) - continue + if baseline_root is None: + baseline_root_resolved = case.get_value("BASELINE_ROOT") + else: + baseline_root_resolved = baseline_root + + if baseline_name_resolved is None: + broken_blesses.append( + (test_name, "Could not determine baseline name") + ) + continue - # Bless namelists - if nl_bless: - success, reason = bless_namelists( + if baseline_root_resolved is None: + broken_blesses.append( + (test_name, "Could not determine baseline root") + ) + continue + + # Bless namelists + if nl_bless: + success, reason = bless_namelists( + test_name, + report_only, + force, + pes_file, + baseline_name_resolved, + baseline_root_resolved, + new_test_root=new_test_root, + new_test_id=new_test_id, + ) + if not success: + broken_blesses.append((test_name, reason)) + + # Bless hist files + if hist_bless: + if "HOMME" in test_name: + success = False + reason = "HOMME tests cannot be blessed with bless_for_tests" + else: + success, reason = bless_history( test_name, - report_only, - force, - pesfile, + case, baseline_name_resolved, baseline_root_resolved, - new_test_root=new_test_root, - new_test_id=new_test_id, + report_only, + force, ) - if not success: - broken_blesses.append((test_name, reason)) - - # Bless hist files - if hist_bless: - if "HOMME" in test_name: - success = False - reason = ( - "HOMME tests cannot be blessed with bless_for_tests" - ) - else: - success, reason = bless_history( - test_name, - case, - baseline_name_resolved, - baseline_root_resolved, - report_only, - force, - ) - - if not success: - broken_blesses.append((test_name, reason)) + + if not success: + broken_blesses.append((test_name, reason)) + + if tput_bless: + success, reason = _bless_throughput( + case, + test_name, + baseline_root_resolved, + baseline_name_resolved, + report_only, + force, + ) + + if not success: + broken_blesses.append((test_name, reason)) + + if mem_bless: + success, reason = 
_bless_memory( + case, + test_name, + baseline_root_resolved, + baseline_name_resolved, + report_only, + force, + ) + + if not success: + broken_blesses.append((test_name, reason)) # Emit a warning if items in bless_tests did not match anything if bless_tests: @@ -327,3 +460,37 @@ def bless_test_results( success = False return success + + +def is_bless_needed(test_name, ts, broken_blesses, overall_result, no_skip_pass, phase): + needed = False + + run_result = ts.get_status(RUN_PHASE) + + if run_result is None: + broken_blesses.append((test_name, "no run phase")) + logger.warning("Test '{}' did not make it to run phase".format(test_name)) + needed = False + elif run_result != TEST_PASS_STATUS: + broken_blesses.append((test_name, "run phase did not pass")) + logger.warning( + "Test '{}' run phase did not pass, not safe to bless, test status = {}".format( + test_name, ts.phase_statuses_dump() + ) + ) + needed = False + elif overall_result == TEST_FAIL_STATUS: + broken_blesses.append((test_name, "test did not pass")) + logger.warning( + "Test '{}' did not pass due to phase {}, not safe to bless, test status = {}".format( + test_name, phase, ts.phase_statuses_dump() + ) + ) + needed = False + + elif no_skip_pass: + needed = True + else: + needed = ts.get_status(BASELINE_PHASE) != TEST_PASS_STATUS + + return needed diff --git a/CIME/build.py b/CIME/build.py index d512991504d..3f5c57ca998 100644 --- a/CIME/build.py +++ b/CIME/build.py @@ -45,7 +45,7 @@ "OS", "PIO_VERSION", "SHAREDLIBROOT", - "SMP_PRESENT", + "BUILD_THREADED", "USE_ESMF_LIB", "USE_MOAB", "CAM_CONFIG_OPTS", @@ -171,6 +171,7 @@ def generate_makefile_macro(case, caseroot): "gptl", "csm_share", "csm_share_cpl7", + "mpi-serial", ] ) cmake_macro = os.path.join(caseroot, "Macros.cmake") @@ -245,10 +246,24 @@ def get_standard_cmake_args(case, sharedpath): cmake_args += " -Dcompile_threaded={} ".format( stringify_bool(case.get_build_threaded()) ) + # check settings for GPU + gpu_type = case.get_value("GPU_TYPE") + 
gpu_offload = case.get_value("GPU_OFFLOAD") + if gpu_type != "none": + expect( + gpu_offload != "none", + "Both GPU_TYPE and GPU_OFFLOAD must be defined if either is", + ) + cmake_args += f" -DGPU_TYPE={gpu_type} -DGPU_OFFLOAD={gpu_offload}" + else: + expect( + gpu_offload == "none", + "Both GPU_TYPE and GPU_OFFLOAD must be defined if either is", + ) ocn_model = case.get_value("COMP_OCN") - atm_model = case.get_value("COMP_ATM") - if ocn_model == "mom" or atm_model == "fv3gfs": + atm_dycore = case.get_value("CAM_DYCORE") + if ocn_model == "mom" or (atm_dycore and atm_dycore == "fv3"): cmake_args += " -DUSE_FMS=TRUE " cmake_args += " -DINSTALL_SHAREDPATH={} ".format( @@ -265,6 +280,7 @@ def get_standard_cmake_args(case, sharedpath): for var in _CMD_ARGS_FOR_BUILD: cmake_args += xml_to_make_variable(case, var, cmake=True) + atm_model = case.get_value("COMP_ATM") if atm_model == "scream": cmake_args += xml_to_make_variable(case, "HOMME_TARGET", cmake=True) @@ -471,59 +487,63 @@ def _build_model_cmake( os.makedirs(build_dir) # Components-specific cmake args. Cmake requires all component inputs to be available - # regardless of requested build list - cmp_cmake_args = "" - all_models = [] - files = Files(comp_interface=comp_interface) - for model, _, _, _, config_dir in complist: - # Create the Filepath and CIME_cppdefs files - if model == "cpl": - config_dir = os.path.join( - files.get_value("COMP_ROOT_DIR_CPL"), "cime_config" - ) - - cmp_cmake_args += _create_build_metadata_for_component( - config_dir, libroot, bldroot, case - ) - all_models.append(model) - - # Call CMake - cmake_args = get_standard_cmake_args(case, sharedpath) - cmake_env = "" - ninja_path = os.path.join(srcroot, "externals/ninja/bin") - if ninja: - cmake_args += " -GNinja " - cmake_env += "PATH={}:$PATH ".format(ninja_path) - - # Glue all pieces together: - # - cmake environment - # - common (i.e. 
project-wide) cmake args - # - component-specific cmake args - # - path to src folder + # regardless of requested build list. We do not want to re-invoke cmake + # if it has already been called. do_timing = "/usr/bin/time -p " if os.path.exists("/usr/bin/time") else "" - cmake_cmd = "{} {}cmake {} {} {}/components".format( - cmake_env, do_timing, cmake_args, cmp_cmake_args, srcroot - ) - stat = 0 - if dry_run: - logger.info("CMake cmd:\ncd {} && {}\n\n".format(bldroot, cmake_cmd)) - else: - logger.info( - "Configuring full {} model with output to file {}".format( - cime_model, bldlog + if not os.path.exists(os.path.join(bldroot, "CMakeCache.txt")): + cmp_cmake_args = "" + all_models = [] + files = Files(comp_interface=comp_interface) + for model, _, _, _, config_dir in complist: + # Create the Filepath and CIME_cppdefs files + if model == "cpl": + config_dir = os.path.join( + files.get_value("COMP_ROOT_DIR_CPL"), "cime_config" + ) + + cmp_cmake_args += _create_build_metadata_for_component( + config_dir, libroot, bldroot, case ) + all_models.append(model) + + # Call CMake + cmake_args = get_standard_cmake_args(case, sharedpath) + cmake_env = "" + ninja_path = os.path.join(srcroot, "externals/ninja/bin") + if ninja: + cmake_args += " -GNinja " + cmake_env += "PATH={}:$PATH ".format(ninja_path) + + # Glue all pieces together: + # - cmake environment + # - common (i.e. 
project-wide) cmake args + # - component-specific cmake args + # - path to src folder + cmake_cmd = "{} {}cmake {} {} {}/components".format( + cmake_env, do_timing, cmake_args, cmp_cmake_args, srcroot ) - logger.info(" Calling cmake directly, see top of log file for specific call") - with open(bldlog, "w") as fd: - fd.write("Configuring with cmake cmd:\n{}\n\n".format(cmake_cmd)) + stat = 0 + if dry_run: + logger.info("CMake cmd:\ncd {} && {}\n\n".format(bldroot, cmake_cmd)) + else: + logger.info( + "Configuring full {} model with output to file {}".format( + cime_model, bldlog + ) + ) + logger.info( + " Calling cmake directly, see top of log file for specific call" + ) + with open(bldlog, "w") as fd: + fd.write("Configuring with cmake cmd:\n{}\n\n".format(cmake_cmd)) - # Add logging before running - cmake_cmd = "({}) >> {} 2>&1".format(cmake_cmd, bldlog) - stat = run_cmd(cmake_cmd, from_dir=bldroot)[0] - expect( - stat == 0, - "BUILD FAIL: cmake config {} failed, cat {}".format(cime_model, bldlog), - ) + # Add logging before running + cmake_cmd = "({}) >> {} 2>&1".format(cmake_cmd, bldlog) + stat = run_cmd(cmake_cmd, from_dir=bldroot)[0] + expect( + stat == 0, + "BUILD FAIL: cmake config {} failed, cat {}".format(cime_model, bldlog), + ) # Set up buildlist if not buildlist: @@ -756,8 +776,9 @@ def _build_libraries( libs.append("CDEPS") ocn_model = case.get_value("COMP_OCN") - atm_model = case.get_value("COMP_ATM") - if ocn_model == "mom" or atm_model == "fv3gfs": + + atm_dycore = case.get_value("CAM_DYCORE") + if ocn_model == "mom" or (atm_dycore and atm_dycore == "fv3"): libs.append("FMS") files = Files(comp_interface=comp_interface) @@ -1111,6 +1132,7 @@ def _case_build_impl( ninst_build = case.get_value("NINST_BUILD") smp_value = case.get_value("SMP_VALUE") clm_use_petsc = case.get_value("CLM_USE_PETSC") + mpaso_use_petsc = case.get_value("MPASO_USE_PETSC") cism_use_trilinos = case.get_value("CISM_USE_TRILINOS") mali_use_albany = 
case.get_value("MALI_USE_ALBANY") mach = case.get_value("MACH") @@ -1135,7 +1157,7 @@ def _case_build_impl( # the future there may be others -- so USE_PETSC will be true if # ANY of those are true. - use_petsc = clm_use_petsc + use_petsc = bool(clm_use_petsc) or bool(mpaso_use_petsc) case.set_value("USE_PETSC", use_petsc) # Set the overall USE_TRILINOS variable to TRUE if any of the diff --git a/CIME/build_scripts/buildlib.cprnc b/CIME/build_scripts/buildlib.cprnc index 5e6708da133..51426de27c6 100755 --- a/CIME/build_scripts/buildlib.cprnc +++ b/CIME/build_scripts/buildlib.cprnc @@ -6,7 +6,7 @@ sys.path.append(os.path.join(_CIMEROOT, "CIME", "Tools")) from standard_script_setup import * from CIME import utils -from CIME.utils import run_bld_cmd_ensure_logging +from CIME.utils import run_bld_cmd_ensure_logging, CIMEError from CIME.case import Case from CIME.build import get_standard_cmake_args @@ -63,10 +63,35 @@ def buildlib(bldroot, installpath, case): ) cmake_args = get_standard_cmake_args(case, "ignore_sharedpath") + os.environ["CIMEROOT"] = cimeroot - cmake_cmd = ". ./.env_mach_specific.sh && NETCDF=$(dirname $(dirname $(which nf-config))) cmake {cmake_args} -DMPILIB=mpi-serial -DDEBUG=FALSE -C Macros.cmake {cimeroot}/CIME/non_py/cprnc -DCMAKE_PREFIX_PATH={dest_path} -DBLDROOT={bldroot}".format( - cimeroot=cimeroot, dest_path=installpath, cmake_args=cmake_args, bldroot=bldroot + + srcroot = case.get_value("SRCROOT") + + cprnc_src_root = None + candidate_paths = ( + os.path.join(cimeroot, "CIME/non_py/cprnc"), + os.path.join(srcroot, "externals/cprnc"), ) + + for candidate in candidate_paths: + if os.path.exists(candidate): + cprnc_src_root = candidate + + break + else: + logger.debug("{!r} is not a valid cprnc source path") + + if cprnc_src_root is None: + raise CIMEError("Could not find a valid cprnc source directory") + + cmake_cmd = ". 
./.env_mach_specific.sh && NETCDF=$(dirname $(dirname $(which nf-config))) cmake {cmake_args} -DMPILIB=mpi-serial -DDEBUG=FALSE -C Macros.cmake {cprnc_src_root} -DCMAKE_PREFIX_PATH={dest_path} -DBLDROOT={bldroot}".format( + cprnc_src_root=cprnc_src_root, + dest_path=installpath, + cmake_args=cmake_args, + bldroot=bldroot, + ) + run_bld_cmd_ensure_logging(cmake_cmd, logger, from_dir=bldroot) gmake_cmd = case.get_value("GMAKE") diff --git a/CIME/build_scripts/buildlib.mpi-serial b/CIME/build_scripts/buildlib.mpi-serial index 83ad88367fd..7aaad973d80 100755 --- a/CIME/build_scripts/buildlib.mpi-serial +++ b/CIME/build_scripts/buildlib.mpi-serial @@ -50,6 +50,11 @@ def buildlib(bldroot, installpath, case): ############################################################################### caseroot = case.get_value("CASEROOT") srcroot = case.get_value("SRCROOT") + # check to see if MPI_SERIAL is installed + with open(os.path.join(caseroot, "Macros.make"), "r") as f: + for line in f: + if "MPI_SERIAL_PATH" in line: + return customize_path = os.path.join(srcroot, "cime_config", "customize") diff --git a/CIME/case/case.py b/CIME/case/case.py index 4924baf8cda..e08b5ffe2c4 100644 --- a/CIME/case/case.py +++ b/CIME/case/case.py @@ -74,6 +74,7 @@ class Case(object): This class extends across multiple files, class members external to this file are listed in the following imports + """ from CIME.case.case_setup import case_setup @@ -123,6 +124,7 @@ def __init__(self, case_root=None, read_only=True, record=False, non_local=False self._env_generic_files = [] self._files = [] self._comp_interface = None + self.gpu_enabled = False self._non_local = non_local self.read_xml() @@ -205,6 +207,13 @@ def __init__(self, case_root=None, read_only=True, record=False, non_local=False self.initialize_derived_attributes() + def get_baseline_dir(self): + baseline_root = self.get_value("BASELINE_ROOT") + + baseline_name = self.get_value("BASECMP_CASE") + + return os.path.join(baseline_root, 
baseline_name) + def check_if_comp_var(self, vid): for env_file in self._env_entryid_files: new_vid, new_comp, iscompvar = env_file.check_if_comp_var(vid) @@ -275,6 +284,9 @@ def initialize_derived_attributes(self): if max_gpus_per_node: self.ngpus_per_node = self.get_value("NGPUS_PER_NODE") + # update the maximum MPI tasks for a GPU node (could differ from a pure-CPU node) + if self.ngpus_per_node > 0: + max_mpitasks_per_node = self.get_value("MAX_CPUTASKS_PER_GPU_NODE") self.tasks_per_numa = int(math.ceil(self.tasks_per_node / 2.0)) smt_factor = max( @@ -451,6 +463,15 @@ def get_values(self, item, attribute=None, resolved=True, subgroup=None): return [] def get_value(self, item, attribute=None, resolved=True, subgroup=None): + if item == "GPU_ENABLED": + if not self.gpu_enabled: + if ( + self.get_value("GPU_TYPE") != "none" + and self.get_value("NGPUS_PER_NODE") > 0 + ): + self.gpu_enabled = True + return "true" if self.gpu_enabled else "false" + result = None for env_file in self._files: # Wait and resolve in self rather than in env_file @@ -460,7 +481,7 @@ def get_value(self, item, attribute=None, resolved=True, subgroup=None): if result is not None: if resolved and isinstance(result, str): - result = self.get_resolved_value(result) + result = self.get_resolved_value(result, subgroup=subgroup) vtype = env_file.get_type_info(item) if vtype is not None and vtype != "char": result = convert_to_type(result, vtype, item) @@ -527,13 +548,17 @@ def get_type_info(self, item): return result - def get_resolved_value(self, item, recurse=0, allow_unresolved_envvars=False): + def get_resolved_value( + self, item, recurse=0, allow_unresolved_envvars=False, subgroup=None + ): num_unresolved = item.count("$") if item else 0 recurse_limit = 10 if num_unresolved > 0 and recurse < recurse_limit: for env_file in self._env_entryid_files: item = env_file.get_resolved_value( - item, allow_unresolved_envvars=allow_unresolved_envvars + item, + 
allow_unresolved_envvars=allow_unresolved_envvars, + subgroup=subgroup, ) if "$" not in item: return item @@ -542,6 +567,7 @@ def get_resolved_value(self, item, recurse=0, allow_unresolved_envvars=False): item, recurse=recurse + 1, allow_unresolved_envvars=allow_unresolved_envvars, + subgroup=subgroup, ) return item @@ -1141,7 +1167,6 @@ def _setup_mach_pes(self, pecount, multi_driver, ninst, machine_name, mpilib): comment = None force_tasks = None force_thrds = None - if match1: opti_tasks = match1.group(1) if opti_tasks.isdigit(): @@ -1211,7 +1236,6 @@ def _setup_mach_pes(self, pecount, multi_driver, ninst, machine_name, mpilib): pstrid = pes_pstrid[pstrid_str] if pstrid_str in pes_pstrid else 1 totaltasks.append((ntasks + rootpe) * nthrds) - mach_pes_obj.set_value(ntasks_str, ntasks) mach_pes_obj.set_value(nthrds_str, nthrds) mach_pes_obj.set_value(rootpe_str, rootpe) @@ -1262,6 +1286,8 @@ def configure( extra_machines_dir=None, case_group=None, ngpus_per_node=0, + gpu_type=None, + gpu_offload=None, ): expect( @@ -1344,6 +1370,7 @@ def configure( and "MPILIB" not in x and "MAX_MPITASKS_PER_NODE" not in x and "MAX_TASKS_PER_NODE" not in x + and "MAX_CPUTASKS_PER_GPU_NODE" not in x and "MAX_GPUS_PER_NODE" not in x ] @@ -1378,6 +1405,7 @@ def configure( for name in ( "MAX_TASKS_PER_NODE", "MAX_MPITASKS_PER_NODE", + "MAX_CPUTASKS_PER_GPU_NODE", "MAX_GPUS_PER_NODE", ): dmax = machobj.get_value(name, {"compiler": compiler}) @@ -1385,13 +1413,23 @@ def configure( dmax = machobj.get_value(name) if dmax: self.set_value(name, dmax) + elif name == "MAX_CPUTASKS_PER_GPU_NODE": + logger.debug( + "Variable {} not defined for machine {} and compiler {}".format( + name, machine_name, compiler + ) + ) elif name == "MAX_GPUS_PER_NODE": logger.debug( - "Variable {} not defined for machine {}".format(name, machine_name) + "Variable {} not defined for machine {} and compiler {}".format( + name, machine_name, compiler + ) ) else: logger.warning( - "Variable {} not defined for machine 
{}".format(name, machine_name) + "Variable {} not defined for machine {} and compiler {}".format( + name, machine_name, compiler + ) ) machdir = machobj.get_machines_dir() @@ -1509,47 +1547,62 @@ def configure( self.set_value("TEST", True) # ---------------------------------------------------------------------------------------------------------- - # Sanity check: - # 1. We assume that there is always a string "gpu" in the compiler name if we want to enable GPU - # 2. For compilers without the string "gpu" in the name: - # 2.1. the ngpus-per-node argument would not update the NGPUS_PER_NODE XML variable, as long as - # the MAX_GPUS_PER_NODE XML variable is not defined (i.e., this argument is not in effect). - # 2.2. if the MAX_GPUS_PER_NODE XML variable is defined, then the ngpus-per-node argument - # must be set to 0. Otherwise, an error will be triggered. - # 3. For compilers with the string "gpu" in the name: - # 3.1. if ngpus-per-node argument is smaller than 0, an error will be triggered. - # 3.2. if ngpus_per_node argument is larger than the value of MAX_GPUS_PER_NODE, the NGPUS_PER_NODE + # Sanity check for a GPU run: + # 1. GPU_TYPE and GPU_OFFLOAD must both be defined to use GPUS + # 2. if ngpus_per_node argument is larger than the value of MAX_GPUS_PER_NODE, the NGPUS_PER_NODE # XML variable in the env_mach_pes.xml file would be set to MAX_GPUS_PER_NODE automatically. - # 3.3. if ngpus-per-node argument is equal to 0, it will be updated to 1 automatically. + # 3. if ngpus-per-node argument is equal to 0, it will be updated to 1 automatically. 
# ---------------------------------------------------------------------------------------------------------- max_gpus_per_node = self.get_value("MAX_GPUS_PER_NODE") - if max_gpus_per_node: - if "gpu" in compiler: - if not ngpus_per_node: - ngpus_per_node = 1 - logger.warning( - "Setting ngpus_per_node to 1 for compiler {}".format(compiler) - ) - expect( - ngpus_per_node > 0, - " ngpus_per_node is expected > 0 for compiler {}; current value is {}".format( - compiler, ngpus_per_node - ), - ) - else: - expect( - ngpus_per_node == 0, - " ngpus_per_node is expected = 0 for compiler {}; current value is {}".format( - compiler, ngpus_per_node - ), - ) + if gpu_type and str(gpu_type).lower() != "none": + expect( + max_gpus_per_node, + f"GPUS are not defined for machine={machine_name} and compiler={compiler}", + ) + expect( + gpu_offload, + "Both gpu-type and gpu-offload must be defined if either is defined", + ) + expect( + compiler in ["nvhpc", "cray"], + f"Only nvhpc and cray compilers are expected for a GPU run; the user given compiler is {compiler}, ", + ) + valid_gpu_type = self.get_value("GPU_TYPE").split(",") + valid_gpu_type.remove("none") + expect( + gpu_type in valid_gpu_type, + f"Unsupported GPU type is given: {gpu_type} ; valid values are {valid_gpu_type}", + ) + valid_gpu_offload = self.get_value("GPU_OFFLOAD").split(",") + valid_gpu_offload.remove("none") + expect( + gpu_offload in valid_gpu_offload, + f"Unsupported GPU programming model is given: {gpu_offload} ; valid values are {valid_gpu_offload}", + ) + self.gpu_enabled = True if ngpus_per_node >= 0: self.set_value( "NGPUS_PER_NODE", - ngpus_per_node + max(1, ngpus_per_node) if ngpus_per_node <= max_gpus_per_node else max_gpus_per_node, ) + elif gpu_offload and str(gpu_offload).lower() != "none": + expect( + False, + "Both gpu-type and gpu-offload must be defined if either is defined", + ) + elif ngpus_per_node != 0: + expect( + False, + f"ngpus_per_node is expected to be 0 for a pure CPU run ; 
{ngpus_per_node} is provided instead ;", + ) + + # Set these two GPU XML variables here to overwrite the default values + # Only set them for "cesm" model + if self._cime_model == "cesm": + self.set_value("GPU_TYPE", str(gpu_type).lower()) + self.set_value("GPU_OFFLOAD", str(gpu_offload).lower()) self.initialize_derived_attributes() @@ -1586,6 +1639,13 @@ def configure( ) env_batch.set_job_defaults(bjobs, self) + # Set BATCH_COMMAND_FLAGS to the default values + + for job in bjobs: + if test and job[0] == "case.run" or not test and job[0] == "case.test": + continue + submitargs = env_batch.get_submit_args(self, job[0], resolve=False) + self.set_value("BATCH_COMMAND_FLAGS", submitargs, subgroup=job[0]) # Make sure that parallel IO is not specified if total_tasks==1 if self.total_tasks == 1: @@ -1723,7 +1783,10 @@ def _create_caseroot_sourcemods(self): if self._comp_interface == "nuopc": components.extend(["cdeps"]) - readme_message = """Put source mods for the {component} library in this directory. + readme_message_start = ( + "Put source mods for the {component} library in this directory." + ) + readme_message_end = """ WARNING: SourceMods are not kept under version control, and can easily become out of date if changes are made to the source code on which they @@ -1757,7 +1820,18 @@ def _create_caseroot_sourcemods(self): # to fail). readme_file = os.path.join(directory, "README") with open(readme_file, "w") as fd: - fd.write(readme_message.format(component=component)) + fd.write(readme_message_start.format(component=component)) + + if component == "cdeps": + readme_message_extra = """ + +Note that this subdirectory should only contain files from CDEPS's +dshr and streams source code directories. 
+Files related to specific data models should go in SourceMods subdirectories +for those data models (e.g., src.datm).""" + fd.write(readme_message_extra) + + fd.write(readme_message_end) if config.copy_cism_source_mods: # Note: this is CESM specific, given that we are referencing cism explitly @@ -2052,12 +2126,10 @@ def get_mpirun_cmd(self, job=None, allow_unresolved_envvars=True, overrides=None mpi_arg_string += " : " ngpus_per_node = self.get_value("NGPUS_PER_NODE") - if ngpus_per_node and ngpus_per_node > 0 and config.gpus_use_set_device_rank: - # 1. this setting is tested on Casper only and may not work on other machines - # 2. need to be revisited in the future for a more adaptable implementation - rundir = self.get_value("RUNDIR") - output_name = rundir + "/set_device_rank.sh" - mpi_arg_string = mpi_arg_string + " " + output_name + " " + if ngpus_per_node and ngpus_per_node > 0: + mpi_gpu_run_script = self.get_value("MPI_GPU_WRAPPER_SCRIPT") + if mpi_gpu_run_script: + mpi_arg_string = mpi_arg_string + " " + mpi_gpu_run_script return self.get_resolved_value( "{} {} {} {}".format( @@ -2354,6 +2426,8 @@ def create( extra_machines_dir=None, case_group=None, ngpus_per_node=0, + gpu_type=None, + gpu_offload=None, ): try: # Set values for env_case.xml @@ -2427,6 +2501,8 @@ def create( extra_machines_dir=extra_machines_dir, case_group=case_group, ngpus_per_node=ngpus_per_node, + gpu_type=gpu_type, + gpu_offload=gpu_offload, ) self.create_caseroot() diff --git a/CIME/case/case_clone.py b/CIME/case/case_clone.py index f829e13993c..7b81e0e91b5 100644 --- a/CIME/case/case_clone.py +++ b/CIME/case/case_clone.py @@ -54,7 +54,10 @@ def create_clone( if os.path.isdir(os.path.join(newcase_cimeroot, "share")) and get_model() == "cesm": srcroot = newcase_cimeroot else: - srcroot = os.path.join(newcase_cimeroot, "..") + srcroot = self.get_value("SRCROOT") + if not srcroot: + srcroot = os.path.join(newcase_cimeroot, "..") + newcase = self.copy(newcasename, newcaseroot, 
newsrcroot=srcroot) with newcase: newcase.set_value("CIMEROOT", newcase_cimeroot) @@ -104,7 +107,7 @@ def create_clone( if exeroot is not None: expect( not keepexe, - "create_case_clone: if keepexe is True, " "then exeroot cannot be set", + "create_case_clone: if keepexe is True, then exeroot cannot be set", ) newcase.set_value("EXEROOT", exeroot) if rundir is not None: @@ -216,8 +219,6 @@ def create_clone( ) ) - newcase.case_setup() - return newcase diff --git a/CIME/case/case_run.py b/CIME/case/case_run.py index b7518090504..2e86e594a5d 100644 --- a/CIME/case/case_run.py +++ b/CIME/case/case_run.py @@ -5,7 +5,7 @@ from CIME.config import Config from CIME.utils import gzip_existing_file, new_lid, run_and_log_case_status from CIME.utils import run_sub_or_cmd, append_status, safe_copy, model_log, CIMEError -from CIME.utils import get_model, batch_jobid +from CIME.utils import batch_jobid, is_comp_standalone from CIME.get_timing import get_timing import shutil, time, sys, os, glob @@ -292,19 +292,15 @@ def _post_run_check(case, lid): ############################################################################### rundir = case.get_value("RUNDIR") - model = case.get_value("MODEL") driver = case.get_value("COMP_INTERFACE") - model = get_model() - fv3_standalone = False + comp_standalone, model = is_comp_standalone(case) - if "CPL" not in case.get_values("COMP_CLASSES"): - fv3_standalone = True if driver == "nuopc": - if fv3_standalone: + if comp_standalone: file_prefix = model else: - file_prefix = "drv" + file_prefix = "med" else: file_prefix = "cpl" @@ -322,7 +318,6 @@ def _post_run_check(case, lid): cpl_logs = [os.path.join(rundir, file_prefix + ".log." + lid)] cpl_logfile = cpl_logs[0] - # find the last model.log and cpl.log model_logfile = os.path.join(rundir, model + ".log." 
+ lid) if not os.path.isfile(model_logfile): @@ -332,15 +327,19 @@ def _post_run_check(case, lid): else: count_ok = 0 for cpl_logfile in cpl_logs: - print(f"cpl_logfile {cpl_logfile}") if not os.path.isfile(cpl_logfile): break with open(cpl_logfile, "r") as fd: - if fv3_standalone and "HAS ENDED" in fd.read(): + logfile = fd.read() + if ( + comp_standalone + and "HAS ENDED" in logfile + or "END OF MODEL RUN" in logfile + ): count_ok += 1 - elif not fv3_standalone and "SUCCESSFUL TERMINATION" in fd.read(): + elif not comp_standalone and "SUCCESSFUL TERMINATION" in logfile: count_ok += 1 - if count_ok != cpl_ninst: + if count_ok < cpl_ninst: expect(False, "Model did not complete - see {} \n ".format(cpl_logfile)) diff --git a/CIME/case/case_setup.py b/CIME/case/case_setup.py index aa8fb8b6b6c..a170d1bfddd 100644 --- a/CIME/case/case_setup.py +++ b/CIME/case/case_setup.py @@ -21,7 +21,6 @@ copy_local_macros_to_dir, ) from CIME.utils import batch_jobid -from CIME.utils import transform_vars from CIME.test_status import * from CIME.locked_files import unlock_file, lock_file @@ -29,6 +28,7 @@ logger = logging.getLogger(__name__) + ############################################################################### def _build_usernl_files(case, model, comp): ############################################################################### @@ -142,12 +142,29 @@ def _create_macros_cmake( ############################################################################### if not os.path.isfile(os.path.join(caseroot, "Macros.cmake")): safe_copy(os.path.join(cmake_macros_dir, "Macros.cmake"), caseroot) - if not os.path.exists(os.path.join(caseroot, "cmake_macros")): - shutil.copytree(cmake_macros_dir, case_cmake_path) - copy_depends_files( - mach_obj.get_machine_name(), mach_obj.machines_dir, caseroot, compiler - ) + if not os.path.exists(case_cmake_path): + os.mkdir(case_cmake_path) + + # This impl is coupled to contents of Macros.cmake + os_ = mach_obj.get_value("OS") + mach = 
mach_obj.get_machine_name() + macros = [ + "universal.cmake", + os_ + ".cmake", + compiler + ".cmake", + "{}_{}.cmake".format(compiler, os), + mach + ".cmake", + "{}_{}.cmake".format(compiler, mach), + "CMakeLists.txt", + ] + for macro in macros: + repo_macro = os.path.join(cmake_macros_dir, macro) + case_macro = os.path.join(case_cmake_path, macro) + if not os.path.exists(case_macro) and os.path.exists(repo_macro): + safe_copy(repo_macro, case_cmake_path) + + copy_depends_files(mach, mach_obj.machines_dir, caseroot, compiler) ############################################################################### @@ -328,7 +345,7 @@ def _case_setup_impl( case.initialize_derived_attributes() - case.set_value("SMP_PRESENT", case.get_build_threaded()) + case.set_value("BUILD_THREADED", case.get_build_threaded()) else: case.check_pelayouts_require_rebuild(models) @@ -344,7 +361,7 @@ def _case_setup_impl( cost_per_node = case.get_value("COSTPES_PER_NODE") case.set_value("COST_PES", case.num_nodes * cost_per_node) threaded = case.get_build_threaded() - case.set_value("SMP_PRESENT", threaded) + case.set_value("BUILD_THREADED", threaded) if threaded and case.total_tasks * case.thread_count > cost_per_node: smt_factor = max( 1.0, int(case.get_value("MAX_TASKS_PER_NODE") / cost_per_node) @@ -405,6 +422,15 @@ def _case_setup_impl( run_cmd_no_fail( "{}/cime_config/cism.template {}".format(glcroot, caseroot) ) + if comp == "cam": + camroot = case.get_value("COMP_ROOT_DIR_ATM") + if os.path.exists(os.path.join(camroot, "cam.case_setup.py")): + logger.debug("Running cam.case_setup.py") + run_cmd_no_fail( + "python {cam}/cime_config/cam.case_setup.py {cam} {case}".format( + cam=camroot, case=caseroot + ) + ) _build_usernl_files(case, "drv", "cpl") @@ -482,31 +508,3 @@ def case_setup(self, clean=False, test_mode=False, reset=False, keep=None): caseroot=caseroot, is_batch=is_batch, ) - - # put the following section here to make sure the rundir is generated first - machdir = 
self.get_value("MACHDIR") - mach = self.get_value("MACH") - ngpus_per_node = self.get_value("NGPUS_PER_NODE") - overrides = {} - overrides["ngpus_per_node"] = ngpus_per_node - input_template = os.path.join(machdir, "mpi_run_gpu.{}".format(mach)) - if os.path.isfile(input_template): - # update the wrapper script that sets the device id for each MPI rank - output_text = transform_vars( - open(input_template, "r").read(), case=self, overrides=overrides - ) - - # write it out to the run dir - rundir = self.get_value("RUNDIR") - output_name = os.path.join(rundir, "set_device_rank.sh") - logger.info("Creating file {}".format(output_name)) - with open(output_name, "w") as f: - f.write(output_text) - - # make the wrapper script executable - if os.path.isfile(output_name): - os.system("chmod +x " + output_name) - else: - expect( - False, "The file {} is not written out correctly.".format(output_name) - ) diff --git a/CIME/case/case_st_archive.py b/CIME/case/case_st_archive.py index 64005b13d09..8238cf2f912 100644 --- a/CIME/case/case_st_archive.py +++ b/CIME/case/case_st_archive.py @@ -364,8 +364,10 @@ def get_histfiles_for_restarts( histfiles = set() rest_hist_varname = archive.get_entry_value("rest_history_varname", archive_entry) if rest_hist_varname != "unset": - cmd = "ncdump -v {} {} ".format( - rest_hist_varname, os.path.join(rundir, restfile) + ncdump = shutil.which("ncdump") + expect(ncdump, "ncdump not found in path") + cmd = "{} -v {} {} ".format( + ncdump, rest_hist_varname, os.path.join(rundir, restfile) ) if testonly: out = "{} =".format(rest_hist_varname) @@ -1184,7 +1186,9 @@ def test_env_archive(self, testdir="env_archive_test"): for comp_archive_spec in comp_archive_specs: comp_expected = archive.get(comp_archive_spec, "compname") - if comp_expected == "ww3": + # Rename ww3 component when case and archive names don't match, + # specific to CESM. 
+ if comp_expected == "ww3" and "ww" in comps_in_case: comp_expected = "ww" comp_class = archive.get(comp_archive_spec, "compclass").upper() if comp_class in components: diff --git a/CIME/case/case_submit.py b/CIME/case/case_submit.py index cc996f2f50b..7893d2d3aae 100644 --- a/CIME/case/case_submit.py +++ b/CIME/case/case_submit.py @@ -12,8 +12,6 @@ from CIME.locked_files import unlock_file, lock_file from CIME.test_status import * -import socket - logger = logging.getLogger(__name__) @@ -39,6 +37,7 @@ def _submit( batch_args=None, workflow=True, chksum=False, + dryrun=False, ): if job is None: job = case.get_first_job() @@ -94,8 +93,10 @@ def _submit( batch_system = "none" else: batch_system = env_batch.get_batch_system_type() - unlock_file(os.path.basename(env_batch.filename), caseroot=caseroot) - case.set_value("BATCH_SYSTEM", batch_system) + + if batch_system != case.get_value("BATCH_SYSTEM"): + unlock_file(os.path.basename(env_batch.filename), caseroot=caseroot) + case.set_value("BATCH_SYSTEM", batch_system) env_batch_has_changed = False if not external_workflow: @@ -162,9 +163,6 @@ def _submit( case.check_case(skip_pnl=skip_pnl, chksum=chksum) if job == case.get_primary_job(): case.check_DA_settings() - if case.get_value("MACH") == "mira": - with open(".original_host", "w") as fd: - fd.write(socket.gethostname()) # Load Modules case.load_env() @@ -183,16 +181,20 @@ def _submit( mail_type=mail_type, batch_args=batch_args, workflow=workflow, + dry_run=dryrun, ) - xml_jobids = [] - for jobname, jobid in job_ids.items(): - logger.info("Submitted job {} with id {}".format(jobname, jobid)) - if jobid: - xml_jobids.append("{}:{}".format(jobname, jobid)) + if dryrun: + for job in job_ids: + xml_jobids.append("{}:{}".format(job[0], job[1])) + else: + for jobname, jobid in job_ids.items(): + logger.info("Submitted job {} with id {}".format(jobname, jobid)) + if jobid: + xml_jobids.append("{}:{}".format(jobname, jobid)) xml_jobid_text = ", ".join(xml_jobids) - if 
xml_jobid_text: + if xml_jobid_text and not dryrun: case.set_value("JOB_IDS", xml_jobid_text) return xml_jobid_text @@ -212,6 +214,7 @@ def submit( batch_args=None, workflow=True, chksum=False, + dryrun=False, ): if resubmit_immediate and self.get_value("MACH") in ["mira", "cetus"]: logger.warning( @@ -264,6 +267,7 @@ def submit( batch_args=batch_args, workflow=workflow, chksum=chksum, + dryrun=dryrun, ) run_and_log_case_status( functor, @@ -287,7 +291,8 @@ def check_case(self, skip_pnl=False, chksum=False): if not skip_pnl: self.create_namelists() # Must be called before check_all_input_data logger.info("Checking that inputdata is available as part of case submission") - self.check_all_input_data(chksum=chksum) + if not self.get_value("TEST"): + self.check_all_input_data(chksum=chksum) if self.get_value("COMP_WAV") == "ww": # the ww3 buildnml has dependencies on inputdata so we must run it again @@ -350,7 +355,7 @@ def check_case(self, skip_pnl=False, chksum=False): expect( self.get_value("BUILD_COMPLETE"), - "Build complete is " "not True please rebuild the model by calling case.build", + "Build complete is not True please rebuild the model by calling case.build", ) logger.info("Check case OK") diff --git a/CIME/case/check_input_data.py b/CIME/case/check_input_data.py index d099a6da046..5bff3823d7c 100644 --- a/CIME/case/check_input_data.py +++ b/CIME/case/check_input_data.py @@ -212,7 +212,7 @@ def _check_all_input_data_impl( chksum=chksum and chksum_found, ) if download and not success: - if not chksum: + if chksum: chksum_found = _download_checksum_file(self.get_value("RUNDIR")) success = _downloadfromserver(self, input_data_root, data_list_dir) diff --git a/CIME/config.py b/CIME/config.py index 8491b2f3f2e..d2306d354d0 100644 --- a/CIME/config.py +++ b/CIME/config.py @@ -9,19 +9,132 @@ logger = logging.getLogger(__name__) -class Config: +class ConfigBase: def __new__(cls): if not hasattr(cls, "_instance"): - cls._instance = super(Config, cls).__new__(cls) + 
cls._instance = super(ConfigBase, cls).__new__(cls) return cls._instance def __init__(self): - if getattr(self, "_loaded", False): - return - self._attribute_config = {} + @property + def loaded(self): + return getattr(self, "_loaded", False) + + @classmethod + def instance(cls): + """Access singleton. + + Explicit way to access singleton, same as calling constructor. + """ + return cls() + + @classmethod + def load(cls, customize_path): + obj = cls() + + logger.debug("Searching %r for files to load", customize_path) + + customize_files = glob.glob(f"{customize_path}/**/*.py", recursive=True) + + # filter out any tests + customize_files = [ + x for x in customize_files if "tests" not in x and "conftest" not in x + ] + + customize_module_spec = importlib.machinery.ModuleSpec("cime_customize", None) + + customize_module = importlib.util.module_from_spec(customize_module_spec) + + sys.modules["CIME.customize"] = customize_module + + for x in sorted(customize_files): + obj._load_file(x, customize_module) + + setattr(obj, "_loaded", True) + + return obj + + def _load_file(self, file_path, customize_module): + logger.debug("Loading file %r", file_path) + + raw_config = utils.import_from_file("raw_config", file_path) + + # filter user define variables and functions + user_defined = [x for x in dir(raw_config) if not x.endswith("__")] + + # set values on this object, will overwrite existing + for x in user_defined: + try: + value = getattr(raw_config, x) + except AttributeError: + # should never hit this + logger.fatal("Attribute %r missing on obejct", x) + + sys.exit(1) + else: + setattr(customize_module, x, value) + + self._set_attribute(x, value) + + def _set_attribute(self, name, value, desc=None): + if hasattr(self, name): + logger.debug("Overwriting %r attribute", name) + + logger.debug("Setting attribute %r with value %r", name, value) + + setattr(self, name, value) + + self._attribute_config[name] = { + "desc": desc, + "default": value, + } + + def 
print_rst_table(self): + max_variable = max([len(x) for x in self._attribute_config.keys()]) + max_default = max( + [len(str(x["default"])) for x in self._attribute_config.values()] + ) + max_type = max( + [len(type(x["default"]).__name__) for x in self._attribute_config.values()] + ) + max_desc = max([len(x["desc"]) for x in self._attribute_config.values()]) + + divider_row = ( + f"{'='*max_variable} {'='*max_default} {'='*max_type} {'='*max_desc}" + ) + + rows = [ + divider_row, + f"Variable{' '*(max_variable-8)} Default{' '*(max_default-7)} Type{' '*(max_type-4)} Description{' '*(max_desc-11)}", + divider_row, + ] + + for variable, value in sorted( + self._attribute_config.items(), key=lambda x: x[0] + ): + variable_fill = max_variable - len(variable) + default_fill = max_default - len(str(value["default"])) + type_fill = max_type - len(type(value["default"]).__name__) + + rows.append( + f"{variable}{' '*variable_fill} {value['default']}{' '*default_fill} {type(value['default']).__name__}{' '*type_fill} {value['desc']}" + ) + + rows.append(divider_row) + + print("\n".join(rows)) + + +class Config(ConfigBase): + def __init__(self): + super().__init__() + + if self.loaded: + return + self._set_attribute( "additional_archive_components", ("drv", "dart"), @@ -177,11 +290,6 @@ def __init__(self): False, desc="If set to `True` then COMP_ROOT_DIR_CPL is set using UFS_DRIVER if defined.", ) - self._set_attribute( - "gpus_use_set_device_rank", - True, - desc="If set to `True` and NGPUS_PER_NODE > 0 then `$RUNDIR/set_device_rank.sh` is appended when the MPI run command is generated.", - ) self._set_attribute( "test_custom_project_machine", "melvin", @@ -200,107 +308,3 @@ def __init__(self): "{srcroot}/libraries/mct", desc="Sets the path to the mct library.", ) - - @classmethod - def instance(cls): - """Access singleton. - - Explicit way to access singleton, same as calling constructor. 
- """ - return cls() - - @classmethod - def load(cls, customize_path): - obj = cls() - - logger.debug("Searching %r for files to load", customize_path) - - customize_files = glob.glob(f"{customize_path}/**/*.py", recursive=True) - - # filter out any tests - customize_files = [ - x for x in customize_files if "tests" not in x and "conftest" not in x - ] - - customize_module_spec = importlib.machinery.ModuleSpec("cime_customize", None) - - customize_module = importlib.util.module_from_spec(customize_module_spec) - - sys.modules["CIME.customize"] = customize_module - - for x in sorted(customize_files): - obj._load_file(x, customize_module) - - setattr(obj, "_loaded", True) - - return obj - - def _load_file(self, file_path, customize_module): - logger.debug("Loading file %r", file_path) - - raw_config = utils.import_from_file("raw_config", file_path) - - # filter user define variables and functions - user_defined = [x for x in dir(raw_config) if not x.endswith("__")] - - # set values on this object, will overwrite existing - for x in user_defined: - try: - value = getattr(raw_config, x) - except AttributeError: - # should never hit this - logger.fatal("Attribute %r missing on obejct", x) - - sys.exit(1) - else: - setattr(customize_module, x, value) - - self._set_attribute(x, value) - - def _set_attribute(self, name, value, desc=None): - if hasattr(self, name): - logger.debug("Overwriting %r attribute", name) - - logger.debug("Setting attribute %r with value %r", name, value) - - setattr(self, name, value) - - self._attribute_config[name] = { - "desc": desc, - "default": value, - } - - def print_rst_table(self): - max_variable = max([len(x) for x in self._attribute_config.keys()]) - max_default = max( - [len(str(x["default"])) for x in self._attribute_config.values()] - ) - max_type = max( - [len(type(x["default"]).__name__) for x in self._attribute_config.values()] - ) - max_desc = max([len(x["desc"]) for x in self._attribute_config.values()]) - - divider_row = ( - 
f"{'='*max_variable} {'='*max_default} {'='*max_type} {'='*max_desc}" - ) - - rows = [ - divider_row, - f"Variable{' '*(max_variable-8)} Default{' '*(max_default-7)} Type{' '*(max_type-4)} Description{' '*(max_desc-11)}", - divider_row, - ] - - for variable, value in sorted( - self._attribute_config.items(), key=lambda x: x[0] - ): - variable_fill = max_variable - len(variable) - default_fill = max_default - len(str(value["default"])) - type_fill = max_type - len(type(value["default"]).__name__) - - rows.append( - f"{variable}{' '*variable_fill} {value['default']}{' '*default_fill} {type(value['default']).__name__}{' '*type_fill} {value['desc']}" - ) - - rows.append(divider_row) - - print("\n".join(rows)) diff --git a/CIME/cs_status.py b/CIME/cs_status.py index 8b4c479b93d..6a65ca4da71 100644 --- a/CIME/cs_status.py +++ b/CIME/cs_status.py @@ -6,7 +6,7 @@ from __future__ import print_function from CIME.XML.standard_module_setup import * from CIME.XML.expected_fails_file import ExpectedFailsFile -from CIME.test_status import TestStatus +from CIME.test_status import TestStatus, SHAREDLIB_BUILD_PHASE, TEST_PEND_STATUS import os import sys from collections import defaultdict @@ -20,6 +20,7 @@ def cs_status( check_throughput=False, check_memory=False, expected_fails_filepath=None, + force_rebuild=False, out=sys.stdout, ): """Print the test statuses of all tests in test_paths. 
The default @@ -56,6 +57,11 @@ def cs_status( for test_path in test_paths: test_dir = os.path.dirname(test_path) ts = TestStatus(test_dir=test_dir) + + if force_rebuild: + with ts: + ts.set_status(SHAREDLIB_BUILD_PHASE, TEST_PEND_STATUS) + test_id = os.path.basename(test_dir).split(".")[-1] if summary: output = _overall_output( diff --git a/CIME/data/config/cesm/config_files.xml b/CIME/data/config/cesm/config_files.xml index a83773d2335..adcd79c85e3 100644 --- a/CIME/data/config/cesm/config_files.xml +++ b/CIME/data/config/cesm/config_files.xml @@ -42,7 +42,8 @@ case_last env_case.xml file containing machine specifications for target model primary component (for documentation only - DO NOT EDIT) - $CIMEROOT/CIME/data/config/xml_schemas/config_machines.xsd + $CIMEROOT/CIME/data/config/xml_schemas/config_machines.xsd + $CIMEROOT/CIME/data/config/xml_schemas/config_machines_version3.xsd @@ -413,7 +414,7 @@ $COMP_ROOT_DIR_ATM/cime_config/testdefs/testlist_cam.xml $COMP_ROOT_DIR_GLC/cime_config/testdefs/testlist_cism.xml $COMP_ROOT_DIR_LND/cime_config/testdefs/testlist_clm.xml - $COMP_ROOT_DIR_LND/cime_config/testdefs/testlist_clm.xml + $COMP_ROOT_DIR_LND/cime_config/testdefs/testlist_slim.xml $COMP_ROOT_DIR_ICE/cime_config/testdefs/testlist_cice.xml $COMP_ROOT_DIR_ICE/cime_config/testdefs/testlist_cice.xml $COMP_ROOT_DIR_OCN/cime_config/testdefs/testlist_pop.xml diff --git a/CIME/data/config/config_tests.xml b/CIME/data/config/config_tests.xml index 045029255ec..0352b5207ca 100644 --- a/CIME/data/config/config_tests.xml +++ b/CIME/data/config/config_tests.xml @@ -66,7 +66,7 @@ ERP pes counts hybrid (open-MP/MPI) restart bfb test from startup, default 6 do an 11 day initial test - write a restart at day 6 (suffix base) half the number of tasks and threads for each component do a 5 day restart test starting from restart at day 6 (suffix rest) - this is just like an ERS test but the pe-counts/threading count are modified on retart + this is just like an ERS test but the 
pe-counts/threading count are modified on restart ERI hybrid/branch/exact restart test, default (by default STOP_N is 22 days) (1) ref1case diff --git a/CIME/data/config/xml_schemas/config_archive.xsd b/CIME/data/config/xml_schemas/config_archive.xsd index bc366e6178a..cc7fe137ab8 100644 --- a/CIME/data/config/xml_schemas/config_archive.xsd +++ b/CIME/data/config/xml_schemas/config_archive.xsd @@ -6,7 +6,7 @@ - + @@ -50,6 +50,7 @@ + diff --git a/CIME/data/config/xml_schemas/config_machines.xsd b/CIME/data/config/xml_schemas/config_machines.xsd index d6e3c280a93..b025c4039e0 100644 --- a/CIME/data/config/xml_schemas/config_machines.xsd +++ b/CIME/data/config/xml_schemas/config_machines.xsd @@ -6,6 +6,8 @@ + + @@ -56,6 +58,10 @@ + + + + @@ -166,6 +172,16 @@ + + + + + + + + - + SITE VENDOR platform, os is ---, xx pes/node, batch system is --- - - .*.cheyenne.ucar.edu - LINUX diff --git a/CIME/data/config/xml_schemas/config_machines_version3.xsd b/CIME/data/config/xml_schemas/config_machines_version3.xsd new file mode 100644 index 00000000000..92b55839fb2 --- /dev/null +++ b/CIME/data/config/xml_schemas/config_machines_version3.xsd @@ -0,0 +1,330 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/CIME/data/config/xml_schemas/env_mach_specific.xsd b/CIME/data/config/xml_schemas/env_mach_specific.xsd index f86c6b9f6e1..3c7a3a0d679 100644 --- 
a/CIME/data/config/xml_schemas/env_mach_specific.xsd +++ b/CIME/data/config/xml_schemas/env_mach_specific.xsd @@ -9,7 +9,9 @@ - + + + @@ -102,6 +104,8 @@ + + @@ -134,7 +138,7 @@ - + diff --git a/CIME/get_tests.py b/CIME/get_tests.py index 857fa974d3e..e4d7d3ea4a8 100644 --- a/CIME/get_tests.py +++ b/CIME/get_tests.py @@ -23,6 +23,7 @@ # "inherit" : (suite1, suite2, ...), # Optional. Suites to inherit tests from. Default is None. Tuple, list, or str. # "time" : "HH:MM:SS", # Optional. Recommended upper-limit on test time. # "share" : True|False, # Optional. If True, all tests in this suite share a build. Default is False. +# "perf" : True|False, # Optional. If True, all tests in this suite will do performance tracking. Default is False. # "tests" : (test1, test2, ...) # Optional. The list of tests for this suite. See above for format. Tuple, list, or str. This is the ONLY inheritable attribute. # } @@ -89,6 +90,20 @@ "SMS_P16.f19_g16_rx1.X", ), }, + "cime_test_perf": { + "time": "0:10:00", + "perf": True, + "tests": ( + "SMS_P2.T42_T42.S", + "SMS_P4.T42_T42.S", + "SMS_P8.T42_T42.S", + "SMS_P16.T42_T42.S", + ), + }, + "cime_test_timing": { + "time": "0:10:00", + "tests": ("SMS_P1.T42_T42.S",), + }, "cime_test_repeat": { "tests": ( "TESTRUNPASS_P1.f19_g16_rx1.A", @@ -115,7 +130,6 @@ "ERR_Ln9.f45_g37_rx1.A", "ERP_Ln9.f45_g37_rx1.A", "SMS_D_Ln9_Mmpi-serial.f19_g16_rx1.A", - "DAE.ww3a.ADWAV", "PET_Ln9_P4.f19_f19.A", "PEM_Ln9_P4.f19_f19.A", "SMS_Ln3.T42_T42.S", @@ -160,12 +174,12 @@ def _get_key_data(raw_dict, key, the_type): def get_test_data(suite): ############################################################################### """ - For a given suite, returns (inherit, time, share, tests) + For a given suite, returns (inherit, time, share, perf, tests) """ raw_dict = _ALL_TESTS[suite] for key in raw_dict.keys(): expect( - key in ["inherit", "time", "share", "tests"], + key in ["inherit", "time", "share", "perf", "tests"], "Unexpected test key '{}'".format(key), ) @@ 
-173,6 +187,7 @@ def get_test_data(suite): _get_key_data(raw_dict, "inherit", tuple), _get_key_data(raw_dict, "time", str), _get_key_data(raw_dict, "share", bool), + _get_key_data(raw_dict, "perf", bool), _get_key_data(raw_dict, "tests", tuple), ) @@ -202,7 +217,7 @@ def get_test_suite( "Compiler {} not valid for machine {}".format(compiler, machine), ) - inherits_from, _, _, tests_raw = get_test_data(suite) + inherits_from, _, _, _, tests_raw = get_test_data(suite) tests = [] for item in tests_raw: expect( @@ -299,6 +314,32 @@ def get_build_groups(tests): return [tuple(item[0]) for item in build_groups] +############################################################################### +def is_perf_test(test): + ############################################################################### + """ + Is the provided test in a suite with perf=True? + + >>> is_perf_test("SMS_P2.T42_T42.S.melvin_gnu") + True + >>> is_perf_test("SMS_P2.f19_g16_rx1.X.melvin_gnu") + False + >>> is_perf_test("PFS_P2.f19_g16_rx1.X.melvin_gnu") + True + """ + # Get a list of performance suites + if test.startswith("PFS"): + return True + else: + suites = get_test_suites() + for suite in suites: + perf = get_test_data(suite)[3] + if perf and suite_has_test(suite, test, skip_inherit=True): + return True + + return False + + ############################################################################### def infer_arch_from_tests(testargs): ############################################################################### diff --git a/CIME/hist_utils.py b/CIME/hist_utils.py index a46cda0e5d1..86ce16e6d43 100644 --- a/CIME/hist_utils.py +++ b/CIME/hist_utils.py @@ -52,7 +52,7 @@ def _iter_model_file_substrs(case): yield model -def copy_histfiles(case, suffix): +def copy_histfiles(case, suffix, match_suffix=None): """Copy the most recent batch of hist files in a case, adding the given suffix. 
This can allow you to temporarily "save" these files so they won't be blown @@ -71,9 +71,15 @@ def copy_histfiles(case, suffix): comments = "Copying hist files to suffix '{}'\n".format(suffix) num_copied = 0 for model in _iter_model_file_substrs(case): + if case.get_value("TEST") and archive.exclude_testing(model): + logger.info( + "Case is a test and component %r is excluded from comparison", model + ) + + continue comments += " Copying hist files for model '{}'\n".format(model) test_hists = archive.get_latest_hist_files( - casename, model, rundir, ref_case=ref_case + casename, model, rundir, suffix=match_suffix, ref_case=ref_case ) num_copied += len(test_hists) for test_hist in test_hists: @@ -287,6 +293,12 @@ def _compare_hists( archive = case.get_env("archive") ref_case = case.get_value("RUN_REFCASE") for model in _iter_model_file_substrs(case): + if case.get_value("TEST") and archive.exclude_testing(model): + logger.info( + "Case is a test and component %r is excluded from comparison", model + ) + + continue if model == "cpl" and suffix2 == "multiinst": multiinst_driver_compare = True comments += " comparing model '{}'\n".format(model) @@ -326,20 +338,30 @@ def _compare_hists( if not ".nc" in hist1: logger.info("Ignoring non-netcdf file {}".format(hist1)) continue - success, cprnc_log_file, cprnc_comment = cprnc( - model, - os.path.join(from_dir1, hist1), - os.path.join(from_dir2, hist2), - case, - from_dir1, - multiinst_driver_compare=multiinst_driver_compare, - outfile_suffix=outfile_suffix, - ignore_fieldlist_diffs=ignore_fieldlist_diffs, - ) + try: + success, cprnc_log_file, cprnc_comment = cprnc( + model, + os.path.join(from_dir1, hist1), + os.path.join(from_dir2, hist2), + case, + from_dir1, + multiinst_driver_compare=multiinst_driver_compare, + outfile_suffix=outfile_suffix, + ignore_fieldlist_diffs=ignore_fieldlist_diffs, + ) + except: + cprnc_comment = "CPRNC executable not found" + cprnc_log_file = None + success = False + if success: comments += " 
{} matched {}\n".format(hist1, hist2) else: - if cprnc_comment == CPRNC_FIELDLISTS_DIFFER: + if not cprnc_log_file: + comments += cprnc_comment + all_success = False + return all_success, comments, 0 + elif cprnc_comment == CPRNC_FIELDLISTS_DIFFER: comments += " {} {} {}\n".format(hist1, FIELDLISTS_DIFFER, hist2) else: comments += " {} {} {}\n".format(hist1, DIFF_COMMENT, hist2) @@ -428,6 +450,11 @@ def cprnc( """ if not cprnc_exe: cprnc_exe = case.get_value("CCSM_CPRNC") + expect( + os.path.isfile(cprnc_exe) and os.access(cprnc_exe, os.X_OK), + f"cprnc {cprnc_exe} does not exist or is not executable", + ) + basename = os.path.basename(file1) multiinst_regex = re.compile(r".*%s[^_]*(_[0-9]{4})[.]h.?[.][^.]+?[.]nc" % model) mstr = "" diff --git a/CIME/jenkins_generic_job.py b/CIME/jenkins_generic_job.py index 0cb963c06bd..d68bc2b007c 100644 --- a/CIME/jenkins_generic_job.py +++ b/CIME/jenkins_generic_job.py @@ -278,6 +278,9 @@ def jenkins_generic_job( update_success, check_throughput, check_memory, + ignore_memleak, + ignore_namelists, + save_timing, pes_file, jenkins_id, queue, @@ -360,16 +363,19 @@ def jenkins_generic_job( create_test_args.append("-j {:d}".format(parallel_jobs)) if walltime is not None: - create_test_args.append(" --walltime " + walltime) + create_test_args.append("--walltime " + walltime) if baseline_root is not None: - create_test_args.append(" --baseline-root " + baseline_root) + create_test_args.append("--baseline-root " + baseline_root) if pes_file is not None: - create_test_args.append(" --pesfile " + pes_file) + create_test_args.append("--pesfile " + pes_file) if queue is not None: - create_test_args.append(" --queue " + queue) + create_test_args.append("--queue " + queue) + + if save_timing: + create_test_args.append("--save-timing") create_test_cmd = "./create_test " + " ".join(create_test_args) @@ -416,7 +422,8 @@ def jenkins_generic_job( no_wait=not use_batch, # wait if using queue check_throughput=check_throughput, 
check_memory=check_memory, - ignore_namelists=False, # don't ignore namelist diffs + ignore_namelists=ignore_namelists, + ignore_memleak=ignore_memleak, cdash_build_name=cdash_build_name, cdash_project=cdash_project, cdash_build_group=cdash_build_group, diff --git a/CIME/non_py/cprnc b/CIME/non_py/cprnc new file mode 160000 index 00000000000..9276b219750 --- /dev/null +++ b/CIME/non_py/cprnc @@ -0,0 +1 @@ +Subproject commit 9276b219750881633d8673c72ec80ac821f96d82 diff --git a/CIME/non_py/cprnc/CMakeLists.txt b/CIME/non_py/cprnc/CMakeLists.txt deleted file mode 100644 index 3ec73581ae2..00000000000 --- a/CIME/non_py/cprnc/CMakeLists.txt +++ /dev/null @@ -1,88 +0,0 @@ -# Generate this with: $cimeroot/CIME/scripts/configure --mpilib=mpi-serial --macros-format=CMake -# You'll also need to source the .env_mach_specific.sh file before trying to build cprnc - -include("${BLDROOT}/Macros.cmake") -set(CMAKE_C_COMPILER "${SCC}") -set(CMAKE_Fortran_COMPILER "${SFC}") - -project(CPRNC C Fortran) -enable_language(Fortran) -set(CMAKE_Fortran_FLAGS "${FFLAGS}") - -message("HERE fortran flags are ${CMAKE_Fortran_FLAGS} FFLAGS are ${FFLAGS}") - -cmake_minimum_required(VERSION 2.8) - -# Find netcdf -set(NetCDF_PATH ${NETCDF_PATH}) - -if (EXISTS ${SRC_ROOT}/libraries/parallelio/cmake) - set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${SRC_ROOT}/libraries/parallelio/cmake) -else() - set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${SRC_ROOT}/externals/scorpio/cmake) -endif() - -find_package (NetCDF COMPONENTS Fortran REQUIRED) - -# generate compare_vars_mod.F90 -add_custom_command( - OUTPUT ${PROJECT_BINARY_DIR}/compare_vars_mod.F90 - COMMAND perl ${PROJECT_SOURCE_DIR}/../externals/genf90/genf90.pl - ${PROJECT_SOURCE_DIR}/compare_vars_mod.F90.in > ${PROJECT_BINARY_DIR}/compare_vars_mod.F90 - DEPENDS ${PROJECT_SOURCE_DIR}/compare_vars_mod.F90.in ${PROJECT_SOURCE_DIR}/../externals/genf90/genf90.pl -) - -# Set up includes -include_directories( - ${NetCDF_Fortran_INCLUDE_DIRS} - 
${PROJECT_SOURCE_DIR} - ${PROJECT_BINARY_DIR} -) - -# -# Set up lib dependencies, relying on nf-config if possible or NetCDF_Fortran_LIBRARIES -# as a fallback. We want the executable to find libraries via RPATH so that cprnc is -# less-dependent on the current environment (since cprnc is built with a serial netcdf, -# it's likely that a parallel case will have different netcdf modules loaded when it -# comes time to execute cprnc). -# -execute_process(COMMAND ${NETCDF_PATH}/bin/nf-config --flibs - RESULT_VARIABLE NF_CONFIG_RESULT - OUTPUT_VARIABLE NF_CONFIG_OUTPUT) - -if (NF_CONFIG_RESULT STREQUAL "0") - separate_arguments(NF_LIB_LIST UNIX_COMMAND "${NF_CONFIG_OUTPUT}") -else() - set(NF_LIB_LIST ${NetCDF_Fortran_LIBRARIES}) -endif() - -message("lib list is: ${NF_LIB_LIST}") - -foreach(NF_LIB IN LISTS NF_LIB_LIST) - if (NF_LIB MATCHES "-l") - continue() - elseif (NF_LIB MATCHES "-L/") - string(REGEX REPLACE "^-L" "" NF_LIB_DIR "${NF_LIB}") - list(APPEND NF_LIB_DIRS ${NF_LIB_DIR}) - else() - get_filename_component(NF_LIB_DIR ${NF_LIB} DIRECTORY) - list(APPEND NF_LIB_DIRS ${NF_LIB_DIR}) - endif() -endforeach() - -message("lib dirs are: ${NF_LIB_DIRS}") - -set(CMAKE_BUILD_RPATH ${NF_LIB_DIRS}) - -# Add targets -set (CPRNC_SRCS - ${PROJECT_BINARY_DIR}/compare_vars_mod.F90 - filestruct.F90 - utils.F90 - prec.F90 - cprnc.F90 -) - -add_executable(cprnc ${CPRNC_SRCS}) - -target_link_libraries(cprnc ${NF_LIB_LIST}) diff --git a/CIME/non_py/cprnc/Depends b/CIME/non_py/cprnc/Depends deleted file mode 100644 index 582f9ee1794..00000000000 --- a/CIME/non_py/cprnc/Depends +++ /dev/null @@ -1,6 +0,0 @@ -cprnc.o: cprnc.F90 filestruct.o compare_vars_mod.o utils.o -filestruct.o: filestruct.F90 prec.o -prec.o : prec.F90 -compare_vars_mod.o: compare_vars_mod.F90 prec.o utils.o filestruct.o -compare_vars_mod.F90 : compare_vars_mod.F90.in -utils.o : utils.F90 filestruct.o prec.o diff --git a/CIME/non_py/cprnc/Makefile b/CIME/non_py/cprnc/Makefile deleted file mode 100644 index 
8c1ef109fb6..00000000000 --- a/CIME/non_py/cprnc/Makefile +++ /dev/null @@ -1,96 +0,0 @@ -#----------------------------------------------------------------------- -# This Makefile is for building cprnc on AIX, Compaq, Linux (with pgf90, -# lf95, ifort, or gfortran compilers), IRIX or SUN platforms. -# -# These macros can be changed by setting environment variables: -# -# Set the path to netcdf: -# -# gmake NETCDF=pathToNetcdf -# -# This sets LIB_NETCDF=$NETCDF/lib and INC_NETCDF=$NETCDF/include -# -# LIB_NETCDF --- Library directory location of netcdf. -# INC_NETCDF --- Include directory location of netcdf. This directory needs to contain -# the NetCDF .mod files, i.e., netcdf.mod and typesizes.mod. -# -# If the include and library files don't have a common root directory then set them -# independently in the commandline: -# -# gmake LIB_NETCDF=pathToLIBFiles INC_NETCDF=pathToINCFiles -# -# You also can set the environment variables: -# -# USER_FC ------ User defined Fortran compiler (for Linux can be pgf90, lf95, ifort, gfortran) -# EXEDIR ------- Directory to build executable in. (Defaults to .) -# VPATH -------- GNU make path. (Defaults to current directory) -# -#------------------------------------------------------------------------ -include Macros.make -# Set up special characters -null := - -EXENAME = cprnc -RM = rm - -NETCDF_PATH ?= $(NETCDF_FORTRAN_PATH) - -# Default for the netcdf library and include directories -LIB_NETCDF := $(NETCDF_PATH)/lib -INC_NETCDF := $(NETCDF_PATH)/include -LDFLAGS = -L$(LIB_NETCDF) -lnetcdff -Wl,-rpath $(LIB_NETCDF) - -# Determine platform -UNAMES := $(shell uname -s) -SNAME := $(shell uname -n | cut -c1-2) - -GENF90 = ../externals/genf90/genf90.pl - -FC := $(SFC) -FFLAGS += -I$(INC_NETCDF) -I. 
-#------------------------------------------------------------------------ -# Default rules and macros -#------------------------------------------------------------------------ - -# If path to source code not given -ifeq ($(VPATH),$(null)) - VPATH:= . -endif - -OBJS := compare_vars_mod.o cprnc.o filestruct.o prec.o utils.o - -# If executable directory not given -ifeq ($(EXEDIR),$(null)) - EXEDIR := . -endif - -.SUFFIXES: -.SUFFIXES: .F90 .f90 .o .in - -.F90.o: - $(FC) -c $(FFLAGS) $< - -.f90.o: - $(FC) -c $(FFLAGS) $< - -$(EXEDIR)/$(EXENAME): $(OBJS) - $(FC) -o $@ $(OBJS) $(LDFLAGS) $(SLIBS) - -compare_vars_mod.F90 : compare_vars_mod.F90.in - perl $(GENF90) $< > $@ - -clean: - $(RM) -f $(OBJS) *.mod $(EXEDIR)/$(EXENAME) - -# remove generated file during clean -realclean: - $(RM) -f $(OBJS) *.mod $(EXEDIR)/$(EXENAME) compare_vars_mod.F90 core - -include $(CURDIR)/Depends - -# 'make check' will run the standard tests but without baseline -# comparisons. For complete testing, you should generally also do -# baseline comparisons. See the notes in test_inputs/README for details. -check: $(EXEDIR)/$(EXENAME) - $(RM) -fr tmpdir - ./run_tests -outdir tmpdir diff --git a/CIME/non_py/cprnc/README b/CIME/non_py/cprnc/README deleted file mode 100644 index f3f2cf09a35..00000000000 --- a/CIME/non_py/cprnc/README +++ /dev/null @@ -1,190 +0,0 @@ -cprnc README ------------- - -cprnc is a generic tool for analyzing a netcdf file or comparing -two netcdf files. - -If you are trying to debug an installed cprnc tool make sure that you -are looking at the correct one by comparing the path to the one in -your case directory. - - -Quick Start Guide: ------------------- - -On cime supported systems you can generate a cmake Macros file using the following -(assuming you are running the command from the directory CIME/data/cprnc): - -export CIMEROOT=../.. -MPILIB=mpi-serial source ./.env_mach_specific.sh -../configure --macros-format=CMake --mpilib=mpi-serial - -Next run cmake . 
to build the Makefile and then -make to build cprnc. - -Finally, put the resulting executable in CCSM_CPRNC as defined in -config_machines.xml. - - - Usage: cprnc [-v] [-d dimname:start[:count]] file1 [file2] - -m: Compare each time sample. Default is false, i.e. match "time" - coordinate values before comparing - -v: Verbose output - -d dimname:start[:count] - Print variable values for the specified dimname index subrange. - - -Users Guide: ------------- - -cprnc is a Fortran-90 application. It relies on netcdf version 3 or -later and uses the f90 netcdf interfaces. It requires a netcdf include -file and a netcdf library. - -cprnc generates an ascii output file via standard out. It initially -summarizes some characteristics of the input file[s]. A compare file is -generally 132 characters wide and an analyze file is less than 80 -characters wide. - -In analyze mode, the output for a field looks like - - ( lon, lat, time, -----) - 259200 ( 587, 134, 1) ( 269, 59, 1) - FX1 96369 8.273160400390625E+02 0.000000000000000E+00 - avg abs field values: 9.052845920820910E+01 - -and a guide to this information is printed at the top of the file - - ( dim1, dim2, dim3, dim4) - ARRSIZ1 ( indx1, indx2, indx3) file 1 - FIELD NVALID MAX MIN - - -The first 10 characters of the field name are identified in the first - dozen columns of the third line. -The first line summarizes the names of the dimensions of the field -The second line summarizes the indices of the maximum and minimum value - of the field for the first three dimensions. If the fourth dimension - exists, it's always assumed to be time. Time is handled separately. -The third line summarizes the number of valid values in the array - and the maximum and minimum value over those valid values. Invalid - values are values that are identified to be "fill" value. -The last line summarizes some overall statistics including the average - absolute value of the valid values of the field. 
- -In comparison mode, the output (132 chars wide) for a field looks like - - 96369 ( lon, lat, time) - 259200 ( 422, 198, 1) ( 203, 186, 1) ( 47, 169, 1) ( 224, 171, 1) - FIRA 96369 1.466549530029297E+02 -3.922052764892578E+01 1.4E+02 -3.037954139709473E+01 1.0E+00 -3.979958057403564E+00 - 96369 1.321966247558594E+02 -1.603044700622559E+01 1.084177169799805E+02 3.982142448425293E+00 - 259200 ( 156, 31, 1) ( 573, 178, 1) ( - avg abs field values: 6.778244097051392E+01 rms diff: 1.4E+01 avg rel diff(npos): 4.6E-02 - 5.960437961084186E+01 avg decimal digits(ndif): 1.2 worst: 0.0 - -and a guide to this information is printed at the top of the file - - NDIFFS ( dim1, dim2, dim3, dim4, ... ) - ARRSIZ1 ( indx1, indx2, indx3, ... ) file 1 - FIELD NVALID1 MAX1 MIN1 DIFFMAX VALUES RDIFMAX VALUES - NVALID2 MAX2 MIN2 - ARRSIZ2 ( indx1, indx2, indx3, ...) file 2 - -The information content is identical to the information in analyze -mode with the following additions. Two additional lines are added -in the main body. Lines 4 and 5 are identical to line 3 and 2 -respectively but are associated with file 2 instead of file 1. -In addition, the right hand side of lines 2, 3, and 4 contain -information about the maximum difference, the location and values -of the maximum difference, the relative difference and the location -and values of the maximum relative difference. The last two line -summarize some overall statistics including average absolute values -of the field on the two files, rms difference, average relative -difference, average number of digits that match, and the worst -case for the number of digits that match. - -"avg rel diff" gives the average relative difference (sum of relative -differences normalized by the number of indices where both variables -have valid values). The denominator for each relative difference is -the MAX of the two values. 
- -"avg decimal digits" is determined by: For each diff, determine the -number of digits that match (as -log10(rdiff(i)); add this to a -running sum; then normalize by the number of diffs (ignoring places -where the two variables are the same). For example, if there are 10 -values, 8 of which match, one has a relative difference of 1e-3 and -one has a relative difference of 1e-5, then the avg decimal digits -will be 4. - -"worst decimal digits" is simply log10(1/rdmax), where rdmax is the -max relative difference (in the above example, this would give 3). - -At the end of the output file, a summary is presented that looks like - -SUMMARY of cprnc: - A total number of 119 fields were compared - of which 83 had non-zero differences - and 17 had differences in fill patterns - and 2 had differences in dimension sizes - A total number of 10 fields could not be analyzed - A total number of 0 time-varying fields on file 1 were not found on file 2. - A total number of 0 time-constant fields on file 1 were not found on file 2. - A total number of 0 time-varying fields on file 2 were not found on file 1. - A total number of 0 time-constant fields on file 2 were not found on file 1. 
- diff_test: the two files seem to be DIFFERENT - - -This summarizes: -- the number of fields that were compared -- the number of fields that differed (not counting fields that differed - only in the fill pattern) -- the number of fields with differences in fill patterns -- the number of fields with differences in dimension sizes -- the number of fields that could not be analyzed -- the number of fields on one file but not the other - - for files with an unlimited (time) dimension, these counts are - broken down into time-varying fields (i.e., fields with an unlimited - dimension) and time-constant fields (i.e., fields without an - unlimited dimension) -- whether the files are IDENTICAL, DIFFERENT, or DIFFER only in their field lists - - Files are considered DIFFERENT if there are differences in the values, fill - patterns or dimension sizes of any variable - - Files are considered to "DIFFER only in their field lists" if matching - variables are all identical, but there are either fields on file1 that are - not on file2, or fields on file2 that are not on file1 - - However, if the only difference in field lists is in the presence - or absence of time-constant fields on a file that has an unlimited - (time) dimension, the files are considered to be IDENTICAL, with - an extra message appended that notes this fact. (While not ideal, - this exception is needed so that exact restart tests pass despite - some time-constant fields being on the output files from one case - but not the other.) - -Developers Guide: ------------------ - -The tool works as follows. - -Fields can be analyzed if they are int, float or double and -have between 0 and n dimensions - -In general, fields that appear on both files are -compared. If they are sizes, no difference -statistics are computed and only a summary of the fields on -the files are presented. If fields only appear -on one file, those fields are analyzed. - -The unlimited dimension is treated uniquely. 
In general, for files -that have a dimension named "time", the time axes are compared -and matching time values on the two files are compared one -timestep at a time. Time values that don't match are skipped. -To override the matching behaviour, use cprnc -m. In this mode, -timestamps are compared in indexical space. In analyze mode, -the fields are analyzed one timestamp at a time. In general, -if there is a "time" axis, it will be the outer-most loop in -the output analysis. In compare mode, fields with a time axis -and a timestamp that are not common between the two files are -ignored. - -It is also possible to compare files that don't have an unlimited -dimension; in this case, the '-m' flag must be given. diff --git a/CIME/non_py/cprnc/compare_vars_mod.F90.in b/CIME/non_py/cprnc/compare_vars_mod.F90.in deleted file mode 100644 index 5b2e1a1966e..00000000000 --- a/CIME/non_py/cprnc/compare_vars_mod.F90.in +++ /dev/null @@ -1,638 +0,0 @@ -module compare_vars_mod - use filestruct, only : file_t, var_t, is_time_varying, vdimsize, dim_t, verbose - use prec, only : r4, r8, i4 - use netcdf, only : nf90_char, nf90_int, nf90_double, nf90_float, nf90_get_var, nf90_max_dims, & - nf90_inq_varid, nf90_get_att, nf90_noerr - use utils, only : checknf90, get_dim_str, get_dimname_str - implicit none - logical :: ignoretime - - interface compute_var_stats -! TYPE real,double,int - module procedure compute_var_stats_{TYPE} - end interface - - interface get_rdiff_stats - ! TYPE real,double - module procedure get_rdiff_stats_{TYPE} - end interface - -contains - - subroutine compare_vars(n,file, vtotal, ndiffs, nfilldiffs, vsizes_differ, & - vnot_analyzed, vtypes_differ ) - integer, intent(in) :: n ! number of files to analyze (1 or 2) - integer, intent(out) :: vtotal - integer, intent(out) :: ndiffs ! number of fields with differences (not counting fields that differ only in the fill pattern) - integer, intent(out) :: nfilldiffs ! 
number of fields with differences in fill pattern - integer, intent(out) :: vsizes_differ - integer, intent(out) :: vnot_analyzed - integer, intent(out) :: vtypes_differ - - - type(file_t) :: file(n) - double precision, pointer :: time(:,:) - double precision :: tdiff - ! start by making sure that times match - integer :: ierr, ns1, ns2, vid1 - type(var_t), pointer :: v1 - integer :: i, t, nvars, t1, t2, nt - integer :: vidnsteph - integer, allocatable :: nsteph(:) - character(len=132) :: dimstr - type(dim_t), pointer :: udim - real(r8), parameter :: timeepsilon = 1.e-9 ! time diff less than this considered insignificant - - - vtotal = 0 - vsizes_differ = 0 - vtypes_differ = 0 - vnot_analyzed = 0 - - if(n==2 .and. .not.ignoretime) then - ! NOTE(wjs, 2019-03-21) Most of the cprnc code allows the unlimited dimension to be - ! named anything - not necessarily 'time'. But this block of code assumes that the - ! unlimited dimension is named 'time' in order to find the associated coordinate - ! variable. We should probably generalize this by looking for a variable with the - ! same name as the unlimited dimension. - call checknf90(nf90_inq_varid(file(1)%fh, 'time', vid1), & - err_str='These files don''t have a time dimension, use cprnc with -m') - - ns1 = file(1)%dim(file(1)%unlimdimid)%dimsize - if(n==2) then - ns2 = file(2)%dim(file(2)%unlimdimid)%dimsize - else - ns2=1 - end if - allocate(time(max(ns1,ns2),2)) - - call checknf90(nf90_get_var(file(1)%fh, vid1, time(1:ns1,1))) - if(n==2) then - call checknf90(nf90_get_var(file(2)%fh, file(1)%var(vid1)%matchid, time(1:ns2,2))) - end if - if(verbose) then - print *,'File 1 time: ', time(1:ns1,1) - print *,'File 2 time: ', time(1:ns2,2) - end if - end if - - nvars = size(file(1)%var) - if (file(1)%has_unlimited_dim()) then - udim => file(1)%dim(file(1)%unlimdimid) - else - if (.not. 
ignoretime) then - write(6,*) 'ERROR: For files without an unlimited dimension,' - write(6,*) 'ignore_time needs to be true (via setting the -m flag to cprnc)' - stop - end if - end if - - ndiffs = 0 - nfilldiffs = 0 - -! First look at variables which do not have unlimdim - do i=1,nvars - v1 => file(1)%var(i) - - if (.not. is_time_varying(v1, file(1)%has_unlimited_dim(), file(1)%unlimdimid)) then - call get_dimname_str(v1%ndims,v1%dimids,file(1)%dim,dimstr) - write(6,140) trim(v1%name),trim(dimstr) - vtotal = vtotal+1 - - call compare_one_var(v1=v1, numcases=n, file=file, varnum=i, & - vsizes_differ=vsizes_differ, & - vnot_analyzed=vnot_analyzed, & - vtypes_differ=vtypes_differ, & - ndiffs=ndiffs, nfilldiffs=nfilldiffs) - - end if - end do - -! Now look at variables that DO have unlimdim - if (file(1)%has_unlimited_dim()) then - - ierr = nf90_inq_varid(file(1)%fh, 'nsteph', vidnsteph) - if(ierr == NF90_NOERR) then - allocate(nsteph(udim%kount)) - call checknf90(nf90_get_var(file(1)%fh, vidnsteph, nsteph)) - end if - - - do t=1,udim%dimsize,udim%kount - t1 = t ! need to find mathing times - assumed for now - t2 = t - if(.not. ignoretime) then - do while(t1<=ns1 .and. t2<= ns2) - tdiff = abs(time(t1,1) - time(t2,2)) - if (tdiff < timeepsilon) exit - if(time(t1,1) < time(t2,2)) then - Write(6,*) 'Skipping a time sample on file 1' - t1=t1+1 - else if(time(t1,1) > time(t2,2)) then - Write(6,*) 'Skipping a time sample on file 2' - t2=t2+1 - end if - end do - if(verbose) print *,__FILE__,__LINE__,tdiff,timeepsilon, t1, t2 - if(tdiff< timeepsilon .and. t1/=t2) then - Write(6,*) 'Found common timesteps:', t1, t2 - else if(tdiff > timeepsilon) then - Write(6,*) 'No matching time found.' 
- vnot_analyzed = nvars - return - end if - if(verbose) print *,__FILE__,__LINE__,time(t1,1),time(t2,2), t1, t2, time(:,:) - end if - - - if(allocated(nsteph)) then - print *,'NSTEPH: ',nsteph(t) - deallocate(nsteph) - end if - - do i=1,nvars - v1 => file(1)%var(i) - if (is_time_varying(v1, file(1)%has_unlimited_dim(), file(1)%unlimdimid)) then - call get_dimname_str(v1%ndims,v1%dimids,file(1)%dim,dimstr) - vtotal = vtotal+1 - write(6,145) trim(v1%name),trim(dimstr), t1, t2 - - call compare_one_var(v1=v1, numcases=n, file=file, varnum=i, & - vsizes_differ=vsizes_differ, & - vnot_analyzed=vnot_analyzed, & - vtypes_differ=vtypes_differ, & - ndiffs=ndiffs, nfilldiffs=nfilldiffs, & - tindex=(/t1, t2/)) - - end if - end do - end do - end if ! if (file(1)%has_unlimited_dim()) - -140 format(1x,a,3x,a) -145 format(1x,a,3x,a,' t_index = ',2i6) - - end subroutine compare_vars - - - ! Compare a single variable, and update counts - ! For variables with multiple time slices, this just does comparisons for a single time slice - subroutine compare_one_var(v1, numcases, file, varnum, & - vsizes_differ, vnot_analyzed, vtypes_differ, & - ndiffs, nfilldiffs, & - tindex) - type(var_t) , intent(in) :: v1 ! variable info for the variable in file 1 - integer , intent(in) :: numcases - type(file_t), intent(in) :: file(numcases) - integer , intent(in) :: varnum - integer , intent(inout) :: vsizes_differ - integer , intent(inout) :: vnot_analyzed - integer , intent(inout) :: vtypes_differ - integer , intent(inout) :: ndiffs - integer , intent(inout) :: nfilldiffs - integer , intent(in), optional :: tindex(numcases) - - integer :: idiff, ifilldiff, isizes_differ, inot_analyzed, itypes_differ - - ! 
initialize output arguments of compare_var in case compare_var doesn't get called - idiff = 0 - ifilldiff = 0 - isizes_differ = 0 - itypes_differ = 0 - inot_analyzed = 0 - - select case(v1%xtype) - case(nf90_int) - call compare_var_int(numcases,file,(/varnum,v1%matchid/), & - idiff, ifilldiff, isizes_differ, itypes_differ, & - tindex) - case(nf90_float) - call compare_var_real(numcases,file,(/varnum,v1%matchid/), & - idiff, ifilldiff, isizes_differ, itypes_differ, & - tindex) - case(nf90_double) - call compare_var_double(numcases,file,(/varnum,v1%matchid/), & - idiff, ifilldiff, isizes_differ, itypes_differ, & - tindex) - case(nf90_char) - inot_analyzed = 1 - ! call compare_var_char(file1,file2,i,v1%matchid) - case default - print *,'Type not recognized for variable: ', v1%name - end select - - vsizes_differ = vsizes_differ+isizes_differ - vtypes_differ = vtypes_differ+itypes_differ - vnot_analyzed = vnot_analyzed+inot_analyzed - ndiffs = ndiffs+idiff - nfilldiffs = nfilldiffs + ifilldiff - end subroutine compare_one_var - - - ! TYPE real,int,double - subroutine compare_var_{TYPE}(n,file, vid, idiff, ifilldiff, ifail, itypes, tindex) - use, intrinsic :: ieee_arithmetic, only: ieee_is_nan - integer, intent(in) :: n - type(file_t) :: file(2) - integer, intent(in) :: vid(2) - integer, intent(out) :: idiff ! 1 if diffs in field, 0 otherwise (0 if only diffs are in fill pattern) - integer, intent(out) :: ifilldiff ! 1 if diffs in fill pattern, 0 otherwise - ! (idiff & ifilldiff are both 1 if there are diffs in both the fill pattern and the valid values) - integer, intent(out) :: ifail ! 1 if variable sizes differ, 0 otherwise - integer, intent(out) :: itypes ! 1 if variable types differ, 0 otherwise - integer, optional :: tindex(2) - - integer :: s1, s2, l1(1), i, ierr - - {VTYPE}, pointer :: buf(:,:), vdiff(:) - {VTYPE} :: fv1, fv2 - real(r8) :: rms, min_val(2), max_val(2), avgval(2), m1, rdmax - real(r8) :: rms_normalized ! 
rms normalized by absolute values - real(r8) :: rms_normalized_denom ! denominator for computing rms_normalized - real(r8) :: rdsum ! sum of relative differences - real(r8) :: rdlogsum ! sum of negative log10 of relative differences - real(r8) :: rdbar ! average of relative differences - real(r8) :: rdlogbar ! rdlogsum normalized by number of non-zero differences - integer :: t(2), n1, n2, min_loc(2), max_loc(2), spacelen - integer :: start(NF90_MAX_DIMS,2), kount(NF90_MAX_DIMS,2), dsizes(NF90_MAX_DIMS,2) - logical, pointer :: mask(:,:) - integer :: diffcnt, rdmaxloc - character(len=80) :: min_str(2), max_str(2), dmax_str, rdmax_str, space - logical :: compare2 - - min_str = '' - max_str = '' - dmax_str = '' - rdmax_str = '' - space = '' - - if(present(tindex)) then - t = tindex - else - t = 1 - end if - - compare2 = (n==2 .and. vid(2)>0) - ifail = 0 - ifilldiff = 0 - idiff = 0 - s1 = vdimsize(file(1)%dim, file(1)%var(vid(1))%dimids) - - if(verbose) print *,__FILE__,__LINE__,s1,file(1)%var(vid(1))%name - - if(compare2) then - s2 = vdimsize(file(2)%dim, file(2)%var(vid(2))%dimids) - - if(s1 /= s2) then - write(6,*) 'WARNING: Variable ',trim(file(1)%var(vid(1))%name),' sizes differ' - write(6,'(a,a32)') ' DIMSIZEDIFF ', file(1)%var(vid(1))%name - ifail = 1 - return - end if - if(file(2)%var(vid(2))%xtype /= file(1)%var(vid(1))%xtype) then - write(6,*) 'WARNING: Variable ',trim(file(1)%var(vid(1))%name),' types differ' - write(6,'(a,a32,2i2)') ' TYPEDIFF ', file(1)%var(vid(1))%name,file(1)%var(vid(1))%xtype,file(2)%var(vid(2))%xtype - itypes = 1 - endif - - end if - n1 = size(file(1)%var(vid(1))%dimids) - - do i=1,n1 - start(i,1) = file(1)%dim(file(1)%var(vid(1))%dimids(i))%start - kount(i,1) = file(1)%dim(file(1)%var(vid(1))%dimids(i))%kount - dsizes(i,1) = file(1)%dim(file(1)%var(vid(1))%dimids(i))%dimsize - if(file(1)%var(vid(1))%dimids(i) == file(1)%unlimdimid) then - start(i,1)=t(1) - dsizes(i,1) = kount(i,1) - end if - end do - - allocate(buf(s1,n)) - - call 
checknf90(nf90_get_var(file(1)%fh, vid(1), buf(:,1), start(1:n1,1), kount(1:n1,1))) - - - allocate(mask(s1,n)) - ierr = nf90_get_att(file(1)%fh, vid(1), '_FillValue', fv1) - if(ierr == NF90_NOERR) then - mask(:,1) = (buf(:,1)/=fv1) - else - mask(:,1) = .true. - end if - if(n1>0) then - call compute_var_stats(buf(:,1), s2, mask(:,1), min_loc(1), max_loc(1), min_val(1), max_val(1), avgval(1)) - call get_dim_str(n1,translate_loc(n1,min_loc(1),start(1:n1,1),kount(1:n1,1),dsizes(1:n1,1)),min_str(1)) - call get_dim_str(n1,translate_loc(n1,max_loc(1),start(1:n1,1),kount(1:n1,1),dsizes(1:n1,1)),max_str(1)) - end if - space = ' ' - spacelen=1 - if(n1>3) spacelen=(n1-3)*8 ! adjusts the output format - - if(compare2) then - n2 = size(file(2)%var(vid(2))%dimids) - if(n2/=n1) then - print *,'WARNING variable ',trim(file(1)%var(vid(1))%name),& - ' dims differ but total size is the same, will try to compare anyway' - endif - - - do i=1,n2 - start(i,2) = file(2)%dim(file(2)%var(vid(2))%dimids(i))%start - kount(i,2) = file(2)%dim(file(2)%var(vid(2))%dimids(i))%kount - dsizes(i,2) = file(2)%dim(file(2)%var(vid(2))%dimids(i))%dimsize - if(file(2)%var(vid(2))%dimids(i) == file(2)%unlimdimid) then - start(i,2)=t(2) - dsizes(i,2) = kount(i,2) - end if - end do - - call checknf90(nf90_get_var(file(2)%fh, vid(2), buf(:,2), start(1:n2,2), kount(1:n2,2))) - ierr = nf90_get_att(file(2)%fh, vid(2), '_FillValue', fv2) - if(ierr == NF90_NOERR) then - mask(:,2) = (buf(:,2)/=fv2) - else - mask(:,2) = .true. - end if - if(n2>0) then - call compute_var_stats(buf(:,2), s2, mask(:,2), min_loc(2), max_loc(2), min_val(2), max_val(2), avgval(2)) - call get_dim_str(n2,translate_loc(n2,min_loc(2),start(1:n2,2),kount(1:n2,2),dsizes(1:n2,2)),min_str(2)) - call get_dim_str(n2,translate_loc(n2,max_loc(2),start(1:n2,2),kount(1:n2,2),dsizes(1:n2,2)),max_str(2)) - end if - diffcnt=0 - if(any(buf(:,1) /= buf(:,2))) then - allocate(vdiff(s1)) - -! Use the union of mask1 and mask2 - if(any(mask(:,1) .neqv. 
mask(:,2))) then - write(6,*) 'WARNING: Fill patterns differ between files' - write(6,'(a,a32)') ' FILLDIFF ', file(1)%var(vid(1))%name - ifilldiff = 1 - mask(:,1) = (mask(:,1) .and. mask(:,2)) - end if - - s2 = count(mask(:,1)) - vdiff = abs(buf(:,1)-buf(:,2)) - rms = sqrt(sum(vdiff**2,mask(:,1))/real(s2)) - diffcnt = 0 -#if {ITYPE}==TYPEDOUBLE || {ITYPE}==TYPEREAL - ! Count the NaN values only if they differ between files - do i=1,s1 - if(mask(i,1)) then - if(ieee_is_nan(buf(i,1)) .neqv. ieee_is_nan(buf(i,2))) then - diffcnt = diffcnt + 1 - endif - endif - enddo -#endif - diffcnt = diffcnt + count(vdiff>0 .and. mask(:,1)) - ! Compute normalized rms difference; normalize using the avg abs field - ! values. Note that this differs from the definition of normalized rms - ! difference found in some references (e.g., normalizing by [max - min], which - ! can be sensitive to outliers). - if (n1 > 0 .and. n2 > 0 .and. rms > 0) then - rms_normalized_denom = (avgval(1) + avgval(2)) / 2.0 - if(abs(rms_normalized_denom)>0)then - rms_normalized = rms / rms_normalized_denom - else - rms_normalized = huge(rms) - end if - else - ! don't try to compute rms_normalized in any of the following conditions: - ! n1 = 0 -- then we won't have avgval(1) - ! n2 = 0 -- then we won't have avgval(2) - ! rms = 0 -- then rms_normalized should be 0... but don't try to compute it - ! above in case we have a 0/0 condition - rms_normalized = 0 - end if - - -! 
diffcnt==0 implies only diffs are in missing values - if(diffcnt>0) then - idiff = 1 - m1 = maxval(vdiff, mask=mask(:,1)) - l1 = maxloc(vdiff, mask=mask(:,1)) - - - if (n1>0) then - call get_dim_str(n1,translate_loc(n1,l1(1),start(1:n1,1),kount(1:n1,1),dsizes(1:n1,1)),dmax_str) - else - dmax_str = ' ' - end if - -#if ({ITYPE} != TYPEINT) - call get_rdiff_stats(s1,buf(:,1),buf(:,2),vdiff,mask(:,1),rdsum, rdlogsum, rdmax, rdmaxloc) - if (n1>0) then - call get_dim_str(n1,translate_loc(n1,rdmaxloc,start(1:n1,1),kount(1:n1,1),dsizes(1:n1,1)),rdmax_str) - else - rdmax_str = ' ' - end if -#endif - - deallocate(vdiff) - - rdbar = rdsum / real(s2) - rdlogbar = rdlogsum / real(diffcnt) - - if(n1==0) then - ! Note that NORMALIZED RMS is NOT computed in this case, so we simply - ! print 0 for that. -#if ({ITYPE} == TYPEINT) - write(6,902) s2, buf(1,1), buf(2,1) - write(6,811) ' RMS ', file(1)%var(vid(1))%name, rms, ' NORMALIZED ', 0 -#else - write(6,803) s2, buf(1,1), buf(2,1) - write(6,812) ' RMS ', file(1)%var(vid(1))%name, rms, ' NORMALIZED ', 0. -#endif - - else - write(6,800) diffcnt, s1, trim(max_str(1)),trim(min_str(1)), trim(dmax_str), trim(rdmax_str) -#if ({ITYPE} == TYPEINT) - ! Note that rdmaxloc is NOT computed in this case, so we print 0 in place - ! of buf(rdmaxloc,1) and buf(rdmaxloc,2) - write(6,903) s2, max_val(1), min_val(1), m1, buf(l1(1),1), rdbar, 0.0, & - count(mask(:,2)), max_val(2), min_val(2), buf(l1(1),2), 0.0 - write(6,810) s1, trim(max_str(2)), trim(min_str(2)) - ! 
write(6,905) avgval(1), rms, rdbar, avgval(2), rdlogbar, log10(1./rdmax) - write(6,812) ' RMS ', file(1)%var(vid(1))%name, rms, ' NORMALIZED ', rms_normalized -#else - write(6,803) s2, max_val(1), space(1:spacelen),min_val(1), m1, buf(l1(1),1), rdbar, buf(rdmaxloc,1), & - count(mask(:,2)), max_val(2), space(1:spacelen),min_val(2), buf(l1(1),2), buf(rdmaxloc,2) - write(6,810) s1, trim(max_str(2)), trim(min_str(2)) - write(6,805) avgval(1), rms, rdbar, avgval(2), rdlogbar, log10(1./rdmax) - write(6,812) ' RMS ', file(1)%var(vid(1))%name, rms, ' NORMALIZED ', rms_normalized -#endif - endif - endif - end if - if(diffcnt==0) then ! no differences found - if(n1>0) then - write(6,810) s1, trim(max_str(1)),trim(min_str(1)) -#if ({ITYPE} == TYPEINT) - write(6,914) s2, max_val(1), space(1:spacelen),min_val(1), count(mask(:,2)),& - max_val(2),space(1:spacelen),min_val(2) - write(6,810) s1, trim(max_str(2)), trim(min_str(2)) - write(6,815) avgval(1), avgval(2) -#else - write(6,814) s2, max_val(1), space(1:spacelen),min_val(1), count(mask(:,2)),& - max_val(2),space(1:spacelen),min_val(2) - write(6,810) s1, trim(max_str(2)), trim(min_str(2)) - write(6,815) avgval(1), avgval(2) -#endif - endif - end if - else ! Single file analysis output - if(n==2 ) then - write(6,*) 'Variable on file1: ',trim(file(1)%var(vid(1))%name),' not found on file2' - end if - - write(6,810 ) s1, trim(max_str(1)),trim(min_str(1)) - write(6, 825) s2, max_val(1),min_val(1) - write(6, 826) avgval(1) - end if - - deallocate(buf, mask) -800 format(3x,i8,1x,i8,2x,a,1x,a,1x,a,1x,a) -803 format(12x, i8,1x,1pe23.15,a,e23.15,e8.1, e23.15,e8.1,e23.15,/, & - 12x, i8,1x, e23.15,a,e23.15,8x, e23.15,8x, e23.15) - - -805 format(10x,'avg abs field values: ',1pe23.15,4x,'rms diff:',e8.1, & - 3x,'avg rel diff(npos): ',e8.1,/, & - 10x,' ', e23.15,24x, & - 'avg decimal digits(ndif): ',0p,f4.1,' worst: ',f4.1) - -810 format(12x,i8,2x,a,1x,a) - -! RMS for int -811 format(a,a32,1pe11.4,11x,a,i12,/) - -! 
RMS for real -812 format(a,a32,1pe11.4,11x,a,1pe11.4,/) - -814 format(12x, i8,1x,e23.15,a,e23.15,/, & - 12x, i8,1x,e23.15,a,e23.15) -815 format(10x,'avg abs field values: ',1pe23.15,/, & - 10x,' ', e23.15) -825 format(12x,i8,1x,1p2e23.15) -826 format(12x,'avg abs field values: ',1pe23.15,/) -902 format(12x, i8,1x,i8,a,i8) - -903 format(12x, i8,3e23.15,i8,2e23.15/, & - 12x, i8,2e23.15,23x,i8,23x,e23.15) - - -905 format(10x,'avg abs field values: ',i8,4x,'rms diff:',i8, & - 3x,'avg rel diff(npos): ',i8,/, & - 10x,' ', i8,24x, & - 'avg decimal digits(ndif): ',i8,' worst: ',i8) -914 format(12x, i8,1x,1pe23.15,a,1pe23.15,/, & - 12x, i8,1x,1pe23.15,a,1pe23.15) -915 format(10x,'avg abs field values: ',i8,/, & - 10x,' ', i8) - - end subroutine compare_var_{TYPE} - - ! TYPE real,double - subroutine get_rdiff_stats_{TYPE} (s1, v1, v2, vdiff, mask, rdsum, rdlogsum, rdmax, loc) - integer, intent(in) :: s1 - {VTYPE}, intent(in) :: v1(:), v2(:), vdiff(:) - logical, intent(in) :: mask(:) - real(r8), intent(out) :: rdsum, rdlogsum, rdmax - integer, intent(out) :: loc - real(r8) :: denom, rdiff(s1) - - integer :: i, iloc(1) - - rdiff=0 - rdsum=0 - rdlogsum=0 - do i=1,s1 - if(vdiff(i)>0) then - denom = max(abs(v1(i)), abs(v2(i))) - rdiff(i) = vdiff(i)/denom - rdsum = rdsum+rdiff(i) - rdlogsum = rdlogsum - log10(rdiff(i)) - end if - end do - rdmax = maxval(rdiff) - iloc = maxloc(rdiff) - - loc = iloc(1) - - end subroutine get_rdiff_stats_{TYPE} - - ! 
TYPE real,int,double - subroutine compute_var_stats_{TYPE} (buf, nvalid, mask, min_loc, max_loc, min_val, max_val, avgval) - {VTYPE}, intent(in) :: buf(:) - logical, intent(in) :: mask(:) - integer, intent(out) :: nvalid, min_loc, max_loc - real(r8), intent(out) :: min_val, max_val, avgval - - integer :: loc(2) - - nvalid = count(mask) - if(nvalid>0) then - loc(1:1) = maxloc(buf, mask=mask) - loc(2:2) = minloc(buf, mask=mask) - max_loc = loc(1) - min_loc = loc(2) - max_val = maxval(buf, mask=mask) - min_val = minval(buf, mask=mask) - avgval = sum(abs(buf),mask=mask)/real(nvalid) - else - max_loc=0 - min_loc=0 - max_val=0 - min_val=0 - avgval=0 - end if - - - - end subroutine compute_var_stats_{TYPE} - - - - - - function translate_loc(ndims, loc, start, kount, dsize) - integer, intent(in) :: ndims, loc, start(:), kount(:), dsize(:) - integer :: translate_loc(ndims) - - integer :: i, tloc, tprod - - tprod = product(kount) - if(loc>tprod) then - write(6,*) 'ERROR in translate_loc: location ',loc,' exceeds array size',tprod - stop - end if - if(ndims<1) then - stop '0D array in translate_loc' - endif - translate_loc = 1 - if(ndims==1) then - translate_loc = loc - else if(loc<=dsize(1)) then - translate_loc(1) = loc - else - tloc = loc - - if(verbose) print *,__LINE__,loc,ndims,dsize(1:ndims) - do i=ndims,1,-1 - tprod = tprod/dsize(i) - if(tloc>=tprod) then - translate_loc(i) = tloc/tprod + start(i) - tloc = tloc - (tloc/tprod)*tprod - end if - end do - translate_loc(1) = translate_loc(1)-1 - - if(verbose) print *,__LINE__,translate_loc(1:ndims) - end if - - end function translate_loc - - - -end module compare_vars_mod diff --git a/CIME/non_py/cprnc/cprnc.F90 b/CIME/non_py/cprnc/cprnc.F90 deleted file mode 100644 index 735b02c6f7d..00000000000 --- a/CIME/non_py/cprnc/cprnc.F90 +++ /dev/null @@ -1,310 +0,0 @@ -program piocprnc - use netcdf - use filestruct - use compare_vars_mod -#ifdef NAGFOR - use f90_unix -#endif - implicit none - - integer :: nargs, n - 
character(len=1024) :: arg = '' ! cmd-line argument - character(len=1024) :: fname(2) = ' ' ! input filenames - integer :: nchars - integer :: numcases=1 - integer :: ierr -! integer, external :: iargc - type(file_t) :: file(2) - type(dim_t) :: dimoptions(12) - integer :: dimoptioncnt - integer :: nvars, ndiffs, nfilldiffs - - ! The following variables count the number of fields found on one file but not the - ! other, only considering (a) fields with an unlimited (time) dimension, and (b) fields - ! without an unlimited (time) dimension on a file that doesn't have an unlimited - ! dimension. - integer :: num_not_found_on_file1, num_not_found_on_file2 - - ! The following variables count the number of fields found on one file but not the - ! other, only considering fields without an unlimited (time) dimension on a file that - ! has an unlimited dimension. - integer :: num_not_found_on_file1_timeconst, num_not_found_on_file2_timeconst - - integer :: num_sizes_differ - integer :: num_types_differ - integer :: num_not_analyzed -! -! Parse arg list -! - - - nargs = command_argument_count () - dimoptioncnt=0 - ignoretime=.false. - n = 1 - do while (n <= nargs) - arg = ' ' - call getarg (n, arg) - n = n + 1 - select case (arg) - case ('-v') - verbose = .true. - case ('-d') - call getarg(n, arg) - n=n+1 - dimoptioncnt=dimoptioncnt+1 - call parsearg(arg, dimoptions(dimoptioncnt)%name, dimoptions(dimoptioncnt)%start, dimoptions(dimoptioncnt)%kount) - - case ('-m') - ignoretime=.true. - case default - if (fname(1) == ' ') then - fname(1) = arg(1:len_trim(arg)) - nchars = len_trim (fname(1)) - write (6,*) 'file 1=',fname(1)(1:nchars) - else if (fname(2)==' ') then - fname(2) = arg(1:len_trim(arg)) - nchars = len_trim (fname(2)) - write (6,*) 'file 2=',fname(2)(1:nchars) - numcases = 2 - else - call usage_exit (' ') - end if - end select - end do -! -! Must have at least 1 file input -! 
- if (fname(1) == ' ') then - call usage_exit ('You must enter at least 1 input file') - end if - -! -! Read the files and initialize file_t -! - do n=1, numcases - ierr = nf90_open(fname(n),NF90_NOWRITE, file(n)%fh) - if(ierr /= NF90_NOERR) then - stop 'Failed to open file ' - endif - if(dimoptioncnt>0) then - call init_file_struct( file(n), dimoptions(1:dimoptioncnt) ) - else - call init_file_struct( file(n)) - end if - end do - - if(numcases==2) then - call compare_metadata(file(1), file(2)) - - call compare_dimensions( file(1)%dim, file(2)%dim) - - num_not_found_on_file1 = 0 - num_not_found_on_file2 = 0 - num_not_found_on_file1_timeconst = 0 - num_not_found_on_file2_timeconst = 0 - call match_vars( file(1), file(2), & - num_not_found_on_file1 = num_not_found_on_file1, & - num_not_found_on_file2 = num_not_found_on_file2, & - num_not_found_on_file1_timeconst = num_not_found_on_file1_timeconst, & - num_not_found_on_file2_timeconst = num_not_found_on_file2_timeconst) - end if - call compare_vars(numcases, file, nvars, ndiffs, nfilldiffs, & - num_sizes_differ, num_not_analyzed, num_types_differ) - - -! -! Summarize results -! 
- write(6,806) - write(6,*) ' ' - write(6,700) 'SUMMARY of cprnc:' - if(numcases==1) then - write(6,700) ' A total number of ',nvars,' fields in file 1 were analyzed (non-compare mode)' - write(6,700) ' A total number of ',num_not_analyzed, & - ' fields in file 1 could not be analyzed' - else - write(6,700) ' A total number of ',nvars,' fields were compared' - write(6,700) ' of which ',ndiffs,' had non-zero differences' - write(6,700) ' and ',nfilldiffs,' had differences in fill patterns' - write(6,700) ' and ',num_sizes_differ,' had different dimension sizes' - write(6,700) ' and ',num_types_differ,' had different data types' - write(6,700) ' A total number of ',num_sizes_differ + num_not_analyzed, & - ' fields could not be analyzed' - - call print_fields_not_found( & - filenum = 1, & - file_has_unlimited_dim = file(1)%has_unlimited_dim(), & - num_not_found = num_not_found_on_file2, & - num_not_found_timeconst = num_not_found_on_file2_timeconst) - - call print_fields_not_found( & - filenum = 2, & - file_has_unlimited_dim = file(2)%has_unlimited_dim(), & - num_not_found = num_not_found_on_file1, & - num_not_found_timeconst = num_not_found_on_file1_timeconst) - - if (nvars == 0 .or. ndiffs > 0 .or. nfilldiffs > 0 .or. & - num_sizes_differ > 0 .or. num_not_analyzed >= nvars .or. & - num_types_differ > 0) then - write(6,700) ' diff_test: the two files seem to be DIFFERENT ' - else if (num_not_found_on_file1 > 0 .or. num_not_found_on_file2 > 0) then - ! Note that we deliberately allow num_not_found_on_file1_timeconst or - ! num_not_found_on_file2_timeconst to be > 0: those do NOT result in a - ! "DIFFER" result. - ! - ! Ideally, we'd count those fields here, too. Doing so would catch more - ! differences and would simplify the cprnc code. But this sometimes leads to - ! problems when comparing restart vs. baseline files - ! (https://github.com/ESMCI/cime/issues/3007). We could add a flag that you - ! 
specify to not count these fields, but there are backwards compatibility - ! issues with doing so. Eventually it could be good to count these absent - ! fields as a DIFFER result by default, adding a flag that you can specify to - ! not count them, then have cime specify this flag when doing the in-test - ! comparison (so absent time-constant fields would result in a DIFFER result - ! for cime's baseline comparisons and for interactive use of cprnc). - write(6,'(a)') ' diff_test: the two files DIFFER only in their field lists' - else - write(6,700) ' diff_test: the two files seem to be IDENTICAL ' - if (num_not_found_on_file1_timeconst > 0 .or. & - num_not_found_on_file2_timeconst > 0) then - write(6,'(a)') ' (But note that there were differences in field lists just for time-constant fields.)' - end if - end if - end if - write(6,*) ' ' -700 format(a,i6,a) -806 format(132('*')) - - - - contains - subroutine usage_exit (arg) - implicit none - - character(len=*), intent(in) :: arg - - if (arg /= ' ') write (6,*) arg - write(6,*)'Usage: cprnc [-m] [-v] [-d dimname:start[:count]] file1 [file2]' - write(6,*)'-v: Verbose output' - write(6,*)'-m: Ignore time variable and just match contents (default is to match the values in variable time.)' - write(6,*)'-d dimname:start[:count]: Print variable values for the specified dimension index start and count. If not present,' - write(6,*)' count will default to 1. If count is < 0 then count will be set to dimsize-start' - write(6,*)' ' - - stop 999 - end subroutine usage_exit - - - subroutine parsearg (arg, dimname, v1, v2) - !------------------------------------------------------------------------------------------- - ! Purpose: Parse cmd line args about printing. - ! - ! Method: Input is expected in the form: dimname:number1[:number2] where dimname is expected to - ! be the name of a dimension in the input file(s), number1 is the starting position in that - ! 
dimension to be evaluated and number2 is the number of values to read in the dimension - ! if number2 is missing all remaining values are read. - ! - !------------------------------------------------------------------------------------------- - implicit none - - character(len=*), intent(in) :: arg ! cmd line arg expected of the form 'num1:num2' or 'num1' - - character(len=*), intent(out) :: dimname - integer, intent(out) :: v1 ! e.g. num1 from above example - integer, intent(out) :: v2 ! e.g. num2 from above example - - integer :: i, j ! indices through arg - integer :: ierr ! io error status - - ! - ! First get a dimension name - ! - dimname = ' ' - i = scan(arg,':') - dimname(1:i-1)=arg(1:i-1) - i=i+1 - - ! - ! now try to get an integer number for everything up to ":" - ! - j=i - do while (j < len(arg) .and. arg(j:j) >= '0' .and. arg(j:j) <= '9' .and. arg(j:j) /= ':') - j = j + 1 - end do - read (arg(i:j-1), '(i5)') v1 - ! - ! Next, if ":" comes after the number, look for the next number - ! - i=j - - if (arg(i:i) == ':') then - j = i + 1 - do while (j < len(arg) .and. scan(arg(j:j),"-0123456789")>0) - j = j + 1 - end do - read (arg(i+1:j-1), '(i5)', iostat=ierr) v2 - ! - ! On unexpected input set v2 = -1, e.g. "-d lon:2:blah" will mean get all lons > 1 - ! - if (ierr /= 0) then - v2 = -1 - end if - else - ! - ! ":" not present. Interpret for example '-d lon:2' to mean '-d lon:2:1' - ! - v2 = 1 - end if - if(verbose) print *,__FILE__,__LINE__,trim(dimname),v1,v2 - return - end subroutine parsearg - - subroutine print_fields_not_found(filenum, file_has_unlimited_dim, & - num_not_found, num_not_found_timeconst) - ! Prints information about the number of fields in filenum not found on the other file - - integer, intent(in) :: filenum ! file number for which we're printing this information - logical, intent(in) :: file_has_unlimited_dim ! whether this file has an unlimited dimension - - ! 
Number of fields in filenum but not on the other file, only considering (a) fields - ! with an unlimited (time) dimension, and (b) fields without an unlimited (time) - ! dimension on a file that doesn't have an unlimited dimension - integer, intent(in) :: num_not_found - - ! Number of fields in filenum but not on the other file, only considering fields - ! without an unlimited (time) dimension on a file that has an unlimited dimension - integer, intent(in) :: num_not_found_timeconst - - integer :: other_filenum - - if (filenum == 1) then - other_filenum = 2 - else if (filenum == 2) then - other_filenum = 1 - else - stop 'Unexpected value for filenum' - end if - - if (file_has_unlimited_dim) then - write(6,'(a,i6,a,i1,a,i1,a)') & - ' A total number of ', num_not_found, & - ' time-varying fields on file ', filenum, & - ' were not found on file ', other_filenum, '.' - write(6,'(a,i6,a,i1,a,i1,a)') & - ' A total number of ', num_not_found_timeconst, & - ' time-constant fields on file ', filenum, & - ' were not found on file ', other_filenum, '.' - else - write(6,'(a,i6,a,i1,a,i1,a)') & - ' A total number of ', num_not_found, & - ' fields on file ', filenum, & - ' were not found on file ', other_filenum, '.' - if (num_not_found_timeconst > 0) then - stop 'Programming error: file has no unlimited dimension, but num_not_found_timeconst > 0' - end if - end if - - end subroutine print_fields_not_found - - end program piocprnc diff --git a/CIME/non_py/cprnc/filestruct.F90 b/CIME/non_py/cprnc/filestruct.F90 deleted file mode 100644 index 814e7faed5e..00000000000 --- a/CIME/non_py/cprnc/filestruct.F90 +++ /dev/null @@ -1,548 +0,0 @@ -module filestruct - use netcdf - implicit none - type dim_t - integer :: dimsize - integer :: start, kount ! 
used for user requested dimension subsetting - character(len=nf90_MAX_NAME) ::name = '' - end type dim_t - - type var_t - integer :: matchid - integer :: ndims - integer :: natts - integer, pointer :: dimids(:) - integer :: xtype - character(len=nf90_MAX_NAME) ::name = '' - end type var_t - - type file_t - integer :: fh - integer :: natts - type(dim_t), pointer :: dim(:) - type(var_t), pointer :: var(:) - integer :: unlimdimid - contains - procedure :: has_unlimited_dim ! logical function; returns true if this file has an unlimited dimension - end type file_t - - logical :: verbose - -contains - logical function has_unlimited_dim(file) - ! Returns true if this file has an unlimited dimension - class(file_t), intent(in) :: file - - if (file%unlimdimid == -1) then - has_unlimited_dim = .false. - else - has_unlimited_dim = .true. - end if - end function has_unlimited_dim - - subroutine init_file_struct( file, dimoptions ) - - type(file_t) :: file - type(dim_t), optional :: dimoptions(:) - integer :: ndims, nvars - integer :: dimids(NF90_MAX_DIMS) - integer :: i, ierr, docnt, n1, n2 - integer :: j, start, kount - character(len=NF90_MAX_NAME) :: name, dname - ierr= nf90_inquire(file%fh, ndims, nvars, file%natts, file%unlimdimid) - - allocate(file%dim(ndims)) - allocate(file%var(nvars)) - - - do i=1,ndims - ierr = nf90_inquire_dimension(file%fh, i, file%dim(i)%name, file%dim(i)%dimsize) - file%dim(i)%start=1 - if(i==file%unlimdimid) then - file%dim(i)%kount=1 - else - file%dim(i)%kount=file%dim(i)%dimsize - end if - end do - - if(present(dimoptions)) then - docnt = size(dimoptions) - do j=1,docnt - start = dimoptions(j)%start - kount = dimoptions(j)%kount - name = dimoptions(j)%name - n1 = len_trim(name) - do i=1,ndims - dname = file%dim(i)%name - n2 = len_trim(dname) - if(name(1:n1).eq.dname(1:n2) ) then - - - if((start > 0) .and. 
(start < file%dim(i)%dimsize)) then - file%dim(i)%start = start - else - write(6,*) 'Command line start value for dim ',name(1:n1),& - ' out of bounds, expected 1-',file%dim(i)%dimsize,' got: ',start - stop - end if - if(kount > 0 .and. start+kount <= file%dim(i)%dimsize) then - file%dim(i)%kount = kount - else if(kount == -1) then - file%dim(i)%kount = file%dim(i)%dimsize-file%dim(i)%start+1 - else - write(6,*) 'Command line count value for dim ',name(1:n1),& - ' out of bounds, expected 1-',file%dim(i)%dimsize-file%dim(i)%start+1,' got: ',kount - stop - - endif - write(6,*) 'Setting dimension bounds for dim ',name(1:n1),file%dim(i)%start,file%dim(i)%kount - - exit - end if - end do - end do - end if - - do i=1,nvars - file%var(i)%matchid=-1 - ierr = nf90_inquire_variable(file%fh, i, file%var(i)%name, file%var(i)%xtype, file%var(i)%ndims, dimids, & - file%var(i)%natts) - allocate(file%var(i)%dimids(file%var(i)%ndims)) - file%var(i)%dimids = dimids(1:file%var(i)%ndims) - end do - - - end subroutine init_file_struct - - - subroutine compare_metadata(file1, file2, vid) - type(file_t) :: file1, file2 - integer, optional, intent(in) :: vid - - integer :: id1, id2, natts1, natts2 - - integer :: i, ierr - character(len=NF90_MAX_NAME) :: attname - integer :: atttype, attlen - - real, pointer :: attreal1(:), attreal2(:) - double precision, pointer :: attdouble1(:),attdouble2(:) - integer, pointer :: attint1(:),attint2(:) - integer, parameter :: maxstrlen=32767 - character(len=maxstrlen) :: attchar1, attchar2 - logical :: found - - - if(present(vid)) then - id1 = vid - id2 = file1%var(id1)%matchid - ierr = nf90_inquire_variable(file1%fh, id1, nAtts=natts1) - ierr = nf90_inquire_variable(file2%fh, id2, nAtts=natts2) - else - id1 = NF90_GLOBAL - id2 = NF90_GLOBAL - natts1 = file1%natts - natts2 = file2%natts - end if - - do i=1,natts1 - found = .true. 
- attname = '' - ierr = nf90_inq_attname(file1%fh, id1, i, attname) - ierr = nf90_inquire_attribute(file1%fh, id1, trim(attname), atttype, attlen) - - select case(atttype) - case(nf90_char) - if (attlen > maxstrlen) then - stop 'maximum string length exceeded' - endif - attchar1=' ' - attchar2=' ' - - ierr = nf90_get_att(file1%fh,id1, trim(attname), attchar1) - ierr = nf90_get_att(file2%fh,id2, trim(attname), attchar2) - if(ierr==NF90_NOERR) then - if(trim(attname).ne.'case' .and. attchar1(1:attlen) .ne. attchar2(1:attlen)) then - print *, 'Attribute ',trim(attname),' from file1: ',attchar1(1:attlen),& - ' does not match that found on file2: ',attchar2(1:attlen) - end if - else - print *, 'Attribute ',trim(attname),' from file1: ',attchar1(1:attlen),& - ' not found on file2' - end if - if(id1==NF90_GLOBAL .and. trim(attname) .eq. 'case') then - print *, 'CASE 1 : ',trim(attchar1) - print *, 'CASE 2 : ',trim(attchar2) - endif - if(id1==NF90_GLOBAL .and. trim(attname) .eq. 'title') then - print *, 'TITLE 1 : ',trim(attchar1) - print *, 'TITLE 2 : ',trim(attchar2) - end if - case(nf90_int) - allocate(attint1(attlen),attint2(attlen)) - ierr = nf90_get_att(file1%fh,id1, trim(attname), attint1) - ierr = nf90_get_att(file2%fh,id2, trim(attname), attint2) - - if(ierr==NF90_NOERR) then - if(any(attint1 /= attint2)) then - print *, 'Attribute ',trim(attname),' from file1: ',attint1,' does not match that found on file2 ',attint2 - end if - else - print *, 'Attribute ',trim(attname),' from file1: ',attint1,' not found on file2' - end if - deallocate(attint1, attint2) - - - case(nf90_float) - allocate(attreal1(attlen),attreal2(attlen)) - ierr = nf90_get_att(file1%fh,id1, trim(attname), attreal1) - ierr = nf90_get_att(file2%fh,id2, trim(attname), attreal2) - if(ierr==NF90_NOERR) then - if(any(attreal1 /= attreal2)) then - print *, 'Attribute ',trim(attname),' from file1: ',attreal1,' does not match that found on file2 ',attreal2 - end if - else - print *, 'Attribute 
',trim(attname),' from file1: ',attreal1,' not found on file2' - end if - deallocate(attreal1, attreal2) - case(nf90_double) - allocate(attdouble1(attlen), attdouble2(attlen)) - ierr = nf90_get_att(file1%fh,id1, trim(attname), attdouble1) - ierr = nf90_get_att(file2%fh,id2, trim(attname), attdouble2) - if(ierr==NF90_NOERR) then - if(any(attdouble1 /= attdouble2)) then - print *, 'Attribute ',trim(attname),' from file1: ',attdouble1,' does not match that found on file2 ',attdouble2 - end if - else - print *, 'Attribute ',trim(attname),' from file1: ',attdouble1,' not found on file2' - end if - deallocate(attdouble1, attdouble2) - case default - print *,' Did not recognize attribute with id: ',i,' type: ',atttype, ' name: ',trim(attname), ' len: ',attlen - end select - end do - - end subroutine compare_metadata - - - - - - - - - subroutine compare_dimensions( dimfile1, dimfile2) - type(dim_t), intent(in) :: dimfile1(:), dimfile2(:) - - integer :: ds1, ds2 - integer :: i, j - logical,pointer :: found(:,:) - - ds1 = size(dimfile1) - ds2 = size(dimfile2) - - allocate(found(2,max(ds1,ds2))) - - found = .false. - do i=1,ds1 - do j=1,ds2 - if(dimfile1(i)%name .eq. dimfile2(j)%name) then - if(dimfile1(i)%dimsize == dimfile2(j)%dimsize) then - print *, 'Dimension ',trim(dimfile1(i)%name), ' matches' - else - print *, 'Dimension ',trim(dimfile1(i)%name), ' differs ', dimfile1(i)%dimsize, ' /= ',dimfile2(j)%dimsize - end if - found(1,i) = .true. - found(2,j) = .true. - end if - end do - end do - do i=1,ds1 - if(.not. found(1,i)) then - print *, 'Could not find match for file 1 dimension ',trim(dimfile1(i)%name) - end if - end do - do i=1,ds2 - if(.not. 
found(2,i)) then - print *, 'Could not find match for file 2 dimension ',trim(dimfile2(i)%name) - end if - end do - deallocate(found) - end subroutine compare_dimensions - - - subroutine match_vars( file1, file2, & - num_not_found_on_file1, num_not_found_on_file2, & - num_not_found_on_file1_timeconst, num_not_found_on_file2_timeconst) - type(file_t), intent(inout) :: file1, file2 - - ! Accumulates count of variables on file2 not found on file1; this only considers (a) - ! fields with an unlimited (time) dimension, and (b) fields without an unlimited - ! (time) dimension on a file that doesn't have an unlimited dimension. - integer, intent(inout) :: num_not_found_on_file1 - - ! Accumulates count of variables on file1 not found on file2; this only considers (a) - ! fields with an unlimited (time) dimension, and (b) fields without an unlimited - ! (time) dimension on a file that doesn't have an unlimited dimension. - integer, intent(inout) :: num_not_found_on_file2 - - ! Accumulates count of variables on file2 not found on file1; this only considers - ! fields without an unlimited (time) dimension on a file that has an unlimited - ! dimension. - integer, intent(inout) :: num_not_found_on_file1_timeconst - - ! Accumulates count of variables on file1 not found on file2; this only considers - ! fields without an unlimited (time) dimension on a file that has an unlimited - ! dimension. - integer, intent(inout) :: num_not_found_on_file2_timeconst - - type(var_t), pointer :: varfile1(:),varfile2(:) - - integer :: vs1, vs2, i, j - - - - varfile1 => file1%var - varfile2 => file2%var - - vs1 = size(varfile1) - vs2 = size(varfile2) - - do i=1,vs1 - do j=1,vs2 - if(varfile1(i)%name .eq. varfile2(j)%name) then - varfile1(i)%matchid=j - varfile2(j)%matchid=i - end if - end do - end do - do i=1,vs1 - if(varfile1(i)%matchid<0) then - print *, 'Could not find match for file1 variable ',trim(varfile1(i)%name), ' in file2' - if (file1%has_unlimited_dim() .and. & - .not. 
is_time_varying(varfile1(i), file1%has_unlimited_dim(), file1%unlimdimid)) then - num_not_found_on_file2_timeconst = num_not_found_on_file2_timeconst + 1 - else - num_not_found_on_file2 = num_not_found_on_file2 + 1 - end if - end if - end do - do i=1,vs2 - if(varfile2(i)%matchid<0) then - print *, 'Could not find match for file2 variable ',trim(varfile2(i)%name), ' in file1' - if (file2%has_unlimited_dim() .and. & - .not. is_time_varying(varfile2(i), file2%has_unlimited_dim(), file2%unlimdimid)) then - num_not_found_on_file1_timeconst = num_not_found_on_file1_timeconst + 1 - else - num_not_found_on_file1 = num_not_found_on_file1 + 1 - end if - end if - end do - end subroutine match_vars - - - function is_time_varying(var, file_has_unlimited_dim, unlimdimid) - type(var_t), intent(in) :: var ! variable of interest - logical , intent(in) :: file_has_unlimited_dim ! true if the file has an unlimited dimension - integer , intent(in) :: unlimdimid ! the file's unlimited dim id (if it has one) - - logical :: is_time_varying ! true if the given variable is time-varying - - if (file_has_unlimited_dim) then - is_time_varying = any(var%dimids == unlimdimid) - else - is_time_varying = .false. 
- end if - end function is_time_varying - - - function vdimsize(dims, dimids) - type(dim_t), intent(in) :: dims(:) - integer, intent(in) :: dimids(:) - - integer :: vdimsize - integer :: i - - vdimsize=1 - do i=1,size(dimids) - if(verbose) print *,__FILE__,__LINE__,i,dimids(i),size(dims),size(dimids) - vdimsize = vdimsize*dims(dimids(i))%kount - end do - - end function vdimsize - - - - - - subroutine compare_var_int(f1, f2, i1, i2, t) - type(file_t) :: f1,f2 - integer, intent(in) :: i1, i2 - integer, optional :: t - - - integer :: s1, s2, m1, m2, l1(1), l2(1), i, ierr - integer, pointer :: v1(:), v2(:), vdiff(:) - integer :: t1, n1 - integer :: start(NF90_MAX_DIMS), count(NF90_MAX_DIMS) - - if(present(t)) then - t1 = t - else - t1 = 1 - end if - - s1 = vdimsize(f1%dim, f1%var(i1)%dimids) - s2 = vdimsize(f2%dim, f2%var(i2)%dimids) - - if(s1 /= s2) then - print *, 'Variable ',f1%var(i)%name,' sizes differ' - end if - - n1 = size(f1%var(i1)%dimids) - start = 1 - do i=1,n1 - count(i) = f1%dim(f1%var(i1)%dimids(i))%dimsize - if(f1%var(i1)%dimids(i) == f1%unlimdimid) then - count(i)=1 - start(i)=t1 - end if - end do - - allocate(v1(s1), v2(s2)) - - ierr = nf90_get_var(f1%fh, i1, v1, start(1:n1), count(1:n1)) - ierr = nf90_get_var(f2%fh, i2, v2, start(1:n1), count(1:n1)) - - if(any(v1 /= v2)) then - allocate(vdiff(s1)) - vdiff = abs(v1-v2) - m1 = maxval(vdiff) - m2 = minval(vdiff) - l1 = maxloc(vdiff) - l2 = minloc(vdiff) - - print *,__FILE__,__LINE__,m1,m2,l1,l2 - deallocate(vdiff) - end if - - deallocate(v1,v2) - end subroutine compare_var_int - - subroutine compare_var_float(f1, f2, i1, i2, t) - type(file_t) :: f1,f2 - integer, intent(in) :: i1, i2 - integer, optional :: t - - - integer :: s1, s2, m1, m2, l1(1), l2(1), i, ierr - real, pointer :: v1(:), v2(:), vdiff(:) - integer :: t1, n1 - integer :: start(NF90_MAX_DIMS), count(NF90_MAX_DIMS) - - if(present(t)) then - t1 = t - else - t1 = 1 - end if - - s1 = vdimsize(f1%dim, f1%var(i1)%dimids) - s2 = vdimsize(f2%dim, 
f2%var(i2)%dimids) - - if(s1 /= s2) then - print *, 'Variable ',f1%var(i)%name,' sizes differ' - end if - - n1 = size(f1%var(i1)%dimids) - start = 1 - do i=1,n1 - count(i) = f1%dim(f1%var(i1)%dimids(i))%dimsize - if(f1%var(i1)%dimids(i) == f1%unlimdimid) then - count(i)=1 - start(i)=t1 - end if - end do - - allocate(v1(s1), v2(s2)) - - ierr = nf90_get_var(f1%fh, i1, v1, start(1:n1), count(1:n1)) - ierr = nf90_get_var(f2%fh, i2, v2, start(1:n1), count(1:n1)) - - if(any(v1 /= v2)) then - allocate(vdiff(s1)) - vdiff = abs(v1-v2) - m1 = maxval(vdiff) - m2 = minval(vdiff) - l1 = maxloc(vdiff) - l2 = minloc(vdiff) - - print *,__FILE__,__LINE__,m1,m2,l1,l2 - deallocate(vdiff) - end if - - deallocate(v1,v2) - end subroutine compare_var_float - - subroutine compare_var_double(f1, f2, i1, i2, t) - type(file_t) :: f1,f2 - integer, intent(in) :: i1, i2 - integer, optional :: t - - - integer :: s1, s2, m1, m2, l1(1), l2(1), i, ierr - double precision, pointer :: v1(:), v2(:), vdiff(:) - integer :: t1, n1 - integer :: start(NF90_MAX_DIMS), count(NF90_MAX_DIMS) - - if(present(t)) then - t1 = t - else - t1 = 1 - end if - - s1 = vdimsize(f1%dim, f1%var(i1)%dimids) - s2 = vdimsize(f2%dim, f2%var(i2)%dimids) - - if(s1 /= s2) then - print *, 'Variable ',f1%var(i)%name,' sizes differ' - end if - - n1 = size(f1%var(i1)%dimids) - start = 1 - do i=1,n1 - count(i) = f1%dim(f1%var(i1)%dimids(i))%dimsize - if(f1%var(i1)%dimids(i) == f1%unlimdimid) then - count(i)=1 - start(i)=t1 - end if - end do - - allocate(v1(s1), v2(s2)) - - ierr = nf90_get_var(f1%fh, i1, v1, start(1:n1), count(1:n1)) - ierr = nf90_get_var(f2%fh, i2, v2, start(1:n1), count(1:n1)) - - if(any(v1 /= v2)) then - allocate(vdiff(s1)) - vdiff = abs(v1-v2) - m1 = maxval(vdiff) - m2 = minval(vdiff) - l1 = maxloc(vdiff) - l2 = minloc(vdiff) - - print *,__FILE__,__LINE__,m1,m2,l1,l2 - deallocate(vdiff) - end if - - deallocate(v1,v2) - end subroutine compare_var_double - - - - - - - - - -end module filestruct diff --git 
a/CIME/non_py/cprnc/prec.F90 b/CIME/non_py/cprnc/prec.F90 deleted file mode 100644 index abbaba26ea7..00000000000 --- a/CIME/non_py/cprnc/prec.F90 +++ /dev/null @@ -1,8 +0,0 @@ -module prec -! -! Constants for setting precision -! - integer, parameter :: r4 = selected_real_kind (6) - integer, parameter :: r8 = selected_real_kind (12) - integer, parameter :: i4 = selected_int_kind (6) -end module prec diff --git a/CIME/non_py/cprnc/run_tests b/CIME/non_py/cprnc/run_tests deleted file mode 100755 index 98d1a22f3bd..00000000000 --- a/CIME/non_py/cprnc/run_tests +++ /dev/null @@ -1,121 +0,0 @@ -#!/usr/bin/env perl -# -# Run all cprnc tests -# See usage message for details -# -# Bill Sacks -# 5-28-13 - -use strict; -use Getopt::Long; - -#---------------------------------------------------------------------- -# Define parameters -#---------------------------------------------------------------------- - -# Hash giving info about each test. Key is the test file; value is a -# hash reference containing at least the associated control file (key: -# control), and possibly extra arguments to cprnc (key: extra_args) -my %tests = ('copy.nc' => {control => 'control.nc'}, - 'extra_variables.nc' => {control => 'control.nc'}, - 'diffs_in_vals.nc' => {control => 'control.nc'}, - 'diffs_in_vals_and_extra_and_missing.nc' => {control => 'control.nc'}, - 'diffs_in_fill.nc' => {control => 'control.nc'}, - 'diffs_in_vals_and_diffs_in_fill.nc' => {control => 'control.nc'}, - 'diffs_in_vals_and_fill.nc' => {control => 'control.nc'}, - 'lon_differs.nc' => {control => 'control.nc'}, - 'missing_variables.nc' => {control => 'control.nc'}, - 'vals_differ_by_1.1.nc' => {control => 'control.nc'}, - 'vals_differ_by_1.1_somewhere.nc' => {control => 'control.nc'}, - 'vals_differ_by_varying_amounts.nc' => {control => 'control.nc'}, - 'vals_differ_by_varying_amounts2.nc' => {control => 'control.nc'}, - - 'int_missing.nc' => {control => 'control_int.nc'}, - - 
'multipleTimes_someTimeless_diffs_in_vals_and_fill.nc' => {control => 'control_multipleTimes_someTimeless.nc'}, - 'multipleTimes_someTimeless_extra_and_missing.nc' => {control => 'control_multipleTimes_someTimeless.nc'}, - - 'noTime_diffs_in_vals_and_fill.nc' => {control => 'control_noTime.nc', - extra_args => '-m'}, - 'noTime_extra_and_missing.nc' => {control => 'control_noTime.nc', - extra_args => '-m'}, - - 'diffs_0d.nc' => {control => 'control_0d.nc', - extra_args => '-m'}, - - 'cpl.hi.subset.test.nc' => {control => 'cpl.hi.subset.control.nc'}, - 'clm2.h0.subset.test.nc' => {control => 'clm2.h0.subset.control.nc'}, - 'clm2.h1.subset.test.nc' => {control => 'clm2.h1.subset.control.nc'}, - - 'diffs_in_attribute.nc' => {control => 'control_attributes.nc'}, - - 'copy_char.nc' => {control => 'control_char.nc', - extra_args => '-m'}, - - 'diffs_in_nans.nc' => {control => 'control_floatDoubleNan.nc'}, - ); - -#---------------------------------------------------------------------- -# Get arguments and check them -#---------------------------------------------------------------------- - -my %opts; -GetOptions( - "outdir=s" => \$opts{'outdir'}, - "h|help" => \$opts{'help'}, -) or usage(); - -usage() if $opts{'help'}; - -if (@ARGV) { - print "ERROR: unrecognized arguments: @ARGV\n"; - usage(); -} - -if (!$opts{'outdir'}) { - print "ERROR: -outdir must be provided\n"; - usage(); -} - - -#---------------------------------------------------------------------- -# Main script -#---------------------------------------------------------------------- - -mkdir $opts{'outdir'} or die "ERROR creating directory $opts{'outdir'}; -note that this directory should NOT exist before running this script\n"; - -my $num_tests = keys %tests; -print "Running $num_tests tests...\n"; - -foreach my $test (keys %tests) { - print "$test\n"; - my $test_file = $test; - my $control_file = $tests{$test}{'control'}; - my $outfile = "$opts{'outdir'}/${test}.out"; - - my $extra_args = 
$tests{$test}{'extra_args'}; - - open (my $file, ">", "$outfile") or die "ERROR opening $outfile"; - print $file `./cprnc $extra_args test_inputs/$control_file test_inputs/$test_file`; - close $file; -} - -#---------------------------------------------------------------------- -# Subroutines -#---------------------------------------------------------------------- - -sub usage { - die < [OPTIONS] - - Run all cprnc tests, putting output in directory given by . - should NOT exist before running this script. - - -OPTIONS - -help [or -h] Display this help - -EOF -} diff --git a/CIME/non_py/cprnc/summarize_cprnc_diffs b/CIME/non_py/cprnc/summarize_cprnc_diffs deleted file mode 100755 index 84933192cc6..00000000000 --- a/CIME/non_py/cprnc/summarize_cprnc_diffs +++ /dev/null @@ -1,573 +0,0 @@ -#!/usr/bin/env perl -# -# Summarize cprnc output from all tests in a CESM test suite. -# See usage message for details. -# -# Bill Sacks -# 4-10-13 - -use strict; -use Getopt::Long; -use File::Basename; -use List::Util qw(max sum); - -#---------------------------------------------------------------------- -# Get arguments and check them -#---------------------------------------------------------------------- - -my %opts; -# set defaults -$opts{'output_suffix'} = ''; -# set up options -GetOptions( - "basedir=s" => \$opts{'basedir'}, - "testid=s" => \$opts{'testid'}, - "output_suffix=s" => \$opts{'output_suffix'}, - "narrow" => \$opts{'narrow'}, - "h|help" => \$opts{'help'}, -) or usage(); - -usage() if $opts{'help'}; - -if (@ARGV) { - print "ERROR: unrecognized arguments: @ARGV\n"; - usage(); -} - -if (!$opts{'basedir'}) { - print "ERROR: -basedir must be provided\n"; - usage(); -} -if (!$opts{'testid'}) { - print "ERROR: -testid must be provided\n"; - usage(); -} - -#---------------------------------------------------------------------- -# Main script -#---------------------------------------------------------------------- - -# Create hash containing summary of cprnc differences. 
This is a reference to a hash, with: -# Keys: "Directory Filename Variable" -# Values: Reference to a hash containing: -# Dir => directory (gives test name) -# Filename => cprnc filename -# Variable => variable -# RMS => rms value [may or may not be present] -# RMS_NORM => normalized rms value [may or may not be present] -# FILLDIFF => ' ' [may or may not be present] -# DIMSIZEDIFF => ' ' [may or may not be present] -my ($summary_hash) = - process_cprnc_output($opts{'basedir'}, $opts{'testid'}, $opts{'output_suffix'}); - -my $outbase="cprnc.summary.$opts{'testid'}"; -if ($opts{'output_suffix'}) { - $outbase = "$outbase.$opts{'output_suffix'}"; -} - -# set widths of output strings -my $widths_hash; -if ($opts{'narrow'}) { - $widths_hash = { Dir => 40, Filename => 40, Variable => 40 }; -} -else { - $widths_hash = max_widths($summary_hash); -} - - -print_results_by_test("${outbase}.by_test", $summary_hash, $widths_hash); -print_results_by_varname("${outbase}.by_varname", $summary_hash, $widths_hash); -print_results_by_rms("${outbase}.by_rms", $summary_hash, $widths_hash); - - - -#---------------------------------------------------------------------- -# Subroutines -#---------------------------------------------------------------------- - -sub usage { - die < -testid [OPTIONS] - - is the base directory in which test directories can be found - - is the testid of the tests to summarize - (can contain shell wildcards) - - This script can be used to post-process and summarize baseline comparison - output from one or more CESM test suites. - - The script finds all directories in basedir whose name ends with the given - testid; these are the test directories of interest. It then examines the - 'run' subdirectory of each test directory of interest, looking for files of - the form *.nc.cprnc.out. Or, if the -output_suffix argument is given, then - it looks for files of the form *.nc.cprnc.out.SUFFIX. 
(With this naming - convention [i.e., looking for files of the form *.nc.cprnc.out], note that - it only looks at output for baseline comparisons - NOT output from the test - itself, such as cprnc output files from the exact restart test.) (Actually, - we also allow for files of the form *.nc_[0-9][0-9][0-9][0-9].cprnc.out, - such as *.nc_0001.cprnc.out and *.nc_0002.cprnc.out, to pick up - multi-instance files.) - - Summaries of cprnc differences (RMS and normalized RMS differences, FILLDIFFs and DIMSIZEDIFFs) - are placed in three output files beginning with the name 'cprnc.summary', in - the current directory. These files contain the same information, but one is - sorted by test name, one is sorted by variable name, and is one sorted from - largest to smallest normalized RMS differences. - - -OPTIONS - -output_suffix If provided, look for files of the form *.nc.cprnc.out.SUFFIX - rather than just *.nc.cprnc.out - - -narrow Use generally-narrower output field widths to aid readability, - at the expense of truncated strings - - -help [or -h] Display this help - -EOF -} - - -# process_cprnc_output -# Read through all cprnc files, and build hashes of instances of RMS, normalized RMS, FILLDIFF and DIMSIZEDIFF -# Inputs: -# - basedir -# - testid -# - output_suffix -# Output: hash reference -# Dies with an error if no cprnc output files are found -sub process_cprnc_output { - my ($basedir, $testid, $output_suffix) = @_; - - my %diffs; - my $num_files = 0; - - my @test_dirs = glob "${basedir}/*${testid}"; - - foreach my $test_dir (@test_dirs) { - my $test_dir_base = basename($test_dir); - - my @cprnc_files; - if ($output_suffix) { - @cprnc_files = glob "${test_dir}/run/*.nc.cprnc.out.${output_suffix} ${test_dir}/run/*.nc_[0-9][0-9][0-9][0-9].cprnc.out.${output_suffix}"; - } - else { - @cprnc_files = glob "${test_dir}/run/*.nc.cprnc.out ${test_dir}/run/*.nc_[0-9][0-9][0-9][0-9].cprnc.out"; - } - - foreach my $cprnc_file (@cprnc_files) { - my $cprnc_file_base = 
basename($cprnc_file); - $num_files++; - - open IN, "<", $cprnc_file or die "ERROR opening ${cprnc_file}"; - - while (my $line = ) { - chomp $line; - - process_line($line, $test_dir_base, $cprnc_file_base, \%diffs); - } # while - - close IN; - } # foreach cprnc_file - - } # foreach test_dir - - if ($num_files == 0) { - die "ERROR: no cprnc.out files found\n"; - } - - return \%diffs; -} - - -# process_line: Process one line from one file -# Inputs: -# - line -# - test_dir -# - cprnc_file -# - diffs hash reference (MODIFIED) -sub process_line { - my ($line, $test_dir, $cprnc_file, $diffs) = @_; - - my $diff_type; - my $varname; - my $rms; - my $ignore; - my $rms_normalized; - - if ($line =~ /^ *RMS /) { - ($diff_type, $varname, $rms, $ignore, $rms_normalized) = split " ", $line; - } elsif ($line =~ /^ *FILLDIFF /) { - ($diff_type, $varname) = split " ", $line; - $rms = ""; - $rms_normalized = ""; - } elsif ($line =~ /^ *DIMSIZEDIFF /) { - ($diff_type, $varname) = split " ", $line; - $rms = ""; - $rms_normalized = ""; - } else { - $diff_type = ""; - } - - if ($diff_type eq 'RMS' || $diff_type eq 'FILLDIFF' || $diff_type eq 'DIMSIZEDIFF') { - # We have found a cprnc difference - - my $key = "$test_dir $cprnc_file $varname"; - - # For RMS errors, keep the highest error found - if ($diff_type eq "RMS") { - if (exists $diffs->{$key} && exists $diffs->{$key}{'RMS_NORM'}) { - if ($diffs->{$key}{'RMS_NORM'} > $rms_normalized) { - warn "WARNING: Ignoring lower RMS value: $key : $rms_normalized < $diffs->{$key}{'RMS_NORM'}\n"; - return; - } - else { - warn "WARNING: Replacing RMS with higher value: $key : $rms_normalized > $diffs->{$key}{'RMS_NORM'}\n"; - } - } - } - - # If the diffs hash doesn't already contain information about this - # directory/filename/variable combo, then we need to create a hash - # reference with the appropriate basic metadata. 
- if (!exists $diffs->{$key}) { - $diffs->{$key} = { - Dir => $test_dir, - Filename => $cprnc_file, - Variable => $varname, - }; - } - - # Whether or not the hash already contained the given key, we need to add - # the value of interest -- either the RMS and normalized RMS errors, or - # the fact that there is a FILLDIFF or DIMSIZEDIFF. - if ($diff_type eq "RMS") { - $diffs->{$key}{'RMS'} = $rms; - $diffs->{$key}{'RMS_NORM'} = $rms_normalized; - } else { - # No meaningful value here - just record the fact that we saw a - # FILLDIFF or DIMSIZEDIFF - $diffs->{$key}{$diff_type} = ""; - } - } elsif ($diff_type ne '') { - die "Unexpected diff_type: $diff_type"; - } -} - - -# max_widths -# Inputs: -# - summary_hash (hash reference) -# Output: reference to a hash containing the maximum width of each of -# the following in the summary hash: -# - Dir -# - Filename -# - Variable -sub max_widths { - my $summary_hash = shift; - - my %maxes; - - foreach my $var ('Dir','Filename','Variable') { - $maxes{$var} = max (map { length($summary_hash->{$_}{$var}) } keys %$summary_hash); - } - - return \%maxes; -} - - -# print_results_by_test: Print sorted hash entries to a file, sorted by test name -# Inputs: -# - outfile: name of output file -# - summary_hash: hash reference containing results to print -# - widths: hash reference giving widths of output strings -sub print_results_by_test { - my ($outfile, $summary_hash, $widths) = @_; - - open OUT, ">", "$outfile" or die "ERROR opening $outfile"; - - my @sorted_keys = sort{ $summary_hash->{$a}{'Dir'} cmp $summary_hash->{$b}{'Dir'} - or $summary_hash->{$a}{'Filename'} cmp $summary_hash->{$b}{'Filename'} - or $summary_hash->{$a}{'Variable'} cmp $summary_hash->{$b}{'Variable'} } - keys %$summary_hash; - - my $last_dir; - my $last_filename; - - my $separator_width = sum(values %$widths) + 57; - - for my $key (@sorted_keys) { - - # Print a separator line between different files - if ($summary_hash->{$key}{'Dir'} ne $last_dir || - 
$summary_hash->{$key}{'Filename'} ne $last_filename) { - if ($last_dir && $last_filename) { - print OUT "=" x $separator_width . "\n"; - } - $last_dir = $summary_hash->{$key}{'Dir'}; - $last_filename = $summary_hash->{$key}{'Filename'}; - } - - my $line = format_line($summary_hash->{$key}, $widths); - - print OUT "$line\n"; - } - - close OUT; -} - - -# print_results_by_varname: Print sorted hash entries to a file, sorted by variable name -# Inputs: -# - outfile: name of output file -# - summary_hash: hash reference containing results to print -# - widths: hash reference giving widths of output strings -sub print_results_by_varname { - my ($outfile, $summary_hash, $widths) = @_; - - open OUT, ">", "$outfile" or die "ERROR opening $outfile"; - - my @sorted_keys = sort{ $summary_hash->{$a}{'Variable'} cmp $summary_hash->{$b}{'Variable'} - or $summary_hash->{$a}{'Dir'} cmp $summary_hash->{$b}{'Dir'} - or $summary_hash->{$a}{'Filename'} cmp $summary_hash->{$b}{'Filename'} } - keys %$summary_hash; - - my $last_variable; - - my $separator_width = sum(values %$widths) + 57; - - for my $key (@sorted_keys) { - - # Print a separator line between different variables - if ($summary_hash->{$key}{'Variable'} ne $last_variable) { - if ($last_variable) { - print OUT "=" x $separator_width . 
"\n"; - } - $last_variable = $summary_hash->{$key}{'Variable'}; - } - - my $line = format_line($summary_hash->{$key}, $widths); - - print OUT "$line\n"; - } - - close OUT; -} - - - -# print_results_by_rms: Print sorted hash entries to a file, sorted by RMS_NORM -# Inputs: -# - outfile: name of output file -# - summary_hash: hash reference containing results to print -# - widths: hash reference giving widths of output strings -sub print_results_by_rms { - my ($outfile, $summary_hash, $widths) = @_; - - open OUT, ">", "$outfile" or die "ERROR opening $outfile"; - - my @sorted_keys = sort {$summary_hash->{$b}{'RMS_NORM'} <=> $summary_hash->{$a}{'RMS_NORM'} - or $summary_hash->{$a}{'Dir'} cmp $summary_hash->{$b}{'Dir'} - or $summary_hash->{$a}{'Filename'} cmp $summary_hash->{$b}{'Filename'} - or $summary_hash->{$a}{'Variable'} cmp $summary_hash->{$b}{'Variable'} } - keys %$summary_hash; - - for my $key (@sorted_keys) { - my $line = format_line($summary_hash->{$key}, $widths); - - print OUT "$line\n"; - } - - close OUT; -} - - -# Inputs: -# - reference to a hash containing: -# - Dir -# - Filename -# - Variable -# - RMS (optional) -# - RMS_NORM (optional) -# - FILLDIFF (optional) -# - DIMSIZEDIFF (optional) -# - widths: hash reference giving widths of output strings -# Return a formatted line for printing -sub format_line { - my ($hash_ref, $widths) = @_; - - my $dir = $hash_ref->{'Dir'}; - my $filename = $hash_ref->{'Filename'}; - my $variable = $hash_ref->{'Variable'}; - my $rms = ""; - my $rms_normalized = ""; - my $filldiff = ""; - my $dimsizediff = ""; - if (exists $hash_ref->{'RMS'}) { - $rms = sprintf(" : RMS %-16g", $hash_ref->{'RMS'}); - } - if (exists $hash_ref->{'RMS_NORM'}) { - $rms_normalized = sprintf(" : RMS_NORM %-16g", $hash_ref->{'RMS_NORM'}); - } - if (exists $hash_ref->{'FILLDIFF'}) { - $filldiff = " : FILLDIFF"; - } - if (exists $hash_ref->{'DIMSIZEDIFF'}) { - $dimsizediff = " : DIMSIZEDIFF"; - } - - # for width=40, the format string will contain 
'%-40.40s' - my $format = '%-' . $widths->{'Dir'} . '.' . $widths->{'Dir'} . 's : ' . - '%-' . $widths->{'Filename'} . '.' . $widths->{'Filename'} . 's : ' . - '%-' . $widths->{'Variable'} . '.' . $widths->{'Variable'} . 's' . - '%s%s%s%s'; - - sprintf($format, $dir, $filename, $variable, $filldiff, $dimsizediff, $rms, $rms_normalized); -} - -#======================================================================= -# Notes about testing: unit tests -#======================================================================= - -#----------------------------------------------------------------------- -# Testing process_line -#----------------------------------------------------------------------- - -# use Data::Dumper; - -# my %diffs; - -# # shouldn't do anything -# process_line("hello", "test_dir1", "file_a", \%diffs); - -# # test basic filldiff -# process_line("FILLDIFF var1", "test_dir1", "file_b", \%diffs); - -# # add an RMS to existing filldiff -# process_line("RMS var1 4200 NORMALIZED 42", "test_dir1", "file_b", \%diffs); - -# # test basic rms error -# process_line("RMS var17 0.314 NORMALIZED 3.14", "test_dir1", "file_b", \%diffs); - -# # add a filldiff to existing rms error -# process_line("FILLDIFF var17", "test_dir1", "file_b", \%diffs); - -# # add a filldiff without RMS -# process_line("FILLDIFF var42", "test_dir2", "file_c", \%diffs); - -# # add a dimsizediff -# process_line("DIMSIZEDIFF var43", "test_dir2", "file_c", \%diffs); - -# # add an RMS error without filldiff -# process_line("RMS var100 99 NORMALIZED 100", "test_dir2", "file_d", \%diffs); - -# # test a warning: should issue a warning and replace the above setting -# process_line("RMS var100 9 NORMALIZED 200", "test_dir2", "file_d", \%diffs); - -# # test a warning: should issue a warning but NOT replace the above setting -# # (normalized RMS is smaller even though standard RMS is bigger: the normalized -# # one should be considered in deciding whether to replace the previous setting) -# 
process_line("RMS var100 999 NORMALIZED 50", "test_dir2", "file_d", \%diffs); - -# print Dumper(\%diffs); - - -# THE ABOVE SHOULD PRINT SOMETHING LIKE THIS (though the output from Dumper will -# likely appear in a different order): - -# WARNING: Replacing RMS with higher value: test_dir2 file_d var100 : 200 > 100 -# WARNING: Ignoring lower RMS value: test_dir2 file_d var100 : 50 < 200 -# $VAR1 = { -# 'test_dir1 file_b var17' => { -# 'RMS' => '0.314', -# 'Variable' => 'var17', -# 'Filename' => 'file_b', -# 'FILLDIFF' => '', -# 'Dir' => 'test_dir1', -# 'RMS_NORM' => '3.14' -# }, -# 'test_dir2 file_d var100' => { -# 'Dir' => 'test_dir2', -# 'RMS_NORM' => 200, -# 'Filename' => 'file_d', -# 'Variable' => 'var100', -# 'RMS' => '9' -# }, -# 'test_dir1 file_b var1' => { -# 'Filename' => 'file_b', -# 'RMS_NORM' => '42', -# 'FILLDIFF' => '', -# 'Dir' => 'test_dir1', -# 'RMS' => '4200', -# 'Variable' => 'var1' -# }, -# 'test_dir2 file_c var43' => { -# 'Variable' => 'var43', -# 'DIMSIZEDIFF' => '', -# 'Dir' => 'test_dir2', -# 'Filename' => 'file_c' -# }, -# 'test_dir2 file_c var42' => { -# 'Filename' => 'file_c', -# 'Dir' => 'test_dir2', -# 'FILLDIFF' => '', -# 'Variable' => 'var42' -# } -# }; - - -#----------------------------------------------------------------------- -# Testing the print routines -#----------------------------------------------------------------------- - -# Add the following to the above test code: - -# my $widths_hash = { Dir => 40, Filename => 40, Variable => 40 }; -# print_results_by_test("testout.by_test", \%diffs, $widths_hash); -# print_results_by_rms("testout.by_rms", \%diffs, $widths_hash); - -# This should give: - -# $ cat testout.by_rms -# test_dir2 : file_d : var100 : RMS 9 : RMS_NORM 200 -# test_dir1 : file_b : var1 : FILLDIFF : RMS 4200 : RMS_NORM 42 -# test_dir1 : file_b : var17 : FILLDIFF : RMS 0.314 : RMS_NORM 3.14 -# test_dir2 : file_c : var42 : FILLDIFF -# test_dir2 : file_c : var43 : DIMSIZEDIFF -# $ cat testout.by_test -# test_dir1 : 
file_b : var1 : FILLDIFF : RMS 4200 : RMS_NORM 42 -# test_dir1 : file_b : var17 : FILLDIFF : RMS 0.314 : RMS_NORM 3.14 -# ================================================================================================================================================================================= -# test_dir2 : file_c : var42 : FILLDIFF -# test_dir2 : file_c : var43 : DIMSIZEDIFF -# ================================================================================================================================================================================= -# test_dir2 : file_d : var100 : RMS 9 : RMS_NORM 200 - - - -#======================================================================= -# Notes about testing: integration tests -#======================================================================= - -# Test the following - -# Note: can do these tests by running the cprnc tests and organizing -# outputs into particular directories. -# -# For each of these tests, sort the different output files and compare -# the sorted files to make sure the same info is in all output files; -# then look at one of the output files. -# -# - no RMS or FILLDIFFs at all (testid that just contains output from -# comparing control & copy) -# -# - some RMS and some FILLDIFFs, split across 2 directories, each with -# 2 cprnc files (this can be done by comparing the control file with -# diffs_in_fill.nc, diffs_in_vals.nc, diffs_in_vals_and_diffs_in_fill.nc -# and diffs_in_vals_and_fill.nc) -# -# - multiple RMS errors to test RMS sorting, split across 2 directories -# (this can be done by comparing the control file with four of the -# vals_differ_by_* files) diff --git a/CIME/non_py/cprnc/test_inputs/README b/CIME/non_py/cprnc/test_inputs/README deleted file mode 100644 index f1bdfbd94e6..00000000000 --- a/CIME/non_py/cprnc/test_inputs/README +++ /dev/null @@ -1,188 +0,0 @@ -This directory contains simple test inputs to test cprnc. 
- -All comparisons can be run by running the run_tests script in the -parent directory. Suggestion: run this once from the baseline -directory, then once from the new directory; compare against baselines -by doing a directory diff of the two directories, or with, e.g.: - - baseline_out=/PATH/TO/BASELINE/OUTPUT - new_out=/PATH/TO/NEW/OUTPUT - for fl in $baseline_out/*; do echo $fl; diff -a $fl $new_out/`basename $fl`; done > diff_report - -The files here are: - ---- FILES COMPARED AGAINST control.nc --- - -- copy.nc: copy of control file (i.e., no diffs) - -- diffs_in_vals.nc: one variable has differences in values - -- diffs_in_vals_and_extra_and_missing.nc: one variable has differences in - values; also, one variable is missing and there is an extra variable. Purpose - of this test is to make sure that this case is reported as a DIFFERENCE rather - than just a warning due to the missing fields. - -- diffs_in_fill.nc: one variable has differences in fill pattern - -- diffs_in_vals_and_diffs_in_fill.nc: one variable has differences in - values, another has differences in fill pattern - -- diffs_in_vals_and_fill.nc: a single variable has differences in both - values and fill pattern - -- extra_variables.nc: has two extra variables beyond those in control.nc - -- lon_differs.nc: number of longitude points differs - -- missing_variables.nc: missing two variables that are present in control.nc - -- vals_differ_by_1.1.nc: testvar has values equal to 1.1 times those - in the control file. This is useful for testing the relative - difference calculation. 
- - True values are the following (note that relative difference is - defined using a denominator of max(v1,v2)): - - - RMS diff: 0.6204837 (printed as 6.2e-1) - - avg rel diff: 0.0909091 (printed as 9.1e-2) - - avg decimal digits: 1.041393 (printed as 1.0) - - worst decimal digits: 1.041393 (printed as 1.0) - -- vals_differ_by_1.1_somewhere.nc: similar to vals_differ_by_1.1.nc, - but now only a single value differs by a factor of 1.1 - - True values are the following (note that relative difference is - defined using a denominator of max(v1,v2)): - - - RMS diff: 0.3162278 (printed as 3.2e-1) - - avg rel diff: 0.009090909 (printed as 9.1e-3) - - avg decimal digits: 1.041393 (printed as 1.0) [note that the - average here ignores the indices with no difference] - - worst decimal digits: 1.041393 (printed as 1.0) - -- vals_differ_by_varying_amounts.nc: testvar has values equal to 1, - 1.01, 1.02, ..., 1.09 times those in the control file. This is - useful for testing the relative difference calculation using more - complex differences. - - True values are the following (note that relative difference is - defined using a denominator of max(v1,v2)): - - - RMS diff: 0.4434862 (printed as 4.4e-1) - - avg rel diff: 0.04233828 (printed as 4.2e-2) - - avg decimal digits: 1.403306 (printed as 1.4) [note that the - average here normalizes by 9 rather than 10, since the first index - has a relative difference of 0] - - worst decimal digits: 1.083184 (printed as 1.1) - -- vals_differ_by_varying_amounts2.nc: First 8 values of testvar are - identical to control; 9th is control * (1-1e-3), 10th is control * - (1-1e-5). This is the same as the example given in ../README. 
- - True values are the following: - - - RMS diff: 0.002846226 (printed as 2.8e-3) - - avg rel diff: 0.000101 (printed as 1.0e-4) - - avg decimal digits: 4.0 - - worst decimal digits: 3.0 - ---- FILES COMPARED AGAINST control_int.nc --- - -Note: This file is the same as control.nc, but stores the main variables -as integers rather than reals. - -- int_missing.nc: A variable is missing. The point of this test is to - cover code that resulted in https://github.com/ESMCI/cime/issues/3661. - ---- FILES COMPARED AGAINST control_multipleTimes_someTimeless.nc --- - -Note: This file has some variables with a time dimension, some -without. The time dimension has multiple times, in order to make sure -that the variables with vs. without the time dimension really are -treated differently. Also, a variable without time appears first, in -order to make sure that cprnc doesn't rely on there being a variable -with time first. - -- multipleTimes_someTimeless_diffs_in_vals_and_fill.nc: one variable - with a time dimension has differences in both values and fill - pattern (in time 2); and one variable without a time dimension has - differences in both values and fill pattern. The differences are the - same for both variables (e.g., RMS errors should be the same for - both). - -- multipleTimes_someTimeless_extra_and_missing.nc: two timeless - variables are missing and there is one extra timeless - variable. Purpose of this test is to make sure that the results are - reported as IDENTICAL when the only diffs in field lists are variables - without an unlimited dimension (in a file that has an unlimited - dimension). - ---- FILES COMPARED AGAINST control_noTime.nc --- - -Note: This file has no time (unlimited) dimension. - -- noTime_diffs_in_vals_and_fill.nc: a single variable has differences - in both values and fill pattern - -- noTime_extra_and_missing.nc: two variables are missing and there is - one extra variable. 
Purpose of this test is to make sure that even - missing fields without an unlimited dimension trigger a DIFFER result - if the file doesn't have an unlimited dimension to begin with. - ---- FILES COMPARED AGAINST control_0d.nc --- - -Note: This file has two 0-d variables - -- diffs_0d.nc: diffs in both 0-d variables (int & real) - - ---- FILES COMPARED AGAINST cpl.hi.subset.control.nc --- - -Note: This file is a subset of a standard cpl hist file (as of May, 2013). - -- cpl.hi.subset.test.nc: some variables are the same, some differ - - ---- FILES COMPARED AGAINST clm2.h0.subset.control.nc --- - -Note: This file is a subset of a standard clm hist file (as of May, 2013). - -- clm2.h0.subset.test.nc: some variables are the same, some differ - - ---- FILES COMPARED AGAINST clm2.h1.subset.control.nc --- - -Note: This file is a subset of a standard clm 1-d hist file -(dov2xy=false) (as of May, 2013). Note that it also includes two -times. - -- clm2.h1.subset.test.nc: some variables are the same, some - differ. Note that this includes identical & different integer - variables, identical & different real-valued variables, and - variables with different spatial dimensions (e.g., landunit, pft, - and lat x lon). - ---- FILES COMPARED AGAINST control_attributes.nc --- - -Note: This file is like control.nc, but contains some global attributes - -- diffs_in_attribute.nc: one global attribute is the same, one differs; in - addition, one global attribute is missing, and there is a new one on this file - that is not present on the control.nc file - ---- FILES COMPARED AGAINST control_char.nc --- - -Note: This file just has a character variable, in order to test what is output -for character variables (which cannot be analyzed). - -- copy_char.nc: identical to control_char.nc - ---- FILES COMPARED AGAINST control_floatDoubleNan.nc --- - -Note: This file has a float variable, a double variable, and a double -variable with a NaN value. 
Its initial purpose is for testing -comparisons involving NaNs. - -- diffs_in_nans.nc: a float and double variable each have a NaN where - the control file does not, and another double variable has a NaN - only where the control file also has a NaN diff --git a/CIME/non_py/cprnc/test_inputs/clm2.h0.subset.control.nc b/CIME/non_py/cprnc/test_inputs/clm2.h0.subset.control.nc deleted file mode 100644 index 1ffd6474f60..00000000000 Binary files a/CIME/non_py/cprnc/test_inputs/clm2.h0.subset.control.nc and /dev/null differ diff --git a/CIME/non_py/cprnc/test_inputs/clm2.h0.subset.test.nc b/CIME/non_py/cprnc/test_inputs/clm2.h0.subset.test.nc deleted file mode 100644 index 41a08ce7bf7..00000000000 Binary files a/CIME/non_py/cprnc/test_inputs/clm2.h0.subset.test.nc and /dev/null differ diff --git a/CIME/non_py/cprnc/test_inputs/clm2.h1.subset.control.nc b/CIME/non_py/cprnc/test_inputs/clm2.h1.subset.control.nc deleted file mode 100644 index 2d96f1fdc74..00000000000 Binary files a/CIME/non_py/cprnc/test_inputs/clm2.h1.subset.control.nc and /dev/null differ diff --git a/CIME/non_py/cprnc/test_inputs/clm2.h1.subset.test.nc b/CIME/non_py/cprnc/test_inputs/clm2.h1.subset.test.nc deleted file mode 100644 index db606c4b135..00000000000 Binary files a/CIME/non_py/cprnc/test_inputs/clm2.h1.subset.test.nc and /dev/null differ diff --git a/CIME/non_py/cprnc/test_inputs/control.nc b/CIME/non_py/cprnc/test_inputs/control.nc deleted file mode 100644 index d9c0ce6a5c4..00000000000 Binary files a/CIME/non_py/cprnc/test_inputs/control.nc and /dev/null differ diff --git a/CIME/non_py/cprnc/test_inputs/control_0d.nc b/CIME/non_py/cprnc/test_inputs/control_0d.nc deleted file mode 100644 index d48586f2282..00000000000 Binary files a/CIME/non_py/cprnc/test_inputs/control_0d.nc and /dev/null differ diff --git a/CIME/non_py/cprnc/test_inputs/control_attributes.nc b/CIME/non_py/cprnc/test_inputs/control_attributes.nc deleted file mode 100644 index a26bc946415..00000000000 Binary files 
a/CIME/non_py/cprnc/test_inputs/control_attributes.nc and /dev/null differ diff --git a/CIME/non_py/cprnc/test_inputs/control_char.nc b/CIME/non_py/cprnc/test_inputs/control_char.nc deleted file mode 100644 index 7b4567908ea..00000000000 Binary files a/CIME/non_py/cprnc/test_inputs/control_char.nc and /dev/null differ diff --git a/CIME/non_py/cprnc/test_inputs/control_floatDoubleNan.nc b/CIME/non_py/cprnc/test_inputs/control_floatDoubleNan.nc deleted file mode 100644 index 1e99d3281e6..00000000000 Binary files a/CIME/non_py/cprnc/test_inputs/control_floatDoubleNan.nc and /dev/null differ diff --git a/CIME/non_py/cprnc/test_inputs/control_int.nc b/CIME/non_py/cprnc/test_inputs/control_int.nc deleted file mode 100644 index 13913cf90e3..00000000000 Binary files a/CIME/non_py/cprnc/test_inputs/control_int.nc and /dev/null differ diff --git a/CIME/non_py/cprnc/test_inputs/control_multipleTimes_someTimeless.nc b/CIME/non_py/cprnc/test_inputs/control_multipleTimes_someTimeless.nc deleted file mode 100644 index 9ebf4579c92..00000000000 Binary files a/CIME/non_py/cprnc/test_inputs/control_multipleTimes_someTimeless.nc and /dev/null differ diff --git a/CIME/non_py/cprnc/test_inputs/control_noTime.nc b/CIME/non_py/cprnc/test_inputs/control_noTime.nc deleted file mode 100644 index 92be43053d0..00000000000 Binary files a/CIME/non_py/cprnc/test_inputs/control_noTime.nc and /dev/null differ diff --git a/CIME/non_py/cprnc/test_inputs/copy.nc b/CIME/non_py/cprnc/test_inputs/copy.nc deleted file mode 100644 index d9c0ce6a5c4..00000000000 Binary files a/CIME/non_py/cprnc/test_inputs/copy.nc and /dev/null differ diff --git a/CIME/non_py/cprnc/test_inputs/copy_char.nc b/CIME/non_py/cprnc/test_inputs/copy_char.nc deleted file mode 100644 index 7b4567908ea..00000000000 Binary files a/CIME/non_py/cprnc/test_inputs/copy_char.nc and /dev/null differ diff --git a/CIME/non_py/cprnc/test_inputs/cpl.hi.subset.control.nc b/CIME/non_py/cprnc/test_inputs/cpl.hi.subset.control.nc deleted file mode 
100644 index c83d8cc499d..00000000000 Binary files a/CIME/non_py/cprnc/test_inputs/cpl.hi.subset.control.nc and /dev/null differ diff --git a/CIME/non_py/cprnc/test_inputs/cpl.hi.subset.test.nc b/CIME/non_py/cprnc/test_inputs/cpl.hi.subset.test.nc deleted file mode 100644 index 3fdd331ba42..00000000000 Binary files a/CIME/non_py/cprnc/test_inputs/cpl.hi.subset.test.nc and /dev/null differ diff --git a/CIME/non_py/cprnc/test_inputs/diffs_0d.nc b/CIME/non_py/cprnc/test_inputs/diffs_0d.nc deleted file mode 100644 index db275131129..00000000000 Binary files a/CIME/non_py/cprnc/test_inputs/diffs_0d.nc and /dev/null differ diff --git a/CIME/non_py/cprnc/test_inputs/diffs_in_attribute.nc b/CIME/non_py/cprnc/test_inputs/diffs_in_attribute.nc deleted file mode 100644 index b81f05122af..00000000000 Binary files a/CIME/non_py/cprnc/test_inputs/diffs_in_attribute.nc and /dev/null differ diff --git a/CIME/non_py/cprnc/test_inputs/diffs_in_fill.nc b/CIME/non_py/cprnc/test_inputs/diffs_in_fill.nc deleted file mode 100644 index 07b62e33948..00000000000 Binary files a/CIME/non_py/cprnc/test_inputs/diffs_in_fill.nc and /dev/null differ diff --git a/CIME/non_py/cprnc/test_inputs/diffs_in_nans.nc b/CIME/non_py/cprnc/test_inputs/diffs_in_nans.nc deleted file mode 100644 index 732bf56896f..00000000000 Binary files a/CIME/non_py/cprnc/test_inputs/diffs_in_nans.nc and /dev/null differ diff --git a/CIME/non_py/cprnc/test_inputs/diffs_in_vals.nc b/CIME/non_py/cprnc/test_inputs/diffs_in_vals.nc deleted file mode 100644 index 16411c4881a..00000000000 Binary files a/CIME/non_py/cprnc/test_inputs/diffs_in_vals.nc and /dev/null differ diff --git a/CIME/non_py/cprnc/test_inputs/diffs_in_vals_and_diffs_in_fill.nc b/CIME/non_py/cprnc/test_inputs/diffs_in_vals_and_diffs_in_fill.nc deleted file mode 100644 index 9b7adc86763..00000000000 Binary files a/CIME/non_py/cprnc/test_inputs/diffs_in_vals_and_diffs_in_fill.nc and /dev/null differ diff --git 
a/CIME/non_py/cprnc/test_inputs/diffs_in_vals_and_extra_and_missing.nc b/CIME/non_py/cprnc/test_inputs/diffs_in_vals_and_extra_and_missing.nc deleted file mode 100644 index e29e072a24c..00000000000 Binary files a/CIME/non_py/cprnc/test_inputs/diffs_in_vals_and_extra_and_missing.nc and /dev/null differ diff --git a/CIME/non_py/cprnc/test_inputs/diffs_in_vals_and_fill.nc b/CIME/non_py/cprnc/test_inputs/diffs_in_vals_and_fill.nc deleted file mode 100644 index 6db4782e6ae..00000000000 Binary files a/CIME/non_py/cprnc/test_inputs/diffs_in_vals_and_fill.nc and /dev/null differ diff --git a/CIME/non_py/cprnc/test_inputs/extra_variables.nc b/CIME/non_py/cprnc/test_inputs/extra_variables.nc deleted file mode 100644 index 2d33ab90796..00000000000 Binary files a/CIME/non_py/cprnc/test_inputs/extra_variables.nc and /dev/null differ diff --git a/CIME/non_py/cprnc/test_inputs/int_missing.nc b/CIME/non_py/cprnc/test_inputs/int_missing.nc deleted file mode 100644 index 139b358d57f..00000000000 Binary files a/CIME/non_py/cprnc/test_inputs/int_missing.nc and /dev/null differ diff --git a/CIME/non_py/cprnc/test_inputs/lon_differs.nc b/CIME/non_py/cprnc/test_inputs/lon_differs.nc deleted file mode 100644 index bb429246912..00000000000 Binary files a/CIME/non_py/cprnc/test_inputs/lon_differs.nc and /dev/null differ diff --git a/CIME/non_py/cprnc/test_inputs/missing_variables.nc b/CIME/non_py/cprnc/test_inputs/missing_variables.nc deleted file mode 100644 index 71d9ae2d082..00000000000 Binary files a/CIME/non_py/cprnc/test_inputs/missing_variables.nc and /dev/null differ diff --git a/CIME/non_py/cprnc/test_inputs/multipleTimes_someTimeless_diffs_in_vals_and_fill.nc b/CIME/non_py/cprnc/test_inputs/multipleTimes_someTimeless_diffs_in_vals_and_fill.nc deleted file mode 100644 index 7edf3a495d3..00000000000 Binary files a/CIME/non_py/cprnc/test_inputs/multipleTimes_someTimeless_diffs_in_vals_and_fill.nc and /dev/null differ diff --git 
a/CIME/non_py/cprnc/test_inputs/multipleTimes_someTimeless_extra_and_missing.nc b/CIME/non_py/cprnc/test_inputs/multipleTimes_someTimeless_extra_and_missing.nc deleted file mode 100644 index d2718a86de5..00000000000 Binary files a/CIME/non_py/cprnc/test_inputs/multipleTimes_someTimeless_extra_and_missing.nc and /dev/null differ diff --git a/CIME/non_py/cprnc/test_inputs/noTime_diffs_in_vals_and_fill.nc b/CIME/non_py/cprnc/test_inputs/noTime_diffs_in_vals_and_fill.nc deleted file mode 100644 index c3006345ed7..00000000000 Binary files a/CIME/non_py/cprnc/test_inputs/noTime_diffs_in_vals_and_fill.nc and /dev/null differ diff --git a/CIME/non_py/cprnc/test_inputs/noTime_extra_and_missing.nc b/CIME/non_py/cprnc/test_inputs/noTime_extra_and_missing.nc deleted file mode 100644 index c6c2d790900..00000000000 Binary files a/CIME/non_py/cprnc/test_inputs/noTime_extra_and_missing.nc and /dev/null differ diff --git a/CIME/non_py/cprnc/test_inputs/vals_differ_by_1.1.nc b/CIME/non_py/cprnc/test_inputs/vals_differ_by_1.1.nc deleted file mode 100644 index ddc52582461..00000000000 Binary files a/CIME/non_py/cprnc/test_inputs/vals_differ_by_1.1.nc and /dev/null differ diff --git a/CIME/non_py/cprnc/test_inputs/vals_differ_by_1.1_somewhere.nc b/CIME/non_py/cprnc/test_inputs/vals_differ_by_1.1_somewhere.nc deleted file mode 100644 index 703507681b5..00000000000 Binary files a/CIME/non_py/cprnc/test_inputs/vals_differ_by_1.1_somewhere.nc and /dev/null differ diff --git a/CIME/non_py/cprnc/test_inputs/vals_differ_by_varying_amounts.nc b/CIME/non_py/cprnc/test_inputs/vals_differ_by_varying_amounts.nc deleted file mode 100644 index f42d2952af2..00000000000 Binary files a/CIME/non_py/cprnc/test_inputs/vals_differ_by_varying_amounts.nc and /dev/null differ diff --git a/CIME/non_py/cprnc/test_inputs/vals_differ_by_varying_amounts2.nc b/CIME/non_py/cprnc/test_inputs/vals_differ_by_varying_amounts2.nc deleted file mode 100644 index 4167b2d121b..00000000000 Binary files 
a/CIME/non_py/cprnc/test_inputs/vals_differ_by_varying_amounts2.nc and /dev/null differ diff --git a/CIME/non_py/cprnc/utils.F90 b/CIME/non_py/cprnc/utils.F90 deleted file mode 100644 index 9bf0d5d82fc..00000000000 --- a/CIME/non_py/cprnc/utils.F90 +++ /dev/null @@ -1,85 +0,0 @@ -module utils - use filestruct, only : dim_t -contains - -subroutine get_dimname_str(ndims,dimids,dims,dimname_str) - integer, intent(in) :: ndims - integer, intent(in) :: dimids(:) - type(dim_t) :: dims(:) - character(len=*),intent(out) :: dimname_str - - integer :: dlen - integer :: j - - dimname_str = ' ' - - if(ndims>0) then - dimname_str(1:1) = '(' - dlen=2 - - do j=1,ndims - dimname_str(dlen:) = trim(dims(dimids(j))%name)//',' - dlen=dlen+ len_trim(dims(dimids(j))%name) + 1 - end do - dimname_str(dlen-1:dlen-1) = ')' - end if - - -end subroutine get_dimname_str - -subroutine get_dim_str(ndims,loc,dim_str) - integer, intent(in) :: ndims - integer, intent(in) :: loc(:) - character(len=*),intent(out) :: dim_str - - integer :: dlen - integer :: j - - dim_str = ' ' - - if(ndims>0) then - dim_str(1:1) = '(' - dlen=2 - - do j=1,ndims - write(dim_str(dlen:),'(i6,a)') loc(j),',' - - dlen=len_trim(dim_str)+1 - end do - dim_str(dlen-1:dlen-1) = ')' - end if - - -end subroutine get_dim_str - - - -subroutine checknf90(ierr,returnflag,err_str) - use netcdf, only : nf90_noerr, nf90_strerror - integer, intent(in) :: ierr - logical, optional, intent(in) :: returnflag - character(len=*), optional, intent(in) :: err_str - - if(ierr/=NF90_NOERR) then - print *, trim(nf90_strerror(ierr)) - if(present(err_str)) then - print *, trim(err_str) - end if - if(present(returnflag)) then - if(returnflag) return - end if -#ifdef AIX - call xl__trbk() -#endif - stop - - end if - - - -end subroutine checknf90 - - - - -end module utils diff --git a/CIME/non_py/src/CMake/CIME_utils.cmake b/CIME/non_py/src/CMake/CIME_utils.cmake index a75784681c6..4a91112559b 100644 --- a/CIME/non_py/src/CMake/CIME_utils.cmake +++ 
b/CIME/non_py/src/CMake/CIME_utils.cmake @@ -72,10 +72,7 @@ include(genf90_utils) #================================================= # pFUnit and its preprocessor -find_package(pFUnit) - -# Preprocessor and driver handling. -include(pFUnit_utils) +find_package(PFUNIT) # Need to add PFUNIT_INCLUDE_DIRS to the general list of include_directories # because we use pfunit's 'throw'. diff --git a/CIME/non_py/src/CMake/FindpFUnit.cmake b/CIME/non_py/src/CMake/FindpFUnit.cmake deleted file mode 100644 index a46c0f73eee..00000000000 --- a/CIME/non_py/src/CMake/FindpFUnit.cmake +++ /dev/null @@ -1,52 +0,0 @@ -# Find module for pFUnit -# -# For this module to work, either the pFUnit parser must be discoverable -# (e.g. in the user's PATH), or else the environment variable "PFUNIT" must -# be defined, and point to the root directory for the PFUNIT installation. -# -# This module sets some typical variables: -# PFUNIT_FOUND -# PFUNIT_LIBRARY(/LIBRARIES) -# PFUNIT_INCLUDE_DIR(/DIRS) -# -# The module also sets: -# PFUNIT_DRIVER - Path to the pFUnit driver source. -# PFUNIT_MODULE_DIR - Directory containing pFUnit's module files. -# PFUNIT_PARSER - Path to pFUnitParser.py (the preprocessor). - -#========================================================================== -# Copyright (c) 2013-2014, University Corporation for Atmospheric Research -# -# This software is distributed under a two-clause BSD license, with no -# warranties, express or implied. See the accompanying LICENSE file for -# details. 
-#========================================================================== - -include(FindPackageHandleStandardArgs) - -find_program(PFUNIT_PARSER pFUnitParser.py - HINTS ${PFUNIT_PATH}/bin $ENV{PFUNIT}/bin) - -string(REGEX REPLACE "bin/pFUnitParser\\.py\$" "" - pfunit_directory ${PFUNIT_PARSER}) - -find_library(PFUNIT_LIBRARY pfunit - HINTS ${PFUNIT_PATH}/lib ${pfunit_directory}/lib) - -find_path(PFUNIT_INCLUDE_DIR driver.F90 - HINTS ${pfunit_directory}/include) - -set(PFUNIT_DRIVER ${PFUNIT_INCLUDE_DIR}/driver.F90) - -find_path(PFUNIT_MODULE_DIR NAMES pfunit.mod PFUNIT.MOD - HINTS ${pfunit_directory}/include ${pfunit_directory}/mod) - -set(PFUNIT_LIBRARIES ${PFUNIT_LIBRARY}) -set(PFUNIT_INCLUDE_DIRS ${PFUNIT_INCLUDE_DIR} ${PFUNIT_MODULE_DIR}) - -# Handle QUIETLY and REQUIRED. -find_package_handle_standard_args(pFUnit DEFAULT_MSG - PFUNIT_LIBRARY PFUNIT_INCLUDE_DIR PFUNIT_MODULE_DIR PFUNIT_PARSER) - -mark_as_advanced(PFUNIT_INCLUDE_DIR PFUNIT_LIBRARY PFUNIT_MODULE_DIR - PFUNIT_PARSER) diff --git a/CIME/non_py/src/CMake/README.md b/CIME/non_py/src/CMake/README.md index ae583206b0e..80b84eb2bba 100644 --- a/CIME/non_py/src/CMake/README.md +++ b/CIME/non_py/src/CMake/README.md @@ -14,16 +14,12 @@ Find modules for specific libraries: FindNETCDF -FindpFUnit - FindPnetcdf Utility modules: genf90_utils - Generate Fortran code from genf90.pl templates. -pFUnit_utils - Create executables using the pFUnit parser and driver. - Sourcelist_utils - Use source file lists defined over multiple directories. Modules that are CESM-specific and/or incomplete: diff --git a/CIME/non_py/src/CMake/pFUnit_utils.cmake b/CIME/non_py/src/CMake/pFUnit_utils.cmake deleted file mode 100644 index 2accabe20e1..00000000000 --- a/CIME/non_py/src/CMake/pFUnit_utils.cmake +++ /dev/null @@ -1,227 +0,0 @@ -# Utilities for using pFUnit's preprocessor and provided driver file. -# -# This module relies upon the variables defined by the FindpFUnit module. 
-# -#========================================================================== -# -# add_pFUnit_executable -# -# Arguments: -# name - Name of the executable to add. -# pf_file_list - List of .pf files to process. -# output_directory - Directory where generated sources will be placed. -# fortran_source_list - List of Fortran files to include. -# -# Preprocesses the input .pf files to create test suites, then creates an -# executable that drives those suites with the pFUnit driver. -# -# Limitations: -# add_pFUnit_executable cannot currently handle cases where the user -# choses to do certain things "manually", such as: -# -# - Test suites written in normal Fortran (not .pf) files. -# - User-specified testSuites.inc -# - User-specified driver file in fortran_source_list. -# -#========================================================================== -# -# define_pFUnit_failure -# -# Arguments: -# test_name - Name of a CTest test. -# -# Defines FAIL_REGULAR_EXPRESSION and PASS_REGULAR_EXPRESSION for the given -# test, so that pFUnit's overall pass/fail status can be detected. -# -#========================================================================== -# -# create_pFUnit_test -# -# Required arguments: -# test_name - Name of a CTest test. -# executable_name - Name of the executable associated with this test. -# pf_file_list - List of .pf files to process. -# fortran_source_list - List of Fortran files to include. 
-# -# Optional arguments, specified via keyword: -# GEN_OUTPUT_DIRECTORY - directory for generated source files, relative to CMAKE_CURRENT_BINARY_DIR -# - Defaults to CMAKE_CURRENT_BINARY_DIR -# - Needs to be given if you have multiple separate pFUnit tests defined in the same directory -# COMMAND - Command to run the pFUnit test -# - Defaults to ./executable_name -# - Needs to be given if you need more on the command line than just the executable -# name, such as setting the number of threads -# - A multi-part command should NOT be enclosed in quotes (see example below) -# - COMMAND should NOT contain the mpirun command: this is specified -# separately, via the PFUNIT_MPIRUN CMake variable -# - The name of the executable should be prefixed with ./ for this to work -# when dot is not in your path (e.g., ./foo_exe rather than simply foo_exe) -# -# Non-standard CMake variables used: -# PFUNIT_MPIRUN - If executables need to be prefixed with an mpirun command, -# PFUNIT_MPIRUN gives this prefix (e.g., "mpirun") -# -# Does everything needed to create a pFUnit-based test, wrapping -# add_pFUnit_executable, add_test, and define_pFUnit_failure. -# -# Example, using defaults for the optional arguments: -# create_pFUnit_test(mytest mytest_exe "${pfunit_sources}" "${test_sources}") -# -# Example, specifying values for the optional arguments: -# create_pFUnit_test(mytest mytest_exe "${pfunit_sources}" "${test_sources}" -# GEN_OUTPUT_DIRECTORY mytest_dir -# COMMAND env OMP_NUM_THREADS=3 ./mytest_exe) -# -#========================================================================== - -#========================================================================== -# Copyright (c) 2013-2014, University Corporation for Atmospheric Research -# -# This software is distributed under a two-clause BSD license, with no -# warranties, express or implied. See the accompanying LICENSE file for -# details. 
-#========================================================================== - -include(CMakeParseArguments) - -# Notify CMake that a given Fortran file can be produced by preprocessing a -# pFUnit file. -function(preprocess_pf_suite pf_file fortran_file) - - add_custom_command(OUTPUT ${fortran_file} - COMMAND python ${PFUNIT_PARSER} ${pf_file} ${fortran_file} - MAIN_DEPENDENCY ${pf_file}) - -endfunction(preprocess_pf_suite) - -# This function manages most of the work involved in preprocessing pFUnit -# files. You provide every *.pf file for a given executable, an output -# directory where generated sources should be output, and a list name. It -# will generate the sources, and append them and the pFUnit driver to the -# named list. -function(process_pFUnit_source_list pf_file_list output_directory - fortran_list_name) - - foreach(pf_file IN LISTS pf_file_list) - - # If a file is a relative path, expand it (relative to current source - # directory. - get_filename_component(pf_file "${pf_file}" ABSOLUTE) - - # Get extensionless base name from input. - get_filename_component(pf_file_stripped "${pf_file}" NAME_WE) - - # Add the generated Fortran files to the source list. - set(fortran_file ${output_directory}/${pf_file_stripped}.F90) - preprocess_pf_suite(${pf_file} ${fortran_file}) - list(APPEND ${fortran_list_name} ${fortran_file}) - - # Add the file to testSuites.inc - set(testSuites_contents - "${testSuites_contents}ADD_TEST_SUITE(${pf_file_stripped}_suite)\n") - endforeach() - - # Regenerate testSuites.inc if and only if necessary. - if(EXISTS ${output_directory}/testSuites.inc) - file(READ ${output_directory}/testSuites.inc old_testSuites_contents) - endif() - - if(NOT testSuites_contents STREQUAL old_testSuites_contents) - file(WRITE ${output_directory}/testSuites.inc ${testSuites_contents}) - endif() - - # Export ${fortran_list_name} to the caller, and add ${PFUNIT_DRIVER} - # to it. 
- set(${fortran_list_name} "${${fortran_list_name}}" "${PFUNIT_DRIVER}" - PARENT_SCOPE) - -endfunction(process_pFUnit_source_list) - -# Creates an executable of the given name using the pFUnit driver. Input -# variables are the executable name, a list of .pf files, the output -# directory for generated sources, and a list of regular Fortran files. -function(add_pFUnit_executable name pf_file_list output_directory - fortran_source_list) - - # Handle source code generation, add to list of sources. - process_pFUnit_source_list("${pf_file_list}" ${output_directory} - fortran_source_list) - - # Create the executable itself. - add_executable(${name} ${fortran_source_list}) - - # Handle pFUnit linking. - target_link_libraries(${name} "${PFUNIT_LIBRARIES}") - - # Necessary to include testSuites.inc - get_target_property(includes ${name} INCLUDE_DIRECTORIES) - if(NOT includes) - unset(includes) - endif() - list(APPEND includes ${output_directory} "${PFUNIT_INCLUDE_DIRS}") - set_target_properties(${name} PROPERTIES - INCLUDE_DIRECTORIES "${includes}") - - # The above lines are equivalent to: - # target_include_directories(${name} PRIVATE ${output_directory}) - # However, target_include_directories was not added until 2.8.11, and at - # the time of this writing, we can't depend on having such a recent - # version of CMake available on HPC systems. - -endfunction(add_pFUnit_executable) - -# Tells CTest what regular expressions are used to signal pass/fail from -# pFUnit output. -function(define_pFUnit_failure test_name) - # Set both pass and fail regular expressions to minimize the chance that - # the system under test will interfere with output and cause a false - # negative. 
- set_tests_properties(${test_name} PROPERTIES - FAIL_REGULAR_EXPRESSION "FAILURES!!!") - set_tests_properties(${test_name} PROPERTIES - PASS_REGULAR_EXPRESSION "OK") -endfunction(define_pFUnit_failure) - -# Does everything needed to create a pFUnit-based test, wrapping add_pFUnit_executable, -# add_test, and define_pFUnit_failure. -# -# Required input variables are the test name, the executable name, a list of .pf files, -# and a list of regular Fortran files. -# -# Optional input variables are GEN_OUTPUT_DIRECTORY and COMMAND (see usage notes at the -# top of this file for details). -# -# If executables need to be prefixed with an mpirun command, this prefix (e.g., -# "mpirun") should be given in the CMAKE variable PFUNIT_MPIRUN. -function(create_pFUnit_test test_name executable_name pf_file_list fortran_source_list) - - # Parse optional arguments - set(options "") - set(oneValueArgs GEN_OUTPUT_DIRECTORY) - set(multiValueArgs COMMAND) - cmake_parse_arguments(MY "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) - if (MY_UNPARSED_ARGUMENTS) - message(FATAL_ERROR "Unknown keywords given to create_pFUnit_test(): \"${MY_UNPARSED_ARGUMENTS}\"") - endif() - - # Change GEN_OUTPUT_DIRECTORY to an absolute path, relative to CMAKE_CURRENT_BINARY_DIR - # Note that, if GEN_OUTPUT_DIRECTORY isn't given, this logic will make the output - # directory default to CMAKE_CURRENT_BINARY_DIR - set(MY_GEN_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/${MY_GEN_OUTPUT_DIRECTORY}) - - # Give default values to optional arguments that aren't present - if (NOT MY_COMMAND) - set(MY_COMMAND ./${executable_name}) - endif() - - # Prefix command with an mpirun command - separate_arguments(PFUNIT_MPIRUN_LIST UNIX_COMMAND ${PFUNIT_MPIRUN}) - set (MY_COMMAND ${PFUNIT_MPIRUN_LIST} ${MY_COMMAND}) - - # Do the work - add_pFUnit_executable(${executable_name} "${pf_file_list}" - ${MY_GEN_OUTPUT_DIRECTORY} "${fortran_source_list}") - add_test(NAME ${test_name} COMMAND ${MY_COMMAND}) - 
define_pFUnit_failure(${test_name}) - -endfunction(create_pFUnit_test) diff --git a/CIME/non_py/src/components/xcpl_comps_nuopc/xatm/src/atm_comp_nuopc.F90 b/CIME/non_py/src/components/xcpl_comps_nuopc/xatm/src/atm_comp_nuopc.F90 index aff6bf0ba6c..64cd5b768da 100644 --- a/CIME/non_py/src/components/xcpl_comps_nuopc/xatm/src/atm_comp_nuopc.F90 +++ b/CIME/non_py/src/components/xcpl_comps_nuopc/xatm/src/atm_comp_nuopc.F90 @@ -14,7 +14,7 @@ module atm_comp_nuopc use NUOPC_Model , only : NUOPC_ModelGet, SetVM use shr_sys_mod , only : shr_sys_abort use shr_kind_mod , only : r8=>shr_kind_r8, i8=>shr_kind_i8, cl=>shr_kind_cl, cs=>shr_kind_cs - use shr_file_mod , only : shr_file_getlogunit, shr_file_setlogunit + use shr_log_mod , only : shr_log_getlogunit, shr_log_setlogunit use dead_methods_mod , only : chkerr, state_setscalar, state_diagnose, alarmInit, memcheck use dead_methods_mod , only : set_component_logging, get_component_instance, log_clock_advance use dead_nuopc_mod , only : dead_read_inparms, ModelInitPhase, ModelSetRunClock @@ -270,7 +270,7 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) end if ! Reset shr logging to original values - call shr_file_setLogUnit (shrlogunit) + call shr_log_setLogUnit (shrlogunit) end subroutine InitializeAdvertise @@ -295,8 +295,8 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) rc = ESMF_SUCCESS ! Reset shr logging to my log file - call shr_file_getLogUnit (shrlogunit) - call shr_file_setLogUnit (logUnit) + call shr_log_getLogUnit (shrlogunit) + call shr_log_setLogUnit (logUnit) ! 
generate the mesh call NUOPC_CompAttributeGet(gcomp, name='mesh_atm', value=cvalue, rc=rc) @@ -349,7 +349,7 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) if (chkerr(rc,__LINE__,u_FILE_u)) return endif - call shr_file_setLogUnit (shrlogunit) + call shr_log_setLogUnit (shrlogunit) end subroutine InitializeRealize @@ -375,8 +375,8 @@ subroutine ModelAdvance(gcomp, rc) end if call memcheck(subname, 3, mastertask) - call shr_file_getLogUnit (shrlogunit) - call shr_file_setLogUnit (logunit) + call shr_log_getLogUnit (shrlogunit) + call shr_log_setLogUnit (logunit) !-------------------------------- ! Pack export state @@ -405,7 +405,7 @@ subroutine ModelAdvance(gcomp, rc) endif endif - call shr_file_setLogUnit (shrlogunit) + call shr_log_setLogUnit (shrlogunit) end subroutine ModelAdvance diff --git a/CIME/non_py/src/components/xcpl_comps_nuopc/xglc/src/glc_comp_nuopc.F90 b/CIME/non_py/src/components/xcpl_comps_nuopc/xglc/src/glc_comp_nuopc.F90 index 07d8ecf1492..4b498f8d9c6 100644 --- a/CIME/non_py/src/components/xcpl_comps_nuopc/xglc/src/glc_comp_nuopc.F90 +++ b/CIME/non_py/src/components/xcpl_comps_nuopc/xglc/src/glc_comp_nuopc.F90 @@ -14,7 +14,7 @@ module glc_comp_nuopc use NUOPC_Model , only : NUOPC_ModelGet, SetVM use shr_sys_mod , only : shr_sys_abort use shr_kind_mod , only : r8=>shr_kind_r8, i8=>shr_kind_i8, cl=>shr_kind_cl, cs=>shr_kind_cs - use shr_file_mod , only : shr_file_getlogunit, shr_file_setlogunit + use shr_log_mod , only : shr_log_getlogunit, shr_log_setlogunit use dead_methods_mod , only : chkerr, state_setscalar, state_diagnose, alarmInit, memcheck use dead_methods_mod , only : set_component_logging, get_component_instance, log_clock_advance use dead_nuopc_mod , only : dead_read_inparms, ModelInitPhase, ModelSetRunClock diff --git a/CIME/non_py/src/components/xcpl_comps_nuopc/xice/src/ice_comp_nuopc.F90 b/CIME/non_py/src/components/xcpl_comps_nuopc/xice/src/ice_comp_nuopc.F90 index 7a17ae2bd88..9185b8e532f 100644 --- 
a/CIME/non_py/src/components/xcpl_comps_nuopc/xice/src/ice_comp_nuopc.F90 +++ b/CIME/non_py/src/components/xcpl_comps_nuopc/xice/src/ice_comp_nuopc.F90 @@ -14,7 +14,7 @@ module ice_comp_nuopc use NUOPC_Model , only : NUOPC_ModelGet, SetVM use shr_sys_mod , only : shr_sys_abort use shr_kind_mod , only : r8=>shr_kind_r8, i8=>shr_kind_i8, cl=>shr_kind_cl, cs=>shr_kind_cs - use shr_file_mod , only : shr_file_getlogunit, shr_file_setlogunit + use shr_log_mod , only : shr_log_getlogunit, shr_log_setlogunit use dead_methods_mod , only : chkerr, state_setscalar, state_diagnose, alarmInit, memcheck use dead_methods_mod , only : set_component_logging, get_component_instance, log_clock_advance use dead_nuopc_mod , only : dead_read_inparms, ModelInitPhase, ModelSetRunClock @@ -281,7 +281,7 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) ! Reset shr logging to original values !---------------------------------------------------------------------------- - call shr_file_setLogUnit (shrlogunit) + call shr_log_setLogUnit (shrlogunit) end subroutine InitializeAdvertise @@ -305,8 +305,8 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) ! Reset shr logging to my log file !---------------------------------------------------------------------------- - call shr_file_getLogUnit (shrlogunit) - call shr_file_setLogUnit (logUnit) + call shr_log_getLogUnit (shrlogunit) + call shr_log_setLogUnit (logUnit) !-------------------------------- ! 
generate the mesh @@ -368,7 +368,7 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) if (chkerr(rc,__LINE__,u_FILE_u)) return endif - call shr_file_setLogUnit (shrlogunit) + call shr_log_setLogUnit (shrlogunit) end subroutine InitializeRealize @@ -390,8 +390,8 @@ subroutine ModelAdvance(gcomp, rc) call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO, rc=rc) call memcheck(subname, 3, mastertask) - call shr_file_getLogUnit (shrlogunit) - call shr_file_setLogUnit (logunit) + call shr_log_getLogUnit (shrlogunit) + call shr_log_setLogUnit (logunit) !-------------------------------- ! Pack export state @@ -416,7 +416,7 @@ subroutine ModelAdvance(gcomp, rc) endif endif - call shr_file_setLogUnit (shrlogunit) + call shr_log_setLogUnit (shrlogunit) call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO, rc=rc) diff --git a/CIME/non_py/src/components/xcpl_comps_nuopc/xlnd/src/lnd_comp_nuopc.F90 b/CIME/non_py/src/components/xcpl_comps_nuopc/xlnd/src/lnd_comp_nuopc.F90 index 8a5e539aca3..a43215939ad 100644 --- a/CIME/non_py/src/components/xcpl_comps_nuopc/xlnd/src/lnd_comp_nuopc.F90 +++ b/CIME/non_py/src/components/xcpl_comps_nuopc/xlnd/src/lnd_comp_nuopc.F90 @@ -14,7 +14,7 @@ module lnd_comp_nuopc use NUOPC_Model , only : NUOPC_ModelGet, SetVM use shr_sys_mod , only : shr_sys_abort use shr_kind_mod , only : r8=>shr_kind_r8, i8=>shr_kind_i8, cl=>shr_kind_cl, cs=>shr_kind_cs - use shr_file_mod , only : shr_file_getlogunit, shr_file_setlogunit + use shr_log_mod , only : shr_log_getlogunit, shr_log_setlogunit use dead_methods_mod , only : chkerr, state_setscalar, state_diagnose, alarmInit, memcheck use dead_methods_mod , only : set_component_logging, get_component_instance, log_clock_advance use dead_nuopc_mod , only : dead_read_inparms, ModelInitPhase, ModelSetRunClock @@ -292,7 +292,7 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) ! 
Reset shr logging to original values !---------------------------------------------------------------------------- - call shr_file_setLogUnit (shrlogunit) + call shr_log_setLogUnit (shrlogunit) end subroutine InitializeAdvertise @@ -319,8 +319,8 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) ! Reset shr logging to my log file !---------------------------------------------------------------------------- - call shr_file_getLogUnit (shrlogunit) - call shr_file_setLogUnit (logUnit) + call shr_log_getLogUnit (shrlogunit) + call shr_log_setLogUnit (logUnit) !-------------------------------- ! generate the mesh @@ -382,7 +382,7 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) if (chkerr(rc,__LINE__,u_FILE_u)) return endif - call shr_file_setLogUnit (shrlogunit) + call shr_log_setLogUnit (shrlogunit) if (dbug > 5) call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO, rc=rc) @@ -406,8 +406,8 @@ subroutine ModelAdvance(gcomp, rc) call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO, rc=rc) call memcheck(subname, 3, mastertask) - call shr_file_getLogUnit (shrlogunit) - call shr_file_setLogUnit (logunit) + call shr_log_getLogUnit (shrlogunit) + call shr_log_setLogUnit (logunit) !-------------------------------- ! 
Pack export state @@ -432,7 +432,7 @@ subroutine ModelAdvance(gcomp, rc) endif endif - call shr_file_setLogUnit (shrlogunit) + call shr_log_setLogUnit (shrlogunit) call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO, rc=rc) diff --git a/CIME/non_py/src/components/xcpl_comps_nuopc/xocn/src/ocn_comp_nuopc.F90 b/CIME/non_py/src/components/xcpl_comps_nuopc/xocn/src/ocn_comp_nuopc.F90 index b57b6feec56..87f8ca25102 100644 --- a/CIME/non_py/src/components/xcpl_comps_nuopc/xocn/src/ocn_comp_nuopc.F90 +++ b/CIME/non_py/src/components/xcpl_comps_nuopc/xocn/src/ocn_comp_nuopc.F90 @@ -14,7 +14,7 @@ module ocn_comp_nuopc use NUOPC_Model , only : NUOPC_ModelGet, SetVM use shr_sys_mod , only : shr_sys_abort use shr_kind_mod , only : r8=>shr_kind_r8, i8=>shr_kind_i8, cl=>shr_kind_cl, cs=>shr_kind_cs - use shr_file_mod , only : shr_file_getlogunit, shr_file_setlogunit + use shr_log_mod , only : shr_log_getlogunit, shr_log_setlogunit use dead_methods_mod , only : chkerr, state_setscalar, state_diagnose, alarmInit, memcheck use dead_methods_mod , only : set_component_logging, get_component_instance, log_clock_advance use dead_nuopc_mod , only : dead_read_inparms, ModelInitPhase, ModelSetRunClock @@ -238,7 +238,7 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) end if ! Reset shr logging to original values - call shr_file_setLogUnit (shrlogunit) + call shr_log_setLogUnit (shrlogunit) end subroutine InitializeAdvertise @@ -260,8 +260,8 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) rc = ESMF_SUCCESS ! Reset shr logging to my log file - call shr_file_getLogUnit (shrlogunit) - call shr_file_setLogUnit (logunit) + call shr_log_getLogUnit (shrlogunit) + call shr_log_setLogUnit (logunit) ! 
generate the mesh call NUOPC_CompAttributeGet(gcomp, name='mesh_ocn', value=cvalue, rc=rc) @@ -306,7 +306,7 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) if (chkerr(rc,__LINE__,u_FILE_u)) return endif - call shr_file_setLogUnit (shrlogunit) + call shr_log_setLogUnit (shrlogunit) end subroutine InitializeRealize @@ -328,8 +328,8 @@ subroutine ModelAdvance(gcomp, rc) call memcheck(subname, 3, mastertask) - call shr_file_getLogUnit (shrlogunit) - call shr_file_setLogUnit (logunit) + call shr_log_getLogUnit (shrlogunit) + call shr_log_setLogUnit (logunit) ! Pack export state call NUOPC_ModelGet(gcomp, modelClock=clock, exportState=exportState, rc=rc) @@ -343,7 +343,7 @@ subroutine ModelAdvance(gcomp, rc) if (chkerr(rc,__LINE__,u_FILE_u)) return endif - call shr_file_setLogUnit (shrlogunit) + call shr_log_setLogUnit (shrlogunit) end subroutine ModelAdvance diff --git a/CIME/non_py/src/components/xcpl_comps_nuopc/xrof/src/rof_comp_nuopc.F90 b/CIME/non_py/src/components/xcpl_comps_nuopc/xrof/src/rof_comp_nuopc.F90 index 87e5dad9aee..1b5b9dd4901 100644 --- a/CIME/non_py/src/components/xcpl_comps_nuopc/xrof/src/rof_comp_nuopc.F90 +++ b/CIME/non_py/src/components/xcpl_comps_nuopc/xrof/src/rof_comp_nuopc.F90 @@ -14,7 +14,7 @@ module rof_comp_nuopc use NUOPC_Model , only : NUOPC_ModelGet, SetVM use shr_sys_mod , only : shr_sys_abort use shr_kind_mod , only : r8=>shr_kind_r8, i8=>shr_kind_i8, cl=>shr_kind_cl, cs=>shr_kind_cs - use shr_file_mod , only : shr_file_getlogunit, shr_file_setlogunit + use shr_log_mod , only : shr_log_getlogunit, shr_log_setlogunit use dead_methods_mod , only : chkerr, state_setscalar, state_diagnose, alarmInit, memcheck use dead_methods_mod , only : set_component_logging, get_component_instance, log_clock_advance use dead_nuopc_mod , only : dead_read_inparms, ModelInitPhase, ModelSetRunClock @@ -229,7 +229,7 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) ! 
Reset shr logging to original values !---------------------------------------------------------------------------- - call shr_file_setLogUnit (shrlogunit) + call shr_log_setLogUnit (shrlogunit) end subroutine InitializeAdvertise @@ -251,8 +251,8 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) rc = ESMF_SUCCESS ! Reset shr logging to my log file - call shr_file_getLogUnit (shrlogunit) - call shr_file_setLogUnit (logUnit) + call shr_log_getLogUnit (shrlogunit) + call shr_log_setLogUnit (logUnit) ! generate the mesh @@ -304,7 +304,7 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) if (chkerr(rc,__LINE__,u_FILE_u)) return endif - call shr_file_setLogUnit (shrlogunit) + call shr_log_setLogUnit (shrlogunit) end subroutine InitializeRealize @@ -329,8 +329,8 @@ subroutine ModelAdvance(gcomp, rc) end if call memcheck(subname, 3, mastertask) - call shr_file_getLogUnit (shrlogunit) - call shr_file_setLogUnit (logunit) + call shr_log_getLogUnit (shrlogunit) + call shr_log_setLogUnit (logunit) ! 
Pack export state call NUOPC_ModelGet(gcomp, modelClock=clock, exportState=exportState, rc=rc) @@ -348,7 +348,7 @@ subroutine ModelAdvance(gcomp, rc) endif endif - call shr_file_setLogUnit (shrlogunit) + call shr_log_setLogUnit (shrlogunit) if (dbug > 5) then call ESMF_LogWrite(subname//' done', ESMF_LOGMSG_INFO, rc=rc) diff --git a/CIME/non_py/src/components/xcpl_comps_nuopc/xshare/dead_methods_mod.F90 b/CIME/non_py/src/components/xcpl_comps_nuopc/xshare/dead_methods_mod.F90 index 883532a0ce1..a9ad38e2419 100644 --- a/CIME/non_py/src/components/xcpl_comps_nuopc/xshare/dead_methods_mod.F90 +++ b/CIME/non_py/src/components/xcpl_comps_nuopc/xshare/dead_methods_mod.F90 @@ -22,7 +22,7 @@ module dead_methods_mod use NUOPC_Model , only : NUOPC_ModelGet use shr_kind_mod , only : r8 => shr_kind_r8, cl=>shr_kind_cl, cs=>shr_kind_cs use shr_sys_mod , only : shr_sys_abort - use shr_file_mod , only : shr_file_setlogunit, shr_file_getLogUnit + use shr_log_mod , only : shr_log_setlogunit, shr_log_getLogUnit implicit none private @@ -157,7 +157,7 @@ subroutine set_component_logging(gcomp, mastertask, logunit, shrlogunit, rc) logUnit = 6 endif - call shr_file_setLogUnit (logunit) + call shr_log_setLogUnit (logunit) call ESMF_GridCompGet(gcomp, name=name, rc=rc) if (chkerr(rc,__LINE__,u_FILE_u)) return diff --git a/CIME/non_py/src/components/xcpl_comps_nuopc/xwav/src/wav_comp_nuopc.F90 b/CIME/non_py/src/components/xcpl_comps_nuopc/xwav/src/wav_comp_nuopc.F90 index 938652b5bce..aa4d982e530 100644 --- a/CIME/non_py/src/components/xcpl_comps_nuopc/xwav/src/wav_comp_nuopc.F90 +++ b/CIME/non_py/src/components/xcpl_comps_nuopc/xwav/src/wav_comp_nuopc.F90 @@ -14,7 +14,7 @@ module wav_comp_nuopc use NUOPC_Model , only : NUOPC_ModelGet, SetVM use shr_sys_mod , only : shr_sys_abort use shr_kind_mod , only : r8=>shr_kind_r8, i8=>shr_kind_i8, cl=>shr_kind_cl, cs=>shr_kind_cs - use shr_file_mod , only : shr_file_getlogunit, shr_file_setlogunit + use shr_log_mod , only : shr_log_getlogunit, 
shr_log_setlogunit use dead_methods_mod , only : chkerr, state_setscalar, state_diagnose, alarmInit, memcheck use dead_methods_mod , only : set_component_logging, get_component_instance, log_clock_advance use dead_nuopc_mod , only : dead_read_inparms, ModelInitPhase, ModelSetRunClock @@ -228,7 +228,7 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) if (chkerr(rc,__LINE__,u_FILE_u)) return ! Reset shr logging to original values - call shr_file_setLogUnit (shrlogunit) + call shr_log_setLogUnit (shrlogunit) end subroutine InitializeAdvertise @@ -250,8 +250,8 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) rc = ESMF_SUCCESS ! Reset shr logging to my log file - call shr_file_getLogUnit (shrlogunit) - call shr_file_setLogUnit (logunit) + call shr_log_getLogUnit (shrlogunit) + call shr_log_setLogUnit (logunit) ! generate the mesh call NUOPC_CompAttributeGet(gcomp, name='mesh_wav', value=cvalue, rc=rc) @@ -296,7 +296,7 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) if (chkerr(rc,__LINE__,u_FILE_u)) return endif - call shr_file_setLogUnit (shrlogunit) + call shr_log_setLogUnit (shrlogunit) end subroutine InitializeRealize @@ -318,8 +318,8 @@ subroutine ModelAdvance(gcomp, rc) call memcheck(subname, 3, mastertask) - call shr_file_getLogUnit (shrlogunit) - call shr_file_setLogUnit (logunit) + call shr_log_getLogUnit (shrlogunit) + call shr_log_setLogUnit (logunit) ! 
Pack export state call NUOPC_ModelGet(gcomp, modelClock=clock, exportState=exportState, rc=rc) @@ -337,7 +337,7 @@ subroutine ModelAdvance(gcomp, rc) endif endif - call shr_file_setLogUnit (shrlogunit) + call shr_log_setLogUnit (shrlogunit) end subroutine ModelAdvance diff --git a/CIME/non_py/src/timing/Makefile b/CIME/non_py/src/timing/Makefile index 89f3b4a7bed..7b0505fbffa 100644 --- a/CIME/non_py/src/timing/Makefile +++ b/CIME/non_py/src/timing/Makefile @@ -52,7 +52,11 @@ ifeq ($(strip $(MPILIB)), mpi-serial) FC := $(SFC) MPIFC := $(SFC) MPICC := $(SCC) - INCLDIR += -I$(GPTL_LIBDIR)/../mct/mpi-serial + ifdef MPI_SERIAL_PATH + INCLDIR += -I$(MPI_SERIAL_PATH)/include + else + INCLDIR += -I$(GPTL_LIBDIR)/../mct/mpi-serial + endif else CC := $(MPICC) FC := $(MPIFC) diff --git a/CIME/non_py/src/timing/perf_mod.F90 b/CIME/non_py/src/timing/perf_mod.F90 index 34a821cefa4..b8f9b50a2a8 100644 --- a/CIME/non_py/src/timing/perf_mod.F90 +++ b/CIME/non_py/src/timing/perf_mod.F90 @@ -30,7 +30,7 @@ module perf_mod use shr_kind_mod, only: SHR_KIND_CS, SHR_KIND_CM, SHR_KIND_CX, & SHR_KIND_R8, SHR_KIND_I8 use shr_mpi_mod, only: shr_mpi_barrier, shr_mpi_bcast - use shr_file_mod, only: shr_file_getUnit, shr_file_freeUnit + use shr_log_mod, only: shr_log_getUnit, shr_log_freeUnit use namelist_utils, only: find_group_name #endif use mpi @@ -1186,7 +1186,7 @@ subroutine t_prf(filename, mpicom, num_outpe, stride_outpe, & fname(i:i) = " " enddo - unitn = shr_file_getUnit() + unitn = shr_log_getUnit() ! determine what the current output mode is (append or write) if (GPTLprint_mode_query() == GPTLprint_write) then @@ -1391,7 +1391,7 @@ subroutine t_prf(filename, mpicom, num_outpe, stride_outpe, & endif - call shr_file_freeUnit( unitn ) + call shr_log_freeUnit( unitn ) ! reset GPTL output mode if (pr_write) then @@ -1539,7 +1539,7 @@ subroutine t_initf(NLFilename, LogPrint, LogUnit, mpicom, MasterTask, & ! 
Read in the prof_inparm namelist from NLFilename if it exists write(p_logunit,*) '(t_initf) Read in prof_inparm namelist from: '//trim(NLFilename) - unitn = shr_file_getUnit() + unitn = shr_log_getUnit() ierr = 1 open( unitn, file=trim(NLFilename), status="OLD", form="FORMATTED", access="SEQUENTIAL", iostat=ierr ) @@ -1560,7 +1560,7 @@ subroutine t_initf(NLFilename, LogPrint, LogUnit, mpicom, MasterTask, & close(unitn) endif - call shr_file_freeUnit( unitn ) + call shr_log_freeUnit( unitn ) endif @@ -1611,7 +1611,7 @@ subroutine t_initf(NLFilename, LogPrint, LogUnit, mpicom, MasterTask, & ! Read in the papi_inparm namelist from NLFilename if it exists write(p_logunit,*) '(t_initf) Read in papi_inparm namelist from: '//trim(NLFilename) - unitn = shr_file_getUnit() + unitn = shr_log_getUnit() ierr = 1 open( unitn, file=trim(NLFilename), status="OLD", form="FORMATTED", access="SEQUENTIAL", iostat=ierr ) @@ -1631,7 +1631,7 @@ subroutine t_initf(NLFilename, LogPrint, LogUnit, mpicom, MasterTask, & close(unitn) endif - call shr_file_freeUnit( unitn ) + call shr_log_freeUnit( unitn ) ! if enabled and nothing set, use "defaults" if ((papi_ctr1_str(1:11) .eq. "PAPI_NO_CTR") .and. 
& diff --git a/CIME/non_py/src/timing/perf_utils.F90 b/CIME/non_py/src/timing/perf_utils.F90 index 7b1d8382ec5..96d08ff9c9c 100644 --- a/CIME/non_py/src/timing/perf_utils.F90 +++ b/CIME/non_py/src/timing/perf_utils.F90 @@ -26,8 +26,8 @@ module perf_utils public perfutils_setunit public shr_sys_abort public shr_mpi_barrier - public shr_file_getUnit - public shr_file_freeUnit + public shr_log_getUnit + public shr_log_freeUnit public find_group_name public to_lower public shr_mpi_bcast @@ -322,11 +322,11 @@ END SUBROUTINE shr_mpi_bcastl0 !=============================================================================== -!================== Routines from csm_share/shr/shr_file_mod.F90 =============== +!================== Routines from csm_share/shr/shr_log_mod.F90 =============== !=============================================================================== !BOP =========================================================================== ! -! !IROUTINE: shr_file_getUnit -- Get a free FORTRAN unit number +! !IROUTINE: shr_log_getUnit -- Get a free FORTRAN unit number ! ! !DESCRIPTION: Get the next free FORTRAN unit number. ! @@ -336,47 +336,47 @@ END SUBROUTINE shr_mpi_bcastl0 ! ! !INTERFACE: ------------------------------------------------------------------ -INTEGER FUNCTION shr_file_getUnit () +INTEGER FUNCTION shr_log_getUnit () implicit none !EOP !----- local parameters ----- - integer(SHR_KIND_IN),parameter :: shr_file_minUnit = 10 ! Min unit number to give - integer(SHR_KIND_IN),parameter :: shr_file_maxUnit = 99 ! Max unit number to give + integer(SHR_KIND_IN),parameter :: shr_log_minUnit = 10 ! Min unit number to give + integer(SHR_KIND_IN),parameter :: shr_log_maxUnit = 99 ! Max unit number to give !----- local variables ----- integer(SHR_KIND_IN) :: n ! loop index logical :: opened ! 
If unit opened or not !----- formats ----- - character(*),parameter :: subName = '(shr_file_getUnit) ' - character(*),parameter :: F00 = "('(shr_file_getUnit) ',A,I4,A)" + character(*),parameter :: subName = '(shr_log_getUnit) ' + character(*),parameter :: F00 = "('(shr_log_getUnit) ',A,I4,A)" !------------------------------------------------------------------------------- ! Notes: !------------------------------------------------------------------------------- ! --- Choose first available unit other than 0, 5, or 6 ------ - do n=shr_file_minUnit, shr_file_maxUnit + do n=shr_log_minUnit, shr_log_maxUnit inquire( n, opened=opened ) if (n == 5 .or. n == 6 .or. opened) then cycle end if - shr_file_getUnit = n + shr_log_getUnit = n return end do call shr_sys_abort( subName//': Error: no available units found' ) -END FUNCTION shr_file_getUnit +END FUNCTION shr_log_getUnit !=============================================================================== !=============================================================================== !BOP =========================================================================== ! -! !IROUTINE: shr_file_freeUnit -- Free up a FORTRAN unit number +! !IROUTINE: shr_log_freeUnit -- Free up a FORTRAN unit number ! ! !DESCRIPTION: Free up the given unit number ! @@ -386,7 +386,7 @@ END FUNCTION shr_file_getUnit ! ! !INTERFACE: ------------------------------------------------------------------ -SUBROUTINE shr_file_freeUnit ( unit) +SUBROUTINE shr_log_freeUnit ( unit) implicit none @@ -397,18 +397,18 @@ SUBROUTINE shr_file_freeUnit ( unit) !EOP !----- local parameters ----- - integer(SHR_KIND_IN),parameter :: shr_file_minUnit = 10 ! Min unit number to give - integer(SHR_KIND_IN),parameter :: shr_file_maxUnit = 99 ! Max unit number to give + integer(SHR_KIND_IN),parameter :: shr_log_minUnit = 10 ! Min unit number to give + integer(SHR_KIND_IN),parameter :: shr_log_maxUnit = 99 ! 
Max unit number to give !----- formats ----- - character(*), parameter :: subName = '(shr_file_freeUnit) ' - character(*), parameter :: F00 = "('(shr_file_freeUnit) ',A,I4,A)" + character(*), parameter :: subName = '(shr_log_freeUnit) ' + character(*), parameter :: F00 = "('(shr_log_freeUnit) ',A,I4,A)" !------------------------------------------------------------------------------- ! Notes: !------------------------------------------------------------------------------- - if (unit < 0 .or. unit > shr_file_maxUnit) then + if (unit < 0 .or. unit > shr_log_maxUnit) then !pw if (s_loglev > 0) write(pu_logunit,F00) 'invalid unit number request:', unit else if (unit == 0 .or. unit == 5 .or. unit == 6) then call shr_sys_abort( subName//': Error: units 0, 5, and 6 must not be freed' ) @@ -416,7 +416,7 @@ SUBROUTINE shr_file_freeUnit ( unit) return -END SUBROUTINE shr_file_freeUnit +END SUBROUTINE shr_log_freeUnit !=============================================================================== !============= Routines from atm/cam/src/utils/namelist_utils.F90 ============== diff --git a/CIME/scripts/create_newcase.py b/CIME/scripts/create_newcase.py index 3faea5d6553..1e7b33ea315 100755 --- a/CIME/scripts/create_newcase.py +++ b/CIME/scripts/create_newcase.py @@ -239,6 +239,8 @@ def parse_command_line(args, cimeroot, description): parser.add_argument( "--driver", + # use get_cime_default_driver rather than config.driver_default as it considers + # environment, user config then config.driver_default default=get_cime_default_driver(), choices=drv_choices, help=drv_help, @@ -269,6 +271,18 @@ def parse_command_line(args, cimeroot, description): help="Specify number of GPUs used for simulation. 
", ) + parser.add_argument( + "--gpu-type", + default=None, + help="Specify type of GPU hardware - currently supported are v100, a100, mi250", + ) + + parser.add_argument( + "--gpu-offload", + default=None, + help="Specify gpu offload method - currently supported are openacc, openmp, combined", + ) + args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) if args.srcroot is not None: @@ -345,6 +359,8 @@ def parse_command_line(args, cimeroot, description): args.extra_machines_dir, args.case_group, args.ngpus_per_node, + args.gpu_type, + args.gpu_offload, ) @@ -382,6 +398,8 @@ def _main_func(description=None): extra_machines_dir, case_group, ngpus_per_node, + gpu_type, + gpu_offload, ) = parse_command_line(sys.argv, cimeroot, description) if script_root is None: @@ -447,6 +465,8 @@ def _main_func(description=None): extra_machines_dir=extra_machines_dir, case_group=case_group, ngpus_per_node=ngpus_per_node, + gpu_type=gpu_type, + gpu_offload=gpu_offload, ) # Called after create since casedir does not exist yet diff --git a/CIME/scripts/create_test.py b/CIME/scripts/create_test.py index 753e627b24b..65fcc03b359 100755 --- a/CIME/scripts/create_test.py +++ b/CIME/scripts/create_test.py @@ -218,8 +218,8 @@ def parse_command_line(args, description): ) parser.add_argument( - "--xml-driver", - choices=("mct", "nuopc", "moab"), + "--driver", + choices=model_config.driver_choices, help="Override driver specified in tests and use this one.", ) @@ -473,12 +473,25 @@ def parse_command_line(args, description): f"The default is {srcroot_default}", ) + parser.add_argument( + "--force-rebuild", + action="store_true", + help="When used with 'use-existing' and 'test-id', the" + "tests will have their 'BUILD_SHAREDLIB' phase reset to 'PEND'.", + ) + CIME.utils.add_mail_type_args(parser) args = CIME.utils.parse_args_and_handle_standard_logging_options(args, parser) CIME.utils.resolve_mail_type_args(args) + if args.force_rebuild: + expect( + args.use_existing and 
args.test_id, + "Cannot force a rebuild without 'use-existing' and 'test-id'", + ) + # generate and compare flags may not point to the same directory if model_config.create_test_flag_mode == "cesm": if args.generate is not None: @@ -623,7 +636,7 @@ def parse_command_line(args, description): xml_testlist=args.xml_testlist, machine=machine_name, compiler=args.compiler, - driver=args.xml_driver, + driver=args.driver, ) test_names = [item["name"] for item in test_data] for test_datum in test_data: @@ -754,6 +767,7 @@ def parse_command_line(args, description): args.single_exe, args.workflow, args.chksum, + args.force_rebuild, ) @@ -913,6 +927,7 @@ def create_test( single_exe, workflow, chksum, + force_rebuild, ): ############################################################################### impl = TestScheduler( @@ -953,6 +968,7 @@ def create_test( single_exe=single_exe, workflow=workflow, chksum=chksum, + force_rebuild=force_rebuild, ) success = impl.run_tests( @@ -1054,6 +1070,7 @@ def _main_func(description=None): single_exe, workflow, chksum, + force_rebuild, ) = parse_command_line(sys.argv, description) success = False @@ -1105,6 +1122,7 @@ def _main_func(description=None): single_exe, workflow, chksum, + force_rebuild, ) run_count += 1 diff --git a/CIME/scripts/query_config.py b/CIME/scripts/query_config.py index 674713e0485..88d2151d1c1 100755 --- a/CIME/scripts/query_config.py +++ b/CIME/scripts/query_config.py @@ -8,7 +8,7 @@ from CIME.Tools.standard_script_setup import * import re -from CIME.utils import expect +from CIME.utils import expect, get_cime_default_driver, deprecate_action from CIME.XML.files import Files from CIME.XML.component import Component from CIME.XML.compsets import Compsets @@ -314,8 +314,16 @@ def parse_command_line(args, description): parser.add_argument( "--comp_interface", - choices=supported_comp_interfaces, + choices=supported_comp_interfaces, # same as config.driver_choices default="mct", + action=deprecate_action(", use --driver 
argument"), + help="DEPRECATED: Use --driver argument", + ) + + parser.add_argument( + "--driver", + choices=config.driver_choices, + default=get_cime_default_driver(), help="Coupler/Driver interface", ) @@ -332,7 +340,7 @@ def parse_command_line(args, description): args.machines, args.long, args.xml, - files[args.comp_interface], + files[args.driver], ) diff --git a/CIME/test_scheduler.py b/CIME/test_scheduler.py index a657e2a6b39..47119a09320 100644 --- a/CIME/test_scheduler.py +++ b/CIME/test_scheduler.py @@ -13,7 +13,7 @@ from collections import OrderedDict from CIME.XML.standard_module_setup import * -from CIME.get_tests import get_recommended_test_time, get_build_groups +from CIME.get_tests import get_recommended_test_time, get_build_groups, is_perf_test from CIME.utils import ( append_status, append_testlog, @@ -209,6 +209,7 @@ def __init__( single_exe=False, workflow=None, chksum=False, + force_rebuild=False, ): ########################################################################### self._cime_root = get_cime_root() @@ -224,7 +225,11 @@ def __init__( self._input_dir = input_dir self._pesfile = pesfile self._allow_baseline_overwrite = allow_baseline_overwrite - self._allow_pnl = allow_pnl + self._single_exe = single_exe + if self._single_exe: + self._allow_pnl = True + else: + self._allow_pnl = allow_pnl self._non_local = non_local self._build_groups = [] self._workflow = workflow @@ -287,6 +292,7 @@ def __init__( ) self._clean = clean + self._namelists_only = namelists_only self._walltime = walltime @@ -392,6 +398,9 @@ def __init__( if use_existing: for test in self._tests: with TestStatus(self._get_test_dir(test)) as ts: + if force_rebuild: + ts.set_status(SHAREDLIB_BUILD_PHASE, TEST_PEND_STATUS) + for phase, status in ts: if phase in CORE_PHASES: if status in [TEST_PEND_STATUS, TEST_FAIL_STATUS]: @@ -661,8 +670,17 @@ def _create_newcase_phase(self, test): pesize = case_opt[1:] create_newcase_cmd += " --pecount {}".format(pesize) elif 
case_opt.startswith("G"): - ngpus_per_node = case_opt[1:] - create_newcase_cmd += " --ngpus-per-node {}".format(ngpus_per_node) + if "-" in case_opt: + ngpus_per_node, gpu_type, gpu_offload = case_opt[1:].split("-") + else: + error = "GPU test argument format is ngpus_per_node-gpu_type-gpu_offload" + self._log_output(test, error) + return False, error + create_newcase_cmd += ( + " --ngpus-per-node {} --gpu-type {} --gpu-offload {}".format( + ngpus_per_node, gpu_type, gpu_offload + ) + ) elif case_opt.startswith("V"): self._cime_driver = case_opt[1:] create_newcase_cmd += " --driver {}".format(self._cime_driver) @@ -949,7 +967,10 @@ def _xml_phase(self, test): ) envtest.set_initial_values(case) case.set_value("TEST", True) - case.set_value("SAVE_TIMING", self._save_timing) + if is_perf_test(test): + case.set_value("SAVE_TIMING", True) + else: + case.set_value("SAVE_TIMING", self._save_timing) # handle single-exe here, all cases will use the EXEROOT from # the first case in the build group @@ -995,6 +1016,17 @@ def _setup_phase(self, test): "Fatal error in case.cmpgen_namelists: {}".format(output), ) + if self._single_exe: + with Case(self._get_test_dir(test), read_only=False) as case: + tests = Tests() + + try: + tests.support_single_exe(case) + except Exception: + self._update_test_status_file(test, SETUP_PHASE, TEST_FAIL_STATUS) + + raise + return rv ########################################################################### diff --git a/CIME/test_status.py b/CIME/test_status.py index 20d2cef93a9..5f306b7db0e 100644 --- a/CIME/test_status.py +++ b/CIME/test_status.py @@ -274,6 +274,17 @@ def get_status(self, phase): def get_comment(self, phase): return self._phase_statuses[phase][1] if phase in self._phase_statuses else None + def current_is(self, phase, status): + try: + latest = self.get_latest_phase() + except KeyError: + return False + + return latest == phase and self.get_status(phase) == status + + def get_latest_phase(self): + return 
list(self._phase_statuses.keys())[-1] + def phase_statuses_dump( self, prefix="", skip_passes=False, skip_phase_list=None, xfails=None ): @@ -449,7 +460,7 @@ def _get_overall_status_based_on_phases( if rv == TEST_PASS_STATUS: rv = NAMELIST_FAIL_STATUS - elif phase == BASELINE_PHASE: + elif phase in [BASELINE_PHASE, THROUGHPUT_PHASE, MEMCOMP_PHASE]: if rv in [NAMELIST_FAIL_STATUS, TEST_PASS_STATUS]: phase_responsible_for_status = phase rv = TEST_DIFF_STATUS @@ -501,7 +512,9 @@ def get_overall_test_status( >>> _test_helper2('PASS ERS.foo.A RUN\nFAIL ERS.foo.A TPUTCOMP') ('PASS', 'RUN') >>> _test_helper2('PASS ERS.foo.A RUN\nFAIL ERS.foo.A TPUTCOMP', check_throughput=True) - ('FAIL', 'TPUTCOMP') + ('DIFF', 'TPUTCOMP') + >>> _test_helper2('PASS ERS.foo.A RUN\nFAIL ERS.foo.A MEMCOMP', check_memory=True) + ('DIFF', 'MEMCOMP') >>> _test_helper2('PASS ERS.foo.A MODEL_BUILD\nPASS ERS.foo.A RUN\nFAIL ERS.foo.A NLCOMP') ('NLFAIL', 'RUN') >>> _test_helper2('PASS ERS.foo.A MODEL_BUILD\nPEND ERS.foo.A RUN\nFAIL ERS.foo.A NLCOMP') diff --git a/CIME/test_utils.py b/CIME/test_utils.py index 4f035839202..d4daa131496 100644 --- a/CIME/test_utils.py +++ b/CIME/test_utils.py @@ -150,7 +150,9 @@ def test_to_string( def get_test_status_files(test_root, compiler, test_id=None): test_id_glob = ( - "*{}*".format(compiler) if test_id is None else "*{}*".format(test_id) + "*{}*".format(compiler) + if test_id is None + else "*{}*{}*".format(compiler, test_id) ) test_status_files = glob.glob( "{}/{}/{}".format(test_root, test_id_glob, TEST_STATUS_FILENAME) diff --git a/CIME/tests/base.py b/CIME/tests/base.py index acadf7defc8..524900fac82 100644 --- a/CIME/tests/base.py +++ b/CIME/tests/base.py @@ -61,7 +61,7 @@ def setUp(self): self._hasbatch = self.MACHINE.has_batch_system() and not self.NO_BATCH self._do_teardown = not self.NO_TEARDOWN self._root_dir = os.getcwd() - + self._cprnc = self.MACHINE.get_value("CCSM_CPRNC") customize_path = os.path.join(utils.get_src_root(), "cime_config", 
"customize") self._config = Config.load(customize_path) diff --git a/CIME/tests/scripts_regression_tests.py b/CIME/tests/scripts_regression_tests.py index 039f13c7718..66f4c015298 100755 --- a/CIME/tests/scripts_regression_tests.py +++ b/CIME/tests/scripts_regression_tests.py @@ -44,6 +44,7 @@ from CIME.provenance import get_test_success, save_test_success from CIME import utils from CIME.tests.base import BaseTestCase +from CIME.config import Config os.environ["CIME_GLOBAL_WALLTIME"] = "0:05:00" @@ -151,6 +152,9 @@ def configure_tests( ): config = CIME.utils.get_cime_config() + customize_path = os.path.join(utils.get_src_root(), "cime_config", "customize") + Config.load(customize_path) + if timeout: BaseTestCase.GLOBAL_TIMEOUT = str(timeout) diff --git a/CIME/tests/test_sys_bless_tests_results.py b/CIME/tests/test_sys_bless_tests_results.py index 8cf76f8cbb5..3ae8d4f5b57 100644 --- a/CIME/tests/test_sys_bless_tests_results.py +++ b/CIME/tests/test_sys_bless_tests_results.py @@ -17,6 +17,10 @@ def setUp(self): # recording baselines are working restrictive_mask = 0o027 self._orig_umask = os.umask(restrictive_mask) + if not self._cprnc: + self.skipTest( + "Test cannot run without cprnc program defined in config_machines.xml" + ) def tearDown(self): super().tearDown() diff --git a/CIME/tests/test_sys_cime_case.py b/CIME/tests/test_sys_cime_case.py index ac64abd5a7b..dabea7d9600 100644 --- a/CIME/tests/test_sys_cime_case.py +++ b/CIME/tests/test_sys_cime_case.py @@ -12,6 +12,11 @@ from CIME.case.case import Case from CIME.XML.env_run import EnvRun +try: + collectionsAbc = collections.abc +except AttributeError: + collectionsAbc = collections + class TestCimeCase(base.BaseTestCase): def test_cime_case(self): @@ -86,7 +91,7 @@ def test_cime_case_prereq(self): prereq=prereq_name, job=job_name, skip_pnl=True, dry_run=True ) self.assertTrue( - isinstance(batch_commands, collections.Sequence), + isinstance(batch_commands, collectionsAbc.Sequence), "case.submit_jobs did not 
return a sequence for a dry run", ) self.assertTrue( @@ -99,7 +104,7 @@ def test_cime_case_prereq(self): # The prerequisite should be applied to all jobs, though we're only expecting one for batch_cmd in batch_commands: self.assertTrue( - isinstance(batch_cmd, collections.Sequence), + isinstance(batch_cmd, collectionsAbc.Sequence), "case.submit_jobs did not return a sequence of sequences", ) self.assertTrue( @@ -161,7 +166,7 @@ def test_cime_case_allow_failed_prereq(self): dry_run=True, ) self.assertTrue( - isinstance(batch_commands, collections.Sequence), + isinstance(batch_commands, collectionsAbc.Sequence), "case.submit_jobs did not return a sequence for a dry run", ) num_submissions = 1 @@ -190,7 +195,7 @@ def test_cime_case_resubmit_immediate(self): job=job_name, skip_pnl=True, dry_run=True, resubmit_immediate=True ) self.assertTrue( - isinstance(batch_commands, collections.Sequence), + isinstance(batch_commands, collectionsAbc.Sequence), "case.submit_jobs did not return a sequence for a dry run", ) if case.get_value("DOUT_S"): @@ -231,7 +236,7 @@ def test_cime_case_build_threaded_1(self): ) with Case(casedir, read_only=False) as case: - build_threaded = case.get_value("SMP_PRESENT") + build_threaded = case.get_value("BUILD_THREADED") self.assertFalse(build_threaded) build_threaded = case.get_build_threaded() @@ -249,7 +254,7 @@ def test_cime_case_build_threaded_2(self): ) with Case(casedir, read_only=False) as case: - build_threaded = case.get_value("SMP_PRESENT") + build_threaded = case.get_value("BUILD_THREADED") self.assertTrue(build_threaded) build_threaded = case.get_build_threaded() diff --git a/CIME/tests/test_sys_create_newcase.py b/CIME/tests/test_sys_create_newcase.py index 2e6927feec0..a0f07a001b6 100644 --- a/CIME/tests/test_sys_create_newcase.py +++ b/CIME/tests/test_sys_create_newcase.py @@ -68,12 +68,54 @@ def test_a_createnewcase(self): self.run_cmd_assert_result("./case.build", from_dir=testdir) with Case(testdir, read_only=False) as case: 
case.set_value("CHARGE_ACCOUNT", "fred") + # to be used in next test + batch_system = case.get_value("BATCH_SYSTEM") + + # on systems (like github workflow) that do not have batch, set this for the next test + if batch_system == "none": + self.run_cmd_assert_result( + './xmlchange --subgroup case.run BATCH_COMMAND_FLAGS="-q \$JOB_QUEUE"', + from_dir=testdir, + ) # this should not fail with a locked file issue self.run_cmd_assert_result("./case.build", from_dir=testdir) self.run_cmd_assert_result("./case.st_archive --test-all", from_dir=testdir) + with Case(testdir, read_only=False) as case: + batch_command = case.get_value("BATCH_COMMAND_FLAGS", subgroup="case.run") + + self.run_cmd_assert_result( + './xmlchange --append --subgroup case.run BATCH_COMMAND_FLAGS="-l trythis"', + from_dir=testdir, + ) + # Test that changes to BATCH_COMMAND_FLAGS work + with Case(testdir, read_only=False) as case: + new_batch_command = case.get_value( + "BATCH_COMMAND_FLAGS", subgroup="case.run" + ) + + self.assertTrue( + new_batch_command == batch_command + " -l trythis", + msg=f"Failed to correctly append BATCH_COMMAND_FLAGS {new_batch_command} {batch_command}#", + ) + + self.run_cmd_assert_result( + "./xmlchange JOB_QUEUE=fred --subgroup case.run --force", from_dir=testdir + ) + + with Case(testdir, read_only=False) as case: + new_batch_command = case.get_value( + "BATCH_COMMAND_FLAGS", subgroup="case.run" + ) + self.assertTrue( + "fred" in new_batch_command, + msg="Failed to update JOB_QUEUE in BATCH_COMMAND_FLAGS {}".format( + new_batch_command + ), + ) + # Trying to set values outside of context manager should fail case = Case(testdir, read_only=False) with self.assertRaises(utils.CIMEError): diff --git a/CIME/tests/test_sys_jenkins_generic_job.py b/CIME/tests/test_sys_jenkins_generic_job.py index 7fb2a83b740..30b31c5c8d6 100644 --- a/CIME/tests/test_sys_jenkins_generic_job.py +++ b/CIME/tests/test_sys_jenkins_generic_job.py @@ -61,18 +61,17 @@ def threaded_test(self, 
expect_works, extra_args, build_name=None): self._thread_error = str(e) def assert_num_leftovers(self, suite): - num_tests_in_tiny = len(get_tests.get_test_suite(suite)) + num_tests_in_suite = len(get_tests.get_test_suite(suite)) - jenkins_dirs = glob.glob( - "%s/*%s*/" % (self._jenkins_root, self._baseline_name.capitalize()) - ) # case dirs + case_glob = "%s/*%s*/" % (self._jenkins_root, self._baseline_name.capitalize()) + jenkins_dirs = glob.glob(case_glob) # Case dirs # scratch_dirs = glob.glob("%s/*%s*/" % (self._testroot, test_id)) # blr/run dirs self.assertEqual( - num_tests_in_tiny, + num_tests_in_suite, len(jenkins_dirs), - msg="Wrong number of leftover directories in %s, expected %d, see %s" - % (self._jenkins_root, num_tests_in_tiny, jenkins_dirs), + msg="Wrong number of leftover directories in %s, expected %d, see %s. Glob checked %s" + % (self._jenkins_root, num_tests_in_suite, jenkins_dirs, case_glob), ) # JGF: Can't test this at the moment due to root change flag given to jenkins_generic_job @@ -97,6 +96,21 @@ def test_jenkins_generic_job(self): ) # jenkins_generic_job should have automatically cleaned up leftovers from prior run self.assert_dashboard_has_build(build_name) + def test_jenkins_generic_job_save_timing(self): + self.simple_test( + True, "-t cime_test_timing --save-timing -b %s" % self._baseline_name + ) + self.assert_num_leftovers("cime_test_timing") + + jenkins_dirs = glob.glob( + "%s/*%s*/" % (self._jenkins_root, self._baseline_name.capitalize()) + ) # case dirs + case = jenkins_dirs[0] + result = self.run_cmd_assert_result( + "./xmlquery --value SAVE_TIMING", from_dir=case + ) + self.assertEqual(result, "TRUE") + def test_jenkins_generic_job_kill(self): build_name = "jenkins_generic_job_kill_%s" % utils.get_timestamp() run_thread = threading.Thread( diff --git a/CIME/tests/test_sys_test_scheduler.py b/CIME/tests/test_sys_test_scheduler.py index c5312ed7d4a..3dfd4b62124 100755 --- a/CIME/tests/test_sys_test_scheduler.py +++ 
b/CIME/tests/test_sys_test_scheduler.py @@ -20,22 +20,22 @@ def test_chksum(self, strftime): # pylint: disable=unused-argument self.skipTest("Skipping chksum test. Depends on CESM settings") ts = test_scheduler.TestScheduler( - ["SEQ_Ln9.f19_g16_rx1.A.cori-haswell_gnu"], - machine_name="cori-haswell", + ["SEQ_Ln9.f19_g16_rx1.A.perlmutter_gnu"], + machine_name="perlmutter", chksum=True, test_root="/tests", ) with mock.patch.object(ts, "_shell_cmd_for_phase") as _shell_cmd_for_phase: ts._run_phase( - "SEQ_Ln9.f19_g16_rx1.A.cori-haswell_gnu" + "SEQ_Ln9.f19_g16_rx1.A.perlmutter_gnu" ) # pylint: disable=protected-access _shell_cmd_for_phase.assert_called_with( - "SEQ_Ln9.f19_g16_rx1.A.cori-haswell_gnu", + "SEQ_Ln9.f19_g16_rx1.A.perlmutter_gnu", "./case.submit --skip-preview-namelist --chksum", "RUN", - from_dir="/tests/SEQ_Ln9.f19_g16_rx1.A.cori-haswell_gnu.00:00:00", + from_dir="/tests/SEQ_Ln9.f19_g16_rx1.A.perlmutter_gnu.00:00:00", ) def test_a_phases(self): @@ -279,6 +279,60 @@ def test_b_full(self): test_status.TEST_PASS_STATUS, ) + def test_force_rebuild(self): + tests = get_tests.get_full_test_names( + [ + "TESTBUILDFAIL_P1.f19_g16_rx1.A", + "TESTRUNFAIL_P1.f19_g16_rx1.A", + "TESTRUNPASS_P1.f19_g16_rx1.A", + ], + self._machine, + self._compiler, + ) + test_id = "%s-%s" % (self._baseline_name, utils.get_timestamp()) + ct = test_scheduler.TestScheduler( + tests, + test_id=test_id, + no_batch=self.NO_BATCH, + test_root=self._testroot, + output_root=self._testroot, + compiler=self._compiler, + mpilib=self.TEST_MPILIB, + machine_name=self.MACHINE.get_machine_name(), + ) + + log_lvl = logging.getLogger().getEffectiveLevel() + logging.disable(logging.CRITICAL) + try: + ct.run_tests() + finally: + logging.getLogger().setLevel(log_lvl) + + ct = test_scheduler.TestScheduler( + tests, + test_id=test_id, + no_batch=self.NO_BATCH, + test_root=self._testroot, + output_root=self._testroot, + compiler=self._compiler, + mpilib=self.TEST_MPILIB, + 
machine_name=self.MACHINE.get_machine_name(), + force_rebuild=True, + use_existing=True, + ) + + test_statuses = glob.glob("%s/*%s/TestStatus" % (self._testroot, test_id)) + + for x in test_statuses: + casedir = os.path.dirname(x) + + ts = test_status.TestStatus(test_dir=casedir) + + self.assertTrue( + ts.get_status(test_status.SHAREDLIB_BUILD_PHASE) + == test_status.TEST_PEND_STATUS + ) + def test_c_use_existing(self): tests = get_tests.get_full_test_names( [ diff --git a/CIME/tests/test_sys_unittest.py b/CIME/tests/test_sys_unittest.py old mode 100644 new mode 100755 index 19f232b246f..238c7355469 --- a/CIME/tests/test_sys_unittest.py +++ b/CIME/tests/test_sys_unittest.py @@ -16,31 +16,42 @@ def setUpClass(cls): cls._testroot = os.path.join(cls.TEST_ROOT, "TestUnitTests") cls._testdirs = [] - def _has_unit_test_support(self): - if self.TEST_COMPILER is None: - compiler = self.MACHINE.get_default_compiler() - else: - compiler = self.TEST_COMPILER + def setUp(self): + super().setUp() + + self._driver = utils.get_cime_default_driver() + self._has_pfunit = self._has_unit_test_support() - mach = self.MACHINE.get_machine_name() + def _has_unit_test_support(self): cmake_macros_dir = Files().get_value("CMAKE_MACROS_DIR") macros_to_check = [ - os.path.join(cmake_macros_dir, "{}_{}.cmake".format(compiler, mach)), - os.path.join(cmake_macros_dir, "{}.cmake".format(mach)), + os.path.join( + cmake_macros_dir, + "{}_{}.cmake".format(self._compiler, self._machine), + ), + os.path.join(cmake_macros_dir, "{}.cmake".format(self._machine)), + os.path.join( + os.environ.get("HOME"), + ".cime", + "{}_{}.cmake".format(self._compiler, self._machine), + ), + os.path.join( + os.environ.get("HOME"), ".cime", "{}.cmake".format(self._machine) + ), ] for macro_to_check in macros_to_check: if os.path.exists(macro_to_check): macro_text = open(macro_to_check, "r").read() - - return "PFUNIT_PATH" in macro_text + if "PFUNIT_PATH" in macro_text: + return True return False def 
test_a_unit_test(self): cls = self.__class__ - if not self._has_unit_test_support(): + if not self._has_pfunit: self.skipTest( "Skipping TestUnitTest - PFUNIT_PATH not found for the default compiler on this machine" ) @@ -55,8 +66,7 @@ def test_a_unit_test(self): test_spec_dir = os.path.join( os.path.dirname(unit_test_tool), "Examples", "interpolate_1d", "tests" ) - args = "--build-dir {} --test-spec-dir {}".format(test_dir, test_spec_dir) - args += " --machine {}".format(self.MACHINE.get_machine_name()) + args = f"--build-dir {test_dir} --test-spec-dir {test_spec_dir} --machine {self._machine} --compiler {self._compiler} --comp-interface {self._driver}" utils.run_cmd_no_fail("{} {}".format(unit_test_tool, args)) cls._do_teardown.append(test_dir) @@ -79,8 +89,7 @@ def test_b_cime_f90_unit_tests(self): test_spec_dir, "scripts", "fortran_unit_testing", "run_tests.py" ) ) - args = "--build-dir {} --test-spec-dir {}".format(test_dir, test_spec_dir) - args += " --machine {}".format(self.MACHINE.get_machine_name()) + args = f"--build-dir {test_dir} --test-spec-dir {test_spec_dir} --machine {self._machine} --compiler {self._compiler} --comp-interface {self._driver}" utils.run_cmd_no_fail("{} {}".format(unit_test_tool, args)) cls._do_teardown.append(test_dir) diff --git a/CIME/tests/test_unit_baselines_performance.py b/CIME/tests/test_unit_baselines_performance.py new file mode 100644 index 00000000000..1564541ba9a --- /dev/null +++ b/CIME/tests/test_unit_baselines_performance.py @@ -0,0 +1,655 @@ +#!/usr/bin/env python3 + +import gzip +import tempfile +import unittest +from unittest import mock +from pathlib import Path + +from CIME.baselines import performance +from CIME.tests.test_unit_system_tests import CPLLOG + + +def create_mock_case(tempdir, get_latest_cpl_logs=None): + caseroot = Path(tempdir, "0", "caseroot") + + rundir = caseroot / "run" + + if get_latest_cpl_logs is not None: + get_latest_cpl_logs.return_value = (str(rundir / "cpl.log.gz"),) + + baseline_root = 
Path(tempdir, "baselines") + + baseline_root.mkdir(parents=True, exist_ok=False) + + case = mock.MagicMock() + + return case, caseroot, rundir, baseline_root + + +class TestUnitBaselinesPerformance(unittest.TestCase): + @mock.patch("CIME.baselines.performance._perf_get_memory") + def test_perf_get_memory_default(self, _perf_get_memory): + _perf_get_memory.return_value = ("1000", "a") + + case = mock.MagicMock() + + config = mock.MagicMock() + + config.perf_get_memory.side_effect = AttributeError + + mem = performance.perf_get_memory(case, config) + + assert mem == ("1000", "a") + + def test_perf_get_memory(self): + case = mock.MagicMock() + + config = mock.MagicMock() + + config.perf_get_memory.return_value = ("1000", "a") + + mem = performance.perf_get_memory(case, config) + + assert mem == ("1000", "a") + + @mock.patch("CIME.baselines.performance._perf_get_throughput") + def test_perf_get_throughput_default(self, _perf_get_throughput): + _perf_get_throughput.return_value = ("100", "a") + + case = mock.MagicMock() + + config = mock.MagicMock() + + config.perf_get_throughput.side_effect = AttributeError + + tput = performance.perf_get_throughput(case, config) + + assert tput == ("100", "a") + + def test_perf_get_throughput(self): + case = mock.MagicMock() + + config = mock.MagicMock() + + config.perf_get_throughput.return_value = ("100", "a") + + tput = performance.perf_get_throughput(case, config) + + assert tput == ("100", "a") + + def test_get_cpl_throughput_no_file(self): + throughput = performance.get_cpl_throughput("/tmp/cpl.log") + + assert throughput is None + + def test_get_cpl_throughput(self): + with tempfile.TemporaryDirectory() as tempdir: + cpl_log_path = Path(tempdir, "cpl.log.gz") + + with gzip.open(cpl_log_path, "w") as fd: + fd.write(CPLLOG.encode("utf-8")) + + throughput = performance.get_cpl_throughput(str(cpl_log_path)) + + assert throughput == 719.635 + + def test_get_cpl_mem_usage_gz(self): + with tempfile.TemporaryDirectory() as tempdir: + 
cpl_log_path = Path(tempdir, "cpl.log.gz") + + with gzip.open(cpl_log_path, "w") as fd: + fd.write(CPLLOG.encode("utf-8")) + + mem_usage = performance.get_cpl_mem_usage(str(cpl_log_path)) + + assert mem_usage == [ + (10102.0, 1673.89), + (10103.0, 1673.89), + (10104.0, 1673.89), + (10105.0, 1673.89), + ] + + @mock.patch("CIME.baselines.performance.os.path.isfile") + def test_get_cpl_mem_usage(self, isfile): + isfile.return_value = True + + with mock.patch( + "builtins.open", mock.mock_open(read_data=CPLLOG.encode("utf-8")) + ) as mock_file: + mem_usage = performance.get_cpl_mem_usage("/tmp/cpl.log") + + assert mem_usage == [ + (10102.0, 1673.89), + (10103.0, 1673.89), + (10104.0, 1673.89), + (10105.0, 1673.89), + ] + + def test_read_baseline_file_multi_line(self): + with mock.patch( + "builtins.open", + mock.mock_open( + read_data="sha:1df0 date:2023 1000.0\nsha:3b05 date:2023 2000.0" + ), + ) as mock_file: + baseline = performance.read_baseline_file("/tmp/cpl-mem.log") + + mock_file.assert_called_with("/tmp/cpl-mem.log") + assert baseline == "sha:1df0 date:2023 1000.0\nsha:3b05 date:2023 2000.0" + + def test_read_baseline_file_content(self): + with mock.patch( + "builtins.open", mock.mock_open(read_data="sha:1df0 date:2023 1000.0") + ) as mock_file: + baseline = performance.read_baseline_file("/tmp/cpl-mem.log") + + mock_file.assert_called_with("/tmp/cpl-mem.log") + assert baseline == "sha:1df0 date:2023 1000.0" + + def test_write_baseline_file(self): + with mock.patch("builtins.open", mock.mock_open()) as mock_file: + performance.write_baseline_file("/tmp/cpl-tput.log", "1000") + + mock_file.assert_called_with("/tmp/cpl-tput.log", "a") + + @mock.patch("CIME.baselines.performance.get_cpl_throughput") + @mock.patch("CIME.baselines.performance.get_latest_cpl_logs") + def test__perf_get_throughput(self, get_latest_cpl_logs, get_cpl_throughput): + get_cpl_throughput.side_effect = FileNotFoundError() + + with tempfile.TemporaryDirectory() as tempdir: + case, _, _, 
baseline_root = create_mock_case(tempdir, get_latest_cpl_logs) + + with self.assertRaises(RuntimeError): + performance._perf_get_throughput(case) + + @mock.patch("CIME.baselines.performance.get_cpl_mem_usage") + @mock.patch("CIME.baselines.performance.get_latest_cpl_logs") + def test__perf_get_memory_override(self, get_latest_cpl_logs, get_cpl_mem_usage): + get_cpl_mem_usage.side_effect = FileNotFoundError() + + with tempfile.TemporaryDirectory() as tempdir: + case, _, _, baseline_root = create_mock_case(tempdir, get_latest_cpl_logs) + + with self.assertRaises(RuntimeError): + performance._perf_get_memory(case, "/tmp/override") + + @mock.patch("CIME.baselines.performance.get_cpl_mem_usage") + @mock.patch("CIME.baselines.performance.get_latest_cpl_logs") + def test__perf_get_memory(self, get_latest_cpl_logs, get_cpl_mem_usage): + get_cpl_mem_usage.side_effect = FileNotFoundError() + + with tempfile.TemporaryDirectory() as tempdir: + case, _, _, baseline_root = create_mock_case(tempdir, get_latest_cpl_logs) + + with self.assertRaises(RuntimeError): + performance._perf_get_memory(case) + + @mock.patch("CIME.baselines.performance.write_baseline_file") + @mock.patch("CIME.baselines.performance.perf_get_memory") + @mock.patch("CIME.baselines.performance.perf_get_throughput") + def test_write_baseline_skip( + self, perf_get_throughput, perf_get_memory, write_baseline_file + ): + perf_get_throughput.return_value = "100" + + perf_get_memory.return_value = "1000" + + with tempfile.TemporaryDirectory() as tempdir: + case, _, _, baseline_root = create_mock_case(tempdir) + + performance.perf_write_baseline( + case, + baseline_root, + False, + False, + ) + + perf_get_throughput.assert_not_called() + perf_get_memory.assert_not_called() + write_baseline_file.assert_not_called() + + @mock.patch("CIME.baselines.performance.write_baseline_file") + @mock.patch("CIME.baselines.performance.perf_get_memory") + @mock.patch("CIME.baselines.performance.perf_get_throughput") + def 
test_write_baseline_runtimeerror( + self, perf_get_throughput, perf_get_memory, write_baseline_file + ): + perf_get_throughput.side_effect = RuntimeError + + perf_get_memory.side_effect = RuntimeError + + with tempfile.TemporaryDirectory() as tempdir: + case, _, _, baseline_root = create_mock_case(tempdir) + + performance.perf_write_baseline(case, baseline_root) + + perf_get_throughput.assert_called() + perf_get_memory.assert_called() + write_baseline_file.assert_not_called() + + @mock.patch("CIME.baselines.performance.write_baseline_file") + @mock.patch("CIME.baselines.performance.perf_get_memory") + @mock.patch("CIME.baselines.performance.perf_get_throughput") + def test_perf_write_baseline( + self, perf_get_throughput, perf_get_memory, write_baseline_file + ): + perf_get_throughput.return_value = ("100", "a") + + perf_get_memory.return_value = ("1000", "a") + + with tempfile.TemporaryDirectory() as tempdir: + case, _, _, baseline_root = create_mock_case(tempdir) + + performance.perf_write_baseline(case, baseline_root) + + perf_get_throughput.assert_called() + perf_get_memory.assert_called() + write_baseline_file.assert_any_call( + str(baseline_root / "cpl-tput.log"), "100", "a" + ) + write_baseline_file.assert_any_call( + str(baseline_root / "cpl-mem.log"), "1000", "a" + ) + + @mock.patch("CIME.baselines.performance._perf_get_throughput") + @mock.patch("CIME.baselines.performance.read_baseline_file") + @mock.patch("CIME.baselines.performance.get_latest_cpl_logs") + def test_perf_compare_throughput_baseline_no_baseline_file( + self, get_latest_cpl_logs, read_baseline_file, _perf_get_throughput + ): + read_baseline_file.side_effect = FileNotFoundError + + _perf_get_throughput.return_value = 504 + + with tempfile.TemporaryDirectory() as tempdir: + case, _, _, baseline_root = create_mock_case(tempdir, get_latest_cpl_logs) + + case.get_value.side_effect = ( + str(baseline_root), + "master/ERIO.ne30_g16_rx1.A.docker_gnu", + "/tmp/components/cpl", + 0.05, + ) + + with 
self.assertRaises(FileNotFoundError): + performance.perf_compare_throughput_baseline(case) + + @mock.patch("CIME.baselines.performance._perf_get_throughput") + @mock.patch("CIME.baselines.performance.read_baseline_file") + @mock.patch("CIME.baselines.performance.get_latest_cpl_logs") + def test_perf_compare_throughput_baseline_no_baseline( + self, get_latest_cpl_logs, read_baseline_file, _perf_get_throughput + ): + read_baseline_file.return_value = "" + + _perf_get_throughput.return_value = ("504", "a") + + with tempfile.TemporaryDirectory() as tempdir: + case, _, _, baseline_root = create_mock_case(tempdir, get_latest_cpl_logs) + + case.get_baseline_dir.return_value = str( + baseline_root / "master" / "ERIO.ne30_g16_rx1.A.docker_gnu" + ) + + case.get_value.side_effect = ( + "/tmp/components/cpl", + 0.05, + ) + + (below_tolerance, comment) = performance.perf_compare_throughput_baseline( + case + ) + + assert below_tolerance is None + assert ( + comment + == "Could not compare throughput to baseline, as baseline had no value." 
+ ) + + @mock.patch("CIME.baselines.performance._perf_get_throughput") + @mock.patch("CIME.baselines.performance.read_baseline_file") + @mock.patch("CIME.baselines.performance.get_latest_cpl_logs") + def test_perf_compare_throughput_baseline_no_tolerance( + self, get_latest_cpl_logs, read_baseline_file, _perf_get_throughput + ): + read_baseline_file.return_value = "500" + + _perf_get_throughput.return_value = ("504", "a") + + with tempfile.TemporaryDirectory() as tempdir: + case, _, _, baseline_root = create_mock_case(tempdir, get_latest_cpl_logs) + + case.get_baseline_dir.return_value = str( + baseline_root / "master" / "ERIO.ne30_g16_rx1.A.docker_gnu" + ) + + case.get_value.side_effect = ( + "/tmp/components/cpl", + None, + ) + + (below_tolerance, comment) = performance.perf_compare_throughput_baseline( + case + ) + + assert below_tolerance + assert ( + comment + == "TPUTCOMP: Throughput changed by -0.80%: baseline=500.000 sypd, tolerance=10%, current=504.000 sypd" + ) + + @mock.patch("CIME.baselines.performance._perf_get_throughput") + @mock.patch("CIME.baselines.performance.read_baseline_file") + @mock.patch("CIME.baselines.performance.get_latest_cpl_logs") + def test_perf_compare_throughput_baseline_above_threshold( + self, get_latest_cpl_logs, read_baseline_file, _perf_get_throughput + ): + read_baseline_file.return_value = "1000" + + _perf_get_throughput.return_value = ("504", "a") + + with tempfile.TemporaryDirectory() as tempdir: + case, _, _, baseline_root = create_mock_case(tempdir, get_latest_cpl_logs) + + case.get_baseline_dir.return_value = str( + baseline_root / "master" / "ERIO.ne30_g16_rx1.A.docker_gnu" + ) + + case.get_value.side_effect = ( + "/tmp/components/cpl", + 0.05, + ) + + (below_tolerance, comment) = performance.perf_compare_throughput_baseline( + case + ) + + assert not below_tolerance + assert ( + comment + == "Error: TPUTCOMP: Throughput changed by 49.60%: baseline=1000.000 sypd, tolerance=5%, current=504.000 sypd" + ) + + 
@mock.patch("CIME.baselines.performance._perf_get_throughput") + @mock.patch("CIME.baselines.performance.read_baseline_file") + @mock.patch("CIME.baselines.performance.get_latest_cpl_logs") + def test_perf_compare_throughput_baseline( + self, get_latest_cpl_logs, read_baseline_file, _perf_get_throughput + ): + read_baseline_file.return_value = "500" + + _perf_get_throughput.return_value = ("504", "a") + + with tempfile.TemporaryDirectory() as tempdir: + case, _, _, baseline_root = create_mock_case(tempdir, get_latest_cpl_logs) + + case.get_baseline_dir.return_value = str( + baseline_root / "master" / "ERIO.ne30_g16_rx1.A.docker_gnu" + ) + + case.get_value.side_effect = ( + "/tmp/components/cpl", + 0.05, + ) + + (below_tolerance, comment) = performance.perf_compare_throughput_baseline( + case + ) + + assert below_tolerance + assert ( + comment + == "TPUTCOMP: Throughput changed by -0.80%: baseline=500.000 sypd, tolerance=5%, current=504.000 sypd" + ) + + @mock.patch("CIME.baselines.performance.get_cpl_mem_usage") + @mock.patch("CIME.baselines.performance.read_baseline_file") + @mock.patch("CIME.baselines.performance.get_latest_cpl_logs") + def test_perf_compare_memory_baseline_no_baseline( + self, get_latest_cpl_logs, read_baseline_file, get_cpl_mem_usage + ): + read_baseline_file.return_value = "" + + get_cpl_mem_usage.return_value = [ + (1, 1000.0), + (2, 1001.0), + (3, 1002.0), + (4, 1003.0), + ] + + with tempfile.TemporaryDirectory() as tempdir: + case, _, _, baseline_root = create_mock_case(tempdir, get_latest_cpl_logs) + + case.get_baseline_dir.return_value = str( + baseline_root / "master" / "ERIO.ne30_g16_rx1.A.docker_gnu" + ) + + case.get_value.side_effect = ( + "/tmp/components/cpl", + 0.05, + ) + + (below_tolerance, comment) = performance.perf_compare_memory_baseline(case) + + assert below_tolerance + assert ( + comment + == "MEMCOMP: Memory usage highwater changed by 0.00%: baseline=0.000 MB, tolerance=5%, current=1003.000 MB" + ) + + 
@mock.patch("CIME.baselines.performance.get_cpl_mem_usage") + @mock.patch("CIME.baselines.performance.read_baseline_file") + @mock.patch("CIME.baselines.performance.get_latest_cpl_logs") + def test_perf_compare_memory_baseline_not_enough_samples( + self, get_latest_cpl_logs, read_baseline_file, get_cpl_mem_usage + ): + read_baseline_file.return_value = ["1000.0"] + + get_cpl_mem_usage.return_value = [ + (1, 1000.0), + (2, 1001.0), + ] + + with tempfile.TemporaryDirectory() as tempdir: + case, _, _, baseline_root = create_mock_case(tempdir, get_latest_cpl_logs) + + case.get_value.side_effect = ( + str(baseline_root), + "master/ERIO.ne30_g16_rx1.A.docker_gnu", + "/tmp/components/cpl", + 0.05, + ) + + (below_tolerance, comment) = performance.perf_compare_memory_baseline(case) + + assert below_tolerance is None + assert comment == "Found 2 memory usage samples, need atleast 4" + + @mock.patch("CIME.baselines.performance.get_cpl_mem_usage") + @mock.patch("CIME.baselines.performance.read_baseline_file") + @mock.patch("CIME.baselines.performance.get_latest_cpl_logs") + def test_perf_compare_memory_baseline_no_baseline_file( + self, get_latest_cpl_logs, read_baseline_file, get_cpl_mem_usage + ): + read_baseline_file.side_effect = FileNotFoundError + + get_cpl_mem_usage.return_value = [ + (1, 1000.0), + (2, 1001.0), + (3, 1002.0), + (4, 1003.0), + ] + + with tempfile.TemporaryDirectory() as tempdir: + case, _, _, baseline_root = create_mock_case(tempdir, get_latest_cpl_logs) + + case.get_value.side_effect = ( + str(baseline_root), + "master/ERIO.ne30_g16_rx1.A.docker_gnu", + "/tmp/components/cpl", + 0.05, + ) + + with self.assertRaises(FileNotFoundError): + performance.perf_compare_memory_baseline(case) + + @mock.patch("CIME.baselines.performance.get_cpl_mem_usage") + @mock.patch("CIME.baselines.performance.read_baseline_file") + @mock.patch("CIME.baselines.performance.get_latest_cpl_logs") + def test_perf_compare_memory_baseline_no_tolerance( + self, get_latest_cpl_logs, 
read_baseline_file, get_cpl_mem_usage + ): + read_baseline_file.return_value = "1000.0" + + get_cpl_mem_usage.return_value = [ + (1, 1000.0), + (2, 1001.0), + (3, 1002.0), + (4, 1003.0), + ] + + with tempfile.TemporaryDirectory() as tempdir: + case, _, _, baseline_root = create_mock_case(tempdir, get_latest_cpl_logs) + + case.get_baseline_dir.return_value = str( + baseline_root / "master" / "ERIO.ne30_g16_rx1.A.docker_gnu" + ) + + case.get_value.side_effect = ( + "/tmp/components/cpl", + None, + ) + + (below_tolerance, comment) = performance.perf_compare_memory_baseline(case) + + assert below_tolerance + assert ( + comment + == "MEMCOMP: Memory usage highwater changed by 0.30%: baseline=1000.000 MB, tolerance=10%, current=1003.000 MB" + ) + + @mock.patch("CIME.baselines.performance.get_cpl_mem_usage") + @mock.patch("CIME.baselines.performance.read_baseline_file") + @mock.patch("CIME.baselines.performance.get_latest_cpl_logs") + def test_perf_compare_memory_baseline_above_threshold( + self, get_latest_cpl_logs, read_baseline_file, get_cpl_mem_usage + ): + read_baseline_file.return_value = "1000.0" + + get_cpl_mem_usage.return_value = [ + (1, 2000.0), + (2, 2001.0), + (3, 2002.0), + (4, 2003.0), + ] + + with tempfile.TemporaryDirectory() as tempdir: + case, _, _, baseline_root = create_mock_case(tempdir, get_latest_cpl_logs) + + case.get_baseline_dir.return_value = str( + baseline_root / "master" / "ERIO.ne30_g16_rx1.A.docker_gnu" + ) + + case.get_value.side_effect = ( + "/tmp/components/cpl", + 0.05, + ) + + (below_tolerance, comment) = performance.perf_compare_memory_baseline(case) + + assert not below_tolerance + assert ( + comment + == "Error: MEMCOMP: Memory usage highwater changed by 100.30%: baseline=1000.000 MB, tolerance=5%, current=2003.000 MB" + ) + + @mock.patch("CIME.baselines.performance.get_cpl_mem_usage") + @mock.patch("CIME.baselines.performance.read_baseline_file") + @mock.patch("CIME.baselines.performance.get_latest_cpl_logs") + def 
test_perf_compare_memory_baseline( + self, get_latest_cpl_logs, read_baseline_file, get_cpl_mem_usage + ): + read_baseline_file.return_value = "1000.0" + + get_cpl_mem_usage.return_value = [ + (1, 1000.0), + (2, 1001.0), + (3, 1002.0), + (4, 1003.0), + ] + + with tempfile.TemporaryDirectory() as tempdir: + case, _, _, baseline_root = create_mock_case(tempdir, get_latest_cpl_logs) + + case.get_baseline_dir.return_value = str( + baseline_root / "master" / "ERIO.ne30_g16_rx1.A.docker_gnu" + ) + + case.get_value.side_effect = ( + "/tmp/components/cpl", + 0.05, + ) + + (below_tolerance, comment) = performance.perf_compare_memory_baseline(case) + + assert below_tolerance + assert ( + comment + == "MEMCOMP: Memory usage highwater changed by 0.30%: baseline=1000.000 MB, tolerance=5%, current=1003.000 MB" + ) + + def test_get_latest_cpl_logs_found_multiple(self): + with tempfile.TemporaryDirectory() as tempdir: + run_dir = Path(tempdir) / "run" + run_dir.mkdir(parents=True, exist_ok=False) + + cpl_log_path = run_dir / "cpl.log.gz" + cpl_log_path.touch() + + cpl_log_2_path = run_dir / "cpl-2023-01-01.log.gz" + cpl_log_2_path.touch() + + case = mock.MagicMock() + case.get_value.side_effect = ( + str(run_dir), + "mct", + ) + + latest_cpl_logs = performance.get_latest_cpl_logs(case) + + assert len(latest_cpl_logs) == 2 + assert sorted(latest_cpl_logs) == sorted( + [str(cpl_log_path), str(cpl_log_2_path)] + ) + + def test_get_latest_cpl_logs_found_single(self): + with tempfile.TemporaryDirectory() as tempdir: + run_dir = Path(tempdir) / "run" + run_dir.mkdir(parents=True, exist_ok=False) + + cpl_log_path = run_dir / "cpl.log.gz" + cpl_log_path.touch() + + case = mock.MagicMock() + case.get_value.side_effect = ( + str(run_dir), + "mct", + ) + + latest_cpl_logs = performance.get_latest_cpl_logs(case) + + assert len(latest_cpl_logs) == 1 + assert latest_cpl_logs[0] == str(cpl_log_path) + + def test_get_latest_cpl_logs(self): + case = mock.MagicMock() + case.get_value.side_effect = 
( + f"/tmp/run", + "mct", + ) + + latest_cpl_logs = performance.get_latest_cpl_logs(case) + + assert len(latest_cpl_logs) == 0 diff --git a/CIME/tests/test_unit_bless_test_results.py b/CIME/tests/test_unit_bless_test_results.py new file mode 100644 index 00000000000..91315b30f8d --- /dev/null +++ b/CIME/tests/test_unit_bless_test_results.py @@ -0,0 +1,1026 @@ +import re +import unittest +import tempfile +from unittest import mock +from pathlib import Path + +from CIME.bless_test_results import ( + bless_test_results, + _bless_throughput, + _bless_memory, + bless_history, + bless_namelists, + is_bless_needed, +) + + +class TestUnitBlessTestResults(unittest.TestCase): + @mock.patch("CIME.bless_test_results.generate_baseline") + @mock.patch("CIME.bless_test_results.compare_baseline") + def test_bless_history_fail(self, compare_baseline, generate_baseline): + generate_baseline.return_value = (False, "") + + compare_baseline.return_value = (False, "") + + case = mock.MagicMock() + case.get_value.side_effect = [ + "USER", + "SMS.f19_g16.S", + "/tmp/run", + ] + + success, comment = bless_history( + "SMS.f19_g16.S", case, "master", "/tmp/baselines", False, True + ) + + assert not success + assert comment == "Generate baseline failed: " + + @mock.patch("CIME.bless_test_results.generate_baseline") + @mock.patch("CIME.bless_test_results.compare_baseline") + def test_bless_history_force(self, compare_baseline, generate_baseline): + generate_baseline.return_value = (True, "") + + compare_baseline.return_value = (False, "") + + case = mock.MagicMock() + case.get_value.side_effect = [ + "USER", + "SMS.f19_g16.S", + "/tmp/run", + ] + + success, comment = bless_history( + "SMS.f19_g16.S", case, "master", "/tmp/baselines", False, True + ) + + assert success + assert comment is None + + @mock.patch("CIME.bless_test_results.compare_baseline") + def test_bless_history(self, compare_baseline): + compare_baseline.return_value = (True, "") + + case = mock.MagicMock() + 
case.get_value.side_effect = [ + "USER", + "SMS.f19_g16.S", + "/tmp/run", + ] + + success, comment = bless_history( + "SMS.f19_g16.S", case, "master", "/tmp/baselines", True, False + ) + + assert success + assert comment is None + + def test_bless_namelists_report_only(self): + success, comment = bless_namelists( + "SMS.f19_g16.S", + True, + False, + None, + "master", + "/tmp/baselines", + ) + + assert success + assert comment is None + + @mock.patch("CIME.bless_test_results.get_scripts_root") + @mock.patch("CIME.bless_test_results.run_cmd") + def test_bless_namelists_pes_file(self, run_cmd, get_scripts_root): + get_scripts_root.return_value = "/tmp/cime" + + run_cmd.return_value = [1, None, None] + + success, comment = bless_namelists( + "SMS.f19_g16.S", + False, + True, + "/tmp/pes/new_layout.xml", + "master", + "/tmp/baselines", + ) + + assert not success + assert comment == "Namelist regen failed: 'None'" + + call = run_cmd.call_args_list[0] + + assert re.match( + r"/tmp/cime/create_test SMS.f19_g16.S --namelists-only -g (?:-b )?master --pesfile /tmp/pes/new_layout.xml --baseline-root /tmp/baselines -o", + call[0][0], + ) + + @mock.patch("CIME.bless_test_results.get_scripts_root") + @mock.patch("CIME.bless_test_results.run_cmd") + def test_bless_namelists_new_test_id(self, run_cmd, get_scripts_root): + get_scripts_root.return_value = "/tmp/cime" + + run_cmd.return_value = [1, None, None] + + success, comment = bless_namelists( + "SMS.f19_g16.S", + False, + True, + None, + "master", + "/tmp/baselines", + new_test_root="/tmp/other-test-root", + new_test_id="hello", + ) + + assert not success + assert comment == "Namelist regen failed: 'None'" + + call = run_cmd.call_args_list[0] + + assert re.match( + r"/tmp/cime/create_test SMS.f19_g16.S --namelists-only -g (?:-b )?master --test-root=/tmp/other-test-root --output-root=/tmp/other-test-root -t hello --baseline-root /tmp/baselines -o", + call[0][0], + ) + + @mock.patch("CIME.bless_test_results.get_scripts_root") + 
@mock.patch("CIME.bless_test_results.run_cmd") + def test_bless_namelists_new_test_root(self, run_cmd, get_scripts_root): + get_scripts_root.return_value = "/tmp/cime" + + run_cmd.return_value = [1, None, None] + + success, comment = bless_namelists( + "SMS.f19_g16.S", + False, + True, + None, + "master", + "/tmp/baselines", + new_test_root="/tmp/other-test-root", + ) + + assert not success + assert comment == "Namelist regen failed: 'None'" + + call = run_cmd.call_args_list[0] + + assert re.match( + r"/tmp/cime/create_test SMS.f19_g16.S --namelists-only -g (?:-b )?master --test-root=/tmp/other-test-root --output-root=/tmp/other-test-root --baseline-root /tmp/baselines -o", + call[0][0], + ) + + @mock.patch("CIME.bless_test_results.get_scripts_root") + @mock.patch("CIME.bless_test_results.run_cmd") + def test_bless_namelists_fail(self, run_cmd, get_scripts_root): + get_scripts_root.return_value = "/tmp/cime" + + run_cmd.return_value = [1, None, None] + + success, comment = bless_namelists( + "SMS.f19_g16.S", + False, + True, + None, + "master", + "/tmp/baselines", + ) + + assert not success + assert comment == "Namelist regen failed: 'None'" + + call = run_cmd.call_args_list[0] + + assert re.match( + r"/tmp/cime/create_test SMS.f19_g16.S --namelists-only -g (?:-b )?master --baseline-root /tmp/baselines -o", + call[0][0], + ) + + @mock.patch("CIME.bless_test_results.get_scripts_root") + @mock.patch("CIME.bless_test_results.run_cmd") + def test_bless_namelists_force(self, run_cmd, get_scripts_root): + get_scripts_root.return_value = "/tmp/cime" + + run_cmd.return_value = [0, None, None] + + success, comment = bless_namelists( + "SMS.f19_g16.S", + False, + True, + None, + "master", + "/tmp/baselines", + ) + + assert success + assert comment is None + + call = run_cmd.call_args_list[0] + + assert re.match( + r"/tmp/cime/create_test SMS.f19_g16.S --namelists-only -g (?:-b )?master --baseline-root /tmp/baselines -o", + call[0][0], + ) + + 
@mock.patch("CIME.bless_test_results.perf_write_baseline") + @mock.patch("CIME.bless_test_results.perf_compare_memory_baseline") + def test_bless_memory_force_error( + self, perf_compare_memory_baseline, perf_write_baseline + ): + perf_write_baseline.side_effect = Exception + + perf_compare_memory_baseline.return_value = (False, "") + + case = mock.MagicMock() + + success, comment = _bless_memory( + case, "SMS.f19_g16.S", "/tmp/baselines", "master", False, True + ) + + assert not success + assert ( + comment + == "Failed to write baseline memory usage for test 'SMS.f19_g16.S': " + ) + perf_write_baseline.assert_called() + + @mock.patch("CIME.bless_test_results.perf_write_baseline") + @mock.patch("CIME.bless_test_results.perf_compare_memory_baseline") + def test_bless_memory_force( + self, perf_compare_memory_baseline, perf_write_baseline + ): + perf_compare_memory_baseline.return_value = (False, "") + + case = mock.MagicMock() + + success, comment = _bless_memory( + case, "SMS.f19_g16.S", "/tmp/baselines", "master", False, True + ) + + assert success + assert comment is None + perf_write_baseline.assert_called() + + @mock.patch("CIME.bless_test_results.perf_compare_memory_baseline") + def test_bless_memory_report_only(self, perf_compare_memory_baseline): + perf_compare_memory_baseline.return_value = (True, "") + + case = mock.MagicMock() + + success, comment = _bless_memory( + case, "SMS.f19_g16.S", "/tmp/baselines", "master", True, False + ) + + assert success + assert comment is None + + @mock.patch("CIME.bless_test_results.perf_write_baseline") + @mock.patch("CIME.bless_test_results.perf_compare_memory_baseline") + def test_bless_memory_general_error( + self, perf_compare_memory_baseline, perf_write_baseline + ): + perf_compare_memory_baseline.side_effect = Exception + + case = mock.MagicMock() + + success, comment = _bless_memory( + case, "SMS.f19_g16.S", "/tmp/baselines", "master", False, True + ) + + assert success + assert comment is None + + 
@mock.patch("CIME.bless_test_results.perf_write_baseline") + @mock.patch("CIME.bless_test_results.perf_compare_memory_baseline") + def test_bless_memory_file_not_found_error( + self, perf_compare_memory_baseline, perf_write_baseline + ): + perf_compare_memory_baseline.side_effect = FileNotFoundError + + case = mock.MagicMock() + + success, comment = _bless_memory( + case, "SMS.f19_g16.S", "/tmp/baselines", "master", False, True + ) + + assert success + assert comment is None + + @mock.patch("CIME.bless_test_results.perf_compare_memory_baseline") + def test_bless_memory(self, perf_compare_memory_baseline): + perf_compare_memory_baseline.return_value = (True, "") + + case = mock.MagicMock() + + success, comment = _bless_memory( + case, "SMS.f19_g16.S", "/tmp/baselines", "master", False, False + ) + + assert success + + @mock.patch("CIME.bless_test_results.perf_write_baseline") + @mock.patch("CIME.bless_test_results.perf_compare_throughput_baseline") + def test_bless_throughput_force_error( + self, perf_compare_throughput_baseline, perf_write_baseline + ): + perf_write_baseline.side_effect = Exception + + perf_compare_throughput_baseline.return_value = (False, "") + + case = mock.MagicMock() + + success, comment = _bless_throughput( + case, "SMS.f19_g16.S", "/tmp/baselines", "master", False, True + ) + + assert not success + assert comment == "Failed to write baseline throughput for 'SMS.f19_g16.S': " + perf_write_baseline.assert_called() + + @mock.patch("CIME.bless_test_results.perf_write_baseline") + @mock.patch("CIME.bless_test_results.perf_compare_throughput_baseline") + def test_bless_throughput_force( + self, perf_compare_throughput_baseline, perf_write_baseline + ): + perf_compare_throughput_baseline.return_value = (False, "") + + case = mock.MagicMock() + + success, comment = _bless_throughput( + case, "SMS.f19_g16.S", "/tmp/baselines", "master", False, True + ) + + assert success + assert comment is None + perf_write_baseline.assert_called() + + 
@mock.patch("CIME.bless_test_results.perf_compare_throughput_baseline") + def test_bless_throughput_report_only(self, perf_compare_throughput_baseline): + perf_compare_throughput_baseline.return_value = (True, "") + + case = mock.MagicMock() + + success, comment = _bless_throughput( + case, "SMS.f19_g16.S", "/tmp/baselines", "master", True, False + ) + + assert success + assert comment is None + + @mock.patch("CIME.bless_test_results.perf_compare_throughput_baseline") + def test_bless_throughput_general_error(self, perf_compare_throughput_baseline): + perf_compare_throughput_baseline.side_effect = Exception + + case = mock.MagicMock() + + success, comment = _bless_throughput( + case, "SMS.f19_g16.S", "/tmp/baselines", "master", False, True + ) + + assert success + assert comment is None + + @mock.patch("CIME.bless_test_results.perf_write_baseline") + @mock.patch("CIME.bless_test_results.perf_compare_throughput_baseline") + def test_bless_throughput_file_not_found_error( + self, + perf_compare_throughput_baseline, + perf_write_baseline, + ): + perf_compare_throughput_baseline.side_effect = FileNotFoundError + + case = mock.MagicMock() + + success, comment = _bless_throughput( + case, "SMS.f19_g16.S", "/tmp/baselines", "master", False, True + ) + + assert success + assert comment is None + + @mock.patch("CIME.bless_test_results.perf_compare_throughput_baseline") + def test_bless_throughput(self, perf_compare_throughput_baseline): + perf_compare_throughput_baseline.return_value = (True, "") + + case = mock.MagicMock() + + success, comment = _bless_throughput( + case, "SMS.f19_g16.S", "/tmp/baselines", "master", False, False + ) + + assert success + + @mock.patch("CIME.bless_test_results._bless_throughput") + @mock.patch("CIME.bless_test_results._bless_memory") + @mock.patch("CIME.bless_test_results.Case") + @mock.patch("CIME.bless_test_results.TestStatus") + @mock.patch("CIME.bless_test_results.get_test_status_files") + def test_bless_perf( + self, + 
get_test_status_files, + TestStatus, + Case, + _bless_memory, + _bless_throughput, + ): + get_test_status_files.return_value = [ + "/tmp/cases/SMS.f19_g16.S.docker_gnu/TestStatus", + ] + + ts = TestStatus.return_value + ts.get_name.return_value = "SMS.f19_g16.S.docker_gnu" + ts.get_overall_test_status.return_value = ("PASS", "RUN") + ts.get_status.side_effect = ["PASS", "PASS", "FAIL", "FAIL", "FAIL"] + + case = Case.return_value.__enter__.return_value + + _bless_memory.return_value = (True, "") + + _bless_throughput.return_value = (True, "") + + success = bless_test_results( + "master", + "/tmp/baseline", + "/tmp/cases", + "gnu", + force=True, + bless_perf=True, + ) + + assert success + _bless_memory.assert_called() + _bless_throughput.assert_called() + + @mock.patch("CIME.bless_test_results._bless_throughput") + @mock.patch("CIME.bless_test_results._bless_memory") + @mock.patch("CIME.bless_test_results.Case") + @mock.patch("CIME.bless_test_results.TestStatus") + @mock.patch("CIME.bless_test_results.get_test_status_files") + def test_bless_memory_only( + self, + get_test_status_files, + TestStatus, + Case, + _bless_memory, + _bless_throughput, + ): + get_test_status_files.return_value = [ + "/tmp/cases/SMS.f19_g16.S.docker_gnu/TestStatus", + ] + + ts = TestStatus.return_value + ts.get_name.return_value = "SMS.f19_g16.S.docker_gnu" + ts.get_overall_test_status.return_value = ("PASS", "RUN") + ts.get_status.side_effect = ["PASS", "PASS", "FAIL", "FAIL"] + + case = Case.return_value.__enter__.return_value + + _bless_memory.return_value = (True, "") + + success = bless_test_results( + "master", + "/tmp/baseline", + "/tmp/cases", + "gnu", + force=True, + bless_mem=True, + ) + + assert success + _bless_memory.assert_called() + _bless_throughput.assert_not_called() + + @mock.patch("CIME.bless_test_results._bless_throughput") + @mock.patch("CIME.bless_test_results._bless_memory") + @mock.patch("CIME.bless_test_results.Case") + 
@mock.patch("CIME.bless_test_results.TestStatus") + @mock.patch("CIME.bless_test_results.get_test_status_files") + def test_bless_throughput_only( + self, + get_test_status_files, + TestStatus, + Case, + _bless_memory, + _bless_throughput, + ): + get_test_status_files.return_value = [ + "/tmp/cases/SMS.f19_g16.S.docker_gnu/TestStatus", + ] + + ts = TestStatus.return_value + ts.get_name.return_value = "SMS.f19_g16.S.docker_gnu" + ts.get_overall_test_status.return_value = ("PASS", "RUN") + ts.get_status.side_effect = ["PASS", "PASS", "FAIL", "FAIL"] + + case = Case.return_value.__enter__.return_value + + _bless_throughput.return_value = (True, "") + + success = bless_test_results( + "master", + "/tmp/baseline", + "/tmp/cases", + "gnu", + force=True, + bless_tput=True, + ) + + assert success + _bless_memory.assert_not_called() + _bless_throughput.assert_called() + + @mock.patch("CIME.bless_test_results.bless_namelists") + @mock.patch("CIME.bless_test_results.Case") + @mock.patch("CIME.bless_test_results.TestStatus") + @mock.patch("CIME.bless_test_results.get_test_status_files") + def test_bless_namelists_only( + self, + get_test_status_files, + TestStatus, + Case, + bless_namelists, + ): + get_test_status_files.return_value = [ + "/tmp/cases/SMS.f19_g16.S.docker_gnu/TestStatus", + ] + + ts = TestStatus.return_value + ts.get_name.return_value = "SMS.f19_g16.S.docker_gnu" + ts.get_overall_test_status.return_value = ("PASS", "RUN") + ts.get_status.side_effect = ["FAIL", "PASS", "PASS"] + + case = Case.return_value.__enter__.return_value + + bless_namelists.return_value = (True, "") + + success = bless_test_results( + "master", + "/tmp/baseline", + "/tmp/cases", + "gnu", + force=True, + namelists_only=True, + ) + + assert success + bless_namelists.assert_called() + + @mock.patch("CIME.bless_test_results.bless_history") + @mock.patch("CIME.bless_test_results.Case") + @mock.patch("CIME.bless_test_results.TestStatus") + 
@mock.patch("CIME.bless_test_results.get_test_status_files") + def test_bless_hist_only( + self, + get_test_status_files, + TestStatus, + Case, + bless_history, + ): + get_test_status_files.return_value = [ + "/tmp/cases/SMS.f19_g16.S.docker_gnu/TestStatus", + ] + + ts = TestStatus.return_value + ts.get_name.return_value = "SMS.f19_g16.S.docker_gnu" + ts.get_overall_test_status.return_value = ("PASS", "RUN") + ts.get_status.side_effect = ["PASS", "FAIL"] + + case = Case.return_value.__enter__.return_value + + bless_history.return_value = (True, "") + + success = bless_test_results( + "master", + "/tmp/baseline", + "/tmp/cases", + "gnu", + force=True, + hist_only=True, + ) + + assert success + bless_history.assert_called() + + @mock.patch("CIME.bless_test_results.Case") + @mock.patch("CIME.bless_test_results.TestStatus") + @mock.patch("CIME.bless_test_results.get_test_status_files") + def test_specific(self, get_test_status_files, TestStatus, Case): + get_test_status_files.return_value = [ + "/tmp/cases/SMS.f19_g16.S.docker_gnu.12345/TestStatus", + "/tmp/cases/PET.f19_g16.S.docker-gnu.12345/TestStatus", + ] + + ts = TestStatus.return_value + ts.get_name.return_value = "SMS.f19_g16.S.docker_gnu" + ts.get_overall_test_status.return_value = ("PASS", "RUN") + ts.get_status.side_effect = ["PASS"] * 10 + + case = Case.return_value.__enter__.return_value + + success = bless_test_results( + "master", + "/tmp/baseline", + "/tmp/cases", + "gnu", + force=True, + bless_tests=["SMS"], + ) + + assert success + + @mock.patch("CIME.bless_test_results._bless_memory") + @mock.patch("CIME.bless_test_results._bless_throughput") + @mock.patch("CIME.bless_test_results.bless_history") + @mock.patch("CIME.bless_test_results.bless_namelists") + @mock.patch("CIME.bless_test_results.Case") + @mock.patch("CIME.bless_test_results.TestStatus") + @mock.patch("CIME.bless_test_results.get_test_status_files") + def test_bless_tests_results_homme( + self, + get_test_status_files, + TestStatus, + 
Case, + bless_namelists, + bless_history, + _bless_throughput, + _bless_memory, + ): + _bless_memory.return_value = (False, "") + + _bless_throughput.return_value = (False, "") + + bless_history.return_value = (False, "") + + bless_namelists.return_value = (False, "") + + get_test_status_files.return_value = [ + "/tmp/cases/SMS.f19_g16.S.docker_gnu.12345/TestStatus", + "/tmp/cases/PET.f19_g16.S.docker-gnu.12345/TestStatus", + ] + + ts = TestStatus.return_value + ts.get_name.return_value = "SMS.f19_g16.HOMME.docker_gnu" + ts.get_overall_test_status.return_value = ("PASS", "RUN") + ts.get_status.side_effect = ["PASS", "PASS", "PASS", "PASS", "PASS"] + + case = Case.return_value.__enter__.return_value + + success = bless_test_results( + "master", + "/tmp/baseline", + "/tmp/cases", + "gnu", + force=True, + no_skip_pass=True, + ) + + assert not success + + @mock.patch("CIME.bless_test_results._bless_memory") + @mock.patch("CIME.bless_test_results._bless_throughput") + @mock.patch("CIME.bless_test_results.bless_history") + @mock.patch("CIME.bless_test_results.bless_namelists") + @mock.patch("CIME.bless_test_results.Case") + @mock.patch("CIME.bless_test_results.TestStatus") + @mock.patch("CIME.bless_test_results.get_test_status_files") + def test_bless_tests_results_fail( + self, + get_test_status_files, + TestStatus, + Case, + bless_namelists, + bless_history, + _bless_throughput, + _bless_memory, + ): + _bless_memory.return_value = (False, "") + + _bless_throughput.return_value = (False, "") + + bless_history.return_value = (False, "") + + bless_namelists.return_value = (False, "") + + get_test_status_files.return_value = [ + "/tmp/cases/SMS.f19_g16.S.docker_gnu.12345/TestStatus", + "/tmp/cases/PET.f19_g16.S.docker-gnu.12345/TestStatus", + ] + + ts = TestStatus.return_value + ts.get_name.return_value = "SMS.f19_g16.S.docker_gnu" + ts.get_overall_test_status.return_value = ("PASS", "RUN") + ts.get_status.side_effect = ["PASS", "PASS", "PASS", "PASS", "PASS"] + + case = 
Case.return_value.__enter__.return_value + + success = bless_test_results( + "master", + "/tmp/baseline", + "/tmp/cases", + "gnu", + force=True, + no_skip_pass=True, + ) + + assert not success + + @mock.patch("CIME.bless_test_results._bless_memory") + @mock.patch("CIME.bless_test_results._bless_throughput") + @mock.patch("CIME.bless_test_results.bless_history") + @mock.patch("CIME.bless_test_results.bless_namelists") + @mock.patch("CIME.bless_test_results.Case") + @mock.patch("CIME.bless_test_results.TestStatus") + @mock.patch("CIME.bless_test_results.get_test_status_files") + def test_no_skip_pass( + self, + get_test_status_files, + TestStatus, + Case, + bless_namelists, + bless_history, + _bless_throughput, + _bless_memory, + ): + _bless_memory.return_value = (True, "") + + _bless_throughput.return_value = (True, "") + + bless_history.return_value = (True, "") + + bless_namelists.return_value = (True, "") + + get_test_status_files.return_value = [ + "/tmp/cases/SMS.f19_g16.S.docker_gnu.12345/TestStatus", + "/tmp/cases/PET.f19_g16.S.docker-gnu.12345/TestStatus", + ] + + ts = TestStatus.return_value + ts.get_name.return_value = "SMS.f19_g16.S.docker_gnu" + ts.get_overall_test_status.return_value = ("PASS", "RUN") + ts.get_status.side_effect = ["PASS", "PASS", "PASS", "PASS", "PASS"] + + case = Case.return_value.__enter__.return_value + + success = bless_test_results( + "master", + "/tmp/baseline", + "/tmp/cases", + "gnu", + force=True, + no_skip_pass=True, + ) + + assert success + + @mock.patch("CIME.bless_test_results.Case") + @mock.patch("CIME.bless_test_results.TestStatus") + @mock.patch("CIME.bless_test_results.get_test_status_files") + def test_baseline_root_none(self, get_test_status_files, TestStatus, Case): + get_test_status_files.return_value = [ + "/tmp/cases/SMS.f19_g16.S.docker_gnu.12345/TestStatus", + "/tmp/cases/PET.f19_g16.S.docker-gnu.12345/TestStatus", + ] + + ts = TestStatus.return_value + ts.get_name.return_value = "SMS.f19_g16.S.docker_gnu" + 
ts.get_overall_test_status.return_value = ("PASS", "RUN") + ts.get_status.side_effect = ["FAIL"] + ["PASS"] * 9 + + case = Case.return_value.__enter__.return_value + case.get_value.side_effect = [None, None] + + success = bless_test_results( + "master", + None, + "/tmp/cases", + "gnu", + force=True, + ) + + assert not success + + @mock.patch("CIME.utils.get_current_branch") + @mock.patch("CIME.bless_test_results.bless_namelists") + @mock.patch("CIME.bless_test_results.Case") + @mock.patch("CIME.bless_test_results.TestStatus") + @mock.patch("CIME.bless_test_results.get_test_status_files") + def test_baseline_name_none( + self, + get_test_status_files, + TestStatus, + Case, + bless_namelists, + get_current_branch, + ): + get_current_branch.return_value = "master" + + bless_namelists.return_value = (True, "") + + get_test_status_files.return_value = [ + "/tmp/cases/SMS.f19_g16.S.docker_gnu.12345/TestStatus", + ] + + ts = TestStatus.return_value + ts.get_name.return_value = "SMS.f19_g16.S.docker_gnu" + ts.get_overall_test_status.return_value = ("PASS", "RUN") + ts.get_status.side_effect = ["FAIL"] + ["PASS"] * 9 + + case = Case.return_value.__enter__.return_value + case.get_value.side_effect = [None, None] + + success = bless_test_results( + None, + "/tmp/baselines", + "/tmp/cases", + "gnu", + force=True, + ) + + assert success + + @mock.patch("CIME.bless_test_results.Case") + @mock.patch("CIME.bless_test_results.TestStatus") + @mock.patch("CIME.bless_test_results.get_test_status_files") + def test_exclude(self, get_test_status_files, TestStatus, Case): + get_test_status_files.return_value = [ + "/tmp/cases/SMS.f19_g16.S.docker_gnu.12345/TestStatus", + "/tmp/cases/PET.f19_g16.S.docker-gnu.12345/TestStatus", + ] + + ts = TestStatus.return_value + ts.get_name.return_value = "SMS.f19_g16.S.docker_gnu" + ts.get_overall_test_status.return_value = ("PASS", "RUN") + ts.get_status.side_effect = ["PASS", "PASS", "PASS", "PASS", "PASS"] + + case = 
Case.return_value.__enter__.return_value + + success = bless_test_results( + "master", + "/tmp/baseline", + "/tmp/cases", + "gnu", + force=True, + exclude="SMS", + ) + + assert success + + @mock.patch("CIME.bless_test_results.Case") + @mock.patch("CIME.bless_test_results.TestStatus") + @mock.patch("CIME.bless_test_results.get_test_status_files") + def test_multiple_files(self, get_test_status_files, TestStatus, Case): + get_test_status_files.return_value = [ + "/tmp/cases/SMS.f19_g16.S.docker_gnu.12345/TestStatus", + "/tmp/cases/SMS.f19_g16.S.docker-gnu.23456/TestStatus", + ] + + ts = TestStatus.return_value + ts.get_name.return_value = "SMS.f19_g16.S.docker_gnu" + ts.get_overall_test_status.return_value = ("PASS", "RUN") + ts.get_status.side_effect = ["PASS", "PASS", "PASS", "PASS", "PASS"] + + case = Case.return_value.__enter__.return_value + + success = bless_test_results( + "master", + "/tmp/baseline", + "/tmp/cases", + "gnu", + force=True, + ) + + assert success + + @mock.patch("CIME.bless_test_results.Case") + @mock.patch("CIME.bless_test_results.TestStatus") + @mock.patch("CIME.bless_test_results.get_test_status_files") + def test_bless_tests_no_match(self, get_test_status_files, TestStatus, Case): + get_test_status_files.return_value = [ + "/tmp/cases/SMS.f19_g16.S.docker_gnu/TestStatus", + "/tmp/cases/PET.f19_g16.S.docker_gnu/TestStatus", + ] + + ts = TestStatus.return_value + ts.get_name.return_value = "SMS.f19_g16.S.docker_gnu" + ts.get_overall_test_status.return_value = ("PASS", "RUN") + ts.get_status.side_effect = ["PASS"] * 10 + + case = Case.return_value.__enter__.return_value + + success = bless_test_results( + "master", + "/tmp/baseline", + "/tmp/cases", + "gnu", + force=True, + bless_tests=["SEQ"], + ) + + assert success + + @mock.patch("CIME.bless_test_results.Case") + @mock.patch("CIME.bless_test_results.TestStatus") + @mock.patch("CIME.bless_test_results.get_test_status_files") + def test_bless_all(self, get_test_status_files, TestStatus, 
Case): + get_test_status_files.return_value = [ + "/tmp/cases/SMS.f19_g16.S.docker_gnu/TestStatus", + ] + + ts = TestStatus.return_value + ts.get_name.return_value = "SMS.f19_g16.S.docker_gnu" + ts.get_overall_test_status.return_value = ("PASS", "RUN") + ts.get_status.side_effect = ["PASS", "PASS", "PASS", "PASS", "PASS"] + + case = Case.return_value.__enter__.return_value + + success = bless_test_results( + "master", + "/tmp/baseline", + "/tmp/cases", + "gnu", + force=True, + ) + + assert success + + def test_is_bless_needed_no_skip_fail(self): + ts = mock.MagicMock() + ts.get_status.side_effect = [ + "PASS", + ] + + broken_blesses = [] + + needed = is_bless_needed( + "SMS.f19_g16.A", ts, broken_blesses, "PASS", True, "RUN" + ) + + assert needed + assert broken_blesses == [] + + def test_is_bless_needed_overall_fail(self): + ts = mock.MagicMock() + ts.get_status.side_effect = [ + "PASS", + ] + + broken_blesses = [] + + needed = is_bless_needed( + "SMS.f19_g16.A", ts, broken_blesses, "FAIL", False, "RUN" + ) + + assert not needed + assert broken_blesses == [("SMS.f19_g16.A", "test did not pass")] + + def test_is_bless_needed_baseline_fail(self): + ts = mock.MagicMock() + ts.get_status.side_effect = ["PASS", "FAIL"] + + broken_blesses = [] + + needed = is_bless_needed( + "SMS.f19_g16.A", ts, broken_blesses, "PASS", False, "RUN" + ) + + assert needed + assert broken_blesses == [] + + def test_is_bless_needed_run_phase_fail(self): + ts = mock.MagicMock() + ts.get_status.side_effect = [ + "FAIL", + ] + + broken_blesses = [] + + needed = is_bless_needed( + "SMS.f19_g16.A", ts, broken_blesses, "PASS", False, "RUN" + ) + + assert not needed + assert broken_blesses == [("SMS.f19_g16.A", "run phase did not pass")] + + def test_is_bless_needed_no_run_phase(self): + ts = mock.MagicMock() + ts.get_status.side_effect = [None] + + broken_blesses = [] + + needed = is_bless_needed( + "SMS.f19_g16.A", ts, broken_blesses, "PASS", False, "RUN" + ) + + assert not needed + assert 
broken_blesses == [("SMS.f19_g16.A", "no run phase")] + + def test_is_bless_needed(self): + ts = mock.MagicMock() + ts.get_status.side_effect = ["PASS", "PASS"] + + broken_blesses = [] + + needed = is_bless_needed( + "SMS.f19_g16.A", ts, broken_blesses, "PASS", False, "RUN" + ) + + assert not needed diff --git a/CIME/tests/test_unit_case.py b/CIME/tests/test_unit_case.py index ed473cea21f..e7f8c9a2ead 100755 --- a/CIME/tests/test_unit_case.py +++ b/CIME/tests/test_unit_case.py @@ -22,10 +22,20 @@ def make_valid_case(path): class TestCaseSubmit(unittest.TestCase): def test_check_case(self): case = mock.MagicMock() + # get_value arguments TEST, COMP_WAV, COMP_INTERFACE, BUILD_COMPLETE + case.get_value.side_effect = [False, "", "", True] case_submit.check_case(case, chksum=True) case.check_all_input_data.assert_called_with(chksum=True) + def test_check_case_test(self): + case = mock.MagicMock() + # get_value arguments TEST, COMP_WAV, COMP_INTERFACE, BUILD_COMPLETE + case.get_value.side_effect = [True, "", "", True] + case_submit.check_case(case, chksum=True) + + case.check_all_input_data.assert_not_called() + @mock.patch("CIME.case.case_submit.lock_file") @mock.patch("CIME.case.case_submit.unlock_file") @mock.patch("os.path.basename") @@ -76,6 +86,7 @@ def test_submit( batch_args=None, workflow=True, chksum=True, + dryrun=False, ) @@ -222,14 +233,14 @@ def test_copy( self.srcroot, "A", "f19_g16_rx1", - machine_name="cori-haswell", + machine_name="perlmutter", ) # Check that they're all called configure.assert_called_with( "A", "f19_g16_rx1", - machine_name="cori-haswell", + machine_name="perlmutter", project=None, pecount=None, compiler=None, @@ -251,6 +262,8 @@ def test_copy( extra_machines_dir=None, case_group=None, ngpus_per_node=0, + gpu_type=None, + gpu_offload=None, ) create_caseroot.assert_called() apply_user_mods.assert_called() @@ -297,14 +310,14 @@ def test_create( self.srcroot, "A", "f19_g16_rx1", - machine_name="cori-haswell", + machine_name="perlmutter", 
) # Check that they're all called configure.assert_called_with( "A", "f19_g16_rx1", - machine_name="cori-haswell", + machine_name="perlmutter", project=None, pecount=None, compiler=None, @@ -326,6 +339,8 @@ def test_create( extra_machines_dir=None, case_group=None, ngpus_per_node=0, + gpu_type=None, + gpu_offload=None, ) create_caseroot.assert_called() apply_user_mods.assert_called() diff --git a/CIME/tests/test_unit_compare_test_results.py b/CIME/tests/test_unit_compare_test_results.py index bc298aaf9f9..4844a96c1a6 100755 --- a/CIME/tests/test_unit_compare_test_results.py +++ b/CIME/tests/test_unit_compare_test_results.py @@ -41,7 +41,7 @@ def tearDown(self): shutil.rmtree(self.tempdir, ignore_errors=True) def _compare_test_results(self, baseline, test_id, phases, **kwargs): - test_status_root = os.path.join(self.test_root, test_id) + test_status_root = os.path.join(self.test_root, "gnu." + test_id) os.makedirs(test_status_root) with TestStatus(test_status_root, "test") as status: diff --git a/CIME/tests/test_unit_compare_two.py b/CIME/tests/test_unit_compare_two.py index 9f593c44f0d..3ce0abb9b05 100755 --- a/CIME/tests/test_unit_compare_two.py +++ b/CIME/tests/test_unit_compare_two.py @@ -15,6 +15,7 @@ import os import shutil import tempfile +from unittest import mock from CIME.SystemTests.system_tests_compare_two import SystemTestsCompareTwo import CIME.test_status as test_status @@ -287,6 +288,40 @@ def get_compare_phase_name(self, mytest): ) return compare_phase_name + def test_resetup_case_single_exe(self): + # Setup + case1root = os.path.join(self.tempdir, "case1") + case1 = CaseFake(case1root) + case1._read_only_mode = False + + mytest = SystemTestsCompareTwoFake(case1) + + case1.set_value = mock.MagicMock() + case1.get_value = mock.MagicMock() + case1.get_value.side_effect = ["/tmp", "/tmp/bld", False] + + mytest._resetup_case(test_status.RUN_PHASE, reset=True) + + case1.set_value.assert_not_called() + + case1.get_value.side_effect = ["/tmp", "/tmp/bld", 
True] + + mytest._resetup_case(test_status.RUN_PHASE, reset=True) + + case1.set_value.assert_not_called() + + case1.get_value.side_effect = ["/tmp", "/other/bld", False] + + mytest._resetup_case(test_status.RUN_PHASE, reset=True) + + case1.set_value.assert_not_called() + + case1.get_value.side_effect = ["/tmp", "/other/bld", True] + + mytest._resetup_case(test_status.RUN_PHASE, reset=True) + + case1.set_value.assert_called_with("BUILD_COMPLETE", True) + def test_setup(self): # Ensure that test setup properly sets up case 1 and case 2 diff --git a/CIME/tests/test_unit_cs_status.py b/CIME/tests/test_unit_cs_status.py index 39ca99b0e42..70efd47935f 100755 --- a/CIME/tests/test_unit_cs_status.py +++ b/CIME/tests/test_unit_cs_status.py @@ -71,6 +71,23 @@ def set_phase_to_status(test_dir_path, test_name, phase, status): # Begin actual tests # ------------------------------------------------------------------------ + def test_force_rebuild(self): + test_name = "my.test.name" + test_dir = "my.test.name.testid" + test_dir_path = self.create_test_dir(test_dir) + self.create_test_status_core_passes(test_dir_path, test_name) + cs_status( + [os.path.join(test_dir_path, "TestStatus")], + force_rebuild=True, + out=self._output, + ) + self.assert_status_of_phase( + self._output.getvalue(), + test_status.TEST_PEND_STATUS, + test_status.SHAREDLIB_BUILD_PHASE, + test_name, + ) + def test_single_test(self): """cs_status for a single test should include some minimal expected output""" test_name = "my.test.name" diff --git a/CIME/tests/test_unit_hist_utils.py b/CIME/tests/test_unit_hist_utils.py new file mode 100644 index 00000000000..fe6d4866c34 --- /dev/null +++ b/CIME/tests/test_unit_hist_utils.py @@ -0,0 +1,66 @@ +import io +import unittest +from unittest import mock + +from CIME.hist_utils import copy_histfiles +from CIME.XML.archive import Archive + + +class TestHistUtils(unittest.TestCase): + @mock.patch("CIME.hist_utils.safe_copy") + def test_copy_histfiles_exclude(self, 
safe_copy): + case = mock.MagicMock() + + case.get_env.return_value.get_latest_hist_files.side_effect = [ + ["/tmp/testing.cpl.hi.nc"], + ["/tmp/testing.atm.hi.nc"], + ] + + case.get_env.return_value.exclude_testing.side_effect = [True, False] + + case.get_value.side_effect = [ + "/tmp", # RUNDIR + None, # RUN_REFCASE + "testing", # CASE + True, # TEST + True, # TEST + ] + + case.get_compset_components.return_value = ["atm"] + + test_files = [ + "testing.cpl.hi.nc", + ] + + with mock.patch("os.listdir", return_value=test_files): + comments, num_copied = copy_histfiles(case, "base") + + assert num_copied == 1 + + @mock.patch("CIME.hist_utils.safe_copy") + def test_copy_histfiles(self, safe_copy): + case = mock.MagicMock() + + case.get_env.return_value.get_latest_hist_files.return_value = [ + "/tmp/testing.cpl.hi.nc", + ] + + case.get_env.return_value.exclude_testing.return_value = False + + case.get_value.side_effect = [ + "/tmp", # RUNDIR + None, # RUN_REFCASE + "testing", # CASE + True, # TEST + ] + + case.get_compset_components.return_value = [] + + test_files = [ + "testing.cpl.hi.nc", + ] + + with mock.patch("os.listdir", return_value=test_files): + comments, num_copied = copy_histfiles(case, "base") + + assert num_copied == 1 diff --git a/CIME/tests/test_unit_system_tests.py b/CIME/tests/test_unit_system_tests.py new file mode 100644 index 00000000000..1c05bed45be --- /dev/null +++ b/CIME/tests/test_unit_system_tests.py @@ -0,0 +1,640 @@ +#!/usr/bin/env python3 + +import os +import tempfile +import gzip +import re +from re import A +import unittest +from unittest import mock +from pathlib import Path + +from CIME.config import Config +from CIME.SystemTests.system_tests_common import SystemTestsCommon +from CIME.SystemTests.system_tests_compare_two import SystemTestsCompareTwo +from CIME.SystemTests.system_tests_compare_n import SystemTestsCompareN + +CPLLOG = """ + tStamp_write: model date = 00010102 0 wall clock = 2023-09-19 19:39:42 avg dt = 0.33 dt = 0.33 + 
memory_write: model date = 00010102 0 memory = 1673.89 MB (highwater) 387.77 MB (usage) (pe= 0 comps= cpl ATM LND ICE OCN GLC ROF WAV IAC ESP) + tStamp_write: model date = 00010103 0 wall clock = 2023-09-19 19:39:42 avg dt = 0.33 dt = 0.33 + memory_write: model date = 00010103 0 memory = 1673.89 MB (highwater) 390.09 MB (usage) (pe= 0 comps= cpl ATM LND ICE OCN GLC ROF WAV IAC ESP) + tStamp_write: model date = 00010104 0 wall clock = 2023-09-19 19:39:42 avg dt = 0.33 dt = 0.33 + memory_write: model date = 00010104 0 memory = 1673.89 MB (highwater) 391.64 MB (usage) (pe= 0 comps= cpl ATM LND ICE OCN GLC ROF WAV IAC ESP) + tStamp_write: model date = 00010105 0 wall clock = 2023-09-19 19:39:43 avg dt = 0.33 dt = 0.33 + memory_write: model date = 00010105 0 memory = 1673.89 MB (highwater) 392.67 MB (usage) (pe= 0 comps= cpl ATM LND ICE OCN GLC ROF WAV IAC ESP) + tStamp_write: model date = 00010106 0 wall clock = 2023-09-19 19:39:43 avg dt = 0.33 dt = 0.33 + memory_write: model date = 00010106 0 memory = 1673.89 MB (highwater) 393.44 MB (usage) (pe= 0 comps= cpl ATM LND ICE OCN GLC ROF WAV IAC ESP) + +(seq_mct_drv): =============== SUCCESSFUL TERMINATION OF CPL7-e3sm =============== +(seq_mct_drv): =============== at YMD,TOD = 00010106 0 =============== +(seq_mct_drv): =============== # simulated days (this run) = 5.000 =============== +(seq_mct_drv): =============== compute time (hrs) = 0.000 =============== +(seq_mct_drv): =============== # simulated years / cmp-day = 719.635 =============== +(seq_mct_drv): =============== pes min memory highwater (MB) 851.957 =============== +(seq_mct_drv): =============== pes max memory highwater (MB) 1673.891 =============== +(seq_mct_drv): =============== pes min memory last usage (MB) 182.742 =============== +(seq_mct_drv): =============== pes max memory last usage (MB) 393.441 =============== +""" + + +def create_mock_case(tempdir, idx=None, cpllog_data=None): + if idx is None: + idx = 0 + + case = mock.MagicMock() + + caseroot 
= Path(tempdir, str(idx), "caseroot") + baseline_root = caseroot.parent / "baselines" + run_dir = caseroot / "run" + run_dir.mkdir(parents=True, exist_ok=False) + + if cpllog_data is not None: + cpllog = run_dir / "cpl.log.gz" + + with gzip.open(cpllog, "w") as fd: + fd.write(cpllog_data.encode("utf-8")) + + case.get_latest_cpl_log.return_value = str(cpllog) + + hist_file = run_dir / "cpl.hi.2023-01-01.nc" + hist_file.touch() + + case.get_env.return_value.get_latest_hist_files.return_value = [str(hist_file)] + + case.get_compset_components.return_value = [] + + return case, caseroot, baseline_root, run_dir + + +class TestUnitSystemTests(unittest.TestCase): + @mock.patch("CIME.SystemTests.system_tests_common.load_coupler_customization") + @mock.patch("CIME.SystemTests.system_tests_common.append_testlog") + @mock.patch("CIME.SystemTests.system_tests_common.perf_get_memory_list") + @mock.patch("CIME.SystemTests.system_tests_common.get_latest_cpl_logs") + def test_check_for_memleak_runtime_error( + self, + get_latest_cpl_logs, + perf_get_memory_list, + append_testlog, + load_coupler_customization, + ): + load_coupler_customization.return_value.perf_check_for_memory_leak.side_effect = ( + AttributeError + ) + + perf_get_memory_list.side_effect = RuntimeError + + with tempfile.TemporaryDirectory() as tempdir: + caseroot = Path(tempdir) / "caseroot" + caseroot.mkdir(parents=True, exist_ok=False) + + rundir = caseroot / "run" + rundir.mkdir(parents=True, exist_ok=False) + + cpllog = rundir / "cpl.log.gz" + + get_latest_cpl_logs.return_value = [ + str(cpllog), + ] + + case = mock.MagicMock() + case.get_value.side_effect = ( + str(caseroot), + "ERIO.ne30_g16_rx1.A.docker_gnu", + "mct", + 0.01, + ) + + common = SystemTestsCommon(case) + + common._test_status = mock.MagicMock() + + common._check_for_memleak() + + common._test_status.set_status.assert_any_call( + "MEMLEAK", "PASS", comments="insufficient data for memleak test" + ) + + append_testlog.assert_not_called() + + 
@mock.patch("CIME.SystemTests.system_tests_common.load_coupler_customization") + @mock.patch("CIME.SystemTests.system_tests_common.append_testlog") + @mock.patch("CIME.SystemTests.system_tests_common.perf_get_memory_list") + @mock.patch("CIME.SystemTests.system_tests_common.get_latest_cpl_logs") + def test_check_for_memleak_not_enough_samples( + self, + get_latest_cpl_logs, + perf_get_memory_list, + append_testlog, + load_coupler_customization, + ): + load_coupler_customization.return_value.perf_check_for_memory_leak.side_effect = ( + AttributeError + ) + + perf_get_memory_list.return_value = [ + (1, 1000.0), + (2, 0), + ] + + with tempfile.TemporaryDirectory() as tempdir: + caseroot = Path(tempdir) / "caseroot" + caseroot.mkdir(parents=True, exist_ok=False) + + rundir = caseroot / "run" + rundir.mkdir(parents=True, exist_ok=False) + + cpllog = rundir / "cpl.log.gz" + + get_latest_cpl_logs.return_value = [ + str(cpllog), + ] + + case = mock.MagicMock() + case.get_value.side_effect = ( + str(caseroot), + "ERIO.ne30_g16_rx1.A.docker_gnu", + "mct", + 0.01, + ) + + common = SystemTestsCommon(case) + + common._test_status = mock.MagicMock() + + common._check_for_memleak() + + common._test_status.set_status.assert_any_call( + "MEMLEAK", "PASS", comments="data for memleak test is insufficient" + ) + + append_testlog.assert_not_called() + + @mock.patch("CIME.SystemTests.system_tests_common.load_coupler_customization") + @mock.patch("CIME.SystemTests.system_tests_common.append_testlog") + @mock.patch("CIME.SystemTests.system_tests_common.perf_get_memory_list") + @mock.patch("CIME.SystemTests.system_tests_common.get_latest_cpl_logs") + def test_check_for_memleak_found( + self, + get_latest_cpl_logs, + perf_get_memory_list, + append_testlog, + load_coupler_customization, + ): + load_coupler_customization.return_value.perf_check_for_memory_leak.side_effect = ( + AttributeError + ) + + perf_get_memory_list.return_value = [ + (1, 1000.0), + (2, 2000.0), + (3, 3000.0), + (4, 
3000.0), + ] + + with tempfile.TemporaryDirectory() as tempdir: + caseroot = Path(tempdir) / "caseroot" + caseroot.mkdir(parents=True, exist_ok=False) + + rundir = caseroot / "run" + rundir.mkdir(parents=True, exist_ok=False) + + cpllog = rundir / "cpl.log.gz" + + get_latest_cpl_logs.return_value = [ + str(cpllog), + ] + + case = mock.MagicMock() + case.get_value.side_effect = ( + str(caseroot), + "ERIO.ne30_g16_rx1.A.docker_gnu", + "mct", + 0.01, + ) + + common = SystemTestsCommon(case) + + common._test_status = mock.MagicMock() + + common._check_for_memleak() + + expected_comment = "memleak detected, memory went from 2000.000000 to 3000.000000 in 2 days" + + common._test_status.set_status.assert_any_call( + "MEMLEAK", "FAIL", comments=expected_comment + ) + + append_testlog.assert_any_call(expected_comment, str(caseroot)) + + @mock.patch("CIME.SystemTests.system_tests_common.load_coupler_customization") + @mock.patch("CIME.SystemTests.system_tests_common.append_testlog") + @mock.patch("CIME.SystemTests.system_tests_common.perf_get_memory_list") + @mock.patch("CIME.SystemTests.system_tests_common.get_latest_cpl_logs") + def test_check_for_memleak( + self, + get_latest_cpl_logs, + perf_get_memory_list, + append_testlog, + load_coupler_customization, + ): + load_coupler_customization.return_value.perf_check_for_memory_leak.side_effect = ( + AttributeError + ) + + perf_get_memory_list.return_value = [ + (1, 3040.0), + (2, 3002.0), + (3, 3030.0), + (4, 3008.0), + ] + + with tempfile.TemporaryDirectory() as tempdir: + caseroot = Path(tempdir) / "caseroot" + caseroot.mkdir(parents=True, exist_ok=False) + + rundir = caseroot / "run" + rundir.mkdir(parents=True, exist_ok=False) + + cpllog = rundir / "cpl.log.gz" + + get_latest_cpl_logs.return_value = [ + str(cpllog), + ] + + case = mock.MagicMock() + case.get_value.side_effect = ( + str(caseroot), + "ERIO.ne30_g16_rx1.A.docker_gnu", + "mct", + 0.01, + ) + + common = SystemTestsCommon(case) + + common._test_status = 
mock.MagicMock() + + common._check_for_memleak() + + common._test_status.set_status.assert_any_call( + "MEMLEAK", "PASS", comments="" + ) + + append_testlog.assert_not_called() + + @mock.patch("CIME.SystemTests.system_tests_common.perf_compare_throughput_baseline") + @mock.patch("CIME.SystemTests.system_tests_common.append_testlog") + def test_compare_throughput(self, append_testlog, perf_compare_throughput_baseline): + perf_compare_throughput_baseline.return_value = ( + True, + "TPUTCOMP: Computation time changed by 2.00% relative to baseline", + ) + + with tempfile.TemporaryDirectory() as tempdir: + caseroot = Path(tempdir) / "caseroot" + caseroot.mkdir(parents=True, exist_ok=False) + + case = mock.MagicMock() + case.get_value.side_effect = ( + str(Path(tempdir) / "caseroot"), + "ERIO.ne30_g16_rx1.A.docker_gnu", + "mct", + ) + + common = SystemTestsCommon(case) + + common._compare_throughput() + + assert common._test_status.get_overall_test_status() == ("PASS", None) + + append_testlog.assert_any_call( + "TPUTCOMP: Computation time changed by 2.00% relative to baseline", + str(caseroot), + ) + + @mock.patch("CIME.SystemTests.system_tests_common.perf_compare_throughput_baseline") + @mock.patch("CIME.SystemTests.system_tests_common.append_testlog") + def test_compare_throughput_error_diff( + self, append_testlog, perf_compare_throughput_baseline + ): + perf_compare_throughput_baseline.return_value = (None, "Error diff value") + + with tempfile.TemporaryDirectory() as tempdir: + caseroot = Path(tempdir) / "caseroot" + caseroot.mkdir(parents=True, exist_ok=False) + + case = mock.MagicMock() + case.get_value.side_effect = ( + str(Path(tempdir) / "caseroot"), + "ERIO.ne30_g16_rx1.A.docker_gnu", + "mct", + ) + + common = SystemTestsCommon(case) + + common._compare_throughput() + + assert common._test_status.get_overall_test_status() == ("PASS", None) + + append_testlog.assert_not_called() + + 
@mock.patch("CIME.SystemTests.system_tests_common.perf_compare_throughput_baseline") + @mock.patch("CIME.SystemTests.system_tests_common.append_testlog") + def test_compare_throughput_fail( + self, append_testlog, perf_compare_throughput_baseline + ): + perf_compare_throughput_baseline.return_value = ( + False, + "Error: TPUTCOMP: Computation time increase > 5% from baseline", + ) + + with tempfile.TemporaryDirectory() as tempdir: + caseroot = Path(tempdir) / "caseroot" + caseroot.mkdir(parents=True, exist_ok=False) + + case = mock.MagicMock() + case.get_value.side_effect = ( + str(Path(tempdir) / "caseroot"), + "ERIO.ne30_g16_rx1.A.docker_gnu", + "mct", + ) + + common = SystemTestsCommon(case) + + common._compare_throughput() + + assert common._test_status.get_overall_test_status() == ("PASS", None) + + append_testlog.assert_any_call( + "Error: TPUTCOMP: Computation time increase > 5% from baseline", + str(caseroot), + ) + + @mock.patch("CIME.SystemTests.system_tests_common.perf_compare_memory_baseline") + @mock.patch("CIME.SystemTests.system_tests_common.append_testlog") + def test_compare_memory(self, append_testlog, perf_compare_memory_baseline): + perf_compare_memory_baseline.return_value = ( + True, + "MEMCOMP: Memory usage highwater has changed by 2.00% relative to baseline", + ) + + with tempfile.TemporaryDirectory() as tempdir: + caseroot = Path(tempdir) / "caseroot" + caseroot.mkdir(parents=True, exist_ok=False) + + case = mock.MagicMock() + case.get_value.side_effect = ( + str(caseroot), + "ERIO.ne30_g16_rx1.A.docker_gnu", + "mct", + ) + + common = SystemTestsCommon(case) + + common._compare_memory() + + assert common._test_status.get_overall_test_status() == ("PASS", None) + + append_testlog.assert_any_call( + "MEMCOMP: Memory usage highwater has changed by 2.00% relative to baseline", + str(caseroot), + ) + + @mock.patch("CIME.SystemTests.system_tests_common.perf_compare_memory_baseline") + 
@mock.patch("CIME.SystemTests.system_tests_common.append_testlog") + def test_compare_memory_erorr_diff( + self, append_testlog, perf_compare_memory_baseline + ): + perf_compare_memory_baseline.return_value = (None, "Error diff value") + + with tempfile.TemporaryDirectory() as tempdir: + caseroot = Path(tempdir) / "caseroot" + caseroot.mkdir(parents=True, exist_ok=False) + + case = mock.MagicMock() + case.get_value.side_effect = ( + str(caseroot), + "ERIO.ne30_g16_rx1.A.docker_gnu", + "mct", + ) + + common = SystemTestsCommon(case) + + common._compare_memory() + + assert common._test_status.get_overall_test_status() == ("PASS", None) + + append_testlog.assert_not_called() + + @mock.patch("CIME.SystemTests.system_tests_common.perf_compare_memory_baseline") + @mock.patch("CIME.SystemTests.system_tests_common.append_testlog") + def test_compare_memory_erorr_fail( + self, append_testlog, perf_compare_memory_baseline + ): + perf_compare_memory_baseline.return_value = ( + False, + "Error: Memory usage increase >5% from baseline's 1000.000000 to 1002.000000", + ) + + with tempfile.TemporaryDirectory() as tempdir: + caseroot = Path(tempdir) / "caseroot" + caseroot.mkdir(parents=True, exist_ok=False) + + case = mock.MagicMock() + case.get_value.side_effect = ( + str(caseroot), + "ERIO.ne30_g16_rx1.A.docker_gnu", + "mct", + ) + + common = SystemTestsCommon(case) + + common._compare_memory() + + assert common._test_status.get_overall_test_status() == ("PASS", None) + + append_testlog.assert_any_call( + "Error: Memory usage increase >5% from baseline's 1000.000000 to 1002.000000", + str(caseroot), + ) + + def test_generate_baseline(self): + with tempfile.TemporaryDirectory() as tempdir: + case, caseroot, baseline_root, run_dir = create_mock_case( + tempdir, cpllog_data=CPLLOG + ) + + get_value_calls = [ + str(caseroot), + "ERIO.ne30_g16_rx1.A.docker_gnu", + "mct", + str(run_dir), + "case.std", + str(baseline_root), + "master/ERIO.ne30_g16_rx1.A.docker_gnu", + 
"ERIO.ne30_g16_rx1.A.docker_gnu.G.20230919_193255_z9hg2w", + "mct", + str(run_dir), + "ERIO", + "ERIO.ne30_g16_rx1.A.docker_gnu", + "master/ERIO.ne30_g16_rx1.A.docker_gnu", + str(baseline_root), + "master/ERIO.ne30_g16_rx1.A.docker_gnu", + str(run_dir), + "mct", + "/tmp/components/cpl", + str(run_dir), + "mct", + str(run_dir), + "mct", + ] + + if Config.instance().create_bless_log: + get_value_calls.insert(12, os.getcwd()) + + case.get_value.side_effect = get_value_calls + + common = SystemTestsCommon(case) + + common._generate_baseline() + + baseline_dir = baseline_root / "master" / "ERIO.ne30_g16_rx1.A.docker_gnu" + + assert (baseline_dir / "cpl.log.gz").exists() + assert (baseline_dir / "cpl-tput.log").exists() + assert (baseline_dir / "cpl-mem.log").exists() + assert (baseline_dir / "cpl.hi.2023-01-01.nc").exists() + + with open(baseline_dir / "cpl-tput.log") as fd: + lines = fd.readlines() + + assert len(lines) == 1 + assert re.match("sha:.* date:.* (\d+\.\d+)", lines[0]) + + with open(baseline_dir / "cpl-mem.log") as fd: + lines = fd.readlines() + + assert len(lines) == 1 + assert re.match("sha:.* date:.* (\d+\.\d+)", lines[0]) + + def test_kwargs(self): + case = mock.MagicMock() + + case.get_value.side_effect = ( + "/caseroot", + "SMS.f19_g16.S", + "cpl", + "/caseroot", + "SMS.f19_g16.S", + ) + + _ = SystemTestsCommon(case, something="random") + + case = mock.MagicMock() + + case.get_value.side_effect = ( + "/caseroot", + "SMS.f19_g16.S", + "cpl", + "/caseroot", + "SMS.f19_g16.S", + ) + + orig1 = SystemTestsCompareTwo._get_caseroot + orig2 = SystemTestsCompareTwo._get_caseroot2 + + SystemTestsCompareTwo._get_caseroot = mock.MagicMock() + SystemTestsCompareTwo._get_caseroot2 = mock.MagicMock() + + _ = SystemTestsCompareTwo(case, something="random") + + SystemTestsCompareTwo._get_caseroot = orig1 + SystemTestsCompareTwo._get_caseroot2 = orig2 + + case = mock.MagicMock() + + case.get_value.side_effect = ( + "/caseroot", + "SMS.f19_g16.S", + "cpl", + 
"/caseroot", + "SMS.f19_g16.S", + ) + + orig = SystemTestsCompareN._get_caseroots + + SystemTestsCompareN._get_caseroots = mock.MagicMock() + + _ = SystemTestsCompareN(case, something="random") + + SystemTestsCompareN._get_caseroots = orig + + def test_dry_run(self): + case = mock.MagicMock() + + case.get_value.side_effect = ( + "/caseroot", + "SMS.f19_g16.S", + "cpl", + "/caseroot", + "SMS.f19_g16.S", + ) + + orig = SystemTestsCompareTwo._setup_cases_if_not_yet_done + + SystemTestsCompareTwo._setup_cases_if_not_yet_done = mock.MagicMock() + + system_test = SystemTestsCompareTwo(case, dry_run=True) + + system_test._setup_cases_if_not_yet_done.assert_not_called() + + case = mock.MagicMock() + + case.get_value.side_effect = ( + "/caseroot", + "SMS.f19_g16.S", + "cpl", + "/caseroot", + "SMS.f19_g16.S", + ) + + system_test = SystemTestsCompareTwo(case) + + system_test._setup_cases_if_not_yet_done.assert_called() + + SystemTestsCompareTwo._setup_cases_if_not_yet_done = orig + + orig = SystemTestsCompareN._setup_cases_if_not_yet_done + + SystemTestsCompareN._setup_cases_if_not_yet_done = mock.MagicMock() + + case = mock.MagicMock() + + case.get_value.side_effect = ( + "/caseroot", + "SMS.f19_g16.S", + "cpl", + "/caseroot", + "SMS.f19_g16.S", + ) + + system_test = SystemTestsCompareN(case, dry_run=True) + + system_test._setup_cases_if_not_yet_done.assert_not_called() + + case = mock.MagicMock() + + case.get_value.side_effect = ( + "/caseroot", + "SMS.f19_g16.S", + "cpl", + "/caseroot", + "SMS.f19_g16.S", + ) + + system_test = SystemTestsCompareN(case) + + system_test._setup_cases_if_not_yet_done.assert_called() + + SystemTestsCompareN._setup_cases_if_not_yet_done = orig diff --git a/CIME/tests/test_unit_test_status.py b/CIME/tests/test_unit_test_status.py index 0b79c8bac6a..9b3036801fc 100755 --- a/CIME/tests/test_unit_test_status.py +++ b/CIME/tests/test_unit_test_status.py @@ -41,6 +41,20 @@ def _set_phase_to_status(self, phase, status): with self._ts: 
self._ts.set_status(phase, status) + def test_get_latest_phase(self): + assert self._ts.get_latest_phase() == test_status.RUN_PHASE + + def test_current_is(self): + assert self._ts.current_is(test_status.RUN_PHASE, test_status.TEST_PASS_STATUS) + + assert not self._ts.current_is( + test_status.RUN_PHASE, test_status.TEST_PEND_STATUS + ) + + assert not self._ts.current_is( + test_status.SUBMIT_PHASE, test_status.TEST_PASS_STATUS + ) + # ------------------------------------------------------------------------ # Tests of TestStatus.phase_statuses_dump # ------------------------------------------------------------------------ diff --git a/CIME/tests/test_unit_user_nl_utils.py b/CIME/tests/test_unit_user_nl_utils.py index e2748e0995b..9220182eeeb 100755 --- a/CIME/tests/test_unit_user_nl_utils.py +++ b/CIME/tests/test_unit_user_nl_utils.py @@ -72,6 +72,40 @@ def test_append(self): expected_contents, os.path.join(self._caseroot, filename) ) + def test_append_list(self): + # Define some variables + component = "foo" + # deliberately exclude new line from file contents, to make sure that's + # handled correctly + orig_contents = "bar = 42" + contents_to_append_1 = "baz = 101" + contents_to_append_2 = "qux = 987" + contents_to_append = [ + contents_to_append_1, + contents_to_append_2, + ] + + # Setup + filename = self.write_user_nl_file(component, orig_contents) + + # Exercise + user_nl_utils.append_to_user_nl_files( + caseroot=self._caseroot, component=component, contents=contents_to_append + ) + + # Verify + expected_contents = ( + orig_contents + + "\n" + + contents_to_append_1 + + "\n" + + contents_to_append_2 + + "\n" + ) + self.assertFileContentsEqual( + expected_contents, os.path.join(self._caseroot, filename) + ) + def test_append_multiple_files(self): # Simulates a multi-instance test component = "foo" diff --git a/CIME/tests/test_unit_xml_archive_base.py b/CIME/tests/test_unit_xml_archive_base.py new file mode 100644 index 00000000000..98f58055c9c --- /dev/null 
+++ b/CIME/tests/test_unit_xml_archive_base.py @@ -0,0 +1,184 @@ +#!/usr/bin/env python3 + +import os +import io +import unittest +import tempfile +from contextlib import contextmanager +from pathlib import Path +from unittest import mock + +from CIME.XML.archive_base import ArchiveBase + +TEST_CONFIG = """ + + unique\.name\.unique.* + +""" + +EXACT_TEST_CONFIG = """ + + unique\.name\.unique.nc + +""" + +EXCLUDE_TEST_CONFIG = """ + + unique\.name\.unique.nc + + + unique\.name\.unique.nc + + + unique\.name\.unique.nc + +""" + + +class TestXMLArchiveBase(unittest.TestCase): + @contextmanager + def _setup_environment(self, test_files): + with tempfile.TemporaryDirectory() as temp_dir: + for x in test_files: + Path(temp_dir, x).touch() + + yield temp_dir + + def test_exclude_testing(self): + archiver = ArchiveBase() + + archiver.read_fd(io.StringIO(EXCLUDE_TEST_CONFIG)) + + # no attribute + assert not archiver.exclude_testing("eam") + + # not in config + assert not archiver.exclude_testing("mpassi") + + # set false + assert not archiver.exclude_testing("mpasso") + + # set true + assert archiver.exclude_testing("cpl") + + def test_match_files(self): + archiver = ArchiveBase() + + archiver.read_fd(io.StringIO(TEST_CONFIG)) + + fail_files = [ + "othername.eam.unique.name.unique.0001-01-01-0000.nc", # casename mismatch + "casename.satm.unique.name.unique.0001-01-01-0000.nc", # model (component?) 
mismatch + "casename.eam.0001-01-01-0000.nc", # missing hist_file_extension + "casename.eam.unique.name.unique.0001-01-01-0000.nc", + "casename.eam.unique.name.unique.some.extra.0001-01-01-0000.nc", + ] + + test_files = [ + "casename.eam1.unique.name.unique.0001-01-01-0000.nc", + "casename.eam1_.unique.name.unique.0001-01-01-0000.nc", + "casename.eam_.unique.name.unique.0001-01-01-0000.nc", + "casename.eam1990.unique.name.unique.0001-01-01-0000.nc", + "casename.eam_1990.unique.name.unique.0001-01-01-0000.nc", + "casename.eam1_1990.unique.name.unique.0001-01-01-0000.nc", + "casename.eam11990.unique.name.unique.0001-01-01-0000.nc", + "casename.eam.unique.name.unique.0001-01-01-0000.nc", + "casename.eam.unique.name.unique.some.extra.0001-01-01-0000.nc", + "casename.eam.unique.name.unique.0001-01-01-0000.nc.base", + "casename.eam.unique.name.unique.some.extra.0001-01-01-0000.nc.base", + ] + + with self._setup_environment(fail_files + test_files) as temp_dir: + hist_files = archiver.get_all_hist_files( + "casename", "eam", from_dir=temp_dir + ) + + test_files.sort() + hist_files.sort() + + assert len(hist_files) == len(test_files) + + # assert all match except first + for x, y in zip(test_files, hist_files): + assert x == y, f"{x} != {y}" + + def test_extension_included(self): + archiver = ArchiveBase() + + archiver.read_fd(io.StringIO(EXACT_TEST_CONFIG)) + + fail_files = [ + "othername.eam.unique.name.unique.0001-01-01-0000.nc", # casename mismatch + "casename.satm.unique.name.unique.0001-01-01-0000.nc", # model (component?) 
mismatch + "casename.eam.0001-01-01-0000.nc", # missing hist_file_extension + "casename.eam.unique.name.unique.0001-01-01-0000.nc", + "casename.eam.unique.name.unique.some.extra.0001-01-01-0000.nc", + "casename.eam.unique.name.unique.0001-01-01-0000.nc.base", + "casename.eam.unique.name.unique.some.extra.0001-01-01-0000.nc.base", + ] + + test_files = [ + "casename.eam1.unique.name.unique.nc", + "casename.eam1_.unique.name.unique.nc", + "casename.eam_.unique.name.unique.nc", + "casename.eam1990.unique.name.unique.nc", + "casename.eam_1990.unique.name.unique.nc", + "casename.eam1_1990.unique.name.unique.nc", + "casename.eam11990.unique.name.unique.nc", + "casename.eam.unique.name.unique.nc", + ] + + with self._setup_environment(fail_files + test_files) as temp_dir: + hist_files = archiver.get_all_hist_files( + "casename", "eam", suffix="nc", from_dir=temp_dir + ) + + test_files.sort() + hist_files.sort() + + assert len(hist_files) == len(test_files) + + # assert all match except first + for x, y in zip(test_files, hist_files): + assert x == y, f"{x} != {y}" + + def test_suffix(self): + archiver = ArchiveBase() + + archiver.read_fd(io.StringIO(TEST_CONFIG)) + + fail_files = [ + "othername.eam.unique.name.unique.0001-01-01-0000.nc", # casename mismatch + "casename.satm.unique.name.unique.0001-01-01-0000.nc", # model (component?) 
mismatch + "casename.eam.0001-01-01-0000.nc", # missing hist_file_extension + "casename.eam.unique.name.unique.0001-01-01-0000.nc", + "casename.eam.unique.name.unique.some.extra.0001-01-01-0000.nc", + # ensure these do not match when suffix is provided + "casename.eam1.unique.name.unique.0001-01-01-0000.nc", + "casename.eam1_.unique.name.unique.0001-01-01-0000.nc", + "casename.eam_.unique.name.unique.0001-01-01-0000.nc", + "casename.eam1990.unique.name.unique.0001-01-01-0000.nc", + "casename.eam_1990.unique.name.unique.0001-01-01-0000.nc", + "casename.eam1_1990.unique.name.unique.0001-01-01-0000.nc", + "casename.eam11990.unique.name.unique.0001-01-01-0000.nc", + "casename.eam.unique.name.unique.0001-01-01-0000.nc", + "casename.eam.unique.name.unique.some.extra.0001-01-01-0000.nc", + ] + + test_files = [ + "casename.eam.unique.name.unique.0001-01-01-0000.nc.base", + "casename.eam.unique.name.unique.some.extra.0001-01-01-0000.nc.base", + ] + + with self._setup_environment(fail_files + test_files) as temp_dir: + hist_files = archiver.get_all_hist_files( + "casename", "eam", suffix="base", from_dir=temp_dir + ) + + assert len(hist_files) == len(test_files) + + hist_files.sort() + test_files.sort() + + for x, y in zip(hist_files, test_files): + assert x == y, f"{x} != {y}" diff --git a/CIME/tests/test_unit_xml_env_batch.py b/CIME/tests/test_unit_xml_env_batch.py index 01b96d1ead2..d59c4b080c9 100755 --- a/CIME/tests/test_unit_xml_env_batch.py +++ b/CIME/tests/test_unit_xml_env_batch.py @@ -5,12 +5,189 @@ import tempfile from unittest import mock -from CIME.XML.env_batch import EnvBatch +from CIME.utils import CIMEError +from CIME.XML.env_batch import EnvBatch, get_job_deps # pylint: disable=unused-argument class TestXMLEnvBatch(unittest.TestCase): + @mock.patch("CIME.XML.env_batch.EnvBatch._submit_single_job") + def test_submit_jobs(self, _submit_single_job): + case = mock.MagicMock() + + case.get_value.side_effect = [ + False, + ] + + env_batch = EnvBatch() + + with 
self.assertRaises(CIMEError): + env_batch.submit_jobs(case) + + @mock.patch("CIME.XML.env_batch.os.path.isfile") + @mock.patch("CIME.XML.env_batch.get_batch_script_for_job") + @mock.patch("CIME.XML.env_batch.EnvBatch._submit_single_job") + def test_submit_jobs_dependency( + self, _submit_single_job, get_batch_script_for_job, isfile + ): + case = mock.MagicMock() + + case.get_env.return_value.get_jobs.return_value = [ + "case.build", + "case.run", + ] + + case.get_env.return_value.get_value.side_effect = [ + None, + "", + None, + "case.build", + ] + + case.get_value.side_effect = [ + False, + ] + + _submit_single_job.side_effect = ["0", "1"] + + isfile.return_value = True + + get_batch_script_for_job.side_effect = [".case.build", ".case.run"] + + env_batch = EnvBatch() + + depid = env_batch.submit_jobs(case) + + _submit_single_job.assert_any_call( + case, + "case.build", + skip_pnl=False, + resubmit_immediate=False, + dep_jobs=[], + allow_fail=False, + no_batch=False, + mail_user=None, + mail_type=None, + batch_args=None, + dry_run=False, + workflow=True, + ) + _submit_single_job.assert_any_call( + case, + "case.run", + skip_pnl=False, + resubmit_immediate=False, + dep_jobs=[ + "0", + ], + allow_fail=False, + no_batch=False, + mail_user=None, + mail_type=None, + batch_args=None, + dry_run=False, + workflow=True, + ) + assert depid == {"case.build": "0", "case.run": "1"} + + @mock.patch("CIME.XML.env_batch.os.path.isfile") + @mock.patch("CIME.XML.env_batch.get_batch_script_for_job") + @mock.patch("CIME.XML.env_batch.EnvBatch._submit_single_job") + def test_submit_jobs_single( + self, _submit_single_job, get_batch_script_for_job, isfile + ): + case = mock.MagicMock() + + case.get_env.return_value.get_jobs.return_value = [ + "case.run", + ] + + case.get_env.return_value.get_value.return_value = None + + case.get_value.side_effect = [ + False, + ] + + _submit_single_job.return_value = "0" + + isfile.return_value = True + + get_batch_script_for_job.side_effect = [ + 
".case.run", + ] + + env_batch = EnvBatch() + + depid = env_batch.submit_jobs(case) + + _submit_single_job.assert_any_call( + case, + "case.run", + skip_pnl=False, + resubmit_immediate=False, + dep_jobs=[], + allow_fail=False, + no_batch=False, + mail_user=None, + mail_type=None, + batch_args=None, + dry_run=False, + workflow=True, + ) + assert depid == {"case.run": "0"} + + def test_get_job_deps(self): + # no jobs + job_deps = get_job_deps("", {}) + + assert job_deps == [] + + # dependency doesn't exist + job_deps = get_job_deps("case.run", {}) + + assert job_deps == [] + + job_deps = get_job_deps("case.run", {"case.run": 0}) + + assert job_deps == [ + "0", + ] + + job_deps = get_job_deps( + "case.run case.post_run_io", {"case.run": 0, "case.post_run_io": 1} + ) + + assert job_deps == ["0", "1"] + + # old syntax + job_deps = get_job_deps("case.run and case.post_run_io", {"case.run": 0}) + + assert job_deps == [ + "0", + ] + + # old syntax + job_deps = get_job_deps( + "(case.run and case.post_run_io) or case.test", {"case.run": 0} + ) + + assert job_deps == [ + "0", + ] + + job_deps = get_job_deps("", {}, user_prereq="2") + + assert job_deps == [ + "2", + ] + + job_deps = get_job_deps("", {}, prev_job="1") + + assert job_deps == [ + "1", + ] + def test_get_submit_args_job_queue(self): with tempfile.NamedTemporaryFile() as tfile: tfile.write( diff --git a/CIME/tests/test_unit_xml_machines.py b/CIME/tests/test_unit_xml_machines.py new file mode 100644 index 00000000000..d051a5d7d3e --- /dev/null +++ b/CIME/tests/test_unit_xml_machines.py @@ -0,0 +1,166 @@ +import unittest +import io + +from CIME.XML.machines import Machines + +MACHINE_TEST_XML = """ + + Some default machine definition + ubuntu + gnu,intel + mpi-serial + custom + /data/timings + testing + /data/scratch + /data/inputdata + /data/inputdata/atm/datm7 + $CIME_OUTPUT_ROOT/archive/$CASE + /data/baselines/$COMPILER + /data/tools/cprnc + 8 + e3sm_developer + 4 + slurm + developers + 8 + 8 + FALSE + + srun + + 
-n {{ total_tasks }} -N {{ num_nodes }} --kill-on-bad-exit + -c $SHELL{echo 128/ {{ tasks_per_node }} |bc} + $SHELL{if [ 128 -ge `./xmlquery --value MAX_MPITASKS_PER_NODE` ]; then echo "--cpu_bind=cores"; else echo "--cpu_bind=threads";fi;} + -m plane={{ tasks_per_node }} + + + + /opt/ubuntu/pe/modules/default/init/perl.pm + /opt/ubuntu/pe/modules/default/init/python.py + /opt/ubuntu/pe/modules/default/init/sh + /opt/ubuntu/pe/modules/default/init/csh + /opt/ubuntu/pe/modules/default/bin/modulecmd perl + /opt/ubuntu/pe/modules/default/bin/modulecmd python + module + module + + ubuntupe + ubuntu-mpich + ubuntu-parallel-netcdf + ubuntu-hdf5-parallel + ubuntu-hdf5 + ubuntu-netcdf + ubuntu-netcdf-hdf5parallel + ubuntupe/2.7.15 + + + PrgEnv-ubuntu + PrgEnv-gnu + PrgEnv-gnu/8.3.3 + gcc/12.1.0 + + + ubuntu-mpich/8.1.16 + ubuntu-hdf5-parallel/1.12.1.3 + ubuntu-netcdf-hdf5parallel/4.8.1.3 + ubuntu-parallel-netcdf/1.12.2.3 + + + + $CIME_OUTPUT_ROOT/$CASE/run + $CIME_OUTPUT_ROOT/$CASE/bld + 0.1 + 1000 + + /usr/lib/perl5/5.26.2 + /opt/ubuntu/pe/netcdf-hdf5parallel/4.8.1.3/gnu/9.1/ + /opt/ubuntu/pe/netcdf-hdf5parallel/4.8.1.3/gnu/9.1/ + $SHELL{dirname $(dirname $(which pnetcdf_version))} + + + 128M + + + cores + + + + Some default machine definition + ubuntu + gnu,intel + mpi-serial + custom + /data/timings + testing + /data/scratch + /data/inputdata + /data/inputdata/atm/datm7 + $CIME_OUTPUT_ROOT/archive/$CASE + /data/baselines/$COMPILER + /data/tools/cprnc + 8 + e3sm_developer + 4 + none + developers + 8 + 8 + FALSE + + srun + + -n {{ total_tasks }} -N {{ num_nodes }} --kill-on-bad-exit + -c $SHELL{echo 128/ {{ tasks_per_node }} |bc} + $SHELL{if [ 128 -ge `./xmlquery --value MAX_MPITASKS_PER_NODE` ]; then echo "--cpu_bind=cores"; else echo "--cpu_bind=threads";fi;} + -m plane={{ tasks_per_node }} + + + $CIME_OUTPUT_ROOT/$CASE/run + $CIME_OUTPUT_ROOT/$CASE/bld + 0.1 + 1000 + + /usr/lib/perl5/5.26.2 + /opt/ubuntu/pe/netcdf-hdf5parallel/4.8.1.3/gnu/9.1/ + 
/opt/ubuntu/pe/netcdf-hdf5parallel/4.8.1.3/gnu/9.1/ + $SHELL{dirname $(dirname $(which pnetcdf_version))} + + + 128M + + + cores + + + +""" + + +class TestUnitXMLMachines(unittest.TestCase): + def setUp(self): + Machines._FILEMAP = {} + # read_only=False for github testing + # MACHINE IS SET BELOW TO USE DEFINITION IN "MACHINE_TEST_XML" + self.machine = Machines() + + self.machine.read_fd(io.StringIO(MACHINE_TEST_XML)) + + self.machine.set_machine("default") + + def test_has_batch_system(self): + assert self.machine.has_batch_system() + + self.machine.set_machine("default-no-batch") + + assert not self.machine.has_batch_system() + + def test_is_valid_MPIlib(self): + assert self.machine.is_valid_MPIlib("mpi-serial") + + assert not self.machine.is_valid_MPIlib("mpi-bogus") + + def test_is_valid_compiler(self): + assert self.machine.is_valid_compiler("gnu") + + assert not self.machine.is_valid_compiler("bogus") diff --git a/CIME/tests/test_unit_xml_tests.py b/CIME/tests/test_unit_xml_tests.py new file mode 100644 index 00000000000..88a9750130d --- /dev/null +++ b/CIME/tests/test_unit_xml_tests.py @@ -0,0 +1,94 @@ +#!/usr/bin/env python3 + +import re +import unittest +import tempfile +from pathlib import Path +from unittest import mock + +from CIME.XML.tests import Tests + + +class TestXMLTests(unittest.TestCase): + def setUp(self): + # reset file caching + Tests._FILEMAP = {} + + # skip hard to mock function call + @mock.patch( + "CIME.SystemTests.system_tests_compare_two.SystemTestsCompareTwo._setup_cases_if_not_yet_done" + ) + def test_support_single_exe(self, _setup_cases_if_not_yet_done): + with tempfile.TemporaryDirectory() as tdir: + test_file = Path(tdir) / "sms.py" + + test_file.touch(exist_ok=True) + + caseroot = Path(tdir) / "caseroot1" + + caseroot.mkdir(exist_ok=True) + + case = mock.MagicMock() + + case.get_compset_components.return_value = () + + case.get_value.side_effect = ( + "SMS", + tdir, + f"{caseroot}", + "SMS.f19_g16.S", + "cpl", + 
"SMS.f19_g16.S", + f"{caseroot}", + "SMS.f19_g16.S", + ) + + tests = Tests() + + tests.support_single_exe(case) + + # skip hard to mock function call + @mock.patch( + "CIME.SystemTests.system_tests_compare_two.SystemTestsCompareTwo._setup_cases_if_not_yet_done" + ) + def test_support_single_exe_error(self, _setup_cases_if_not_yet_done): + with tempfile.TemporaryDirectory() as tdir: + test_file = Path(tdir) / "erp.py" + + test_file.touch(exist_ok=True) + + caseroot = Path(tdir) / "caseroot1" + + caseroot.mkdir(exist_ok=True) + + case = mock.MagicMock() + + case.get_compset_components.return_value = () + + case.get_value.side_effect = ( + "ERP", + tdir, + f"{caseroot}", + "ERP.f19_g16.S", + "cpl", + "ERP.f19_g16.S", + f"{caseroot}", + "ERP.f19_g16.S", + ) + + tests = Tests() + + with self.assertRaises(Exception) as e: + tests.support_single_exe(case) + + assert ( + re.search( + r"does not support the '--single-exe' option as it requires separate builds", + f"{e.exception}", + ) + is not None + ), f"{e.exception}" + + +if __name__ == "__main__": + unittest.main() diff --git a/CIME/utils.py b/CIME/utils.py index b79fcd5116e..7471c2e4f4c 100644 --- a/CIME/utils.py +++ b/CIME/utils.py @@ -8,8 +8,10 @@ import importlib.util import errno, signal, warnings, filecmp import stat as statlib +from argparse import Action from contextlib import contextmanager +# pylint: disable=deprecated-module from distutils import file_util # Return this error code if the scripts worked but tests failed @@ -21,6 +23,14 @@ GLOBAL = {} +def deprecate_action(message): + class ActionStoreDeprecated(Action): + def __call__(self, parser, namespace, values, option_string=None): + raise DeprecationWarning(f"{option_string} is deprecated{message}") + + return ActionStoreDeprecated + + def import_from_file(name, file_path): loader = importlib.machinery.SourceFileLoader(name, file_path) @@ -192,7 +202,7 @@ def check_name(fullname, additional_chars=None, fullpath=False): False """ - chars = 
"+*?<>/{}[\]~`@:" # pylint: disable=anomalous-backslash-in-string + chars = r"+*?<>/{}[\]~`@:" if additional_chars is not None: chars += additional_chars if fullname.endswith("/"): @@ -262,7 +272,7 @@ def _read_cime_config_file(): cime_config_file = os.path.abspath( os.path.join(os.path.expanduser("~"), ".cime", "config") ) - cime_config = configparser.SafeConfigParser() + cime_config = configparser.ConfigParser() if os.path.isfile(cime_config_file): cime_config.read(cime_config_file) for section in cime_config.sections(): @@ -652,8 +662,8 @@ def import_and_run_sub_or_cmd( run_sub_or_cmd( cmd, cmdargs, subname, subargs, logfile, case, from_dir, timeout ) - except Exception as e: - raise e from None + except Exception as e1: + raise e1 from None except Exception: if logfile: with open(logfile, "a") as log_fd: @@ -1005,6 +1015,12 @@ def parse_test_name(test_name): ['ERS', None, 'fe12_123', 'JGF', 'machine', 'compiler', None] >>> parse_test_name('ERS.fe12_123.JGF.machine_compiler.test-mods') ['ERS', None, 'fe12_123', 'JGF', 'machine', 'compiler', ['test/mods']] + >>> parse_test_name('ERS.fe12_123.JGF.*_compiler.test-mods') + ['ERS', None, 'fe12_123', 'JGF', None, 'compiler', ['test/mods']] + >>> parse_test_name('ERS.fe12_123.JGF.machine_*.test-mods') + ['ERS', None, 'fe12_123', 'JGF', 'machine', None, ['test/mods']] + >>> parse_test_name('ERS.fe12_123.JGF.*_*.test-mods') + ['ERS', None, 'fe12_123', 'JGF', None, None, ['test/mods']] >>> parse_test_name('ERS.fe12_123.JGF.machine_compiler.test-mods--other-dir-path--and-one-more') ['ERS', None, 'fe12_123', 'JGF', 'machine', 'compiler', ['test/mods', 'other/dir/path', 'and/one/more']] >>> parse_test_name('SMS.f19_g16.2000_DATM%QI.A_XLND_SICE_SOCN_XROF_XGLC_SWAV.mach-ine_compiler.test-mods') # doctest: +IGNORE_EXCEPTION_DETAIL @@ -1038,6 +1054,10 @@ def parse_test_name(test_name): ), ) rv[4:5] = rv[4].split("_") + if rv[4] == "*": + rv[4] = None + if rv[5] == "*": + rv[5] = None rv.pop() if rv[-1] is not None: @@ -1130,7 
+1150,6 @@ def get_full_test_name( ] result = partial_test - for partial_val, arg_val, name in required_fields: if partial_val is None: # Add to result based on args @@ -1140,9 +1159,14 @@ def get_full_test_name( partial_test, name ), ) - result = "{}{}{}".format( - result, "_" if name == "compiler" else ".", arg_val - ) + if name == "machine" and "*_" in result: + result = result.replace("*_", arg_val + "_") + elif name == "compiler" and "_*" in result: + result = result.replace("_*", "_" + arg_val) + else: + result = "{}{}{}".format( + result, "_" if name == "compiler" else ".", arg_val + ) elif arg_val is not None and partial_val != partial_compiler: expect( arg_val == partial_val, @@ -1390,6 +1414,7 @@ def safe_copy(src_path, tgt_path, preserve_meta=True): tgt_path, preserve_mode=preserve_meta, preserve_times=preserve_meta, + verbose=0, ) else: # I am not the owner, just copy file contents @@ -1403,6 +1428,7 @@ def safe_copy(src_path, tgt_path, preserve_meta=True): tgt_path, preserve_mode=preserve_meta, preserve_times=preserve_meta, + verbose=0, ) # If src file was executable, then the tgt file should be too @@ -1602,20 +1628,25 @@ def find_files(rootdir, pattern): def setup_standard_logging_options(parser): + group = parser.add_argument_group("Logging options") + helpfile = os.path.join(os.getcwd(), os.path.basename("{}.log".format(sys.argv[0]))) - parser.add_argument( + + group.add_argument( "-d", "--debug", action="store_true", help="Print debug information (very verbose) to file {}".format(helpfile), ) - parser.add_argument( + + group.add_argument( "-v", "--verbose", action="store_true", help="Add additional context (time and file) to log messages", ) - parser.add_argument( + + group.add_argument( "-s", "--silent", action="store_true", @@ -2681,3 +2712,46 @@ def clear_folder(_dir): os.rmdir(file_path) except Exception as e: print(e) + + +def add_flag_to_cmd(flag, val): + """ + Given a flag and value for a shell command, return a string + + >>> 
add_flag_to_cmd("-f", "hi") + '-f hi' + >>> add_flag_to_cmd("--foo", 42) + '--foo 42' + >>> add_flag_to_cmd("--foo=", 42) + '--foo=42' + >>> add_flag_to_cmd("--foo:", 42) + '--foo:42' + >>> add_flag_to_cmd("--foo:", " hi ") + '--foo:hi' + """ + no_space_chars = "=:" + no_space = False + for item in no_space_chars: + if flag.endswith(item): + no_space = True + + separator = "" if no_space else " " + return "{}{}{}".format(flag, separator, str(val).strip()) + + +def is_comp_standalone(case): + """ + Test if the case is a single component standalone + such as FKESSLER + """ + stubcnt = 0 + classes = case.get_values("COMP_CLASSES") + for comp in classes: + if case.get_value("COMP_{}".format(comp)) == "s{}".format(comp.lower()): + stubcnt = stubcnt + 1 + else: + model = comp.lower() + numclasses = len(classes) + if stubcnt >= numclasses - 2: + return True, model + return False, get_model() diff --git a/CMakeLists.txt b/CMakeLists.txt index 94546b8e405..b8de549f4ed 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -22,73 +22,6 @@ include_directories(${NetCDF_C_INCLUDE_DIRS} ${NetCDF_Fortran_INCLUDE_DIRS}) # TODO: Some of the below should be done in the relevant directories, not in # this top level CMakeLists. 
-# ------------------------------------------------------------------------ -# Build mct -# ------------------------------------------------------------------------ -if (EXISTS ${SRC_ROOT}/libraries/mct) - set(MCT_ROOT "${SRC_ROOT}/libraries/mct") -else() - set(MCT_ROOT "${SRC_ROOT}/externals/mct") -endif() - -if (USE_MPI_SERIAL) - set(ENABLE_MPI_SERIAL "--enable-mpiserial") -else() - set(ENABLE_MPI_SERIAL "") -endif() - -ExternalProject_add(mct_project - PREFIX ${CMAKE_CURRENT_BINARY_DIR} - SOURCE_DIR ${MCT_ROOT} - BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR}/mct - CONFIGURE_COMMAND ${MCT_ROOT}/configure ${ENABLE_MPI_SERIAL} --enable-debugging --prefix=${CMAKE_CURRENT_BINARY_DIR} CC=${CMAKE_C_COMPILER} FC=${CMAKE_Fortran_COMPILER} CFLAGS=${CFLAGS} FCFLAGS=${FFLAGS} SRCDIR=${MCT_ROOT} DEBUG="-g" - BUILD_COMMAND $(MAKE) SRCDIR=${MCT_ROOT} - # Leave things in rather than "installing", because we have - # no need to move things around inside of the CMake binary directory. Also, - # mpi-serial doesn't install properly in the out-of-source build - INSTALL_COMMAND : - ) -# This copy_makefiles step is needed because mct currently doesn't support an -# out-of-source build. I am replicating what is done for the CIME system build. -ExternalProject_add_step(mct_project copy_makefiles - DEPENDEES configure - DEPENDERS build - WORKING_DIRECTORY - COMMAND cp -p /Makefile . 
- COMMAND mkdir -p mct - COMMAND cp -p /mct/Makefile mct/ - COMMAND mkdir -p mpeu - COMMAND cp -p /mpeu/Makefile mpeu/ - ) -if (USE_MPI_SERIAL) - ExternalProject_add_step(mct_project copy_mpi_serial_files - DEPENDEES configure - DEPENDERS build - WORKING_DIRECTORY - COMMAND mkdir -p mpi-serial - COMMAND cp -p /mpi-serial/Makefile mpi-serial/ - COMMAND cp /mpi-serial/mpif.h mpi-serial/ - COMMAND cp /mpi-serial/mpi.h mpi-serial/ - ) -endif() - -# Tell cmake to look for libraries & mod files here, because this is where we built libraries -include_directories(${CMAKE_CURRENT_BINARY_DIR}/mct/mct) -include_directories(${CMAKE_CURRENT_BINARY_DIR}/mct/mpeu) -link_directories(${CMAKE_CURRENT_BINARY_DIR}/mct/mct) -link_directories(${CMAKE_CURRENT_BINARY_DIR}/mct/mpeu) -if (USE_MPI_SERIAL) - # We need to list the mpi-serial include directory before system-level - # directories so that we're sure to use mpi-serial's mpif.h instead of - # an mpif.h from a system path. - include_directories(BEFORE ${CMAKE_CURRENT_BINARY_DIR}/mct/mpi-serial) - link_directories(BEFORE ${CMAKE_CURRENT_BINARY_DIR}/mct/mpi-serial) -endif() - -# ------------------------------------------------------------------------ -# Done MCT build -# ------------------------------------------------------------------------ - # Now a bunch of includes for share code. # csm_share (we don't build it here because it seems to be built differently @@ -96,7 +29,6 @@ endif() if (EXISTS ${SRC_ROOT}/share/src) add_subdirectory(${SRC_ROOT}/share/src share_src) - add_subdirectory(${SRC_ROOT}/components/cpl7/mct_shr mct_src) add_subdirectory(${SRC_ROOT}/share/unit_test_stubs/util csm_share_stubs) include_directories(${SRC_ROOT}/share/include) else() @@ -115,9 +47,4 @@ else() endif() # Now the actual test directories. 
-if (EXISTS ${SRC_ROOT}/components/cpl7/driver/unit_test) - add_subdirectory(${SRC_ROOT}/components/cpl7/driver/unit_test unit_test) -else() - add_subdirectory(${SRC_ROOT}/driver-mct/unit_test unit_test) -endif() add_subdirectory(${SRC_ROOT}/share/test/unit ${CMAKE_BINARY_DIR}/unittests) diff --git a/Externals.cfg b/Externals.cfg index b010c3e325e..5efcc075503 100644 --- a/Externals.cfg +++ b/Externals.cfg @@ -1,19 +1,19 @@ [ccs_config] -tag = ccs_config_cesm0.0.57 +tag = ccs_config_cesm0.0.91 protocol = git repo_url = https://github.com/ESMCI/ccs_config_cesm local_path = ccs_config required = True [cmeps] -tag = cmeps0.14.16 +tag = cmeps0.14.49 protocol = git repo_url = https://github.com/ESCOMP/CMEPS.git local_path = components/cmeps required = True [cdeps] -tag = cdeps1.0.7 +tag = cdeps1.0.26 protocol = git repo_url = https://github.com/ESCOMP/CDEPS.git local_path = components/cdeps @@ -21,14 +21,14 @@ externals = Externals_CDEPS.cfg required = True [cpl7] -tag = cpl7.0.15 +tag = cpl77.0.8 protocol = git repo_url = https://github.com/ESCOMP/CESM_CPL7andDataComps local_path = components/cpl7 required = True [share] -tag = share1.0.16 +tag = share1.0.18 protocol = git repo_url = https://github.com/ESCOMP/CESM_share local_path = share @@ -42,7 +42,7 @@ local_path = libraries/mct required = True [parallelio] -tag = pio2_5_10 +tag = pio2_6_2 protocol = git repo_url = https://github.com/NCAR/ParallelIO local_path = libraries/parallelio diff --git a/Externals_cime.cfg b/Externals_cime.cfg new file mode 100644 index 00000000000..04d7306e241 --- /dev/null +++ b/Externals_cime.cfg @@ -0,0 +1,7 @@ +[CIME/non_py/cprnc] +protocol = git +from_submodule=True +required = True + +[externals_description] +schema_version = 1.0.0 diff --git a/doc/source/users_guide/cime-customize.rst b/doc/source/users_guide/cime-customize.rst index ed90e21472a..6431f5c388a 100644 --- a/doc/source/users_guide/cime-customize.rst +++ b/doc/source/users_guide/cime-customize.rst @@ -44,7 +44,6 @@ 
default_short_term_archiving True bool If set to `Tr driver_choices ('mct', 'nuopc') tuple Sets the available driver choices for the model. driver_default nuopc str Sets the default driver for the model. enable_smp True bool If set to `True` then `SMP=` is added to model compile command. -gpus_use_set_device_rank True bool If set to `True` and NGPUS_PER_NODE > 0 then `$RUNDIR/set_device_rank.sh` is appended when the MPI run command is generated. make_case_run_batch_script False bool If set to `True` and case is not a test then `case.run.sh` is created in case directory from `$MACHDIR/template.case.run.sh`. mct_path {srcroot}/libraries/mct str Sets the path to the mct library. serialize_sharedlib_builds True bool If set to `True` then the TestScheduler will use `proc_pool + 1` processors to build shared libraries otherwise a single processor is used. diff --git a/doc/source/users_guide/create-a-case.rst b/doc/source/users_guide/create-a-case.rst index 71b39b5cbd8..c9257da19a6 100644 --- a/doc/source/users_guide/create-a-case.rst +++ b/doc/source/users_guide/create-a-case.rst @@ -207,6 +207,17 @@ As an example, the directory could contain the following files: :: > shell_commands (this would contain ./xmlchange commands) > SourceMods/src.cam/dyncomp.F90 +It is important to note that the file containing the **xmlchange** +commands must be named ``shell_commands`` in order for it to be recognised +and run upon case creation. + +The structure of the component directories do not need to be the +same as in the component source code. As an example, should the user +want to modify the ``src/dynamics/eul/dyncomp.F90`` file within the +CAM source code, the modified file should be put into the directory +``SourceMods/src.cam`` directly. There is no need to mimic the source +code structure, such as ``SourceMods/src.cam/dynamics/eul``. 
+ When the user calls **create_newcase** with the ``--user-mods-dir`` pointing to the full pathname of the directory containing these changes, then the ``CASEROOT`` will be created with these changes applied. diff --git a/doc/source/users_guide/testing.rst b/doc/source/users_guide/testing.rst index ea8c6288749..061c62e3152 100644 --- a/doc/source/users_guide/testing.rst +++ b/doc/source/users_guide/testing.rst @@ -371,29 +371,152 @@ Interpreting test output is pretty easy, looking at an example:: You can see that `create_test <../Tools_user/create_test.html>`_ informs the user of the case directory and of the progress and duration of the various test phases. -=================== -Managing baselines -=================== -.. _`Managing baselines`: +========= +Baselines +========= +.. _`Baselines`: -A big part of testing is managing your baselines (sometimes called gold results). We have provided -tools to help the user do this without having to repeat full runs of test cases with `create_test <../Tools_user/create_test.html>`_ . +A big part of testing is managing your baselines (sometimes called gold results). We have provided tools to help the user do this without having to repeat full runs of test cases with `create_test <../Tools_user/create_test.html>`_ . -bless_test_results: Takes a batch of cases of tests that have already been run and copy their -results to a baseline area. +------------------- +Creating a baseline +------------------- +.. _`Creating a baseline`: -compare_test_results: Takes a batch of cases of tests that have already been run and compare their -results to a baseline area. +A baseline can be generated by passing ``-g`` to `create_test <../Tools_user/create_test.html>`_. 
There are additional options to control generating baselines.:: -Take a batch of results for the jenkins user for the testid 'mytest' and copy the results to -baselines for 'master':: + ./scripts/create_test -b master -g SMS.ne30_f19_g16_rx1.A - ./bless_test_results -r /home/jenkins/e3sm/scratch/jenkins/ -t mytest -b master +-------------------- +Comparing a baseline +-------------------- +.. _`Comparing a baseline`: -Take a batch of results for the jenkins user for the testid 'mytest' and compare the results to -baselines for 'master':: +Comparing the output of a test to a baseline is achieved by passing ``-c`` to `create_test <../Tools_user/create_test.html>`_.:: + + ./scripts/create_test -b master -c SMS.ne30_f19_g16_rx1.A + +------------------ +Managing baselines +------------------ +.. _`Managing baselines`: - ./compare_test_results -r /home/jenkins/e3sm/scratch/jenkins/ -t mytest -b master +Once a baseline has been generated it can be managed using the `bless_test_results <../Tools_user/bless_test_results.html>`_ tool. The tool provides the ability to bless different features of the baseline. The currently supported features are namelist files, history files, and performance metrics. The performance metrics are separated into throughput and memory usage. + +The following command can be used to compare a test to a baseline and bless an update to the history file.:: + + ./CIME/Tools/bless_test_results -b master --hist-only SMS.ne30_f19_g16_rx1.A + +The `compare_test_results <../Tools_user/compare_test_results.html>_` tool can be used to quickly compare tests to baselines and report any `diffs`.:: + + ./CIME/Tools/compare_test_results -b master SMS.ne30_f19_g16_rx1.A + +--------------------- +Performance baselines +--------------------- +.. _`Performance baselines`: +By default performance baselines are generated by parsing the coupler log and comparing the throughput in SYPD (Simulated Years Per Day) and the memory usage high water. 
+ +This can be customized by creating a python module under ``$DRIVER_ROOT/cime_config/customize``. There are four hooks that can be used to customize the generation and comparison. + +- perf_get_throughput +- perf_get_memory +- perf_compare_throughput_baseline +- perf_compare_memory_baseline + +.. + TODO need to add api docs and link +The following pseudo code is an example of this customization.:: + + # $DRIVER/cime_config/customize/perf_baseline.py + + def perf_get_throughput(case): + """ + Parameters + ---------- + case : CIME.case.case.Case + Current case object. + + Returns + ------- + str + Storing throughput value. + str + Open baseline file for writing. + """ + current = analyze_throughput(...) + + return json.dumps(current), "w" + + def perf_get_memory(case): + """ + Parameters + ---------- + case : CIME.case.case.Case + Current case object. + + Returns + ------- + str + Storing memory value. + str + Open baseline file for writing. + """ + current = analyze_memory(case) + + return json.dumps(current), "w" + + def perf_compare_throughput_baseline(case, baseline, tolerance): + """ + Parameters + ---------- + case : CIME.case.case.Case + Current case object. + baseline : str + Baseline throughput value. + tolerance : float + Allowed difference tolerance. + + Returns + ------- + bool + Whether throughput diff is below tolerance. + str + Comments about the results. + """ + current = analyze_throughput(case) + + baseline = json.loads(baseline) + + diff, comments = generate_diff(...) + + return diff, comments + + def perf_compare_memory_baseline(case, baseline, tolerance): + """ + Parameters + ---------- + case : CIME.case.case.Case + Current case object. + baseline : str + Baseline memory value. + tolerance : float + Allowed difference tolerance. + + Returns + ------- + bool + Whether memory diff is below tolerance. + str + Comments about the results. 
+ """ + current = analyze_memory(case) + + baseline = json.loads(baseline) + + diff, comments = generate_diff(...) + + return diff, comments ============= Adding tests @@ -450,54 +573,9 @@ If this test will only be run as a single test, you can now create a test name and follow the individual_ test instructions for create_test. If you want this test to be part of a suite, then it must be described in the relevant testlists_YYY.xml file. -=============================== -CIME's scripts regression tests -=============================== -.. _`CIME's scripts regression tests`: - -**$CIMEROOT/scripts/lib/CIME/tests/scripts_regression_tests.py** is the suite of internal tests we run -for the stand-alone CIME testing. With no arguments, it will run the full suite. You can limit testing to a specific -test class or even a specific test within a test class. - -Run full suite:: - - python scripts/lib/CIME/tests/scripts_regression_tests.py - -Run a test class:: - - python scripts/lib/CIME/tests/scripts_regression_tests.py CIME.tests.test_unit_case - -Run a specific test:: - - python scripts/lib/CIME/tests/scripts_regression_tests.py CIME.tests.test_unit_case.TestCaseSubmit.test_check_case - -If a test fails, the unittest module that drives scripts_regression_tests wil note the failure, but -won't print the output of the test until testing has completed. When there are failures for a -test, the case directories for that test will not be cleaned up so that the user can do a post-mortem -analysis. The user will be notified of the specific directories that will be left for them to -examine. - -The test suite can also be ran with `pytest` and `pytest-cov`. After the test suite is done running -a coverage report will be presented. 
- -Install dependencies:: - - python -m pip install pytest pytest-cov - -Run full suite:: - - pytest -vvv - -Run just unit tests:: - - pytest -vvv scripts/lib/CIME/tests/test_unit* - -Run a test class:: - - pytest -vvv scripts/lib/CIME/tests/test_unit_case.py - -Run a specific test:: - - pytest -vvv scripts/lib/CIME/tests/test_unit_case.py::TestCaseSubmit::test_check_case +====================== +CIME Developer's guide +====================== +.. _`CIME Developer's guide`: -More description can be found in https://github.com/ESCOMP/ctsm/wiki/System-Testing-Guide +The CIME Developer's guide can be found on the project's GitHub `wiki `_. diff --git a/doc/source/users_guide/unit_testing.rst b/doc/source/users_guide/unit_testing.rst index 809a92890fd..af8025a6f44 100644 --- a/doc/source/users_guide/unit_testing.rst +++ b/doc/source/users_guide/unit_testing.rst @@ -35,7 +35,7 @@ These consist of: #. A Python script that provides a simple front end for the CMake-based tests. -The Fortran unit tests use `pFUnit `_, which is a Fortran testing framework that follows conventions of other xUnit frameworks. +The Fortran unit tests use `pFUnit `_, which is a Fortran testing framework that follows conventions of other xUnit frameworks. CIME's support for pFUnit requires pFUnit version 4 or greater. .. _running_unit_tests: @@ -105,22 +105,24 @@ unit testing support by building pFUnit on your machine and then pointing to the build in your ** *MACH*_*COMPILER*.cmake** file. Those processes are described in the following sections. -At a minimum, do a serial build of pFUnit (without MPI or OpenMP) using the default compiler on your machine. -That is the default that **run_tests.py** and that is required for **scripts_regression_tests.py** to run the unit tests on your machine. - -Optionally, you can also provide pFUnit builds with other supported compilers on your machine. -You can also provide additional pFUnit builds with other combinations of MPI and OpenMP on or off. 
-At this time, however, no unit tests require parallel support so no benefit is gained by providing MPI-enabled builds. - Building pFUnit ~~~~~~~~~~~~~~~ -For a serial build of pFUnit, follow these instructions: +Follow the instructions below to build pFUnit using the default compiler on your machine. +That is the default for **run_tests.py** and that is required for **scripts_regression_tests.py** to run the unit tests on your machine. +For the CMake step, we typically build with ``-DSKIP_MPI=YES``, ``-DSKIP_OPENMP=YES`` and ``-DCMAKE_INSTALL_PREFIX`` set to the directory where you want pFUnit to be installed. +(At this time, no unit tests require parallel support, so we build without MPI support to keep things simple.) +Optionally, you can also provide pFUnit builds with other supported compilers on your machine. #. Obtain pFUnit from https://github.com/Goddard-Fortran-Ecosystem/pFUnit (see - https://github.com/Goddard-Fortran-Ecosystem/pFUnit#obtaining-pfunit for details; note - that if you have an older version of cmake you may also need to use an older version of - pFUnit) + https://github.com/Goddard-Fortran-Ecosystem/pFUnit#obtaining-pfunit for details) + +#. Create a directory for the build and cd to that directory: + + .. code-block:: shell + + > mkdir build-dir + > cd build-dir #. Set up your environment to be similar to the environment used in CIME system builds. For example, load the appropriate compilers into your path. @@ -130,11 +132,10 @@ For a serial build of pFUnit, follow these instructions: > $CIMEROOT/CIME/scripts/configure --mpilib mpi-serial - If you are doing an MPI-enabled build, also change the ``--mpilib`` argument. Then source either **./.env_mach_specific.sh** or **./.env_mach_specific.csh**, depending on your shell. 
- On some systems, you may still need to explicitly set the ``FC`` and ``CC`` environment - variables, e.g., with: + On some systems, you may need to explicitly set the ``FC`` and ``CC`` environment + variables so that pFUnit's CMake build picks up the correct compilers, e.g., with: .. code-block:: shell @@ -145,16 +146,14 @@ For a serial build of pFUnit, follow these instructions: .. code-block:: shell - > export PFUNIT=/glade/p/cesmdata/cseg/tools/pFUnit/pFUnit3.2.8_cheyenne_Intel17.0.1_noMPI_noOpenMP + > export PFUNIT=$CESMDATAROOT/tools/pFUnit/pFUnit4.7.0_cheyenne_Intel19.1.1_noMPI_noOpenMP #. Configure and build pFUnit: .. code-block:: shell - > mkdir build - > cd build - > cmake -DMPI=NO -DOPENMP=NO -DCMAKE_INSTALL_PREFIX=$PFUNIT .. - > make -j 4 + > cmake -DSKIP_MPI=YES -DSKIP_OPENMP=YES -DCMAKE_INSTALL_PREFIX=$PFUNIT .. + > make -j 8 #. Run pFUnit's self-tests: @@ -168,22 +167,21 @@ For a serial build of pFUnit, follow these instructions: > make install -You can repeat this process with different compiler environments and/or different choices of ``-DMPI`` and ``-DOPENMP`` in the cmake step. (Each of them can have the value ``NO`` or ``YES``.) +You can repeat this process with different compiler environments. Make sure to choose a different installation directory for each build by setting the ``PFUNIT`` variable differently. -Adding to the xml file -~~~~~~~~~~~~~~~~~~~~~~ +Adding to the appropriate cmake file +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ After you build pFUnit, tell CIME about your build or builds. -To do this, specify the appropriate path(s) using the ``PFUNIT_PATH`` element in ** *MACH*_*COMPILER*.cmake** file. - -The ``MPILIB`` attribute should be either: +To do this, specify the appropriate path using the ``PFUNIT_PATH`` CMake variable in the ** *MACH*_*COMPILER*.cmake** file. 
+For a build with no MPI or openMP support (as recommended above), the block should look like this (with the actual path replaced with the PFUNIT path you specified when doing the build): -* ``mpi-serial`` for a pFUnit build where ``-DMPI=NO``, or + .. code-block:: cmake -* the name of the MPI library you used for a pFUnit build where ``-DMPI=YES``. (For example, you might use ``mpich``, which should be one of the machine's MPI libraries specified by ``MPILIBS`` in **config_machines.xml**.) - -The ``compile_threaded`` attribute should be either ``TRUE`` or ``FALSE`` depending on the value of ``-DOPENMP``. + if (MPILIB STREQUAL mpi-serial AND NOT compile_threaded) + set(PFUNIT_PATH "$ENV{CESMDATAROOT}/tools/pFUnit/pFUnit4.7.0_cheyenne_Intel19.1.1_noMPI_noOpenMP") + endif() Once you have specified the path for your build(s), you should be able to run the unit tests by following the instructions in :ref:`running_unit_tests`. @@ -410,23 +408,12 @@ You can also see examples of the unit test build scripts by viewing the Other pFUnit documentation sources ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Extensive documentation and examples are included in the following when you obtain -pFUnit from https://github.com/Goddard-Fortran-Ecosystem/pFUnit: - -* documentation/pFUnit3-ReferenceManual.pdf - -* Examples/ - -* tests/ - -The tests are tests of the pFUnit code itself, written in pFUnit. They demonstrate -many uses of pFUnit features. Other documentation includes additional assertion -methods that are available. +Unfortunately, the documentation inside the pFUnit repository (in the documentation and Examples directories) is out-of-date (at least as of April, 2023): much of this documentation refers to version 3 of pFUnit, which differs in some ways from version 4. However, some working examples are provided in https://github.com/Goddard-Fortran-Ecosystem/pFUnit_demos. 
Documentation of the unit test build system ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -The CMake build infrastructure is in **$CIMEROOT/src/externals/CMake**. +The CMake build infrastructure is in **$CIMEROOT/CIME/non_py/src/CMake**. The infrastructure for building and running tests with **run_tests.py** is in **$CIMEROOT/scripts/fortran_unit_testing**. That directory also contains general diff --git a/docker/config_machines.xml b/docker/.cime/config_machines.v2.xml similarity index 90% rename from docker/config_machines.xml rename to docker/.cime/config_machines.v2.xml index 83db416a54d..242150d750c 100644 --- a/docker/config_machines.xml +++ b/docker/.cime/config_machines.v2.xml @@ -3,7 +3,7 @@ Docker - docker + LINUX gnu,gnuX @@ -37,6 +37,8 @@ 1 1 + /opt/conda + /opt/conda diff --git a/docker/.cime/config_machines.v3.xml b/docker/.cime/config_machines.v3.xml new file mode 100644 index 00000000000..98a0cba3f66 --- /dev/null +++ b/docker/.cime/config_machines.v3.xml @@ -0,0 +1,7 @@ + + + + + docker + + diff --git a/docker/.cime/docker.cmake b/docker/.cime/docker.cmake new file mode 100644 index 00000000000..c60655fd6be --- /dev/null +++ b/docker/.cime/docker.cmake @@ -0,0 +1,15 @@ +string(APPEND CXXFLAGS " -std=c++14") +string(APPEND CXX_LIBS " -lstdc++") + +# DEBUGGING variables +# get_cmake_property(_variableNames VARIABLES) +# foreach (_variableName ${_variableNames}) +# message("${_variableName}=${${_variableName}}") +# endforeach() +# message( FATAL_ERROR "EXIT") + +# required for grid generation tests that use make +if (CMAKE_SOURCE_DIR MATCHES "^.*TestGridGeneration.*$") +string(APPEND FFLAGS " -I/opt/conda/include") +string(APPEND SLIBS " -L/opt/conda/lib -lnetcdf -lnetcdff") +endif() diff --git a/docker/.cime/docker/config_machines.xml b/docker/.cime/docker/config_machines.xml new file mode 100644 index 00000000000..e15fd7eaa49 --- /dev/null +++ b/docker/.cime/docker/config_machines.xml @@ -0,0 +1,39 @@ + + Docker + LINUX + + gnu,gnuX + openmpi + CIME + 
/storage/timings + CIME + /storage/cases + /storage/inputdata + /storage/inputdata-clmforc + /storage/archive/$CASE + /storage/baselines/$COMPILER + /storage/tools/cprnc + make + 4 + e3sm_developer + none + boutte3@llnl.gov + 8 + 8 + + mpiexec + + -n {{ total_tasks }} + --oversubscribe + + + + $CASEROOT/run + $CASEROOT/bld + + 1 + 1 + /opt/conda + /opt/conda + + diff --git a/docker/Dockerfile b/docker/Dockerfile index dbc1fa4f585..a148d921d4c 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,26 +1,14 @@ ARG MAMBAFORGE_VERSION=4.14.0-0 FROM condaforge/mambaforge:${MAMBAFORGE_VERSION} AS base -ARG PNETCDF_VERSION=1.12.3 -ENV PNETCDF_VERSION=${PNETCDF_VERSION} - -ARG LIBNETCDF_VERSION=4.8.1 -ENV LIBNETCDF_VERSION=${LIBNETCDF_VERSION} - -ARG NETCDF_FORTRAN_VERSION=4.6.0 -ENV NETCDF_FORTRAN_VERSION=${NETCDF_FORTRAN_VERSION} - -ARG ESMF_VERSION=8.4.0 -ENV ESMF_VERSION=${ESMF_VERSION} - -ARG GCC_VERSION=10.* -ENV GCC_VERSION=${GCC_VERSION} - -ENV USER=root -ENV LOGNAME=root - SHELL ["/bin/bash", "-c"] +# First layer as they never change, required for E3SM testing, TODO: fix in unittesting as well +RUN mkdir -p /storage/inputdata/cpl/gridmaps/oQU240 /storage/inputdata/share/domains && \ + wget -O /storage/inputdata/cpl/gridmaps/oQU240/map_oQU240_to_ne4np4_aave.160614.nc https://portal.nersc.gov/project/e3sm/inputdata/cpl/gridmaps/oQU240/map_oQU240_to_ne4np4_aave.160614.nc && \ + wget -O /storage/inputdata/share/domains/domain.ocn.ne4np4_oQU240.160614.nc https://portal.nersc.gov/project/e3sm/inputdata/share/domains/domain.ocn.ne4np4_oQU240.160614.nc && \ + wget -O /storage/inputdata/share/domains/domain.lnd.ne4np4_oQU240.160614.nc https://portal.nersc.gov/project/e3sm/inputdata/share/domains/domain.lnd.ne4np4_oQU240.160614.nc + # Install common packages RUN mamba install --yes -c conda-forge \ cmake \ @@ -33,24 +21,43 @@ RUN mamba install --yes -c conda-forge \ pytest-cov\ pyyaml \ vim \ + rsync \ openssh && \ rm -rf /opt/conda/pkgs/* +# Compilers and libraries 
+ARG LIBNETCDF_VERSION=4.9.1 +ENV LIBNETCDF_VERSION=${LIBNETCDF_VERSION} +ARG NETCDF_FORTRAN_VERSION=* +ENV NETCDF_FORTRAN_VERSION=${NETCDF_FORTRAN_VERSION} +ARG ESMF_VERSION=* +ENV ESMF_VERSION=${ESMF_VERSION} +ARG GCC_VERSION=10.* +ENV GCC_VERSION=${GCC_VERSION} + # Install version locked packages +# gcc, gxx, gfortran provide symlinks for x86_64-conda-linux-gnu-* +# ar and ranlib are not symlinked RUN mamba install --yes -c conda-forge \ + lapack \ + blas \ libnetcdf=${LIBNETCDF_VERSION}=*openmpi* \ netcdf-fortran=${NETCDF_FORTRAN_VERSION}=*openmpi* \ esmf=${ESMF_VERSION}=*openmpi* \ gcc_linux-64=${GCC_VERSION} \ gxx_linux-64=${GCC_VERSION} \ openmpi-mpifort \ - gfortran_linux-64=${GCC_VERSION} && \ + gfortran_linux-64=${GCC_VERSION} \ + gcc \ + gxx \ + gfortran && \ rm -rf /opt/conda/pkgs/* && \ ln -sf /opt/conda/bin/x86_64-conda-linux-gnu-ar /opt/conda/bin/ar && \ - ln -sf /opt/conda/bin/x86_64-conda-linux-gnu-ranlib /opt/conda/bin/ranlib + ln -sf /opt/conda/bin/x86_64-conda-linux-gnu-ranlib /opt/conda/bin/ranlib && \ + cpan install XML::LibXML Switch -# Install cpan packages -RUN cpan install XML::LibXML Switch +ARG PNETCDF_VERSION=1.12.3 +ENV PNETCDF_VERSION=${PNETCDF_VERSION} # Build pnetcdf RUN curl -L -k -o "${PWD}/pnetcdf.tar.gz" \ @@ -70,10 +77,38 @@ RUN curl -L -k -o "${PWD}/pnetcdf.tar.gz" \ make install && \ rm -rf "${PWD}/pnetcdf" -RUN mkdir /root/.cime +# CESM dependencies +ENV CCS_CONFIG_TAG=ccs_config_cesm0.0.88 +ENV CMEPS_TAG=cmeps0.14.47 +ENV CDEPS_TAG=cdeps1.0.26 +ENV CPL7_TAG=cpl77.0.8 +ENV SHARE_TAG=share1.0.18 +ENV MCT_TAG=MCT_2.11.0 +ENV PARALLELIO_TAG=pio2_6_2 + +RUN git clone -b ${CCS_CONFIG_TAG} https://github.com/ESMCI/ccs_config_cesm /src/ccs_config && \ + git clone -b ${CMEPS_TAG} https://github.com/ESCOMP/CMEPS.git /src/components/cmeps && \ + git clone -b ${CDEPS_TAG} https://github.com/ESCOMP/CDEPS.git /src/components/cdeps && \ + git clone -b ${CPL7_TAG} https://github.com/ESCOMP/CESM_CPL7andDataComps /src/components/cpl7 && \ + 
git clone -b ${SHARE_TAG} https://github.com/ESCOMP/CESM_share /src/share && \ + git clone -b ${MCT_TAG} https://github.com/MCSclimate/MCT /src/libraries/mct && \ + git clone -b ${PARALLELIO_TAG} https://github.com/NCAR/ParallelIO /src/libraries/parallelio && \ + mkdir -p /storage/timings + +ARG CIME_BRANCH=master +ARG CIME_REPO=https://github.com/esmci/cime + +# Separate layer, it's most likely to change +RUN git clone -b ${CIME_BRANCH} ${CIME_REPO} /src/cime + +# General variables +ENV USER=root +ENV LOGNAME=container +ENV ESMFMKFILE=/opt/conda/lib/esmf.mk + +WORKDIR /src/cime -COPY config_machines.xml /root/.cime/ -COPY docker.cmake /root/.cime/ +COPY .cime /root/.cime COPY entrypoint.sh /entrypoint.sh ENTRYPOINT [ "/entrypoint.sh" ] diff --git a/docker/docker.cmake b/docker/docker.cmake deleted file mode 100644 index eb95c7113ce..00000000000 --- a/docker/docker.cmake +++ /dev/null @@ -1,71 +0,0 @@ -set(AR "/opt/conda/bin/x86_64-conda-linux-gnu-ar") -string(APPEND CFLAGS " -mcmodel=medium") -if (compile_threaded) - string(APPEND CFLAGS " -fopenmp") -endif() -if (DEBUG) - string(APPEND CFLAGS " -g -Wall -fbacktrace -fcheck=bounds -ffpe-trap=invalid,zero,overflow") -endif() -if (NOT DEBUG) - string(APPEND CFLAGS " -O") -endif() -if (COMP_NAME STREQUAL csm_share) - string(APPEND CFLAGS " -std=c99") -endif() -string(APPEND CXXFLAGS " -std=c++14") -if (compile_threaded) - string(APPEND CXXFLAGS " -fopenmp") -endif() -if (DEBUG) - string(APPEND CXXFLAGS " -g -Wall -fbacktrace") -endif() -if (NOT DEBUG) - string(APPEND CXXFLAGS " -O") -endif() -if (COMP_NAME STREQUAL cism) - string(APPEND CMAKE_OPTS " -D CISM_GNU=ON") -endif() -string(APPEND CMAKE_OPTS " -D CMAKE_AR=/opt/conda/bin/x86_64-conda-linux-gnu-ar") -string(APPEND CMAKE_OPTS " -DCMAKE_Fortran_COMPILER_RANLIB=/opt/conda/bin/x86_64-conda-linux-gnu-ranlib") -string(APPEND CMAKE_OPTS " -DCMAKE_C_COMPILER_RANLIB=/opt/conda/bin/x86_64-conda-linux-gnu-ranlib") -string(APPEND CMAKE_OPTS " 
-DCMAKE_CXX_COMPILER_RANLIB=/opt/conda/bin/x86_64-conda-linux-gnu-ranlib") -string(APPEND CPPDEFS " -DFORTRANUNDERSCORE -DNO_R16 -DCPRGNU") -if (DEBUG) - string(APPEND CPPDEFS " -DYAKL_DEBUG") -endif() -set(SLIBS "-L/opt/conda/lib -lnetcdf -lnetcdff") -set(CXX_LIBS "-lstdc++") -set(CXX_LINKER "FORTRAN") -string(APPEND FC_AUTO_R8 " -fdefault-real-8") -string(APPEND FFLAGS " -I/opt/conda/include -mcmodel=medium -fconvert=big-endian -ffree-line-length-none -ffixed-line-length-none") -if (compile_threaded) - string(APPEND FFLAGS " -fopenmp") -endif() -if (DEBUG) - string(APPEND FFLAGS " -g -Wall -fbacktrace -fcheck=bounds -ffpe-trap=zero,overflow") -endif() -if (NOT DEBUG) - string(APPEND FFLAGS " -O") -endif() -string(APPEND FFLAGS_NOOPT " -O0") -string(APPEND FIXEDFLAGS " -ffixed-form") -string(APPEND FREEFLAGS " -ffree-form") -set(HAS_F2008_CONTIGUOUS "FALSE") -if (compile_threaded) - string(APPEND LDFLAGS " -fopenmp") -endif() -set(SLIBS " -L/opt/conda/lib -lnetcdff -lnetcdf") -set(MPI_PATH "/opt/conda") -set(MPICC "/opt/conda/bin/mpicc") -set(MPICXX "/opt/conda/bin/mpicxx") -set(MPIFC "/opt/conda/bin/mpif90") -set(NETCDF_C_PATH "/opt/conda") -set(NETCDF_FORTRAN_PATH "/opt/conda") -set(PNETCDF_PATH "/opt/conda") -set(SCC "/opt/conda/bin/x86_64-conda-linux-gnu-gcc") -set(SCXX "/opt/conda/bin/x86_64-conda-linux-gnu-g++") -set(SFC "/opt/conda/bin/x86_64-conda-linux-gnu-gfortran") -set(SUPPORTS_CXX "TRUE") -if (CMAKE_Fortran_COMPILER_VERSION VERSION_GREATER_EQUAL 10) - string(APPEND FFLAGS " -fallow-argument-mismatch -fallow-invalid-boz ") -endif() diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh index d6c966c502b..dd0ac467233 100755 --- a/docker/entrypoint.sh +++ b/docker/entrypoint.sh @@ -1,182 +1,114 @@ #!/bin/bash -set -x - -readonly INIT=${INIT:-"true"} -readonly UPDATE_CIME=${UPDATE_CIME:-"false"} -readonly GIT_SHALLOW=${GIT_SHALLOW:-"false"} - -declare -xr CIME_REPO=${CIME_REPO:-https://github.com/ESMCI/cime} -declare -xr 
E3SM_REPO=${E3SM_REPO:-https://github.com/E3SM-Project/E3SM} -declare -xr CESM_REPO=${CESM_REPO:-https://github.com/ESCOMP/CESM} - -####################################### -# Clones git repository -####################################### -function clone_repo() { - local repo="${1}" - local path="${2}" - local branch="${3}" - local extras="" - - if [[ "${GIT_SHALLOW}" == "true" ]] - then - extras="${extras} --depth 1" - fi - - git clone -b "${branch}" ${extras} "${repo}" "${path}" || true -} +DEBUG="${DEBUG:-false}" +SRC_PATH="${SRC_PATH:-`pwd`}" +# Treeless clone +GIT_FLAGS="${GIT_FLAGS:---filter=tree:0}" +# Shallow submodule checkout +GIT_SUBMODULE_FLAGS="${GIT_SUBMODULE_FLAGS:---recommend-shallow}" + +echo "DEBUG = ${DEBUG}" +echo "SRC_PATH = ${SRC_PATH}" +echo "GIT_FLAGS = ${GIT_FLAGS}" +echo "GIT_SUBMODULE_FLAGS = ${GIT_SUBMODULE_FLAGS}" + +if [[ "$(echo ${DEBUG} | tr -s '[:upper:]' '[:lower:]')" == "true" ]] +then + set -x +fi ####################################### # Fixes mct/mpeu to use ARFLAGS environment variable # # TODO need to make an offical PR this is temporary. ####################################### -function fixup_mct { +function fix_mct_arflags { local mct_path="${1}" # TODO make PR to fix if [[ ! -e "${mct_path}/mct/Makefile.bak" ]] then + echo "Fixing AR variable in ${mct_path}/mct/Makefile" + sed -i".bak" "s/\$(AR)/\$(AR) \$(ARFLAGS)/g" "${mct_path}/mct/Makefile" fi if [[ ! 
-e "${mct_path}/mpeu/Makefile.bak" ]] then + echo "Fixing AR variable in ${mct_path}/mpeu/Makefile" + sed -i".bak" "s/\$(AR)/\$(AR) \$(ARFLAGS)/g" "${mct_path}/mpeu/Makefile" fi } ####################################### +# Fixes gitmodules to use https rather than ssh ####################################### -function update_cime() { - local path="${1}" - - if [[ "${UPDATE_CIME}" == "true" ]] - then - echo "Updating CIME using repository ${CIME_REPO} and branch ${CIME_BRANCH}" - - pushd "${path}" - - git remote set-url origin "${CIME_REPO}" - - if [[ "${GIT_SHALLOW}" == "true" ]] - then - git remote set-branches origin "*" - fi - - git fetch origin - - git checkout "${CIME_BRANCH:-master}" - - popd - fi +function fix_gitmodules() { + sed -i".bak" "s/git@github.com:/https:\/\/github.com\//g" "${1}/.gitmodules" } -####################################### -# Creates an environment with E3SM source. -####################################### -function init_e3sm() { - export CIME_MODEL="e3sm" - - local extras="" - local install_path="${INSTALL_PATH:-/src/E3SM}" - local cache_path="${cache_path:-/storage/inputdata}" - - if [[ ! -e "${install_path}" ]] - then - clone_repo "${E3SM_REPO}" "${install_path}" "${E3SM_BRANCH:-master}" - - cd "${install_path}" - - if [[ ! -e "${PWD}/.gitmodules.bak" ]] - then - sed -i".bak" "s/git@github.com:/https:\/\/github.com\//g" "${PWD}/.gitmodules" - fi - - if [[ "${GIT_SHALLOW}" == "true" ]] - then - extras=" --depth 1" - fi - - git submodule update --init ${extras} - fi - - fixup_mct "${install_path}/externals/mct" +if [[ "${CIME_MODEL}" == "e3sm" ]] +then + echo "Setting up E3SM" - update_cime "${install_path}/cime" + [[ ! 
-e "${SRC_PATH}/E3SM" ]] && git clone -b ${E3SM_BRANCH:-master} ${GIT_FLAGS} ${E3SM_REPO:-https://github.com/E3SM-Project/E3SM} "${SRC_PATH}/E3SM" - curl -L --create-dirs \ - -o ${cache_path}/cpl/gridmaps/oQU240/map_oQU240_to_ne4np4_aave.160614.nc \ - https://web.lcrc.anl.gov/public/e3sm/inputdata/cpl/gridmaps/oQU240/map_oQU240_to_ne4np4_aave.160614.nc - curl -L --create-dirs \ - -o ${cache_path}/share/domains/domain.ocn.ne4np4_oQU240.160614.nc \ - https://web.lcrc.anl.gov/public/e3sm/inputdata/share/domains/domain.ocn.ne4np4_oQU240.160614.nc - curl -L --create-dirs \ - -o ${cache_path}/share/domains/domain.lnd.ne4np4_oQU240.160614.nc \ - https://web.lcrc.anl.gov/public/e3sm/inputdata/share/domains/domain.lnd.ne4np4_oQU240.160614.nc + pushd "${SRC_PATH}/E3SM" - cd "${install_path}/cime" -} + git config --global --add safe.directory "${PWD}" -####################################### -# Creates an environment with CESM source. -####################################### -function init_cesm() { - export CIME_MODEL="cesm" + # fix E3SM gitmodules + fix_gitmodules "${PWD}" - local install_path="${INSTALL_PATH:-/src/CESM}" + git status - if [[ ! -e "${install_path}" ]] - then - clone_repo "${CESM_REPO}" "${install_path}" "${CESM_BRANCH:-master}" - fi + # checkout submodules + git submodule update --init "${GIT_SUBMODULE_FLAGS}" - cd "${install_path}" + # fix mct arflags flags + fix_mct_arflags "${SRC_PATH}/E3SM/externals/mct" - "${install_path}/manage_externals/checkout_externals" + pushd cime - fixup_mct "${install_path}/libraries/mct" + # fix CIME gitmodules + fix_gitmodules "${PWD}" - update_cime "${install_path}/cime/" + git config --global --add safe.directory "${PWD}" + git config --global --add safe.directory "${PWD}/CIME/non_py/cprnc" - cd "${install_path}/cime" -} + # checkout submodules + git submodule update --init "${GIT_SUBMODULE_FLAGS}" -####################################### -# Creates an environment with minimal model requirements. 
-# Similar to old github actions environment. -####################################### -function init_cime() { - export CIME_MODEL="cesm" - export ESMFMKFILE="/opt/conda/lib/esmf.mk" - - local install_path="${INSTALL_PATH:-/src/cime}" + # link v2 config_machines + ln -sf /root/.cime/config_machines.v2.xml /root/.cime/config_machines.xml +elif [[ "${CIME_MODEL}" == "cesm" ]] +then + echo "Setting up CESM" - if [[ ! -e "${install_path}" ]] + # copy pre cloned repos to new source path + if [[ "${SRC_PATH}" != "/src/cime" ]] then - clone_repo "${CIME_REPO}" "${install_path}" "${CIME_BRANCH:-master}" + cp -rf /src/ccs_config /src/components /src/libraries /src/share "${SRC_PATH}/../" fi - # required to using checkout_externals script - clone_repo "${CESM_REPO}" "/src/CESM" "${CESM_BRANCH:-master}" - - cd "${install_path}" + git config --global --add safe.directory "${PWD}" + git config --global --add safe.directory "${PWD}/CIME/non_py/cprnc" - "/src/CESM/manage_externals/checkout_externals" + # fix CIME gitmodules + fix_gitmodules "${PWD}" - fixup_mct "${install_path}/libraries/mct" + # update CIME submodules + git submodule update --init "${GIT_SUBMODULE_FLAGS}" - update_cime "${install_path}" + # fix mct argflags + fix_mct_arflags /src/libraries/mct - cd "${install_path}" -} - -if [[ ! -e "${HOME}/.cime" ]] -then - ln -sf "/root/.cime" "${HOME}/.cime" + # link v3 config_machines + ln -sf /root/.cime/config_machines.v3.xml /root/.cime/config_machines.xml fi +# load batch specific entrypoint if [[ -e "/entrypoint_batch.sh" ]] then echo "Sourcing batch entrypoint" @@ -184,17 +116,4 @@ then . 
"/entrypoint_batch.sh" fi -if [[ "${INIT}" == "true" ]] -then - if [[ "${CIME_MODEL}" == "e3sm" ]] - then - init_e3sm - elif [[ "${CIME_MODEL}" == "cesm" ]] - then - init_cesm - else - init_cime - fi - - exec "${@}" -fi +exec "${@}" diff --git a/scripts/fortran_unit_testing/Examples/circle_area/tests/pFUnit/CMakeLists.txt b/scripts/fortran_unit_testing/Examples/circle_area/tests/pFUnit/CMakeLists.txt index 00fe6689977..dac4fec1f30 100644 --- a/scripts/fortran_unit_testing/Examples/circle_area/tests/pFUnit/CMakeLists.txt +++ b/scripts/fortran_unit_testing/Examples/circle_area/tests/pFUnit/CMakeLists.txt @@ -1,4 +1,6 @@ set(sources_needed circle.F90) extract_sources("${sources_needed}" "${circle_area_sources}" test_sources) -create_pFUnit_test(pFunit_circle_area pFunittest_circle_area_exe "test_circle.pf" ${test_sources}) +add_pfunit_ctest(pFunit_circle_area + TEST_SOURCES "test_circle.pf" + OTHER_SOURCES "${test_sources}") diff --git a/scripts/fortran_unit_testing/Examples/circle_area/tests/pFUnit/test_circle.pf b/scripts/fortran_unit_testing/Examples/circle_area/tests/pFUnit/test_circle.pf index 6d557d8af56..b59af13d15c 100644 --- a/scripts/fortran_unit_testing/Examples/circle_area/tests/pFUnit/test_circle.pf +++ b/scripts/fortran_unit_testing/Examples/circle_area/tests/pFUnit/test_circle.pf @@ -1,6 +1,6 @@ module test_circle -use pfunit_mod +use funit use circle, only: circle_area, pi, r8 diff --git a/scripts/fortran_unit_testing/Examples/interpolate_1d/tests/pFUnit/CMakeLists.txt b/scripts/fortran_unit_testing/Examples/interpolate_1d/tests/pFUnit/CMakeLists.txt index f370d036242..1bddfc1cbd4 100644 --- a/scripts/fortran_unit_testing/Examples/interpolate_1d/tests/pFUnit/CMakeLists.txt +++ b/scripts/fortran_unit_testing/Examples/interpolate_1d/tests/pFUnit/CMakeLists.txt @@ -1,2 +1,5 @@ extract_sources("interpolate_1d.F90" "${interpolate_sources}" test_sources) -create_pFUnit_test(pFunit_interpolate pFunit_interpolate_exe "test_interpolate_point.pf" 
${test_sources}) + +add_pfunit_ctest(pFunit_interpolate + TEST_SOURCES "test_interpolate_point.pf" + OTHER_SOURCES "${test_sources}") diff --git a/scripts/fortran_unit_testing/Examples/interpolate_1d/tests/pFUnit/test_interpolate_point.pf b/scripts/fortran_unit_testing/Examples/interpolate_1d/tests/pFUnit/test_interpolate_point.pf index 0e92bf7e686..295aad0228c 100644 --- a/scripts/fortran_unit_testing/Examples/interpolate_1d/tests/pFUnit/test_interpolate_point.pf +++ b/scripts/fortran_unit_testing/Examples/interpolate_1d/tests/pFUnit/test_interpolate_point.pf @@ -1,6 +1,6 @@ module test_interpolate_point -use pfunit_mod +use funit use interpolate_1d diff --git a/scripts/fortran_unit_testing/README b/scripts/fortran_unit_testing/README index 107609b48a1..b91da3e0c65 100644 --- a/scripts/fortran_unit_testing/README +++ b/scripts/fortran_unit_testing/README @@ -130,55 +130,6 @@ Quick guide to the CIME unit testing framework Preprocesses genf90 files and puts them in the output directory. The named list will have generated sources appended to it. -** FindpFUnit - Find module for the pFUnit library - - This is a typical CMake Find module; it defines the following variables - with their conventional CMake meanings: - - - PFUNIT_FOUND - - PFUNIT_LIBRARY - - PFUNIT_LIBRARIES - - PFUNIT_INCLUDE_DIR - - PFUNIT_INCLUDE_DIRS - - Three additional, pFUnit-specific variables are defined: - - - PFUNIT_MODULE_DIR :: Directory with *.mod files. This is already - included in PFUNIT_INCLUDE_DIRS, so you usually shouldn't need - this. - - PFUNIT_DRIVER :: Path to the pFUnit driver source. - - PFUNIT_PARSER :: Path to pFUnitParser.py (the pFUnit preprocessor). - - If run_tests.py can find the pFUnit directory in the cmake_macros, - the variable $PFUNIT will be set to assist the FindpFUnit module. - Otherwise, you must do one of the following: - - - Define the environment variable $PFUNIT with the location of the - installation. - - Put the pFUnit "bin" directory in your $PATH. 
- -** pFUnit_utils - pFUnit preprocessing and driver tools - - This module aims to greatly simplify use of the pFUnit parser and - driver. (Currently, it assumes that both are being used.) - - This module requires the variables defined by the FindpFUnit module. - - - add_pFUnit_executable(name pf_file_list output_directory - fortran_source_list). - - This function automatically processes the .pf files to create tests, - then links them with the Fortran files, pFUnit's library, and the - pFUnit driver to create a test executable with the given name. - - The output_directory is a location where generated sources should be - placed; ${CMAKE_CURRENT_BUILD_DIR} is usually a safe place. - - - define_pFUnit_failure(test_name) - - This tells CTest to detect test success or failure using regular - expressions appropriate for pFUnit. - ** Sourcelist_utils - Utilities for VPATH emulation This module provides functions for working with lists of source code @@ -282,7 +233,7 @@ end program test_driver module test_circle -use pfunit_mod +use funit use circle, only: circle_area, pi, r8 diff --git a/scripts/fortran_unit_testing/run_tests.py b/scripts/fortran_unit_testing/run_tests.py index 904630c6557..6a7df6f1adf 100755 --- a/scripts/fortran_unit_testing/run_tests.py +++ b/scripts/fortran_unit_testing/run_tests.py @@ -1,5 +1,4 @@ #!/usr/bin/env python3 -from __future__ import print_function import os, sys _CIMEROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../..") @@ -109,29 +108,6 @@ def parse_command_line(args): help="""Number of processes to use for build.""", ) - parser.add_argument( - "--use-mpi", - action="store_true", - help="""If specified, run unit tests with an mpi-enabled version - of pFUnit, via mpirun. (Default is to use a serial build without - mpirun.) This requires a pFUnit build with MPI support.""", - ) - - parser.add_argument( - "--mpilib", - help="""MPI Library to use in build. - If not specified, use the default for this machine/compiler. 
- Must match an MPILIB option in config_compilers.xml. - e.g., for cheyenne, can use 'mpt'. - Only relevant if --use-mpi is specified.""", - ) - - parser.add_argument( - "--mpirun-command", - help="""Command to use to run an MPI executable. - If not specified, uses the default for this machine. - Only relevant if --use-mpi is specified.""", - ) parser.add_argument( "--test-spec-dir", default=".", @@ -185,9 +161,6 @@ def parse_command_line(args): args.machine, args.machines_dir, args.make_j, - args.use_mpi, - args.mpilib, - args.mpirun_command, args.test_spec_dir, args.ctest_args, args.use_openmp, @@ -202,7 +175,6 @@ def cmake_stage( test_spec_dir, build_optimized, use_mpiserial, - mpirun_command, output, pfunit_path, cmake_args=None, @@ -246,8 +218,7 @@ def cmake_stage( "-DCIME_CMAKE_MODULE_DIRECTORY=" + os.path.abspath(os.path.join(_CIMEROOT, "CIME", "non_py", "src", "CMake")), "-DCMAKE_BUILD_TYPE=" + build_type, - "-DPFUNIT_MPIRUN='" + mpirun_command + "'", - "-DPFUNIT_PATH=" + pfunit_path, + "-DCMAKE_PREFIX_PATH=" + pfunit_path, ] if use_mpiserial: cmake_command.append("-DUSE_MPI_SERIAL=ON") @@ -329,9 +300,6 @@ def _main(): machine, machines_dir, make_j, - use_mpi, - mpilib, - mpirun_command, test_spec_dir, ctest_args, use_openmp, @@ -391,11 +359,12 @@ def _main(): # Functions to perform various stages of build. # ================================================= - if not use_mpi: - mpilib = "mpi-serial" - elif mpilib is None: - mpilib = machobj.get_default_MPIlib() - logger.info("Using mpilib: {}".format(mpilib)) + # In the switch from pFUnit3 to pFUnit4, we have dropped support for MPI for now + # because it seems like the way this is done differs for pFUnit4 and we weren't + # leveraging the parallel capabilities of pFUnit anyway. So we force mpilib = + # "mpi-serial" and use_mpiserial = True for now until we need to generalize this. 
+ mpilib = "mpi-serial" + use_mpiserial = True if compiler is None: compiler = machobj.get_default_compiler() @@ -454,24 +423,6 @@ def _main(): ) os.environ["NETCDF"] = os.environ["NETCDFROOT"] - if not use_mpi: - mpirun_command = "" - elif mpirun_command is None: - mpi_attribs = { - "compiler": compiler, - "mpilib": mpilib, - "threaded": use_openmp, - "comp_interface": comp_interface, - "unit_testing": True, - } - - # We can get away with specifying case=None since we're using exe_only=True - mpirun_command, _, _, _ = machspecific.get_mpirun( - None, mpi_attribs, None, exe_only=True - ) - mpirun_command = machspecific.get_resolved_value(mpirun_command) - logger.info("mpirun command is '{}'".format(mpirun_command)) - # ================================================= # Run tests. # ================================================= @@ -496,13 +447,11 @@ def _main(): if not os.path.islink("Macros.cmake"): os.symlink(os.path.join(build_dir, "Macros.cmake"), "Macros.cmake") - use_mpiserial = not use_mpi cmake_stage( name, directory, build_optimized, use_mpiserial, - mpirun_command, output, pfunit_path, verbose=verbose, diff --git a/setup.cfg b/setup.cfg index 772767f44b9..1c4058ebd85 100644 --- a/setup.cfg +++ b/setup.cfg @@ -16,7 +16,6 @@ console_scripts = [tool:pytest] junit_family=xunit2 -addopts = --cov=CIME --cov-report term-missing --cov-report html:test_coverage/html --cov-report xml:test_coverage/coverage.xml -s python_files = test_*.py testpaths = CIME/tests diff --git a/tools/mapping/gen_domain_files/INSTALL b/tools/mapping/gen_domain_files/INSTALL index 71d7caefcae..b9d474e0cc9 100644 --- a/tools/mapping/gen_domain_files/INSTALL +++ b/tools/mapping/gen_domain_files/INSTALL @@ -3,7 +3,7 @@ HOW TO BUILD ============ (1) $ cd src -(2) $ ../../../configure --macros-format Makefile --mpilib mpi-serial +(2) $ ../../../../CIME/scripts/configure --macros-format Makefile --mpilib mpi-serial Bash users: (3) $ (. 
./.env_mach_specific.sh ; gmake) csh users: diff --git a/tools/mapping/gen_domain_files/src/Makefile b/tools/mapping/gen_domain_files/src/Makefile index 5a12b7daec5..a6fa89bf9bc 100644 --- a/tools/mapping/gen_domain_files/src/Makefile +++ b/tools/mapping/gen_domain_files/src/Makefile @@ -122,7 +122,7 @@ OBJS := gen_domain.o # Append user defined compiler and load flags to Makefile defaults CFLAGS += $(USER_CFLAGS) -I$(INC_NETCDF) -FFLAGS += $(USER_FFLAGS) -I$(MOD_NETCDF) -I$(INC_NETCDF) +FFLAGS += $(USER_FFLAGS) -I$(MOD_NETCDF) -I$(INC_NETCDF) $(CMAKE_Fortran_FLAGS) LDFLAGS += $(USER_LDFLAGS) # Set user specified linker diff --git a/tools/mapping/gen_domain_files/src/gen_domain.F90 b/tools/mapping/gen_domain_files/src/gen_domain.F90 index f0b97f20e4e..b06f1e7442a 100644 --- a/tools/mapping/gen_domain_files/src/gen_domain.F90 +++ b/tools/mapping/gen_domain_files/src/gen_domain.F90 @@ -53,7 +53,7 @@ program fmain set_omask = .false. ! Make sure we have arguments - nargs = iargc() + nargs = command_argument_count() if (nargs == 0) then write(6,*)'invoke gen_domain -h for usage' stop @@ -64,47 +64,47 @@ program fmain n = 1 do while (n <= nargs) arg = ' ' - call getarg (n, arg) + call get_command_argument (n, arg) n = n + 1 select case (arg) case ('-m') ! input mapping file - call getarg (n, arg) + call get_command_argument (n, arg) n = n + 1 fmap = trim(arg) cmdline = trim(cmdline) // ' -m ' // trim(arg) case ('-o') ! output ocean grid name - call getarg (n, arg) + call get_command_argument (n, arg) n = n + 1 fn1_out = trim(arg) cmdline = trim(cmdline) // ' -o ' // trim(arg) case ('-l') ! output land grid name - call getarg (n, arg) + call get_command_argument (n, arg) n = n + 1 fn2_out = trim(arg) cmdline = trim(cmdline) // ' -l ' // trim(arg) case ('-p') ! set pole on this grid [0,1,2] - call getarg (n, arg) + call get_command_argument (n, arg) n = n + 1 set_fv_pole_yc = ichar(trim(arg))-48 write(6,*)'set_fv_pole_yc is ',set_fv_pole_yc case ('--fminval') ! 
set fminval (min allowable land fraction) - call getarg (n, arg) + call get_command_argument (n, arg) n = n + 1 read(arg,*) fminval case ('--fmaxval') ! set fminval (min allowable land fraction) - call getarg (n, arg) + call get_command_argument (n, arg) n = n + 1 read(arg,*) fmaxval case ('-c') ! user comment - call getarg (n, arg) + call get_command_argument (n, arg) n = n + 1 usercomment = trim(arg) case ('--set-omask') diff --git a/tools/mapping/gen_domain_files/test_gen_domain.sh b/tools/mapping/gen_domain_files/test_gen_domain.sh index b80d482ce13..06e995a8c62 100755 --- a/tools/mapping/gen_domain_files/test_gen_domain.sh +++ b/tools/mapping/gen_domain_files/test_gen_domain.sh @@ -81,12 +81,11 @@ fi # Build the cprnc executable (for comparison of netcdf files) echo "" >> ${test_log} echo "Building cprnc in ${PWD}/builds ..." >> ${test_log} -cp ${cime_root}/CIME/non_py/cprnc/*.F90 . -cp ${cime_root}/CIME/non_py/cprnc/Makefile . -cp ${cime_root}/CIME/non_py/cprnc/Depends . -cp ${cime_root}/CIME/non_py/cprnc/*.in . -(. .env_mach_specific.sh && make GENF90=${cime_root}/CIME/non_py/externals/genf90/genf90.pl) >> ${test_log} 2>&1 -if [ ! -f cprnc ]; then +mkdir ${PWD}/builds/cprnc +cd ${PWD}/builds/cprnc +cmake -DCMAKE_INSTALL_PREFIX=${PWD} ${cime_root}/CIME/non_py/cprnc +make install +if [ ! -f bin/cprnc ]; then echo "ERROR building cprnc" >&2 echo "cat ${test_log} for more info" >&2 exit 1 @@ -104,33 +103,33 @@ for baseline in ${ocn_baseline} ${lnd_baseline}; do # and adding in datestring for current day and .nc file extension. testfile=`basename ${baseline} | rev | cut -d. -f3- | rev`.${datestring}.nc if [ ! -f ${testfile} ]; then - echo "ERROR: ${testfile} not generated" >&2 - echo "cat ${test_log} for more info" >&2 - exit 1 + echo "ERROR: ${testfile} not generated" >&2 + echo "cat ${test_log} for more info" >&2 + exit 1 fi # Compare against baseline and print report from cprnc comparison echo "Comparing $testfile against ${baseline}..." - (. 
builds/.env_mach_specific.sh && ./builds/cprnc -m ${testfile} ${baseline}) >> ${test_log} 2>&1 + (. builds/.env_mach_specific.sh && ./builds/bin/cprnc -m ${testfile} ${baseline}) >> ${test_log} 2>&1 # Check results last=`tail -n3 ${test_log}` if [[ ${last} =~ "STOP" ]]; then - echo ${last} >&2 - echo "Error running cprnc" >&2 - echo "cat ${test_log} for more info" >&2 - exit 1 + echo ${last} >&2 + echo "Error running cprnc" >&2 + echo "cat ${test_log} for more info" >&2 + exit 1 fi if [[ ${last} =~ "DIFFERENT" ]]; then - echo ${last} >&2 - echo ${baseline} DIFFERENT FROM ${testfile} >&2 - echo "cat ${test_log} for more info" >&2 - exit 1 + echo ${last} >&2 + echo ${baseline} DIFFERENT FROM ${testfile} >&2 + echo "cat ${test_log} for more info" >&2 + exit 1 fi if ! [[ ${last} =~ "IDENTICAL" ]]; then - echo ${last} >&2 - echo "undetermined output from cprnc" >&2 - echo "cat ${test_log} for more info" >&2 - exit 1 + echo ${last} >&2 + echo "undetermined output from cprnc" >&2 + echo "cat ${test_log} for more info" >&2 + exit 1 fi done