FloPy benchmarks #293

name: FloPy benchmarks

on:
  schedule:
    - cron: '0 8 * * *' # run at 8 AM UTC (12 am PST)

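# two jobs: `benchmark` runs the pytest-benchmark suite across the OS/Python matrix,
# and `post_benchmark` aggregates the resulting JSON into tables and plots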
jobs:
  benchmark:
    name: Benchmarks
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        os: [ ubuntu-latest, macos-latest, windows-latest ]
        python-version: [ 3.8, 3.9, "3.10", "3.11" ]
        exclude:
          # avoid shutil.copytree infinite recursion bug
          # https://github.com/python/cpython/pull/17098
          - python-version: '3.8.0'
    defaults:
      run:
        shell: bash
    timeout-minutes: 90

    steps:
      - name: Checkout repo
        uses: actions/checkout@v3

      - name: Setup Python
        if: runner.os != 'Windows'
        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}
          cache: 'pip'
          cache-dependency-path: pyproject.toml

      - name: Install Python dependencies
        if: runner.os != 'Windows'
        run: |
          pip install --upgrade pip
          pip install .
          pip install ".[test, optional]"

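      # on Windows, dependencies come from a Micromamba (conda) environment rather than pip;
      # presumably etc/environment.yml pins packages that install more reliably from conda-forge
      # on Windows runners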
      - name: Setup Micromamba
        if: runner.os == 'Windows'
        uses: mamba-org/setup-micromamba@v1
        with:
          environment-file: etc/environment.yml
          cache-environment: true
          cache-downloads: true
          create-args: >-
            python=${{ matrix.python-version }}
          init-shell: >-
            bash
            powershell

      - name: Install extra Python dependencies
        if: runner.os == 'Windows'
        shell: bash -l {0}
        run: |
          pip install xmipy
          pip install .

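      # installs MODFLOW 6 and related executables and adds them to the PATH so the
      # benchmarked models can run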
      - name: Install Modflow executables
        uses: modflowpy/install-modflow-action@v1

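      # --benchmark-only runs only pytest-benchmark tests; --benchmark-json writes a
      # machine-readable result file named per OS/Python combination; --keep-failed is
      # assumed to be a local conftest option (via modflow-devtools) that preserves the
      # working directories of failed tests for the artifact upload below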
      - name: Run benchmarks
        if: runner.os != 'Windows'
        working-directory: ./autotest
        run: |
          mkdir -p .benchmarks
          pytest -v --durations=0 --benchmark-only --benchmark-json .benchmarks/${{ matrix.os }}_python${{ matrix.python-version }}.json --keep-failed=.failed
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

      - name: Run benchmarks
        if: runner.os == 'Windows'
        shell: bash -l {0}
        working-directory: ./autotest
        run: |
          mkdir -p .benchmarks
          pytest -v --durations=0 --benchmark-only --benchmark-json .benchmarks/${{ matrix.os }}_python${{ matrix.python-version }}.json --keep-failed=.failed
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

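      # uploaded only when a benchmark step fails, so the preserved working directories
      # can be inspected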
      - name: Upload failed benchmark artifact
        uses: actions/upload-artifact@v3
        if: failure()
        with:
          name: failed-benchmark-${{ matrix.os }}-${{ matrix.python-version }}-${{ github.run_id }}
          path: |
            ./autotest/.failed/**

      - name: Upload benchmark result artifact
        uses: actions/upload-artifact@v3
        with:
          name: benchmarks-${{ matrix.os }}-${{ matrix.python-version }}-${{ github.run_id }}
          path: |
            ./autotest/.benchmarks/**/*.json

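  # collects benchmark JSON from this run (and from prior runs, via the GitHub API)
  # and aggregates it into CSV tables and plots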
  post_benchmark:
    needs:
      - benchmark
    name: Process benchmark results
    runs-on: ubuntu-latest
    defaults:
      run:
        shell: bash
    timeout-minutes: 10

    steps:
      - name: Checkout repo
        uses: actions/checkout@v3

      - name: Setup Python
        uses: actions/setup-python@v4
        with:
          python-version: 3.8
          cache: 'pip'
          cache-dependency-path: pyproject.toml

      - name: Install Python dependencies
        run: |
          pip install --upgrade pip
          pip install numpy pandas matplotlib seaborn

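      # with no name given, download-artifact fetches every artifact from this run,
      # each into a subdirectory named after the artifact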
      - name: Download all artifacts
        uses: actions/download-artifact@v3
        with:
          path: ./autotest/.benchmarks

      - name: Process benchmark results
        run: |
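          # list all artifacts stored in the repository (across workflow runs) via the GitHub API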
          artifact_json=$(gh api -X GET -H "Accept: application/vnd.github+json" /repos/modflowpy/flopy/actions/artifacts)

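          # select ids of artifacts named like benchmarks-<run id> with inline Python,
          # then download each one as <id>.zip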
          get_artifact_ids="
          import json
          import sys
          from os import linesep

          artifacts = json.load(sys.stdin, strict=False)['artifacts']
          artifacts = [a for a in artifacts if a['name'].startswith('benchmarks-') and a['name'].split('-')[-1].isdigit()]

          print(linesep.join([str(a['id']) for a in artifacts]))
          "
          echo $artifact_json \
            | python -c "$get_artifact_ids" \
            | xargs -I@ bash -c "gh api -H 'Accept: application/vnd.github+json' /repos/modflowpy/flopy/actions/artifacts/@/zip >> ./autotest/.benchmarks/@.zip"

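          # unzip any downloaded archives so their JSON results are available to the processing script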
          zipfiles=( ./autotest/.benchmarks/*.zip )
          if (( ${#zipfiles[@]} )); then
            unzip -o './autotest/.benchmarks/*.zip' -d ./autotest/.benchmarks
          fi

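          # aggregate all JSON results into CSV and plots; the two arguments are assumed to be
          # the input and output directories of scripts/process_benchmarks.py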
          python ./scripts/process_benchmarks.py ./autotest/.benchmarks ./autotest/.benchmarks
        env:
          ARTIFACTS: ${{ steps.run_tests.outputs.artifact_ids }}
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}

      - name: Upload benchmark results
        uses: actions/upload-artifact@v3
        with:
          name: benchmarks-${{ github.run_id }}
          path: |
            ./autotest/.benchmarks/*.csv
            ./autotest/.benchmarks/*.png