name: FloPy benchmarks
on:
  schedule:
    - cron: '0 8 * * *' # run at 8 AM UTC (12 am PST)
jobs:
  benchmark:
    name: Benchmarks
    runs-on: ${{ matrix.os }}
    strategy:
      fail-fast: false
      matrix:
        os: [ ubuntu-latest, macos-latest, windows-latest ]
        python-version: [ 3.8, 3.9, "3.10", "3.11", "3.12" ]
        exclude:
          # avoid shutil.copytree infinite recursion bug
          # https://github.com/python/cpython/pull/17098
          - python-version: '3.8.0'
    defaults:
      run:
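        # a login shell (-l) is needed so the shell init written by
        # setup-micromamba is sourced and the environment is activated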
        shell: bash -l {0}
    timeout-minutes: 90
    steps:
      - name: Checkout repo
        uses: actions/checkout@v4
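      # create the test environment from etc/environment.yml, caching the
      # environment and package downloads to speed up subsequent runs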
      - name: Setup Micromamba
        uses: mamba-org/setup-micromamba@v1
        with:
          environment-file: etc/environment.yml
          cache-environment: true
          cache-downloads: true
          create-args: >-
            python=${{ matrix.python-version }}
          init-shell: >-
            bash
            powershell
      - name: Install FloPy
        run: pip install .
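      # install the MODFLOW programs needed to run the benchmarked models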
      - name: Install Modflow executables
        uses: modflowpy/install-modflow-action@v1
      - name: Run benchmarks
        working-directory: autotest
        run: |
          mkdir -p .benchmarks
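          # run only pytest-benchmark tests, writing timings to a JSON file
          # named for this OS/Python combination; keep failed test output in
          # .failed for the artifact upload below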
          pytest -v --durations=0 --benchmark-only --benchmark-json .benchmarks/${{ matrix.os }}_python${{ matrix.python-version }}.json --keep-failed=.failed
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Upload failed benchmark artifact
        uses: actions/upload-artifact@v4
        if: failure()
        with:
          name: failed-benchmark-${{ matrix.os }}-${{ matrix.python-version }}-${{ github.run_id }}
          path: autotest/.failed/**
      - name: Upload benchmark result artifact
        uses: actions/upload-artifact@v4
        with:
          name: benchmarks-${{ matrix.os }}-${{ matrix.python-version }}-${{ github.run_id }}
          path: autotest/.benchmarks/**/*.json
  post_benchmark:
    needs:
      - benchmark
    name: Process benchmark results
    runs-on: ubuntu-latest
    defaults:
      run:
        shell: bash
    timeout-minutes: 10
    steps:
      - name: Checkout repo
        uses: actions/checkout@v4
      - name: Setup Python
        uses: actions/setup-python@v5
        with:
          python-version: 3.12
          cache: 'pip'
          cache-dependency-path: pyproject.toml
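      # analysis and plotting dependencies used by scripts/process_benchmarks.py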
      - name: Install Python dependencies
        run: |
          pip install --upgrade pip
          pip install numpy pandas matplotlib seaborn
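      # download this run's benchmark artifacts into autotest/.benchmarks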
      - name: Download all artifacts
        uses: actions/download-artifact@v4
        with:
          path: autotest/.benchmarks
      - name: Process benchmark results
        run: |
          repo="${{ github.repository }}"
          path="autotest/.benchmarks"
          # list benchmark artifacts
          artifact_json=$(gh api -X GET -H "Accept: application/vnd.github+json" /repos/$repo/actions/artifacts)
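          # note: gh api returns only the first page of results (30 items) by
          # default; add --paginate if older artifacts should be included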
          # get artifact ids and download artifacts
          get_artifact_ids="
          import json
          import sys
          from os import linesep
          artifacts = json.load(sys.stdin, strict=False)['artifacts']
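          # artifact names look like benchmarks-<os>-<python>-<run_id>, so keep
          # names whose trailing dash-separated token is a numeric run id; this
          # also matches aggregate artifacts uploaded by previous runs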
          artifacts = [a for a in artifacts if a['name'].startswith('benchmarks-') and a['name'].split('-')[-1].isdigit()]
          print(linesep.join([str(a['id']) for a in artifacts]))
          "
          echo "$artifact_json" \
            | python -c "$get_artifact_ids" \
            | xargs -I@ bash -c "gh api -H 'Accept: application/vnd.github+json' /repos/$repo/actions/artifacts/@/zip >> $path/@.zip"
          # unzip artifacts (nullglob so the guard correctly skips when no zips exist)
          shopt -s nullglob
          zipfiles=( $path/*.zip )
          if (( ${#zipfiles[@]} )); then
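            # unzip expands the quoted glob itself, extracting every archive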
            unzip -o "$path/*.zip" -d $path
          fi
          # process benchmarks
          python scripts/process_benchmarks.py $path $path
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Upload benchmark results
        uses: actions/upload-artifact@v4
        with:
          name: benchmarks-${{ github.run_id }}
          path: |
            autotest/.benchmarks/*.csv
            autotest/.benchmarks/*.png