diff --git a/.github/workflows/bench-deploy.yml b/.github/workflows/bench-deploy.yml
index 2e8c393be0..85108420e8 100644
--- a/.github/workflows/bench-deploy.yml
+++ b/.github/workflows/bench-deploy.yml
@@ -1,13 +1,16 @@
-name: GPU benchmarks
+name: GPU benchmark on `master`
 on:
-  workflow_dispatch:
-  release:
-    types: [published]
+  push:
+    branches:
+      - master

 jobs:
+  # TODO: Account for different `justfile` and `bench.env` files
+  # One option is to upload them to gh-pages for qualitative comparison
+  # TODO: Fall back to a default if `justfile`/`bench.env` not present
   benchmark:
     name: Bench and deploy
-    runs-on: [self-hosted, gpu-bench-t4]
+    runs-on: [self-hosted, gpu-bench, gh-pages]
     steps:
       # Install deps
       - uses: actions/checkout@v4
@@ -16,14 +19,12 @@ jobs:
       - uses: taiki-e/install-action@v2
         with:
           tool: just@1.15.0
-
       # Set up GPU
       # Check we have access to the machine's Nvidia drivers
       - run: nvidia-smi
       # Check that CUDA is installed with a driver-compatible version
       # This must also be compatible with the GPU architecture, see above link
       - run: nvcc --version
-
       # Run benchmarks and deploy
       - name: Get old benchmarks
         uses: actions/checkout@v4
@@ -34,16 +35,20 @@ jobs:
       - name: Install criterion
         run: cargo install cargo-criterion
       - name: Run benchmarks
-        run: just --dotenv-filename bench.env gpu-bench fibonacci
+        run: just --dotenv-filename bench.env gpu-bench fibonacci_lem
+      # TODO: Prettify labels for easier viewing
+      # Compress the benchmark file and metadata for later analysis
       - name: Compress artifacts
-        run: tar -cvzf ${{ github.sha }}.tar.gz Cargo.lock ${{ github.sha }}.json
+        run: |
+          echo $LABELS > labels.md
+          tar -cvzf ${{ github.sha }}.tar.gz Cargo.lock ${{ github.sha }}.json labels.md
       - name: Deploy latest benchmark report
         uses: peaceiris/actions-gh-pages@v3
         with:
           github_token: ${{ secrets.GITHUB_TOKEN }}
           publish_dir: ./target/criterion
           destination_dir: benchmarks/criterion
-      - name: Move benchmark json to history
+      - name: Copy benchmark json to history
         run: mkdir history; cp ${{ github.sha }}.tar.gz history/
       - name: Deploy benchmark history
         uses: peaceiris/actions-gh-pages@v3
@@ -51,4 +56,4 @@ jobs:
           github_token: ${{ secrets.GITHUB_TOKEN }}
           publish_dir: history/
           destination_dir: benchmarks/history
-          keep_files: true
+          keep_files: true
\ No newline at end of file
diff --git a/.github/workflows/bench-pr-comment.yml b/.github/workflows/bench-pr-comment.yml
index 83071abe07..8078cb662c 100644
--- a/.github/workflows/bench-pr-comment.yml
+++ b/.github/workflows/bench-pr-comment.yml
@@ -35,7 +35,7 @@ jobs:
       - uses: boa-dev/criterion-compare-action@v3
         with:
           # Optional. Compare only this benchmark target
-          benchName: "end2end"
+          benchName: "fibonacci_lem"
           # Needed. The name of the branch to compare with
           branchName: ${{ github.ref_name }}

@@ -43,11 +43,11 @@ jobs:
   # `gh pr checkout {{ github.event.issue.number}}` with `env: GH_TOKEN`
   gpu-benchmark:
     name: run fibonacci benchmark on GPU
-    runs-on: [self-hosted, gpu-bench-t4]
+    runs-on: [self-hosted, gpu-bench]
     if:
       github.event.issue.pull_request
       && github.event.issue.state == 'open'
-      && contains(github.event.comment.body, '!benchmark')
+      && contains(github.event.comment.body, '!gpu-benchmark')
       && (github.event.comment.author_association == 'MEMBER' || github.event.comment.author_association == 'OWNER')
     steps:
       # Set up GPU
@@ -83,7 +83,7 @@ jobs:
       - uses: boa-dev/criterion-compare-action@v3
         with:
           # Optional. Compare only this benchmark target
-          benchName: "fibonacci"
+          benchName: "fibonacci_lem"
           # Optional. Features activated in the benchmark
           features: "cuda"
           # Needed. The name of the branch to compare with
diff --git a/.github/workflows/gpu.yml b/.github/workflows/gpu-ci.yml
similarity index 94%
rename from .github/workflows/gpu.yml
rename to .github/workflows/gpu-ci.yml
index eed43b1bbf..36a8911f62 100644
--- a/.github/workflows/gpu.yml
+++ b/.github/workflows/gpu-ci.yml
@@ -2,9 +2,10 @@ name: GPU tests

 on:
-  push:
-    branches:
-      - master
+  pull_request:
+    types: [opened, synchronize, reopened, ready_for_review]
+    branches: [master]
+  merge_group:

 env:
   CARGO_TERM_COLOR: always
@@ -36,6 +37,7 @@ concurrency:
 jobs:
   cuda:
     name: Rust tests on CUDA
+    if: github.event_name != 'pull_request' || github.event.action == 'enqueued'
     runs-on: [self-hosted, gpu-ci]
     env:
       NVIDIA_VISIBLE_DEVICES: all
@@ -68,6 +70,7 @@ jobs:

   opencl:
     name: Rust tests on OpenCL
+    if: github.event_name != 'pull_request' || github.event.action == 'enqueued'
     runs-on: [self-hosted, gpu-ci]
     env:
       NVIDIA_VISIBLE_DEVICES: all
diff --git a/.github/workflows/merge-group.yml b/.github/workflows/merge-tests.yml
similarity index 74%
rename from .github/workflows/merge-group.yml
rename to .github/workflows/merge-tests.yml
index 4ce3302b5c..bfd358b8e6 100644
--- a/.github/workflows/merge-group.yml
+++ b/.github/workflows/merge-tests.yml
@@ -6,8 +6,6 @@ on:
     types: [opened, synchronize, reopened, ready_for_review]
     branches: [master]
   merge_group:
-  # Manual trigger for early signal on local branches
-  workflow_dispatch:

 concurrency:
   group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
@@ -67,15 +65,13 @@ jobs:
           cargo nextest run --profile ci --workspace --cargo-profile dev-no-assertions -E 'test(circuit::gadgets)'

   # TODO: Make this a required status check
-  # TODO: Cache successful bench run from PR branch on master, keyed on commit hash
-  # Run comparative benchmark against master
+  # Run comparative benchmark against master, reject on regression
   gpu-benchmark:
-    # [TEMPORARY] Test one run before attempting merge
-    #if: github.event_name != 'pull_request' || github.event.action == 'enqueued'
+    if: github.event_name != 'pull_request' || github.event.action == 'enqueued'
     name: Run fibonacci bench on GPU
-    runs-on: [self-hosted, gpu-bench-t4]
+    runs-on: [self-hosted, gpu-bench]
     steps:
-      # TODO: Factor this out into an action or into justfile, it's used in 4 places
+      # TODO: Factor out GPU setup into an action or into justfile, it's used in 4 places
      # Set up GPU
       # Check we have access to the machine's Nvidia drivers
       - run: nvidia-smi
@@ -83,40 +79,37 @@
       # This must also be compatible with the GPU architecture, see above link
       - run: nvcc --version
       - uses: actions/checkout@v4
-      # Checkout base branch for comparative bench
-      - uses: actions/checkout@v4
-        with:
-          ref: master
-      - run: ls -a
-      - name: Set base ref variable
-        run: echo "BASE_REF=$(git rev-parse HEAD)" >> $GITHUB_ENV
-      # Checkout the justfile and env of the source branch so the base can bench
-      - run: git restore --source ${{ github.sha }} justfile bench.env
-      - run: ls -a
       # Install dependencies
       - uses: actions-rs/toolchain@v1
       - uses: Swatinem/rust-cache@v2
       - uses: taiki-e/install-action@v2
         with:
-          tool: just@1
-      # Run benchmark on base branch
+          tool: just@1.15
       - name: Install criterion
         run: |
           cargo install cargo-criterion
           cargo install criterion-table
-      - name: Run GPU bench
-        run: just --dotenv-filename bench.env gpu-bench fibonacci
-      # Switch to triggering branch and run benchmark
-      - run: rm justfile bench.env
+      # Checkout base branch for comparative bench
       - uses: actions/checkout@v4
         with:
-          ref: ${{ github.sha }}
-      - name: Run GPU bench on source branch
-        run: just --dotenv-filename bench.env gpu-bench fibonacci
-      # Create a comparative `criterion-table` and write in commit comment
+          ref: master
+          path: master
+      # Copy the script so the base can bench with the same parameters
+      - name: Copy source script to base branch
+        run: cd benches && cp justfile bench.env ../master/benches
+      - name: Set base ref variable
+        run: cd master && echo "BASE_REF=$(git rev-parse HEAD)" >> $GITHUB_ENV
+      - run: echo ${{ env.BASE_REF }}
+      - name: Run GPU bench on base branch
+        run: cd master/benches && just --dotenv-filename bench.env gpu-bench fibonacci_lem
+      - name: Copy bench output to PR branch
+        run: cp master/${{ env.BASE_REF }}.json .
+      - name: Run GPU bench on PR branch
+        run: cd benches && just --dotenv-filename bench.env gpu-bench fibonacci_lem
+      # Create a `criterion-table` and write in commit comment
       - name: Run `criterion-table`
-        run: cat ${{ env.BASE_REF }}.json ${{ github.sha }}.json | criterion-table > BENCHMARKS.md
-      - name: Write comparative bench on commit comment
+        run: cat ${{ github.sha }}.json | criterion-table > BENCHMARKS.md
+      - name: Write bench on commit comment
         uses: peter-evans/commit-comment@v3
         with:
           body-path: BENCHMARKS.md
@@ -132,3 +125,4 @@ jobs:
         with:
           script: |
             core.setFailed('Fibonacci bench regression detected')
+
diff --git a/bench.env b/benches/bench.env
similarity index 73%
rename from bench.env
rename to benches/bench.env
index a202bbe1b1..b59c73e77a 100644
--- a/bench.env
+++ b/benches/bench.env
@@ -1,9 +1,9 @@
 # Lurk config
 LURK_PERF=max-parallel-simple
 LURK_RC=100,600
-LURK_BENCH_NOISE_THRESHOLD=0.10
+LURK_BENCH_NOISE_THRESHOLD=0.05

 # CUDA config
 NVIDIA_VISIBLE_DEVICES=all
 NVIDIA_DRIVER_CAPABILITITES=compute,utility
-EC_GPU_FRAMEWORK=cuda
\ No newline at end of file
+EC_GPU_FRAMEWORK=cuda
diff --git a/benches/fibonacci.rs b/benches/fibonacci.rs
index e861eb5e5b..ed263f2d15 100644
--- a/benches/fibonacci.rs
+++ b/benches/fibonacci.rs
@@ -1,6 +1,5 @@
 use std::{cell::RefCell, rc::Rc, sync::Arc, time::Duration};
-use anyhow::anyhow;
 use criterion::{
     black_box, criterion_group, criterion_main, measurement, BatchSize, BenchmarkGroup,
     BenchmarkId, Criterion, SamplingMode,
 };
@@ -119,44 +118,14 @@ fn fibo_prove(
     );
 }

-fn rc_env() -> anyhow::Result<Vec<usize>> {
-    std::env::var("LURK_RC")
-        .map_err(|e| anyhow!("Reduction count env var isn't set: {e}"))
-        .and_then(|rc| {
-            let vec: anyhow::Result<Vec<usize>> = rc
-                .split(',')
-                .map(|rc| {
-                    rc.parse::<usize>()
-                        .map_err(|e| anyhow!("Failed to parse RC: {e}"))
-                })
-                .collect();
-            vec
-        })
-}
-
-fn noise_threshold_env() -> anyhow::Result<f64> {
-    std::env::var("LURK_BENCH_NOISE_THRESHOLD")
-        .map_err(|e| anyhow!("Noise threshold env var isn't set: {e}"))
-        .and_then(|nt| {
-            nt.parse::<f64>()
-                .map_err(|e| anyhow!("Failed to parse noise threshold: {e}"))
-        })
-}
-
 fn fibonacci_prove(c: &mut Criterion) {
-    tracing_subscriber::fmt::init();
     set_bench_config();
     tracing::debug!("{:?}", lurk::config::LURK_CONFIG);
-
-    let reduction_counts = rc_env().unwrap_or_else(|_| vec![100]);
-    tracing::debug!("Fibonacci bench RCs: {:?}", &reduction_counts);
+    let reduction_counts = [100, 600, 700, 800, 900];
     let batch_sizes = [100, 200];
-
     let mut group: BenchmarkGroup<'_, _> = c.benchmark_group("Prove");
     group.sampling_mode(SamplingMode::Flat); // This can take a *while*
     group.sample_size(10);
-    group.noise_threshold(noise_threshold_env().unwrap_or(0.05));
-
     let state = State::init_lurk_state().rccell();

     for fib_n in batch_sizes.iter() {
diff --git a/benches/fibonacci_lem.rs b/benches/fibonacci_lem.rs
index ed340e9af7..48ece2ab90 100644
--- a/benches/fibonacci_lem.rs
+++ b/benches/fibonacci_lem.rs
@@ -1,5 +1,6 @@
 use std::{cell::RefCell, rc::Rc, sync::Arc, time::Duration};
+use anyhow::anyhow;
 use criterion::{
     black_box, criterion_group, criterion_main, measurement, BatchSize, BenchmarkGroup,
     BenchmarkId, Criterion, SamplingMode,
 };
@@ -113,14 +114,42 @@ fn fibo_prove(
     );
 }

+fn rc_env() -> anyhow::Result<Vec<usize>> {
+    std::env::var("LURK_RC")
+        .map_err(|e| anyhow!("Reduction count env var isn't set: {e}"))
+        .and_then(|rc| {
+            let vec: anyhow::Result<Vec<usize>> = rc
+                .split(',')
+                .map(|rc| {
+                    rc.parse::<usize>()
+                        .map_err(|e| anyhow!("Failed to parse RC: {e}"))
+                })
+                .collect();
+            vec
+        })
+}
+
+fn noise_threshold_env() -> anyhow::Result<f64> {
+    std::env::var("LURK_BENCH_NOISE_THRESHOLD")
+        .map_err(|e| anyhow!("Noise threshold env var isn't set: {e}"))
+        .and_then(|nt| {
+            nt.parse::<f64>()
+                .map_err(|e| anyhow!("Failed to parse noise threshold: {e}"))
+        })
+}
+
 fn fibonacci_prove(c: &mut Criterion) {
+    tracing_subscriber::fmt::init();
     set_bench_config();
     tracing::debug!("{:?}", lurk::config::LURK_CONFIG);
-    let reduction_counts = [100, 600, 700, 800, 900];
+
+    let reduction_counts = rc_env().unwrap_or_else(|_| vec![100]);
     let batch_sizes = [100, 200];
     let mut group: BenchmarkGroup<'_, _> = c.benchmark_group("Prove");
     group.sampling_mode(SamplingMode::Flat); // This can take a *while*
     group.sample_size(10);
+    group.noise_threshold(noise_threshold_env().unwrap_or(0.05));
+
     let state = State::init_lurk_state().rccell();

     for fib_n in batch_sizes.iter() {
diff --git a/justfile b/benches/justfile
similarity index 96%
rename from justfile
rename to benches/justfile
index 15a9c39abd..5817577721 100644
--- a/justfile
+++ b/benches/justfile
@@ -1,5 +1,5 @@
 # Install with `cargo install just`
-# Usage: `just --dotenv-filename /path/to/file.env `
+# Usage: `just --dotenv-filename /path/to/file.env `
 # TODO: Move dotenv-filename into justfile once the feature is available
 set dotenv-load

@@ -28,7 +28,7 @@ gpu-bench +benches:
   env | grep -E "LURK|EC_GPU|CUDA"
   if [ '{{benches}}' != '' ]; then
     for bench in {{benches}}; do
-      cargo criterion --bench $bench --features "cuda" --message-format=json 2>&1 > {{commit}}.json
+      cargo criterion --bench $bench --features "cuda" --message-format=json 2>&1 > ../{{commit}}.json
     done
   else
     echo "Invalid input, enter at least one non-empty string"