diff --git a/Cargo.toml b/Cargo.toml index d4a3aaac1b..c1010eb136 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -175,6 +175,10 @@ harness = false name = "fibonacci" harness = false +[[bench]] +name = "fibonacci_lem" +harness = false + [[bench]] name = "synthesis" harness = false diff --git a/benches/fibonacci.rs b/benches/fibonacci.rs index 81e5ae04b2..ed263f2d15 100644 --- a/benches/fibonacci.rs +++ b/benches/fibonacci.rs @@ -5,263 +5,136 @@ use criterion::{ BenchmarkId, Criterion, SamplingMode, }; +use pasta_curves::pallas; + use lurk::{ - eval::lang::{Coproc, Lang}, - field::LurkField, - proof::{ - nova::{NovaProver, Proof}, - Prover, + circuit::circuit_frame::MultiFrame, + eval::{ + empty_sym_env, + lang::{Coproc, Lang}, }, + field::LurkField, + proof::nova::NovaProver, + proof::Prover, + ptr::Ptr, public_parameters::{ instance::{Instance, Kind}, public_params, }, state::State, + store::Store, }; -use pasta_curves::pallas; mod common; use common::set_bench_config; -#[allow(clippy::upper_case_acronyms)] -#[derive(Copy, Debug, Clone, PartialEq, Eq)] -enum Version { - ALPHA, - LEM, +fn fib(store: &Store, state: Rc>, _a: u64) -> Ptr { + let program = r#" +(letrec ((next (lambda (a b) (next b (+ a b)))) + (fib (next 0 1))) + (fib)) +"#; + + store.read_with_state(state, program).unwrap() +} + +// The env output in the `fib_frame`th frame of the above, infinite Fibonacci computation will contain a binding of the +// nth Fibonacci number to `a`. +// means of computing it.] +fn fib_frame(n: usize) -> usize { + 11 + 16 * n +} + +// Set the limit so the last step will be filled exactly, since Lurk currently only pads terminal/error continuations. +fn fib_limit(n: usize, rc: usize) -> usize { + let frame = fib_frame(n); + rc * (frame / rc + usize::from(frame % rc != 0)) } #[derive(Clone, Debug, Copy)] -pub struct ProveParams { - folding_steps: usize, +struct ProveParams { + fib_n: usize, reduction_count: usize, - version: Version, } impl ProveParams { fn name(&self) -> String { - format!("{:?},rc={}", self.version, self.reduction_count) - } -} - -mod alpha { - use lurk::{circuit::circuit_frame::MultiFrame, eval::empty_sym_env, ptr::Ptr, store::Store}; - - use super::*; - - fn fib(store: &Store, state: Rc>) -> Ptr { - let program = r#" - (letrec ((next (lambda (a b) (next b (+ a b)))) - (fib (next 0 1))) - (fib)) - "#; - - store.read_with_state(state, program).unwrap() - } - - pub fn prove( - prove_params: ProveParams, - c: &mut BenchmarkGroup<'_, M>, - state: &Rc>, - ) { - let ProveParams { - folding_steps, - reduction_count, - version, - } = prove_params; - - assert_eq!(version, Version::ALPHA); - let limit = reduction_count * (folding_steps + 1); - - // Track the number of `folded iterations / sec` - c.throughput(criterion::Throughput::Elements( - (reduction_count * folding_steps) as u64, - )); - - let lang_pallas = Lang::>::new(); - let lang_rc = Arc::new(lang_pallas.clone()); - - // use cached public params - let instance = Instance::new( - reduction_count, - lang_rc.clone(), - true, - Kind::NovaPublicParams, - ); - let pp = public_params::<_, _, MultiFrame<'_, _, _>>(&instance).unwrap(); - let date = env!("VERGEN_GIT_COMMIT_DATE"); let sha = env!("VERGEN_GIT_SHA"); - let parameter = format!("{},{},steps={}", date, sha, folding_steps); - - c.bench_with_input( - BenchmarkId::new(prove_params.name(), parameter), - &prove_params, - |b, prove_params| { - let store = Store::default(); - - let env = empty_sym_env(&store); - let ptr = fib::(&store, state.clone()); - let prover = 
NovaProver::new(prove_params.reduction_count, lang_pallas.clone()); - - let frames = &prover - .get_evaluation_frames(ptr, env, &store, limit, lang_rc.clone()) - .unwrap(); - - // Here we split the proving step by first generating the recursive snark, - // then have `criterion` only bench the rest of the folding steps - let (recursive_snark, circuits, z0, _zi, _num_steps) = prover - .recursive_snark(&pp, frames, &store, &lang_rc) - .unwrap(); - - b.iter_batched( - || (recursive_snark.clone(), z0.clone(), lang_rc.clone()), - |(recursive_snark, z0, lang_rc)| { - let result = Proof::prove_recursively( - &pp, - &store, - Some(recursive_snark), - &circuits, - reduction_count, - z0, - lang_rc, - ); - let _ = black_box(result); - }, - BatchSize::LargeInput, - ) - }, - ); + format!("{date}:{sha}:Fibonacci-rc={}", self.reduction_count) } } -mod lem { - use lurk::lem::{eval::evaluate, multiframe::MultiFrame, pointers::Ptr, store::Store}; - - use super::*; - - fn fib(store: &Store, state: Rc>) -> Ptr { - let program = r#" -(letrec ((next (lambda (a b) (next b (+ a b)))) - (fib (next 0 1))) - (fib)) -"#; - - store.read(state, program).unwrap() - } - - pub fn prove( - prove_params: ProveParams, - c: &mut BenchmarkGroup<'_, M>, - state: &Rc>, - ) { - let ProveParams { - folding_steps, - reduction_count, - version, - } = prove_params; - - assert_eq!(version, Version::LEM); - let limit = reduction_count * (folding_steps + 1); - - // Track the number of `folded iterations / sec` - c.throughput(criterion::Throughput::Elements( - (reduction_count * folding_steps) as u64, - )); - - let lang_pallas = Lang::>::new(); - let lang_rc = Arc::new(lang_pallas.clone()); - - // use cached public params - let instance: Instance< - '_, - pasta_curves::Fq, - Coproc, - MultiFrame<'_, pasta_curves::Fq, Coproc>, - > = Instance::new( - reduction_count, - lang_rc.clone(), - true, - Kind::NovaPublicParams, - ); - let pp = public_params::<_, _, MultiFrame<'_, _, _>>(&instance).unwrap(); - - let date = env!("VERGEN_GIT_COMMIT_DATE"); - let sha = env!("VERGEN_GIT_SHA"); - let parameter = format!("{},{},steps={}", date, sha, folding_steps); - - c.bench_with_input( - BenchmarkId::new(prove_params.name(), parameter), - &prove_params, - |b, prove_params| { - let store = Store::default(); - - let ptr = fib::(&store, state.clone()); - let prover = NovaProver::new(prove_params.reduction_count, lang_pallas.clone()); - - let frames = &evaluate::>( - None, ptr, &store, limit, - ) - .unwrap() - .0; - - // Here we split the proving step by first generating the recursive snark, - // then have `criterion` only bench the rest of the folding steps - let (recursive_snark, circuits, z0, _zi, _num_steps) = prover - .recursive_snark(&pp, frames, &store, &lang_rc) - .unwrap(); - - b.iter_batched( - || (recursive_snark.clone(), z0.clone(), lang_rc.clone()), - |(recursive_snark, z0, lang_rc)| { - let result = Proof::prove_recursively( - &pp, - &store, - Some(recursive_snark), - &circuits, - reduction_count, - z0, - lang_rc, - ); - let _ = black_box(result); - }, - BatchSize::LargeInput, - ) - }, - ); - } +fn fibo_prove( + prove_params: ProveParams, + c: &mut BenchmarkGroup<'_, M>, + state: &Rc>, +) { + let ProveParams { + fib_n, + reduction_count, + } = prove_params; + + let limit = fib_limit(fib_n, reduction_count); + let lang_pallas = Lang::>::new(); + let lang_rc = Arc::new(lang_pallas.clone()); + + // use cached public params + let instance = Instance::new( + reduction_count, + lang_rc.clone(), + true, + Kind::NovaPublicParams, + ); + let pp = 
public_params::<_, _, MultiFrame<'_, _, _>>(&instance).unwrap(); + + c.bench_with_input( + BenchmarkId::new(prove_params.name(), fib_n), + &prove_params, + |b, prove_params| { + let store = Store::default(); + + let env = empty_sym_env(&store); + let ptr = fib::( + &store, + state.clone(), + black_box(prove_params.fib_n as u64), + ); + let prover = NovaProver::new(prove_params.reduction_count, lang_pallas.clone()); + + let frames = &prover + .get_evaluation_frames(ptr, env, &store, limit, lang_rc.clone()) + .unwrap(); + + b.iter_batched( + || (frames, lang_rc.clone()), + |(frames, lang_rc)| { + let result = prover.prove(&pp, frames, &store, &lang_rc); + let _ = black_box(result); + }, + BatchSize::LargeInput, + ) + }, + ); } -fn fib_bench(c: &mut Criterion) { +fn fibonacci_prove(c: &mut Criterion) { set_bench_config(); tracing::debug!("{:?}", lurk::config::LURK_CONFIG); let reduction_counts = [100, 600, 700, 800, 900]; - let folding_step_sizes = [2, 4, 8]; - - let mut group: BenchmarkGroup<'_, _> = c.benchmark_group("Fibonacci"); + let batch_sizes = [100, 200]; + let mut group: BenchmarkGroup<'_, _> = c.benchmark_group("Prove"); group.sampling_mode(SamplingMode::Flat); // This can take a *while* group.sample_size(10); - let state = State::init_lurk_state().rccell(); - for folding_steps in folding_step_sizes.iter() { - for reduction_count in reduction_counts.iter() { - let alpha_params = ProveParams { - folding_steps: *folding_steps, - reduction_count: *reduction_count, - version: Version::ALPHA, - }; - alpha::prove(alpha_params, &mut group, &state); - } - } - - for folding_steps in folding_step_sizes.iter() { + for fib_n in batch_sizes.iter() { for reduction_count in reduction_counts.iter() { - let lem_params = ProveParams { - folding_steps: *folding_steps, + let prove_params = ProveParams { + fib_n: *fib_n, reduction_count: *reduction_count, - version: Version::LEM, }; - lem::prove(lem_params, &mut group, &state); + fibo_prove(prove_params, &mut group, &state); } } } @@ -275,7 +148,7 @@ cfg_if::cfg_if! { .sample_size(10) .with_profiler(pprof::criterion::PProfProfiler::new(100, pprof::criterion::Output::Flamegraph(None))); targets = - fib_bench, + fibonacci_prove, } } else { criterion_group! { @@ -284,7 +157,7 @@ cfg_if::cfg_if! { .measurement_time(Duration::from_secs(120)) .sample_size(10); targets = - fib_bench, + fibonacci_prove, } } } diff --git a/benches/fibonacci_lem.rs b/benches/fibonacci_lem.rs new file mode 100644 index 0000000000..ed340e9af7 --- /dev/null +++ b/benches/fibonacci_lem.rs @@ -0,0 +1,160 @@ +use std::{cell::RefCell, rc::Rc, sync::Arc, time::Duration}; + +use criterion::{ + black_box, criterion_group, criterion_main, measurement, BatchSize, BenchmarkGroup, + BenchmarkId, Criterion, SamplingMode, +}; + +use pasta_curves::pallas; + +use lurk::{ + eval::lang::{Coproc, Lang}, + field::LurkField, + lem::{eval::evaluate, multiframe::MultiFrame, pointers::Ptr, store::Store}, + proof::nova::NovaProver, + proof::Prover, + public_parameters::{ + instance::{Instance, Kind}, + public_params, + }, + state::State, +}; + +mod common; +use common::set_bench_config; + +fn fib(store: &Store, state: Rc>, _a: u64) -> Ptr { + let program = r#" +(letrec ((next (lambda (a b) (next b (+ a b)))) + (fib (next 0 1))) + (fib)) +"#; + + store.read(state, program).unwrap() +} + +// The env output in the `fib_frame`th frame of the above, infinite Fibonacci computation will contain a binding of the +// nth Fibonacci number to `a`. +// means of computing it.] 
+fn fib_frame(n: usize) -> usize { + 11 + 16 * n +} + +// Set the limit so the last step will be filled exactly, since Lurk currently only pads terminal/error continuations. +fn fib_limit(n: usize, rc: usize) -> usize { + let frame = fib_frame(n); + rc * (frame / rc + usize::from(frame % rc != 0)) +} + +#[derive(Clone, Debug, Copy)] +struct ProveParams { + fib_n: usize, + reduction_count: usize, +} + +impl ProveParams { + fn name(&self) -> String { + let date = env!("VERGEN_GIT_COMMIT_DATE"); + let sha = env!("VERGEN_GIT_SHA"); + format!("{date}:{sha}:Fibonacci-LEM-rc={}", self.reduction_count) + } +} + +fn fibo_prove( + prove_params: ProveParams, + c: &mut BenchmarkGroup<'_, M>, + state: &Rc>, +) { + let ProveParams { + fib_n, + reduction_count, + } = prove_params; + + let limit = fib_limit(fib_n, reduction_count); + let lang_pallas = Lang::>::new(); + let lang_rc = Arc::new(lang_pallas.clone()); + + // use cached public params + let instance = Instance::new( + reduction_count, + lang_rc.clone(), + true, + Kind::NovaPublicParams, + ); + let pp = public_params::<_, _, MultiFrame<'_, _, _>>(&instance).unwrap(); + + c.bench_with_input( + BenchmarkId::new(prove_params.name(), fib_n), + &prove_params, + |b, prove_params| { + let store = Store::default(); + + let ptr = fib::( + &store, + state.clone(), + black_box(prove_params.fib_n as u64), + ); + let prover = NovaProver::new(prove_params.reduction_count, lang_pallas.clone()); + + let frames = + &evaluate::>(None, ptr, &store, limit) + .unwrap() + .0; + + b.iter_batched( + || (frames, lang_rc.clone()), + |(frames, lang_rc)| { + let result = prover.prove(&pp, frames, &store, &lang_rc); + let _ = black_box(result); + }, + BatchSize::LargeInput, + ) + }, + ); +} + +fn fibonacci_prove(c: &mut Criterion) { + set_bench_config(); + tracing::debug!("{:?}", lurk::config::LURK_CONFIG); + let reduction_counts = [100, 600, 700, 800, 900]; + let batch_sizes = [100, 200]; + let mut group: BenchmarkGroup<'_, _> = c.benchmark_group("Prove"); + group.sampling_mode(SamplingMode::Flat); // This can take a *while* + group.sample_size(10); + let state = State::init_lurk_state().rccell(); + + for fib_n in batch_sizes.iter() { + for reduction_count in reduction_counts.iter() { + let prove_params = ProveParams { + fib_n: *fib_n, + reduction_count: *reduction_count, + }; + fibo_prove(prove_params, &mut group, &state); + } + } +} + +cfg_if::cfg_if! { + if #[cfg(feature = "flamegraph")] { + criterion_group! { + name = benches; + config = Criterion::default() + .measurement_time(Duration::from_secs(120)) + .sample_size(10) + .with_profiler(pprof::criterion::PProfProfiler::new(100, pprof::criterion::Output::Flamegraph(None))); + targets = + fibonacci_prove, + } + } else { + criterion_group! 
{ + name = benches; + config = Criterion::default() + .measurement_time(Duration::from_secs(120)) + .sample_size(10); + targets = + fibonacci_prove, + } + } +} + +criterion_main!(benches); diff --git a/src/proof/nova.rs b/src/proof/nova.rs index bbde499690..a0fd883bcc 100644 --- a/src/proof/nova.rs +++ b/src/proof/nova.rs @@ -306,13 +306,17 @@ where store: &'a M::Store, lang: &Arc>, ) -> Result<(Proof<'a, F, C, M>, Vec, Vec, usize), ProofError> { - let (recursive_snark, circuits, z0, zi, num_steps) = - self.recursive_snark(pp, frames, store, lang)?; + store.hydrate_z_cache(); + let z0 = M::io_to_scalar_vector(store, frames[0].input()).map_err(|e| e.into())?; + let zi = + M::io_to_scalar_vector(store, frames.last().unwrap().output()).map_err(|e| e.into())?; + let folding_config = Arc::new(FoldingConfig::new_ivc(lang.clone(), self.reduction_count())); + let circuits = M::from_frames(self.reduction_count(), frames, store, &folding_config); + let num_steps = circuits.len(); let proof = Proof::prove_recursively( pp, store, - Some(recursive_snark), &circuits, self.reduction_count, z0.clone(), @@ -342,53 +346,6 @@ where )?; self.prove(pp, &frames, store, lang) } - - /// Returns the first step of the [RecursiveSNARK] to be proved - pub fn recursive_snark( - &self, - pp: &PublicParams, - frames: &[M::EvalFrame], - store: &'a M::Store, - lang: &Arc>, - ) -> Result< - ( - RecursiveSNARK, G2, M, C2>, - Vec, - Vec, - Vec, - usize, - ), - ProofError, - > { - store.hydrate_z_cache(); - let z0 = M::io_to_scalar_vector(store, frames[0].input()).map_err(|e| e.into())?; - let zi = - M::io_to_scalar_vector(store, frames.last().unwrap().output()).map_err(|e| e.into())?; - - let folding_config = Arc::new(FoldingConfig::new_ivc(lang.clone(), self.reduction_count())); - let circuits = M::from_frames(self.reduction_count(), frames, store, &folding_config); - - assert!(!circuits.is_empty()); - assert_eq!(circuits[0].arity(), z0.len()); - let num_steps = circuits.len(); - - let z0_primary = &z0; - let z0_secondary = Proof::::z0_secondary(); - - Ok(( - RecursiveSNARK::new( - &pp.pp, - &circuits[0], - &TrivialCircuit::default(), - z0_primary.clone(), - z0_secondary.clone(), - ), - circuits, - z0, - zi, - num_steps, - )) - } } impl<'a, F: CurveCycleEquipped, C: Coprocessor, M: MultiFrameTrait<'a, F, C>> Proof<'a, F, C, M> @@ -401,7 +358,6 @@ where pub fn prove_recursively( pp: &PublicParams, store: &M::Store, - recursive_snark: Option, G2, M, C2>>, circuits: &[M], num_iters_per_step: usize, z0: Vec, @@ -422,7 +378,7 @@ where tracing::debug!("circuits.len: {}", circuits.len()); // produce a recursive SNARK - let mut recursive_snark: Option, G2, M, C2>> = recursive_snark; + let mut recursive_snark: Option, G2, M, C2>> = None; // the shadowing here is voluntary let recursive_snark = if lurk_config(None, None) diff --git a/src/proof/supernova.rs b/src/proof/supernova.rs index 98bf73f6bc..67037601bd 100644 --- a/src/proof/supernova.rs +++ b/src/proof/supernova.rs @@ -124,7 +124,6 @@ where #[tracing::instrument(skip_all, name = "supernova::prove_recursively")] pub fn prove_recursively( pp: &PublicParams, - recursive_snark: Option, G2>>, _store: &M::Store, nivc_steps: &[M], z0: Vec, @@ -132,7 +131,7 @@ where // Is this assertion strictly necessary? 
        assert!(!nivc_steps.is_empty());
 
-        let mut recursive_snark_option: Option<RecursiveSNARK<G1<F>, G2<F>>> = recursive_snark;
+        let mut recursive_snark_option: Option<RecursiveSNARK<G1<F>, G2<F>>> = None;
 
         let z0_primary = z0;
         let z0_secondary = Self::z0_secondary();
@@ -282,7 +281,7 @@ where
         let num_steps = nivc_steps.len();
 
         let (proof, last_running_claim) =
-            Proof::prove_recursively(pp, None, store, &nivc_steps, z0.clone())?;
+            Proof::prove_recursively(pp, store, &nivc_steps, z0.clone())?;
 
         Ok((proof, z0, zi, num_steps, last_running_claim))
     }
@@ -308,45 +307,6 @@ where
         info!("got {} evaluation frames", frames.len());
         self.prove(pp, &frames, store, lang)
     }
-
-    /// Returns the first step of the [RecursiveSNARK] to be proved
-    pub fn recursive_snark(
-        &self,
-        pp: &PublicParams,
-        frames: &[M::EvalFrame],
-        store: &'a M::Store,
-        lang: &Arc<Lang<F, C>>,
-    ) -> Result<(RecursiveSNARK<G1<F>, G2<F>>, Vec<F>, Vec<F>, usize), ProofError> {
-        store.hydrate_z_cache();
-        let z0 = M::io_to_scalar_vector(store, frames[0].input()).map_err(|e| e.into())?;
-        let zi =
-            M::io_to_scalar_vector(store, frames.last().unwrap().output()).map_err(|e| e.into())?;
-        let folding_config = Arc::new(FoldingConfig::new_ivc(lang.clone(), self.reduction_count()));
-
-        let nivc_steps = M::from_frames(self.reduction_count(), frames, store, &folding_config);
-
-        let num_steps = nivc_steps.len();
-
-        let z0_primary = &z0;
-        let z0_secondary = Proof::z0_secondary();
-
-        let augmented_circuit_index: &usize = &nivc_steps[0].circuit_index();
-        let program_counter = F::from(*augmented_circuit_index as u64);
-
-        let recursive_snark = RecursiveSNARK::iter_base_step(
-            &pp.pp,
-            *augmented_circuit_index,
-            &nivc_steps[0],
-            &nivc_steps[0].secondary_circuit(),
-            Some(program_counter),
-            *augmented_circuit_index,
-            nivc_steps[0].num_circuits(),
-            z0_primary,
-            &z0_secondary,
-        )
-        .unwrap();
-        Ok((recursive_snark, z0, zi, num_steps))
-    }
 }
 
 #[derive(Clone, Debug)]
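
The two benchmark files above share the `fib_frame`/`fib_limit` helpers, which translate "the nth Fibonacci number" into an evaluation limit that is a whole number of `reduction_count`-sized folding steps. The sketch below is illustrative only and is not part of the patch: it reproduces those two helpers verbatim and checks the rounding property the benchmarks rely on. The `main` driver and the sample `(n, rc)` pairs are made up for the example, chosen from the `batch_sizes` and `reduction_counts` used above.

// Standalone sketch (assumptions noted above): verify that `fib_limit` rounds the
// target frame up to the next multiple of `rc`, adding at most one partially
// padded folding step.

fn fib_frame(n: usize) -> usize {
    // 11 + 16 * n: the leading 11 covers the frames before the first iteration's
    // binding; each further Fibonacci iteration adds 16 frames.
    11 + 16 * n
}

fn fib_limit(n: usize, rc: usize) -> usize {
    let frame = fib_frame(n);
    // Round up to the next multiple of `rc` so the final folding step is exactly full.
    rc * (frame / rc + usize::from(frame % rc != 0))
}

fn main() {
    for (n, rc) in [(100, 100), (100, 600), (200, 700), (200, 900)] {
        let (frame, limit) = (fib_frame(n), fib_limit(n, rc));
        assert!(limit >= frame); // the target frame fits within the limit
        assert_eq!(limit % rc, 0); // the limit is a whole number of rc-sized steps
        assert!(limit - frame < rc); // at most one extra, partially padded step is added
        println!("n = {n}, rc = {rc}: frame = {frame}, limit = {limit}");
    }
}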