Use recursion factor of 4 for all proofs (#1440)
andyleiserson authored Nov 19, 2024
1 parent e831a21 commit 9336e59
Showing 13 changed files with 206 additions and 159 deletions.
9 changes: 1 addition & 8 deletions ipa-core/src/helpers/gateway/send.rs
@@ -25,6 +25,7 @@ use crate::{
labels::{ROLE, STEP},
metrics::{BYTES_SENT, RECORDS_SENT},
},
utils::non_zero_prev_power_of_two,
};

/// Sending end of the gateway channel.
@@ -256,14 +257,6 @@ impl SendChannelConfig {
total_records: TotalRecords,
record_size: usize,
) -> Self {
// this computes the greatest positive power of 2 that is
// less than or equal to target.
fn non_zero_prev_power_of_two(target: usize) -> usize {
let bits = usize::BITS - target.leading_zeros();

1 << (std::cmp::max(1, bits) - 1)
}

assert!(record_size > 0, "Message size cannot be 0");

let total_capacity = gateway_config.active.get() * record_size;
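The inline helper removed above now lives in `crate::utils` (see the new `non_zero_prev_power_of_two` import at the top of this file). A minimal sketch of the helper and its rounding behavior, assuming the relocated version keeps the same body:

```rust
/// Greatest power of two that is less than or equal to `target`, with a
/// floor of 1 so the result is never zero (even for `target == 0`).
pub fn non_zero_prev_power_of_two(target: usize) -> usize {
    let bits = usize::BITS - target.leading_zeros();
    1 << (std::cmp::max(1, bits) - 1)
}

#[test]
fn rounds_down_to_a_power_of_two() {
    assert_eq!(non_zero_prev_power_of_two(0), 1);
    assert_eq!(non_zero_prev_power_of_two(1), 1);
    assert_eq!(non_zero_prev_power_of_two(5), 4);
    assert_eq!(non_zero_prev_power_of_two(381), 256);
    assert_eq!(non_zero_prev_power_of_two(1024), 1024);
}
```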
11 changes: 7 additions & 4 deletions ipa-core/src/protocol/context/dzkp_validator.rs
@@ -20,7 +20,7 @@ use crate::{
},
ipa_prf::{
validation_protocol::{proof_generation::ProofBatch, validation::BatchToVerify},
LargeProofGenerator, SmallProofGenerator,
CompressedProofGenerator, FirstProofGenerator,
},
Gate, RecordId, RecordIdRange,
},
@@ -50,6 +50,9 @@ const BIT_ARRAY_SHIFT: usize = BIT_ARRAY_LEN.ilog2() as usize;
// A smaller value is used for tests, to enable covering some corner cases with a
// reasonable runtime. Some of these tests use TARGET_PROOF_SIZE directly, so for tests
// it does need to be a power of two.
//
// TARGET_PROOF_SIZE is closely related to MAX_PROOF_RECURSION; see the assertion that
// `uv_values.len() <= max_uv_values` in `ProofBatch` for more detail.
#[cfg(test)]
pub const TARGET_PROOF_SIZE: usize = 8192;
#[cfg(not(test))]
@@ -73,7 +76,7 @@ pub const TARGET_PROOF_SIZE: usize = 50_000_000;
// to blocks of 256), leaving some margin is advised.
//
// The implementation requires that MAX_PROOF_RECURSION is at least 2.
pub const MAX_PROOF_RECURSION: usize = 9;
pub const MAX_PROOF_RECURSION: usize = 14;

/// `MultiplicationInputsBlock` is a block of fixed size of intermediate values
that occur during a multiplication.
@@ -601,8 +604,8 @@ impl Batch {
ctx: Base<'_, B>,
batch_index: usize,
) -> Result<(), Error> {
const PRSS_RECORDS_PER_BATCH: usize = LargeProofGenerator::PROOF_LENGTH
+ (MAX_PROOF_RECURSION - 1) * SmallProofGenerator::PROOF_LENGTH
const PRSS_RECORDS_PER_BATCH: usize = FirstProofGenerator::PROOF_LENGTH
+ (MAX_PROOF_RECURSION - 1) * CompressedProofGenerator::PROOF_LENGTH
+ 2; // P and Q masks

let proof_ctx = ctx.narrow(&Step::GenerateProof);
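A back-of-envelope sketch (not part of the diff) of why the recursion bound grows from 9 to 14: each recursion round shrinks the number of u/v values by roughly the recursion factor, so covering the production `TARGET_PROOF_SIZE` of 50M takes about log₄(50M) ≈ 13 rounds at factor 4, versus 8 with the old mix of one factor-32 round followed by factor-8 rounds. The counts below hold for this simplified model only:

```rust
// Simplified model: each round divides the value count by the recursion factor.
fn rounds_needed(mut values: usize, recursion_factor: usize) -> usize {
    let mut rounds = 0;
    while values > 1 {
        values = values.div_ceil(recursion_factor);
        rounds += 1;
    }
    rounds
}

fn main() {
    // Old scheme: one round at factor 32, then factor 8; fits the old bound of 9.
    assert_eq!(1 + rounds_needed(50_000_000_usize.div_ceil(32), 8), 8);
    // New scheme: factor 4 everywhere; fits the new bound of 14, with margin.
    assert_eq!(rounds_needed(50_000_000, 4), 13);
}
```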
29 changes: 19 additions & 10 deletions ipa-core/src/protocol/hybrid/oprf.rs
@@ -1,3 +1,5 @@
use std::cmp::max;

use futures::{stream, StreamExt, TryStreamExt};
use typenum::Const;

@@ -17,8 +19,9 @@ use crate::{
protocol::{
basics::{BooleanProtocols, Reveal},
context::{
dzkp_validator::DZKPValidator, reshard_try_stream, DZKPUpgraded, MacUpgraded,
MaliciousProtocolSteps, ShardedContext, UpgradableContext, Validator,
dzkp_validator::{DZKPValidator, TARGET_PROOF_SIZE},
reshard_try_stream, DZKPUpgraded, MacUpgraded, MaliciousProtocolSteps, ShardedContext,
UpgradableContext, Validator,
},
hybrid::step::HybridStep,
ipa_prf::{
@@ -34,7 +37,9 @@ use crate::{
Vectorizable,
},
seq_join::seq_join,
utils::non_zero_prev_power_of_two,
};

// In theory, we could support (runtime-configured breakdown count) ≤ (compile-time breakdown count)
// ≤ 2^|bk|, with all three values distinct, but at present, there is no runtime configuration and
// the latter two must be equal. The implementation of `move_single_value_to_bucket` does support a
@@ -64,13 +69,17 @@ pub const CONV_CHUNK: usize = 256;
/// Vectorization dimension for PRF
pub const PRF_CHUNK: usize = 16;

// We expect 2*256 = 512 gates in total for two additions per conversion. The vectorization factor
// is CONV_CHUNK. Let `len` equal the number of converted shares. The total amount of
// multiplications is CONV_CHUNK*512*len. We want CONV_CHUNK*512*len ≈ 50M, or len ≈ 381, for a
// reasonably-sized proof. There is also a constraint on proof chunks to be powers of two, so
// we pick the closest power of two close to 381 but less than that value. 256 gives us around 33M
// multiplications per batch
const CONV_PROOF_CHUNK: usize = 256;
/// Returns a suitable proof chunk size (in records) for use with `convert_to_fp25519`.
///
/// We expect 2*256 = 512 gates in total for two additions per conversion. The
/// vectorization factor is `CONV_CHUNK`. Let `len` equal the number of converted
/// shares. The total amount of multiplications is `CONV_CHUNK`*512*len. We want
/// `CONV_CHUNK`*512*len ≈ 50M for a reasonably-sized proof. There is also a constraint
/// on proof chunks to be powers of two, and we don't want to compute a proof chunk
/// of zero when `TARGET_PROOF_SIZE` is smaller for tests.
fn conv_proof_chunk() -> usize {
non_zero_prev_power_of_two(max(2, TARGET_PROOF_SIZE / CONV_CHUNK / 512))
}

/// This computes the Dodis-Yampolsky PRF value on every match key from input,
/// and reshards the reports according to the computed PRF. At the end, reports with the
@@ -101,7 +110,7 @@ where
protocol: &HybridStep::ConvertFp25519,
validate: &HybridStep::ConvertFp25519Validate,
},
CONV_PROOF_CHUNK,
conv_proof_chunk(),
);
let m_ctx = validator.context();

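A quick check of the new formula (a sketch, assuming `non_zero_prev_power_of_two` from `crate::utils` is in scope and `CONV_CHUNK` = 256 as above): at the production `TARGET_PROOF_SIZE` of 50M it reproduces the old `CONV_PROOF_CHUNK` constant of 256, while the test value of 8192 yields the floor of 2 rather than a zero-sized chunk.

```rust
use std::cmp::max;

// Hypothetical variant of conv_proof_chunk(), parameterized over the target
// proof size so both the production and the test values can be checked.
fn conv_proof_chunk_for(target_proof_size: usize) -> usize {
    const CONV_CHUNK: usize = 256;
    non_zero_prev_power_of_two(max(2, target_proof_size / CONV_CHUNK / 512))
}

#[test]
fn conv_proof_chunk_sizes() {
    // 50M / 256 / 512 = 381; rounding down to a power of two gives 256.
    assert_eq!(conv_proof_chunk_for(50_000_000), 256);
    // 8192 / 256 / 512 = 0; the floor of 2 keeps the chunk non-zero for tests.
    assert_eq!(conv_proof_chunk_for(8192), 2);
}
```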
9 changes: 8 additions & 1 deletion ipa-core/src/protocol/ipa_prf/aggregation/mod.rs
@@ -25,6 +25,7 @@ use crate::{
replicated::semi_honest::AdditiveShare as Replicated, BitDecomposed, FieldSimd,
SharedValue, TransposeFrom, Vectorizable,
},
utils::non_zero_prev_power_of_two,
};

pub(crate) mod breakdown_reveal;
@@ -96,8 +97,14 @@ pub type AggResult<const B: usize> = Result<BitDecomposed<Replicated<Boolean, B>
/// saturating the output) is:
///
/// $\sum_{i = 1}^k 2^{k - i} (b + i - 1) \approx 2^k (b + 1) = N (b + 1)$
///
/// We set a floor of 2 to avoid computing a chunk of zero when `TARGET_PROOF_SIZE` is
/// smaller for tests.
pub fn aggregate_values_proof_chunk(input_width: usize, input_item_bits: usize) -> usize {
max(2, TARGET_PROOF_SIZE / input_width / (input_item_bits + 1)).next_power_of_two()
non_zero_prev_power_of_two(max(
2,
TARGET_PROOF_SIZE / input_width / (input_item_bits + 1),
))
}

// This is the step count for AggregateChunkStep. We need it to size RecordId arrays.
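The substantive change in `aggregate_values_proof_chunk` is the rounding direction: `next_power_of_two()` could round the chunk up to nearly twice the target, while `non_zero_prev_power_of_two` rounds down so the proof stays at or under `TARGET_PROOF_SIZE`. A sketch with illustrative (hypothetical) arguments, assuming `non_zero_prev_power_of_two` is in scope:

```rust
#[test]
fn rounding_direction_changed() {
    // Hypothetical inputs: width 256, 7-bit items, TARGET_PROOF_SIZE = 50M.
    let raw: usize = std::cmp::max(2, 50_000_000 / 256 / (7 + 1)); // = 24_414
    assert_eq!(raw.next_power_of_two(), 32_768);          // old: overshoots the target
    assert_eq!(non_zero_prev_power_of_two(raw), 16_384);  // new: rounds down
}
```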
@@ -381,7 +381,7 @@ mod tests {
helpers::stream::process_slice_by_chunks,
protocol::{
context::{dzkp_validator::DZKPValidator, UpgradableContext, TEST_DZKP_STEPS},
ipa_prf::{CONV_CHUNK, CONV_PROOF_CHUNK, PRF_CHUNK},
ipa_prf::{conv_proof_chunk, CONV_CHUNK, PRF_CHUNK},
},
rand::thread_rng,
secret_sharing::SharedValue,
@@ -415,7 +415,7 @@
let [res0, res1, res2] = world
.semi_honest(records.into_iter(), |ctx, records| async move {
let c_ctx = ctx.set_total_records((COUNT + CONV_CHUNK - 1) / CONV_CHUNK);
let validator = &c_ctx.dzkp_validator(TEST_DZKP_STEPS, CONV_PROOF_CHUNK);
let validator = &c_ctx.dzkp_validator(TEST_DZKP_STEPS, conv_proof_chunk());
let m_ctx = validator.context();
seq_join(
m_ctx.active_work(),
22 changes: 11 additions & 11 deletions ipa-core/src/protocol/ipa_prf/malicious_security/prover.rs
@@ -1,14 +1,15 @@
use std::{borrow::Borrow, iter::zip, marker::PhantomData};

#[cfg(all(test, unit_test))]
use crate::ff::Fp31;
use crate::{
error::Error::{self, DZKPMasks},
ff::{Fp61BitPrime, PrimeField},
helpers::hashing::{compute_hash, hash_to_field},
protocol::{
context::Context,
ipa_prf::malicious_security::lagrange::{CanonicalLagrangeDenominator, LagrangeTable},
ipa_prf::{
malicious_security::lagrange::{CanonicalLagrangeDenominator, LagrangeTable},
CompressedProofGenerator,
},
prss::SharedRandomness,
RecordId, RecordIdRange,
},
@@ -84,8 +85,8 @@ where
// compute final uv values
let (u_values, v_values) = &mut self.uv_chunks[0];
// shift first element to last position
u_values[SmallProofGenerator::RECURSION_FACTOR - 1] = u_values[0];
v_values[SmallProofGenerator::RECURSION_FACTOR - 1] = v_values[0];
u_values[CompressedProofGenerator::RECURSION_FACTOR - 1] = u_values[0];
v_values[CompressedProofGenerator::RECURSION_FACTOR - 1] = v_values[0];
// set masks in first position
u_values[0] = my_p_mask;
v_values[0] = my_q_mask;
@@ -105,15 +106,11 @@ pub struct ProofGenerator<F: PrimeField, const L: usize, const P: usize, const M
phantom_data: PhantomData<F>,
}

#[cfg(all(test, unit_test))]
pub type TestProofGenerator = ProofGenerator<Fp31, 4, 7, 3>;

// Compression Factor is L
// P, Proof size is 2*L - 1
// M, the number of interpolated points is L - 1
// The reason we need these is that Rust doesn't support basic math operations on const generics
pub type SmallProofGenerator = ProofGenerator<Fp61BitPrime, 8, 15, 7>;
pub type LargeProofGenerator = ProofGenerator<Fp61BitPrime, 32, 63, 31>;
pub type SmallProofGenerator = ProofGenerator<Fp61BitPrime, 4, 7, 3>;

impl<F: PrimeField, const L: usize, const P: usize, const M: usize> ProofGenerator<F, L, P, M> {
// define constants such that they can be used externally
@@ -265,7 +262,7 @@ mod test {
context::Context,
ipa_prf::malicious_security::{
lagrange::{CanonicalLagrangeDenominator, LagrangeTable},
prover::{LargeProofGenerator, SmallProofGenerator, TestProofGenerator, UVValues},
prover::{ProofGenerator, SmallProofGenerator, UVValues},
},
RecordId, RecordIdRange,
},
@@ -274,6 +271,9 @@
test_fixture::{Runner, TestWorld},
};

type TestProofGenerator = ProofGenerator<Fp31, 4, 7, 3>;
type LargeProofGenerator = ProofGenerator<Fp61BitPrime, 32, 63, 31>;

fn zip_chunks<F: PrimeField, const U: usize, I, J>(a: I, b: J) -> UVValues<F, U>
where
I: IntoIterator<Item = u128>,
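The comments in `prover.rs` above encode the const-generic bookkeeping in prose: for a compression factor L, the proof length P is 2·L − 1 and the number of interpolated points M is L − 1 (Rust const generics can't derive these from L directly). A small compile-time spot check of the parameter sets touched by this change, as a sketch:

```rust
// For ProofGenerator<F, L, P, M>: P = 2*L - 1 and M = L - 1.
const fn params_consistent(l: usize, p: usize, m: usize) -> bool {
    p == 2 * l - 1 && m == l - 1
}

// SmallProofGenerator = ProofGenerator<Fp61BitPrime, 4, 7, 3> (recursion factor 4),
// and the test-only TestProofGenerator uses the same 4, 7, 3 over Fp31.
const _: () = assert!(params_consistent(4, 7, 3));
// The test-only LargeProofGenerator = ProofGenerator<Fp61BitPrime, 32, 63, 31>.
const _: () = assert!(params_consistent(32, 63, 31));
```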
31 changes: 19 additions & 12 deletions ipa-core/src/protocol/ipa_prf/mod.rs
@@ -1,4 +1,4 @@
use std::{convert::Infallible, iter::zip, num::NonZeroU32, ops::Add};
use std::{cmp::max, convert::Infallible, iter::zip, num::NonZeroU32, ops::Add};

use futures::{stream, StreamExt, TryStreamExt};
use generic_array::{ArrayLength, GenericArray};
@@ -24,8 +24,8 @@ use crate::{
protocol::{
basics::{BooleanArrayMul, BooleanProtocols, Reveal},
context::{
dzkp_validator::DZKPValidator, DZKPUpgraded, MacUpgraded, MaliciousProtocolSteps,
UpgradableContext,
dzkp_validator::{DZKPValidator, TARGET_PROOF_SIZE},
DZKPUpgraded, MacUpgraded, MaliciousProtocolSteps, UpgradableContext,
},
ipa_prf::{
boolean_ops::convert_to_fp25519,
@@ -44,6 +44,7 @@ use crate::{
BitDecomposed, FieldSimd, SharedValue, TransposeFrom, Vectorizable,
},
seq_join::seq_join,
utils::non_zero_prev_power_of_two,
};

pub(crate) mod aggregation;
@@ -58,7 +59,9 @@ pub(crate) mod shuffle;
pub(crate) mod step;
pub mod validation_protocol;

pub use malicious_security::prover::{LargeProofGenerator, SmallProofGenerator};
pub type FirstProofGenerator = malicious_security::prover::SmallProofGenerator;
pub type CompressedProofGenerator = malicious_security::prover::SmallProofGenerator;

pub use shuffle::Shuffle;

/// Match key type
@@ -409,13 +412,17 @@ where
Ok(noisy_output_histogram)
}

// We expect 2*256 = 512 gates in total for two additions per conversion. The vectorization factor
// is CONV_CHUNK. Let `len` equal the number of converted shares. The total amount of
// multiplications is CONV_CHUNK*512*len. We want CONV_CHUNK*512*len ≈ 50M, or len ≈ 381, for a
// reasonably-sized proof. There is also a constraint on proof chunks to be powers of two, so
// we pick the closest power of two close to 381 but less than that value. 256 gives us around 33M
// multiplications per batch
const CONV_PROOF_CHUNK: usize = 256;
/// Returns a suitable proof chunk size (in records) for use with `convert_to_fp25519`.
///
/// We expect 2*256 = 512 gates in total for two additions per conversion. The
/// vectorization factor is `CONV_CHUNK`. Let `len` equal the number of converted
/// shares. The total amount of multiplications is `CONV_CHUNK`*512*len. We want
/// `CONV_CHUNK`*512*len ≈ 50M for a reasonably-sized proof. There is also a constraint
/// on proof chunks to be powers of two, and we don't want to compute a proof chunk
/// of zero when `TARGET_PROOF_SIZE` is smaller for tests.
fn conv_proof_chunk() -> usize {
non_zero_prev_power_of_two(max(2, TARGET_PROOF_SIZE / CONV_CHUNK / 512))
}

#[tracing::instrument(name = "compute_prf_for_inputs", skip_all)]
async fn compute_prf_for_inputs<C, BK, TV, TS>(
@@ -443,7 +450,7 @@ where
protocol: &Step::ConvertFp25519,
validate: &Step::ConvertFp25519Validate,
},
CONV_PROOF_CHUNK,
conv_proof_chunk(),
);
let m_ctx = validator.context();

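With `FirstProofGenerator` and `CompressedProofGenerator` both aliased to the recursion-factor-4 `SmallProofGenerator`, the per-batch PRSS budget computed in `dzkp_validator.rs` above works out as follows (a sketch, assuming each generator's `PROOF_LENGTH` equals its P parameter, i.e. 2·4 − 1 = 7):

```rust
const MAX_PROOF_RECURSION: usize = 14;
const PROOF_LENGTH: usize = 7; // 2 * 4 - 1 for a recursion factor of 4

// One first proof, MAX_PROOF_RECURSION - 1 compressed proofs, plus the P and Q masks.
const PRSS_RECORDS_PER_BATCH: usize =
    PROOF_LENGTH + (MAX_PROOF_RECURSION - 1) * PROOF_LENGTH + 2;

const _: () = assert!(PRSS_RECORDS_PER_BATCH == 100); // 7 + 13 * 7 + 2
```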
6 changes: 5 additions & 1 deletion ipa-core/src/protocol/ipa_prf/prf_sharding/mod.rs
@@ -52,6 +52,7 @@ use crate::{
replicated::{semi_honest::AdditiveShare as Replicated, ReplicatedSecretSharing},
BitDecomposed, FieldSimd, SharedValue, TransposeFrom, Vectorizable,
},
utils::non_zero_prev_power_of_two,
};

pub mod feature_label_dot_product;
@@ -515,7 +516,10 @@ where
// TODO: this override was originally added to work around problems with
// read_size vs. batch size alignment. Those are now fixed (in #1332), but this
// is still observed to help performance (see #1376), so has been retained.
std::cmp::min(sh_ctx.active_work().get(), chunk_size.next_power_of_two()),
std::cmp::min(
sh_ctx.active_work().get(),
non_zero_prev_power_of_two(chunk_size),
),
);
dzkp_validator.set_total_records(TotalRecords::specified(histogram[1]).unwrap());
let ctx_for_row_number = set_up_contexts(&dzkp_validator.context(), histogram)?;
3 changes: 2 additions & 1 deletion ipa-core/src/protocol/ipa_prf/quicksort.rs
@@ -30,6 +30,7 @@ use crate::{
Vectorizable,
},
seq_join::seq_join,
utils::non_zero_prev_power_of_two,
};

impl<K> ChunkBuffer<SORT_CHUNK> for (Vec<AdditiveShare<K>>, Vec<AdditiveShare<K>>)
@@ -98,7 +99,7 @@ where
}

fn quicksort_proof_chunk(key_bits: usize) -> usize {
(TARGET_PROOF_SIZE / key_bits / SORT_CHUNK).next_power_of_two()
non_zero_prev_power_of_two(TARGET_PROOF_SIZE / key_bits / SORT_CHUNK)
}

/// Insecure quicksort using MPC comparisons and a key extraction function `get_key`.