
Add blobVersionedHash to the preimage of challengeDigest (#1211)
* add blobVersionedHash to the preimage of challenge digest

* (doc): aggregator/src/blob.rs

Co-authored-by: z2trillion <[email protected]>

* (doc): aggregator/src/aggregation/rlc/gates.rs

* (doc): aggregator/src/blob.rs

---------

Co-authored-by: z2trillion <[email protected]>
roynalnaruto and z2trillion authored Apr 12, 2024
1 parent 6c6c662 commit 5776400
Showing 8 changed files with 147 additions and 67 deletions.
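In short: the challenge digest over the blob now also commits to the blob's EIP-4844 versioned hash. A minimal out-of-circuit sketch of the new preimage layout (hedged: the helper and the `MAX_AGG_SNARKS = 15` value are illustrative assumptions; the real construction lives in `aggregator/src/blob.rs`, which is not among the hunks shown below):

```rust
use ethers_core::utils::keccak256;

// Assumption: illustrative value only; the real constant is defined in the aggregator crate.
const MAX_AGG_SNARKS: usize = 15;

/// challenge_digest = keccak256(
///     metadata_digest || chunk[0].data_digest || .. || chunk[MAX_AGG_SNARKS - 1].data_digest || versioned_hash
/// )
fn challenge_digest(
    metadata_digest: [u8; 32],
    chunk_data_digests: &[[u8; 32]; MAX_AGG_SNARKS],
    versioned_hash: [u8; 32],
) -> [u8; 32] {
    // 32 * (MAX_AGG_SNARKS + 1 + 1) bytes, matching the in-circuit preimage-length check below.
    let mut preimage = Vec::with_capacity(32 * (MAX_AGG_SNARKS + 1 + 1));
    preimage.extend_from_slice(&metadata_digest);
    for digest in chunk_data_digests.iter() {
        preimage.extend_from_slice(digest);
    }
    preimage.extend_from_slice(&versioned_hash);
    keccak256(preimage)
}
```

The same `32 * (MAX_AGG_SNARKS + 1 + 1)` length appears in the in-circuit preimage-length check in `blob_data.rs` below.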
21 changes: 4 additions & 17 deletions aggregator/src/aggregation/barycentric.rs
@@ -349,21 +349,9 @@ pub fn interpolate(z: Scalar, coefficients: &[Scalar; BLOB_WIDTH]) -> Scalar {
#[cfg(test)]
mod tests {
use super::*;
- use crate::blob::BlobData;
- use c_kzg::{Blob as RethBlob, KzgProof, KzgSettings};
- use once_cell::sync::Lazy;
- use std::{collections::BTreeSet, sync::Arc};
-
- /// KZG trusted setup
- pub static MAINNET_KZG_TRUSTED_SETUP: Lazy<Arc<KzgSettings>> = Lazy::new(|| {
-     Arc::new(
-         c_kzg::KzgSettings::load_trusted_setup(
-             &revm_primitives::kzg::G1_POINTS.0,
-             &revm_primitives::kzg::G2_POINTS.0,
-         )
-         .expect("failed to load trusted setup"),
-     )
- });
+ use crate::blob::{BlobData, KZG_TRUSTED_SETUP};
+ use c_kzg::{Blob as RethBlob, KzgProof};
+ use std::collections::BTreeSet;

#[test]
fn log_blob_width() {
@@ -425,8 +413,7 @@ mod tests {
)
.unwrap();
let (_proof, y) =
- KzgProof::compute_kzg_proof(&blob, &to_be_bytes(z).into(), &MAINNET_KZG_TRUSTED_SETUP)
-     .unwrap();
+ KzgProof::compute_kzg_proof(&blob, &to_be_bytes(z).into(), &KZG_TRUSTED_SETUP).unwrap();
from_canonical_be_bytes(*y)
}

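The `MAINNET_KZG_TRUSTED_SETUP` static removed above is replaced by a shared `KZG_TRUSTED_SETUP` imported from `crate::blob`. Its definition in `aggregator/src/blob.rs` is not part of the hunks shown here; presumably it mirrors the removed code, along these lines:

```rust
use std::sync::Arc;

use c_kzg::KzgSettings;
use once_cell::sync::Lazy;

/// KZG trusted setup, shared by the blob and barycentric modules.
pub static KZG_TRUSTED_SETUP: Lazy<Arc<KzgSettings>> = Lazy::new(|| {
    Arc::new(
        c_kzg::KzgSettings::load_trusted_setup(
            &revm_primitives::kzg::G1_POINTS.0,
            &revm_primitives::kzg::G2_POINTS.0,
        )
        .expect("failed to load trusted setup"),
    )
});
```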
25 changes: 18 additions & 7 deletions aggregator/src/aggregation/blob_data.rs
@@ -59,7 +59,7 @@ pub struct BlobDataConfig {

pub struct AssignedBlobDataExport {
pub num_valid_chunks: AssignedCell<Fr, Fr>,
- pub challenge_digest: Vec<AssignedCell<Fr, Fr>>,
+ pub versioned_hash: Vec<AssignedCell<Fr, Fr>>,
pub chunk_data_digests: Vec<Vec<AssignedCell<Fr, Fr>>>,
}

@@ -304,7 +304,8 @@ impl BlobDataConfig {

// - metadata_digest: 32 bytes
// - chunk[i].chunk_data_digest: 32 bytes each
- let preimage_len = 32.expr() * (MAX_AGG_SNARKS + 1).expr();
+ // - versioned_hash: 32 bytes
+ let preimage_len = 32.expr() * (MAX_AGG_SNARKS + 1 + 1).expr();

[
1.expr(), // q_enable
@@ -733,6 +734,7 @@ impl BlobDataConfig {

let challenge_digest_preimage_rlc_specified = &rows.last().unwrap().preimage_rlc;
let challenge_digest_rlc_specified = &rows.last().unwrap().digest_rlc;
+ let versioned_hash_rlc = &rows.get(N_ROWS_DIGEST_RLC - 2).unwrap().digest_rlc;

// ensure that on the last row of this section the is_boundary is turned on
// which would enable the keccak table lookup for challenge_digest
@@ -810,6 +812,7 @@ impl BlobDataConfig {
.collect::<Vec<_>>();
for (i, digest_rlc_specified) in std::iter::once(metadata_digest_rlc_specified)
.chain(chunk_digest_evm_rlcs)
+ .chain(std::iter::once(versioned_hash_rlc))
.chain(std::iter::once(challenge_digest_rlc_specified))
.enumerate()
{
@@ -833,7 +836,7 @@

// compute the keccak input RLC:
// we do this only for the metadata and chunks, not for the blob row itself.
- if i < MAX_AGG_SNARKS + 1 {
+ if i < MAX_AGG_SNARKS + 1 + 1 {
let digest_keccak_rlc = rlc_config.rlc(
&mut region,
&digest_bytes,
@@ -880,13 +883,21 @@
for chunk in chunk_data_digests_bytes.chunks_exact(N_BYTES_U256) {
chunk_data_digests.push(chunk.to_vec());
}
+ let challenge_digest = assigned_rows
+     .iter()
+     .rev()
+     .take(N_BYTES_U256)
+     .map(|row| row.byte.clone())
+     .collect::<Vec<AssignedCell<Fr, Fr>>>();
let export = AssignedBlobDataExport {
num_valid_chunks,
- challenge_digest: assigned_rows
+ versioned_hash: assigned_rows
.iter()
.rev()
+ .skip(N_BYTES_U256)
.take(N_BYTES_U256)
.map(|row| row.byte.clone())
+ .rev()
.collect(),
chunk_data_digests,
};
@@ -906,19 +917,19 @@ impl BlobDataConfig {

let challenge_digest_limb1 = rlc_config.inner_product(
&mut region,
- &export.challenge_digest[0..11],
+ &challenge_digest[0..11],
&pows_of_256,
&mut rlc_config_offset,
)?;
let challenge_digest_limb2 = rlc_config.inner_product(
&mut region,
- &export.challenge_digest[11..22],
+ &challenge_digest[11..22],
&pows_of_256,
&mut rlc_config_offset,
)?;
let challenge_digest_limb3 = rlc_config.inner_product(
&mut region,
- &export.challenge_digest[22..32],
+ &challenge_digest[22..32],
&pows_of_256[0..10],
&mut rlc_config_offset,
)?;
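A hedged reading of the export change above: the assigned digest bytes end with the 32 versioned-hash bytes immediately followed by the 32 challenge-digest bytes, so the code walks the rows in reverse, takes the tail as the challenge digest, then skips it and re-reverses the next 32 bytes to recover the versioned hash in its original order. Sketched over plain bytes (the names are illustrative):

```rust
/// `rows` stands in for the assigned byte cells, laid out as
/// [ .. preimage bytes .. || versioned_hash (32) || challenge_digest (32) ].
fn split_tail(rows: &[u8]) -> (Vec<u8>, Vec<u8>) {
    const N_BYTES_U256: usize = 32;
    // Last 32 bytes, collected in reverse order (as in the export above) and
    // then consumed by the three limb inner-products.
    let challenge_digest: Vec<u8> = rows.iter().rev().take(N_BYTES_U256).copied().collect();
    // The 32 bytes before those, reversed back into their original order.
    let versioned_hash: Vec<u8> = rows
        .iter()
        .rev()
        .skip(N_BYTES_U256)
        .take(N_BYTES_U256)
        .copied()
        .rev()
        .collect();
    (challenge_digest, versioned_hash)
}
```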
8 changes: 8 additions & 0 deletions aggregator/src/aggregation/circuit.rs
@@ -479,6 +479,14 @@ impl Circuit<Fr> for AggregationCircuit {
region.constrain_equal(c.cell(), ec.cell())?;
}

+ for (c, ec) in blob_data_exports
+     .versioned_hash
+     .iter()
+     .zip_eq(assigned_batch_hash.blob.versioned_hash.iter())
+ {
+     region.constrain_equal(c.cell(), ec.cell())?;
+ }

Ok(())
},
)?;
14 changes: 7 additions & 7 deletions aggregator/src/aggregation/rlc/gates.rs
@@ -13,8 +13,8 @@ use super::RlcConfig;

const FIXED_OFFSET_32: usize = MAX_AGG_SNARKS + 1;
const FIXED_OFFSET_168: usize = FIXED_OFFSET_32 + 1;
- const FIXED_OFFSET_200: usize = FIXED_OFFSET_168 + 1;
- const FIXED_OFFSET_2_POW_32: usize = FIXED_OFFSET_200 + 1;
+ const FIXED_OFFSET_232: usize = FIXED_OFFSET_168 + 1;
+ const FIXED_OFFSET_2_POW_32: usize = FIXED_OFFSET_232 + 1;
const FIXED_OFFSET_256: usize = FIXED_OFFSET_2_POW_32 + 1;
const FIXED_OFFSET_EMPTY_KECCAK: usize = FIXED_OFFSET_256 + POWS_OF_256;

@@ -33,7 +33,7 @@ impl RlcConfig {
/// | MAX_AGG_SNARKS | MAX_AGG_SNARKS |
/// | MAX_AGG_SNARKS + 1 | 32 |
/// | MAX_AGG_SNARKS + 2 | 168 |
- /// | MAX_AGG_SNARKS + 3 | 200 |
+ /// | MAX_AGG_SNARKS + 3 | 232 |
/// | MAX_AGG_SNARKS + 4 | 2 ^ 32 |
/// | MAX_AGG_SNARKS + 5 | 256 |
/// | MAX_AGG_SNARKS + 6 | 256 ^ 2 |
Expand All @@ -60,8 +60,8 @@ impl RlcConfig {
}
assert_eq!(offset, FIXED_OFFSET_32);

- // [32, 168, 200, 1 << 32]
- for const_val in [32, 168, 200, 1 << 32] {
+ // [32, 168, 232, 1 << 32]
+ for const_val in [32, 168, 232, 1 << 32] {
region.assign_fixed(
|| format!("const at offset={offset}"),
self.fixed,
@@ -189,10 +189,10 @@ impl RlcConfig {
}

#[inline]
- pub(crate) fn two_hundred_cell(&self, region_index: RegionIndex) -> Cell {
+ pub(crate) fn two_hundred_and_thirty_two_cell(&self, region_index: RegionIndex) -> Cell {
Cell {
region_index,
- row_offset: FIXED_OFFSET_200,
+ row_offset: FIXED_OFFSET_232,
column: self.fixed.into(),
}
}
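A hedged reading of why the fixed constant moves from 200 to 232: it appears to be the byte length of the batch public-input-hash preimage, which grows by the 32-byte versioned hash (see `batch.rs` below):

```rust
// Old preimage: chain_id (8) || prev_state_root || post_state_root || withdraw_root
//               || data_hash || z || y                     -> 8 + 6 * 32 = 200 bytes
// New preimage: the same fields plus blob_versioned_hash   -> 8 + 7 * 32 = 232 bytes
const _: () = assert!(8 + 6 * 32 == 200 && 8 + 7 * 32 == 232);
```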
28 changes: 15 additions & 13 deletions aggregator/src/batch.rs
@@ -1,7 +1,7 @@
//! This module implements related functions that aggregates public inputs of many chunks into a
//! single one.
- use eth_types::{Field, ToBigEndian, H256, U256};
+ use eth_types::{Field, ToBigEndian, H256};
use ethers_core::utils::keccak256;

use crate::{
@@ -34,7 +34,10 @@ pub struct BatchHash {
pub(crate) public_input_hash: H256,
/// The number of chunks that contain meaningful data, i.e. not padded chunks.
pub(crate) number_of_valid_chunks: usize,
+ /// 4844-Blob related fields.
pub(crate) blob: BlobAssignments,
+ /// The 4844 versioned hash for the blob.
+ pub(crate) versioned_hash: H256,
}

impl BatchHash {
@@ -117,6 +120,7 @@ impl BatchHash {

let blob_data = BlobData::new(number_of_valid_chunks, chunks_with_padding);
let blob_assignments = BlobAssignments::from(&blob_data);
+ let versioned_hash = blob_data.get_versioned_hash();

// public input hash is build as
// keccak(
@@ -126,7 +130,8 @@ impl BatchHash {
// chunk[k-1].withdraw_root ||
// batch_data_hash ||
// z ||
- // y
+ // y ||
+ // versioned_hash
// )
let preimage = [
chunks_with_padding[0].chain_id.to_be_bytes().as_ref(),
@@ -140,6 +145,7 @@ impl BatchHash {
batch_data_hash.as_slice(),
blob_assignments.challenge.to_be_bytes().as_ref(),
blob_assignments.evaluation.to_be_bytes().as_ref(),
+ versioned_hash.as_bytes(),
]
.concat();
let public_input_hash: H256 = keccak256(preimage).into();
@@ -155,9 +161,10 @@ impl BatchHash {
chain_id: chunks_with_padding[0].chain_id,
chunks_with_padding: chunks_with_padding.to_vec(),
data_hash: batch_data_hash.into(),
- blob: blob_assignments,
public_input_hash,
number_of_valid_chunks,
+ blob: blob_assignments,
+ versioned_hash,
}
}

@@ -187,15 +194,9 @@ impl BatchHash {
// chunk[k-1].withdraw_root ||
// batch_data_hash ||
// z ||
- // y )
- // TODO: make BLS_MODULUS into a static variable using lazy_static!()
- let (_, z) = self.blob.challenge_digest.div_mod(
-     U256::from_str_radix(
-         "0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001",
-         16,
-     )
-     .unwrap(),
- );
+ // y ||
+ // blob_versioned_hash
+ // )
let batch_public_input_hash_preimage = [
self.chain_id.to_be_bytes().as_ref(),
self.chunks_with_padding[0].prev_state_root.as_bytes(),
@@ -206,8 +207,9 @@ impl BatchHash {
.withdraw_root
.as_bytes(),
self.data_hash.as_bytes(),
- &z.to_be_bytes(),
+ &self.blob.challenge.to_be_bytes(),
&self.blob.evaluation.to_be_bytes(),
+ self.versioned_hash.as_bytes(),
]
.concat();
res.push(batch_public_input_hash_preimage);
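`BlobData::get_versioned_hash()` is introduced by this commit, but its body is not among the hunks shown. Per EIP-4844, a blob's versioned hash is the SHA-256 of its KZG commitment with the first byte overwritten by the version tag `0x01`. A hedged sketch, assuming the 48-byte commitment is already at hand and the `sha2` crate is available:

```rust
use eth_types::H256;
use sha2::{Digest, Sha256};

/// kzg_to_versioned_hash(commitment) = 0x01 || sha256(commitment)[1..32]
fn kzg_to_versioned_hash(commitment: &[u8; 48]) -> H256 {
    let mut hash = [0u8; 32];
    hash.copy_from_slice(&Sha256::digest(commitment));
    hash[0] = 0x01; // VERSIONED_HASH_VERSION_KZG
    H256::from(hash)
}
```

Folding this hash into both the challenge digest and the public input hash is presumably what ties the in-circuit blob data to the blob actually posted on chain.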
(Diffs for the remaining 3 changed files are not shown.)
