diff --git a/aggregator/src/aggregation/barycentric.rs b/aggregator/src/aggregation/barycentric.rs
index 902dabda0a..e6e6799065 100644
--- a/aggregator/src/aggregation/barycentric.rs
+++ b/aggregator/src/aggregation/barycentric.rs
@@ -349,21 +349,9 @@ pub fn interpolate(z: Scalar, coefficients: &[Scalar; BLOB_WIDTH]) -> Scalar {
 #[cfg(test)]
 mod tests {
     use super::*;
-    use crate::blob::BlobData;
-    use c_kzg::{Blob as RethBlob, KzgProof, KzgSettings};
-    use once_cell::sync::Lazy;
-    use std::{collections::BTreeSet, sync::Arc};
-
-    /// KZG trusted setup
-    pub static MAINNET_KZG_TRUSTED_SETUP: Lazy<Arc<KzgSettings>> = Lazy::new(|| {
-        Arc::new(
-            c_kzg::KzgSettings::load_trusted_setup(
-                &revm_primitives::kzg::G1_POINTS.0,
-                &revm_primitives::kzg::G2_POINTS.0,
-            )
-            .expect("failed to load trusted setup"),
-        )
-    });
+    use crate::blob::{BlobData, KZG_TRUSTED_SETUP};
+    use c_kzg::{Blob as RethBlob, KzgProof};
+    use std::collections::BTreeSet;

     #[test]
     fn log_blob_width() {
@@ -425,8 +413,7 @@ mod tests {
         )
         .unwrap();
         let (_proof, y) =
-            KzgProof::compute_kzg_proof(&blob, &to_be_bytes(z).into(), &MAINNET_KZG_TRUSTED_SETUP)
-                .unwrap();
+            KzgProof::compute_kzg_proof(&blob, &to_be_bytes(z).into(), &KZG_TRUSTED_SETUP).unwrap();
         from_canonical_be_bytes(*y)
     }

diff --git a/aggregator/src/aggregation/blob_data.rs b/aggregator/src/aggregation/blob_data.rs
index 81d0358cbe..d886dd45a1 100644
--- a/aggregator/src/aggregation/blob_data.rs
+++ b/aggregator/src/aggregation/blob_data.rs
@@ -59,7 +59,7 @@ pub struct BlobDataConfig {

 pub struct AssignedBlobDataExport {
     pub num_valid_chunks: AssignedCell<Fr, Fr>,
-    pub challenge_digest: Vec<AssignedCell<Fr, Fr>>,
+    pub versioned_hash: Vec<AssignedCell<Fr, Fr>>,
     pub chunk_data_digests: Vec<Vec<AssignedCell<Fr, Fr>>>,
 }

@@ -304,7 +304,8 @@ impl BlobDataConfig {
         // - metadata_digest: 32 bytes
         // - chunk[i].chunk_data_digest: 32 bytes each
-        let preimage_len = 32.expr() * (MAX_AGG_SNARKS + 1).expr();
+        // - versioned_hash: 32 bytes
+        let preimage_len = 32.expr() * (MAX_AGG_SNARKS + 1 + 1).expr();

         [
             1.expr(), // q_enable
@@ -733,6 +734,7 @@ impl BlobDataConfig {

         let challenge_digest_preimage_rlc_specified = &rows.last().unwrap().preimage_rlc;
         let challenge_digest_rlc_specified = &rows.last().unwrap().digest_rlc;
+        let versioned_hash_rlc = &rows.get(N_ROWS_DIGEST_RLC - 2).unwrap().digest_rlc;

         // ensure that on the last row of this section the is_boundary is turned on
         // which would enable the keccak table lookup for challenge_digest
@@ -810,6 +812,7 @@ impl BlobDataConfig {
             .collect::<Vec<_>>();
         for (i, digest_rlc_specified) in std::iter::once(metadata_digest_rlc_specified)
             .chain(chunk_digest_evm_rlcs)
+            .chain(std::iter::once(versioned_hash_rlc))
             .chain(std::iter::once(challenge_digest_rlc_specified))
             .enumerate()
         {
@@ -833,7 +836,7 @@ impl BlobDataConfig {

             // compute the keccak input RLC:
             // we do this only for the metadata and chunks, not for the blob row itself.
-            if i < MAX_AGG_SNARKS + 1 {
+            if i < MAX_AGG_SNARKS + 1 + 1 {
                 let digest_keccak_rlc = rlc_config.rlc(
                     &mut region,
                     &digest_bytes,
@@ -880,13 +883,21 @@ impl BlobDataConfig {
         for chunk in chunk_data_digests_bytes.chunks_exact(N_BYTES_U256) {
             chunk_data_digests.push(chunk.to_vec());
         }
+        let challenge_digest = assigned_rows
+            .iter()
+            .rev()
+            .take(N_BYTES_U256)
+            .map(|row| row.byte.clone())
+            .collect::<Vec<AssignedCell<Fr, Fr>>>();
         let export = AssignedBlobDataExport {
             num_valid_chunks,
-            challenge_digest: assigned_rows
+            versioned_hash: assigned_rows
                 .iter()
                 .rev()
+                .skip(N_BYTES_U256)
                 .take(N_BYTES_U256)
                 .map(|row| row.byte.clone())
+                .rev()
                 .collect(),
             chunk_data_digests,
         };
@@ -906,19 +917,19 @@ impl BlobDataConfig {

         let challenge_digest_limb1 = rlc_config.inner_product(
             &mut region,
-            &export.challenge_digest[0..11],
+            &challenge_digest[0..11],
             &pows_of_256,
             &mut rlc_config_offset,
         )?;
         let challenge_digest_limb2 = rlc_config.inner_product(
             &mut region,
-            &export.challenge_digest[11..22],
+            &challenge_digest[11..22],
             &pows_of_256,
             &mut rlc_config_offset,
         )?;
         let challenge_digest_limb3 = rlc_config.inner_product(
             &mut region,
-            &export.challenge_digest[22..32],
+            &challenge_digest[22..32],
             &pows_of_256[0..10],
             &mut rlc_config_offset,
         )?;
diff --git a/aggregator/src/aggregation/circuit.rs b/aggregator/src/aggregation/circuit.rs
index 92931ce2cb..e544528f99 100644
--- a/aggregator/src/aggregation/circuit.rs
+++ b/aggregator/src/aggregation/circuit.rs
@@ -479,6 +479,14 @@ impl Circuit<Fr> for AggregationCircuit {
                         region.constrain_equal(c.cell(), ec.cell())?;
                     }

+                    for (c, ec) in blob_data_exports
+                        .versioned_hash
+                        .iter()
+                        .zip_eq(assigned_batch_hash.blob.versioned_hash.iter())
+                    {
+                        region.constrain_equal(c.cell(), ec.cell())?;
+                    }
+
                     Ok(())
                 },
             )?;
diff --git a/aggregator/src/aggregation/rlc/gates.rs b/aggregator/src/aggregation/rlc/gates.rs
index f32473b1a6..946008fd25 100644
--- a/aggregator/src/aggregation/rlc/gates.rs
+++ b/aggregator/src/aggregation/rlc/gates.rs
@@ -13,8 +13,8 @@ use super::RlcConfig;

 const FIXED_OFFSET_32: usize = MAX_AGG_SNARKS + 1;
 const FIXED_OFFSET_168: usize = FIXED_OFFSET_32 + 1;
-const FIXED_OFFSET_200: usize = FIXED_OFFSET_168 + 1;
-const FIXED_OFFSET_2_POW_32: usize = FIXED_OFFSET_200 + 1;
+const FIXED_OFFSET_232: usize = FIXED_OFFSET_168 + 1;
+const FIXED_OFFSET_2_POW_32: usize = FIXED_OFFSET_232 + 1;
 const FIXED_OFFSET_256: usize = FIXED_OFFSET_2_POW_32 + 1;
 const FIXED_OFFSET_EMPTY_KECCAK: usize = FIXED_OFFSET_256 + POWS_OF_256;

@@ -33,7 +33,7 @@ impl RlcConfig {
     /// | MAX_AGG_SNARKS     | MAX_AGG_SNARKS |
     /// | MAX_AGG_SNARKS + 1 | 32             |
     /// | MAX_AGG_SNARKS + 2 | 168            |
-    /// | MAX_AGG_SNARKS + 3 | 200            |
+    /// | MAX_AGG_SNARKS + 3 | 232            |
     /// | MAX_AGG_SNARKS + 4 | 2 ^ 32         |
     /// | MAX_AGG_SNARKS + 5 | 256            |
     /// | MAX_AGG_SNARKS + 6 | 256 ^ 2        |
@@ -60,8 +60,8 @@ impl RlcConfig {
         }
         assert_eq!(offset, FIXED_OFFSET_32);

-        // [32, 168, 200, 1 << 32]
-        for const_val in [32, 168, 200, 1 << 32] {
+        // [32, 168, 232, 1 << 32]
+        for const_val in [32, 168, 232, 1 << 32] {
             region.assign_fixed(
                 || format!("const at offset={offset}"),
                 self.fixed,
@@ -189,10 +189,10 @@ impl RlcConfig {
     }

     #[inline]
-    pub(crate) fn two_hundred_cell(&self, region_index: RegionIndex) -> Cell {
+    pub(crate) fn two_hundred_and_thirty_two_cell(&self, region_index: RegionIndex) -> Cell {
         Cell {
             region_index,
-            row_offset: FIXED_OFFSET_200,
+            row_offset: FIXED_OFFSET_232,
             column: self.fixed.into(),
         }
     }
diff --git a/aggregator/src/batch.rs b/aggregator/src/batch.rs
index d5550987f8..d865f25835 100644
--- a/aggregator/src/batch.rs
+++ b/aggregator/src/batch.rs
@@ -1,7 +1,7 @@
 //! This module implements related functions that aggregates public inputs of many chunks into a
 //! single one.

-use eth_types::{Field, ToBigEndian, H256, U256};
+use eth_types::{Field, ToBigEndian, H256};
 use ethers_core::utils::keccak256;

 use crate::{
@@ -34,7 +34,10 @@ pub struct BatchHash {
     pub(crate) public_input_hash: H256,
     /// The number of chunks that contain meaningful data, i.e. not padded chunks.
     pub(crate) number_of_valid_chunks: usize,
+    /// 4844-Blob related fields.
     pub(crate) blob: BlobAssignments,
+    /// The 4844 versioned hash for the blob.
+    pub(crate) versioned_hash: H256,
 }

@@ -117,6 +120,7 @@ impl BatchHash {

         let blob_data = BlobData::new(number_of_valid_chunks, chunks_with_padding);
         let blob_assignments = BlobAssignments::from(&blob_data);
+        let versioned_hash = blob_data.get_versioned_hash();

         // public input hash is build as
         // keccak(
@@ -126,7 +130,8 @@ impl BatchHash {
         //     chunk[k-1].withdraw_root ||
         //     batch_data_hash ||
         //     z ||
-        //     y
+        //     y ||
+        //     versioned_hash
         // )
         let preimage = [
             chunks_with_padding[0].chain_id.to_be_bytes().as_ref(),
@@ -140,6 +145,7 @@ impl BatchHash {
             batch_data_hash.as_slice(),
             blob_assignments.challenge.to_be_bytes().as_ref(),
             blob_assignments.evaluation.to_be_bytes().as_ref(),
+            versioned_hash.as_bytes(),
         ]
         .concat();
         let public_input_hash: H256 = keccak256(preimage).into();
@@ -155,9 +161,10 @@ impl BatchHash {
             chain_id: chunks_with_padding[0].chain_id,
             chunks_with_padding: chunks_with_padding.to_vec(),
             data_hash: batch_data_hash.into(),
-            blob: blob_assignments,
             public_input_hash,
             number_of_valid_chunks,
+            blob: blob_assignments,
+            versioned_hash,
         }
     }

@@ -187,15 +194,9 @@ impl BatchHash {
         //      chunk[k-1].withdraw_root ||
         //      batch_data_hash ||
         //      z ||
-        //      y )
-        // TODO: make BLS_MODULUS into a static variable using lazy_static!()
-        let (_, z) = self.blob.challenge_digest.div_mod(
-            U256::from_str_radix(
-                "0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001",
-                16,
-            )
-            .unwrap(),
-        );
+        //      y ||
+        //      blob_versioned_hash
+        // )
         let batch_public_input_hash_preimage = [
             self.chain_id.to_be_bytes().as_ref(),
             self.chunks_with_padding[0].prev_state_root.as_bytes(),
@@ -206,8 +207,9 @@ impl BatchHash {
                 .withdraw_root
                 .as_bytes(),
             self.data_hash.as_bytes(),
-            &z.to_be_bytes(),
+            &self.blob.challenge.to_be_bytes(),
             &self.blob.evaluation.to_be_bytes(),
+            self.versioned_hash.as_bytes(),
         ]
         .concat();
         res.push(batch_public_input_hash_preimage);
diff --git a/aggregator/src/blob.rs b/aggregator/src/blob.rs
index 7954338f97..323dbe77c4 100644
--- a/aggregator/src/blob.rs
+++ b/aggregator/src/blob.rs
@@ -3,14 +3,22 @@ use crate::{
     BatchHash, ChunkHash, MAX_AGG_SNARKS,
 };

-use eth_types::U256;
-use ethers_core::utils::keccak256;
+use eth_types::{ToBigEndian, H256, U256};
+use ethers_core::{
+    k256::sha2::{Digest, Sha256},
+    utils::keccak256,
+};
 use halo2_proofs::{
     circuit::Value,
     halo2curves::{bls12_381::Scalar, bn256::Fr},
 };
 use itertools::Itertools;
-use std::iter::{once, repeat};
+use once_cell::sync::Lazy;
+use revm_primitives::VERSIONED_HASH_VERSION_KZG;
+use std::{
+    iter::{once, repeat},
+    sync::Arc,
+};
 use zkevm_circuits::util::Challenges;

@@ -43,7 +51,11 @@ pub const N_ROWS_METADATA: usize = N_ROWS_NUM_CHUNKS + N_ROWS_CHUNK_SIZES;
 pub const N_ROWS_DATA: usize = N_BLOB_BYTES - N_ROWS_METADATA;

 /// The number of rows in Blob Data config's layout to represent the "digest rlc" section.
-pub const N_ROWS_DIGEST_RLC: usize = 1 + 1 + MAX_AGG_SNARKS;
+/// - metadata digest RLC (1 row)
+/// - chunk_digests RLC for each chunk (MAX_AGG_SNARKS rows)
+/// - blob versioned hash RLC (1 row)
+/// - challenge digest RLC (1 row)
+pub const N_ROWS_DIGEST_RLC: usize = 1 + MAX_AGG_SNARKS + 1 + 1;

 /// The number of rows in Blob Data config's layout to represent the "digest bytes" section.
 pub const N_ROWS_DIGEST_BYTES: usize = N_ROWS_DIGEST_RLC * N_BYTES_U256;
@@ -54,6 +66,17 @@ pub const N_ROWS_DIGEST: usize = N_ROWS_DIGEST_RLC + N_ROWS_DIGEST_BYTES;

 /// The total number of rows used in Blob Data config's layout.
 pub const N_ROWS_BLOB_DATA_CONFIG: usize = N_ROWS_METADATA + N_ROWS_DATA + N_ROWS_DIGEST;

+/// KZG trusted setup
+pub static KZG_TRUSTED_SETUP: Lazy<Arc<c_kzg::KzgSettings>> = Lazy::new(|| {
+    Arc::new(
+        c_kzg::KzgSettings::load_trusted_setup(
+            &revm_primitives::kzg::G1_POINTS.0,
+            &revm_primitives::kzg::G2_POINTS.0,
+        )
+        .expect("failed to load trusted setup"),
+    )
+});
+
 /// Helper struct to generate witness for the Blob Data Config.
 #[derive(Clone, Debug)]
 pub struct BlobData {
@@ -125,6 +148,12 @@ impl Default for BlobData {
     }
 }

+fn kzg_to_versioned_hash(commitment: &c_kzg::KzgCommitment) -> H256 {
+    let mut res = Sha256::digest(commitment.as_slice());
+    res[0] = VERSIONED_HASH_VERSION_KZG;
+    H256::from_slice(&res[..])
+}
+
 impl BlobData {
     pub(crate) fn new(num_valid_chunks: usize, chunks_with_padding: &[ChunkHash]) -> Self {
         assert!(num_valid_chunks > 0);
@@ -165,21 +194,40 @@ impl BlobData {
     }
 }

 impl BlobData {
+    /// Get the versioned hash as per EIP-4844.
+    pub(crate) fn get_versioned_hash(&self) -> H256 {
+        let coefficients = self.get_coefficients();
+        let blob = c_kzg::Blob::from_bytes(
+            &coefficients
+                .iter()
+                .cloned()
+                .flat_map(|coeff| coeff.to_be_bytes())
+                .collect::<Vec<_>>(),
+        )
+        .expect("blob-coefficients to 4844 blob should succeed");
+        let c = c_kzg::KzgCommitment::blob_to_kzg_commitment(&blob, &KZG_TRUSTED_SETUP)
+            .expect("blob to kzg commitment should succeed");
+        kzg_to_versioned_hash(&c)
+    }
+
     /// Get the preimage of the challenge digest.
     pub(crate) fn get_challenge_digest_preimage(&self) -> Vec<u8> {
         let metadata_digest = keccak256(self.to_metadata_bytes());
         let chunk_digests = self.chunk_data.iter().map(keccak256);
+        let blob_versioned_hash = self.get_versioned_hash();

         // preimage =
         //     metadata_digest ||
         //     chunk[0].chunk_data_digest || ...
-        //     chunk[MAX_AGG_SNARKS-1].chunk_data_digest
+        //     chunk[MAX_AGG_SNARKS-1].chunk_data_digest ||
+        //     blob_versioned_hash
         //
         // where chunk_data_digest for a padded chunk is set equal to the "last valid chunk"'s
         // chunk_data_digest.
         metadata_digest
             .into_iter()
             .chain(chunk_digests.flatten())
+            .chain(blob_versioned_hash.to_fixed_bytes())
             .collect::<Vec<u8>>()
     }
@@ -413,15 +461,23 @@ impl BlobData {
             acc * challenge.evm_word() + Value::known(Fr::from(byte as u64))
         });

+        // blob versioned hash
+        let versioned_hash = self.get_versioned_hash();
+        let versioned_hash_rlc = versioned_hash.as_bytes().iter().fold(zero, |acc, &byte| {
+            acc * challenge.evm_word() + Value::known(Fr::from(byte as u64))
+        });
+
         // - metadata digest rlc
         // - chunks[i].chunk_data_digest rlc for each chunk
+        // - versioned hash rlc
         // - challenge digest rlc
         // - metadata digest bytes
         // - chunks[i].chunk_data_digest bytes for each chunk
+        // - versioned hash bytes
         // - challenge digest bytes
         once(BlobDataRow {
-            digest_rlc: metadata_digest_rlc,
             preimage_rlc: Value::known(Fr::zero()),
+            digest_rlc: metadata_digest_rlc,
             // this is_padding assignment does not matter as we have already crossed the "chunk
             // data" section. This assignment to 1 is simply to allow the custom gate to check:
             // - padding transitions from 0 -> 1 only once.
@@ -434,38 +490,51 @@ impl BlobData {
                 .zip_eq(self.chunk_sizes.iter())
                 .enumerate()
                 .map(|(i, (&digest_rlc, &chunk_size))| BlobDataRow {
+                    preimage_rlc: Value::known(Fr::zero()),
                     digest_rlc,
                     chunk_idx: (i + 1) as u64,
                     accumulator: chunk_size as u64,
-                    preimage_rlc: Value::known(Fr::zero()),
                     ..Default::default()
                 }),
         )
+        // versioned hash RLC
+        .chain(once(BlobDataRow {
+            preimage_rlc: Value::known(Fr::zero()),
+            digest_rlc: versioned_hash_rlc,
+            ..Default::default()
+        }))
         .chain(once(BlobDataRow {
             preimage_rlc: challenge_digest_preimage_rlc,
             digest_rlc: challenge_digest_rlc,
-            accumulator: 32 * (MAX_AGG_SNARKS + 1) as u64,
+            accumulator: 32 * (MAX_AGG_SNARKS + 1 + 1) as u64,
             is_boundary: true,
             ..Default::default()
         }))
         .chain(metadata_digest.iter().map(|&byte| BlobDataRow {
-            byte,
             preimage_rlc: Value::known(Fr::zero()),
             digest_rlc: Value::known(Fr::zero()),
+            byte,
             ..Default::default()
         }))
         .chain(chunk_digests.iter().flat_map(|digest| {
             digest.iter().map(|&byte| BlobDataRow {
-                byte,
                 preimage_rlc: Value::known(Fr::zero()),
                 digest_rlc: Value::known(Fr::zero()),
+                byte,
                 ..Default::default()
             })
         }))
-        .chain(challenge_digest.iter().map(|&byte| BlobDataRow {
+        // bytes of versioned hash
+        .chain(versioned_hash.as_bytes().iter().map(|&byte| BlobDataRow {
+            preimage_rlc: Value::known(Fr::zero()),
+            digest_rlc: Value::known(Fr::zero()),
             byte,
+            ..Default::default()
+        }))
+        .chain(challenge_digest.iter().map(|&byte| BlobDataRow {
             preimage_rlc: Value::known(Fr::zero()),
             digest_rlc: Value::known(Fr::zero()),
+            byte,
             ..Default::default()
         }))
         .collect()
@@ -512,11 +581,7 @@ impl From<&BlobData> for BlobAssignments {

         // y = P(z)
         let evaluation = U256::from_little_endian(
-            &interpolate(
-                Scalar::from_raw(challenge_digest.0),
-                &coefficients_as_scalars,
-            )
-            .to_bytes(),
+            &interpolate(Scalar::from_raw(challenge.0), &coefficients_as_scalars).to_bytes(),
         );

         Self {
@@ -636,12 +701,15 @@ mod tests {
         let default_metadata_digest = keccak256(default_metadata);
         let default_chunk_digests = [keccak256([]); MAX_AGG_SNARKS];

+        let default_blob = BlobData::default();
+        let versioned_hash = default_blob.get_versioned_hash();
         assert_eq!(
-            BlobData::default().get_challenge_digest(),
+            default_blob.get_challenge_digest(),
             U256::from(keccak256(
                 default_metadata_digest
                     .into_iter()
                     .chain(default_chunk_digests.into_iter().flatten())
+                    .chain(versioned_hash.to_fixed_bytes())
                     .collect::<Vec<u8>>()
             )),
         )
diff --git a/aggregator/src/constants.rs b/aggregator/src/constants.rs
index aea4172918..bdefc6709b 100644
--- a/aggregator/src/constants.rs
+++ b/aggregator/src/constants.rs
@@ -44,9 +44,11 @@ pub(crate) const CHUNK_TX_DATA_HASH_INDEX: usize = 136;
 // - chunk_data_hash    32 bytes
 // - z                  32 bytes
 // - y                  32 bytes
+// - versioned_hash     32 bytes
 pub(crate) const BATCH_Z_OFFSET: usize = 136;
 pub(crate) const BATCH_Y_OFFSET: usize = 168;
+pub(crate) const BATCH_VH_OFFSET: usize = 200;

 // ================================
 // aggregator parameters
diff --git a/aggregator/src/core.rs b/aggregator/src/core.rs
index 8dda2fc077..340a66cd06 100644
--- a/aggregator/src/core.rs
+++ b/aggregator/src/core.rs
@@ -34,8 +34,8 @@ use zkevm_circuits::{

 use crate::{
     constants::{
-        BATCH_Y_OFFSET, BATCH_Z_OFFSET, CHAIN_ID_LEN, DIGEST_LEN, INPUT_LEN_PER_ROUND, LOG_DEGREE,
-        MAX_AGG_SNARKS,
+        BATCH_VH_OFFSET, BATCH_Y_OFFSET, BATCH_Z_OFFSET, CHAIN_ID_LEN, DIGEST_LEN,
+        INPUT_LEN_PER_ROUND, LOG_DEGREE, MAX_AGG_SNARKS,
     },
     util::{
         assert_conditional_equal, assert_equal, assert_exist, get_indices, get_max_keccak_updates,
@@ -162,6 +162,7 @@ pub(crate) struct ExtractedHashCells {

 pub(crate) struct ExpectedBlobCells {
     pub(crate) z: Vec<AssignedCell<Fr, Fr>>,
     pub(crate) y: Vec<AssignedCell<Fr, Fr>>,
+    pub(crate) versioned_hash: Vec<AssignedCell<Fr, Fr>>,
     pub(crate) chunk_tx_data_digests: Vec<Vec<AssignedCell<Fr, Fr>>>,
 }
@@ -238,6 +239,7 @@ pub(crate) fn assign_batch_hashes(
     let expected_blob_cells = ExpectedBlobCells {
         z: batch_pi_input[BATCH_Z_OFFSET..BATCH_Z_OFFSET + 32].to_vec(),
         y: batch_pi_input[BATCH_Y_OFFSET..BATCH_Y_OFFSET + 32].to_vec(),
+        versioned_hash: batch_pi_input[BATCH_VH_OFFSET..BATCH_VH_OFFSET + 32].to_vec(),
         chunk_tx_data_digests: (0..MAX_AGG_SNARKS)
             .map(|i| {
                 let chunk_pi_input = &extracted_hash_cells.hash_input_cells
@@ -858,16 +860,16 @@ pub(crate) fn conditional_constraints(
     }

     // 7. the hash input length are correct
-    // - hashes[0] has 200 bytes
-    // - hashes[1..MAX_AGG_SNARKS+1] has 168 bytes input
+    // - hashes[0] has 232 bytes (preimage of batch pi hash)
+    // - hashes[1..MAX_AGG_SNARKS+1] has 168 bytes input (preimage of chunk pi hash)
     // - batch's data_hash length is 32 * number_of_valid_snarks
-    // - hashes[0] has 200 bytes
     // note: hash_input_len_cells[0] is from dummy rows of keccak circuit.

     let batch_pi_hash_input_cell = hash_input_len_cells[2].cell();
     region.constrain_equal(
         batch_pi_hash_input_cell,
-        rlc_config.two_hundred_cell(batch_pi_hash_input_cell.region_index),
+        rlc_config
+            .two_hundred_and_thirty_two_cell(batch_pi_hash_input_cell.region_index),
     )?;

     // - hashes[1..MAX_AGG_SNARKS+1] has 168 bytes input
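
Editor's note, not part of the patch: the EIP-4844 versioned-hash computation that this change threads through BlobData, BatchHash and the blob data config reduces to the standalone sketch below. It only uses calls that already appear in the patch (c_kzg, once_cell, revm_primitives, and ethers_core's sha2 re-export); the names SETUP and versioned_hash_of_blob are illustrative, and blob_bytes is assumed to be the 4096 blob coefficients serialized as 32 big-endian bytes each, i.e. what BlobData::get_coefficients() produces.

use ethers_core::k256::sha2::{Digest, Sha256};
use once_cell::sync::Lazy;
use revm_primitives::VERSIONED_HASH_VERSION_KZG;
use std::sync::Arc;

// Trusted setup loaded once, mirroring KZG_TRUSTED_SETUP in aggregator/src/blob.rs.
static SETUP: Lazy<Arc<c_kzg::KzgSettings>> = Lazy::new(|| {
    Arc::new(
        c_kzg::KzgSettings::load_trusted_setup(
            &revm_primitives::kzg::G1_POINTS.0,
            &revm_primitives::kzg::G2_POINTS.0,
        )
        .expect("failed to load trusted setup"),
    )
});

/// Sketch of BlobData::get_versioned_hash: blob bytes -> KZG commitment -> versioned hash.
fn versioned_hash_of_blob(blob_bytes: &[u8]) -> eth_types::H256 {
    // 1. interpret the 4096 * 32 coefficient bytes as an EIP-4844 blob,
    let blob = c_kzg::Blob::from_bytes(blob_bytes).expect("well-formed blob bytes");
    // 2. commit to the blob polynomial with KZG,
    let commitment = c_kzg::KzgCommitment::blob_to_kzg_commitment(&blob, &SETUP)
        .expect("blob to kzg commitment should succeed");
    // 3. versioned hash = sha256(commitment) with byte 0 overwritten by the KZG version tag (0x01).
    let mut digest = Sha256::digest(commitment.as_slice());
    digest[0] = VERSIONED_HASH_VERSION_KZG;
    eth_types::H256::from_slice(&digest)
}

This 32-byte value is what the patch appends to both the challenge-digest preimage (blob.rs) and the batch public-input-hash preimage (batch.rs, at BATCH_VH_OFFSET = 200), which is why the corresponding keccak preimage lengths grow from 32 * (MAX_AGG_SNARKS + 1) to 32 * (MAX_AGG_SNARKS + 2) bytes and from 200 to 232 bytes respectively.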