diff --git a/Cargo.lock b/Cargo.lock index c08bc4e66..49cbd7fb7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1571,7 +1571,6 @@ dependencies = [ "multiexp", "rand_core", "schnorr-signatures", - "serde", "std-shims", "thiserror", "zeroize", @@ -7343,7 +7342,6 @@ dependencies = [ "log", "modular-frost", "parity-scale-codec", - "rand_chacha", "rand_core", "schnorr-signatures", "serai-client", diff --git a/coins/bitcoin/src/wallet/send.rs b/coins/bitcoin/src/wallet/send.rs index a33fe4f19..5235ae888 100644 --- a/coins/bitcoin/src/wallet/send.rs +++ b/coins/bitcoin/src/wallet/send.rs @@ -358,7 +358,7 @@ impl SignMachine for TransactionSignMachine { _: (), _: ThresholdKeys, _: CachedPreprocess, - ) -> Result { + ) -> (Self, Self::Preprocess) { unimplemented!( "Bitcoin transactions don't support caching their preprocesses due to {}", "being already bound to a specific transaction" diff --git a/coins/monero/src/wallet/send/multisig.rs b/coins/monero/src/wallet/send/multisig.rs index eecfd3fe7..7b23d6cb0 100644 --- a/coins/monero/src/wallet/send/multisig.rs +++ b/coins/monero/src/wallet/send/multisig.rs @@ -226,7 +226,7 @@ impl SignMachine for TransactionSignMachine { ); } - fn from_cache(_: (), _: ThresholdKeys, _: CachedPreprocess) -> Result { + fn from_cache(_: (), _: ThresholdKeys, _: CachedPreprocess) -> (Self, Self::Preprocess) { unimplemented!( "Monero transactions don't support caching their preprocesses due to {}", "being already bound to a specific transaction" diff --git a/common/db/src/create_db.rs b/common/db/src/create_db.rs index 1c5bfad1e..a1e182a9f 100644 --- a/common/db/src/create_db.rs +++ b/common/db/src/create_db.rs @@ -45,6 +45,7 @@ macro_rules! create_db { pub struct $field_name; impl $field_name { pub fn key($($arg: $arg_type),*) -> Vec { + use scale::Encode; $crate::serai_db_key( stringify!($db_name).as_bytes(), stringify!($field_name).as_bytes(), diff --git a/coordinator/Cargo.toml b/coordinator/Cargo.toml index 0efac51b6..cbcb3aebb 100644 --- a/coordinator/Cargo.toml +++ b/coordinator/Cargo.toml @@ -18,7 +18,6 @@ async-trait = { version = "0.1", default-features = false } zeroize = { version = "^1.5", default-features = false, features = ["std"] } rand_core = { version = "0.6", default-features = false, features = ["std"] } -rand_chacha = { version = "0.3", default-features = false, features = ["std"] } blake2 = { version = "0.10", default-features = false, features = ["std"] } @@ -38,7 +37,7 @@ message-queue = { package = "serai-message-queue", path = "../message-queue" } tributary = { package = "tributary-chain", path = "./tributary" } sp-application-crypto = { git = "https://github.com/serai-dex/substrate", default-features = false, features = ["std"] } -serai-client = { path = "../substrate/client", default-features = false, features = ["serai"] } +serai-client = { path = "../substrate/client", default-features = false, features = ["serai", "borsh"] } hex = { version = "0.4", default-features = false, features = ["std"] } borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } diff --git a/coordinator/src/db.rs b/coordinator/src/db.rs index 560946bc9..810bc2751 100644 --- a/coordinator/src/db.rs +++ b/coordinator/src/db.rs @@ -4,6 +4,7 @@ use blake2::{ }; use scale::Encode; +use borsh::{BorshSerialize, BorshDeserialize}; use serai_client::{ primitives::NetworkId, validator_sets::primitives::{Session, ValidatorSet}, @@ -20,7 +21,6 @@ create_db!( HandledMessageDb: (network: NetworkId) -> u64, ActiveTributaryDb: () -> Vec, 
RetiredTributaryDb: (set: ValidatorSet) -> (), - SignedTransactionDb: (order: &[u8], nonce: u32) -> Vec, FirstPreprocessDb: ( network: NetworkId, id_type: RecognizedIdType, @@ -43,7 +43,7 @@ impl ActiveTributaryDb { let mut tributaries = vec![]; while !bytes_ref.is_empty() { - tributaries.push(TributarySpec::read(&mut bytes_ref).unwrap()); + tributaries.push(TributarySpec::deserialize_reader(&mut bytes_ref).unwrap()); } (bytes, tributaries) @@ -57,7 +57,7 @@ impl ActiveTributaryDb { } } - spec.write(&mut existing_bytes).unwrap(); + spec.serialize(&mut existing_bytes).unwrap(); ActiveTributaryDb::set(txn, &existing_bytes); } @@ -72,28 +72,13 @@ impl ActiveTributaryDb { let mut bytes = vec![]; for active in active { - active.write(&mut bytes).unwrap(); + active.serialize(&mut bytes).unwrap(); } Self::set(txn, &bytes); RetiredTributaryDb::set(txn, set, &()); } } -impl SignedTransactionDb { - pub fn take_signed_transaction( - txn: &mut impl DbTxn, - order: &[u8], - nonce: u32, - ) -> Option { - let res = SignedTransactionDb::get(txn, order, nonce) - .map(|bytes| Transaction::read(&mut bytes.as_slice()).unwrap()); - if res.is_some() { - Self::del(txn, order, nonce); - } - res - } -} - impl FirstPreprocessDb { pub fn save_first_preprocess( txn: &mut impl DbTxn, diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index e67684502..840af4a2f 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -31,12 +31,12 @@ use tokio::{ time::sleep, }; -use ::tributary::{ - ProvidedError, TransactionKind, TransactionError, TransactionTrait, Block, Tributary, -}; +use ::tributary::{ProvidedError, TransactionKind, TransactionTrait, Block, Tributary}; mod tributary; -use crate::tributary::{TributarySpec, SignData, Transaction, scanner::RecognizedIdType, PlanIds}; +use crate::tributary::{ + TributarySpec, Label, SignData, Transaction, scanner::RecognizedIdType, PlanIds, +}; mod db; use db::*; @@ -126,48 +126,6 @@ async fn add_tributary( .unwrap(); } -async fn publish_signed_transaction( - txn: &mut D::Transaction<'_>, - tributary: &Tributary, - tx: Transaction, -) { - log::debug!("publishing transaction {}", hex::encode(tx.hash())); - - let (order, signer) = if let TransactionKind::Signed(order, signed) = tx.kind() { - let signer = signed.signer; - - // Safe as we should deterministically create transactions, meaning if this is already on-disk, - // it's what we're saving now - SignedTransactionDb::set(txn, &order, signed.nonce, &tx.serialize()); - - (order, signer) - } else { - panic!("non-signed transaction passed to publish_signed_transaction"); - }; - - // If we're trying to publish 5, when the last transaction published was 3, this will delay - // publication until the point in time we publish 4 - while let Some(tx) = SignedTransactionDb::take_signed_transaction( - txn, - &order, - tributary - .next_nonce(&signer, &order) - .await - .expect("we don't have a nonce, meaning we aren't a participant on this tributary"), - ) { - // We need to return a proper error here to enable that, due to a race condition around - // multiple publications - match tributary.add_transaction(tx.clone()).await { - Ok(_) => {} - // Some asynchonicity if InvalidNonce, assumed safe to deterministic nonces - Err(TransactionError::InvalidNonce) => { - log::warn!("publishing TX {tx:?} returned InvalidNonce. 
was it already added?") - } - Err(e) => panic!("created an invalid transaction: {e:?}"), - } - } -} - // TODO: Find a better pattern for this static HANDOVER_VERIFY_QUEUE_LOCK: OnceLock> = OnceLock::new(); @@ -317,7 +275,9 @@ async fn handle_processor_message( BatchDb::set(&mut txn, batch.batch.network, batch.batch.id, &batch.clone()); // Get the next-to-execute batch ID - let mut next = substrate::get_expected_next_batch(serai, network).await; + let Ok(mut next) = substrate::expected_next_batch(serai, network).await else { + return false; + }; // Since we have a new batch, publish all batches yet to be published to Serai // This handles the edge-case where batch n+1 is signed before batch n is @@ -329,7 +289,10 @@ async fn handle_processor_message( while let Some(batch) = batches.pop_front() { // If this Batch should no longer be published, continue - if substrate::get_expected_next_batch(serai, network).await > batch.batch.id { + let Ok(expected_next_batch) = substrate::expected_next_batch(serai, network).await else { + return false; + }; + if expected_next_batch > batch.batch.id { continue; } @@ -398,7 +361,11 @@ async fn handle_processor_message( let txs = match msg.msg.clone() { ProcessorMessage::KeyGen(inner_msg) => match inner_msg { key_gen::ProcessorMessage::Commitments { id, commitments } => { - vec![Transaction::DkgCommitments(id.attempt, commitments, Transaction::empty_signed())] + vec![Transaction::DkgCommitments { + attempt: id.attempt, + commitments, + signed: Transaction::empty_signed(), + }] } key_gen::ProcessorMessage::InvalidCommitments { id: _, faulty } => { // This doesn't need the ID since it's a Provided transaction which everyone will provide @@ -411,7 +378,7 @@ async fn handle_processor_message( } key_gen::ProcessorMessage::Shares { id, mut shares } => { // Create a MuSig-based machine to inform Substrate of this key generation - let nonces = crate::tributary::dkg_confirmation_nonces(key, spec, id.attempt); + let nonces = crate::tributary::dkg_confirmation_nonces(key, spec, &mut txn, id.attempt); let our_i = spec .i(pub_key) @@ -449,7 +416,7 @@ async fn handle_processor_message( // As for the safety of calling error_generating_key_pair, the processor is presumed // to only send InvalidShare or GeneratedKeyPair for a given attempt let mut txs = if let Some(faulty) = - crate::tributary::error_generating_key_pair::<_>(&txn, key, spec, id.attempt) + crate::tributary::error_generating_key_pair(&mut txn, key, spec, id.attempt) { vec![Transaction::RemoveParticipant(faulty)] } else { @@ -480,7 +447,11 @@ async fn handle_processor_message( match share { Ok(share) => { - vec![Transaction::DkgConfirmed(id.attempt, share, Transaction::empty_signed())] + vec![Transaction::DkgConfirmed { + attempt: id.attempt, + confirmation_share: share, + signed: Transaction::empty_signed(), + }] } Err(p) => { vec![Transaction::RemoveParticipant(p)] @@ -511,18 +482,20 @@ async fn handle_processor_message( vec![] } else { - vec![Transaction::SignPreprocess(SignData { + vec![Transaction::Sign(SignData { plan: id.id, attempt: id.attempt, + label: Label::Preprocess, data: preprocesses, signed: Transaction::empty_signed(), })] } } sign::ProcessorMessage::Share { id, shares } => { - vec![Transaction::SignShare(SignData { + vec![Transaction::Sign(SignData { plan: id.id, attempt: id.attempt, + label: Label::Share, data: shares, signed: Transaction::empty_signed(), })] @@ -555,9 +528,10 @@ async fn handle_processor_message( vec![] } coordinator::ProcessorMessage::CosignPreprocess { id, preprocesses } => 
{ - vec![Transaction::SubstratePreprocess(SignData { + vec![Transaction::SubstrateSign(SignData { plan: id.id, attempt: id.attempt, + label: Label::Preprocess, data: preprocesses.into_iter().map(Into::into).collect(), signed: Transaction::empty_signed(), })] } @@ -586,13 +560,13 @@ async fn handle_processor_message( preprocesses.into_iter().map(Into::into).collect(), ); - let intended = Transaction::Batch( - block.0, - match id.id { + let intended = Transaction::Batch { + block: block.0, + batch: match id.id { SubstrateSignableId::Batch(id) => id, _ => panic!("BatchPreprocess did not contain Batch ID"), }, - ); + }; // If this is the new key's first Batch, only create this TX once we verify all prior published `Batch`s @@ -649,18 +623,20 @@ async fn handle_processor_message( res } } else { - vec![Transaction::SubstratePreprocess(SignData { + vec![Transaction::SubstrateSign(SignData { plan: id.id, attempt: id.attempt, + label: Label::Preprocess, data: preprocesses.into_iter().map(Into::into).collect(), signed: Transaction::empty_signed(), })] } } coordinator::ProcessorMessage::SubstrateShare { id, shares } => { - vec![Transaction::SubstrateShare(SignData { + vec![Transaction::SubstrateSign(SignData { plan: id.id, attempt: id.attempt, + label: Label::Share, data: shares.into_iter().map(|share| share.to_vec()).collect(), signed: Transaction::empty_signed(), })] } @@ -706,7 +682,7 @@ async fn handle_processor_message( } TransactionKind::Signed(_, _) => { tx.sign(&mut OsRng, genesis, key); - publish_signed_transaction(&mut txn, tributary, tx).await; + tributary::publish_signed_transaction(&mut txn, tributary, tx).await; } } } @@ -1079,16 +1055,18 @@ pub async fn run( }; let mut tx = match id_type { - RecognizedIdType::Batch => Transaction::SubstratePreprocess(SignData { + RecognizedIdType::Batch => Transaction::SubstrateSign(SignData { data: get_preprocess(&raw_db, id_type, &id).await, plan: SubstrateSignableId::Batch(id.as_slice().try_into().unwrap()), + label: Label::Preprocess, attempt: 0, signed: Transaction::empty_signed(), }), - RecognizedIdType::Plan => Transaction::SignPreprocess(SignData { + RecognizedIdType::Plan => Transaction::Sign(SignData { data: get_preprocess(&raw_db, id_type, &id).await, plan: id.try_into().unwrap(), + label: Label::Preprocess, attempt: 0, signed: Transaction::empty_signed(), }), @@ -1119,7 +1097,7 @@ pub async fn run( // TODO: Should this not take a txn accordingly? It's best practice to take a txn, yet // taking a txn fails to declare its achieved independence let mut txn = raw_db.txn(); - publish_signed_transaction(&mut txn, tributary, tx).await; + tributary::publish_signed_transaction(&mut txn, tributary, tx).await; txn.commit(); break; } diff --git a/coordinator/src/substrate/cosign.rs b/coordinator/src/substrate/cosign.rs index ffb5d202b..7d2b41d3d 100644 --- a/coordinator/src/substrate/cosign.rs +++ b/coordinator/src/substrate/cosign.rs @@ -12,57 +12,48 @@ ensure any block needing to be cosigned is cosigned within a reasonable amount of time.
*/ -use core::{ops::Deref, time::Duration}; -use std::{ - sync::Arc, - collections::{HashSet, HashMap}, -}; - use zeroize::Zeroizing; -use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto}; +use ciphersuite::{Ciphersuite, Ristretto}; + +use borsh::{BorshSerialize, BorshDeserialize}; -use scale::{Encode, Decode}; use serai_client::{ - SeraiError, Block, Serai, TemporalSerai, - primitives::{BlockHash, NetworkId}, - validator_sets::{ - primitives::{Session, ValidatorSet, KeyPair, amortize_excess_key_shares}, - ValidatorSetsEvent, - }, - in_instructions::InInstructionsEvent, - coins::CoinsEvent, + SeraiError, Serai, + primitives::NetworkId, + validator_sets::primitives::{Session, ValidatorSet}, }; use serai_db::*; -use processor_messages::SubstrateContext; - -use tokio::{sync::mpsc, time::sleep}; - -use crate::{ - Db, - processors::Processors, - tributary::{TributarySpec, SeraiBlockNumber}, -}; +use crate::{Db, substrate::in_set, tributary::SeraiBlockNumber}; // 5 minutes, expressed in blocks // TODO: Pull a constant for block time const COSIGN_DISTANCE: u64 = 5 * 60 / 6; +#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] +enum HasEvents { + KeyGen, + Yes, + No, +} + create_db!( SubstrateCosignDb { - CosignTriggered: () -> (), IntendedCosign: () -> (u64, Option), - BlockHasEvents: (block: u64) -> u8, + BlockHasEvents: (block: u64) -> HasEvents, LatestCosignedBlock: () -> u64, } ); impl IntendedCosign { + // Sets the intended to cosign block, clearing the prior value entirely. pub fn set_intended_cosign(txn: &mut impl DbTxn, intended: u64) { Self::set(txn, &(intended, None::)); } + + // Sets the cosign skipped since the last intended to cosign block. pub fn set_skipped_cosign(txn: &mut impl DbTxn, skipped: u64) { let (intended, prior_skipped) = Self::get(txn).unwrap(); assert!(prior_skipped.is_none()); @@ -89,12 +80,6 @@ impl CosignTransactions { } } -#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, Decode)] -enum HasEvents { - KeyGen, - Yes, - No, -} async fn block_has_events( txn: &mut impl DbTxn, serai: &Serai, @@ -122,143 +107,193 @@ async fn block_has_events( let has_events = if has_no_events { HasEvents::No } else { HasEvents::Yes }; - let has_events = has_events.encode(); - assert_eq!(has_events.len(), 1); - BlockHasEvents::set(txn, block, &has_events[0]); + BlockHasEvents::set(txn, block, &has_events); Ok(HasEvents::Yes) } - Some(code) => Ok(HasEvents::decode(&mut [code].as_slice()).unwrap()), + Some(code) => Ok(code), } } -/* - Advances the cosign protocol as should be done per the latest block. - - A block is considered cosigned if: - A) It was cosigned - B) It's the parent of a cosigned block - C) It immediately follows a cosigned block and has no events requiring cosigning (TODO) -*/ -async fn advance_cosign_protocol(db: &mut impl Db, serai: &Serai, latest_number: u64) -> Result<(), ()> { - let Some((last_intended_to_cosign_block, mut skipped_block)) = IntendedCosign::get(&txn) else { - let mut txn = db.txn(); - IntendedCosign::set_intended_cosign(&mut txn, 1); - txn.commit(); - return Ok(()); - }; -} - -// If we haven't flagged skipped, and a block within the distance had events, flag the first -// such block as skipped -let mut distance_end_exclusive = last_intended_to_cosign_block + COSIGN_DISTANCE; -// If we've never triggered a cosign, don't skip any cosigns -if CosignTriggered::get(&txn).is_none() { - distance_end_exclusive = 0; -} -if skipped_block.is_none() { - for b in (last_intended_to_cosign_block + 1) .. 
distance_end_exclusive { - if b > latest_number { - break; - } - - if block_has_events(&mut txn, serai, b).await? == HasEvents::Yes { - skipped_block = Some(b); - log::debug!("skipping cosigning {b} due to proximity to prior cosign"); - IntendedCosign::set_skipped_cosign(&mut txn, b); - break; - } +async fn potentially_cosign_block( + txn: &mut impl DbTxn, + serai: &Serai, + block: u64, + skipped_block: Option<u64>, + window_end_exclusive: u64, +) -> Result<bool, SeraiError> { + // The following code regarding marking cosigned if the prior block is cosigned expects this + // block to not be zero + // While we could perform this check there, there's no reason not to optimize the entire function + // as such + if block == 0 { + return Ok(false); } -let mut has_no_cosigners = None; -let mut cosign = vec![]; + let block_has_events = block_has_events(txn, serai, block).await?; -// Block we should cosign no matter what if no prior blocks qualified for cosigning -let maximally_latent_cosign_block = - skipped_block.map(|skipped_block| skipped_block + COSIGN_DISTANCE); -for block in (last_intended_to_cosign_block + 1) ..= latest_number { - let actual_block = serai - .finalized_block_by_number(block) - .await? - .expect("couldn't get block which should've been finalized"); - SeraiBlockNumber::set(&mut txn, actual_block.hash(), &block); + // If this block had no events and immediately follows a cosigned block, mark it as cosigned + if (block_has_events == HasEvents::No) && + (LatestCosignedBlock::latest_cosigned_block(txn) == (block - 1)) + { + LatestCosignedBlock::set(txn, &block); + } - let mut set = false; + // If we skipped a block, we're supposed to cosign it within COSIGN_DISTANCE blocks if no other + // blocks trigger a cosigning protocol covering it + // This means there will be at most the maximum allowed delay between a block needing cosigning + // occurring and a cosign for it triggering + let maximally_latent_cosign_block = + skipped_block.map(|skipped_block| skipped_block + COSIGN_DISTANCE); - let block_has_events = block_has_events(&mut txn, serai, block).await?; - // If this block is within the distance, - if block < distance_end_exclusive { + // If this block is within the window, + if block < window_end_exclusive { // and set a key, cosign it if block_has_events == HasEvents::KeyGen { - IntendedCosign::set_intended_cosign(&mut txn, block); - set = true; + IntendedCosign::set_intended_cosign(txn, block); // Carry skipped if it isn't included by cosigning this block if let Some(skipped) = skipped_block { if skipped > block { - IntendedCosign::set_skipped_cosign(&mut txn, block); + IntendedCosign::set_skipped_cosign(txn, block); } } + return Ok(true); } - } else if (Some(block) == maximally_latent_cosign_block) || - (block_has_events != HasEvents::No) - { - // Since this block was outside the distance and had events/was maximally latent, cosign it - IntendedCosign::set_intended_cosign(&mut txn, block); - set = true; + } else if (Some(block) == maximally_latent_cosign_block) || (block_has_events != HasEvents::No) { + // Since this block was outside the window and had events/was maximally latent, cosign it + IntendedCosign::set_intended_cosign(txn, block); + return Ok(true); } + Ok(false) +} - if set { - // Get the keys as of the prior block - // That means if this block is setting new keys (which won't lock in until we process this - // block), we won't freeze up waiting for the yet-to-be-processed keys to sign this block - let serai = serai.as_of(actual_block.header.parent_hash.into()); - - has_no_cosigners =
Some(actual_block.clone()); - - for network in serai_client::primitives::NETWORKS { - // Get the latest session to have set keys - let Some(latest_session) = serai.validator_sets().session(network).await? else { - continue; - }; - let prior_session = Session(latest_session.0.saturating_sub(1)); - let set_with_keys = if serai - .validator_sets() - .keys(ValidatorSet { network, session: prior_session }) - .await? - .is_some() - { - ValidatorSet { network, session: prior_session } - } else { - let set = ValidatorSet { network, session: latest_session }; - if serai.validator_sets().keys(set).await?.is_none() { - continue; - } - set - }; +/* + Advances the cosign protocol as should be done per the latest block. + + A block is considered cosigned if: + A) It was cosigned + B) It's the parent of a cosigned block + C) It immediately follows a cosigned block and has no events requiring cosigning - // Since this is a valid cosigner, don't flag this block as having no cosigners - has_no_cosigners = None; - log::debug!("{:?} will be cosigning {block}", set_with_keys.network); + This only actually performs advancement within a limited bound (generally until it finds a block + which should be cosigned). Accordingly, it is necessary to call multiple times even if + `latest_number` doesn't change. +*/ +pub async fn advance_cosign_protocol( + db: &mut impl Db, + key: &Zeroizing<<Ristretto as Ciphersuite>::F>, + serai: &Serai, + latest_number: u64, +) -> Result<(), SeraiError> { + let mut txn = db.txn(); + + const INITIAL_INTENDED_COSIGN: u64 = 1; + let (last_intended_to_cosign_block, mut skipped_block) = { + let intended_cosign = IntendedCosign::get(&txn); + // If we haven't previously intended to cosign a block, set the intended cosign to 1 + if let Some(intended_cosign) = intended_cosign { + intended_cosign + } else { + IntendedCosign::set_intended_cosign(&mut txn, INITIAL_INTENDED_COSIGN); + IntendedCosign::get(&txn).unwrap() + } + }; - if in_set(key, &serai, set_with_keys).await?.unwrap() { - cosign.push((set_with_keys, block, actual_block.hash())); + // "windows" refers to the window of blocks where even if there's a block which should be + // cosigned, it won't be, due to proximity to the prior cosign + let mut window_end_exclusive = last_intended_to_cosign_block + COSIGN_DISTANCE; + // If we've never triggered a cosign, don't skip any cosigns based on proximity + if last_intended_to_cosign_block == INITIAL_INTENDED_COSIGN { + window_end_exclusive = 0; + } + + // Check all blocks within the window to see if they should be cosigned + // If so, we're skipping them and need to flag them as skipped so that once the window closes, we + // do cosign them + // We only perform this check if we haven't already marked a block as skipped, since the cosign + // caused by the skipped block will cosign all other blocks within this window + if skipped_block.is_none() { + for b in (last_intended_to_cosign_block + 1) .. window_end_exclusive.min(latest_number) { + if block_has_events(&mut txn, serai, b).await? == HasEvents::Yes { + skipped_block = Some(b); + log::debug!("skipping cosigning {b} due to proximity to prior cosign"); + IntendedCosign::set_skipped_cosign(&mut txn, b); + break; } } + } + + // A block which should be cosigned + let mut to_cosign = None; + // A list of sets which are cosigning, along with a boolean of if we're in the set + let mut cosigning = vec![]; + + for block in (last_intended_to_cosign_block + 1) ..= latest_number { + let actual_block = serai + .finalized_block_by_number(block) + .await?
+ .expect("couldn't get block which should've been finalized"); + + // Save the block number for this block, as needed by the cosigner to perform cosigning + SeraiBlockNumber::set(&mut txn, actual_block.hash(), &block); + + if potentially_cosign_block(&mut txn, serai, block, skipped_block, window_end_exclusive).await? + { + to_cosign = Some((block, actual_block.hash())); + + // Get the keys as of the prior block + // If this block sets new keys, the coordinator won't acknowledge them until we process this + // block + // We won't process this block until it's cosigned + // Using the keys of the prior block ensures this deadlock isn't reached + let serai = serai.as_of(actual_block.header.parent_hash.into()); + + for network in serai_client::primitives::NETWORKS { + // Get the latest session to have set keys + let set_with_keys = { + let Some(latest_session) = serai.validator_sets().session(network).await? else { + continue; + }; + let prior_session = Session(latest_session.0.saturating_sub(1)); + if serai + .validator_sets() + .keys(ValidatorSet { network, session: prior_session }) + .await? + .is_some() + { + ValidatorSet { network, session: prior_session } + } else { + let set = ValidatorSet { network, session: latest_session }; + if serai.validator_sets().keys(set).await?.is_none() { + continue; + } + set + } + }; + + log::debug!("{:?} will be cosigning {block}", set_with_keys.network); + cosigning.push((set_with_keys, in_set(key, &serai, set_with_keys).await?.unwrap())); - break; + break; + } } -} -// If this block doesn't have cosigners, yet does have events, automatically mark it as -// cosigned -if let Some(has_no_cosigners) = has_no_cosigners { - log::debug!("{} had no cosigners available, marking as cosigned", has_no_cosigners.number()); - LatestCosignedBlock::set(&mut txn, &has_no_cosigners.number()); -} else { - CosignTriggered::set(&mut txn, &()); - for (set, block, hash) in cosign { - log::debug!("cosigning {block} with {:?} {:?}", set.network, set.session); - CosignTransactions::append_cosign(&mut txn, set, block, hash); + if let Some((number, hash)) = to_cosign { + // If this block doesn't have cosigners, yet does have events, automatically mark it as + // cosigned + if cosigning.is_empty() { + log::debug!("{} had no cosigners available, marking as cosigned", number); + LatestCosignedBlock::set(&mut txn, &number); + } else { + for (set, in_set) in cosigning { + if in_set { + log::debug!("cosigning {number} with {:?} {:?}", set.network, set.session); + CosignTransactions::append_cosign(&mut txn, set, number, hash); + } + } + } } + txn.commit(); + + Ok(()) } -txn.commit(); diff --git a/coordinator/src/substrate/db.rs b/coordinator/src/substrate/db.rs index e2e33c51e..0f1a05647 100644 --- a/coordinator/src/substrate/db.rs +++ b/coordinator/src/substrate/db.rs @@ -1,61 +1,32 @@ -use scale::Encode; - -use serai_client::{ - primitives::NetworkId, - validator_sets::primitives::{Session, ValidatorSet}, -}; +use serai_client::primitives::NetworkId; pub use serai_db::*; -create_db!( - SubstrateDb { - CosignTriggered: () -> (), - IntendedCosign: () -> (u64, Option<u64>), - BlockHasEvents: (block: u64) -> u8, - LatestCosignedBlock: () -> u64, - NextBlock: () -> u64, - EventDb: (id: &[u8], index: u32) -> (), - BatchInstructionsHashDb: (network: NetworkId, id: u32) -> [u8; 32] - } -); +mod inner_db { + use super::*; -impl IntendedCosign { - pub fn set_intended_cosign(txn: &mut impl DbTxn, intended: u64) { - Self::set(txn, &(intended, None::<u64>)); - } - pub fn set_skipped_cosign(txn: &mut impl
DbTxn, skipped: u64) { - let (intended, prior_skipped) = Self::get(txn).unwrap(); - assert!(prior_skipped.is_none()); - Self::set(txn, &(intended, Some(skipped))); - } + create_db!( + SubstrateDb { + NextBlock: () -> u64, + HandledEvent: (block: [u8; 32]) -> u32, + BatchInstructionsHashDb: (network: NetworkId, id: u32) -> [u8; 32] + } + ); } +pub use inner_db::{NextBlock, BatchInstructionsHashDb}; -impl LatestCosignedBlock { - pub fn latest_cosigned_block(getter: &impl Get) -> u64 { - Self::get(getter).unwrap_or_default().max(1) +pub struct HandledEvent; +impl HandledEvent { + fn next_to_handle_event(getter: &impl Get, block: [u8; 32]) -> u32 { + inner_db::HandledEvent::get(getter, block).map(|last| last + 1).unwrap_or(0) } -} - -impl EventDb { - pub fn is_unhandled(getter: &impl Get, id: &[u8], index: u32) -> bool { - Self::get(getter, id, index).is_none() + pub fn is_unhandled(getter: &impl Get, block: [u8; 32], event_id: u32) -> bool { + let next = Self::next_to_handle_event(getter, block); + assert!(next >= event_id); + next == event_id } - - pub fn handle_event(txn: &mut impl DbTxn, id: &[u8], index: u32) { - assert!(Self::is_unhandled(txn, id, index)); - Self::set(txn, id, index, &()); - } -} - -db_channel! { - SubstrateDbChannels { - CosignTransactions: (network: NetworkId) -> (Session, u64, [u8; 32]), - } -} - -impl CosignTransactions { - // Append a cosign transaction. - pub fn append_cosign(txn: &mut impl DbTxn, set: ValidatorSet, number: u64, hash: [u8; 32]) { - CosignTransactions::send(txn, set.network, &(set.session, number, hash)) + pub fn handle_event(txn: &mut impl DbTxn, block: [u8; 32], index: u32) { + assert!(Self::next_to_handle_event(txn, block) == index); + inner_db::HandledEvent::set(txn, block, &index); } } diff --git a/coordinator/src/substrate/mod.rs b/coordinator/src/substrate/mod.rs index b5c58f2b9..9c702f8f5 100644 --- a/coordinator/src/substrate/mod.rs +++ b/coordinator/src/substrate/mod.rs @@ -8,12 +8,11 @@ use zeroize::Zeroizing; use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto}; -use scale::{Encode, Decode}; use serai_client::{ SeraiError, Block, Serai, TemporalSerai, primitives::{BlockHash, NetworkId}, validator_sets::{ - primitives::{Session, ValidatorSet, KeyPair, amortize_excess_key_shares}, + primitives::{ValidatorSet, KeyPair, amortize_excess_key_shares}, ValidatorSetsEvent, }, in_instructions::InInstructionsEvent, @@ -26,15 +25,14 @@ use processor_messages::SubstrateContext; use tokio::{sync::mpsc, time::sleep}; -use crate::{ - Db, - processors::Processors, - tributary::{TributarySpec, SeraiBlockNumber}, -}; +use crate::{Db, processors::Processors, tributary::TributarySpec}; mod db; pub use db::*; +mod cosign; +pub use cosign::*; + async fn in_set( key: &Zeroizing<::F>, serai: &TemporalSerai<'_>, @@ -110,7 +108,7 @@ async fn handle_new_set( new_tributary_spec.send(spec).unwrap(); } else { - log::info!("not present in set {:?}", set); + log::info!("not present in new set {:?}", set); } Ok(()) @@ -147,8 +145,8 @@ async fn handle_key_gen( Ok(()) } -async fn handle_batch_and_burns( - db: &mut D, +async fn handle_batch_and_burns( + txn: &mut impl DbTxn, processors: &Pro, serai: &Serai, block: &Block, @@ -178,9 +176,7 @@ async fn handle_batch_and_burns( { network_had_event(&mut burns, &mut batches, network); - let mut txn = db.txn(); - BatchInstructionsHashDb::set(&mut txn, network, id, &instructions_hash); - txn.commit(); + BatchInstructionsHashDb::set(txn, network, id, &instructions_hash); // Make sure this is the only Batch event for this 
network in this Block assert!(batch_block.insert(network, network_block).is_none()); @@ -257,8 +253,8 @@ async fn handle_block( for new_set in serai.as_of(hash).validator_sets().new_set_events().await? { // Individually mark each event as handled so on reboot, we minimize duplicates // Additionally, if the Serai connection also fails 1/100 times, this means a block with 1000 - // events will successfully be incrementally handled (though the Serai connection should be - // stable) + // events will successfully be incrementally handled + // (though the Serai connection should be stable, making this unnecessary) let ValidatorSetsEvent::NewSet { set } = new_set else { panic!("NewSet event wasn't NewSet: {new_set:?}"); }; @@ -269,11 +265,11 @@ async fn handle_block( continue; } - if EventDb::is_unhandled(db, &hash, event_id) { + if HandledEvent::is_unhandled(db, hash, event_id) { log::info!("found fresh new set event {:?}", new_set); let mut txn = db.txn(); handle_new_set::(&mut txn, key, new_tributary_spec, serai, &block, set).await?; - EventDb::handle_event(&mut txn, &hash, event_id); + HandledEvent::handle_event(&mut txn, hash, event_id); txn.commit(); } event_id += 1; @@ -281,7 +277,7 @@ async fn handle_block( // If a key pair was confirmed, inform the processor for key_gen in serai.as_of(hash).validator_sets().key_gen_events().await? { - if EventDb::is_unhandled(db, &hash, event_id) { + if HandledEvent::is_unhandled(db, hash, event_id) { log::info!("found fresh key gen event {:?}", key_gen); if let ValidatorSetsEvent::KeyGen { set, key_pair } = key_gen { handle_key_gen(processors, serai, &block, set, key_pair).await?; @@ -289,7 +285,7 @@ async fn handle_block( panic!("KeyGen event wasn't KeyGen: {key_gen:?}"); } let mut txn = db.txn(); - EventDb::handle_event(&mut txn, &hash, event_id); + HandledEvent::handle_event(&mut txn, hash, event_id); txn.commit(); } event_id += 1; @@ -304,28 +300,26 @@ async fn handle_block( continue; } - if EventDb::is_unhandled(db, &hash, event_id) { + if HandledEvent::is_unhandled(db, hash, event_id) { log::info!("found fresh set retired event {:?}", retired_set); let mut txn = db.txn(); crate::ActiveTributaryDb::retire_tributary(&mut txn, set); tributary_retired.send(set).unwrap(); - EventDb::handle_event(&mut txn, &hash, event_id); + HandledEvent::handle_event(&mut txn, hash, event_id); txn.commit(); } event_id += 1; } // Finally, tell the processor of acknowledged blocks/burns - // This uses a single event as. 
unlike prior events which individually executed code, all + // This uses a single event as unlike prior events which individually executed code, all // following events share data collection - // This does break the uniqueness of (hash, event_id) -> one event, yet - // (network, (hash, event_id)) remains valid as a unique ID for an event - if EventDb::is_unhandled(db, &hash, event_id) { - handle_batch_and_burns(db, processors, serai, &block).await?; + if HandledEvent::is_unhandled(db, hash, event_id) { + let mut txn = db.txn(); + handle_batch_and_burns(&mut txn, processors, serai, &block).await?; + HandledEvent::handle_event(&mut txn, hash, event_id); + txn.commit(); } - let mut txn = db.txn(); - EventDb::handle_event(&mut txn, &hash, event_id); - txn.commit(); Ok(()) } @@ -342,181 +336,8 @@ async fn handle_new_blocks( // Check if there's been a new Substrate block let latest_number = serai.latest_finalized_block().await?.number(); - // TODO: If this block directly builds off a cosigned block *and* doesn't contain events, mark - // cosigned, - { - // If: - // A) This block has events and it's been at least X blocks since the last cosign or - // B) This block doesn't have events but it's been X blocks since a skipped block which did - // have events or - // C) This block key gens (which changes who the cosigners are) - // cosign this block. - const COSIGN_DISTANCE: u64 = 5 * 60 / 6; // 5 minutes, expressed in blocks - - #[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, Decode)] - enum HasEvents { - KeyGen, - Yes, - No, - } - async fn block_has_events( - txn: &mut impl DbTxn, - serai: &Serai, - block: u64, - ) -> Result { - let cached = BlockHasEvents::get(txn, block); - match cached { - None => { - let serai = serai.as_of( - serai - .finalized_block_by_number(block) - .await? - .expect("couldn't get block which should've been finalized") - .hash(), - ); - - if !serai.validator_sets().key_gen_events().await?.is_empty() { - return Ok(HasEvents::KeyGen); - } - - let has_no_events = serai.coins().burn_with_instruction_events().await?.is_empty() && - serai.in_instructions().batch_events().await?.is_empty() && - serai.validator_sets().new_set_events().await?.is_empty() && - serai.validator_sets().set_retired_events().await?.is_empty(); - - let has_events = if has_no_events { HasEvents::No } else { HasEvents::Yes }; - - let has_events = has_events.encode(); - assert_eq!(has_events.len(), 1); - BlockHasEvents::set(txn, block, &has_events[0]); - Ok(HasEvents::Yes) - } - Some(code) => Ok(HasEvents::decode(&mut [code].as_slice()).unwrap()), - } - } - - let mut txn = db.txn(); - let Some((last_intended_to_cosign_block, mut skipped_block)) = IntendedCosign::get(&txn) else { - IntendedCosign::set_intended_cosign(&mut txn, 1); - txn.commit(); - return Ok(()); - }; - - // If we haven't flagged skipped, and a block within the distance had events, flag the first - // such block as skipped - let mut distance_end_exclusive = last_intended_to_cosign_block + COSIGN_DISTANCE; - // If we've never triggered a cosign, don't skip any cosigns - if CosignTriggered::get(&txn).is_none() { - distance_end_exclusive = 0; - } - if skipped_block.is_none() { - for b in (last_intended_to_cosign_block + 1) .. distance_end_exclusive { - if b > latest_number { - break; - } - - if block_has_events(&mut txn, serai, b).await? 
== HasEvents::Yes { - skipped_block = Some(b); - log::debug!("skipping cosigning {b} due to proximity to prior cosign"); - IntendedCosign::set_skipped_cosign(&mut txn, b); - break; - } - } - } - - let mut has_no_cosigners = None; - let mut cosign = vec![]; - - // Block we should cosign no matter what if no prior blocks qualified for cosigning - let maximally_latent_cosign_block = - skipped_block.map(|skipped_block| skipped_block + COSIGN_DISTANCE); - for block in (last_intended_to_cosign_block + 1) ..= latest_number { - let actual_block = serai - .finalized_block_by_number(block) - .await? - .expect("couldn't get block which should've been finalized"); - SeraiBlockNumber::set(&mut txn, actual_block.hash(), &block); - - let mut set = false; - - let block_has_events = block_has_events(&mut txn, serai, block).await?; - // If this block is within the distance, - if block < distance_end_exclusive { - // and set a key, cosign it - if block_has_events == HasEvents::KeyGen { - IntendedCosign::set_intended_cosign(&mut txn, block); - set = true; - // Carry skipped if it isn't included by cosigning this block - if let Some(skipped) = skipped_block { - if skipped > block { - IntendedCosign::set_skipped_cosign(&mut txn, block); - } - } - } - } else if (Some(block) == maximally_latent_cosign_block) || - (block_has_events != HasEvents::No) - { - // Since this block was outside the distance and had events/was maximally latent, cosign it - IntendedCosign::set_intended_cosign(&mut txn, block); - set = true; - } - - if set { - // Get the keys as of the prior block - // That means if this block is setting new keys (which won't lock in until we process this - // block), we won't freeze up waiting for the yet-to-be-processed keys to sign this block - let serai = serai.as_of(actual_block.header.parent_hash.into()); - - has_no_cosigners = Some(actual_block.clone()); - - for network in serai_client::primitives::NETWORKS { - // Get the latest session to have set keys - let Some(latest_session) = serai.validator_sets().session(network).await? else { - continue; - }; - let prior_session = Session(latest_session.0.saturating_sub(1)); - let set_with_keys = if serai - .validator_sets() - .keys(ValidatorSet { network, session: prior_session }) - .await? 
- .is_some() - { - ValidatorSet { network, session: prior_session } - } else { - let set = ValidatorSet { network, session: latest_session }; - if serai.validator_sets().keys(set).await?.is_none() { - continue; - } - set - }; - - // Since this is a valid cosigner, don't flag this block as having no cosigners - has_no_cosigners = None; - log::debug!("{:?} will be cosigning {block}", set_with_keys.network); - - if in_set(key, &serai, set_with_keys).await?.unwrap() { - cosign.push((set_with_keys, block, actual_block.hash())); - } - } - - break; - } - } - - // If this block doesn't have cosigners, yet does have events, automatically mark it as - // cosigned - if let Some(has_no_cosigners) = has_no_cosigners { - log::debug!("{} had no cosigners available, marking as cosigned", has_no_cosigners.number()); - LatestCosignedBlock::set(&mut txn, &has_no_cosigners.number()); - } else { - CosignTriggered::set(&mut txn, &()); - for (set, block, hash) in cosign { - log::debug!("cosigning {block} with {:?} {:?}", set.network, set.session); - CosignTransactions::append_cosign(&mut txn, set, block, hash); - } - } - txn.commit(); - } + // Advance the cosigning protocol + advance_cosign_protocol(db, key, serai, latest_number).await?; // Reduce to the latest cosigned block let latest_number = latest_number.min(LatestCosignedBlock::latest_cosigned_block(db)); @@ -526,24 +347,19 @@ async fn handle_new_blocks( } for b in *next_block ..= latest_number { - log::info!("found substrate block {b}"); - handle_block( - db, - key, - new_tributary_spec, - tributary_retired, - processors, - serai, - serai - .finalized_block_by_number(b) - .await? - .expect("couldn't get block before the latest finalized block"), - ) - .await?; + let block = serai + .finalized_block_by_number(b) + .await? + .expect("couldn't get block before the latest finalized block"); + + log::info!("handling substrate block {b}"); + handle_block(db, key, new_tributary_spec, tributary_retired, processors, serai, block).await?; *next_block += 1; + let mut txn = db.txn(); NextBlock::set(&mut txn, next_block); txn.commit(); + log::info!("handled substrate block {b}"); } @@ -578,6 +394,7 @@ pub async fn scan_task( }; */ // TODO: Restore the above subscription-based system + // That would require moving serai-client from HTTP to websockets let new_substrate_block_notifier = { let serai = &serai; move |next_substrate_block| async move { @@ -648,22 +465,25 @@ pub async fn scan_task( } /// Gets the expected ID for the next Batch. -pub(crate) async fn get_expected_next_batch(serai: &Serai, network: NetworkId) -> u32 { - let mut first = true; - loop { - if !first { - log::error!("{} {network:?}", "couldn't connect to Serai node to get the next batch ID for",); - sleep(Duration::from_secs(5)).await; +/// +/// Will log an error and apply a slight sleep on error, letting the caller simply immediately +/// retry. 
+pub(crate) async fn expected_next_batch( + serai: &Serai, + network: NetworkId, +) -> Result { + async fn expected_next_batch_inner(serai: &Serai, network: NetworkId) -> Result { + let serai = serai.as_of_latest_finalized_block().await?; + let last = serai.in_instructions().last_batch_for_network(network).await?; + Ok(if let Some(last) = last { last + 1 } else { 0 }) + } + match expected_next_batch_inner(serai, network).await { + Ok(next) => Ok(next), + Err(e) => { + log::error!("couldn't get the expected next batch from substrate: {e:?}"); + sleep(Duration::from_millis(100)).await; + Err(e) } - first = false; - - let Ok(serai) = serai.as_of_latest_finalized_block().await else { - continue; - }; - let Ok(last) = serai.in_instructions().last_batch_for_network(network).await else { - continue; - }; - break if let Some(last) = last { last + 1 } else { 0 }; } } diff --git a/coordinator/src/tests/tributary/chain.rs b/coordinator/src/tests/tributary/chain.rs index 6a5d8ee46..496f3e0d9 100644 --- a/coordinator/src/tests/tributary/chain.rs +++ b/coordinator/src/tests/tributary/chain.rs @@ -13,7 +13,7 @@ use ciphersuite::{ }; use sp_application_crypto::sr25519; - +use borsh::BorshDeserialize; use serai_client::{ primitives::NetworkId, validator_sets::primitives::{Session, ValidatorSet}, @@ -58,21 +58,26 @@ pub fn new_spec( .collect::>(); let res = TributarySpec::new(serai_block, start_time, set, set_participants); - assert_eq!(TributarySpec::read::<&[u8]>(&mut res.serialize().as_ref()).unwrap(), res); + assert_eq!( + TributarySpec::deserialize_reader(&mut borsh::to_vec(&res).unwrap().as_slice()).unwrap(), + res, + ); res } pub async fn new_tributaries( keys: &[Zeroizing<::F>], spec: &TributarySpec, -) -> Vec<(LocalP2p, Tributary)> { +) -> Vec<(MemDb, LocalP2p, Tributary)> { let p2p = LocalP2p::new(keys.len()); let mut res = vec![]; for (i, key) in keys.iter().enumerate() { + let db = MemDb::new(); res.push(( + db.clone(), p2p[i].clone(), Tributary::<_, Transaction, _>::new( - MemDb::new(), + db, spec.genesis(), spec.start_time(), key.clone(), @@ -152,7 +157,11 @@ async fn tributary_test() { let keys = new_keys(&mut OsRng); let spec = new_spec(&mut OsRng, &keys); - let mut tributaries = new_tributaries(&keys, &spec).await; + let mut tributaries = new_tributaries(&keys, &spec) + .await + .into_iter() + .map(|(_, p2p, tributary)| (p2p, tributary)) + .collect::>(); let mut blocks = 0; let mut last_block = spec.genesis(); diff --git a/coordinator/src/tests/tributary/dkg.rs b/coordinator/src/tests/tributary/dkg.rs index 347e94da7..89d1558df 100644 --- a/coordinator/src/tests/tributary/dkg.rs +++ b/coordinator/src/tests/tributary/dkg.rs @@ -8,7 +8,7 @@ use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto}; use frost::Participant; use sp_runtime::traits::Verify; -use serai_client::validator_sets::primitives::KeyPair; +use serai_client::validator_sets::primitives::{ValidatorSet, KeyPair}; use tokio::time::sleep; @@ -34,10 +34,18 @@ use crate::{ #[tokio::test] async fn dkg_test() { + env_logger::init(); + let keys = new_keys(&mut OsRng); let spec = new_spec(&mut OsRng, &keys); - let tributaries = new_tributaries(&keys, &spec).await; + let full_tributaries = new_tributaries(&keys, &spec).await; + let mut dbs = vec![]; + let mut tributaries = vec![]; + for (db, p2p, tributary) in full_tributaries { + dbs.push(db); + tributaries.push((p2p, tributary)); + } // Run the tributaries in the background tokio::spawn(run_tributaries(tributaries.clone())); @@ -49,8 +57,11 @@ async fn dkg_test() { let mut 
commitments = vec![0; 256]; OsRng.fill_bytes(&mut commitments); - let mut tx = - Transaction::DkgCommitments(attempt, vec![commitments], Transaction::empty_signed()); + let mut tx = Transaction::DkgCommitments { + attempt, + commitments: vec![commitments], + signed: Transaction::empty_signed(), + }; tx.sign(&mut OsRng, spec.genesis(), key); txs.push(tx); } @@ -71,7 +82,7 @@ async fn dkg_test() { .iter() .enumerate() .map(|(i, tx)| { - if let Transaction::DkgCommitments(_, commitments, _) = tx { + if let Transaction::DkgCommitments { commitments, .. } = tx { (Participant::new((i + 1).try_into().unwrap()).unwrap(), commitments[0].clone()) } else { panic!("txs had non-commitments"); @@ -80,20 +91,20 @@ async fn dkg_test() { .collect(); async fn new_processors( + db: &mut MemDb, key: &Zeroizing<::F>, spec: &TributarySpec, tributary: &Tributary, - ) -> (MemDb, MemProcessors) { - let mut scanner_db = MemDb::new(); + ) -> MemProcessors { let processors = MemProcessors::new(); - handle_new_blocks::<_, _, _, _, _, _, _, _, LocalP2p>( - &mut scanner_db, + handle_new_blocks::<_, _, _, _, _, LocalP2p>( + db, key, - |_, _, _, _| async { + &|_, _, _, _| async { panic!("provided TX caused recognized_id to be called in new_processors") }, &processors, - |_, _, _| async { panic!("test tried to publish a new Serai TX in new_processors") }, + &|_, _, _| async { panic!("test tried to publish a new Serai TX in new_processors") }, &|_| async { panic!( "test tried to publish a new Tributary TX from handle_application_tx in new_processors" @@ -103,11 +114,11 @@ async fn dkg_test() { &tributary.reader(), ) .await; - (scanner_db, processors) + processors } // Instantiate a scanner and verify it has nothing to report - let (mut scanner_db, processors) = new_processors(&keys[0], &spec, &tributaries[0].1).await; + let processors = new_processors(&mut dbs[0], &keys[0], &spec, &tributaries[0].1).await; assert!(processors.0.read().await.is_empty()); // Publish the last commitment @@ -117,14 +128,14 @@ async fn dkg_test() { sleep(Duration::from_secs(Tributary::::block_time().into())).await; // Verify the scanner emits a KeyGen::Commitments message - handle_new_blocks::<_, _, _, _, _, _, _, _, LocalP2p>( - &mut scanner_db, + handle_new_blocks::<_, _, _, _, _, LocalP2p>( + &mut dbs[0], &keys[0], - |_, _, _, _| async { + &|_, _, _, _| async { panic!("provided TX caused recognized_id to be called after Commitments") }, &processors, - |_, _, _| async { panic!("test tried to publish a new Serai TX after Commitments") }, + &|_, _, _| async { panic!("test tried to publish a new Serai TX after Commitments") }, &|_| async { panic!( "test tried to publish a new Tributary TX from handle_application_tx after Commitments" @@ -151,8 +162,8 @@ async fn dkg_test() { } // Verify all keys exhibit this scanner behavior - for (i, key) in keys.iter().enumerate() { - let (_, processors) = new_processors(key, &spec, &tributaries[i].1).await; + for (i, key) in keys.iter().enumerate().skip(1) { + let processors = new_processors(&mut dbs[i], key, &spec, &tributaries[i].1).await; let mut msgs = processors.0.write().await; assert_eq!(msgs.len(), 1); let msgs = msgs.get_mut(&spec.set().network).unwrap(); @@ -182,12 +193,14 @@ async fn dkg_test() { } } + let mut txn = dbs[k].txn(); let mut tx = Transaction::DkgShares { attempt, shares, - confirmation_nonces: crate::tributary::dkg_confirmation_nonces(key, &spec, 0), + confirmation_nonces: crate::tributary::dkg_confirmation_nonces(key, &spec, &mut txn, 0), signed: Transaction::empty_signed(), }; + 
txn.commit(); tx.sign(&mut OsRng, spec.genesis(), key); txs.push(tx); } @@ -201,14 +214,14 @@ async fn dkg_test() { } // With just 4 sets of shares, nothing should happen yet - handle_new_blocks::<_, _, _, _, _, _, _, _, LocalP2p>( - &mut scanner_db, + handle_new_blocks::<_, _, _, _, _, LocalP2p>( + &mut dbs[0], &keys[0], - |_, _, _, _| async { + &|_, _, _, _| async { panic!("provided TX caused recognized_id to be called after some shares") }, &processors, - |_, _, _| async { panic!("test tried to publish a new Serai TX after some shares") }, + &|_, _, _| async { panic!("test tried to publish a new Serai TX after some shares") }, &|_| async { panic!( "test tried to publish a new Tributary TX from handle_application_tx after some shares" @@ -254,28 +267,30 @@ async fn dkg_test() { }; // Any scanner which has handled the prior blocks should only emit the new event - handle_new_blocks::<_, _, _, _, _, _, _, _, LocalP2p>( - &mut scanner_db, - &keys[0], - |_, _, _, _| async { panic!("provided TX caused recognized_id to be called after shares") }, - &processors, - |_, _, _| async { panic!("test tried to publish a new Serai TX") }, - &|_| async { panic!("test tried to publish a new Tributary TX from handle_application_tx") }, - &spec, - &tributaries[0].1.reader(), - ) - .await; - { - let mut msgs = processors.0.write().await; - assert_eq!(msgs.len(), 1); - let msgs = msgs.get_mut(&spec.set().network).unwrap(); - assert_eq!(msgs.pop_front().unwrap(), shares_for(0)); - assert!(msgs.is_empty()); + for (i, key) in keys.iter().enumerate() { + handle_new_blocks::<_, _, _, _, _, LocalP2p>( + &mut dbs[i], + key, + &|_, _, _, _| async { panic!("provided TX caused recognized_id to be called after shares") }, + &processors, + &|_, _, _| async { panic!("test tried to publish a new Serai TX") }, + &|_| async { panic!("test tried to publish a new Tributary TX from handle_application_tx") }, + &spec, + &tributaries[i].1.reader(), + ) + .await; + { + let mut msgs = processors.0.write().await; + assert_eq!(msgs.len(), 1); + let msgs = msgs.get_mut(&spec.set().network).unwrap(); + assert_eq!(msgs.pop_front().unwrap(), shares_for(i)); + assert!(msgs.is_empty()); + } } // Yet new scanners should emit all events for (i, key) in keys.iter().enumerate() { - let (_, processors) = new_processors(key, &spec, &tributaries[i].1).await; + let processors = new_processors(&mut MemDb::new(), key, &spec, &tributaries[i].1).await; let mut msgs = processors.0.write().await; assert_eq!(msgs.len(), 1); let msgs = msgs.get_mut(&spec.set().network).unwrap(); @@ -302,17 +317,16 @@ async fn dkg_test() { let mut txs = vec![]; for (i, key) in keys.iter().enumerate() { let attempt = 0; - let mut scanner_db = &mut scanner_db; - let (mut local_scanner_db, _) = new_processors(key, &spec, &tributaries[0].1).await; - if i != 0 { - scanner_db = &mut local_scanner_db; - } - let mut txn = scanner_db.txn(); + let mut txn = dbs[i].txn(); let share = crate::tributary::generated_key_pair::(&mut txn, key, &spec, &key_pair, 0).unwrap(); txn.commit(); - let mut tx = Transaction::DkgConfirmed(attempt, share, Transaction::empty_signed()); + let mut tx = Transaction::DkgConfirmed { + attempt, + confirmation_share: share, + signed: Transaction::empty_signed(), + }; tx.sign(&mut OsRng, spec.genesis(), key); txs.push(tx); } @@ -325,14 +339,14 @@ async fn dkg_test() { } // The scanner should successfully try to publish a transaction with a validly signed signature - handle_new_blocks::<_, _, _, _, _, _, _, _, LocalP2p>( - &mut scanner_db, + 
handle_new_blocks::<_, _, _, _, _, LocalP2p>( + &mut dbs[0], &keys[0], - |_, _, _, _| async { + &|_, _, _, _| async { panic!("provided TX caused recognized_id to be called after DKG confirmation") }, &processors, - |set, tx_type, tx| { + &|set: ValidatorSet, tx_type, tx: serai_client::Transaction| { assert_eq!(tx_type, PstTxType::SetKeys); let spec = spec.clone(); diff --git a/coordinator/src/tests/tributary/handle_p2p.rs b/coordinator/src/tests/tributary/handle_p2p.rs index 00ef6d345..756f45610 100644 --- a/coordinator/src/tests/tributary/handle_p2p.rs +++ b/coordinator/src/tests/tributary/handle_p2p.rs @@ -27,7 +27,11 @@ async fn handle_p2p_test() { let keys = new_keys(&mut OsRng); let spec = new_spec(&mut OsRng, &keys); - let mut tributaries = new_tributaries(&keys, &spec).await; + let mut tributaries = new_tributaries(&keys, &spec) + .await + .into_iter() + .map(|(_, p2p, tributary)| (p2p, tributary)) + .collect::>(); let mut tributary_senders = vec![]; let mut tributary_arcs = vec![]; diff --git a/coordinator/src/tests/tributary/mod.rs b/coordinator/src/tests/tributary/mod.rs index 515202be4..66f65ee45 100644 --- a/coordinator/src/tests/tributary/mod.rs +++ b/coordinator/src/tests/tributary/mod.rs @@ -7,7 +7,7 @@ use processor_messages::coordinator::SubstrateSignableId; use tributary::{ReadWrite, tests::random_signed_with_nonce}; -use crate::tributary::{SignData, Transaction}; +use crate::tributary::{Label, SignData, Transaction}; mod chain; pub use chain::*; @@ -34,11 +34,12 @@ fn random_vec(rng: &mut R, limit: usize) -> Vec { fn random_sign_data( rng: &mut R, plan: Id, - nonce: u32, + label: Label, ) -> SignData { SignData { plan, attempt: random_u32(&mut OsRng), + label, data: { let mut res = vec![]; @@ -48,7 +49,7 @@ fn random_sign_data(value: SignData) { let mut buf = vec![]; value.write(&mut buf).unwrap(); - assert_eq!(value, SignData::read(&mut buf.as_slice(), value.signed.nonce).unwrap()) + assert_eq!(value, SignData::read(&mut buf.as_slice()).unwrap()) } let mut plan = [0; 3]; @@ -95,28 +96,28 @@ fn serialize_sign_data() { test_read_write(random_sign_data::<_, _>( &mut OsRng, plan, - u32::try_from(OsRng.next_u64() >> 32).unwrap(), + if (OsRng.next_u64() % 2) == 0 { Label::Preprocess } else { Label::Share }, )); let mut plan = [0; 5]; OsRng.fill_bytes(&mut plan); test_read_write(random_sign_data::<_, _>( &mut OsRng, plan, - u32::try_from(OsRng.next_u64() >> 32).unwrap(), + if (OsRng.next_u64() % 2) == 0 { Label::Preprocess } else { Label::Share }, )); let mut plan = [0; 8]; OsRng.fill_bytes(&mut plan); test_read_write(random_sign_data::<_, _>( &mut OsRng, plan, - u32::try_from(OsRng.next_u64() >> 32).unwrap(), + if (OsRng.next_u64() % 2) == 0 { Label::Preprocess } else { Label::Share }, )); let mut plan = [0; 24]; OsRng.fill_bytes(&mut plan); test_read_write(random_sign_data::<_, _>( &mut OsRng, plan, - u32::try_from(OsRng.next_u64() >> 32).unwrap(), + if (OsRng.next_u64() % 2) == 0 { Label::Preprocess } else { Label::Share }, )); } @@ -134,11 +135,11 @@ fn serialize_transaction() { OsRng.fill_bytes(&mut temp); commitments.push(temp); } - test_read_write(Transaction::DkgCommitments( - random_u32(&mut OsRng), + test_read_write(Transaction::DkgCommitments { + attempt: random_u32(&mut OsRng), commitments, - random_signed_with_nonce(&mut OsRng, 0), - )); + signed: random_signed_with_nonce(&mut OsRng, 0), + }); } { @@ -192,25 +193,25 @@ fn serialize_transaction() { }); } - test_read_write(Transaction::DkgConfirmed( - random_u32(&mut OsRng), - { + 
test_read_write(Transaction::DkgConfirmed { + attempt: random_u32(&mut OsRng), + confirmation_share: { let mut share = [0; 32]; OsRng.fill_bytes(&mut share); share }, - random_signed_with_nonce(&mut OsRng, 2), - )); + signed: random_signed_with_nonce(&mut OsRng, 2), + }); { let mut key = [0; 32]; OsRng.fill_bytes(&mut key); - test_read_write(Transaction::DkgRemovalPreprocess(random_sign_data(&mut OsRng, key, 0))); + test_read_write(Transaction::DkgRemoval(random_sign_data(&mut OsRng, key, Label::Preprocess))); } { let mut key = [0; 32]; OsRng.fill_bytes(&mut key); - test_read_write(Transaction::DkgRemovalShare(random_sign_data(&mut OsRng, key, 1))); + test_read_write(Transaction::DkgRemoval(random_sign_data(&mut OsRng, key, Label::Share))); } { @@ -224,38 +225,38 @@ fn serialize_transaction() { OsRng.fill_bytes(&mut block); let mut batch = [0; 5]; OsRng.fill_bytes(&mut batch); - test_read_write(Transaction::Batch(block, batch)); + test_read_write(Transaction::Batch { block, batch }); } test_read_write(Transaction::SubstrateBlock(OsRng.next_u64())); { let mut plan = [0; 5]; OsRng.fill_bytes(&mut plan); - test_read_write(Transaction::SubstratePreprocess(random_sign_data( + test_read_write(Transaction::SubstrateSign(random_sign_data( &mut OsRng, SubstrateSignableId::Batch(plan), - 0, + Label::Preprocess, ))); } { let mut plan = [0; 5]; OsRng.fill_bytes(&mut plan); - test_read_write(Transaction::SubstrateShare(random_sign_data( + test_read_write(Transaction::SubstrateSign(random_sign_data( &mut OsRng, SubstrateSignableId::Batch(plan), - 1, + Label::Share, ))); } { let mut plan = [0; 32]; OsRng.fill_bytes(&mut plan); - test_read_write(Transaction::SignPreprocess(random_sign_data(&mut OsRng, plan, 0))); + test_read_write(Transaction::Sign(random_sign_data(&mut OsRng, plan, Label::Preprocess))); } { let mut plan = [0; 32]; OsRng.fill_bytes(&mut plan); - test_read_write(Transaction::SignShare(random_sign_data(&mut OsRng, plan, 1))); + test_read_write(Transaction::Sign(random_sign_data(&mut OsRng, plan, Label::Share))); } { diff --git a/coordinator/src/tests/tributary/sync.rs b/coordinator/src/tests/tributary/sync.rs index 1267368fa..8723c04cb 100644 --- a/coordinator/src/tests/tributary/sync.rs +++ b/coordinator/src/tests/tributary/sync.rs @@ -31,7 +31,11 @@ async fn sync_test() { // Ensure this can have a node fail assert!(spec.n() > spec.t()); - let mut tributaries = new_tributaries(&keys, &spec).await; + let mut tributaries = new_tributaries(&keys, &spec) + .await + .into_iter() + .map(|(_, p2p, tributary)| (p2p, tributary)) + .collect::>(); // Keep a Tributary back, effectively having it offline let syncer_key = keys.pop().unwrap(); diff --git a/coordinator/src/tests/tributary/tx.rs b/coordinator/src/tests/tributary/tx.rs index cfe1bab81..da9433b67 100644 --- a/coordinator/src/tests/tributary/tx.rs +++ b/coordinator/src/tests/tributary/tx.rs @@ -23,7 +23,11 @@ async fn tx_test() { let keys = new_keys(&mut OsRng); let spec = new_spec(&mut OsRng, &keys); - let tributaries = new_tributaries(&keys, &spec).await; + let tributaries = new_tributaries(&keys, &spec) + .await + .into_iter() + .map(|(_, p2p, tributary)| (p2p, tributary)) + .collect::>(); // Run the tributaries in the background tokio::spawn(run_tributaries(tributaries.clone())); @@ -39,8 +43,11 @@ async fn tx_test() { // Create the TX with a null signature so we can get its sig hash let block_before_tx = tributaries[sender].1.tip().await; - let mut tx = - Transaction::DkgCommitments(attempt, vec![commitments.clone()], 
Transaction::empty_signed()); + let mut tx = Transaction::DkgCommitments { + attempt, + commitments: vec![commitments.clone()], + signed: Transaction::empty_signed(), + }; tx.sign(&mut OsRng, spec.genesis(), &key); assert_eq!(tributaries[sender].1.add_transaction(tx.clone()).await, Ok(true)); diff --git a/coordinator/src/tributary/db.rs b/coordinator/src/tributary/db.rs index 2d485af00..562c04e54 100644 --- a/coordinator/src/tributary/db.rs +++ b/coordinator/src/tributary/db.rs @@ -1,23 +1,23 @@ -use core::ops::Deref; use std::collections::HashMap; -use zeroize::Zeroizing; -use ciphersuite::{Ciphersuite, Ristretto, group::GroupEncoding}; +use scale::Encode; + use frost::Participant; use serai_client::validator_sets::primitives::KeyPair; use processor_messages::coordinator::SubstrateSignableId; -use scale::{Encode, Decode}; - pub use serai_db::*; -use crate::tributary::TributarySpec; +use tributary::ReadWrite; + +use crate::tributary::{Label, Transaction}; -#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, Decode)] +#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode)] pub enum Topic { Dkg, + DkgConfirmation, DkgRemoval([u8; 32]), SubstrateSign(SubstrateSignableId), Sign([u8; 32]), @@ -27,7 +27,7 @@ pub enum Topic { #[derive(Clone, Copy, PartialEq, Eq, Debug, Encode)] pub struct DataSpecification { pub topic: Topic, - pub label: &'static str, + pub label: Label, pub attempt: u32, } @@ -42,9 +42,9 @@ pub enum Accumulation { } create_db!( - NewTributary { + Tributary { SeraiBlockNumber: (hash: [u8; 32]) -> u64, - LastBlock: (genesis: [u8; 32]) -> [u8; 32], + LastHandledBlock: (genesis: [u8; 32]) -> [u8; 32], FatalSlashes: (genesis: [u8; 32]) -> Vec<[u8; 32]>, FatallySlashed: (genesis: [u8; 32], account: [u8; 32]) -> (), DkgShare: (genesis: [u8; 32], from: u16, to: u16) -> Vec, @@ -52,12 +52,13 @@ create_db!( ConfirmationNonces: (genesis: [u8; 32], attempt: u32) -> HashMap>, RemovalNonces: (genesis: [u8; 32], removing: [u8; 32], attempt: u32) -> HashMap>, - CurrentlyCompletingKeyPair: (genesis: [u8; 32]) -> KeyPair, + DkgKeyPair: (genesis: [u8; 32], attempt: u32) -> KeyPair, DkgCompleted: (genesis: [u8; 32]) -> (), AttemptDb: (genesis: [u8; 32], topic: &Topic) -> u32, DataReceived: (genesis: [u8; 32], data_spec: &DataSpecification) -> u16, DataDb: (genesis: [u8; 32], data_spec: &DataSpecification, signer_bytes: &[u8; 32]) -> Vec, - EventDb: (id: [u8; 32], index: u32) -> (), + + SignedTransactionDb: (order: &[u8], nonce: u32) -> Vec, } ); @@ -84,78 +85,24 @@ impl AttemptDb { pub fn attempt(getter: &impl Get, genesis: [u8; 32], topic: Topic) -> Option { let attempt = Self::get(getter, genesis, &topic); // Don't require explicit recognition of the Dkg topic as it starts when the chain does - if attempt.is_none() && (topic == Topic::Dkg) { + if attempt.is_none() && ((topic == Topic::Dkg) || (topic == Topic::DkgConfirmation)) { return Some(0); } attempt } } -impl DataDb { - pub fn accumulate( +impl SignedTransactionDb { + pub fn take_signed_transaction( txn: &mut impl DbTxn, - our_key: &Zeroizing<::F>, - spec: &TributarySpec, - data_spec: &DataSpecification, - signer: ::G, - data: &Vec, - ) -> Accumulation { - let genesis = spec.genesis(); - if Self::get(txn, genesis, data_spec, &signer.to_bytes()).is_some() { - panic!("accumulating data for a participant multiple times"); + order: &[u8], + nonce: u32, + ) -> Option { + let res = SignedTransactionDb::get(txn, order, nonce) + .map(|bytes| Transaction::read(&mut bytes.as_slice()).unwrap()); + if res.is_some() { + Self::del(txn, order, nonce); } - 
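// A validator may control multiple key shares: spec.i(signer) is the half-open
// range of Participant indices it controls, so a single publication from it
// counts (end - start) times towards the threshold tracked below.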
let signer_shares = { - let signer_i = - spec.i(signer).expect("transaction signed by a non-validator for this tributary"); - u16::from(signer_i.end) - u16::from(signer_i.start) - }; - - let prior_received = DataReceived::get(txn, genesis, data_spec).unwrap_or_default(); - let now_received = prior_received + signer_shares; - DataReceived::set(txn, genesis, data_spec, &now_received); - DataDb::set(txn, genesis, data_spec, &signer.to_bytes(), data); - - // If we have all the needed commitments/preprocesses/shares, tell the processor - let needed = if data_spec.topic == Topic::Dkg { spec.n() } else { spec.t() }; - if (prior_received < needed) && (now_received >= needed) { - return Accumulation::Ready({ - let mut data = HashMap::new(); - for validator in spec.validators().iter().map(|validator| validator.0) { - data.insert( - spec.i(validator).unwrap().start, - if let Some(data) = Self::get(txn, genesis, data_spec, &validator.to_bytes()) { - data - } else { - continue; - }, - ); - } - - assert_eq!(data.len(), usize::from(needed)); - - // Remove our own piece of data, if we were involved - if data - .remove( - &spec - .i(Ristretto::generator() * our_key.deref()) - .expect("handling a message for a Tributary we aren't part of") - .start, - ) - .is_some() - { - DataSet::Participating(data) - } else { - DataSet::NotParticipating - } - }); - } - Accumulation::NotReady - } -} - -impl EventDb { - pub fn handle_event(txn: &mut impl DbTxn, id: [u8; 32], index: u32) { - assert!(Self::get(txn, id, index).is_none()); - Self::set(txn, id, index, &()); + res } } diff --git a/coordinator/src/tributary/dkg_confirmer.rs b/coordinator/src/tributary/dkg_confirmer.rs deleted file mode 100644 index 5fca0b2f0..000000000 --- a/coordinator/src/tributary/dkg_confirmer.rs +++ /dev/null @@ -1,207 +0,0 @@ -use std::collections::HashMap; - -use zeroize::Zeroizing; - -use rand_core::SeedableRng; -use rand_chacha::ChaCha20Rng; - -use transcript::{Transcript, RecommendedTranscript}; -use ciphersuite::{Ciphersuite, Ristretto}; -use frost::{ - FrostError, - dkg::{Participant, musig::musig}, - sign::*, -}; -use frost_schnorrkel::Schnorrkel; - -use serai_client::validator_sets::primitives::{KeyPair, musig_context, set_keys_message}; - -use crate::tributary::TributarySpec; - -/* - The following confirms the results of the DKG performed by the Processors onto Substrate. - - This is done by a signature over the generated key pair by the validators' MuSig-aggregated - public key. The MuSig-aggregation achieves on-chain efficiency and prevents on-chain censorship - of individual validators' DKG results by the Serai validator set. - - Since we're using the validators' public keys, as needed for them to be the root of trust, the - coordinator must perform the signing. This is distinct from all other group-signing operations, - which are generally done by the processor. - - Instead of maintaining state, the following rebuilds the full state on every call. This is deemed - acceptable re: performance as: - - 1) The DKG confirmation is only done upon the start of the Tributary. - 2) This is an O(n) algorithm. - 3) The size of the validator set is bounded by MAX_KEY_SHARES_PER_SET. - - Accordingly, this should be infrequently run and of tolerable algorithmic complexity. - - As for safety, it is explicitly unsafe to reuse nonces across signing sessions. This contradicts - our rebuilding, which depends on deterministic nonces. Safety is derived - from the deterministic nonces being context-bound under a BFT protocol.
The flow is as follows: - - 1) Derive a deterministic nonce by hashing the private key, Tributary parameters, and attempt. - 2) Publish the nonces' commitments, receiving everyone else's *and the DKG shares determining the - message to be signed*. - 3) Sign and publish the signature share. - - In order for nonce re-use to occur, the received nonce commitments, or the received DKG shares, - would have to be distinct and sign would have to be called again. - - Before we act on any received messages, they're ordered and finalized by a BFT algorithm. The - only way to operate on distinct received messages would be if: - - 1) A logical flaw exists, letting new messages overwrite prior messages - 2) A reorganization occurred from chain A to chain B, and with it, different messages - - Reorganizations are not supported, as BFT is assumed by the presence of a BFT algorithm. While - a significant number of processes may be byzantine, leading to BFT being broken, that still will - not trigger a reorganization. The only way to move to a distinct chain, with distinct messages, - would be by rebuilding the local process entirely (this time following chain B). - - Accordingly, safety follows if: - - 1) The local view of received messages is static - 2) The local process doesn't rebuild after a byzantine fault produces multiple blockchains - - We assume the former. We can prevent the latter (TODO) by: - - 1) Defining a per-build entropy, used so long as a DB is used. - 2) Checking that the commitments initially used for the DKG align with the per-build entropy. - - If a rebuild occurs, which is the only way we could follow a distinct blockchain, our entropy - will change (preventing nonce reuse). - - This will allow a validator to still participate in DKGs within a single build, even if they have - spontaneous reboots, and if a collapse triggers a rebuild, they don't lose safety. - - TODO: We also need to review how we're handling Processor preprocesses and likely implement the - same on-chain-preprocess-matches-presumed-preprocess check before publishing shares. -*/
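To make step 1 of that flow concrete: a minimal sketch of the entropy derivation, assuming only the transcript, rand_chacha, and zeroize crates this file already imports (the standalone function and its parameter shapes are illustrative, not an API of this codebase):

use zeroize::Zeroizing;
use rand_core::SeedableRng;
use rand_chacha::ChaCha20Rng;
use transcript::{Transcript, RecommendedTranscript};

// Seed a nonce RNG bound to the private key, the Tributary parameters, and the
// attempt: identical inputs rederive identical nonces, while any new attempt
// (or distinct Tributary) derives fresh ones.
fn deterministic_nonce_rng(
  spec_bytes: &[u8],
  key_bytes: Zeroizing<[u8; 32]>,
  attempt: u32,
) -> ChaCha20Rng {
  let mut transcript = RecommendedTranscript::new(b"DkgConfirmer Entropy");
  transcript.append_message(b"spec", spec_bytes);
  transcript.append_message(b"key", key_bytes);
  transcript.append_message(b"attempt", attempt.to_le_bytes());
  ChaCha20Rng::from_seed(Zeroizing::new(transcript).rng_seed(b"preprocess"))
}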
-pub(crate) struct DkgConfirmer; -impl DkgConfirmer { - // Convert the passed in HashMap, which uses the validators' start index for their `s` threshold - // shares, to the indexes needed for MuSig - fn from_threshold_i_to_musig_i( - spec: &TributarySpec, - mut old_map: HashMap<Participant, Vec<u8>>, - ) -> HashMap<Participant, Vec<u8>> { - let mut new_map = HashMap::new(); - for (new_i, validator) in spec.validators().into_iter().enumerate() { - let threshold_i = spec.i(validator.0).unwrap(); - if let Some(value) = old_map.remove(&threshold_i.start) { - new_map.insert(Participant::new(u16::try_from(new_i + 1).unwrap()).unwrap(), value); - } - } - new_map - } - - fn preprocess_internal( - spec: &TributarySpec, - key: &Zeroizing<<Ristretto as Ciphersuite>::F>, - attempt: u32, - ) -> (AlgorithmSignMachine<Ristretto, Schnorrkel>, [u8; 64]) { - let validators = spec.validators().iter().map(|val| val.0).collect::<Vec<_>>(); - - let context = musig_context(spec.set()); - let mut chacha = ChaCha20Rng::from_seed({ - let mut entropy_transcript = RecommendedTranscript::new(b"DkgConfirmer Entropy"); - entropy_transcript.append_message(b"spec", spec.serialize()); - entropy_transcript.append_message(b"key", Zeroizing::new(key.to_bytes())); - entropy_transcript.append_message(b"attempt", attempt.to_le_bytes()); - Zeroizing::new(entropy_transcript).rng_seed(b"preprocess") - }); - let (machine, preprocess) = AlgorithmMachine::new( - Schnorrkel::new(b"substrate"), - musig(&context, key, &validators) - .expect("confirming the DKG for a set we aren't in/validator present multiple times") - .into(), - ) - .preprocess(&mut chacha); - - (machine, preprocess.serialize().try_into().unwrap()) - } - // Get the preprocess for this confirmation. - pub(crate) fn preprocess( - spec: &TributarySpec, - key: &Zeroizing<<Ristretto as Ciphersuite>::F>, - attempt: u32, - ) -> [u8; 64] { - Self::preprocess_internal(spec, key, attempt).1 - } - - fn share_internal( - spec: &TributarySpec, - key: &Zeroizing<<Ristretto as Ciphersuite>::F>, - attempt: u32, - preprocesses: HashMap<Participant, Vec<u8>>, - key_pair: &KeyPair, - ) -> Result<(AlgorithmSignatureMachine<Ristretto, Schnorrkel>, [u8; 32]), Participant> { - let machine = Self::preprocess_internal(spec, key, attempt).0; - let preprocesses = Self::from_threshold_i_to_musig_i(spec, preprocesses) - .into_iter() - .map(|(p, preprocess)| { - machine - .read_preprocess(&mut preprocess.as_slice()) - .map(|preprocess| (p, preprocess)) - .map_err(|_| p) - }) - .collect::<Result<HashMap<_, _>, _>>()?; - let (machine, share) = machine - .sign(preprocesses, &set_keys_message(&spec.set(), key_pair)) - .map_err(|e| match e { - FrostError::InternalError(e) => unreachable!("FrostError::InternalError {e}"), - FrostError::InvalidParticipant(_, _) | - FrostError::InvalidSigningSet(_) | - FrostError::InvalidParticipantQuantity(_, _) | - FrostError::DuplicatedParticipant(_) | - FrostError::MissingParticipant(_) => unreachable!("{e:?}"), - FrostError::InvalidPreprocess(p) | FrostError::InvalidShare(p) => p, - })?; - - Ok((machine, share.serialize().try_into().unwrap())) - }
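// Of the FrostError variants matched above, only InvalidPreprocess(p) and
// InvalidShare(p) identify a specific misbehaving signer; they're surfaced as
// that Participant so the caller can blame (and slash) them. The remaining
// variants are unreachable here, as the inputs were validated when the
// signing machine was built.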
- // Get the share for this confirmation, if the preprocesses are valid. - pub(crate) fn share( - spec: &TributarySpec, - key: &Zeroizing<<Ristretto as Ciphersuite>::F>, - attempt: u32, - preprocesses: HashMap<Participant, Vec<u8>>, - key_pair: &KeyPair, - ) -> Result<[u8; 32], Participant> { - Self::share_internal(spec, key, attempt, preprocesses, key_pair).map(|(_, share)| share) - } - - pub(crate) fn complete( - spec: &TributarySpec, - key: &Zeroizing<<Ristretto as Ciphersuite>::F>, - attempt: u32, - preprocesses: HashMap<Participant, Vec<u8>>, - key_pair: &KeyPair, - shares: HashMap<Participant, Vec<u8>>, - ) -> Result<[u8; 64], Participant> { - let machine = Self::share_internal(spec, key, attempt, preprocesses, key_pair) - .expect("trying to complete a machine which failed to preprocess") - .0; - - let shares = Self::from_threshold_i_to_musig_i(spec, shares) - .into_iter() - .map(|(p, share)| { - machine.read_share(&mut share.as_slice()).map(|share| (p, share)).map_err(|_| p) - }) - .collect::<Result<HashMap<_, _>, _>>()?; - let signature = machine.complete(shares).map_err(|e| match e { - FrostError::InternalError(e) => unreachable!("FrostError::InternalError {e}"), - FrostError::InvalidParticipant(_, _) | - FrostError::InvalidSigningSet(_) | - FrostError::InvalidParticipantQuantity(_, _) | - FrostError::DuplicatedParticipant(_) | - FrostError::MissingParticipant(_) => unreachable!("{e:?}"), - FrostError::InvalidPreprocess(p) | FrostError::InvalidShare(p) => p, - })?; - - Ok(signature.to_bytes()) - } -} diff --git a/coordinator/src/tributary/dkg_removal.rs b/coordinator/src/tributary/dkg_removal.rs deleted file mode 100644 index 0120ef061..000000000 --- a/coordinator/src/tributary/dkg_removal.rs +++ /dev/null @@ -1,241 +0,0 @@ -use core::ops::Deref; -use std::collections::HashMap; - -use zeroize::Zeroizing; - -use rand_core::SeedableRng; -use rand_chacha::ChaCha20Rng; - -use transcript::{Transcript, RecommendedTranscript}; -use ciphersuite::{ - group::{Group, GroupEncoding}, - Ciphersuite, Ristretto, -}; -use frost::{ - FrostError, - dkg::{Participant, musig::musig}, - sign::*, -}; -use frost_schnorrkel::Schnorrkel; - -use serai_client::{ - Public, SeraiAddress, - validator_sets::primitives::{musig_context, remove_participant_message}, -}; - -use crate::tributary::TributarySpec; - -/* - The following is a clone of DkgConfirmer modified for DKG removals. - - The notable difference is that this uses a MuSig key of the first `t` participants to respond - with preprocesses, not all `n` participants. - - TODO: Exact same commentary on seeded RNGs.
The following can drop its seeded RNG if cached - preprocesses are used to carry the preprocess between machines -*/ -pub(crate) struct DkgRemoval; -impl DkgRemoval { - // Convert the passed in HashMap, which uses the validators' start index for their `s` threshold - // shares, to the indexes needed for MuSig - fn from_threshold_i_to_musig_i( - mut old_map: HashMap<[u8; 32], Vec>, - ) -> HashMap> { - let mut new_map = HashMap::new(); - let mut participating = old_map.keys().cloned().collect::>(); - participating.sort(); - for (i, participating) in participating.into_iter().enumerate() { - new_map.insert( - Participant::new(u16::try_from(i + 1).unwrap()).unwrap(), - old_map.remove(&participating).unwrap(), - ); - } - new_map - } - - fn preprocess_rng( - spec: &TributarySpec, - key: &Zeroizing<::F>, - attempt: u32, - ) -> ChaCha20Rng { - ChaCha20Rng::from_seed({ - let mut entropy_transcript = RecommendedTranscript::new(b"DkgRemoval Entropy"); - entropy_transcript.append_message(b"spec", spec.serialize()); - entropy_transcript.append_message(b"key", Zeroizing::new(key.to_bytes())); - entropy_transcript.append_message(b"attempt", attempt.to_le_bytes()); - Zeroizing::new(entropy_transcript).rng_seed(b"preprocess") - }) - } - - fn preprocess_internal( - spec: &TributarySpec, - key: &Zeroizing<::F>, - attempt: u32, - participants: Option<&[::G]>, - ) -> (Option>, [u8; 64]) { - // TODO: Diversify this among DkgConfirmer/DkgRemoval? - let context = musig_context(spec.set()); - - let (_, preprocess) = AlgorithmMachine::new( - Schnorrkel::new(b"substrate"), - // Preprocess with our key alone as we don't know the signing set yet - musig(&context, key, &[::G::generator() * key.deref()]) - .expect("couldn't get the MuSig key of our key alone") - .into(), - ) - .preprocess(&mut Self::preprocess_rng(spec, key, attempt)); - - let machine = if let Some(participants) = participants { - let (machine, actual_preprocess) = AlgorithmMachine::new( - Schnorrkel::new(b"substrate"), - // Preprocess with our key alone as we don't know the signing set yet - musig(&context, key, participants) - .expect( - "couldn't create a MuSig key for the DKG removal we're supposedly participating in", - ) - .into(), - ) - .preprocess(&mut Self::preprocess_rng(spec, key, attempt)); - // Doesn't use assert_eq due to lack of Debug - assert!(preprocess == actual_preprocess); - Some(machine) - } else { - None - }; - - (machine, preprocess.serialize().try_into().unwrap()) - } - // Get the preprocess for this confirmation. 
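The sorted reindexing in `from_threshold_i_to_musig_i` above is the crux of the removal flow: whichever validators respond first become MuSig participants 1..=t, ordered by their encoded keys. A standalone sketch of just that mapping (the function name and bare `[u8; 32]` key type are illustrative assumptions, not this module's API):

use std::collections::HashMap;
use frost::Participant;

// Reindex an arbitrary set of responders into the contiguous, 1-indexed
// Participant space MuSig expects, ordered by their encoded keys.
fn musig_indices(responders: &[[u8; 32]]) -> HashMap<[u8; 32], Participant> {
  let mut sorted = responders.to_vec();
  sorted.sort();
  sorted
    .into_iter()
    .enumerate()
    .map(|(i, key)| (key, Participant::new(u16::try_from(i + 1).unwrap()).unwrap()))
    .collect()
}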
- pub(crate) fn preprocess( - spec: &TributarySpec, - key: &Zeroizing<::F>, - attempt: u32, - ) -> [u8; 64] { - Self::preprocess_internal(spec, key, attempt, None).1 - } - - fn share_internal( - spec: &TributarySpec, - key: &Zeroizing<::F>, - attempt: u32, - mut preprocesses: HashMap>, - removed: [u8; 32], - ) -> Result<(AlgorithmSignatureMachine, [u8; 32]), Participant> { - // TODO: Remove this ugly blob - let preprocesses = { - let mut preprocesses_participants = preprocesses.keys().cloned().collect::>(); - preprocesses_participants.sort(); - let mut actual_keys = vec![]; - let spec_validators = spec.validators(); - for participant in &preprocesses_participants { - for (validator, _) in &spec_validators { - if participant == &spec.i(*validator).unwrap().start { - actual_keys.push(*validator); - } - } - } - - let mut new_preprocesses = HashMap::new(); - for (participant, actual_key) in - preprocesses_participants.into_iter().zip(actual_keys.into_iter()) - { - new_preprocesses.insert(actual_key, preprocesses.remove(&participant).unwrap()); - } - new_preprocesses - }; - - let participants = preprocesses.keys().cloned().collect::>(); - let preprocesses = Self::from_threshold_i_to_musig_i( - preprocesses.into_iter().map(|(key, preprocess)| (key.to_bytes(), preprocess)).collect(), - ); - let machine = Self::preprocess_internal(spec, key, attempt, Some(&participants)).0.unwrap(); - let preprocesses = preprocesses - .into_iter() - .map(|(p, preprocess)| { - machine - .read_preprocess(&mut preprocess.as_slice()) - .map(|preprocess| (p, preprocess)) - .map_err(|_| p) - }) - .collect::, _>>()?; - let (machine, share) = machine - .sign(preprocesses, &remove_participant_message(&spec.set(), Public(removed))) - .map_err(|e| match e { - FrostError::InternalError(e) => unreachable!("FrostError::InternalError {e}"), - FrostError::InvalidParticipant(_, _) | - FrostError::InvalidSigningSet(_) | - FrostError::InvalidParticipantQuantity(_, _) | - FrostError::DuplicatedParticipant(_) | - FrostError::MissingParticipant(_) => unreachable!("{e:?}"), - FrostError::InvalidPreprocess(p) | FrostError::InvalidShare(p) => p, - })?; - - Ok((machine, share.serialize().try_into().unwrap())) - } - // Get the share for this confirmation, if the preprocesses are valid. 
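One possible shape for the "Remove this ugly blob" TODO in `share_internal` above (a hypothetical helper, not part of this PR): resolve threshold start indices back to validator keys via a single pass over the spec's validators, rather than nested loops:

use std::collections::HashMap;
use frost::Participant;

// Hypothetical cleanup: given (key, threshold-start-index) pairs from the spec,
// rekey a map of per-start-index entries by the owning validator's key.
fn rekey_by_validator<K: Copy + Eq + std::hash::Hash>(
  validators: &[(K, Participant)],
  mut by_start: HashMap<Participant, Vec<u8>>,
) -> HashMap<K, Vec<u8>> {
  validators
    .iter()
    .filter_map(|(key, start)| by_start.remove(start).map(|value| (*key, value)))
    .collect()
}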
- pub(crate) fn share( - spec: &TributarySpec, - key: &Zeroizing<::F>, - attempt: u32, - preprocesses: HashMap>, - removed: [u8; 32], - ) -> Result<[u8; 32], Participant> { - Self::share_internal(spec, key, attempt, preprocesses, removed).map(|(_, share)| share) - } - - pub(crate) fn complete( - spec: &TributarySpec, - key: &Zeroizing<::F>, - attempt: u32, - preprocesses: HashMap>, - removed: [u8; 32], - mut shares: HashMap>, - ) -> Result<(Vec, [u8; 64]), Participant> { - // TODO: Remove this ugly blob - let shares = { - let mut shares_participants = shares.keys().cloned().collect::>(); - shares_participants.sort(); - let mut actual_keys = vec![]; - let spec_validators = spec.validators(); - for participant in &shares_participants { - for (validator, _) in &spec_validators { - if participant == &spec.i(*validator).unwrap().start { - actual_keys.push(*validator); - } - } - } - - let mut new_shares = HashMap::new(); - for (participant, actual_key) in shares_participants.into_iter().zip(actual_keys.into_iter()) - { - new_shares.insert(actual_key.to_bytes(), shares.remove(&participant).unwrap()); - } - new_shares - }; - - let mut signers = shares.keys().cloned().map(SeraiAddress).collect::>(); - signers.sort(); - - let machine = Self::share_internal(spec, key, attempt, preprocesses, removed) - .expect("trying to complete a machine which failed to preprocess") - .0; - - let shares = Self::from_threshold_i_to_musig_i(shares) - .into_iter() - .map(|(p, share)| { - machine.read_share(&mut share.as_slice()).map(|share| (p, share)).map_err(|_| p) - }) - .collect::, _>>()?; - let signature = machine.complete(shares).map_err(|e| match e { - FrostError::InternalError(e) => unreachable!("FrostError::InternalError {e}"), - FrostError::InvalidParticipant(_, _) | - FrostError::InvalidSigningSet(_) | - FrostError::InvalidParticipantQuantity(_, _) | - FrostError::DuplicatedParticipant(_) | - FrostError::MissingParticipant(_) => unreachable!("{e:?}"), - FrostError::InvalidPreprocess(p) | FrostError::InvalidShare(p) => p, - })?; - - Ok((signers, signature.to_bytes())) - } -} diff --git a/coordinator/src/tributary/handle.rs b/coordinator/src/tributary/handle.rs index 499170c50..bca417121 100644 --- a/coordinator/src/tributary/handle.rs +++ b/coordinator/src/tributary/handle.rs @@ -1,4 +1,4 @@ -use core::{ops::Deref, future::Future}; +use core::ops::Deref; use std::collections::HashMap; use rand_core::OsRng; @@ -10,9 +10,7 @@ use frost::dkg::Participant; use scale::{Encode, Decode}; use serai_client::{ - Public, SeraiAddress, Signature, - validator_sets::primitives::{ValidatorSet, KeyPair}, - SeraiValidatorSets, + Public, SeraiAddress, Signature, validator_sets::primitives::KeyPair, SeraiValidatorSets, }; use tributary::{Signed, TransactionKind, TransactionTrait}; @@ -23,45 +21,28 @@ use processor_messages::{ sign::{self, SignId}, }; -use serai_db::{Get, Db}; +use serai_db::*; use crate::{ processors::Processors, tributary::{ - SignData, Transaction, TributarySpec, SeraiBlockNumber, Topic, DataSpecification, DataSet, - Accumulation, - dkg_confirmer::DkgConfirmer, - dkg_removal::DkgRemoval, - scanner::{RecognizedIdType, RIDTrait, PstTxType}, - FatallySlashed, DkgShare, DkgCompleted, PlanIds, ConfirmationNonces, RemovalNonces, AttemptDb, - DataDb, + SignData, Transaction, TributarySpec, SeraiBlockNumber, Topic, Label, DataSpecification, + DataSet, Accumulation, + signing_protocol::{DkgConfirmer, DkgRemoval}, + scanner::{RecognizedIdType, RIDTrait, PstTxType, PSTTrait, PTTTrait, TributaryBlockHandler}, + 
FatallySlashed, DkgShare, DkgCompleted, PlanIds, ConfirmationNonces, RemovalNonces, DkgKeyPair, + AttemptDb, DataReceived, DataDb, }, + P2p, }; -use super::CurrentlyCompletingKeyPair; - -const DKG_COMMITMENTS: &str = "commitments"; -const DKG_SHARES: &str = "shares"; -const DKG_CONFIRMATION_NONCES: &str = "confirmation_nonces"; -const DKG_CONFIRMATION_SHARES: &str = "confirmation_shares"; - -// These d/s/b prefixes between DKG Removal, Batch, and Sign should be unnecessary, as Batch/Share -// entries themselves should already be domain separated -const DKG_REMOVAL_PREPROCESS: &str = "d_preprocess"; -const DKG_REMOVAL_SHARE: &str = "d_share"; - -const BATCH_PREPROCESS: &str = "b_preprocess"; -const BATCH_SHARE: &str = "b_share"; - -const SIGN_PREPROCESS: &str = "s_preprocess"; -const SIGN_SHARE: &str = "s_share"; - pub fn dkg_confirmation_nonces( key: &Zeroizing<::F>, spec: &TributarySpec, + txn: &mut impl DbTxn, attempt: u32, ) -> [u8; 64] { - DkgConfirmer::preprocess(spec, key, attempt) + (DkgConfirmer { key, spec, txn, attempt }).preprocess() } // If there's an error generating a key pair, return any errors which would've occured when @@ -69,18 +50,18 @@ pub fn dkg_confirmation_nonces( // // The caller must ensure only error_generating_key_pair or generated_key_pair is called for a // given attempt. -pub fn error_generating_key_pair( - getter: &G, +pub fn error_generating_key_pair( + txn: &mut impl DbTxn, key: &Zeroizing<::F>, spec: &TributarySpec, attempt: u32, ) -> Option { - let preprocesses = ConfirmationNonces::get(getter, spec.genesis(), attempt).unwrap(); + let preprocesses = ConfirmationNonces::get(txn, spec.genesis(), attempt).unwrap(); // Sign a key pair which can't be valid // (0xff used as 0 would be the Ristretto identity point, 0-length for the network key) let key_pair = KeyPair(Public([0xff; 32]), vec![0xffu8; 0].try_into().unwrap()); - match DkgConfirmer::share(spec, key, attempt, preprocesses, &key_pair) { + match (DkgConfirmer { key, spec, txn, attempt }).share(preprocesses, &key_pair) { Ok(mut share) => { // Zeroize the share to ensure it's not accessed share.zeroize(); @@ -97,166 +78,125 @@ pub fn generated_key_pair( key_pair: &KeyPair, attempt: u32, ) -> Result<[u8; 32], Participant> { - CurrentlyCompletingKeyPair::set(txn, spec.genesis(), key_pair); + DkgKeyPair::set(txn, spec.genesis(), attempt, key_pair); let preprocesses = ConfirmationNonces::get(txn, spec.genesis(), attempt).unwrap(); - DkgConfirmer::share(spec, key, attempt, preprocesses, key_pair) + (DkgConfirmer { key, spec, txn, attempt }).share(preprocesses, key_pair) } -pub(super) async fn fatal_slash< - D: Db, - FPtt: Future, - PTT: Clone + Fn(Transaction) -> FPtt, ->( - txn: &mut D::Transaction<'_>, - spec: &TributarySpec, - publish_tributary_tx: &PTT, - our_key: &Zeroizing<::F>, - slashing: [u8; 32], - reason: &str, -) { - let genesis = spec.genesis(); - - log::warn!("fatally slashing {}. 
reason: {}", hex::encode(slashing), reason); - FatallySlashed::set_fatally_slashed(txn, genesis, slashing); - // TODO: disconnect the node from network/ban from further participation in all Tributaries - - // TODO: If during DKG, trigger a re-attempt - // Despite triggering a re-attempt, this DKG may still complete and may become in-use - - // If during a DKG, remove the participant - if DkgCompleted::get(txn, genesis).is_none() { - let preprocess = DkgRemoval::preprocess(spec, our_key, 0); - let mut tx = Transaction::DkgRemovalPreprocess(SignData { - plan: slashing, - attempt: 0, - data: vec![preprocess.to_vec()], - signed: Transaction::empty_signed(), - }); - tx.sign(&mut OsRng, genesis, our_key); - publish_tributary_tx(tx).await; +fn unflatten(spec: &TributarySpec, data: &mut HashMap>) { + for (validator, _) in spec.validators() { + let range = spec.i(validator).unwrap(); + let Some(all_segments) = data.remove(&range.start) else { + continue; + }; + let mut data_vec = Vec::<_>::decode(&mut all_segments.as_slice()).unwrap(); + for i in u16::from(range.start) .. u16::from(range.end) { + let i = Participant::new(i).unwrap(); + data.insert(i, data_vec.remove(0)); + } } } -// TODO: Once Substrate confirms a key, we need to rotate our validator set OR form a second -// Tributary post-DKG -// https://github.com/serai-dex/serai/issues/426 - -async fn fatal_slash_with_participant_index< - D: Db, - FPtt: Future, - PTT: Clone + Fn(Transaction) -> FPtt, ->( - txn: &mut ::Transaction<'_>, - spec: &TributarySpec, - publish_tributary_tx: &PTT, - our_key: &Zeroizing<::F>, - i: Participant, - reason: &str, -) { - // Resolve from Participant to ::G - let i = u16::from(i); - let mut validator = None; - for (potential, _) in spec.validators() { - let v_i = spec.i(potential).unwrap(); - if (u16::from(v_i.start) <= i) && (i < u16::from(v_i.end)) { - validator = Some(potential); - break; +impl + TributaryBlockHandler<'_, T, Pro, PST, PTT, RID, P> +{ + fn accumulate( + &mut self, + data_spec: &DataSpecification, + signer: ::G, + data: &Vec, + ) -> Accumulation { + let genesis = self.spec.genesis(); + if DataDb::get(self.txn, genesis, data_spec, &signer.to_bytes()).is_some() { + panic!("accumulating data for a participant multiple times"); } - } - let validator = validator.unwrap(); + let signer_shares = { + let signer_i = + self.spec.i(signer).expect("transaction signed by a non-validator for this tributary"); + u16::from(signer_i.end) - u16::from(signer_i.start) + }; - fatal_slash::(txn, spec, publish_tributary_tx, our_key, validator.to_bytes(), reason) - .await; -} + let prior_received = DataReceived::get(self.txn, genesis, data_spec).unwrap_or_default(); + let now_received = prior_received + signer_shares; + DataReceived::set(self.txn, genesis, data_spec, &now_received); + DataDb::set(self.txn, genesis, data_spec, &signer.to_bytes(), data); -#[allow(clippy::too_many_arguments)] -pub(crate) async fn handle_application_tx< - D: Db, - Pro: Processors, - FPst: Future, - PST: Clone + Fn(ValidatorSet, PstTxType, serai_client::Transaction) -> FPst, - FPtt: Future, - PTT: Clone + Fn(Transaction) -> FPtt, - FRid: Future, - RID: RIDTrait, ->( - tx: Transaction, - spec: &TributarySpec, - processors: &Pro, - publish_serai_tx: PST, - publish_tributary_tx: &PTT, - key: &Zeroizing<::F>, - recognized_id: RID, - txn: &mut ::Transaction<'_>, -) { - let genesis = spec.genesis(); - - // Don't handle transactions from fatally slashed participants - // TODO: Because fatally slashed participants can still publish onto the 
blockchain, they have - // a notable DoS ability - if let TransactionKind::Signed(_, signed) = tx.kind() { - if FatallySlashed::get(txn, genesis, signed.signer.to_bytes()).is_some() { - return; + // If we have all the needed commitments/preprocesses/shares, tell the processor + let needed = if (data_spec.topic == Topic::Dkg) || (data_spec.topic == Topic::DkgConfirmation) { + self.spec.n() + } else { + self.spec.t() + }; + if (prior_received < needed) && (now_received >= needed) { + return Accumulation::Ready({ + let mut data = HashMap::new(); + for validator in self.spec.validators().iter().map(|validator| validator.0) { + data.insert( + self.spec.i(validator).unwrap().start, + if let Some(data) = DataDb::get(self.txn, genesis, data_spec, &validator.to_bytes()) { + data + } else { + continue; + }, + ); + } + + assert_eq!(data.len(), usize::from(needed)); + + // Remove our own piece of data, if we were involved + if data + .remove( + &self + .spec + .i(Ristretto::generator() * self.our_key.deref()) + .expect("handling a message for a Tributary we aren't part of") + .start, + ) + .is_some() + { + DataSet::Participating(data) + } else { + DataSet::NotParticipating + } + }); } + Accumulation::NotReady } - async fn handle, PTT: Clone + Fn(Transaction) -> FPtt>( - txn: &mut ::Transaction<'_>, - spec: &TributarySpec, - publish_tributary_tx: &PTT, - key: &Zeroizing<::F>, + async fn handle_data( + &mut self, data_spec: &DataSpecification, bytes: Vec, signed: &Signed, ) -> Accumulation { - let genesis = spec.genesis(); + let genesis = self.spec.genesis(); - let Some(curr_attempt) = AttemptDb::attempt(txn, genesis, data_spec.topic) else { + let Some(curr_attempt) = AttemptDb::attempt(self.txn, genesis, data_spec.topic) else { // Premature publication of a valid ID/publication of an invalid ID - fatal_slash::( - txn, - spec, - publish_tributary_tx, - key, - signed.signer.to_bytes(), - "published data for ID without an attempt", - ) - .await; + self.fatal_slash(signed.signer.to_bytes(), "published data for ID without an attempt").await; return Accumulation::NotReady; }; // If they've already published a TX for this attempt, slash // This shouldn't be reachable since nonces were made inserted by the coordinator, yet it's a // cheap check to leave in for safety - if DataDb::get(txn, genesis, data_spec, &signed.signer.to_bytes()).is_some() { - fatal_slash::( - txn, - spec, - publish_tributary_tx, - key, - signed.signer.to_bytes(), - "published data multiple times", - ) - .await; + if DataDb::get(self.txn, genesis, data_spec, &signed.signer.to_bytes()).is_some() { + self.fatal_slash(signed.signer.to_bytes(), "published data multiple times").await; return Accumulation::NotReady; } - // If the attempt is lesser than the blockchain's, slash + // If the attempt is lesser than the blockchain's, return if data_spec.attempt < curr_attempt { - // TODO: Slash for being late return Accumulation::NotReady; } // If the attempt is greater, this is a premature publication, full slash if data_spec.attempt > curr_attempt { - fatal_slash::( - txn, - spec, - publish_tributary_tx, - key, - signed.signer.to_bytes(), - "published data with an attempt which hasn't started", - ) - .await; + self + .fatal_slash( + signed.signer.to_bytes(), + "published data with an attempt which hasn't started", + ) + .await; return Accumulation::NotReady; } @@ -266,769 +206,520 @@ pub(crate) async fn handle_application_tx< // TODO: If this is shares, we need to check they are part of the selected signing set // Accumulate this data - 
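// Accumulation has three outcomes: NotReady until the threshold is crossed
// (n participants for DKG-related topics, t otherwise), then, exactly once,
// Ready(Participating(_)) with everyone else's data if we contributed, or
// Ready(NotParticipating) if the threshold was met without us.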
DataDb::accumulate(txn, key, spec, data_spec, signed.signer, &bytes) + self.accumulate(data_spec, signed.signer, &bytes) } - async fn check_sign_data_len< - D: Db, - FPtt: Future, - PTT: Clone + Fn(Transaction) -> FPtt, - >( - txn: &mut D::Transaction<'_>, - spec: &TributarySpec, - publish_tributary_tx: &PTT, - our_key: &Zeroizing<::F>, + async fn check_sign_data_len( + &mut self, signer: ::G, len: usize, ) -> Result<(), ()> { - let signer_i = spec.i(signer).unwrap(); + let signer_i = self.spec.i(signer).unwrap(); if len != usize::from(u16::from(signer_i.end) - u16::from(signer_i.start)) { - fatal_slash::( - txn, - spec, - publish_tributary_tx, - our_key, - signer.to_bytes(), - "signer published a distinct amount of sign data than they had shares", - ) - .await; + self + .fatal_slash( + signer.to_bytes(), + "signer published a distinct amount of sign data than they had shares", + ) + .await; Err(())?; } Ok(()) } - fn unflatten(spec: &TributarySpec, data: &mut HashMap>) { - for (validator, _) in spec.validators() { - let range = spec.i(validator).unwrap(); - let Some(all_segments) = data.remove(&range.start) else { - continue; - }; - let mut data_vec = Vec::<_>::decode(&mut all_segments.as_slice()).unwrap(); - for i in u16::from(range.start) .. u16::from(range.end) { - let i = Participant::new(i).unwrap(); - data.insert(i, data_vec.remove(0)); - } + fn dkg_removal(&mut self, data: &SignData<[u8; 32]>) -> DkgRemoval<'_, T> { + DkgRemoval { + spec: self.spec, + key: self.our_key, + txn: self.txn, + removing: data.plan, + attempt: data.attempt, } } - match tx { - Transaction::RemoveParticipant(i) => { - fatal_slash_with_participant_index::( - txn, - spec, - publish_tributary_tx, - key, - i, - "RemoveParticipant Provided TX", - ) - .await - } - Transaction::DkgCommitments(attempt, commitments, signed) => { - let Ok(_) = check_sign_data_len::( - txn, - spec, - publish_tributary_tx, - key, - signed.signer, - commitments.len(), - ) - .await - else { + pub(crate) async fn handle_application_tx(&mut self, tx: Transaction) { + let genesis = self.spec.genesis(); + + // Don't handle transactions from fatally slashed participants + // This prevents removed participants from sabotaging the removal signing sessions and so on + // TODO: Because fatally slashed participants can still publish onto the blockchain, they have + // a notable DoS ability + if let TransactionKind::Signed(_, signed) = tx.kind() { + if FatallySlashed::get(self.txn, genesis, signed.signer.to_bytes()).is_some() { return; - }; - match handle::( - txn, - spec, - publish_tributary_tx, - key, - &DataSpecification { topic: Topic::Dkg, label: DKG_COMMITMENTS, attempt }, - commitments.encode(), - &signed, - ) - .await - { - Accumulation::Ready(DataSet::Participating(mut commitments)) => { - log::info!("got all DkgCommitments for {}", hex::encode(genesis)); - unflatten(spec, &mut commitments); - processors - .send( - spec.set().network, - key_gen::CoordinatorMessage::Commitments { - id: KeyGenId { session: spec.set().session, attempt }, - commitments, - }, - ) - .await; - } - Accumulation::Ready(DataSet::NotParticipating) => { - panic!("wasn't a participant in DKG commitments") - } - Accumulation::NotReady => {} } } - Transaction::DkgShares { attempt, mut shares, confirmation_nonces, signed } => { - let sender_i = spec - .i(signed.signer) - .expect("transaction added to tributary by signer who isn't a participant"); - let sender_is_len = u16::from(sender_i.end) - u16::from(sender_i.start); - - if shares.len() != usize::from(sender_is_len) { - 
fatal_slash::( - txn, - spec, - publish_tributary_tx, - key, - signed.signer.to_bytes(), - "invalid amount of DKG shares by key shares", - ) - .await; - return; + match tx { + Transaction::RemoveParticipant(i) => { + self.fatal_slash_with_participant_index(i, "RemoveParticipant Provided TX").await } - for shares in &shares { - if shares.len() != (usize::from(spec.n() - sender_is_len)) { - fatal_slash::( - txn, - spec, - publish_tributary_tx, - key, - signed.signer.to_bytes(), - "invalid amount of DKG shares", - ) - .await; + + Transaction::DkgCommitments { attempt, commitments, signed } => { + let Ok(_) = self.check_sign_data_len(signed.signer, commitments.len()).await else { return; + }; + let data_spec = DataSpecification { topic: Topic::Dkg, label: Label::Preprocess, attempt }; + match self.handle_data(&data_spec, commitments.encode(), &signed).await { + Accumulation::Ready(DataSet::Participating(mut commitments)) => { + log::info!("got all DkgCommitments for {}", hex::encode(genesis)); + unflatten(self.spec, &mut commitments); + self + .processors + .send( + self.spec.set().network, + key_gen::CoordinatorMessage::Commitments { + id: KeyGenId { session: self.spec.set().session, attempt }, + commitments, + }, + ) + .await; + } + Accumulation::Ready(DataSet::NotParticipating) => { + panic!("wasn't a participant in DKG commitments") + } + Accumulation::NotReady => {} } } - // Save each share as needed for blame - { - let from_range = spec.i(signed.signer).unwrap(); - for (from_offset, shares) in shares.iter().enumerate() { - let from = - Participant::new(u16::from(from_range.start) + u16::try_from(from_offset).unwrap()) - .unwrap(); - - for (to_offset, share) in shares.iter().enumerate() { - // 0-indexed (the enumeration) to 1-indexed (Participant) - let mut to = u16::try_from(to_offset).unwrap() + 1; - // Adjust for the omission of the sender's own shares - if to >= u16::from(from_range.start) { - to += u16::from(from_range.end) - u16::from(from_range.start); - } - let to = Participant::new(to).unwrap(); - - DkgShare::set(txn, genesis, from.into(), to.into(), share); + Transaction::DkgShares { attempt, mut shares, confirmation_nonces, signed } => { + let Ok(_) = self.check_sign_data_len(signed.signer, shares.len()).await else { + return; + }; + + let sender_i = self + .spec + .i(signed.signer) + .expect("transaction added to tributary by signer who isn't a participant"); + let sender_is_len = u16::from(sender_i.end) - u16::from(sender_i.start); + for shares in &shares { + if shares.len() != (usize::from(self.spec.n() - sender_is_len)) { + self.fatal_slash(signed.signer.to_bytes(), "invalid amount of DKG shares").await; + return; } } - } - // Filter down to only our share's bytes for handle - let our_i = spec - .i(Ristretto::generator() * key.deref()) - .expect("in a tributary we're not a validator for"); - - let our_shares = if sender_i == our_i { - vec![] - } else { - // 1-indexed to 0-indexed - let mut our_i_pos = u16::from(our_i.start) - 1; - // Handle the omission of the sender's own data - if u16::from(our_i.start) > u16::from(sender_i.start) { - our_i_pos -= sender_is_len; + // Save each share as needed for blame + { + let from_range = self.spec.i(signed.signer).unwrap(); + for (from_offset, shares) in shares.iter().enumerate() { + let from = + Participant::new(u16::from(from_range.start) + u16::try_from(from_offset).unwrap()) + .unwrap(); + + for (to_offset, share) in shares.iter().enumerate() { + // 0-indexed (the enumeration) to 1-indexed (Participant) + let mut to = 
u16::try_from(to_offset).unwrap() + 1; + // Adjust for the omission of the sender's own shares + if to >= u16::from(from_range.start) { + to += u16::from(from_range.end) - u16::from(from_range.start); + } + let to = Participant::new(to).unwrap(); + + DkgShare::set(self.txn, genesis, from.into(), to.into(), share); + } + } } - let our_i_pos = usize::from(our_i_pos); - shares - .iter_mut() - .map(|shares| { - shares - .drain( - our_i_pos .. - (our_i_pos + usize::from(u16::from(our_i.end) - u16::from(our_i.start))), - ) - .collect::>() - }) - .collect() - }; - // Drop shares as it's been mutated into invalidity - drop(shares); - - let confirmation_nonces = handle::( - txn, - spec, - publish_tributary_tx, - key, - &DataSpecification { topic: Topic::Dkg, label: DKG_CONFIRMATION_NONCES, attempt }, - confirmation_nonces.to_vec(), - &signed, - ) - .await; - match handle::( - txn, - spec, - publish_tributary_tx, - key, - &DataSpecification { topic: Topic::Dkg, label: DKG_SHARES, attempt }, - our_shares.encode(), - &signed, - ) - .await - { - Accumulation::Ready(DataSet::Participating(shares)) => { - log::info!("got all DkgShares for {}", hex::encode(genesis)); - - let Accumulation::Ready(DataSet::Participating(confirmation_nonces)) = - confirmation_nonces - else { - panic!("got all DKG shares yet confirmation nonces aren't Ready(Participating(_))"); - }; - ConfirmationNonces::set(txn, genesis, attempt, &confirmation_nonces); - - // shares is a HashMap>>>, with the values representing: - // - Each of the sender's shares - // - Each of the our shares - // - Each share - // We need a Vec>>, with the outer being each of ours - let mut expanded_shares = vec![]; - for (sender_start_i, shares) in shares { - let shares: Vec>> = Vec::<_>::decode(&mut shares.as_slice()).unwrap(); - for (sender_i_offset, our_shares) in shares.into_iter().enumerate() { - for (our_share_i, our_share) in our_shares.into_iter().enumerate() { - if expanded_shares.len() <= our_share_i { - expanded_shares.push(HashMap::new()); + + // Filter down to only our share's bytes for handle + let our_i = self + .spec + .i(Ristretto::generator() * self.our_key.deref()) + .expect("in a tributary we're not a validator for"); + + let our_shares = if sender_i == our_i { + vec![] + } else { + // 1-indexed to 0-indexed + let mut our_i_pos = u16::from(our_i.start) - 1; + // Handle the omission of the sender's own data + if u16::from(our_i.start) > u16::from(sender_i.start) { + our_i_pos -= sender_is_len; + } + let our_i_pos = usize::from(our_i_pos); + shares + .iter_mut() + .map(|shares| { + shares + .drain( + our_i_pos .. 
+ (our_i_pos + usize::from(u16::from(our_i.end) - u16::from(our_i.start))), + ) + .collect::>() + }) + .collect() + }; + // Drop shares as it's been mutated into invalidity + drop(shares); + + let data_spec = DataSpecification { topic: Topic::Dkg, label: Label::Share, attempt }; + let encoded_data = (confirmation_nonces.to_vec(), our_shares.encode()).encode(); + match self.handle_data(&data_spec, encoded_data, &signed).await { + Accumulation::Ready(DataSet::Participating(confirmation_nonces_and_shares)) => { + log::info!("got all DkgShares for {}", hex::encode(genesis)); + + let mut confirmation_nonces = HashMap::new(); + let mut shares = HashMap::new(); + for (participant, confirmation_nonces_and_shares) in confirmation_nonces_and_shares { + let (these_confirmation_nonces, these_shares) = + <(Vec, Vec)>::decode(&mut confirmation_nonces_and_shares.as_slice()) + .unwrap(); + confirmation_nonces.insert(participant, these_confirmation_nonces); + shares.insert(participant, these_shares); + } + ConfirmationNonces::set(self.txn, genesis, attempt, &confirmation_nonces); + + // shares is a HashMap>>>, with the values representing: + // - Each of the sender's shares + // - Each of the our shares + // - Each share + // We need a Vec>>, with the outer being each of ours + let mut expanded_shares = vec![]; + for (sender_start_i, shares) in shares { + let shares: Vec>> = Vec::<_>::decode(&mut shares.as_slice()).unwrap(); + for (sender_i_offset, our_shares) in shares.into_iter().enumerate() { + for (our_share_i, our_share) in our_shares.into_iter().enumerate() { + if expanded_shares.len() <= our_share_i { + expanded_shares.push(HashMap::new()); + } + expanded_shares[our_share_i].insert( + Participant::new( + u16::from(sender_start_i) + u16::try_from(sender_i_offset).unwrap(), + ) + .unwrap(), + our_share, + ); } - expanded_shares[our_share_i].insert( - Participant::new( - u16::from(sender_start_i) + u16::try_from(sender_i_offset).unwrap(), - ) - .unwrap(), - our_share, - ); } } + + self + .processors + .send( + self.spec.set().network, + key_gen::CoordinatorMessage::Shares { + id: KeyGenId { session: self.spec.set().session, attempt }, + shares: expanded_shares, + }, + ) + .await; } + Accumulation::Ready(DataSet::NotParticipating) => { + panic!("wasn't a participant in DKG shares") + } + Accumulation::NotReady => {} + } + } - processors - .send( - spec.set().network, - key_gen::CoordinatorMessage::Shares { - id: KeyGenId { session: spec.set().session, attempt }, - shares: expanded_shares, - }, + Transaction::InvalidDkgShare { attempt, accuser, faulty, blame, signed } => { + let range = self.spec.i(signed.signer).unwrap(); + if !range.contains(&accuser) { + self + .fatal_slash( + signed.signer.to_bytes(), + "accused with a Participant index which wasn't theirs", ) .await; + return; } - Accumulation::Ready(DataSet::NotParticipating) => { - panic!("wasn't a participant in DKG shares") + if range.contains(&faulty) { + self + .fatal_slash(signed.signer.to_bytes(), "accused self of having an InvalidDkgShare") + .await; + return; } - Accumulation::NotReady => assert!(matches!(confirmation_nonces, Accumulation::NotReady)), - } - } - // TODO: Ban self-accusals - Transaction::InvalidDkgShare { attempt, accuser, faulty, blame, signed } => { - let range = spec.i(signed.signer).unwrap(); - if (u16::from(accuser) < u16::from(range.start)) || - (u16::from(range.end) <= u16::from(accuser)) - { - fatal_slash::( - txn, - spec, - publish_tributary_tx, - key, - signed.signer.to_bytes(), - "accused with a Participant index 
which wasn't theirs", - ) - .await; - return; - } - - if !((u16::from(range.start) <= u16::from(faulty)) && - (u16::from(faulty) < u16::from(range.end))) - { - fatal_slash::( - txn, - spec, - publish_tributary_tx, - key, - signed.signer.to_bytes(), - "accused self of having an InvalidDkgShare", - ) - .await; - return; + let share = DkgShare::get(self.txn, genesis, accuser.into(), faulty.into()).unwrap(); + self + .processors + .send( + self.spec.set().network, + key_gen::CoordinatorMessage::VerifyBlame { + id: KeyGenId { session: self.spec.set().session, attempt }, + accuser, + accused: faulty, + share, + blame, + }, + ) + .await; } - let share = DkgShare::get(txn, genesis, accuser.into(), faulty.into()).unwrap(); - processors - .send( - spec.set().network, - key_gen::CoordinatorMessage::VerifyBlame { - id: KeyGenId { session: spec.set().session, attempt }, - accuser, - accused: faulty, - share, - blame, - }, - ) - .await; - } - - Transaction::DkgConfirmed(attempt, shares, signed) => { - match handle::( - txn, - spec, - publish_tributary_tx, - key, - &DataSpecification { topic: Topic::Dkg, label: DKG_CONFIRMATION_SHARES, attempt }, - shares.to_vec(), - &signed, - ) - .await - { - Accumulation::Ready(DataSet::Participating(shares)) => { - log::info!("got all DkgConfirmed for {}", hex::encode(genesis)); - - let preprocesses = ConfirmationNonces::get(txn, genesis, attempt).unwrap(); - // TODO: This can technically happen under very very very specific timing as the txn put - // happens before DkgConfirmed, yet the txn commit isn't guaranteed to - let key_pair = CurrentlyCompletingKeyPair::get(txn, genesis).expect( - "in DkgConfirmed handling, which happens after everyone \ + Transaction::DkgConfirmed { attempt, confirmation_share, signed } => { + let data_spec = + DataSpecification { topic: Topic::DkgConfirmation, label: Label::Share, attempt }; + match self.handle_data(&data_spec, confirmation_share.to_vec(), &signed).await { + Accumulation::Ready(DataSet::Participating(shares)) => { + log::info!("got all DkgConfirmed for {}", hex::encode(genesis)); + + let preprocesses = ConfirmationNonces::get(self.txn, genesis, attempt).unwrap(); + // TODO: This can technically happen under very very very specific timing as the txn put + // happens before DkgConfirmed, yet the txn commit isn't guaranteed to + let key_pair = DkgKeyPair::get(self.txn, genesis, attempt).expect( + "in DkgConfirmed handling, which happens after everyone \ (including us) fires DkgConfirmed, yet no confirming key pair", - ); - let sig = - match DkgConfirmer::complete(spec, key, attempt, preprocesses, &key_pair, shares) { - Ok(sig) => sig, - Err(p) => { - fatal_slash_with_participant_index::( - txn, - spec, - publish_tributary_tx, - key, - p, - "invalid DkgConfirmer share", - ) - .await; - return; - } - }; + ); + let sig = + match (DkgConfirmer { spec: self.spec, key: self.our_key, txn: self.txn, attempt }) + .complete(preprocesses, &key_pair, shares) + { + Ok(sig) => sig, + Err(p) => { + self.fatal_slash_with_participant_index(p, "invalid DkgConfirmer share").await; + return; + } + }; - DkgCompleted::set(txn, genesis, &()); + DkgCompleted::set(self.txn, genesis, &()); - publish_serai_tx( - spec.set(), - PstTxType::SetKeys, - SeraiValidatorSets::set_keys(spec.set().network, key_pair, Signature(sig)), - ) - .await; - } - Accumulation::Ready(DataSet::NotParticipating) => { - panic!("wasn't a participant in DKG confirmination shares") + self + .publish_serai_tx + .publish_serai_tx( + self.spec.set(), + PstTxType::SetKeys, + 
SeraiValidatorSets::set_keys(self.spec.set().network, key_pair, Signature(sig)), + ) + .await; + } + Accumulation::Ready(DataSet::NotParticipating) => { + panic!("wasn't a participant in DKG confirmination shares") + } + Accumulation::NotReady => {} } - Accumulation::NotReady => {} } - } - Transaction::DkgRemovalPreprocess(data) => { - let signer = data.signed.signer; - // TODO: Only handle this if we're not actively removing this validator - if (data.data.len() != 1) || (data.data[0].len() != 64) { - fatal_slash::( - txn, - spec, - publish_tributary_tx, - key, - signer.to_bytes(), - "non-64-byte DKG removal preprocess", - ) - .await; - return; - } - match handle::( - txn, - spec, - publish_tributary_tx, - key, - &DataSpecification { - topic: Topic::DkgRemoval(data.plan), - label: DKG_REMOVAL_PREPROCESS, - attempt: data.attempt, - }, - data.data.encode(), - &data.signed, - ) - .await - { - Accumulation::Ready(DataSet::Participating(preprocesses)) => { - RemovalNonces::set(txn, genesis, data.plan, data.attempt, &preprocesses); - - let Ok(share) = DkgRemoval::share(spec, key, data.attempt, preprocesses, data.plan) - else { - // TODO: Locally increase slash points to maximum (distinct from an explicitly fatal - // slash) and censor transactions (yet don't explicitly ban) - return; - }; - - let mut tx = Transaction::DkgRemovalPreprocess(SignData { - plan: data.plan, - attempt: data.attempt, - data: vec![share.to_vec()], - signed: Transaction::empty_signed(), - }); - tx.sign(&mut OsRng, genesis, key); - publish_tributary_tx(tx).await; + Transaction::DkgRemoval(data) => { + let signer = data.signed.signer; + let expected_len = match data.label { + Label::Preprocess => 64, + Label::Share => 32, + }; + if (data.data.len() != 1) || (data.data[0].len() != expected_len) { + self.fatal_slash(signer.to_bytes(), "unexpected length data for dkg removal").await; + return; } - Accumulation::Ready(DataSet::NotParticipating) => {} - Accumulation::NotReady => {} - } - } - Transaction::DkgRemovalShare(data) => { - let signer = data.signed.signer; - if (data.data.len() != 1) || (data.data[0].len() != 32) { - fatal_slash::( - txn, - spec, - publish_tributary_tx, - key, - signer.to_bytes(), - "non-32-byte DKG removal share", - ) - .await; - return; - } - match handle::( - txn, - spec, - publish_tributary_tx, - key, - &DataSpecification { + + let data_spec = DataSpecification { topic: Topic::DkgRemoval(data.plan), - label: DKG_REMOVAL_SHARE, + label: data.label, attempt: data.attempt, - }, - data.data.encode(), - &data.signed, - ) - .await - { - Accumulation::Ready(DataSet::Participating(shares)) => { - let preprocesses = RemovalNonces::get(txn, genesis, data.plan, data.attempt).unwrap(); - - let Ok((signers, signature)) = - DkgRemoval::complete(spec, key, data.attempt, preprocesses, data.plan, shares) - else { - // TODO: Locally increase slash points to maximum (distinct from an explicitly fatal - // slash) and censor transactions (yet don't explicitly ban) - return; - }; - - // TODO: Only handle this if we're not actively removing any of the signers - // The created Substrate call will fail if a removed validator was one of the signers - // Since: - // 1) publish_serai_tx will block this task until the TX is published - // 2) We won't scan any more TXs/blocks until we handle this TX - // The TX *must* be successfully published *before* we start removing any more signers - // Accordingly, if the signers aren't currently being removed, they won't be removed - // by the time this transaction is successfully 
published *unless* a malicious 34% - // participates with the non-participating 33% to continue operation and produce a - // distinct removal (since the non-participating won't block in this block) - // This breaks BFT and is accordingly within bounds - - let tx = serai_client::SeraiValidatorSets::remove_participant( - spec.set().network, - SeraiAddress(data.plan), - signers, - Signature(signature), - ); - publish_serai_tx(spec.set(), PstTxType::RemoveParticipant(data.plan), tx).await; + }; + let Accumulation::Ready(DataSet::Participating(results)) = + self.handle_data(&data_spec, data.data.encode(), &data.signed).await + else { + return; + }; + + match data.label { + Label::Preprocess => { + RemovalNonces::set(self.txn, genesis, data.plan, data.attempt, &results); + + let Ok(share) = self.dkg_removal(&data).share(results) else { + // TODO: Locally increase slash points to maximum (distinct from an explicitly fatal + // slash) and censor transactions (yet don't explicitly ban) + return; + }; + + let mut tx = Transaction::DkgRemoval(SignData { + plan: data.plan, + attempt: data.attempt, + label: Label::Preprocess, + data: vec![share.to_vec()], + signed: Transaction::empty_signed(), + }); + tx.sign(&mut OsRng, genesis, self.our_key); + self.publish_tributary_tx.publish_tributary_tx(tx).await; + } + Label::Share => { + let preprocesses = + RemovalNonces::get(self.txn, genesis, data.plan, data.attempt).unwrap(); + + let Ok((signers, signature)) = self.dkg_removal(&data).complete(preprocesses, results) + else { + // TODO: Locally increase slash points to maximum (distinct from an explicitly fatal + // slash) and censor transactions (yet don't explicitly ban) + return; + }; + + // TODO: Only handle this if we're not actively removing any of the signers + // The created Substrate call will fail if a removed validator was one of the signers + // Since: + // 1) publish_serai_tx will block this task until the TX is published + // 2) We won't scan any more TXs/blocks until we handle this TX + // The TX *must* be successfully published *before* we start removing any more + // signers + // Accordingly, if the signers aren't currently being removed, they won't be removed + // by the time this transaction is successfully published *unless* a malicious 34% + // participates with the non-participating 33% to continue operation and produce a + // distinct removal (since the non-participating won't block in this block) + // This breaks BFT and is accordingly within bounds + + let tx = serai_client::SeraiValidatorSets::remove_participant( + self.spec.set().network, + SeraiAddress(data.plan), + signers, + Signature(signature), + ); + self + .publish_serai_tx + .publish_serai_tx(self.spec.set(), PstTxType::RemoveParticipant(data.plan), tx) + .await; + } } - Accumulation::Ready(DataSet::NotParticipating) => {} - Accumulation::NotReady => {} } - } - Transaction::CosignSubstrateBlock(hash) => { - AttemptDb::recognize_topic( - txn, - genesis, - Topic::SubstrateSign(SubstrateSignableId::CosigningSubstrateBlock(hash)), - ); - - let block_number = SeraiBlockNumber::get(txn, hash) - .expect("CosignSubstrateBlock yet didn't save Serai block number"); - processors - .send( - spec.set().network, - coordinator::CoordinatorMessage::CosignSubstrateBlock { - id: SubstrateSignId { - session: spec.set().session, - id: SubstrateSignableId::CosigningSubstrateBlock(hash), - attempt: 0, - }, - block_number, + Transaction::CosignSubstrateBlock(hash) => { + AttemptDb::recognize_topic( + self.txn, + genesis, + 
Topic::SubstrateSign(SubstrateSignableId::CosigningSubstrateBlock(hash)), + ); + + let block_number = SeraiBlockNumber::get(self.txn, hash) + .expect("CosignSubstrateBlock yet didn't save Serai block number"); + let msg = coordinator::CoordinatorMessage::CosignSubstrateBlock { + id: SubstrateSignId { + session: self.spec.set().session, + id: SubstrateSignableId::CosigningSubstrateBlock(hash), + attempt: 0, }, - ) - .await; - } + block_number, + }; + self.processors.send(self.spec.set().network, msg).await; + } - Transaction::Batch(_, batch) => { - // Because this Batch has achieved synchrony, its batch ID should be authorized - AttemptDb::recognize_topic( - txn, - genesis, - Topic::SubstrateSign(SubstrateSignableId::Batch(batch)), - ); - recognized_id(spec.set(), genesis, RecognizedIdType::Batch, batch.to_vec()).await; - } + Transaction::Batch { block: _, batch } => { + // Because this Batch has achieved synchrony, its batch ID should be authorized + AttemptDb::recognize_topic( + self.txn, + genesis, + Topic::SubstrateSign(SubstrateSignableId::Batch(batch)), + ); + self + .recognized_id + .recognized_id(self.spec.set(), genesis, RecognizedIdType::Batch, batch.to_vec()) + .await; + } - Transaction::SubstrateBlock(block) => { - let plan_ids = PlanIds::get(txn, &genesis, block).expect( - "synced a tributary block finalizing a substrate block in a provided transaction \ + Transaction::SubstrateBlock(block) => { + let plan_ids = PlanIds::get(self.txn, &genesis, block).expect( + "synced a tributary block finalizing a substrate block in a provided transaction \ despite us not providing that transaction", - ); + ); - for id in plan_ids.into_iter() { - AttemptDb::recognize_topic(txn, genesis, Topic::Sign(id)); - recognized_id(spec.set(), genesis, RecognizedIdType::Plan, id.to_vec()).await; + for id in plan_ids.into_iter() { + AttemptDb::recognize_topic(self.txn, genesis, Topic::Sign(id)); + self + .recognized_id + .recognized_id(self.spec.set(), genesis, RecognizedIdType::Plan, id.to_vec()) + .await; + } } - } - Transaction::SubstratePreprocess(data) => { - let signer = data.signed.signer; - let Ok(_) = check_sign_data_len::( - txn, - spec, - publish_tributary_tx, - key, - signer, - data.data.len(), - ) - .await - else { - return; - }; - for data in &data.data { - if data.len() != 64 { - fatal_slash::( - txn, - spec, - publish_tributary_tx, - key, - signer.to_bytes(), - "non-64-byte Substrate preprocess", - ) - .await; + Transaction::SubstrateSign(data) => { + let signer = data.signed.signer; + let Ok(_) = self.check_sign_data_len(signer, data.data.len()).await else { return; + }; + let expected_len = match data.label { + Label::Preprocess => 64, + Label::Share => 32, + }; + for data in &data.data { + if data.len() != expected_len { + self + .fatal_slash( + signer.to_bytes(), + "unexpected length data for substrate signing protocol", + ) + .await; + } } - } - match handle::( - txn, - spec, - publish_tributary_tx, - key, - &DataSpecification { + + let data_spec = DataSpecification { topic: Topic::SubstrateSign(data.plan), - label: BATCH_PREPROCESS, + label: data.label, attempt: data.attempt, - }, - data.data.encode(), - &data.signed, - ) - .await - { - Accumulation::Ready(DataSet::Participating(mut preprocesses)) => { - unflatten(spec, &mut preprocesses); - processors - .send( - spec.set().network, - coordinator::CoordinatorMessage::SubstratePreprocesses { - id: SubstrateSignId { - session: spec.set().session, - id: data.plan, - attempt: data.attempt, - }, - preprocesses: preprocesses - .into_iter() 
- .map(|(k, v)| (k, v.try_into().unwrap())) - .collect(), - }, - ) - .await; - } - Accumulation::Ready(DataSet::NotParticipating) => {} - Accumulation::NotReady => {} - } - } - Transaction::SubstrateShare(data) => { - let Ok(_) = check_sign_data_len::( - txn, - spec, - publish_tributary_tx, - key, - data.signed.signer, - data.data.len(), - ) - .await - else { - return; - }; - match handle::( - txn, - spec, - publish_tributary_tx, - key, - &DataSpecification { - topic: Topic::SubstrateSign(data.plan), - label: BATCH_SHARE, + }; + let Accumulation::Ready(DataSet::Participating(mut results)) = + self.handle_data(&data_spec, data.data.encode(), &data.signed).await + else { + return; + }; + unflatten(self.spec, &mut results); + + let id = SubstrateSignId { + session: self.spec.set().session, + id: data.plan, attempt: data.attempt, - }, - data.data.encode(), - &data.signed, - ) - .await - { - Accumulation::Ready(DataSet::Participating(mut shares)) => { - unflatten(spec, &mut shares); - processors - .send( - spec.set().network, - coordinator::CoordinatorMessage::SubstrateShares { - id: SubstrateSignId { - session: spec.set().session, - id: data.plan, - attempt: data.attempt, - }, - shares: shares - .into_iter() - .map(|(validator, share)| (validator, share.try_into().unwrap())) - .collect(), - }, - ) - .await; - } - Accumulation::Ready(DataSet::NotParticipating) => {} - Accumulation::NotReady => {} + }; + let msg = match data.label { + Label::Preprocess => coordinator::CoordinatorMessage::SubstratePreprocesses { + id, + preprocesses: results.into_iter().map(|(v, p)| (v, p.try_into().unwrap())).collect(), + }, + Label::Share => coordinator::CoordinatorMessage::SubstrateShares { + id, + shares: results.into_iter().map(|(v, p)| (v, p.try_into().unwrap())).collect(), + }, + }; + self.processors.send(self.spec.set().network, msg).await; } - } - Transaction::SignPreprocess(data) => { - let Ok(_) = check_sign_data_len::( - txn, - spec, - publish_tributary_tx, - key, - data.signed.signer, - data.data.len(), - ) - .await - else { - return; - }; - match handle::( - txn, - spec, - publish_tributary_tx, - key, - &DataSpecification { + Transaction::Sign(data) => { + let Ok(_) = self.check_sign_data_len(data.signed.signer, data.data.len()).await else { + return; + }; + + let data_spec = DataSpecification { topic: Topic::Sign(data.plan), - label: SIGN_PREPROCESS, + label: data.label, attempt: data.attempt, - }, - data.data.encode(), - &data.signed, - ) - .await - { - Accumulation::Ready(DataSet::Participating(mut preprocesses)) => { - unflatten(spec, &mut preprocesses); - processors + }; + if let Accumulation::Ready(DataSet::Participating(mut results)) = + self.handle_data(&data_spec, data.data.encode(), &data.signed).await + { + unflatten(self.spec, &mut results); + let id = + SignId { session: self.spec.set().session, id: data.plan, attempt: data.attempt }; + self + .processors .send( - spec.set().network, - sign::CoordinatorMessage::Preprocesses { - id: SignId { session: spec.set().session, id: data.plan, attempt: data.attempt }, - preprocesses, + self.spec.set().network, + match data.label { + Label::Preprocess => { + sign::CoordinatorMessage::Preprocesses { id, preprocesses: results } + } + Label::Share => sign::CoordinatorMessage::Shares { id, shares: results }, }, ) .await; } - Accumulation::Ready(DataSet::NotParticipating) => {} - Accumulation::NotReady => {} } - } - Transaction::SignShare(data) => { - let Ok(_) = check_sign_data_len::( - txn, - spec, - publish_tributary_tx, - key, - data.signed.signer, 
- data.data.len(), - ) - .await - else { - return; - }; - match handle::( - txn, - spec, - publish_tributary_tx, - key, - &DataSpecification { - topic: Topic::Sign(data.plan), - label: SIGN_SHARE, - attempt: data.attempt, - }, - data.data.encode(), - &data.signed, - ) - .await - { - Accumulation::Ready(DataSet::Participating(mut shares)) => { - unflatten(spec, &mut shares); - processors - .send( - spec.set().network, - sign::CoordinatorMessage::Shares { - id: SignId { session: spec.set().session, id: data.plan, attempt: data.attempt }, - shares, - }, - ) + + Transaction::SignCompleted { plan, tx_hash, first_signer, signature: _ } => { + log::info!( + "on-chain SignCompleted claims {} completes {}", + hex::encode(&tx_hash), + hex::encode(plan) + ); + + if AttemptDb::attempt(self.txn, genesis, Topic::Sign(plan)).is_none() { + self + .fatal_slash(first_signer.to_bytes(), "claimed an unrecognized plan was completed") .await; - } - Accumulation::Ready(DataSet::NotParticipating) => {} - Accumulation::NotReady => {} - } - } - Transaction::SignCompleted { plan, tx_hash, first_signer, signature: _ } => { - log::info!( - "on-chain SignCompleted claims {} completes {}", - hex::encode(&tx_hash), - hex::encode(plan) - ); - - if AttemptDb::attempt(txn, genesis, Topic::Sign(plan)).is_none() { - fatal_slash::( - txn, - spec, - publish_tributary_tx, - key, - first_signer.to_bytes(), - "claimed an unrecognized plan was completed", - ) - .await; - return; - }; + return; + }; - // TODO: Confirm this signer hasn't prior published a completion + // TODO: Confirm this signer hasn't prior published a completion - processors - .send( - spec.set().network, - sign::CoordinatorMessage::Completed { - session: spec.set().session, - id: plan, - tx: tx_hash, - }, - ) - .await; + let msg = sign::CoordinatorMessage::Completed { + session: self.spec.set().session, + id: plan, + tx: tx_hash, + }; + self.processors.send(self.spec.set().network, msg).await; + } } } } diff --git a/coordinator/src/tributary/mod.rs b/coordinator/src/tributary/mod.rs index d9f1aa184..6e2f26610 100644 --- a/coordinator/src/tributary/mod.rs +++ b/coordinator/src/tributary/mod.rs @@ -1,852 +1,63 @@ -use core::{ - ops::{Deref, Range}, - fmt::Debug, -}; -use std::io::{self, Read, Write}; - -use zeroize::Zeroizing; -use rand_core::{RngCore, CryptoRng}; - -use blake2::{Digest, Blake2s256}; -use transcript::{Transcript, RecommendedTranscript}; - -use ciphersuite::{ - group::{ff::Field, GroupEncoding}, - Ciphersuite, Ristretto, -}; -use schnorr::SchnorrSignature; -use frost::Participant; - -use scale::{Encode, Decode}; -use processor_messages::coordinator::SubstrateSignableId; - -use serai_client::{ - primitives::{NetworkId, PublicKey}, - validator_sets::primitives::{Session, ValidatorSet}, -}; - -#[rustfmt::skip] use tributary::{ ReadWrite, - transaction::{Signed, TransactionError, TransactionKind, Transaction as TransactionTrait}, - TRANSACTION_SIZE_LIMIT, + transaction::{TransactionError, TransactionKind, Transaction as TransactionTrait}, + Tributary, }; mod db; pub use db::*; -mod dkg_confirmer; -mod dkg_removal; +mod spec; +pub use spec::TributarySpec; + +mod transaction; +pub use transaction::{Label, SignData, Transaction}; + +mod signing_protocol; mod handle; pub use handle::*; pub mod scanner; -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct TributarySpec { - serai_block: [u8; 32], - start_time: u64, - set: ValidatorSet, - validators: Vec<(::G, u16)>, -} - -impl TributarySpec { - pub fn new( - serai_block: [u8; 32], - start_time: u64, - set: 
ValidatorSet, - set_participants: Vec<(PublicKey, u16)>, - ) -> TributarySpec { - let mut validators = vec![]; - for (participant, shares) in set_participants { - let participant = ::read_G::<&[u8]>(&mut participant.0.as_ref()) - .expect("invalid key registered as participant"); - validators.push((participant, shares)); - } - - Self { serai_block, start_time, set, validators } - } - - pub fn set(&self) -> ValidatorSet { - self.set - } - - pub fn genesis(&self) -> [u8; 32] { - // Calculate the genesis for this Tributary - let mut genesis = RecommendedTranscript::new(b"Serai Tributary Genesis"); - // This locks it to a specific Serai chain - genesis.append_message(b"serai_block", self.serai_block); - genesis.append_message(b"session", self.set.session.0.to_le_bytes()); - genesis.append_message(b"network", self.set.network.encode()); - let genesis = genesis.challenge(b"genesis"); - let genesis_ref: &[u8] = genesis.as_ref(); - genesis_ref[.. 32].try_into().unwrap() - } - - pub fn start_time(&self) -> u64 { - self.start_time - } - - pub fn n(&self) -> u16 { - self.validators.iter().map(|(_, weight)| weight).sum() - } - - pub fn t(&self) -> u16 { - ((2 * self.n()) / 3) + 1 - } - - pub fn i(&self, key: ::G) -> Option> { - let mut i = 1; - for (validator, weight) in &self.validators { - if validator == &key { - return Some(Range { - start: Participant::new(i).unwrap(), - end: Participant::new(i + weight).unwrap(), - }); - } - i += weight; - } - None - } - - pub fn validators(&self) -> Vec<(::G, u64)> { - self.validators.iter().map(|(validator, weight)| (*validator, u64::from(*weight))).collect() - } - - pub fn write(&self, writer: &mut W) -> io::Result<()> { - writer.write_all(&self.serai_block)?; - writer.write_all(&self.start_time.to_le_bytes())?; - writer.write_all(&self.set.session.0.to_le_bytes())?; - let network_encoded = self.set.network.encode(); - assert_eq!(network_encoded.len(), 1); - writer.write_all(&network_encoded)?; - writer.write_all(&u32::try_from(self.validators.len()).unwrap().to_le_bytes())?; - for validator in &self.validators { - writer.write_all(&validator.0.to_bytes())?; - writer.write_all(&validator.1.to_le_bytes())?; - } - Ok(()) - } - - pub fn serialize(&self) -> Vec { - let mut res = vec![]; - self.write(&mut res).unwrap(); - res - } - - pub fn read(reader: &mut R) -> io::Result { - let mut serai_block = [0; 32]; - reader.read_exact(&mut serai_block)?; - - let mut start_time = [0; 8]; - reader.read_exact(&mut start_time)?; - let start_time = u64::from_le_bytes(start_time); - - let mut session = [0; 4]; - reader.read_exact(&mut session)?; - let session = Session(u32::from_le_bytes(session)); - - let mut network = [0; 1]; - reader.read_exact(&mut network)?; - let network = - NetworkId::decode(&mut &network[..]).map_err(|_| io::Error::other("invalid network"))?; - - let mut validators_len = [0; 4]; - reader.read_exact(&mut validators_len)?; - let validators_len = usize::try_from(u32::from_le_bytes(validators_len)).unwrap(); - - let mut validators = Vec::with_capacity(validators_len); - for _ in 0 .. 
validators_len { - let key = Ristretto::read_G(reader)?; - let mut weight = [0; 2]; - reader.read_exact(&mut weight)?; - validators.push((key, u16::from_le_bytes(weight))); - } - - Ok(Self { serai_block, start_time, set: ValidatorSet { session, network }, validators }) - } -} - -#[derive(Clone, PartialEq, Eq)] -pub struct SignData { - pub plan: Id, - pub attempt: u32, - - pub data: Vec>, - - pub signed: Signed, -} - -impl Debug for SignData { - fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { - fmt - .debug_struct("SignData") - .field("id", &hex::encode(self.plan.encode())) - .field("attempt", &self.attempt) - .field("signer", &hex::encode(self.signed.signer.to_bytes())) - .finish_non_exhaustive() - } -} - -impl SignData { - pub(crate) fn read(reader: &mut R, nonce: u32) -> io::Result { - let plan = Id::decode(&mut scale::IoReader(&mut *reader)) - .map_err(|_| io::Error::other("invalid plan in SignData"))?; - - let mut attempt = [0; 4]; - reader.read_exact(&mut attempt)?; - let attempt = u32::from_le_bytes(attempt); - - let data = { - let mut data_pieces = [0]; - reader.read_exact(&mut data_pieces)?; - if data_pieces[0] == 0 { - Err(io::Error::other("zero pieces of data in SignData"))?; - } - let mut all_data = vec![]; - for _ in 0 .. data_pieces[0] { - let mut data_len = [0; 2]; - reader.read_exact(&mut data_len)?; - let mut data = vec![0; usize::from(u16::from_le_bytes(data_len))]; - reader.read_exact(&mut data)?; - all_data.push(data); - } - all_data - }; - - let signed = Signed::read_without_nonce(reader, nonce)?; - - Ok(SignData { plan, attempt, data, signed }) - } - - pub(crate) fn write(&self, writer: &mut W) -> io::Result<()> { - writer.write_all(&self.plan.encode())?; - writer.write_all(&self.attempt.to_le_bytes())?; - - writer.write_all(&[u8::try_from(self.data.len()).unwrap()])?; - for data in &self.data { - if data.len() > u16::MAX.into() { - // Currently, the largest individual preprocess is a Monero transaction - // It provides 4 commitments per input (128 bytes), a 64-byte proof for them, along with a - // key image and proof (96 bytes) - // Even with all of that, we could support 227 inputs in a single TX - // Monero is limited to ~120 inputs per TX - // - // Bitcoin has a much higher input count of 520, yet it only uses 64 bytes per preprocess - Err(io::Error::other("signing data exceeded 65535 bytes"))?; - } - writer.write_all(&u16::try_from(data.len()).unwrap().to_le_bytes())?; - writer.write_all(data)?; - } - - self.signed.write_without_nonce(writer) - } -} - -#[derive(Clone, PartialEq, Eq)] -pub enum Transaction { - RemoveParticipant(Participant), - - // Once this completes successfully, no more instances should be created. - DkgCommitments(u32, Vec>, Signed), - DkgShares { - attempt: u32, - // Sending Participant, Receiving Participant, Share - shares: Vec>>, - confirmation_nonces: [u8; 64], - signed: Signed, - }, - InvalidDkgShare { - attempt: u32, - accuser: Participant, - faulty: Participant, - blame: Option>, - signed: Signed, - }, - DkgConfirmed(u32, [u8; 32], Signed), - - DkgRemovalPreprocess(SignData<[u8; 32]>), - DkgRemovalShare(SignData<[u8; 32]>), - - // Co-sign a Substrate block. - CosignSubstrateBlock([u8; 32]), - - // When we have synchrony on a batch, we can allow signing it - // TODO (never?): This is less efficient compared to an ExternalBlock provided transaction, - // which would be binding over the block hash and automatically achieve synchrony on all - // relevant batches. 
ExternalBlock was removed for this due to complexity around the pipeline - // with the current processor, yet it would still be an improvement. - Batch([u8; 32], [u8; 5]), - // When a Serai block is finalized, with the contained batches, we can allow the associated plan - // IDs - SubstrateBlock(u64), - - SubstratePreprocess(SignData), - SubstrateShare(SignData), - - SignPreprocess(SignData<[u8; 32]>), - SignShare(SignData<[u8; 32]>), - // This is defined as an Unsigned transaction in order to de-duplicate SignCompleted amongst - // reporters (who should all report the same thing) - // We do still track the signer in order to prevent a single signer from publishing arbitrarily - // many TXs without penalty - // Here, they're denoted as the first_signer, as only the signer of the first TX to be included - // with this pairing will be remembered on-chain - SignCompleted { - plan: [u8; 32], - tx_hash: Vec, - first_signer: ::G, - signature: SchnorrSignature, - }, -} - -impl Debug for Transaction { - fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { - match self { - Transaction::RemoveParticipant(participant) => fmt - .debug_struct("Transaction::RemoveParticipant") - .field("participant", participant) - .finish(), - Transaction::DkgCommitments(attempt, _, signed) => fmt - .debug_struct("Transaction::DkgCommitments") - .field("attempt", attempt) - .field("signer", &hex::encode(signed.signer.to_bytes())) - .finish_non_exhaustive(), - Transaction::DkgShares { attempt, signed, .. } => fmt - .debug_struct("Transaction::DkgShares") - .field("attempt", attempt) - .field("signer", &hex::encode(signed.signer.to_bytes())) - .finish_non_exhaustive(), - Transaction::InvalidDkgShare { attempt, accuser, faulty, .. } => fmt - .debug_struct("Transaction::InvalidDkgShare") - .field("attempt", attempt) - .field("accuser", accuser) - .field("faulty", faulty) - .finish_non_exhaustive(), - Transaction::DkgConfirmed(attempt, _, signed) => fmt - .debug_struct("Transaction::DkgConfirmed") - .field("attempt", attempt) - .field("signer", &hex::encode(signed.signer.to_bytes())) - .finish_non_exhaustive(), - Transaction::DkgRemovalPreprocess(sign_data) => { - fmt.debug_struct("Transaction::DkgRemovalPreprocess").field("sign_data", sign_data).finish() - } - Transaction::DkgRemovalShare(sign_data) => { - fmt.debug_struct("Transaction::DkgRemovalShare").field("sign_data", sign_data).finish() - } - Transaction::CosignSubstrateBlock(block) => fmt - .debug_struct("Transaction::CosignSubstrateBlock") - .field("block", &hex::encode(block)) - .finish(), - Transaction::Batch(block, batch) => fmt - .debug_struct("Transaction::Batch") - .field("block", &hex::encode(block)) - .field("batch", &hex::encode(batch)) - .finish(), - Transaction::SubstrateBlock(block) => { - fmt.debug_struct("Transaction::SubstrateBlock").field("block", block).finish() - } - Transaction::SubstratePreprocess(sign_data) => { - fmt.debug_struct("Transaction::SubstratePreprocess").field("sign_data", sign_data).finish() - } - Transaction::SubstrateShare(sign_data) => { - fmt.debug_struct("Transaction::SubstrateShare").field("sign_data", sign_data).finish() - } - Transaction::SignPreprocess(sign_data) => { - fmt.debug_struct("Transaction::SignPreprocess").field("sign_data", sign_data).finish() - } - Transaction::SignShare(sign_data) => { - fmt.debug_struct("Transaction::SignShare").field("sign_data", sign_data).finish() - } - Transaction::SignCompleted { plan, tx_hash, .. 
} => fmt - .debug_struct("Transaction::SignCompleted") - .field("plan", &hex::encode(plan)) - .field("tx_hash", &hex::encode(tx_hash)) - .finish_non_exhaustive(), - } - } -} - -impl ReadWrite for Transaction { - fn read(reader: &mut R) -> io::Result { - let mut kind = [0]; - reader.read_exact(&mut kind)?; - - match kind[0] { - 0 => Ok(Transaction::RemoveParticipant({ - let mut participant = [0; 2]; - reader.read_exact(&mut participant)?; - Participant::new(u16::from_le_bytes(participant)) - .ok_or_else(|| io::Error::other("invalid participant in RemoveParticipant"))? - })), - - 1 => { - let mut attempt = [0; 4]; - reader.read_exact(&mut attempt)?; - let attempt = u32::from_le_bytes(attempt); - - let commitments = { - let mut commitments_len = [0; 1]; - reader.read_exact(&mut commitments_len)?; - let commitments_len = usize::from(commitments_len[0]); - if commitments_len == 0 { - Err(io::Error::other("zero commitments in DkgCommitments"))?; - } - - let mut each_commitments_len = [0; 2]; - reader.read_exact(&mut each_commitments_len)?; - let each_commitments_len = usize::from(u16::from_le_bytes(each_commitments_len)); - if (commitments_len * each_commitments_len) > TRANSACTION_SIZE_LIMIT { - Err(io::Error::other( - "commitments present in transaction exceeded transaction size limit", - ))?; - } - let mut commitments = vec![vec![]; commitments_len]; - for commitments in &mut commitments { - *commitments = vec![0; each_commitments_len]; - reader.read_exact(commitments)?; - } - commitments - }; - - let signed = Signed::read_without_nonce(reader, 0)?; - - Ok(Transaction::DkgCommitments(attempt, commitments, signed)) - } - - 2 => { - let mut attempt = [0; 4]; - reader.read_exact(&mut attempt)?; - let attempt = u32::from_le_bytes(attempt); - - let shares = { - let mut share_quantity = [0; 1]; - reader.read_exact(&mut share_quantity)?; - - let mut key_share_quantity = [0; 1]; - reader.read_exact(&mut key_share_quantity)?; - - let mut share_len = [0; 2]; - reader.read_exact(&mut share_len)?; - let share_len = usize::from(u16::from_le_bytes(share_len)); - - let mut all_shares = vec![]; - for _ in 0 .. share_quantity[0] { - let mut shares = vec![]; - for _ in 0 .. 
key_share_quantity[0] { - let mut share = vec![0; share_len]; - reader.read_exact(&mut share)?; - shares.push(share); - } - all_shares.push(shares); - } - all_shares - }; - - let mut confirmation_nonces = [0; 64]; - reader.read_exact(&mut confirmation_nonces)?; - - let signed = Signed::read_without_nonce(reader, 1)?; - - Ok(Transaction::DkgShares { attempt, shares, confirmation_nonces, signed }) - } - - 3 => { - let mut attempt = [0; 4]; - reader.read_exact(&mut attempt)?; - let attempt = u32::from_le_bytes(attempt); - - let mut accuser = [0; 2]; - reader.read_exact(&mut accuser)?; - let accuser = Participant::new(u16::from_le_bytes(accuser)) - .ok_or_else(|| io::Error::other("invalid participant in InvalidDkgShare"))?; - - let mut faulty = [0; 2]; - reader.read_exact(&mut faulty)?; - let faulty = Participant::new(u16::from_le_bytes(faulty)) - .ok_or_else(|| io::Error::other("invalid participant in InvalidDkgShare"))?; - - let mut blame_len = [0; 2]; - reader.read_exact(&mut blame_len)?; - let mut blame = vec![0; u16::from_le_bytes(blame_len).into()]; - reader.read_exact(&mut blame)?; - - // This shares a nonce with DkgConfirmed as only one is expected - let signed = Signed::read_without_nonce(reader, 2)?; - - Ok(Transaction::InvalidDkgShare { - attempt, - accuser, - faulty, - blame: Some(blame).filter(|blame| !blame.is_empty()), - signed, - }) - } - - 4 => { - let mut attempt = [0; 4]; - reader.read_exact(&mut attempt)?; - let attempt = u32::from_le_bytes(attempt); - - let mut confirmation_share = [0; 32]; - reader.read_exact(&mut confirmation_share)?; - - let signed = Signed::read_without_nonce(reader, 2)?; - - Ok(Transaction::DkgConfirmed(attempt, confirmation_share, signed)) - } - - 5 => SignData::read(reader, 0).map(Transaction::DkgRemovalPreprocess), - 6 => SignData::read(reader, 1).map(Transaction::DkgRemovalShare), - - 7 => { - let mut block = [0; 32]; - reader.read_exact(&mut block)?; - Ok(Transaction::CosignSubstrateBlock(block)) - } - - 8 => { - let mut block = [0; 32]; - reader.read_exact(&mut block)?; - let mut batch = [0; 5]; - reader.read_exact(&mut batch)?; - Ok(Transaction::Batch(block, batch)) - } - - 9 => { - let mut block = [0; 8]; - reader.read_exact(&mut block)?; - Ok(Transaction::SubstrateBlock(u64::from_le_bytes(block))) - } - - 10 => SignData::read(reader, 0).map(Transaction::SubstratePreprocess), - 11 => SignData::read(reader, 1).map(Transaction::SubstrateShare), - - 12 => SignData::read(reader, 0).map(Transaction::SignPreprocess), - 13 => SignData::read(reader, 1).map(Transaction::SignShare), - - 14 => { - let mut plan = [0; 32]; - reader.read_exact(&mut plan)?; - - let mut tx_hash_len = [0]; - reader.read_exact(&mut tx_hash_len)?; - let mut tx_hash = vec![0; usize::from(tx_hash_len[0])]; - reader.read_exact(&mut tx_hash)?; - - let first_signer = Ristretto::read_G(reader)?; - let signature = SchnorrSignature::::read(reader)?; - - Ok(Transaction::SignCompleted { plan, tx_hash, first_signer, signature }) - } - - _ => Err(io::Error::other("invalid transaction type")), - } - } - - fn write(&self, writer: &mut W) -> io::Result<()> { - match self { - Transaction::RemoveParticipant(i) => { - writer.write_all(&[0])?; - writer.write_all(&u16::from(*i).to_le_bytes()) - } - - Transaction::DkgCommitments(attempt, commitments, signed) => { - writer.write_all(&[1])?; - writer.write_all(&attempt.to_le_bytes())?; - if commitments.is_empty() { - Err(io::Error::other("zero commitments in DkgCommitments"))? 
- } - writer.write_all(&[u8::try_from(commitments.len()).unwrap()])?; - for commitments_i in commitments { - if commitments_i.len() != commitments[0].len() { - Err(io::Error::other("commitments of differing sizes in DkgCommitments"))? - } - } - writer.write_all(&u16::try_from(commitments[0].len()).unwrap().to_le_bytes())?; - for commitments in commitments { - writer.write_all(commitments)?; - } - signed.write_without_nonce(writer) - } - - Transaction::DkgShares { attempt, shares, confirmation_nonces, signed } => { - writer.write_all(&[2])?; - writer.write_all(&attempt.to_le_bytes())?; - - // `shares` is a Vec which is supposed to map to a HashMap>. Since we - // bound participants to 150, this conversion is safe if a valid in-memory transaction. - writer.write_all(&[u8::try_from(shares.len()).unwrap()])?; - // This assumes at least one share is being sent to another party - writer.write_all(&[u8::try_from(shares[0].len()).unwrap()])?; - let share_len = shares[0][0].len(); - // For BLS12-381 G2, this would be: - // - A 32-byte share - // - A 96-byte ephemeral key - // - A 128-byte signature - // Hence why this has to be u16 - writer.write_all(&u16::try_from(share_len).unwrap().to_le_bytes())?; - - for these_shares in shares { - assert_eq!(these_shares.len(), shares[0].len(), "amount of sent shares was variable"); - for share in these_shares { - assert_eq!(share.len(), share_len, "sent shares were of variable length"); - writer.write_all(share)?; - } - } - - writer.write_all(confirmation_nonces)?; - signed.write_without_nonce(writer) - } - - Transaction::InvalidDkgShare { attempt, accuser, faulty, blame, signed } => { - writer.write_all(&[3])?; - writer.write_all(&attempt.to_le_bytes())?; - writer.write_all(&u16::from(*accuser).to_le_bytes())?; - writer.write_all(&u16::from(*faulty).to_le_bytes())?; - - // Flattens Some(vec![]) to None on the expectation no actual blame will be 0-length - assert!(blame.as_ref().map(|blame| blame.len()).unwrap_or(1) != 0); - let blame_len = - u16::try_from(blame.as_ref().unwrap_or(&vec![]).len()).expect("blame exceeded 64 KB"); - writer.write_all(&blame_len.to_le_bytes())?; - writer.write_all(blame.as_ref().unwrap_or(&vec![]))?; - - signed.write_without_nonce(writer) - } - - Transaction::DkgConfirmed(attempt, share, signed) => { - writer.write_all(&[4])?; - writer.write_all(&attempt.to_le_bytes())?; - writer.write_all(share)?; - signed.write_without_nonce(writer) - } - - Transaction::DkgRemovalPreprocess(data) => { - writer.write_all(&[5])?; - data.write(writer) - } - Transaction::DkgRemovalShare(data) => { - writer.write_all(&[6])?; - data.write(writer) - } - - Transaction::CosignSubstrateBlock(block) => { - writer.write_all(&[7])?; - writer.write_all(block) - } - - Transaction::Batch(block, batch) => { - writer.write_all(&[8])?; - writer.write_all(block)?; - writer.write_all(batch) - } - - Transaction::SubstrateBlock(block) => { - writer.write_all(&[9])?; - writer.write_all(&block.to_le_bytes()) - } - - Transaction::SubstratePreprocess(data) => { - writer.write_all(&[10])?; - data.write(writer) - } - Transaction::SubstrateShare(data) => { - writer.write_all(&[11])?; - data.write(writer) - } - - Transaction::SignPreprocess(data) => { - writer.write_all(&[12])?; - data.write(writer) - } - Transaction::SignShare(data) => { - writer.write_all(&[13])?; - data.write(writer) - } - Transaction::SignCompleted { plan, tx_hash, first_signer, signature } => { - writer.write_all(&[14])?; - writer.write_all(plan)?; - writer - 
.write_all(&[u8::try_from(tx_hash.len()).expect("tx hash length exceed 255 bytes")])?; - writer.write_all(tx_hash)?; - writer.write_all(&first_signer.to_bytes())?; - signature.write(writer) - } - } - } -} - -impl TransactionTrait for Transaction { - fn kind(&self) -> TransactionKind<'_> { - match self { - Transaction::RemoveParticipant(_) => TransactionKind::Provided("remove"), - - Transaction::DkgCommitments(attempt, _, signed) => { - TransactionKind::Signed((b"dkg", attempt).encode(), signed) - } - Transaction::DkgShares { attempt, signed, .. } => { - TransactionKind::Signed((b"dkg", attempt).encode(), signed) - } - Transaction::InvalidDkgShare { attempt, signed, .. } => { - TransactionKind::Signed((b"dkg", attempt).encode(), signed) - } - Transaction::DkgConfirmed(attempt, _, signed) => { - TransactionKind::Signed((b"dkg", attempt).encode(), signed) - } - - Transaction::DkgRemovalPreprocess(data) => { - TransactionKind::Signed((b"dkg_removal", data.plan, data.attempt).encode(), &data.signed) - } - Transaction::DkgRemovalShare(data) => { - TransactionKind::Signed((b"dkg_removal", data.plan, data.attempt).encode(), &data.signed) - } - - Transaction::CosignSubstrateBlock(_) => TransactionKind::Provided("cosign"), - - Transaction::Batch(_, _) => TransactionKind::Provided("batch"), - Transaction::SubstrateBlock(_) => TransactionKind::Provided("serai"), - - Transaction::SubstratePreprocess(data) => { - TransactionKind::Signed((b"substrate", data.plan, data.attempt).encode(), &data.signed) - } - Transaction::SubstrateShare(data) => { - TransactionKind::Signed((b"substrate", data.plan, data.attempt).encode(), &data.signed) - } - - Transaction::SignPreprocess(data) => { - TransactionKind::Signed((b"sign", data.plan, data.attempt).encode(), &data.signed) - } - Transaction::SignShare(data) => { - TransactionKind::Signed((b"sign", data.plan, data.attempt).encode(), &data.signed) - } - Transaction::SignCompleted { .. } => TransactionKind::Unsigned, - } - } - - fn hash(&self) -> [u8; 32] { - let mut tx = self.serialize(); - if let TransactionKind::Signed(_, signed) = self.kind() { - // Make sure the part we're cutting off is the signature - assert_eq!(tx.drain((tx.len() - 64) ..).collect::>(), signed.signature.serialize()); - } - Blake2s256::digest([b"Coordinator Tributary Transaction".as_slice(), &tx].concat()).into() - } - - fn verify(&self) -> Result<(), TransactionError> { - if let Transaction::SubstrateShare(data) = self { - for data in &data.data { - if data.len() != 32 { - Err(TransactionError::InvalidContent)?; - } - } - } - - if let Transaction::SignCompleted { first_signer, signature, .. 
} = self { - if !signature.verify(*first_signer, self.sign_completed_challenge()) { - Err(TransactionError::InvalidContent)?; - } - } - - Ok(()) - } -} - -impl Transaction { - // Used to initially construct transactions so we can then get sig hashes and perform signing - pub fn empty_signed() -> Signed { - Signed { - signer: Ristretto::generator(), - nonce: 0, - signature: SchnorrSignature:: { - R: Ristretto::generator(), - s: ::F::ZERO, - }, - } - } - - // Sign a transaction - pub fn sign( - &mut self, - rng: &mut R, - genesis: [u8; 32], - key: &Zeroizing<::F>, +pub async fn publish_signed_transaction( + txn: &mut D::Transaction<'_>, + tributary: &Tributary, + tx: Transaction, +) { + log::debug!("publishing transaction {}", hex::encode(tx.hash())); + + let (order, signer) = if let TransactionKind::Signed(order, signed) = tx.kind() { + let signer = signed.signer; + + // Safe as we should deterministically create transactions, meaning if this is already on-disk, + // it's what we're saving now + SignedTransactionDb::set(txn, &order, signed.nonce, &tx.serialize()); + + (order, signer) + } else { + panic!("non-signed transaction passed to publish_signed_transaction"); + }; + + // If we're trying to publish 5, when the last transaction published was 3, this will delay + // publication until the point in time we publish 4 + while let Some(tx) = SignedTransactionDb::take_signed_transaction( + txn, + &order, + tributary + .next_nonce(&signer, &order) + .await + .expect("we don't have a nonce, meaning we aren't a participant on this tributary"), ) { - fn signed(tx: &mut Transaction) -> (u32, &mut Signed) { - let nonce = match tx { - Transaction::RemoveParticipant(_) => panic!("signing RemoveParticipant"), - - Transaction::DkgCommitments(_, _, _) => 0, - Transaction::DkgShares { .. } => 1, - Transaction::InvalidDkgShare { .. } => 2, - Transaction::DkgConfirmed(_, _, _) => 2, - - Transaction::DkgRemovalPreprocess(_) => 0, - Transaction::DkgRemovalShare(_) => 1, - - Transaction::CosignSubstrateBlock(_) => panic!("signing CosignSubstrateBlock"), - - Transaction::Batch(_, _) => panic!("signing Batch"), - Transaction::SubstrateBlock(_) => panic!("signing SubstrateBlock"), - - Transaction::SubstratePreprocess(_) => 0, - Transaction::SubstrateShare(_) => 1, - - Transaction::SignPreprocess(_) => 0, - Transaction::SignShare(_) => 1, - Transaction::SignCompleted { .. } => panic!("signing SignCompleted"), - }; - - ( - nonce, - match tx { - Transaction::RemoveParticipant(_) => panic!("signing RemoveParticipant"), - - Transaction::DkgCommitments(_, _, ref mut signed) => signed, - Transaction::DkgShares { ref mut signed, .. } => signed, - Transaction::InvalidDkgShare { ref mut signed, .. } => signed, - Transaction::DkgConfirmed(_, _, ref mut signed) => signed, - - Transaction::DkgRemovalPreprocess(ref mut data) => &mut data.signed, - Transaction::DkgRemovalShare(ref mut data) => &mut data.signed, - - Transaction::CosignSubstrateBlock(_) => panic!("signing CosignSubstrateBlock"), - - Transaction::Batch(_, _) => panic!("signing Batch"), - Transaction::SubstrateBlock(_) => panic!("signing SubstrateBlock"), - - Transaction::SubstratePreprocess(ref mut data) => &mut data.signed, - Transaction::SubstrateShare(ref mut data) => &mut data.signed, - - Transaction::SignPreprocess(ref mut data) => &mut data.signed, - Transaction::SignShare(ref mut data) => &mut data.signed, - Transaction::SignCompleted { .. 
} => panic!("signing SignCompleted"), - }, - ) - } - - let (nonce, signed_ref) = signed(self); - signed_ref.signer = Ristretto::generator() * key.deref(); - signed_ref.nonce = nonce; - - let sig_nonce = Zeroizing::new(::F::random(rng)); - signed(self).1.signature.R = ::generator() * sig_nonce.deref(); - let sig_hash = self.sig_hash(genesis); - signed(self).1.signature = SchnorrSignature::::sign(key, sig_nonce, sig_hash); - } - - pub fn sign_completed_challenge(&self) -> ::F { - if let Transaction::SignCompleted { plan, tx_hash, first_signer, signature } = self { - let mut transcript = - RecommendedTranscript::new(b"Coordinator Tributary Transaction SignCompleted"); - transcript.append_message(b"plan", plan); - transcript.append_message(b"tx_hash", tx_hash); - transcript.append_message(b"signer", first_signer.to_bytes()); - transcript.append_message(b"nonce", signature.R.to_bytes()); - Ristretto::hash_to_F(b"SignCompleted signature", &transcript.challenge(b"challenge")) - } else { - panic!("sign_completed_challenge called on transaction which wasn't SignCompleted") + // We need to return a proper error here to enable that, due to a race condition around + // multiple publications + match tributary.add_transaction(tx.clone()).await { + Ok(_) => {} + // Some asynchonicity if InvalidNonce, assumed safe to deterministic nonces + Err(TransactionError::InvalidNonce) => { + log::warn!("publishing TX {tx:?} returned InvalidNonce. was it already added?") + } + Err(e) => panic!("created an invalid transaction: {e:?}"), } } } diff --git a/coordinator/src/tributary/scanner.rs b/coordinator/src/tributary/scanner.rs index c127bdfa6..f094e570a 100644 --- a/coordinator/src/tributary/scanner.rs +++ b/coordinator/src/tributary/scanner.rs @@ -1,9 +1,12 @@ -use core::{future::Future, time::Duration}; +use core::{marker::PhantomData, future::Future, time::Duration}; use std::sync::Arc; +use rand_core::OsRng; + use zeroize::Zeroizing; -use ciphersuite::{Ciphersuite, Ristretto}; +use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto}; +use frost::Participant; use tokio::sync::broadcast; @@ -22,9 +25,11 @@ use tributary::{ use crate::{ Db, - tributary::handle::{fatal_slash, handle_application_tx}, processors::Processors, - tributary::{TributarySpec, Transaction, LastBlock, EventDb}, + tributary::{ + TributarySpec, Label, SignData, Transaction, Topic, AttemptDb, LastHandledBlock, + FatallySlashed, DkgCompleted, signing_protocol::DkgRemoval, + }, P2p, }; @@ -34,13 +39,31 @@ pub enum RecognizedIdType { Plan, } -pub(crate) trait RIDTrait: - Clone + Fn(ValidatorSet, [u8; 32], RecognizedIdType, Vec) -> FRid -{ +#[async_trait::async_trait] +pub trait RIDTrait { + async fn recognized_id( + &self, + set: ValidatorSet, + genesis: [u8; 32], + kind: RecognizedIdType, + id: Vec, + ); } -impl) -> FRid> RIDTrait - for F +#[async_trait::async_trait] +impl< + FRid: Send + Future, + F: Sync + Fn(ValidatorSet, [u8; 32], RecognizedIdType, Vec) -> FRid, + > RIDTrait for F { + async fn recognized_id( + &self, + set: ValidatorSet, + genesis: [u8; 32], + kind: RecognizedIdType, + id: Vec, + ) { + (self)(set, genesis, kind, id).await + } } #[derive(Clone, Copy, PartialEq, Eq, Debug)] @@ -49,123 +72,181 @@ pub enum PstTxType { RemoveParticipant([u8; 32]), } -// Handle a specific Tributary block -#[allow(clippy::too_many_arguments)] -async fn handle_block< - D: Db, +#[async_trait::async_trait] +pub trait PSTTrait { + async fn publish_serai_tx( + &self, + set: ValidatorSet, + kind: PstTxType, + tx: serai_client::Transaction, + ); +} 
+#[async_trait::async_trait]
+impl<
+    FPst: Send + Future<Output = ()>,
+    F: Sync + Fn(ValidatorSet, PstTxType, serai_client::Transaction) -> FPst,
+  > PSTTrait for F
+{
+  async fn publish_serai_tx(
+    &self,
+    set: ValidatorSet,
+    kind: PstTxType,
+    tx: serai_client::Transaction,
+  ) {
+    (self)(set, kind, tx).await
+  }
+}
+
+#[async_trait::async_trait]
+pub trait PTTTrait {
+  async fn publish_tributary_tx(&self, tx: Transaction);
+}
+#[async_trait::async_trait]
+impl<FPtt: Send + Future<Output = ()>, F: Sync + Fn(Transaction) -> FPtt> PTTTrait for F {
+  async fn publish_tributary_tx(&self, tx: Transaction) {
+    (self)(tx).await
+  }
+}
+
+pub struct TributaryBlockHandler<
+  'a,
+  T: DbTxn,
   Pro: Processors,
-  FPst: Future<Output = ()>,
-  PST: Clone + Fn(ValidatorSet, PstTxType, serai_client::Transaction) -> FPst,
-  FPtt: Future<Output = ()>,
-  PTT: Clone + Fn(Transaction) -> FPtt,
-  FRid: Future<Output = ()>,
-  RID: RIDTrait<FRid>,
+  PST: PSTTrait,
+  PTT: PTTTrait,
+  RID: RIDTrait,
   P: P2p,
->(
-  db: &mut D,
-  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
-  recognized_id: RID,
-  processors: &Pro,
-  publish_serai_tx: PST,
-  publish_tributary_tx: &PTT,
-  spec: &TributarySpec,
+> {
+  pub txn: &'a mut T,
+  pub our_key: &'a Zeroizing<<Ristretto as Ciphersuite>::F>,
+  pub recognized_id: &'a RID,
+  pub processors: &'a Pro,
+  pub publish_serai_tx: &'a PST,
+  pub publish_tributary_tx: &'a PTT,
+  pub spec: &'a TributarySpec,
   block: Block<Transaction>,
-) {
-  log::info!("found block for Tributary {:?}", spec.set());
+  _p2p: PhantomData<P>,
+}
+
+impl<T: DbTxn, Pro: Processors, PST: PSTTrait, PTT: PTTTrait, RID: RIDTrait, P: P2p>
+  TributaryBlockHandler<'_, T, Pro, PST, PTT, RID, P>
+{
+  pub async fn fatal_slash(&mut self, slashing: [u8; 32], reason: &str) {
+    let genesis = self.spec.genesis();
 
-  let hash = block.hash();
+    log::warn!("fatally slashing {}. reason: {}", hex::encode(slashing), reason);
+    FatallySlashed::set_fatally_slashed(self.txn, genesis, slashing);
+    // TODO: disconnect the node from network/ban from further participation in all Tributaries
 
-  let mut event_id = 0;
-  #[allow(clippy::explicit_counter_loop)] // event_id isn't TX index. It just currently lines up
-  for tx in block.transactions {
-    if EventDb::get(db, hash, event_id).is_some() {
-      event_id += 1;
-      continue;
+    // TODO: If during DKG, trigger a re-attempt
+    // Despite triggering a re-attempt, this DKG may still complete and may become in-use
+
+    // If during a DKG, remove the participant
+    if DkgCompleted::get(self.txn, genesis).is_none() {
+      AttemptDb::recognize_topic(self.txn, genesis, Topic::DkgRemoval(slashing));
+      let preprocess = (DkgRemoval {
+        spec: self.spec,
+        key: self.our_key,
+        txn: self.txn,
+        removing: slashing,
+        attempt: 0,
+      })
+      .preprocess();
+      let mut tx = Transaction::DkgRemoval(SignData {
+        plan: slashing,
+        attempt: 0,
+        label: Label::Preprocess,
+        data: vec![preprocess.to_vec()],
+        signed: Transaction::empty_signed(),
+      });
+      tx.sign(&mut OsRng, genesis, self.our_key);
+      self.publish_tributary_tx.publish_tributary_tx(tx).await;
    }
+  }
 
-    let mut txn = db.txn();
+  // TODO: Once Substrate confirms a key, we need to rotate our validator set OR form a second
+  // Tributary post-DKG
+  // https://github.com/serai-dex/serai/issues/426
 
-    match tx {
-      TributaryTransaction::Tendermint(TendermintTx::SlashEvidence(ev)) => {
-        // Since the evidence is on the chain, it should already have been validated
-        // We can just punish the signer
-        let data = match ev {
-          Evidence::ConflictingMessages(first, second) => (first, Some(second)),
-          Evidence::ConflictingPrecommit(first, second) => (first, Some(second)),
-          Evidence::InvalidPrecommit(first) => (first, None),
-          Evidence::InvalidValidRound(first) => (first, None),
-        };
-        let msgs = (
-          decode_signed_message::<TendermintNetwork<D, Transaction, P>>(&data.0).unwrap(),
-          if data.1.is_some() {
-            Some(
-              decode_signed_message::<TendermintNetwork<D, Transaction, P>>(&data.1.unwrap())
-                .unwrap(),
-            )
-          } else {
-            None
-          },
-        );
-
-        // Since anything with evidence is fundamentally faulty behavior, not just temporal errors,
-        // mark the node as fatally slashed
-        fatal_slash::(
-          &mut txn,
-          spec,
-          publish_tributary_tx,
-          key,
-          msgs.0.msg.sender,
-          &format!("invalid tendermint messages: {:?}", msgs),
-        )
-        .await;
-      }
-      TributaryTransaction::Application(tx) => {
-        handle_application_tx::(
-          tx,
-          spec,
-          processors,
-          publish_serai_tx.clone(),
-          publish_tributary_tx,
-          key,
-          recognized_id.clone(),
-          &mut txn,
-        )
-        .await;
+  pub async fn fatal_slash_with_participant_index(&mut self, i: Participant, reason: &str) {
+    // Resolve from Participant to <Ristretto as Ciphersuite>::G
+    let i = u16::from(i);
+    let mut validator = None;
+    for (potential, _) in self.spec.validators() {
+      let v_i = self.spec.i(potential).unwrap();
+      if (u16::from(v_i.start) <= i) && (i < u16::from(v_i.end)) {
+        validator = Some(potential);
+        break;
      }
    }
+    let validator = validator.unwrap();
 
-    EventDb::handle_event(&mut txn, hash, event_id);
-    txn.commit();
-
-    event_id += 1;
+    self.fatal_slash(validator.to_bytes(), reason).await;
  }
 
-  // TODO: Trigger any necessary re-attempts
+  async fn handle<D: Db>(mut self) {
+    log::info!("found block for Tributary {:?}", self.spec.set());
+
+    let transactions = self.block.transactions.clone();
+    for tx in transactions {
+      match tx {
+        TributaryTransaction::Tendermint(TendermintTx::SlashEvidence(ev)) => {
+          // Since the evidence is on the chain, it should already have been validated
+          // We can just punish the signer
+          let data = match ev {
+            Evidence::ConflictingMessages(first, second) => (first, Some(second)),
+            Evidence::ConflictingPrecommit(first, second) => (first, Some(second)),
+            Evidence::InvalidPrecommit(first) => (first, None),
+            Evidence::InvalidValidRound(first) => (first, None),
+          };
+          let msgs = (
+            decode_signed_message::<TendermintNetwork<D, Transaction, P>>(&data.0).unwrap(),
+            if data.1.is_some() {
+              Some(
+                decode_signed_message::<TendermintNetwork<D, Transaction, P>>(&data.1.unwrap())
+                  .unwrap(),
+              )
+            } else {
+              None
+            },
+          );
+
+          // Since anything with evidence is fundamentally faulty behavior, not just temporal
+          // errors, mark the node as fatally slashed
+          self
+            .fatal_slash(msgs.0.msg.sender, &format!("invalid tendermint messages: {:?}", msgs))
+            .await;
+        }
+        TributaryTransaction::Application(tx) => {
+          self.handle_application_tx(tx).await;
+        }
+      }
+    }
+
+    // TODO: Trigger any necessary re-attempts
+  }
 }
 
 #[allow(clippy::too_many_arguments)]
 pub(crate) async fn handle_new_blocks<
   D: Db,
   Pro: Processors,
-  FPst: Future<Output = ()>,
-  PST: Clone + Fn(ValidatorSet, PstTxType, serai_client::Transaction) -> FPst,
-  FPtt: Future<Output = ()>,
-  PTT: Clone + Fn(Transaction) -> FPtt,
-  FRid: Future<Output = ()>,
-  RID: RIDTrait<FRid>,
+  PST: PSTTrait,
+  PTT: PTTTrait,
+  RID: RIDTrait,
   P: P2p,
 >(
   db: &mut D,
   key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
-  recognized_id: RID,
+  recognized_id: &RID,
   processors: &Pro,
-  publish_serai_tx: PST,
+  publish_serai_tx: &PST,
   publish_tributary_tx: &PTT,
   spec: &TributarySpec,
   tributary: &TributaryReader<D, Transaction>,
 ) {
   let genesis = tributary.genesis();
-  let mut last_block = LastBlock::get(db, genesis).unwrap_or(genesis);
+  let mut last_block = LastHandledBlock::get(db, genesis).unwrap_or(genesis);
   while let Some(next) = tributary.block_after(&last_block) {
     let block = tributary.block(&next).unwrap();
@@ -182,20 +263,22 @@ pub(crate) async fn handle_new_blocks<
      }
    }
 
-    handle_block::<_, _, _, _, _, _, _, _, P>(
-      db,
-      key,
-      recognized_id.clone(),
+    let mut txn = db.txn();
+    (TributaryBlockHandler {
+      txn: &mut txn,
+      spec,
+      our_key: key,
+      recognized_id,
       processors,
-      publish_serai_tx.clone(),
+      publish_serai_tx,
       publish_tributary_tx,
-      spec,
       block,
-    )
+      _p2p: PhantomData::<P>,
+    })
+    .handle::<D>()
+    .await;
     last_block = next;
 
-    let mut txn = db.txn();
-    LastBlock::set(&mut txn, genesis, &next);
+    LastHandledBlock::set(&mut txn, genesis, &next);
     txn.commit();
   }
 }
@@ -204,8 +287,7 @@ pub(crate) async fn scan_tributaries_task<
   D: Db,
   Pro: Processors,
   P: P2p,
-  FRid: Send + Future<Output = ()>,
-  RID: 'static + Send + Sync + RIDTrait<FRid>,
+  RID: 'static + Send + Sync + Clone + RIDTrait,
 >(
   raw_db: D,
   key: Zeroizing<<Ristretto as Ciphersuite>::F>,
@@ -240,12 +322,12 @@
       // the next block occurs
       let next_block_notification = tributary.next_block_notification().await;
 
-      handle_new_blocks::<_, _, _, _, _, _, _, _, P>(
+      handle_new_blocks::<_, _, _, _, _, P>(
         &mut tributary_db,
         &key,
-        recognized_id.clone(),
+        &recognized_id,
         &processors,
-        |set, tx_type, tx| {
+        &|set, tx_type, tx| {
           let serai = serai.clone();
           async move {
             loop {
@@ -314,7 +396,7 @@
            }
          }
        },
-        &|tx| {
+        &|tx: Transaction| {
           let tributary = tributary.clone();
           async move {
             match tributary.add_transaction(tx.clone()).await {
diff --git a/coordinator/src/tributary/signing_protocol.rs b/coordinator/src/tributary/signing_protocol.rs
new file mode 100644
index 000000000..2db2d9644
--- /dev/null
+++ b/coordinator/src/tributary/signing_protocol.rs
@@ -0,0 +1,395 @@
+/*
+  A MuSig-based signing protocol executed with the validators' keys.
+
+  This is used for confirming the results of a DKG on-chain, an operation requiring all
+  validators, and for removing another validator before the DKG completes, an operation requiring
+  a supermajority of validators.
+
+  Since we're using the validators' keys, as needed for their being the root of trust, the
+  coordinator must perform the signing. This is distinct from all other group-signing operations,
+  as they're all done by the processor.
+
+  The MuSig-aggregation achieves on-chain efficiency and enables a more secure design pattern.
+  While we could individually track votes, that'd require logic to prevent voting multiple times
+  and to track the accumulated votes. MuSig-aggregation simply requires checking the list is
+  sorted and the list's weight exceeds the threshold.
+
+  Instead of maintaining state in memory, a combination of the DB and re-execution is used. This
+  is deemed acceptable re: performance as:
+
+  1) This is only done prior to a DKG being confirmed on Substrate and is assumed infrequent.
+  2) This is an O(n) algorithm.
+  3) The size of the validator set is bounded by MAX_KEY_SHARES_PER_SET.
+
+  Accordingly, this should be tolerable.
+
+  As for safety, it is explicitly unsafe to reuse nonces across signing sessions. This raises
+  concerns regarding our re-execution, which is dependent on fixed nonces. Safety is derived from
+  the nonces being context-bound under a BFT protocol. The flow is as follows:
+
+  1) Decide the nonce.
+  2) Publish the nonces' commitments, receiving everyone else's *and potentially the message to
+     be signed*.
+  3) Sign and publish the signature share.
+
+  In order for nonce re-use to occur, the received nonce commitments (or the message to be signed)
+  would have to be distinct and sign would have to be called again.
+
+  Before we act on any received messages, they're ordered and finalized by a BFT algorithm. The
+  only way to operate on distinct received messages would be if:
+
+  1) A logical flaw exists, letting new messages overwrite prior messages
+  2) A reorganization occurred from chain A to chain B, and with it, different messages
+
+  Reorganizations are not supported, as BFT is assumed by the presence of a BFT algorithm. While
+  a significant amount of processes may be byzantine, leading to BFT being broken, that still will
+  not trigger a reorganization. The only way to move to a distinct chain, with distinct messages,
+  would be by rebuilding the local process (this time following chain B). Upon any complete
+  rebuild, we'd re-decide nonces, achieving safety. This does set a bound preventing partial
+  rebuilds, which is accepted.
+
+  Additionally, to ensure a rebuilt service isn't flagged as malicious, we have to check the
+  commitments generated from the decided nonces are in fact its commitments on-chain (TODO).
+
+  TODO: We also need to review how we're handling Processor preprocesses and likely implement the
+  same on-chain-preprocess-matches-presumed-preprocess check before publishing shares.
+*/
+
+use core::ops::Deref;
+use std::collections::HashMap;
+
+use zeroize::{Zeroize, Zeroizing};
+
+use rand_core::OsRng;
+
+use blake2::{Digest, Blake2s256};
+
+use ciphersuite::{
+  group::{ff::PrimeField, Group, GroupEncoding},
+  Ciphersuite, Ristretto,
+};
+use frost::{
+  FrostError,
+  dkg::{Participant, musig::musig},
+  ThresholdKeys,
+  sign::*,
+};
+use frost_schnorrkel::Schnorrkel;
+
+use scale::Encode;
+
+use serai_client::{
+  Public, SeraiAddress,
+  validator_sets::primitives::{
+    KeyPair, musig_context, set_keys_message, remove_participant_message,
+  },
+};
+
+use serai_db::*;
+
+use crate::tributary::TributarySpec;
+
+create_db!(
+  SigningProtocolDb {
+    CachedPreprocesses: (context: &impl Encode) -> [u8; 32]
+  }
+);
+
+struct SigningProtocol<'a, T: DbTxn, C: Encode> {
+  pub(crate) key: &'a Zeroizing<<Ristretto as Ciphersuite>::F>,
+  pub(crate) spec: &'a TributarySpec,
+  pub(crate) txn: &'a mut T,
+  pub(crate) context: C,
+}
+
+impl<T: DbTxn, C: Encode> SigningProtocol<'_, T, C> {
+  fn preprocess_internal(
+    &mut self,
+    participants: &[<Ristretto as Ciphersuite>::G],
+  ) -> (AlgorithmSignMachine<Ristretto, Schnorrkel>, [u8; 64]) {
+    // Encrypt the cached preprocess as recovery of it will enable recovering the private key
+    // While the DB isn't expected to be arbitrarily readable, it isn't a proper secret store and
+    // shouldn't be trusted as one
+    let mut encryption_key = {
+      let mut encryption_key_preimage =
+        Zeroizing::new(b"Cached Preprocess Encryption Key".to_vec());
+      encryption_key_preimage.extend(self.context.encode());
+      let repr = Zeroizing::new(self.key.to_repr());
+      encryption_key_preimage.extend(repr.deref());
+      Blake2s256::digest(&encryption_key_preimage)
+    };
+    let encryption_key_slice: &mut [u8] = encryption_key.as_mut();
+
+    let algorithm = Schnorrkel::new(b"substrate");
+    let keys: ThresholdKeys<Ristretto> =
+      musig(&musig_context(self.spec.set()), self.key, participants)
+        .expect("signing for a set we aren't in/validator present multiple times")
+        .into();
+
+    if CachedPreprocesses::get(self.txn, &self.context).is_none() {
+      let (machine, _) =
+        AlgorithmMachine::new(algorithm.clone(), keys.clone()).preprocess(&mut OsRng);
+
+      let mut cache = machine.cache();
+      assert_eq!(cache.0.len(), 32);
+      #[allow(clippy::needless_range_loop)]
+      for b in 0 .. 32 {
+        cache.0[b] ^= encryption_key_slice[b];
+      }
+
+      CachedPreprocesses::set(self.txn, &self.context, &cache.0);
+    }
+
+    let cached = CachedPreprocesses::get(self.txn, &self.context).unwrap();
+    let mut cached: Zeroizing<[u8; 32]> = Zeroizing::new(cached);
+    #[allow(clippy::needless_range_loop)]
+    for b in 0 .. 32 {
+      cached[b] ^= encryption_key_slice[b];
+    }
+    encryption_key_slice.zeroize();
+    let (machine, preprocess) =
+      AlgorithmSignMachine::from_cache(algorithm, keys, CachedPreprocess(cached));
+
+    (machine, preprocess.serialize().try_into().unwrap())
+  }
+
+  fn share_internal(
+    &mut self,
+    participants: &[<Ristretto as Ciphersuite>::G],
+    mut serialized_preprocesses: HashMap<Participant, Vec<u8>>,
+    msg: &[u8],
+  ) -> Result<(AlgorithmSignatureMachine<Ristretto, Schnorrkel>, [u8; 32]), Participant> {
+    let machine = self.preprocess_internal(participants).0;
+
+    let mut participants = serialized_preprocesses.keys().cloned().collect::<Vec<_>>();
+    participants.sort();
+    let mut preprocesses = HashMap::new();
+    for participant in participants {
+      preprocesses.insert(
+        participant,
+        machine
+          .read_preprocess(&mut serialized_preprocesses.remove(&participant).unwrap().as_slice())
+          .map_err(|_| participant)?,
+      );
+    }
+
+    let (machine, share) = machine.sign(preprocesses, msg).map_err(|e| match e {
+      FrostError::InternalError(e) => unreachable!("FrostError::InternalError {e}"),
+      FrostError::InvalidParticipant(_, _) |
+      FrostError::InvalidSigningSet(_) |
+      FrostError::InvalidParticipantQuantity(_, _) |
+      FrostError::DuplicatedParticipant(_) |
+      FrostError::MissingParticipant(_) => unreachable!("{e:?}"),
+      FrostError::InvalidPreprocess(p) | FrostError::InvalidShare(p) => p,
+    })?;
+
+    Ok((machine, share.serialize().try_into().unwrap()))
+  }
+
+  fn complete_internal(
+    &mut self,
+    machine: AlgorithmSignatureMachine<Ristretto, Schnorrkel>,
+    shares: HashMap<Participant, Vec<u8>>,
+  ) -> Result<[u8; 64], Participant> {
+    let shares = shares
+      .into_iter()
+      .map(|(p, share)| {
+        machine.read_share(&mut share.as_slice()).map(|share| (p, share)).map_err(|_| p)
+      })
+      .collect::<Result<HashMap<_, _>, _>>()?;
+    let signature = machine.complete(shares).map_err(|e| match e {
+      FrostError::InternalError(e) => unreachable!("FrostError::InternalError {e}"),
+      FrostError::InvalidParticipant(_, _) |
+      FrostError::InvalidSigningSet(_) |
+      FrostError::InvalidParticipantQuantity(_, _) |
+      FrostError::DuplicatedParticipant(_) |
+      FrostError::MissingParticipant(_) => unreachable!("{e:?}"),
+      FrostError::InvalidPreprocess(p) | FrostError::InvalidShare(p) => p,
+    })?;
+    Ok(signature.to_bytes())
+  }
+}
+
+// Get the keys of the participants, noted by their threshold `i`s, and return a new map indexed
+// by the MuSig `i`s.
+//
+// If sort_by_keys = true, the MuSig `i`s will index the keys once sorted. Else, the MuSig `i`s
+// will index the validators in the order they've been defined.
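The re-indexing described by the comment above can be illustrated standalone: entries keyed by sparse threshold `i`s are sorted, then re-keyed by their 1-based position to form the dense MuSig `i`s. A minimal sketch, not part of the patch, using `u16` in place of `frost::Participant` and byte vectors in place of shares; `to_musig_is` is a hypothetical name:

```rust
use std::collections::HashMap;

fn to_musig_is(map: HashMap<u16, Vec<u8>>) -> HashMap<u16, Vec<u8>> {
  // Sort the threshold `i`s so every signer derives the same ordering
  let mut threshold_is = map.keys().copied().collect::<Vec<_>>();
  threshold_is.sort();

  // Re-key each entry by its 1-based position in the sorted list
  let mut res = HashMap::new();
  for (pos, threshold_i) in threshold_is.into_iter().enumerate() {
    res.insert(u16::try_from(pos).unwrap() + 1, map[&threshold_i].clone());
  }
  res
}

fn main() {
  let mut map = HashMap::new();
  map.insert(5u16, b"data from validator with threshold i 5".to_vec());
  map.insert(2u16, b"data from validator with threshold i 2".to_vec());
  let musig = to_musig_is(map);
  // Threshold i 2 becomes MuSig i 1; threshold i 5 becomes MuSig i 2
  assert_eq!(musig[&1], b"data from validator with threshold i 2".to_vec());
  assert_eq!(musig[&2], b"data from validator with threshold i 5".to_vec());
}
```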
+fn threshold_i_map_to_keys_and_musig_i_map( + spec: &TributarySpec, + our_key: &Zeroizing<::F>, + mut map: HashMap>, + sort_by_keys: bool, +) -> (Vec<::G>, HashMap>) { + // Insert our own index so calculations aren't offset + let our_threshold_i = + spec.i(::generator() * our_key.deref()).unwrap().start; + assert!(map.insert(our_threshold_i, vec![]).is_none()); + + let spec_validators = spec.validators(); + let key_from_threshold_i = |threshold_i| { + for (key, _) in &spec_validators { + if threshold_i == spec.i(*key).unwrap().start { + return *key; + } + } + panic!("requested info for threshold i which doesn't exist") + }; + + let mut sorted = vec![]; + let mut threshold_is = map.keys().cloned().collect::>(); + threshold_is.sort(); + for threshold_i in threshold_is { + sorted.push((key_from_threshold_i(threshold_i), map.remove(&threshold_i).unwrap())); + } + if sort_by_keys { + // Substrate expects these signers to be sorted by key + sorted.sort_by(|(key1, _), (key2, _)| key1.to_bytes().cmp(&key2.to_bytes())); + } + + // Now that signers are sorted, with their shares, create a map with the is needed for MuSig + let mut participants = vec![]; + let mut map = HashMap::new(); + for (raw_i, (key, share)) in sorted.into_iter().enumerate() { + let musig_i = u16::try_from(raw_i).unwrap() + 1; + participants.push(key); + map.insert(Participant::new(musig_i).unwrap(), share); + } + + map.remove(&our_threshold_i).unwrap(); + + (participants, map) +} + +pub(crate) struct DkgConfirmer<'a, T: DbTxn> { + pub(crate) key: &'a Zeroizing<::F>, + pub(crate) spec: &'a TributarySpec, + pub(crate) txn: &'a mut T, + pub(crate) attempt: u32, +} + +impl DkgConfirmer<'_, T> { + fn signing_protocol(&mut self) -> SigningProtocol<'_, T, (&'static [u8; 12], u32)> { + let context = (b"DkgConfirmer", self.attempt); + SigningProtocol { key: self.key, spec: self.spec, txn: self.txn, context } + } + + fn preprocess_internal(&mut self) -> (AlgorithmSignMachine, [u8; 64]) { + let participants = self.spec.validators().iter().map(|val| val.0).collect::>(); + self.signing_protocol().preprocess_internal(&participants) + } + // Get the preprocess for this confirmation. + pub(crate) fn preprocess(&mut self) -> [u8; 64] { + self.preprocess_internal().1 + } + + fn share_internal( + &mut self, + preprocesses: HashMap>, + key_pair: &KeyPair, + ) -> Result<(AlgorithmSignatureMachine, [u8; 32]), Participant> { + let participants = self.spec.validators().iter().map(|val| val.0).collect::>(); + let preprocesses = + threshold_i_map_to_keys_and_musig_i_map(self.spec, self.key, preprocesses, false).1; + let msg = set_keys_message(&self.spec.set(), key_pair); + self.signing_protocol().share_internal(&participants, preprocesses, &msg) + } + // Get the share for this confirmation, if the preprocesses are valid. 
+ pub(crate) fn share( + &mut self, + preprocesses: HashMap>, + key_pair: &KeyPair, + ) -> Result<[u8; 32], Participant> { + self.share_internal(preprocesses, key_pair).map(|(_, share)| share) + } + + pub(crate) fn complete( + &mut self, + preprocesses: HashMap>, + key_pair: &KeyPair, + shares: HashMap>, + ) -> Result<[u8; 64], Participant> { + let shares = threshold_i_map_to_keys_and_musig_i_map(self.spec, self.key, shares, false).1; + + let machine = self + .share_internal(preprocesses, key_pair) + .expect("trying to complete a machine which failed to preprocess") + .0; + + self.signing_protocol().complete_internal(machine, shares) + } +} + +pub(crate) struct DkgRemoval<'a, T: DbTxn> { + pub(crate) key: &'a Zeroizing<::F>, + pub(crate) spec: &'a TributarySpec, + pub(crate) txn: &'a mut T, + pub(crate) removing: [u8; 32], + pub(crate) attempt: u32, +} + +impl DkgRemoval<'_, T> { + fn signing_protocol(&mut self) -> SigningProtocol<'_, T, (&'static [u8; 10], [u8; 32], u32)> { + let context = (b"DkgRemoval", self.removing, self.attempt); + SigningProtocol { key: self.key, spec: self.spec, txn: self.txn, context } + } + + fn preprocess_internal( + &mut self, + participants: Option<&[::G]>, + ) -> (AlgorithmSignMachine, [u8; 64]) { + // We won't know the participants when we first preprocess + // If we don't, we use our key alone as the participant + let just_us = [::G::generator() * self.key.deref()]; + let to_musig = if let Some(participants) = participants { participants } else { &just_us }; + + let (machine, preprocess) = self.signing_protocol().preprocess_internal(to_musig); + + // If we're now specifying participants, confirm the commitments were the same + if participants.is_some() { + let (_, theoretical_preprocess) = self.signing_protocol().preprocess_internal(&just_us); + assert_eq!(theoretical_preprocess, preprocess); + } + + (machine, preprocess) + } + // Get the preprocess for this confirmation. + pub(crate) fn preprocess(&mut self) -> [u8; 64] { + self.preprocess_internal(None).1 + } + + fn share_internal( + &mut self, + preprocesses: HashMap>, + ) -> Result<(AlgorithmSignatureMachine, [u8; 32]), Participant> { + let (participants, preprocesses) = + threshold_i_map_to_keys_and_musig_i_map(self.spec, self.key, preprocesses, true); + let msg = remove_participant_message(&self.spec.set(), Public(self.removing)); + self.signing_protocol().share_internal(&participants, preprocesses, &msg) + } + // Get the share for this confirmation, if the preprocesses are valid. 
+  pub(crate) fn share(
+    &mut self,
+    preprocesses: HashMap<Participant, Vec<u8>>,
+  ) -> Result<[u8; 32], Participant> {
+    self.share_internal(preprocesses).map(|(_, share)| share)
+  }
+
+  pub(crate) fn complete(
+    &mut self,
+    preprocesses: HashMap<Participant, Vec<u8>>,
+    shares: HashMap<Participant, Vec<u8>>,
+  ) -> Result<(Vec<SeraiAddress>, [u8; 64]), Participant> {
+    let (participants, shares) =
+      threshold_i_map_to_keys_and_musig_i_map(self.spec, self.key, shares, true);
+    let signers = participants.iter().map(|key| SeraiAddress(key.to_bytes())).collect::<Vec<_>>();
+
+    let machine = self
+      .share_internal(preprocesses)
+      .expect("trying to complete a machine which failed to preprocess")
+      .0;
+
+    let signature = self.signing_protocol().complete_internal(machine, shares)?;
+    Ok((signers, signature))
+  }
+}
diff --git a/coordinator/src/tributary/spec.rs b/coordinator/src/tributary/spec.rs
new file mode 100644
index 000000000..95f9e595b
--- /dev/null
+++ b/coordinator/src/tributary/spec.rs
@@ -0,0 +1,116 @@
+use core::{ops::Range, fmt::Debug};
+use std::io;
+
+use transcript::{Transcript, RecommendedTranscript};
+
+use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
+use frost::Participant;
+
+use scale::Encode;
+use borsh::{BorshSerialize, BorshDeserialize};
+
+use serai_client::{primitives::PublicKey, validator_sets::primitives::ValidatorSet};
+
+fn borsh_serialize_validators<W: io::Write>(
+  validators: &Vec<(<Ristretto as Ciphersuite>::G, u16)>,
+  writer: &mut W,
+) -> Result<(), io::Error> {
+  let len = u16::try_from(validators.len()).unwrap();
+  BorshSerialize::serialize(&len, writer)?;
+  for validator in validators {
+    BorshSerialize::serialize(&validator.0.to_bytes(), writer)?;
+    BorshSerialize::serialize(&validator.1, writer)?;
+  }
+  Ok(())
+}
+
+fn borsh_deserialize_validators<R: io::Read>(
+  reader: &mut R,
+) -> Result<Vec<(<Ristretto as Ciphersuite>::G, u16)>, io::Error> {
+  let len: u16 = BorshDeserialize::deserialize_reader(reader)?;
+  let mut res = vec![];
+  for _ in 0 .. len {
+    let compressed: [u8; 32] = BorshDeserialize::deserialize_reader(reader)?;
+    let point = Option::from(<Ristretto as Ciphersuite>::G::from_bytes(&compressed))
+      .ok_or_else(|| io::Error::other("invalid point for validator"))?;
+    let weight: u16 = BorshDeserialize::deserialize_reader(reader)?;
+    res.push((point, weight));
+  }
+  Ok(res)
+}
+
+#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
+pub struct TributarySpec {
+  serai_block: [u8; 32],
+  start_time: u64,
+  set: ValidatorSet,
+  #[borsh(
+    serialize_with = "borsh_serialize_validators",
+    deserialize_with = "borsh_deserialize_validators"
+  )]
+  validators: Vec<(<Ristretto as Ciphersuite>::G, u16)>,
+}
+
+impl TributarySpec {
+  pub fn new(
+    serai_block: [u8; 32],
+    start_time: u64,
+    set: ValidatorSet,
+    set_participants: Vec<(PublicKey, u16)>,
+  ) -> TributarySpec {
+    let mut validators = vec![];
+    for (participant, shares) in set_participants {
+      let participant = <Ristretto as Ciphersuite>::read_G::<&[u8]>(&mut participant.0.as_ref())
+        .expect("invalid key registered as participant");
+      validators.push((participant, shares));
+    }
+
+    Self { serai_block, start_time, set, validators }
+  }
+
+  pub fn set(&self) -> ValidatorSet {
+    self.set
+  }
+
+  pub fn genesis(&self) -> [u8; 32] {
+    // Calculate the genesis for this Tributary
+    let mut genesis = RecommendedTranscript::new(b"Serai Tributary Genesis");
+    // This locks it to a specific Serai chain
+    genesis.append_message(b"serai_block", self.serai_block);
+    genesis.append_message(b"session", self.set.session.0.to_le_bytes());
+    genesis.append_message(b"network", self.set.network.encode());
+    let genesis = genesis.challenge(b"genesis");
+    let genesis_ref: &[u8] = genesis.as_ref();
+    genesis_ref[.. 32].try_into().unwrap()
+  }
+
+  pub fn start_time(&self) -> u64 {
+    self.start_time
+  }
+
+  pub fn n(&self) -> u16 {
+    self.validators.iter().map(|(_, weight)| weight).sum()
+  }
+
+  pub fn t(&self) -> u16 {
+    ((2 * self.n()) / 3) + 1
+  }
+
+  pub fn i(&self, key: <Ristretto as Ciphersuite>::G) -> Option<Range<Participant>> {
+    let mut i = 1;
+    for (validator, weight) in &self.validators {
+      if validator == &key {
+        return Some(Range {
+          start: Participant::new(i).unwrap(),
+          end: Participant::new(i + weight).unwrap(),
+        });
+      }
+      i += weight;
+    }
+    None
+  }
+
+  pub fn validators(&self) -> Vec<(<Ristretto as Ciphersuite>::G, u64)> {
+    self.validators.iter().map(|(validator, weight)| (*validator, u64::from(*weight))).collect()
+  }
+}
diff --git a/coordinator/src/tributary/transaction.rs b/coordinator/src/tributary/transaction.rs
new file mode 100644
index 000000000..4a2748d52
--- /dev/null
+++ b/coordinator/src/tributary/transaction.rs
@@ -0,0 +1,693 @@
+use core::{ops::Deref, fmt::Debug};
+use std::io;
+
+use zeroize::Zeroizing;
+use rand_core::{RngCore, CryptoRng};
+
+use blake2::{Digest, Blake2s256};
+use transcript::{Transcript, RecommendedTranscript};
+
+use ciphersuite::{
+  group::{ff::Field, GroupEncoding},
+  Ciphersuite, Ristretto,
+};
+use schnorr::SchnorrSignature;
+use frost::Participant;
+
+use scale::{Encode, Decode};
+use processor_messages::coordinator::SubstrateSignableId;
+
+use tributary::{
+  TRANSACTION_SIZE_LIMIT, ReadWrite,
+  transaction::{Signed, TransactionError, TransactionKind, Transaction as TransactionTrait},
+};
+
+#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode)]
+pub enum Label {
+  Preprocess,
+  Share,
+}
+
+impl Label {
+  // TODO: Should nonces be u8 thanks to our use of topics?
+  pub fn nonce(&self) -> u32 {
+    match self {
+      Label::Preprocess => 0,
+      Label::Share => 1,
+    }
+  }
+}
+
+#[derive(Clone, PartialEq, Eq)]
+pub struct SignData<Id: Clone + PartialEq + Eq + Debug + Encode + Decode> {
+  pub plan: Id,
+  pub attempt: u32,
+  pub label: Label,
+
+  pub data: Vec<Vec<u8>>,
+
+  pub signed: Signed,
+}
+
+impl<Id: Clone + PartialEq + Eq + Debug + Encode + Decode> Debug for SignData<Id> {
+  fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
+    fmt
+      .debug_struct("SignData")
+      .field("id", &hex::encode(self.plan.encode()))
+      .field("attempt", &self.attempt)
+      .field("label", &self.label)
+      .field("signer", &hex::encode(self.signed.signer.to_bytes()))
+      .finish_non_exhaustive()
+  }
+}
+
+impl<Id: Clone + PartialEq + Eq + Debug + Encode + Decode> SignData<Id> {
+  pub(crate) fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
+    let plan = Id::decode(&mut scale::IoReader(&mut *reader))
+      .map_err(|_| io::Error::other("invalid plan in SignData"))?;
+
+    let mut attempt = [0; 4];
+    reader.read_exact(&mut attempt)?;
+    let attempt = u32::from_le_bytes(attempt);
+
+    let mut label = [0; 1];
+    reader.read_exact(&mut label)?;
+    let label = match label[0] {
+      0 => Label::Preprocess,
+      1 => Label::Share,
+      _ => Err(io::Error::other("invalid label in SignData"))?,
+    };
+
+    let data = {
+      let mut data_pieces = [0];
+      reader.read_exact(&mut data_pieces)?;
+      if data_pieces[0] == 0 {
+        Err(io::Error::other("zero pieces of data in SignData"))?;
+      }
+      let mut all_data = vec![];
+      for _ in 0 .. data_pieces[0] {
+        let mut data_len = [0; 2];
+        reader.read_exact(&mut data_len)?;
+        let mut data = vec![0; usize::from(u16::from_le_bytes(data_len))];
+        reader.read_exact(&mut data)?;
+        all_data.push(data);
+      }
+      all_data
+    };
+
+    let signed = Signed::read_without_nonce(reader, label.nonce())?;
+
+    Ok(SignData { plan, attempt, label, data, signed })
+  }
+
+  pub(crate) fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
+    writer.write_all(&self.plan.encode())?;
+    writer.write_all(&self.attempt.to_le_bytes())?;
+    writer.write_all(&[match self.label {
+      Label::Preprocess => 0,
+      Label::Share => 1,
+    }])?;
+
+    writer.write_all(&[u8::try_from(self.data.len()).unwrap()])?;
+    for data in &self.data {
+      if data.len() > u16::MAX.into() {
+        // Currently, the largest individual preprocess is a Monero transaction
+        // It provides 4 commitments per input (128 bytes), a 64-byte proof for them, along with a
+        // key image and proof (96 bytes)
+        // Even with all of that, we could support 227 inputs in a single TX
+        // Monero is limited to ~120 inputs per TX
+        //
+        // Bitcoin has a much higher input count of 520, yet it only uses 64 bytes per preprocess
+        Err(io::Error::other("signing data exceeded 65535 bytes"))?;
+      }
+      writer.write_all(&u16::try_from(data.len()).unwrap().to_le_bytes())?;
+      writer.write_all(data)?;
+    }
+
+    self.signed.write_without_nonce(writer)
+  }
+}
+
+#[derive(Clone, PartialEq, Eq)]
+pub enum Transaction {
+  RemoveParticipant(Participant),
+
+  // Once this completes successfully, no more instances should be created.
+  DkgCommitments {
+    attempt: u32,
+    commitments: Vec<Vec<u8>>,
+    signed: Signed,
+  },
+  DkgShares {
+    attempt: u32,
+    // Sending Participant, Receiving Participant, Share
+    shares: Vec<Vec<Vec<u8>>>,
+    confirmation_nonces: [u8; 64],
+    signed: Signed,
+  },
+  InvalidDkgShare {
+    attempt: u32,
+    accuser: Participant,
+    faulty: Participant,
+    blame: Option<Vec<u8>>,
+    signed: Signed,
+  },
+  DkgConfirmed {
+    attempt: u32,
+    confirmation_share: [u8; 32],
+    signed: Signed,
+  },
+
+  DkgRemoval(SignData<[u8; 32]>),
+
+  // Co-sign a Substrate block.
+  CosignSubstrateBlock([u8; 32]),
+
+  // When we have synchrony on a batch, we can allow signing it
+  // TODO (never?): This is less efficient compared to an ExternalBlock provided transaction,
+  // which would be binding over the block hash and automatically achieve synchrony on all
+  // relevant batches. ExternalBlock was removed for this due to complexity around the pipeline
+  // with the current processor, yet it would still be an improvement.
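+  // An illustrative consequence (editorial sketch, not part of this change): because
+  // Batch is Provided rather than Signed, it's only ordered once each validator has
+  // locally recognized the (block, batch) pairing, which is what "synchrony" means here.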
+  Batch {
+    block: [u8; 32],
+    batch: [u8; 5],
+  },
+  // When a Serai block is finalized, with the contained batches, we can allow the associated plan
+  // IDs
+  SubstrateBlock(u64),
+
+  SubstrateSign(SignData<SubstrateSignableId>),
+  Sign(SignData<[u8; 32]>),
+  // This is defined as an Unsigned transaction in order to de-duplicate SignCompleted amongst
+  // reporters (who should all report the same thing)
+  // We do still track the signer in order to prevent a single signer from publishing arbitrarily
+  // many TXs without penalty
+  // Here, they're denoted as the first_signer, as only the signer of the first TX to be included
+  // with this pairing will be remembered on-chain
+  SignCompleted {
+    plan: [u8; 32],
+    tx_hash: Vec<u8>,
+    first_signer: <Ristretto as Ciphersuite>::G,
+    signature: SchnorrSignature<Ristretto>,
+  },
+}
+
+impl Debug for Transaction {
+  fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
+    match self {
+      Transaction::RemoveParticipant(participant) => fmt
+        .debug_struct("Transaction::RemoveParticipant")
+        .field("participant", participant)
+        .finish(),
+      Transaction::DkgCommitments { attempt, commitments: _, signed } => fmt
+        .debug_struct("Transaction::DkgCommitments")
+        .field("attempt", attempt)
+        .field("signer", &hex::encode(signed.signer.to_bytes()))
+        .finish_non_exhaustive(),
+      Transaction::DkgShares { attempt, signed, .. } => fmt
+        .debug_struct("Transaction::DkgShares")
+        .field("attempt", attempt)
+        .field("signer", &hex::encode(signed.signer.to_bytes()))
+        .finish_non_exhaustive(),
+      Transaction::InvalidDkgShare { attempt, accuser, faulty, .. } => fmt
+        .debug_struct("Transaction::InvalidDkgShare")
+        .field("attempt", attempt)
+        .field("accuser", accuser)
+        .field("faulty", faulty)
+        .finish_non_exhaustive(),
+      Transaction::DkgConfirmed { attempt, confirmation_share: _, signed } => fmt
+        .debug_struct("Transaction::DkgConfirmed")
+        .field("attempt", attempt)
+        .field("signer", &hex::encode(signed.signer.to_bytes()))
+        .finish_non_exhaustive(),
+      Transaction::DkgRemoval(sign_data) => {
+        fmt.debug_struct("Transaction::DkgRemoval").field("sign_data", sign_data).finish()
+      }
+      Transaction::CosignSubstrateBlock(block) => fmt
+        .debug_struct("Transaction::CosignSubstrateBlock")
+        .field("block", &hex::encode(block))
+        .finish(),
+      Transaction::Batch { block, batch } => fmt
+        .debug_struct("Transaction::Batch")
+        .field("block", &hex::encode(block))
+        .field("batch", &hex::encode(batch))
+        .finish(),
+      Transaction::SubstrateBlock(block) => {
+        fmt.debug_struct("Transaction::SubstrateBlock").field("block", block).finish()
+      }
+      Transaction::SubstrateSign(sign_data) => {
+        fmt.debug_struct("Transaction::SubstrateSign").field("sign_data", sign_data).finish()
+      }
+      Transaction::Sign(sign_data) => {
+        fmt.debug_struct("Transaction::Sign").field("sign_data", sign_data).finish()
+      }
+      Transaction::SignCompleted { plan, tx_hash, .. } => fmt
+        .debug_struct("Transaction::SignCompleted")
+        .field("plan", &hex::encode(plan))
+        .field("tx_hash", &hex::encode(tx_hash))
+        .finish_non_exhaustive(),
+    }
+  }
+}
+
+impl ReadWrite for Transaction {
+  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
+    let mut kind = [0];
+    reader.read_exact(&mut kind)?;
+
+    match kind[0] {
+      0 => Ok(Transaction::RemoveParticipant({
+        let mut participant = [0; 2];
+        reader.read_exact(&mut participant)?;
+        Participant::new(u16::from_le_bytes(participant))
+          .ok_or_else(|| io::Error::other("invalid participant in RemoveParticipant"))?
+      })),
+
+      1 => {
+        let mut attempt = [0; 4];
+        reader.read_exact(&mut attempt)?;
+        let attempt = u32::from_le_bytes(attempt);
+
+        let commitments = {
+          let mut commitments_len = [0; 1];
+          reader.read_exact(&mut commitments_len)?;
+          let commitments_len = usize::from(commitments_len[0]);
+          if commitments_len == 0 {
+            Err(io::Error::other("zero commitments in DkgCommitments"))?;
+          }
+
+          let mut each_commitments_len = [0; 2];
+          reader.read_exact(&mut each_commitments_len)?;
+          let each_commitments_len = usize::from(u16::from_le_bytes(each_commitments_len));
+          if (commitments_len * each_commitments_len) > TRANSACTION_SIZE_LIMIT {
+            Err(io::Error::other(
+              "commitments present in transaction exceeded transaction size limit",
+            ))?;
+          }
+          let mut commitments = vec![vec![]; commitments_len];
+          for commitments in &mut commitments {
+            *commitments = vec![0; each_commitments_len];
+            reader.read_exact(commitments)?;
+          }
+          commitments
+        };
+
+        let signed = Signed::read_without_nonce(reader, 0)?;
+
+        Ok(Transaction::DkgCommitments { attempt, commitments, signed })
+      }
+
+      2 => {
+        let mut attempt = [0; 4];
+        reader.read_exact(&mut attempt)?;
+        let attempt = u32::from_le_bytes(attempt);
+
+        let shares = {
+          let mut share_quantity = [0; 1];
+          reader.read_exact(&mut share_quantity)?;
+
+          let mut key_share_quantity = [0; 1];
+          reader.read_exact(&mut key_share_quantity)?;
+
+          let mut share_len = [0; 2];
+          reader.read_exact(&mut share_len)?;
+          let share_len = usize::from(u16::from_le_bytes(share_len));
+
+          let mut all_shares = vec![];
+          for _ in 0 .. share_quantity[0] {
+            let mut shares = vec![];
+            for _ in 0 .. key_share_quantity[0] {
+              let mut share = vec![0; share_len];
+              reader.read_exact(&mut share)?;
+              shares.push(share);
+            }
+            all_shares.push(shares);
+          }
+          all_shares
+        };
+
+        let mut confirmation_nonces = [0; 64];
+        reader.read_exact(&mut confirmation_nonces)?;
+
+        let signed = Signed::read_without_nonce(reader, 1)?;
+
+        Ok(Transaction::DkgShares { attempt, shares, confirmation_nonces, signed })
+      }
+
+      3 => {
+        let mut attempt = [0; 4];
+        reader.read_exact(&mut attempt)?;
+        let attempt = u32::from_le_bytes(attempt);
+
+        let mut accuser = [0; 2];
+        reader.read_exact(&mut accuser)?;
+        let accuser = Participant::new(u16::from_le_bytes(accuser))
+          .ok_or_else(|| io::Error::other("invalid participant in InvalidDkgShare"))?;
+
+        let mut faulty = [0; 2];
+        reader.read_exact(&mut faulty)?;
+        let faulty = Participant::new(u16::from_le_bytes(faulty))
+          .ok_or_else(|| io::Error::other("invalid participant in InvalidDkgShare"))?;
+
+        let mut blame_len = [0; 2];
+        reader.read_exact(&mut blame_len)?;
+        let mut blame = vec![0; u16::from_le_bytes(blame_len).into()];
+        reader.read_exact(&mut blame)?;
+
+        // This shares a nonce with DkgConfirmed as only one is expected
+        let signed = Signed::read_without_nonce(reader, 2)?;
+
+        Ok(Transaction::InvalidDkgShare {
+          attempt,
+          accuser,
+          faulty,
+          blame: Some(blame).filter(|blame| !blame.is_empty()),
+          signed,
+        })
+      }
+
+      4 => {
+        let mut attempt = [0; 4];
+        reader.read_exact(&mut attempt)?;
+        let attempt = u32::from_le_bytes(attempt);
+
+        let mut confirmation_share = [0; 32];
+        reader.read_exact(&mut confirmation_share)?;
+
+        let signed = Signed::read_without_nonce(reader, 2)?;
+
+        Ok(Transaction::DkgConfirmed { attempt, confirmation_share, signed })
+      }
+
+      5 => SignData::read(reader).map(Transaction::DkgRemoval),
+
+      6 => {
+        let mut block = [0; 32];
+        reader.read_exact(&mut block)?;
+        Ok(Transaction::CosignSubstrateBlock(block))
+      }
+
+      7 => {
+        let mut block = [0; 32];
+        reader.read_exact(&mut block)?;
+        let mut batch = [0; 5];
+        reader.read_exact(&mut batch)?;
+        Ok(Transaction::Batch { block, batch })
+      }
+
+      8 => {
+        let mut block = [0; 8];
+        reader.read_exact(&mut block)?;
+        Ok(Transaction::SubstrateBlock(u64::from_le_bytes(block)))
+      }
+
+      9 => SignData::read(reader).map(Transaction::SubstrateSign),
+      10 => SignData::read(reader).map(Transaction::Sign),
+
+      11 => {
+        let mut plan = [0; 32];
+        reader.read_exact(&mut plan)?;
+
+        let mut tx_hash_len = [0];
+        reader.read_exact(&mut tx_hash_len)?;
+        let mut tx_hash = vec![0; usize::from(tx_hash_len[0])];
+        reader.read_exact(&mut tx_hash)?;
+
+        let first_signer = Ristretto::read_G(reader)?;
+        let signature = SchnorrSignature::<Ristretto>::read(reader)?;
+
+        Ok(Transaction::SignCompleted { plan, tx_hash, first_signer, signature })
+      }
+
+      _ => Err(io::Error::other("invalid transaction type")),
+    }
+  }
+
+  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
+    match self {
+      Transaction::RemoveParticipant(i) => {
+        writer.write_all(&[0])?;
+        writer.write_all(&u16::from(*i).to_le_bytes())
+      }
+
+      Transaction::DkgCommitments { attempt, commitments, signed } => {
+        writer.write_all(&[1])?;
+        writer.write_all(&attempt.to_le_bytes())?;
+        if commitments.is_empty() {
+          Err(io::Error::other("zero commitments in DkgCommitments"))?
+        }
+        writer.write_all(&[u8::try_from(commitments.len()).unwrap()])?;
+        for commitments_i in commitments {
+          if commitments_i.len() != commitments[0].len() {
+            Err(io::Error::other("commitments of differing sizes in DkgCommitments"))?
+          }
+        }
+        writer.write_all(&u16::try_from(commitments[0].len()).unwrap().to_le_bytes())?;
+        for commitments in commitments {
+          writer.write_all(commitments)?;
+        }
+        signed.write_without_nonce(writer)
+      }
+
+      Transaction::DkgShares { attempt, shares, confirmation_nonces, signed } => {
+        writer.write_all(&[2])?;
+        writer.write_all(&attempt.to_le_bytes())?;
+
+        // `shares` is a Vec which is supposed to map to a HashMap<Participant, Vec<u8>>. Since we
+        // bound participants to 150, this conversion is safe if a valid in-memory transaction.
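+        // Editorial arithmetic sketch (assumes the documented 150-participant bound):
+        // both `shares.len()` and `shares[0].len()` then fit in the u8 length prefixes
+        // written below, so the `u8::try_from(...).unwrap()` calls can't panic for a
+        // valid in-memory transaction.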
+        writer.write_all(&[u8::try_from(shares.len()).unwrap()])?;
+        // This assumes at least one share is being sent to another party
+        writer.write_all(&[u8::try_from(shares[0].len()).unwrap()])?;
+        let share_len = shares[0][0].len();
+        // For BLS12-381 G2, this would be:
+        // - A 32-byte share
+        // - A 96-byte ephemeral key
+        // - A 128-byte signature
+        // Hence why this has to be u16
+        writer.write_all(&u16::try_from(share_len).unwrap().to_le_bytes())?;
+
+        for these_shares in shares {
+          assert_eq!(these_shares.len(), shares[0].len(), "amount of sent shares was variable");
+          for share in these_shares {
+            assert_eq!(share.len(), share_len, "sent shares were of variable length");
+            writer.write_all(share)?;
+          }
+        }
+
+        writer.write_all(confirmation_nonces)?;
+        signed.write_without_nonce(writer)
+      }
+
+      Transaction::InvalidDkgShare { attempt, accuser, faulty, blame, signed } => {
+        writer.write_all(&[3])?;
+        writer.write_all(&attempt.to_le_bytes())?;
+        writer.write_all(&u16::from(*accuser).to_le_bytes())?;
+        writer.write_all(&u16::from(*faulty).to_le_bytes())?;
+
+        // Flattens Some(vec![]) to None on the expectation no actual blame will be 0-length
+        assert!(blame.as_ref().map(|blame| blame.len()).unwrap_or(1) != 0);
+        let blame_len =
+          u16::try_from(blame.as_ref().unwrap_or(&vec![]).len()).expect("blame exceeded 64 KB");
+        writer.write_all(&blame_len.to_le_bytes())?;
+        writer.write_all(blame.as_ref().unwrap_or(&vec![]))?;
+
+        signed.write_without_nonce(writer)
+      }
+
+      Transaction::DkgConfirmed { attempt, confirmation_share, signed } => {
+        writer.write_all(&[4])?;
+        writer.write_all(&attempt.to_le_bytes())?;
+        writer.write_all(confirmation_share)?;
+        signed.write_without_nonce(writer)
+      }
+
+      Transaction::DkgRemoval(data) => {
+        writer.write_all(&[5])?;
+        data.write(writer)
+      }
+
+      Transaction::CosignSubstrateBlock(block) => {
+        writer.write_all(&[6])?;
+        writer.write_all(block)
+      }
+
+      Transaction::Batch { block, batch } => {
+        writer.write_all(&[7])?;
+        writer.write_all(block)?;
+        writer.write_all(batch)
+      }
+
+      Transaction::SubstrateBlock(block) => {
+        writer.write_all(&[8])?;
+        writer.write_all(&block.to_le_bytes())
+      }
+
+      Transaction::SubstrateSign(data) => {
+        writer.write_all(&[9])?;
+        data.write(writer)
+      }
+      Transaction::Sign(data) => {
+        writer.write_all(&[10])?;
+        data.write(writer)
+      }
+      Transaction::SignCompleted { plan, tx_hash, first_signer, signature } => {
+        writer.write_all(&[11])?;
+        writer.write_all(plan)?;
+        writer
+          .write_all(&[u8::try_from(tx_hash.len()).expect("tx hash length exceeded 255 bytes")])?;
+        writer.write_all(tx_hash)?;
+        writer.write_all(&first_signer.to_bytes())?;
+        signature.write(writer)
+      }
+    }
+  }
+}
+
+impl TransactionTrait for Transaction {
+  fn kind(&self) -> TransactionKind<'_> {
+    match self {
+      Transaction::RemoveParticipant(_) => TransactionKind::Provided("remove"),
+
+      Transaction::DkgCommitments { attempt, commitments: _, signed } => {
+        TransactionKind::Signed((b"dkg", attempt).encode(), signed)
+      }
+      Transaction::DkgShares { attempt, signed, .. } => {
+        TransactionKind::Signed((b"dkg", attempt).encode(), signed)
+      }
+      Transaction::InvalidDkgShare { attempt, signed, .. } => {
+        TransactionKind::Signed((b"dkg", attempt).encode(), signed)
+      }
+      Transaction::DkgConfirmed { attempt, signed, .. } => {
+        TransactionKind::Signed((b"dkg", attempt).encode(), signed)
+      }
+
+      Transaction::DkgRemoval(data) => {
+        TransactionKind::Signed((b"dkg_removal", data.plan, data.attempt).encode(), &data.signed)
+      }
+
+      Transaction::CosignSubstrateBlock(_) => TransactionKind::Provided("cosign"),
+
+      Transaction::Batch { .. } => TransactionKind::Provided("batch"),
+      Transaction::SubstrateBlock(_) => TransactionKind::Provided("serai"),
+
+      Transaction::SubstrateSign(data) => {
+        TransactionKind::Signed((b"substrate", data.plan, data.attempt).encode(), &data.signed)
+      }
+      Transaction::Sign(data) => {
+        TransactionKind::Signed((b"sign", data.plan, data.attempt).encode(), &data.signed)
+      }
+      Transaction::SignCompleted { .. } => TransactionKind::Unsigned,
+    }
+  }
+
+  fn hash(&self) -> [u8; 32] {
+    let mut tx = self.serialize();
+    if let TransactionKind::Signed(_, signed) = self.kind() {
+      // Make sure the part we're cutting off is the signature
+      assert_eq!(tx.drain((tx.len() - 64) ..).collect::<Vec<_>>(), signed.signature.serialize());
+    }
+    Blake2s256::digest([b"Coordinator Tributary Transaction".as_slice(), &tx].concat()).into()
+  }
+
+  fn verify(&self) -> Result<(), TransactionError> {
+    // TODO: Check DkgRemoval and SubstrateSign's lengths here
+
+    if let Transaction::SignCompleted { first_signer, signature, .. } = self {
+      if !signature.verify(*first_signer, self.sign_completed_challenge()) {
+        Err(TransactionError::InvalidContent)?;
+      }
+    }
+
+    Ok(())
+  }
+}
+
+impl Transaction {
+  // Used to initially construct transactions so we can then get sig hashes and perform signing
+  pub fn empty_signed() -> Signed {
+    Signed {
+      signer: Ristretto::generator(),
+      nonce: 0,
+      signature: SchnorrSignature::<Ristretto> {
+        R: Ristretto::generator(),
+        s: <Ristretto as Ciphersuite>::F::ZERO,
+      },
+    }
+  }
+
+  // Sign a transaction
+  pub fn sign<R: RngCore + CryptoRng>(
+    &mut self,
+    rng: &mut R,
+    genesis: [u8; 32],
+    key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
+  ) {
+    fn signed(tx: &mut Transaction) -> (u32, &mut Signed) {
+      let nonce = match tx {
+        Transaction::RemoveParticipant(_) => panic!("signing RemoveParticipant"),
+
+        Transaction::DkgCommitments { .. } => 0,
+        Transaction::DkgShares { .. } => 1,
+        Transaction::InvalidDkgShare { .. } => 2,
+        Transaction::DkgConfirmed { .. } => 2,
+
+        Transaction::DkgRemoval(data) => data.label.nonce(),
+
+        Transaction::CosignSubstrateBlock(_) => panic!("signing CosignSubstrateBlock"),
+
+        Transaction::Batch { .. } => panic!("signing Batch"),
+        Transaction::SubstrateBlock(_) => panic!("signing SubstrateBlock"),
+
+        Transaction::SubstrateSign(data) => data.label.nonce(),
+        Transaction::Sign(data) => data.label.nonce(),
+        Transaction::SignCompleted { .. } => panic!("signing SignCompleted"),
+      };
+
+      (
+        nonce,
+        match tx {
+          Transaction::RemoveParticipant(_) => panic!("signing RemoveParticipant"),
+
+          Transaction::DkgCommitments { ref mut signed, .. } => signed,
+          Transaction::DkgShares { ref mut signed, .. } => signed,
+          Transaction::InvalidDkgShare { ref mut signed, .. } => signed,
+          Transaction::DkgConfirmed { ref mut signed, .. } => signed,
+
+          Transaction::DkgRemoval(ref mut data) => &mut data.signed,
+
+          Transaction::CosignSubstrateBlock(_) => panic!("signing CosignSubstrateBlock"),
+
+          Transaction::Batch { .. } => panic!("signing Batch"),
+          Transaction::SubstrateBlock(_) => panic!("signing SubstrateBlock"),
+
+          Transaction::SubstrateSign(ref mut data) => &mut data.signed,
+          Transaction::Sign(ref mut data) => &mut data.signed,
+          Transaction::SignCompleted { .. } => panic!("signing SignCompleted"),
+        },
+      )
+    }
+
+    let (nonce, signed_ref) = signed(self);
+    signed_ref.signer = Ristretto::generator() * key.deref();
+    signed_ref.nonce = nonce;
+
+    let sig_nonce = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(rng));
+    signed(self).1.signature.R = <Ristretto as Ciphersuite>::generator() * sig_nonce.deref();
+    let sig_hash = self.sig_hash(genesis);
+    signed(self).1.signature = SchnorrSignature::<Ristretto>::sign(key, sig_nonce, sig_hash);
+  }
+
+  pub fn sign_completed_challenge(&self) -> <Ristretto as Ciphersuite>::F {
+    if let Transaction::SignCompleted { plan, tx_hash, first_signer, signature } = self {
+      let mut transcript =
+        RecommendedTranscript::new(b"Coordinator Tributary Transaction SignCompleted");
+      transcript.append_message(b"plan", plan);
+      transcript.append_message(b"tx_hash", tx_hash);
+      transcript.append_message(b"signer", first_signer.to_bytes());
+      transcript.append_message(b"nonce", signature.R.to_bytes());
+      Ristretto::hash_to_F(b"SignCompleted signature", &transcript.challenge(b"challenge"))
+    } else {
+      panic!("sign_completed_challenge called on transaction which wasn't SignCompleted")
+    }
+  }
+}
diff --git a/crypto/dkg/Cargo.toml b/crypto/dkg/Cargo.toml
index 2b8cd25db..0eb3f5412 100644
--- a/crypto/dkg/Cargo.toml
+++ b/crypto/dkg/Cargo.toml
@@ -23,7 +23,6 @@ zeroize = { version = "^1.5", default-features = false, features = ["zeroize_der
 std-shims = { version = "0.1", path = "../../common/std-shims", default-features = false }
 
 borsh = { version = "1", default-features = false, features = ["derive", "de_strict_order"], optional = true }
-serde = { version = "1", default-features = false, features = ["derive"], optional = true }
 
 transcript = { package = "flexible-transcript", path = "../transcript", version = "^0.3.2", default-features = false, features = ["recommended"] }
 chacha20 = { version = "0.9", default-features = false, features = ["zeroize"] }
@@ -47,7 +46,6 @@ std = [
   "std-shims/std",
 
   "borsh?/std",
-  "serde?/std",
 
   "transcript/std",
   "chacha20/std",
@@ -61,6 +59,5 @@ std = [
   "dleq/serialize"
 ]
 borsh = ["dep:borsh"]
-serde = ["dep:serde"]
 tests = ["rand_core/getrandom"]
 default = ["std"]
diff --git a/crypto/dkg/src/lib.rs b/crypto/dkg/src/lib.rs
index fd49856f7..eb915e236 100644
--- a/crypto/dkg/src/lib.rs
+++ b/crypto/dkg/src/lib.rs
@@ -31,8 +31,7 @@ pub mod tests;
 
 /// The ID of a participant, defined as a non-zero u16.
 #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Zeroize)]
-#[cfg_attr(feature = "borsh", derive(borsh::BorshSerialize, borsh::BorshDeserialize))]
-#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
+#[cfg_attr(feature = "borsh", derive(borsh::BorshSerialize))]
 pub struct Participant(pub(crate) u16);
 impl Participant {
   /// Create a new Participant identifier from a u16.
@@ -118,6 +117,14 @@ mod lib {
     Ciphersuite,
   };
 
+  #[cfg(feature = "borsh")]
+  impl borsh::BorshDeserialize for Participant {
+    fn deserialize_reader<R: io::Read>(reader: &mut R) -> io::Result<Self> {
+      Participant::new(u16::deserialize_reader(reader)?)
+        .ok_or_else(|| io::Error::other("invalid participant"))
+    }
+  }
+
   // Validate a map of values to have the expected included participants
   pub(crate) fn validate_map<T>(
     map: &HashMap<Participant, T>,
@@ -147,8 +154,7 @@ mod lib {
 
   /// Parameters for a multisig.
   // These fields should not be made public as they should be static
   #[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
-  #[cfg_attr(feature = "borsh", derive(borsh::BorshSerialize, borsh::BorshDeserialize))]
-  #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
+  #[cfg_attr(feature = "borsh", derive(borsh::BorshSerialize))]
   pub struct ThresholdParams {
     /// Participants needed to sign on behalf of the group.
     pub(crate) t: u16,
@@ -189,6 +195,16 @@ mod lib {
     }
   }
 
+  #[cfg(feature = "borsh")]
+  impl borsh::BorshDeserialize for ThresholdParams {
+    fn deserialize_reader<R: io::Read>(reader: &mut R) -> io::Result<Self> {
+      let t = u16::deserialize_reader(reader)?;
+      let n = u16::deserialize_reader(reader)?;
+      let i = Participant::deserialize_reader(reader)?;
+      ThresholdParams::new(t, n, i).map_err(|e| io::Error::other(format!("{e:?}")))
+    }
+  }
+
   /// Calculate the lagrange coefficient for a signing set.
   pub fn lagrange<F: PrimeField>(i: Participant, included: &[Participant]) -> F {
     let i_f = F::from(u64::from(u16::from(i)));
diff --git a/crypto/frost/src/sign.rs b/crypto/frost/src/sign.rs
index ac4c53a64..a716dc583 100644
--- a/crypto/frost/src/sign.rs
+++ b/crypto/frost/src/sign.rs
@@ -224,13 +224,15 @@ pub trait SignMachine<S>: Send + Sync + Sized {
   /// security as your private key share.
   fn cache(self) -> CachedPreprocess;
 
-  /// Create a sign machine from a cached preprocess. After this, the preprocess must be deleted so
-  /// it's never reused. Any reuse would cause the signer to leak their secret share.
+  /// Create a sign machine from a cached preprocess.
+  ///
+  /// After this, the preprocess must be deleted so it's never reused. Any reuse will presumably
+  /// cause the signer to leak their secret share.
   fn from_cache(
     params: Self::Params,
     keys: Self::Keys,
     cache: CachedPreprocess,
-  ) -> Result<Self, FrostError>;
+  ) -> (Self, Self::Preprocess);
 
   /// Read a Preprocess message. Despite taking self, this does not save the preprocess.
   /// It must be externally cached and passed into sign.
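// Editorial sketch of caller-side usage under the new signature (assumed code, not part
// of this patch): from_cache is now infallible and re-derives the preprocess, so callers
// destructure a tuple instead of unwrapping a Result:
//
//   let cache = machine.cache();
//   // ... persist `cache`, restart, then reload it ...
//   let (machine, preprocess) = M::SignMachine::from_cache(params, keys, cache);
//   // rebroadcast `preprocess` if the original message may not have been delivered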
@@ -277,9 +279,8 @@ impl<C: Curve, A: Algorithm<C>> SignMachine<A::Signature> for AlgorithmSignMachi
     algorithm: A,
     keys: ThresholdKeys<C>,
     cache: CachedPreprocess,
-  ) -> Result<Self, FrostError> {
-    let (machine, _) = AlgorithmMachine::new(algorithm, keys).seeded_preprocess(cache);
-    Ok(machine)
+  ) -> (Self, Self::Preprocess) {
+    AlgorithmMachine::new(algorithm, keys).seeded_preprocess(cache)
   }
 
   fn read_preprocess<R: io::Read>(&self, reader: &mut R) -> io::Result<Self::Preprocess> {
diff --git a/crypto/frost/src/tests/mod.rs b/crypto/frost/src/tests/mod.rs
index e36bd7110..7e1c0acf3 100644
--- a/crypto/frost/src/tests/mod.rs
+++ b/crypto/frost/src/tests/mod.rs
@@ -183,7 +183,7 @@ pub fn sign<R: RngCore + CryptoRng, M: PreprocessMachine>(
       let cache = machines.remove(&i).unwrap().cache();
       machines.insert(
         i,
-        M::SignMachine::from_cache(params.clone(), keys.remove(&i).unwrap(), cache).unwrap(),
+        M::SignMachine::from_cache(params.clone(), keys.remove(&i).unwrap(), cache).0,
       );
     }
   }
diff --git a/processor/src/cosigner.rs b/processor/src/cosigner.rs
index 551a14c50..d53593abe 100644
--- a/processor/src/cosigner.rs
+++ b/processor/src/cosigner.rs
@@ -16,7 +16,6 @@ use frost_schnorrkel::Schnorrkel;
 
 use log::{info, warn};
 
-use scale::Encode;
 use serai_client::validator_sets::primitives::Session;
 
 use messages::coordinator::*;
diff --git a/processor/src/key_gen.rs b/processor/src/key_gen.rs
index 9cd6657f3..9f987794f 100644
--- a/processor/src/key_gen.rs
+++ b/processor/src/key_gen.rs
@@ -16,7 +16,6 @@ use frost::{
 
 use log::info;
 
-use scale::Encode;
 use serai_client::validator_sets::primitives::{Session, KeyPair};
 
 use messages::key_gen::*;
diff --git a/processor/src/signer.rs b/processor/src/signer.rs
index 3fcb0d70b..57c63fcd0 100644
--- a/processor/src/signer.rs
+++ b/processor/src/signer.rs
@@ -10,7 +10,6 @@ use frost::{
 
 use log::{info, debug, warn, error};
 
-use scale::Encode;
 use serai_client::validator_sets::primitives::Session;
 
 use messages::sign::*;
diff --git a/substrate/abi/Cargo.toml b/substrate/abi/Cargo.toml
index 0b2715766..fa235d0cd 100644
--- a/substrate/abi/Cargo.toml
+++ b/substrate/abi/Cargo.toml
@@ -34,5 +34,19 @@ serai-signals-primitives = { path = "../signals/primitives", version = "0.1" }
 frame-support = { git = "https://github.com/serai-dex/substrate" }
 
 [features]
-borsh = ["dep:borsh"]
-serde = ["dep:serde"]
+borsh = [
+  "dep:borsh",
+  "serai-primitives/borsh",
+  "serai-coins-primitives/borsh",
+  "serai-validator-sets-primitives/borsh",
+  "serai-in-instructions-primitives/borsh",
+  "serai-signals-primitives/borsh",
+]
+serde = [
+  "dep:serde",
+  "serai-primitives/serde",
+  "serai-coins-primitives/serde",
+  "serai-validator-sets-primitives/serde",
+  "serai-in-instructions-primitives/serde",
+  "serai-signals-primitives/serde",
+]
diff --git a/substrate/client/Cargo.toml b/substrate/client/Cargo.toml
index 6901a83ff..bf81622e9 100644
--- a/substrate/client/Cargo.toml
+++ b/substrate/client/Cargo.toml
@@ -54,6 +54,7 @@ serai-docker-tests = { path = "../../tests/docker" }
 
 [features]
 serai = ["thiserror", "serde", "serde_json", "sp-core", "sp-runtime", "frame-system", "simple-request"]
+borsh = ["serai-abi/borsh"]
 
 networks = []
 bitcoin = ["networks", "dep:bitcoin"]
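// Editorial closing illustration (assumed test-side code, not part of this patch) of why
// Participant now hand-implements BorshDeserialize: decoding must reject the invalid zero
// index, which the blanket derive would have constructed without complaint.
//
//   use dkg::Participant;
//
//   let p = Participant::new(5).unwrap();
//   let bytes = borsh::to_vec(&p).unwrap();
//   let decoded: Participant = borsh::from_slice(&bytes).unwrap();
//   assert_eq!(p, decoded);
//   // a raw zero u16 no longer round-trips into a Participant
//   assert!(borsh::from_slice::<Participant>(&0u16.to_le_bytes()).is_err());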