diff --git a/common/db/src/db_macro.rs b/common/db/src/db_macro.rs
new file mode 100644
index 000000000..5486e79b4
--- /dev/null
+++ b/common/db/src/db_macro.rs
@@ -0,0 +1,32 @@
+#[macro_export]
+macro_rules! createDb {
+  ($db_name: ident
+    { $($field_name: ident),* }
+  ) => {
+    fn db_key(db_dst: &'static [u8], item_dst: &'static [u8], key: impl AsRef<[u8]>) -> Vec<u8> {
+      let db_len = u8::try_from(db_dst.len()).unwrap();
+      let dst_len = u8::try_from(item_dst.len()).unwrap();
+      [[db_len].as_ref(), db_dst, [dst_len].as_ref(), item_dst, key.as_ref()].concat()
+    }
+
+    $(
+      #[derive(Clone, Debug)]
+      pub struct $field_name;
+      impl $field_name {
+
+        pub fn key(key: impl AsRef<[u8]>) -> Vec<u8> {
+          db_key(stringify!($db_name).as_bytes(), stringify!($field_name).as_bytes(), key)
+        }
+        pub fn set(txn: &mut impl DbTxn, key: impl AsRef<[u8]>, data: &impl serde::Serialize) {
+          let key = $field_name::key(key);
+          txn.put(&key, bincode::serialize(data).unwrap());
+        }
+        pub fn get<T: serde::de::DeserializeOwned>(getter: &impl Get, key: impl AsRef<[u8]>) -> Option<T> {
+          getter.get($field_name::key(key)).map(|data| {
+            bincode::deserialize(data.as_ref()).unwrap()
+          })
+        }
+      }
+    )*
+  };
+}
diff --git a/common/db/src/lib.rs b/common/db/src/lib.rs
index 0bcf9810a..9a7c14ae6 100644
--- a/common/db/src/lib.rs
+++ b/common/db/src/lib.rs
@@ -1,5 +1,7 @@
 mod mem;
 pub use mem::*;
+mod db_macro;
+pub use db_macro::*;
 
 #[cfg(feature = "rocksdb")]
 mod rocks;
diff --git a/processor/src/db.rs b/processor/src/db.rs
index 212341d0c..f906a5ced 100644
--- a/processor/src/db.rs
+++ b/processor/src/db.rs
@@ -8,6 +8,13 @@ pub use serai_db::*;
 
 use crate::networks::{Block, Network};
 
+createDb!(
+  MainDb {
+    HandledMessageDb,
+    PendingActivationsDb
+  }
+);
+
 #[derive(Debug)]
 pub struct MainDb<N: Network, D: Db>(D, PhantomData<N>);
 impl<N: Network, D: Db> MainDb<N, D> {
diff --git a/processor/src/key_gen.rs b/processor/src/key_gen.rs
index fe6905da1..2d30727f9 100644
--- a/processor/src/key_gen.rs
+++ b/processor/src/key_gen.rs
@@ -1,6 +1,6 @@
-use core::marker::PhantomData;
 use std::collections::HashMap;
 
+use serai_db::createDb;
 use zeroize::Zeroizing;
 
 use rand_core::SeedableRng;
@@ -27,105 +27,72 @@ pub struct KeyConfirmed<C: Ciphersuite> {
   pub network_keys: ThresholdKeys<C>,
 }
 
-#[derive(Clone, Debug)]
-struct KeyGenDb<N: Network, D: Db>(PhantomData<D>, PhantomData<N>);
-impl<N: Network, D: Db> KeyGenDb<N, D> {
-  fn key_gen_key(dst: &'static [u8], key: impl AsRef<[u8]>) -> Vec<u8> {
-    D::key(b"KEY_GEN", dst, key)
-  }
-
-  fn params_key(set: &ValidatorSet) -> Vec<u8> {
-    Self::key_gen_key(b"params", set.encode())
-  }
-  fn save_params(txn: &mut D::Transaction<'_>, set: &ValidatorSet, params: &ThresholdParams) {
-    txn.put(Self::params_key(set), bincode::serialize(params).unwrap());
-  }
-  fn params<G: Get>(getter: &G, set: &ValidatorSet) -> Option<ThresholdParams> {
-    getter.get(Self::params_key(set)).map(|bytes| bincode::deserialize(&bytes).unwrap())
+createDb!(
+  KeyGenDb {
+    ParamsDb,
+    CommitmentsDb,
+    GeneratedKeysDb,
+    KeysDb
   }
+);
+
+#[allow(clippy::type_complexity)]
+fn read_keys<N: Network>(
+  getter: &impl Get,
+  key: &[u8],
+) -> Option<(Vec<u8>, (ThresholdKeys<Ristretto>, ThresholdKeys<N::Curve>))> {
+  let keys_vec = getter.get(key)?;
+  let mut keys_ref: &[u8] = keys_vec.as_ref();
+  let substrate_keys = ThresholdKeys::new(ThresholdCore::read(&mut keys_ref).unwrap());
+  let mut network_keys = ThresholdKeys::new(ThresholdCore::read(&mut keys_ref).unwrap());
+  N::tweak_keys(&mut network_keys);
+  Some((keys_vec, (substrate_keys, network_keys)))
+}
 
-  // Not scoped to the set since that'd have latter attempts overwrite former
-  // A former attempt may become the finalized attempt, even if it doesn't in a timely manner
-  // Overwriting its commitments would be accordingly poor
-  fn commitments_key(id: &KeyGenId) -> Vec<u8> {
-    Self::key_gen_key(b"commitments", id.encode())
-  }
-  fn save_commitments(
-    txn: &mut D::Transaction<'_>,
-    id: &KeyGenId,
-    commitments: &HashMap<Participant, Vec<u8>>,
-  ) {
-    txn.put(Self::commitments_key(id), bincode::serialize(commitments).unwrap());
-  }
-  fn commitments<G: Get>(getter: &G, id: &KeyGenId) -> HashMap<Participant, Vec<u8>> {
-    bincode::deserialize::<HashMap<Participant, Vec<u8>>>(
-      &getter.get(Self::commitments_key(id)).unwrap(),
-    )
-    .unwrap()
-  }
+fn confirm_keys<N: Network>(
+  txn: &mut impl DbTxn,
+  set: ValidatorSet,
+  key_pair: KeyPair,
+) -> (ThresholdKeys<Ristretto>, ThresholdKeys<N::Curve>) {
+  let val: &[u8] = key_pair.1.as_ref();
+  let (keys_vec, keys) =
+    read_keys::<N>(txn, &GeneratedKeysDb::key((set, (&key_pair.0 .0, val)).encode()))
+      .unwrap();
+  assert_eq!(key_pair.0 .0, keys.0.group_key().to_bytes());
+  assert_eq!(
+    {
+      let network_key: &[u8] = key_pair.1.as_ref();
+      network_key
+    },
+    keys.1.group_key().to_bytes().as_ref(),
+  );
+  txn.put(KeysDb::key(&keys.1.group_key().to_bytes()), keys_vec);
+  keys
+}
 
-  fn generated_keys_key(set: ValidatorSet, key_pair: (&[u8; 32], &[u8])) -> Vec<u8> {
-    Self::key_gen_key(b"generated_keys", (set, key_pair).encode())
-  }
-  fn save_keys(
-    txn: &mut D::Transaction<'_>,
+fn keys<N: Network>(
+  getter: &impl Get,
+  key: &<N::Curve as Ciphersuite>::G,
+) -> Option<(ThresholdKeys<Ristretto>, ThresholdKeys<N::Curve>)> {
+  let res = read_keys::<N>(getter, &KeysDb::key(key.to_bytes()))?.1;
+  assert_eq!(&res.1.group_key(), key);
+  Some(res)
+}
+impl GeneratedKeysDb {
+  fn save_keys<N: Network>(
+    txn: &mut impl DbTxn,
     id: &KeyGenId,
     substrate_keys: &ThresholdCore<Ristretto>,
     network_keys: &ThresholdKeys<N::Curve>,
   ) {
     let mut keys = substrate_keys.serialize();
     keys.extend(network_keys.serialize().iter());
+    let key = (
+      id.set,
+      (&substrate_keys.group_key().to_bytes(), network_keys.group_key().to_bytes().as_ref()),
+    )
+      .encode();
     txn.put(
-      Self::generated_keys_key(
-        id.set,
-        (&substrate_keys.group_key().to_bytes(), network_keys.group_key().to_bytes().as_ref()),
-      ),
+      Self::key(key),
       keys,
     );
   }
-
-  fn keys_key(key: &<N::Curve as Ciphersuite>::G) -> Vec<u8> {
-    Self::key_gen_key(b"keys", key.to_bytes())
-  }
-  #[allow(clippy::type_complexity)]
-  fn read_keys<G: Get>(
-    getter: &G,
-    key: &[u8],
-  ) -> Option<(Vec<u8>, (ThresholdKeys<Ristretto>, ThresholdKeys<N::Curve>))> {
-    let keys_vec = getter.get(key)?;
-    let mut keys_ref: &[u8] = keys_vec.as_ref();
-    let substrate_keys = ThresholdKeys::new(ThresholdCore::read(&mut keys_ref).unwrap());
-    let mut network_keys = ThresholdKeys::new(ThresholdCore::read(&mut keys_ref).unwrap());
-    N::tweak_keys(&mut network_keys);
-    Some((keys_vec, (substrate_keys, network_keys)))
-  }
-  fn confirm_keys(
-    txn: &mut D::Transaction<'_>,
-    set: ValidatorSet,
-    key_pair: KeyPair,
-  ) -> (ThresholdKeys<Ristretto>, ThresholdKeys<N::Curve>) {
-    let (keys_vec, keys) =
-      Self::read_keys(txn, &Self::generated_keys_key(set, (&key_pair.0 .0, key_pair.1.as_ref())))
-        .unwrap();
-    assert_eq!(key_pair.0 .0, keys.0.group_key().to_bytes());
-    assert_eq!(
-      {
-        let network_key: &[u8] = key_pair.1.as_ref();
-        network_key
-      },
-      keys.1.group_key().to_bytes().as_ref(),
-    );
-    txn.put(Self::keys_key(&keys.1.group_key()), keys_vec);
-    keys
-  }
-  fn keys<G: Get>(
-    getter: &G,
-    key: &<N::Curve as Ciphersuite>::G,
-  ) -> Option<(ThresholdKeys<Ristretto>, ThresholdKeys<N::Curve>)> {
-    let res = Self::read_keys(getter, &Self::keys_key(key))?.1;
-    assert_eq!(&res.1.group_key(), key);
-    Some(res)
-  }
 }
 
 /// Coded so if the processor spontaneously reboots, one of two paths occur:
@@ -149,7 +116,7 @@ impl<N: Network, D: Db> KeyGen<N, D> {
 
   pub fn in_set(&self, set: &ValidatorSet) -> bool {
     // We determine if we're in set using if we have the parameters for a set's key generation
-    KeyGenDb::<N, D>::params(&self.db, set).is_some()
+    ParamsDb::get::<ThresholdParams>(&self.db, set.encode()).is_some()
   }
 
   pub fn keys(
@@ -165,7 +132,7 @@ impl<N: Network, D: Db> KeyGen<N, D> {
     // The only other concern is if it's set when it's not safe to use
     // The keys are only written on confirmation, and the transaction writing them is atomic to
     // every associated operation
-    KeyGenDb::<N, D>::keys(&self.db, key)
+    keys::<N>(&self.db, key)
   }
 
   pub async fn handle(
@@ -207,7 +174,7 @@ impl<N: Network, D: Db> KeyGen<N, D> {
           self.active_share.remove(&id.set).is_none()
         {
           // If we haven't handled this set before, save the params
-          KeyGenDb::<N, D>::save_params(txn, &id.set, &params);
+          ParamsDb::set(txn, &id.set.encode(), &params);
         }
 
         let (machines, commitments) = key_gen_machines(id, params);
@@ -228,7 +195,7 @@ impl<N: Network, D: Db> KeyGen<N, D> {
           panic!("commitments when already handled commitments");
         }
 
-        let params = KeyGenDb::<N, D>::params(txn, &id.set).unwrap();
+        let params = ParamsDb::get::<ThresholdParams>(txn, &id.set.encode()).unwrap();
 
         // Unwrap the machines, rebuilding them if we didn't have them in our cache
         // We won't if the processor rebooted
@@ -288,7 +255,7 @@ impl<N: Network, D: Db> KeyGen<N, D> {
           share.extend(network_shares[i].serialize());
         }
 
-        KeyGenDb::<N, D>::save_commitments(txn, &id, &commitments);
+        CommitmentsDb::set(txn, &id.encode(), &commitments);
 
         ProcessorMessage::Shares { id, shares }
       }
@@ -296,13 +263,13 @@ impl<N: Network, D: Db> KeyGen<N, D> {
      CoordinatorMessage::Shares { id, shares } => {
         info!("Received shares for {:?}", id);
 
-        let params = KeyGenDb::<N, D>::params(txn, &id.set).unwrap();
+        let params = ParamsDb::get::<ThresholdParams>(txn, &id.set.encode()).unwrap();
 
         // Same commentary on inconsistency as above exists
         let machines = self.active_share.remove(&id.set).unwrap_or_else(|| {
           let machines = key_gen_machines(id, params).0;
           let mut rng = secret_shares_rng(id);
-          let commitments = KeyGenDb::<N, D>::commitments(txn, &id);
+          let commitments = CommitmentsDb::get::<HashMap<Participant, Vec<u8>>>(txn, &id.encode()).unwrap();
           let mut commitments_ref: HashMap<Participant, &[u8]> =
             commitments.iter().map(|(i, commitments)| (*i, commitments.as_ref())).collect();
@@ -376,7 +343,7 @@ impl<N: Network, D: Db> KeyGen<N, D> {
         let mut network_keys = ThresholdKeys::new(network_keys);
         N::tweak_keys(&mut network_keys);
 
-        KeyGenDb::<N, D>::save_keys(txn, &id, &substrate_keys, &network_keys);
+        GeneratedKeysDb::save_keys::<N>(txn, &id, &substrate_keys, &network_keys);
 
         ProcessorMessage::GeneratedKeyPair {
           id,
@@ -393,7 +360,7 @@ impl<N: Network, D: Db> KeyGen<N, D> {
     set: ValidatorSet,
     key_pair: KeyPair,
   ) -> KeyConfirmed<N::Curve> {
-    let (substrate_keys, network_keys) = KeyGenDb::<N, D>::confirm_keys(txn, set, key_pair);
+    let (substrate_keys, network_keys) = confirm_keys::<N>(txn, set, key_pair);
 
     info!(
       "Confirmed key pair {} {} for set {:?}",
diff --git a/processor/src/main.rs b/processor/src/main.rs
index 523fa3b17..b4d006e33 100644
--- a/processor/src/main.rs
+++ b/processor/src/main.rs
@@ -471,6 +471,7 @@ async fn run<N: Network, D: Db, Co: Coordinator>(mut raw_db: D, network: N, mut
   let (main_db, mut tributary_mutable, mut substrate_mutable) = boot(&mut raw_db, &network).await;
 
   // We can't load this from the DB as we can't guarantee atomic increments with the ack function
+  // TODO: Load with a slight tolerance
   let mut last_coordinator_msg = None;
 
   loop {
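
For reference, a minimal sketch of how a `createDb!` invocation is used. `ExampleDb` and `ValueDb` are hypothetical names for illustration only (the real invocations are the `MainDb` and `KeyGenDb` ones above), and it assumes the calling crate imports `serai_db`'s traits and depends on `serde`/`bincode`, since the macro expansion references both unqualified:

```rust
use serai_db::{createDb, Db, DbTxn, Get, MemDb};

// Hypothetical invocation, mirroring the MainDb/KeyGenDb ones in the diff
createDb!(
  ExampleDb {
    ValueDb
  }
);

fn main() {
  let mut db = MemDb::new();

  // Generated keys are length-prefixed:
  //   [9] ++ b"ExampleDb" ++ [7] ++ b"ValueDb" ++ key
  // so distinct (db name, item name) pairs cannot collide
  let key = ValueDb::key(b"some-key");
  assert_eq!(key[0], u8::try_from("ExampleDb".len()).unwrap());

  // set bincode-serializes the value; get deserializes into the turbofished type
  let mut txn = db.txn();
  ValueDb::set(&mut txn, b"some-key", &42u64);
  txn.commit();
  assert_eq!(ValueDb::get::<u64>(&db, b"some-key"), Some(42));
}
```

Since the expansion also emits a free-standing `db_key` helper, a module supports at most one `createDb!` invocation, which is why `MainDb` and `KeyGenDb` live in separate modules (`processor/src/db.rs` and `processor/src/key_gen.rs`).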