Add a TributaryState struct with higher-level DB logic
kayabaNerve committed Oct 24, 2023
1 parent 7c10873 commit 0198d4c
Showing 5 changed files with 142 additions and 97 deletions.
1 change: 1 addition & 0 deletions Cargo.lock

1 change: 1 addition & 0 deletions coordinator/Cargo.toml
@@ -42,6 +42,7 @@ tributary = { package = "tributary-chain", path = "./tributary" }
serai-client = { path = "../substrate/client", features = ["serai"] }

hex = "0.4"
bincode = "1"
serde_json = { version = "1", default-features = false }

log = "0.4"
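The bincode dependency added here is used in coordinator/src/tributary/db.rs (below) to serialize the per-participant nonce map. A minimal round-trip sketch, not part of this commit, using the same HashMap<u16, Vec<u8>> shape that file serializes:

use std::collections::HashMap;

// Sketch only: serialize and deserialize a nonce-style map with bincode 1.x.
fn nonce_map_round_trip() {
  let mut nonces: HashMap<u16, Vec<u8>> = HashMap::new();
  nonces.insert(1, vec![0xaa, 0xbb]);
  let bytes = bincode::serialize(&nonces).unwrap();
  let decoded: HashMap<u16, Vec<u8>> = bincode::deserialize(&bytes).unwrap();
  assert_eq!(nonces, decoded);
}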
3 changes: 1 addition & 2 deletions coordinator/src/tests/tributary/dkg.rs
@@ -283,8 +283,7 @@ async fn dkg_test() {
let mut txs = vec![];
for key in keys.iter() {
let attempt = 0;
// Re-using the one DB here is fine, due to exactly how this specific call is coded,
// albeit poor practice
let (mut scanner_db, _) = new_processors(key, &spec, &tributaries[0].1).await;
let mut txn = scanner_db.0.txn();
let share =
crate::tributary::generated_key_pair::<MemDb>(&mut txn, key, &spec, &key_pair, 0).unwrap();
92 changes: 91 additions & 1 deletion coordinator/src/tributary/db.rs
@@ -1,13 +1,18 @@
use std::io::Read;
use core::{marker::PhantomData, ops::Deref};
use std::{io::Read, collections::HashMap};

use scale::{Encode, Decode};

use zeroize::Zeroizing;
use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
use frost::Participant;

use serai_client::validator_sets::primitives::{ValidatorSet, KeyPair};

pub use serai_db::*;

use crate::tributary::TributarySpec;

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum Topic {
Dkg,
@@ -125,6 +130,29 @@ impl<D: Db> TributaryDb<D> {
})
}

fn confirmation_nonces_key(genesis: [u8; 32], attempt: u32) -> Vec<u8> {
Self::tributary_key(b"confirmation_nonces", (genesis, attempt).encode())
}
pub fn save_confirmation_nonces(
txn: &mut D::Transaction<'_>,
genesis: [u8; 32],
attempt: u32,
nonces: HashMap<Participant, Vec<u8>>,
) {
let nonces =
nonces.into_iter().map(|(key, value)| (u16::from(key), value)).collect::<HashMap<_, _>>();
txn.put(Self::confirmation_nonces_key(genesis, attempt), bincode::serialize(&nonces).unwrap())
}
pub fn confirmation_nonces<G: Get>(
getter: &G,
genesis: [u8; 32],
attempt: u32,
) -> Option<HashMap<Participant, Vec<u8>>> {
let bytes = getter.get(Self::confirmation_nonces_key(genesis, attempt))?;
let map: HashMap<u16, Vec<u8>> = bincode::deserialize(&bytes).unwrap();
Some(map.into_iter().map(|(key, value)| (Participant::new(key).unwrap(), value)).collect())
}
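The two functions above form a write/read pair keyed by (genesis, attempt). A minimal sketch of the round trip, not part of this commit — it assumes the imports already in this file plus serai_db's MemDb (with its usual Db/DbTxn/Get impls), as used in the coordinator's tests:

// Sketch only: save nonces in a transaction, commit, then read them back.
fn confirmation_nonces_round_trip() {
  let mut db = MemDb::new();
  let genesis = [0u8; 32];
  let attempt = 0;

  let mut nonces = HashMap::new();
  nonces.insert(Participant::new(1).unwrap(), vec![1, 2, 3]);

  let mut txn = db.txn();
  TributaryDb::<MemDb>::save_confirmation_nonces(&mut txn, genesis, attempt, nonces.clone());
  txn.commit();

  assert_eq!(TributaryDb::<MemDb>::confirmation_nonces(&db, genesis, attempt), Some(nonces));
}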

// The key pair which we're actively working on completing
fn currently_completing_key_pair_key(genesis: [u8; 32]) -> Vec<u8> {
Self::tributary_key(b"currently_completing_key_pair", genesis)
@@ -221,3 +249,65 @@ impl<D: Db> TributaryDb<D> {
txn.put(Self::event_key(&id, index), []);
}
}

pub enum DataSet {
Participating(HashMap<Participant, Vec<u8>>),
NotParticipating,
}

pub enum Accumulation {
Ready(DataSet),
NotReady,
}

pub struct TributaryState<D: Db>(PhantomData<D>);
impl<D: Db> TributaryState<D> {
pub fn accumulate(
txn: &mut D::Transaction<'_>,
our_key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
spec: &TributarySpec,
data_spec: &DataSpecification,
signer: <Ristretto as Ciphersuite>::G,
data: &[u8],
) -> Accumulation {
if TributaryDb::<D>::data(txn, spec.genesis(), data_spec, signer).is_some() {
panic!("accumulating data for a participant multiple times");
}
let received = TributaryDb::<D>::set_data(txn, spec.genesis(), data_spec, signer, data);

// If we have all the needed commitments/preprocesses/shares, tell the processor
// TODO: This needs to be coded by weight, not by validator count
let needed = if data_spec.topic == Topic::Dkg { spec.n() } else { spec.t() };
if received == needed {
return Accumulation::Ready({
let mut data = HashMap::new();
for validator in spec.validators().iter().map(|validator| validator.0) {
data.insert(
spec.i(validator).unwrap(),
if let Some(data) = TributaryDb::<D>::data(txn, spec.genesis(), data_spec, validator) {
data
} else {
continue;
},
);
}
assert_eq!(data.len(), usize::from(needed));

// Remove our own piece of data, if we were involved
if data
.remove(
&spec
.i(Ristretto::generator() * our_key.deref())
.expect("handling a message for a Tributary we aren't part of"),
)
.is_some()
{
DataSet::Participating(data)
} else {
DataSet::NotParticipating
}
});
}
Accumulation::NotReady
}
}
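A rough sketch of how a caller might consume accumulate's result, not part of this commit — txn, our_key, spec, data_spec, signer, and data are assumed to be in scope within a function generic over D: Db, and handle_data_set is a hypothetical stand-in for forwarding the contributions (e.g. to the processor):

match TributaryState::<D>::accumulate(&mut txn, &our_key, &spec, &data_spec, signer, data) {
  Accumulation::Ready(DataSet::Participating(data_set)) => {
    // Every needed contribution arrived and ours was among them; our own entry
    // has already been removed, so data_set holds the other participants' data.
    handle_data_set(data_set);
  }
  Accumulation::Ready(DataSet::NotParticipating) => {
    // The set completed without a contribution from us; nothing further to do here.
  }
  Accumulation::NotReady => {
    // Still waiting on more validators' data.
  }
}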