Support multiple key shares per validator (#416)
* Update the coordinator to give key shares based on weight, not based on existence

Participants are now identified by their starting index. While this compiles,
the following remain unimplemented:

1) A conversion for DKG `i` values. The code currently assumes the `i` values
used within the DKG will be identical to the threshold `i` values used for the
MuSig signature which confirms the DKG.
2) Expansion from compressed values to full values before forwarding to the
processor.

* Add a fn to the DkgConfirmer to convert `i` values as needed

Also removes TODOs regarding Serai ensuring validator key uniqueness +
validity. The current infra achieves both.

* Have the Tributary DB track participation by shares, not by count

* Prevent a node from obtaining 34% of the maximum amount of key shares

This is mainly intended to set a bound on message sizes in the coordinator.
Message sizes are amplified by the amount of key shares held, so setting an
upper bound on that amount lets the coordinator determine size constants. While
that upper bound could be 150, that'd be unreasonable and would increase the
potential for DoS attacks. (A worked example of the resulting bound follows
this list.)

* Correct the mechanism to detect if sufficient accumulation has occurred

It used to check if the latest accumulation exactly hit the required threshold.
Now, accumulations may jump past the required threshold, so the correct check
is whether the threshold wasn't previously met and is met now. (A sketch of
this check also follows this list.)

* Finish updating the coordinator to handle a multiple-key-share-per-validator environment

* Adjust strategy re: preventing nonce reuse in DKG Confirmer

* Add TODOs regarding dropped transactions, add possible TODO fix

* Update tests/coordinator

This doesn't add new multi-key-share tests; it solely updates the existing
single-key-share tests to compile and run, with the necessary fixes to the
coordinator.

* Update processor key_gen to handle generating multiple key shares at once

* Update SubstrateSigner

* Update signer, clippy

* Update processor tests

* Update processor docker tests
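
(Re: the key share cap above.) As a rough check of that bound, assuming MAX_KEY_SHARES_PER_SET
is 150 (the constant's value isn't shown in this diff, but the commit message cites 150 as the
natural upper bound and the updated serialization test below creates up to 150 participants),
the figures used by the new tx_size_limit test work out to:

  max_dkg_coefficients          = ceil((150 * 2) / 3) + 1 = 101
  max_key_shares_per_individual = 150 - 101               = 49

49 of 150 key shares is roughly 33%, just under the 34% mark this commit aims to prevent a
single node from reaching.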
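
(Re: the corrected accumulation check above.) A minimal sketch of that check, with hypothetical
names; the actual coordinator tracks accumulated key shares per Tributary topic:

  fn newly_reached_threshold(prior_weight: u16, added_weight: u16, threshold: u16) -> bool {
    // The old check (prior_weight + added_weight == threshold) misses accumulations which
    // jump past the threshold; the corrected check is that the threshold wasn't already met
    // and is met now.
    (prior_weight < threshold) && ((prior_weight + added_weight) >= threshold)
  }

  fn main() {
    // With a threshold of 67 key shares, a validator holding 3 shares can push the total
    // from 65 straight to 68: the total never equals 67, yet the threshold is newly met.
    assert!(newly_reached_threshold(65, 3, 67));
    // Already met beforehand, so no new action is triggered
    assert!(!newly_reached_threshold(67, 3, 67));
  }
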
kayabaNerve authored Nov 4, 2023
1 parent 5970a45 commit e05b77d
Showing 28 changed files with 865 additions and 436 deletions.
9 changes: 6 additions & 3 deletions coordinator/src/db.rs
@@ -114,8 +114,9 @@ impl<D: Db> MainDb<D> {
network: NetworkId,
id_type: RecognizedIdType,
id: [u8; 32],
preprocess: Vec<u8>,
preprocess: Vec<Vec<u8>>,
) {
let preprocess = preprocess.encode();
let key = Self::first_preprocess_key(network, id_type, id);
if let Some(existing) = txn.get(&key) {
assert_eq!(existing, preprocess, "saved a distinct first preprocess");
@@ -128,8 +129,10 @@
network: NetworkId,
id_type: RecognizedIdType,
id: [u8; 32],
) -> Option<Vec<u8>> {
getter.get(Self::first_preprocess_key(network, id_type, id))
) -> Option<Vec<Vec<u8>>> {
getter
.get(Self::first_preprocess_key(network, id_type, id))
.map(|bytes| Vec::<_>::decode(&mut bytes.as_slice()).unwrap())
}

fn last_received_batch_key(network: NetworkId) -> Vec<u8> {
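
A sketch of the round-trip implied by the db.rs change above: the first preprocesses are now a
Vec<Vec<u8>> (one preprocess per key share), which isn't a plain byte blob, so it's encoded
before being written to the DB and decoded on read. Hypothetical helper names, assuming the
Encode/Decode traits come from parity-scale-codec, as the .encode()/Vec::decode() usage suggests:

  use parity_scale_codec::{Decode, Encode};

  // One preprocess per key share, flattened into a single DB value
  fn store(preprocesses: &Vec<Vec<u8>>) -> Vec<u8> {
    preprocesses.encode()
  }

  fn load(bytes: Vec<u8>) -> Vec<Vec<u8>> {
    Vec::<Vec<u8>>::decode(&mut bytes.as_slice()).unwrap()
  }

  fn main() {
    let preprocesses = vec![vec![1u8, 2, 3], vec![4, 5, 6]];
    assert_eq!(load(store(&preprocesses)), preprocesses);
  }
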
72 changes: 43 additions & 29 deletions coordinator/src/main.rs
@@ -101,19 +101,16 @@ async fn add_tributary<D: Db, Pro: Processors, P: P2p>(
// If we're rebooting, we'll re-fire this message
// This is safe due to the message-queue deduplicating based off the intent system
let set = spec.set();
let our_i = spec
.i(Ristretto::generator() * key.deref())
.expect("adding a tributary for a set we aren't in set for");
processors
.send(
set.network,
processor_messages::key_gen::CoordinatorMessage::GenerateKey {
id: processor_messages::key_gen::KeyGenId { set, attempt: 0 },
params: frost::ThresholdParams::new(
spec.t(),
spec.n(),
spec
.i(Ristretto::generator() * key.deref())
.expect("adding a tributary for a set we aren't in set for"),
)
.unwrap(),
params: frost::ThresholdParams::new(spec.t(), spec.n(), our_i.start).unwrap(),
shares: u16::from(our_i.end) - u16::from(our_i.start),
},
)
.await;
@@ -426,18 +423,29 @@ async fn handle_processor_message<D: Db, P: P2p>(
// Create a MuSig-based machine to inform Substrate of this key generation
let nonces = crate::tributary::dkg_confirmation_nonces(key, spec, id.attempt);

let our_i = spec
.i(pub_key)
.expect("processor message to DKG for a session we aren't a validator in");

// `tx_shares` needs to be done here as while it can be serialized from the HashMap
// without further context, it can't be deserialized without context
let mut tx_shares = Vec::with_capacity(shares.len());
for i in 1 ..= spec.n() {
let i = Participant::new(i).unwrap();
if i ==
spec
.i(pub_key)
.expect("processor message to DKG for a session we aren't a validator in")
{
if our_i.contains(&i) {
for shares in &shares {
if shares.contains_key(&i) {
panic!("processor sent us our own shares");
}
}
continue;
}
tx_shares
.push(shares.remove(&i).expect("processor didn't send share for another validator"));
tx_shares.push(vec![]);
for shares in &mut shares {
tx_shares.last_mut().unwrap().push(
shares.remove(&i).expect("processor didn't send share for another validator"),
);
}
}

vec![Transaction::DkgShares {
Expand Down Expand Up @@ -474,32 +482,34 @@ async fn handle_processor_message<D: Db, P: P2p>(
}
},
ProcessorMessage::Sign(msg) => match msg {
sign::ProcessorMessage::Preprocess { id, preprocess } => {
sign::ProcessorMessage::Preprocess { id, preprocesses } => {
if id.attempt == 0 {
MainDb::<D>::save_first_preprocess(
&mut txn,
network,
RecognizedIdType::Plan,
id.id,
preprocess,
preprocesses,
);

vec![]
} else {
vec![Transaction::SignPreprocess(SignData {
plan: id.id,
attempt: id.attempt,
data: preprocess,
data: preprocesses,
signed: Transaction::empty_signed(),
})]
}
}
sign::ProcessorMessage::Share { id, share } => vec![Transaction::SignShare(SignData {
plan: id.id,
attempt: id.attempt,
data: share,
signed: Transaction::empty_signed(),
})],
sign::ProcessorMessage::Share { id, shares } => {
vec![Transaction::SignShare(SignData {
plan: id.id,
attempt: id.attempt,
data: shares,
signed: Transaction::empty_signed(),
})]
}
sign::ProcessorMessage::Completed { key: _, id, tx } => {
let r = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng));
#[allow(non_snake_case)]
@@ -522,7 +532,7 @@ async fn handle_processor_message<D: Db, P: P2p>(
},
ProcessorMessage::Coordinator(inner_msg) => match inner_msg {
coordinator::ProcessorMessage::SubstrateBlockAck { .. } => unreachable!(),
coordinator::ProcessorMessage::BatchPreprocess { id, block, preprocess } => {
coordinator::ProcessorMessage::BatchPreprocess { id, block, preprocesses } => {
log::info!(
"informed of batch (sign ID {}, attempt {}) for block {}",
hex::encode(id.id),
@@ -538,7 +548,7 @@ async fn handle_processor_message<D: Db, P: P2p>(
spec.set().network,
RecognizedIdType::Batch,
id.id,
preprocess,
preprocesses,
);

// If this is the new key's first Batch, only create this TX once we verify all
@@ -550,6 +560,10 @@ async fn handle_processor_message<D: Db, P: P2p>(
if last_received != 0 {
// Decrease by 1, to get the ID of the Batch prior to this Batch
let prior_sets_last_batch = last_received - 1;
// TODO: If we're looping here, we're not handling the messages we need to in order
// to create the Batch we're looking for
// Don't have the processor yield the handover batch until the batch before is
// acknowledged on-chain?
loop {
let successfully_verified = substrate::verify_published_batches::<D>(
&mut txn,
@@ -598,16 +612,16 @@ async fn handle_processor_message<D: Db, P: P2p>(
vec![Transaction::BatchPreprocess(SignData {
plan: id.id,
attempt: id.attempt,
data: preprocess,
data: preprocesses,
signed: Transaction::empty_signed(),
})]
}
}
coordinator::ProcessorMessage::BatchShare { id, share } => {
coordinator::ProcessorMessage::BatchShare { id, shares } => {
vec![Transaction::BatchShare(SignData {
plan: id.id,
attempt: id.attempt,
data: share.to_vec(),
data: shares.into_iter().map(|share| share.to_vec()).collect(),
signed: Transaction::empty_signed(),
})]
}
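
Relating to the GenerateKey change in add_tributary above: spec.i(...) now returns a contiguous
range of participant indices instead of a single index. A minimal sketch of how that range maps
onto the parameters sent to the processor (plain u16s here; the real code wraps indices in
frost's Participant type):

  fn main() {
    // Hypothetical: a validator holding three of a set's key shares, occupying
    // participant indices 2, 3, and 4 (the range is half-open).
    let our_i = 2u16 .. 5u16;

    // The starting index identifies the validator in ThresholdParams...
    let i = our_i.start;
    // ...and the width of the range is how many key shares it generates at once.
    let shares = our_i.end - our_i.start;
    assert_eq!((i, shares), (2, 3));
  }
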
2 changes: 1 addition & 1 deletion coordinator/src/substrate/mod.rs
@@ -79,7 +79,7 @@ async fn handle_new_set<D: Db>(
.await?
.expect("validator selected for set yet didn't have an allocation")
.0;
set_data.push((participant, allocation / allocation_per_key_share));
set_data.push((participant, u16::try_from(allocation / allocation_per_key_share).unwrap()));
}
amortize_excess_key_shares(&mut set_data);
set_data
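
A sketch of the weight calculation above: a validator's key share count is its stake allocation
divided by the per-key-share allocation, truncated into a u16 (the key_shares helper and the
amounts below are hypothetical; actual allocation units aren't shown in this diff):

  fn key_shares(allocation: u64, allocation_per_key_share: u64) -> u16 {
    u16::try_from(allocation / allocation_per_key_share).unwrap()
  }

  fn main() {
    // e.g. 350_000 staked with 100_000 required per key share yields 3 key shares
    assert_eq!(key_shares(350_000, 100_000), 3);
  }
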
13 changes: 7 additions & 6 deletions coordinator/src/tests/tributary/dkg.rs
@@ -47,7 +47,8 @@ async fn dkg_test() {
let mut commitments = vec![0; 256];
OsRng.fill_bytes(&mut commitments);

let mut tx = Transaction::DkgCommitments(attempt, commitments, Transaction::empty_signed());
let mut tx =
Transaction::DkgCommitments(attempt, vec![commitments], Transaction::empty_signed());
tx.sign(&mut OsRng, spec.genesis(), key, 0);
txs.push(tx);
}
@@ -69,7 +70,7 @@
.enumerate()
.map(|(i, tx)| {
if let Transaction::DkgCommitments(_, commitments, _) = tx {
(Participant::new((i + 1).try_into().unwrap()).unwrap(), commitments.clone())
(Participant::new((i + 1).try_into().unwrap()).unwrap(), commitments[0].clone())
} else {
panic!("txs had non-commitments");
}
@@ -165,7 +166,7 @@ async fn dkg_test() {
if i != k {
let mut share = vec![0; 256];
OsRng.fill_bytes(&mut share);
shares.push(share);
shares.push(vec![share]);
}
}

@@ -213,7 +214,7 @@ async fn dkg_test() {
let shares_for = |i: usize| {
CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Shares {
id: KeyGenId { set: spec.set(), attempt: 0 },
shares: txs
shares: vec![txs
.iter()
.enumerate()
.filter_map(|(l, tx)| {
@@ -224,14 +225,14 @@
let relative_i = i - (if i > l { 1 } else { 0 });
Some((
Participant::new((l + 1).try_into().unwrap()).unwrap(),
shares[relative_i].clone(),
shares[relative_i][0].clone(),
))
}
} else {
panic!("txs had non-shares");
}
})
.collect::<HashMap<_, _>>(),
.collect::<HashMap<_, _>>()],
})
};

74 changes: 60 additions & 14 deletions coordinator/src/tests/tributary/mod.rs
@@ -36,7 +36,13 @@ fn random_sign_data<R: RngCore>(rng: &mut R) -> SignData {
plan,
attempt: random_u32(&mut OsRng),

data: random_vec(&mut OsRng, 512),
data: {
let mut res = vec![];
for _ in 0 .. ((rng.next_u64() % 255) + 1) {
res.push(random_vec(&mut OsRng, 512));
}
res
},

signed: random_signed(&mut OsRng),
}
@@ -46,30 +52,70 @@ fn test_read_write<RW: Eq + Debug + ReadWrite>(value: RW) {
assert_eq!(value, RW::read::<&[u8]>(&mut value.serialize().as_ref()).unwrap());
}

#[test]
fn tx_size_limit() {
use serai_client::validator_sets::primitives::{MAX_KEY_SHARES_PER_SET, MAX_KEY_LEN};

use tributary::TRANSACTION_SIZE_LIMIT;

let max_dkg_coefficients = (MAX_KEY_SHARES_PER_SET * 2).div_ceil(3) + 1;
let max_key_shares_per_individual = MAX_KEY_SHARES_PER_SET - max_dkg_coefficients;
// Handwave the DKG Commitments size as the size of the commitments to the coefficients and
// 1024 bytes for all overhead
let handwaved_dkg_commitments_size = (max_dkg_coefficients * MAX_KEY_LEN) + 1024;
assert!(
u32::try_from(TRANSACTION_SIZE_LIMIT).unwrap() >=
(handwaved_dkg_commitments_size * max_key_shares_per_individual)
);

// Encryption key, PoP (2 elements), message
let elements_per_share = 4;
let handwaved_dkg_shares_size =
(elements_per_share * MAX_KEY_LEN * MAX_KEY_SHARES_PER_SET) + 1024;
assert!(
u32::try_from(TRANSACTION_SIZE_LIMIT).unwrap() >=
(handwaved_dkg_shares_size * max_key_shares_per_individual)
);
}

#[test]
fn serialize_sign_data() {
test_read_write(random_sign_data(&mut OsRng));
}

#[test]
fn serialize_transaction() {
test_read_write(Transaction::DkgCommitments(
random_u32(&mut OsRng),
random_vec(&mut OsRng, 512),
random_signed(&mut OsRng),
));
{
let mut commitments = vec![random_vec(&mut OsRng, 512)];
for _ in 0 .. (OsRng.next_u64() % 100) {
let mut temp = commitments[0].clone();
OsRng.fill_bytes(&mut temp);
commitments.push(temp);
}
test_read_write(Transaction::DkgCommitments(
random_u32(&mut OsRng),
commitments,
random_signed(&mut OsRng),
));
}

{
// This supports a variable share length, yet share length is expected to be constant among
// shares
let share_len = usize::try_from(OsRng.next_u64() % 512).unwrap();
// This supports a variable share length, and a variable amount of sent shares, yet the share
// length and the amount of sent shares are expected to be constant among recipients
let share_len = usize::try_from((OsRng.next_u64() % 512) + 1).unwrap();
let amount_of_shares = usize::try_from((OsRng.next_u64() % 3) + 1).unwrap();
// Create a valid vec of shares
let mut shares = vec![];
// Create up to 512 participants
for _ in 0 .. (OsRng.next_u64() % 512) {
let mut share = vec![0; share_len];
OsRng.fill_bytes(&mut share);
shares.push(share);
// Create up to 150 participants
for _ in 0 .. ((OsRng.next_u64() % 150) + 1) {
// Give each sender multiple shares
let mut sender_shares = vec![];
for _ in 0 .. amount_of_shares {
let mut share = vec![0; share_len];
OsRng.fill_bytes(&mut share);
sender_shares.push(share);
}
shares.push(sender_shares);
}

test_read_write(Transaction::DkgShares {
2 changes: 1 addition & 1 deletion coordinator/src/tests/tributary/tx.rs
@@ -40,7 +40,7 @@ async fn tx_test() {
// Create the TX with a null signature so we can get its sig hash
let block_before_tx = tributaries[sender].1.tip().await;
let mut tx =
Transaction::DkgCommitments(attempt, commitments.clone(), Transaction::empty_signed());
Transaction::DkgCommitments(attempt, vec![commitments.clone()], Transaction::empty_signed());
tx.sign(&mut OsRng, spec.genesis(), &key, 0);

assert_eq!(tributaries[sender].1.add_transaction(tx.clone()).await, Ok(true));