Handle the combination of DKG removals with re-attempts
With a DKG removal comes a reduction in the number of participants, which was
ignored by re-attempts.

Now, we determine n/i based on the parties removed, and deterministically
obtain the context of who was removed.
kayabaNerve committed Dec 13, 2023
1 parent 884b6a6 commit 77edd00
Showing 15 changed files with 410 additions and 132 deletions.
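The core of the change below is that `spec.n(...)` and `spec.i(...)` now take the set of removed validators as an argument, so threshold parameters and share indices are computed over the reduced participant set. The following is a minimal sketch of that idea only, not the actual `TributarySpec` implementation; the `Validator` struct, its `weight` field, and the `[u8; 32]` key representation are assumptions made for illustration.

use std::ops::Range;

// Hypothetical stand-in for the coordinator's validator spec.
struct Validator {
  key: [u8; 32],
  // How many DKG shares this validator is responsible for.
  weight: u16,
}

struct SpecSketch {
  validators: Vec<Validator>,
}

impl SpecSketch {
  // Total share count once removed validators are excluded.
  fn n(&self, removed: &[[u8; 32]]) -> u16 {
    self.validators.iter().filter(|v| !removed.contains(&v.key)).map(|v| v.weight).sum()
  }

  // The 1-indexed share range assigned to `key`, skipping removed validators so that
  // indices stay contiguous after a removal.
  fn i(&self, removed: &[[u8; 32]], key: [u8; 32]) -> Option<Range<u16>> {
    let mut start = 1u16;
    for validator in self.validators.iter().filter(|v| !removed.contains(&v.key)) {
      if validator.key == key {
        return Some(start .. (start + validator.weight));
      }
      start += validator.weight;
    }
    None
  }
}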
34 changes: 20 additions & 14 deletions coordinator/src/main.rs
@@ -108,14 +108,14 @@ async fn add_tributary<D: Db, Pro: Processors, P: P2p>(
// This is safe due to the message-queue deduplicating based off the intent system
let set = spec.set();
let our_i = spec
- .i(Ristretto::generator() * key.deref())
+ .i(&[], Ristretto::generator() * key.deref())
.expect("adding a tributary for a set we aren't in set for");
processors
.send(
set.network,
processor_messages::key_gen::CoordinatorMessage::GenerateKey {
id: processor_messages::key_gen::KeyGenId { session: set.session, attempt: 0 },
- params: frost::ThresholdParams::new(spec.t(), spec.n(), our_i.start).unwrap(),
+ params: frost::ThresholdParams::new(spec.t(), spec.n(&[]), our_i.start).unwrap(),
shares: u16::from(our_i.end) - u16::from(our_i.start),
},
)
@@ -370,29 +370,32 @@ async fn handle_processor_message<D: Db, P: P2p>(
signed: Transaction::empty_signed(),
}]
}
- key_gen::ProcessorMessage::InvalidCommitments { id: _, faulty } => {
+ key_gen::ProcessorMessage::InvalidCommitments { id, faulty } => {
// This doesn't need the ID since it's a Provided transaction which everyone will provide
- // With this provision comes explicit ordering (with regards to other RemoveParticipant
- // transactions) and group consensus
+ // With this provision comes explicit ordering (with regards to other
+ // RemoveParticipantDueToDkg transactions) and group consensus
// Accordingly, this can't be replayed
// It could be included on-chain early/late with regards to the chain's active attempt,
- // which attempt scheduling is written to avoid
- vec![Transaction::RemoveParticipant(faulty)]
+ // which attempt scheduling is written to make a non-issue by auto re-attempting once a
+ // fatal slash occurs, regardless of timing
+ vec![Transaction::RemoveParticipantDueToDkg { attempt: id.attempt, participant: faulty }]
}
key_gen::ProcessorMessage::Shares { id, mut shares } => {
// Create a MuSig-based machine to inform Substrate of this key generation
let nonces = crate::tributary::dkg_confirmation_nonces(key, spec, &mut txn, id.attempt);

+ let removed = crate::tributary::removed_as_of_dkg_attempt(&txn, genesis, id.attempt)
+ .expect("participating in a DKG attempt yet we didn't track who was removed yet?");
let our_i = spec
- .i(pub_key)
+ .i(&removed, pub_key)
.expect("processor message to DKG for a session we aren't a validator in");

// `tx_shares` needs to be done here as while it can be serialized from the HashMap
// without further context, it can't be deserialized without context
let mut tx_shares = Vec::with_capacity(shares.len());
for shares in &mut shares {
tx_shares.push(vec![]);
- for i in 1 ..= spec.n() {
+ for i in 1 ..= spec.n(&removed) {
let i = Participant::new(i).unwrap();
if our_i.contains(&i) {
if shares.contains_key(&i) {
@@ -415,13 +418,16 @@
}
key_gen::ProcessorMessage::InvalidShare { id, accuser, faulty, blame } => {
// Check if the MuSig signature had any errors as if so, we need to provide
- // RemoveParticipant
+ // RemoveParticipantDueToDkg
// As for the safety of calling error_generating_key_pair, the processor is presumed
// to only send InvalidShare or GeneratedKeyPair for a given attempt
let mut txs = if let Some(faulty) =
crate::tributary::error_generating_key_pair(&mut txn, key, spec, id.attempt)
{
- vec![Transaction::RemoveParticipant(faulty)]
+ vec![Transaction::RemoveParticipantDueToDkg {
+ attempt: id.attempt,
+ participant: faulty,
+ }]
} else {
vec![]
};
@@ -457,14 +463,14 @@ async fn handle_processor_message<D: Db, P: P2p>(
}]
}
Err(p) => {
- vec![Transaction::RemoveParticipant(p)]
+ vec![Transaction::RemoveParticipantDueToDkg { attempt: id.attempt, participant: p }]
}
}
}
// This is a response to the ordered VerifyBlame, which is why this satisfies the provided
// transaction's needs to be perfectly ordered
- key_gen::ProcessorMessage::Blame { id: _, participant } => {
- vec![Transaction::RemoveParticipant(participant)]
+ key_gen::ProcessorMessage::Blame { id, participant } => {
+ vec![Transaction::RemoveParticipantDueToDkg { attempt: id.attempt, participant }]
}
},
ProcessorMessage::Sign(msg) => match msg {
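The `Shares` handler above now bounds its collection loop by `spec.n(&removed)`, so shares are only gathered for participants who survive the removals tracked for this attempt, while our own index range (itself computed against `removed`) is skipped. Below is a simplified, self-contained sketch of that loop, using plain `u16` indices in place of `frost::Participant` and a single share map; it is an illustration under those assumptions, not the coordinator's actual code.

use std::collections::HashMap;
use std::ops::Range;

// Simplified model of collecting the shares to publish on the Tributary: iterate over the
// reduced participant count, skip our own index range, and take everyone else's share.
fn tx_shares_sketch(
  n_after_removals: u16,
  our_i: Range<u16>,
  shares: &mut HashMap<u16, Vec<u8>>,
) -> Vec<Vec<u8>> {
  let mut tx_shares = vec![];
  for i in 1 ..= n_after_removals {
    if our_i.contains(&i) {
      // The handler above also checks `shares.contains_key(&i)` here; in this sketch we
      // simply treat receiving our own share as a bug.
      assert!(!shares.contains_key(&i), "was sent our own share");
      continue;
    }
    tx_shares.push(shares.remove(&i).expect("missing share for a participating validator"));
  }
  tx_shares
}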
3 changes: 2 additions & 1 deletion coordinator/src/substrate/mod.rs
@@ -268,6 +268,7 @@ async fn handle_block<D: Db, Pro: Processors>(
let ValidatorSetsEvent::KeyGen { set, key_pair } = key_gen else {
panic!("KeyGen event wasn't KeyGen: {key_gen:?}");
};
+ let substrate_key = key_pair.0 .0;
processors
.send(
set.network,
@@ -289,7 +290,7 @@
)
.await;
let mut txn = db.txn();
- SeraiDkgCompleted::set(&mut txn, set, &());
+ SeraiDkgCompleted::set(&mut txn, set, &substrate_key);
HandledEvent::handle_event(&mut txn, hash, event_id);
txn.commit();
}
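`SeraiDkgCompleted` now records the generated substrate key instead of `()`. Combined with the `KeyToDkgAttempt` and `FatalSlashesAsOfDkgAttempt` entries added in `coordinator/src/tributary/db.rs` below, this plausibly gives the "deterministically obtain the context of who was removed" path from the commit message: key to attempt, attempt to removal set. The sketch uses plain `HashMap`s standing in for the coordinator's DB tables and is an assumption about the intended data flow, not the actual code.

use std::collections::HashMap;

struct DkgContext {
  // KeyToDkgAttempt: which DKG attempt produced a given substrate key.
  key_to_attempt: HashMap<[u8; 32], u32>,
  // FatalSlashesAsOfDkgAttempt: who had been fatally slashed as of that attempt.
  removed_as_of_attempt: HashMap<u32, Vec<[u8; 32]>>,
}

impl DkgContext {
  // Walk a completed key back to the removal set in effect when it was generated.
  fn removed_for_key(&self, substrate_key: &[u8; 32]) -> Option<&Vec<[u8; 32]>> {
    let attempt = self.key_to_attempt.get(substrate_key)?;
    self.removed_as_of_attempt.get(attempt)
  }
}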
11 changes: 7 additions & 4 deletions coordinator/src/tests/tributary/mod.rs
@@ -123,10 +123,13 @@ fn serialize_sign_data() {

#[test]
fn serialize_transaction() {
- test_read_write(Transaction::RemoveParticipant(
- frost::Participant::new(u16::try_from(OsRng.next_u64() >> 48).unwrap().saturating_add(1))
- .unwrap(),
- ));
+ test_read_write(Transaction::RemoveParticipantDueToDkg {
+ attempt: u32::try_from(OsRng.next_u64() >> 32).unwrap(),
+ participant: frost::Participant::new(
+ u16::try_from(OsRng.next_u64() >> 48).unwrap().saturating_add(1),
+ )
+ .unwrap(),
+ });

{
let mut commitments = vec![random_vec(&mut OsRng, 512)];
4 changes: 2 additions & 2 deletions coordinator/src/tests/tributary/sync.rs
@@ -29,7 +29,7 @@ async fn sync_test() {
let mut keys = new_keys(&mut OsRng);
let spec = new_spec(&mut OsRng, &keys);
// Ensure this can have a node fail
- assert!(spec.n() > spec.t());
+ assert!(spec.n(&[]) > spec.t());

let mut tributaries = new_tributaries(&keys, &spec)
.await
@@ -142,7 +142,7 @@ async fn sync_test() {
// Because only `t` validators are used in a commit, take n - t nodes offline
// leaving only `t` nodes. Which should force it to participate in the consensus
// of next blocks.
- let spares = usize::from(spec.n() - spec.t());
+ let spares = usize::from(spec.n(&[]) - spec.t());
for thread in p2p_threads.iter().take(spares) {
thread.abort();
}
9 changes: 7 additions & 2 deletions coordinator/src/tributary/db.rs
@@ -42,22 +42,26 @@ pub enum Accumulation {
NotReady,
}

+ // TODO: Move from genesis to set for indexing
create_db!(
Tributary {
SeraiBlockNumber: (hash: [u8; 32]) -> u64,
SeraiDkgRemoval: (spec: ValidatorSet, removing: [u8; 32]) -> (),
- SeraiDkgCompleted: (spec: ValidatorSet) -> (),
+ SeraiDkgCompleted: (spec: ValidatorSet) -> [u8; 32],

TributaryBlockNumber: (block: [u8; 32]) -> u32,
LastHandledBlock: (genesis: [u8; 32]) -> [u8; 32],
FatalSlashes: (genesis: [u8; 32]) -> Vec<[u8; 32]>,
+ FatalSlashesAsOfDkgAttempt: (genesis: [u8; 32], attempt: u32) -> Vec<[u8; 32]>,
+ FatalSlashesAsOfFatalSlash: (genesis: [u8; 32], fatally_slashed: [u8; 32]) -> Vec<[u8; 32]>,
FatallySlashed: (genesis: [u8; 32], account: [u8; 32]) -> (),
DkgShare: (genesis: [u8; 32], from: u16, to: u16) -> Vec<u8>,
PlanIds: (genesis: &[u8], block: u64) -> Vec<[u8; 32]>,
ConfirmationNonces: (genesis: [u8; 32], attempt: u32) -> HashMap<Participant, Vec<u8>>,
RemovalNonces:
(genesis: [u8; 32], removing: [u8; 32], attempt: u32) -> HashMap<Participant, Vec<u8>>,
DkgKeyPair: (genesis: [u8; 32], attempt: u32) -> KeyPair,
+ KeyToDkgAttempt: (key: [u8; 32]) -> u32,
DkgCompleted: (genesis: [u8; 32]) -> (),
LocallyDkgRemoved: (genesis: [u8; 32], validator: [u8; 32]) -> (),
AttemptDb: (genesis: [u8; 32], topic: &Topic) -> u32,
@@ -74,13 +78,14 @@ impl FatallySlashed {
Self::set(txn, genesis, account, &());
let mut existing = FatalSlashes::get(txn, genesis).unwrap_or_default();

- // Don't append if we already have it
+ // Don't append if we already have it, which can occur upon multiple faults
if existing.iter().any(|existing| existing == &account) {
return;
}

existing.push(account);
FatalSlashes::set(txn, genesis, &existing);
+ FatalSlashesAsOfFatalSlash::set(txn, genesis, account, &existing);
}
}

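The change to `FatallySlashed` above snapshots the full fatal-slash list as of each slash, alongside the running `FatalSlashes` list, presumably so later code can deterministically reconstruct who had been removed at a given point without replaying events. A minimal, self-contained sketch of that pattern, with a `Vec` and `HashMap` standing in for the DB entries and the `SlashBook` name invented for illustration:

use std::collections::HashMap;

#[derive(Default)]
struct SlashBook {
  // FatalSlashes: the running list of fatally slashed accounts.
  fatal_slashes: Vec<[u8; 32]>,
  // FatalSlashesAsOfFatalSlash: the list as it stood when each account was slashed.
  as_of_fatal_slash: HashMap<[u8; 32], Vec<[u8; 32]>>,
}

impl SlashBook {
  fn fatally_slash(&mut self, account: [u8; 32]) {
    // Don't append if we already have it, which can occur upon multiple faults.
    if self.fatal_slashes.iter().any(|existing| existing == &account) {
      return;
    }
    self.fatal_slashes.push(account);
    // Snapshot the list as of this slash, keyed by the newly slashed account.
    self.as_of_fatal_slash.insert(account, self.fatal_slashes.clone());
  }
}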
(Diffs for the remaining changed files are not shown.)

