# Sudo removal phase 1 (#980)
## Describe your changes

1. Prepare parachain and mainnet for sudo removal (a rough sketch of the intended direction follows the changed-files summary below)
Gauthamastro authored Aug 12, 2024
2 parents f0640ac + 55cc355 commit f0d8e78
Showing 19 changed files with 472 additions and 158 deletions.
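
Before the per-file diffs, a note on direction: this phase keeps `pallet_sudo` in both runtimes (the parachain chain spec below still sets a sudo key) while adding the governance genesis sections (council, technical committee, elections, democracy) that can later take over privileged calls. The snippet below is only a toy illustration of that end state; the origin names and the 2/3 threshold are assumptions, not taken from this commit.

```rust
// Toy model of "sudo vs. collective" privileged origins (hypothetical, not FRAME code).
enum Origin {
    Root,                                       // what the sudo key effectively grants today
    CouncilApproval { ayes: u32, seats: u32 },  // an elected collective voting on a call
}

fn is_privileged(origin: &Origin) -> bool {
    match origin {
        Origin::Root => true,
        // Assumed threshold: at least 2/3 of council seats must approve.
        Origin::CouncilApproval { ayes, seats } => ayes * 3 >= seats * 2,
    }
}

fn main() {
    // Phase 1: sudo is still present, so Root still works.
    assert!(is_privileged(&Origin::Root));
    // Later phases: privileged calls go through the council instead.
    assert!(is_privileged(&Origin::CouncilApproval { ayes: 5, seats: 7 }));
    assert!(!is_privileged(&Origin::CouncilApproval { ayes: 3, seats: 7 }));
    println!("governance checks passed");
}
```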
6 changes: 6 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default.

8 changes: 4 additions & 4 deletions Cargo.toml
@@ -123,7 +123,7 @@ orml-vesting = { git = "https://github.com/Polkadex-Substrate/orml-1.1.0.git", b
sp-io = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false }
sp-core = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false }
sp-runtime = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false }
sp-api = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false}
sp-api = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false }
sp-version = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false }
sp-staking = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false }
sp-keyring = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false }
@@ -172,7 +172,7 @@ sc-basic-authorship = { git = "https://github.com/paritytech/polkadot-sdk", bran
sc-consensus-babe-rpc = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false }
sc-authority-discovery = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false }
sc-finality-grandpa-rpc = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false }
substrate-wasm-builder = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0"}
substrate-wasm-builder = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" }
substrate-frame-rpc-system = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false }
pallet-nomination-pools = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false }
grandpa-primitives = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0", default-features = false, package = "sp-finality-grandpa" }
@@ -212,7 +212,7 @@ polkadot-runtime-common = { git = "https://github.com/paritytech/polkadot-sdk",
sp-consensus-aura = { git = "https://github.com/paritytech/polkadot-sdk", default-features = false, branch = "release-polkadot-v1.1.0" }

[patch.'https://github.com/w3f/ring-vrf']
bandersnatch_vrfs = { git = "https://github.com/w3f//ring-vrf.git", rev = "3ebdd261873da05124f4499c85a8e62d40411620"}
bandersnatch_vrfs = { git = "https://github.com/w3f//ring-vrf.git", rev = "3ebdd261873da05124f4499c85a8e62d40411620" }

[patch.'https://github.com/paritytech/polkadot-sdk']
substrate-wasm-builder = { git = "https://github.com/Polkadex-Substrate/polkadot-sdk", branch = "release-polkadot-v1.1.0"}
substrate-wasm-builder = { git = "https://github.com/Polkadex-Substrate/polkadot-sdk", branch = "release-polkadot-v1.1.0" }
4 changes: 2 additions & 2 deletions nodes/mainnet/Cargo.toml
@@ -26,7 +26,7 @@ clap = { version = "4.0.9", features = ["derive"] }
itertools = "0.10.1"
jsonrpsee = { version = "0.16.2", features = ["server"] }
# local dependencies
node-polkadex-runtime = { path = "../../runtimes/mainnet"}
node-polkadex-runtime = { path = "../../runtimes/mainnet" }
rpc-assets = { path = "../../rpc/assets" }
pallet-rewards-rpc = { path = "../../pallets/rewards/rpc" }
pallet-ocex-rpc = { path = "../../pallets/ocex/rpc" }
@@ -100,7 +100,7 @@ sc-consensus-babe = { git = "https://github.com/paritytech/polkadot-sdk", branch
sc-consensus-epochs = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" }
sp-tracing = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" }
sc-keystore = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" }
sc-service-test = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0"}
sc-service-test = { git = "https://github.com/paritytech/polkadot-sdk", branch = "release-polkadot-v1.1.0" }

[features]
default = []
4 changes: 4 additions & 0 deletions nodes/parachain/src/chain_spec.rs
@@ -251,8 +251,12 @@ fn create_genesis_config(
..Default::default()
},
sudo: parachain_polkadex_runtime::SudoConfig { key: Some(root_key) },
elections: Default::default(),
council: Default::default(),
technical_committee: Default::default(),
assets: Default::default(),
transaction_payment: Default::default(),
democracy: Default::default(),
}
}
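
The governance-related genesis sections added above (`elections`, `council`, `technical_committee`, `democracy`, alongside `assets` and `transaction_payment`) are all set to `Default::default()`, which for the collective-style pallets means starting with no members: the bodies are expected to be filled by on-chain elections after launch (an inference from the defaults, not something this diff states). A tiny standalone illustration, using hypothetical stand-in types:

```rust
// Stand-in structs (not the real runtime types) showing what `Default::default()`
// means for these genesis sections: empty membership at genesis.
#[derive(Debug, Default)]
struct CouncilConfig {
    members: Vec<String>, // AccountIds in the real runtime
}

#[derive(Debug, Default)]
struct TechnicalCommitteeConfig {
    members: Vec<String>,
}

fn main() {
    let council = CouncilConfig::default();
    let technical_committee = TechnicalCommitteeConfig::default();
    assert!(council.members.is_empty());
    assert!(technical_committee.members.is_empty());
    println!("{council:?} / {technical_committee:?}");
}
```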

5 changes: 4 additions & 1 deletion pallets/ocex/src/validator.rs
@@ -658,14 +658,17 @@ impl<T: Config> Pallet<T> {
return Err("Invalid egress message for withdraw trading fees");
}
},
IngressMessages::NewLMPEpoch(epoch) => Self::start_new_lmp_epoch(state, epoch)?,
IngressMessages::NewLMPEpoch(_epoch) => {
// Self::start_new_lmp_epoch(state, epoch)?
},
_ => {},
}
}
state_info.last_block = blk.saturated_into();
Ok(verified_egress_messages)
}

#[allow(dead_code)]
/// Reset the offchain state's LMP index and set the epoch
fn start_new_lmp_epoch(state: &mut OffchainState, epoch: u16) -> Result<(), &'static str> {
let mut config = if epoch > 1 {
9 changes: 4 additions & 5 deletions pallets/thea-message-handler/src/lib.rs
@@ -205,7 +205,8 @@ pub mod pallet {
let current_set_id = <ValidatorSetId<T>>::get();

match payload.message.payload_type {
PayloadType::ScheduledRotateValidators => {
PayloadType::ScheduledRotateValidators => {}, // Deprecated
PayloadType::ValidatorsRotated => {
// Thea message related to key change
match ValidatorSet::decode(&mut payload.message.data.as_ref()) {
Err(_err) => return Err(Error::<T>::ErrorDecodingValidatorSet.into()),
@@ -218,13 +219,11 @@
validator_set.set_id,
BoundedVec::truncate_from(validator_set.validators),
);
// The validator set has changed, so we also update the active validator set id here
<ValidatorSetId<T>>::put(current_set_id.saturating_add(1));
},
}
},
PayloadType::ValidatorsRotated => {
// We are checking if the validator set is changed, then we update it here too
<ValidatorSetId<T>>::put(current_set_id.saturating_add(1));
},
PayloadType::L1Deposit => {
// Normal Thea message
T::Executor::execute_deposits(
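
Read across the two hunks above, the payload dispatch now treats `ScheduledRotateValidators` as a deprecated no-op and does all of the rotation work in the `ValidatorsRotated` arm: decode the incoming `ValidatorSet`, store it as the authorities for the new set id, and bump the active set id. A standalone sketch of that consolidated flow (simplified types, no SCALE decoding or pallet storage; those details are assumptions filled in around the visible lines):

```rust
use std::collections::BTreeMap;

// Simplified stand-ins for the pallet's payload types and storage.
enum PayloadType {
    ScheduledRotateValidators,            // deprecated: accepted but ignored
    ValidatorsRotated(Vec<&'static str>), // carries the new validator set
}

struct Handler {
    validator_set_id: u64,
    authorities: BTreeMap<u64, Vec<&'static str>>, // set_id -> validators
}

impl Handler {
    fn handle(&mut self, payload: PayloadType) {
        match payload {
            // Old pre-announcement message type: kept so it no longer errors out, does nothing.
            PayloadType::ScheduledRotateValidators => {}
            // Key change: store the set and activate the next set id in one step.
            PayloadType::ValidatorsRotated(validators) => {
                let new_id = self.validator_set_id + 1;
                self.authorities.insert(new_id, validators);
                self.validator_set_id = new_id;
            }
        }
    }
}

fn main() {
    let mut h = Handler { validator_set_id: 0, authorities: BTreeMap::new() };
    h.handle(PayloadType::ScheduledRotateValidators); // no-op
    assert_eq!(h.validator_set_id, 0);
    h.handle(PayloadType::ValidatorsRotated(vec!["alice", "bob"]));
    assert_eq!(h.validator_set_id, 1);
    assert_eq!(h.authorities[&1], vec!["alice", "bob"]);
}
```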
21 changes: 6 additions & 15 deletions pallets/thea-message-handler/src/test.rs

Large diffs are not rendered by default.

182 changes: 96 additions & 86 deletions pallets/thea/src/lib.rs
@@ -40,7 +40,7 @@ use sp_runtime::{
use sp_std::collections::btree_set::BTreeSet;
use sp_std::prelude::*;
use thea_primitives::{
types::{Message, NetworkType, PayloadType},
types::{Message, PayloadType},
Network, ValidatorSet, GENESIS_AUTHORITY_SET_ID,
};

@@ -382,6 +382,7 @@ pub mod pallet {
stake: Balance,
) -> DispatchResult {
let signer = ensure_signed(origin)?;
// Testing relayer; this must be removed after the final audit
let expected_signer = <AllowListTestingRelayers<T>>::get(payload.network)
.ok_or(Error::<T>::NoRelayersFound)?;
ensure!(signer == expected_signer, Error::<T>::NotAnAllowlistedRelayer);
@@ -758,103 +759,112 @@ impl<T: Config> Pallet<T> {

fn change_authorities(
incoming: BoundedVec<T::TheaId, T::MaxAuthorities>, // n+1th set
queued: BoundedVec<T::TheaId, T::MaxAuthorities>, // n+ 2th set
_queued: BoundedVec<T::TheaId, T::MaxAuthorities>, // n+ 2th set
) {
// ( outgoing) -> (validators/incoming) -> (queued)
// nth epoch -> n+1th epoch -> n+2nd epoch
let id = Self::validator_set_id();
let outgoing = <Authorities<T>>::get(id); // nth set ( active ,current )
let new_id = id + 1u64;
let active_networks = <ActiveNetworks<T>>::get();
// We need to issue a new message if the validator set is changing,
// that is, the incoming set has different session keys from the outgoing set.
// This last message should be signed by the outgoing set
// Similar to how Grandpa's session change works.
// // We need to issue a new message if the validator set is changing,
// // that is, the incoming set has different session keys from the outgoing set.
// // This last message should be signed by the outgoing set
// // Similar to how Grandpa's session change works.
let incoming_set = BTreeSet::from_iter(incoming.to_vec());
if incoming_set != BTreeSet::from_iter(queued.to_vec()) {
let uncompressed_keys: Vec<[u8; 20]> = vec![];
// TODO: Uncomment the following when parsing is fixed for ethereum keys.
// for public_key in queued.clone().into_iter() {
// let public_key: sp_core::ecdsa::Public = public_key.into();
// if public_key.0 == [0u8; 33] {
// uncompressed_keys.push([0u8; 20]);
// continue;
// }
// if let Ok(compressed_key) = libsecp256k1::PublicKey::parse_compressed(&public_key.0)
// {
// let uncompressed_key = compressed_key.serialize();
// let uncompressed_key: [u8; 64] =
// if let Ok(uncompressed_key) = uncompressed_key[1..65].try_into() {
// uncompressed_key
// } else {
// log::error!(target: "thea", "Unable to slice last 64 bytes of uncompressed_key for Evm");
// Self::deposit_event(Event::<T>::UnableToSlicePublicKeyHash(
// public_key.into(),
// ));
// return;
// };
// let hash: [u8; 32] = sp_io::hashing::keccak_256(&uncompressed_key);
// if let Ok(address) = hash[12..32].try_into() {
// uncompressed_keys.push(address);
// } else {
// log::error!(target: "thea", "Unable to slice last 20 bytes of hash for Evm");
// Self::deposit_event(Event::<T>::UnableToSlicePublicKeyHash(
// public_key.into(),
// ));
// return;
// }
// } else {
// log::error!(target: "thea", "Unable to parse compressed key");
// Self::deposit_event(Event::<T>::UnableToParsePublicKey(public_key.into()));
// return;
// }
// }
for network in &active_networks {
let network_config = <NetworkConfig<T>>::get(*network);
let message = match network_config.network_type {
NetworkType::Evm => {
if let Some(payload) = ValidatorSet::new(uncompressed_keys.clone(), new_id)
{
Self::generate_payload(
PayloadType::ScheduledRotateValidators,
*network,
payload.encode(),
)
} else {
log::error!(target: "thea", "Unable to generate rotate validators payload");
Self::deposit_event(Event::<T>::UnableToGenerateValidatorSet(*network));
continue;
}
},
NetworkType::Parachain => {
if let Some(payload) = ValidatorSet::new(queued.clone(), new_id) {
Self::generate_payload(
PayloadType::ScheduledRotateValidators,
*network,
payload.encode(),
)
} else {
log::error!(target: "thea", "Unable to generate rotate validators payload");
Self::deposit_event(Event::<T>::UnableToGenerateValidatorSet(*network));
continue;
}
},
};
<OutgoingNonce<T>>::insert(message.network, message.nonce);
<OutgoingMessages<T>>::insert(message.network, message.nonce, message);
}
<NextAuthorities<T>>::put(queued);
}
// if incoming_set != BTreeSet::from_iter(queued.to_vec()) {
// let uncompressed_keys: Vec<[u8; 20]> = vec![];
// // TODO: Uncomment the following when parsing is fixed for ethereum keys.
// // for public_key in queued.clone().into_iter() {
// // let public_key: sp_core::ecdsa::Public = public_key.into();
// // if public_key.0 == [0u8; 33] {
// // uncompressed_keys.push([0u8; 20]);
// // continue;
// // }
// // if let Ok(compressed_key) = libsecp256k1::PublicKey::parse_compressed(&public_key.0)
// // {
// // let uncompressed_key = compressed_key.serialize();
// // let uncompressed_key: [u8; 64] =
// // if let Ok(uncompressed_key) = uncompressed_key[1..65].try_into() {
// // uncompressed_key
// // } else {
// // log::error!(target: "thea", "Unable to slice last 64 bytes of uncompressed_key for Evm");
// // Self::deposit_event(Event::<T>::UnableToSlicePublicKeyHash(
// // public_key.into(),
// // ));
// // return;
// // };
// // let hash: [u8; 32] = sp_io::hashing::keccak_256(&uncompressed_key);
// // if let Ok(address) = hash[12..32].try_into() {
// // uncompressed_keys.push(address);
// // } else {
// // log::error!(target: "thea", "Unable to slice last 20 bytes of hash for Evm");
// // Self::deposit_event(Event::<T>::UnableToSlicePublicKeyHash(
// // public_key.into(),
// // ));
// // return;
// // }
// // } else {
// // log::error!(target: "thea", "Unable to parse compressed key");
// // Self::deposit_event(Event::<T>::UnableToParsePublicKey(public_key.into()));
// // return;
// // }
// // }
// for network in &active_networks {
// let network_config = <NetworkConfig<T>>::get(*network);
// let message = match network_config.network_type {
// NetworkType::Evm => {
// if let Some(payload) = ValidatorSet::new(uncompressed_keys.clone(), new_id)
// {
// Self::generate_payload(
// PayloadType::ScheduledRotateValidators,
// *network,
// payload.encode(),
// )
// } else {
// log::error!(target: "thea", "Unable to generate rotate validators payload");
// Self::deposit_event(Event::<T>::UnableToGenerateValidatorSet(*network));
// continue;
// }
// },
// NetworkType::Parachain => {
// if let Some(payload) = ValidatorSet::new(queued.clone(), new_id) {
// Self::generate_payload(
// PayloadType::ScheduledRotateValidators,
// *network,
// payload.encode(),
// )
// } else {
// log::error!(target: "thea", "Unable to generate rotate validators payload");
// Self::deposit_event(Event::<T>::UnableToGenerateValidatorSet(*network));
// continue;
// }
// },
// };
// <OutgoingNonce<T>>::insert(message.network, message.nonce);
// <OutgoingMessages<T>>::insert(message.network, message.nonce, message);
// }
// <NextAuthorities<T>>::put(queued);
// }
if incoming_set != BTreeSet::from_iter(outgoing.to_vec()) {
// This happens when a new era starts, i.e. at the end of the last epoch
<Authorities<T>>::insert(new_id, incoming);
<ValidatorSetId<T>>::put(new_id);
for network in active_networks {
let message =
Self::generate_payload(PayloadType::ValidatorsRotated, network, Vec::new()); //Empty data means activate the next set_id
<OutgoingNonce<T>>::insert(network, message.nonce);
<OutgoingMessages<T>>::insert(network, message.nonce, message);
if let Some(payload) = ValidatorSet::new(incoming.clone(), new_id) {
let message = Self::generate_payload(
PayloadType::ValidatorsRotated,
network,
payload.encode(),
);
<OutgoingNonce<T>>::insert(network, message.nonce);
<OutgoingMessages<T>>::insert(network, message.nonce, message);
} else {
log::error!(target: "thea", "Unable to generate rotate validators payload");
Self::deposit_event(Event::<T>::UnableToGenerateValidatorSet(network));
continue;
}
}
<Authorities<T>>::insert(new_id, incoming);
<ValidatorSetId<T>>::put(new_id);
}
}
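
The net effect of the rewritten `change_authorities` (together with the test changes in the next file): the `ScheduledRotateValidators` pre-announcement is no longer emitted, and when the incoming authority set actually differs from the outgoing one the pallet stores the new set, bumps the set id, and sends one `ValidatorsRotated` message per active network whose payload is the encoded incoming `ValidatorSet` (previously the payload was empty). A standalone sketch of that flow with simplified stand-in types (not the pallet's real storage API):

```rust
use std::collections::BTreeMap;

#[derive(Clone, Debug)]
struct ValidatorSet {
    set_id: u64,
    validators: Vec<&'static str>,
}

#[derive(Debug)]
struct Message {
    network: u8,
    nonce: u64,
    payload: ValidatorSet, // was an empty payload before this change
}

struct Thea {
    validator_set_id: u64,
    nonces: BTreeMap<u8, u64>,
    outgoing: Vec<Message>,
}

impl Thea {
    fn change_authorities(
        &mut self,
        incoming: Vec<&'static str>,
        outgoing: Vec<&'static str>,
        active_networks: &[u8],
    ) {
        // Only rotate when the set really changed (the updated test relies on this).
        if incoming != outgoing {
            let new_id = self.validator_set_id + 1;
            for &network in active_networks {
                let nonce = self.nonces.entry(network).or_insert(0);
                *nonce += 1;
                self.outgoing.push(Message {
                    network,
                    nonce: *nonce,
                    payload: ValidatorSet { set_id: new_id, validators: incoming.clone() },
                });
            }
            self.validator_set_id = new_id;
        }
    }
}

fn main() {
    let mut thea = Thea { validator_set_id: 0, nonces: BTreeMap::new(), outgoing: Vec::new() };
    // Same set on both sides: no message, nonce stays at 0.
    thea.change_authorities(vec!["alice"], vec!["alice"], &[1]);
    assert!(thea.outgoing.is_empty());
    // The set changes: one message per active network carrying the new set.
    thea.change_authorities(vec!["bob"], vec!["alice"], &[1]);
    assert_eq!(thea.outgoing[0].network, 1);
    assert_eq!(thea.outgoing[0].nonce, 1);
    assert_eq!(thea.outgoing[0].payload.set_id, 1);
    assert_eq!(thea.outgoing[0].payload.validators, vec!["bob"]);
    assert_eq!(thea.validator_set_id, 1);
}
```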

16 changes: 6 additions & 10 deletions pallets/thea/src/tests.rs
@@ -83,8 +83,12 @@ fn test_session_change() {
// Simulating the on_new_session to the last epoch of an era.
Thea::on_new_session(false, authorities.into_iter(), queued.clone().into_iter());
assert!(Thea::validator_set_id() == 0);
assert!(Thea::outgoing_nonce(1) == 1); // Thea validator session change message is generated here
assert!(Thea::outgoing_nonce(1) == 0); // The Thea validator session change message is not generated here; it is only generated when the session actually changes

// Simulating the on_new_session to the first epoch of the next era.
Thea::on_new_session(false, queued.clone().into_iter(), queued.clone().into_iter());
assert!(Thea::validator_set_id() == 1);
assert!(Thea::outgoing_nonce(1) == 1);
let message = Thea::get_outgoing_messages(1, 1).unwrap();
assert_eq!(message.nonce, 1);
let validator_set: ValidatorSet<<Test as Config>::TheaId> =
@@ -93,14 +97,6 @@
queued.iter().map(|(_, public)| public.clone()).collect();
assert_eq!(validator_set.set_id, 1);
assert_eq!(validator_set.validators, queued_validators);

// Simulating the on_new_session to the first epoch of the next era.
Thea::on_new_session(false, queued.clone().into_iter(), queued.clone().into_iter());
assert!(Thea::validator_set_id() == 1);
assert!(Thea::outgoing_nonce(1) == 2);
let message = Thea::get_outgoing_messages(1, 2).unwrap();
assert_eq!(message.nonce, 2);
assert!(message.data.is_empty());
})
}

@@ -320,11 +316,11 @@ fn test_report_misbehaviour_happy_path() {
assert_ok!(Thea::report_misbehaviour(RuntimeOrigin::signed(fisherman), network, 1));
})
}

use frame_support::{
assert_noop,
traits::{fungible::MutateHold, tokens::Precision},
};
use thea_primitives::types::NetworkType;
use thea_primitives::types::{AssetMetadata, IncomingMessage, SignedMessage, THEA_HOLD_REASON};

#[test]
