diff --git a/.github/actions/bitcoin/action.yml b/.github/actions/bitcoin/action.yml index 6f628172d..2765571f7 100644 --- a/.github/actions/bitcoin/action.yml +++ b/.github/actions/bitcoin/action.yml @@ -37,4 +37,4 @@ runs: - name: Bitcoin Regtest Daemon shell: bash - run: PATH=$PATH:/usr/bin ./orchestration/dev/networks/bitcoin/run.sh -daemon + run: PATH=$PATH:/usr/bin ./orchestration/dev/networks/bitcoin/run.sh -txindex -daemon diff --git a/.github/actions/build-dependencies/action.yml b/.github/actions/build-dependencies/action.yml index 5994b7232..47d775222 100644 --- a/.github/actions/build-dependencies/action.yml +++ b/.github/actions/build-dependencies/action.yml @@ -42,8 +42,8 @@ runs: shell: bash run: | cargo install svm-rs - svm install 0.8.25 - svm use 0.8.25 + svm install 0.8.26 + svm use 0.8.26 # - name: Cache Rust # uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index da0bdcfa1..b994a3cbd 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -73,6 +73,15 @@ jobs: - name: Run rustfmt run: cargo +${{ steps.nightly.outputs.version }} fmt -- --check + - name: Install foundry + uses: foundry-rs/foundry-toolchain@8f1998e9878d786675189ef566a2e4bf24869773 + with: + version: nightly-41d4e5437107f6f42c7711123890147bc736a609 + cache: false + + - name: Run forge fmt + run: FOUNDRY_FMT_SORT_INPUTS=false FOUNDRY_FMT_LINE_LENGTH=100 FOUNDRY_FMT_TAB_WIDTH=2 FOUNDRY_FMT_BRACKET_SPACING=true FOUNDRY_FMT_INT_TYPES=preserve forge fmt --check $(find . -iname "*.sol") + machete: runs-on: ubuntu-latest steps: diff --git a/.github/workflows/networks-tests.yml b/.github/workflows/networks-tests.yml index f346b9861..920449784 100644 --- a/.github/workflows/networks-tests.yml +++ b/.github/workflows/networks-tests.yml @@ -30,8 +30,9 @@ jobs: run: | GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \ -p bitcoin-serai \ + -p build-solidity-contracts \ + -p ethereum-schnorr-contract \ -p alloy-simple-request-transport \ - -p ethereum-serai \ -p serai-ethereum-relayer \ -p monero-io \ -p monero-generators \ diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 05c259725..d207e9cdf 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -39,7 +39,25 @@ jobs: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \ -p serai-message-queue \ -p serai-processor-messages \ - -p serai-processor \ + -p serai-processor-key-gen \ + -p serai-processor-view-keys \ + -p serai-processor-frost-attempt-manager \ + -p serai-processor-primitives \ + -p serai-processor-scanner \ + -p serai-processor-scheduler-primitives \ + -p serai-processor-utxo-scheduler-primitives \ + -p serai-processor-utxo-scheduler \ + -p serai-processor-transaction-chaining-scheduler \ + -p serai-processor-smart-contract-scheduler \ + -p serai-processor-signers \ + -p serai-processor-bin \ + -p serai-bitcoin-processor \ + -p serai-processor-ethereum-primitives \ + -p serai-processor-ethereum-deployer \ + -p serai-processor-ethereum-router \ + -p serai-processor-ethereum-erc20 \ + -p serai-ethereum-processor \ + -p serai-monero-processor \ -p tendermint-machine \ -p tributary-chain \ -p serai-coordinator \ diff --git a/Cargo.lock b/Cargo.lock index ff21fe66c..131081ab0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -184,17 +184,6 @@ dependencies = [ "serde", ] -[[package]] -name = "alloy-json-abi" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "299d2a937b6c60968df3dad2a988b0f0e03277b344639a4f7a31bd68e6285e59" -dependencies = [ - "alloy-primitives", - "alloy-sol-type-parser", - "serde", -] - [[package]] name = "alloy-json-rpc" version = "0.3.1" @@ -426,7 +415,6 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "71c4d842beb7a6686d04125603bc57614d5ed78bf95e4753274db3db4ba95214" dependencies = [ - "alloy-json-abi", "alloy-sol-macro-input", "const-hex", "heck 0.5.0", @@ -445,33 +433,21 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1306e8d3c9e6e6ecf7a39ffaf7291e73a5f655a2defd366ee92c2efebcdf7fee" dependencies = [ - "alloy-json-abi", "const-hex", "dunce", "heck 0.5.0", "proc-macro2", "quote", - "serde_json", "syn 2.0.77", "syn-solidity", ] -[[package]] -name = "alloy-sol-type-parser" -version = "0.8.0" -source = "git+https://github.com/alloy-rs/core?rev=446b9d2fbce12b88456152170709a3eaac929af0#446b9d2fbce12b88456152170709a3eaac929af0" -dependencies = [ - "serde", - "winnow 0.6.18", -] - [[package]] name = "alloy-sol-types" version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "577e262966e92112edbd15b1b2c0947cc434d6e8311df96d3329793fe8047da9" dependencies = [ - "alloy-json-abi", "alloy-primitives", "alloy-sol-macro", "const-hex", @@ -1318,6 +1294,10 @@ dependencies = [ "semver 0.6.0", ] +[[package]] +name = "build-solidity-contracts" +version = "0.1.1" + [[package]] name = "bumpalo" version = "3.16.0" @@ -2480,24 +2460,22 @@ dependencies = [ ] [[package]] -name = "ethereum-serai" +name = "ethereum-schnorr-contract" version = "0.1.0" dependencies = [ - "alloy-consensus", "alloy-core", - "alloy-network", "alloy-node-bindings", "alloy-provider", "alloy-rpc-client", "alloy-rpc-types-eth", "alloy-simple-request-transport", "alloy-sol-types", - "flexible-transcript", + "build-solidity-contracts", "group", "k256", - "modular-frost", "rand_core", - "thiserror", + "sha3", + "subtle", "tokio", ] @@ -8120,6 +8098,34 @@ dependencies = [ "sp-runtime", ] +[[package]] +name = "serai-bitcoin-processor" +version = "0.1.0" +dependencies = [ + "bitcoin-serai", + "borsh", + "ciphersuite", + "dkg", + "hex", + "log", + "modular-frost", + "parity-scale-codec", + "rand_core", + "secp256k1", + "serai-client", + "serai-db", + "serai-processor-bin", + "serai-processor-key-gen", + "serai-processor-primitives", + "serai-processor-scanner", + "serai-processor-scheduler-primitives", + "serai-processor-signers", + "serai-processor-transaction-chaining-scheduler", + "serai-processor-utxo-scheduler-primitives", + "tokio", + "zalloc", +] + [[package]] name = "serai-client" version = "0.1.0" @@ -8128,13 +8134,14 @@ dependencies = [ "bitcoin", "bitvec", "blake2", + "borsh", "ciphersuite", "dockertest", "frame-system", "frost-schnorrkel", "hex", "modular-frost", - "monero-wallet", + "monero-address", "multiaddr", "parity-scale-codec", "rand_core", @@ -8315,6 +8322,46 @@ dependencies = [ name = "serai-env" version = "0.1.0" +[[package]] +name = "serai-ethereum-processor" +version = "0.1.0" +dependencies = [ + "alloy-core", + "alloy-provider", + "alloy-rlp", + "alloy-rpc-client", + "alloy-rpc-types-eth", + "alloy-simple-request-transport", + "alloy-transport", + "borsh", + "ciphersuite", + "const-hex", + "dkg", + "ethereum-schnorr-contract", + "hex", + "k256", + "log", + "modular-frost", + "parity-scale-codec", + "rand_core", + "serai-client", + "serai-db", + "serai-env", + "serai-processor-bin", + 
"serai-processor-ethereum-erc20", + "serai-processor-ethereum-primitives", + "serai-processor-ethereum-router", + "serai-processor-key-gen", + "serai-processor-messages", + "serai-processor-primitives", + "serai-processor-scanner", + "serai-processor-scheduler-primitives", + "serai-processor-signers", + "serai-processor-smart-contract-scheduler", + "tokio", + "zalloc", +] + [[package]] name = "serai-ethereum-relayer" version = "0.1.0" @@ -8343,7 +8390,6 @@ dependencies = [ "serai-coordinator-tests", "serai-docker-tests", "serai-message-queue-tests", - "serai-processor", "serai-processor-tests", "serde", "serde_json", @@ -8459,6 +8505,36 @@ dependencies = [ "zeroize", ] +[[package]] +name = "serai-monero-processor" +version = "0.1.0" +dependencies = [ + "borsh", + "ciphersuite", + "dalek-ff-group", + "dkg", + "log", + "modular-frost", + "monero-simple-request-rpc", + "monero-wallet", + "parity-scale-codec", + "rand_chacha", + "rand_core", + "serai-client", + "serai-processor-bin", + "serai-processor-key-gen", + "serai-processor-primitives", + "serai-processor-scanner", + "serai-processor-scheduler-primitives", + "serai-processor-signers", + "serai-processor-utxo-scheduler", + "serai-processor-utxo-scheduler-primitives", + "serai-processor-view-keys", + "tokio", + "zalloc", + "zeroize", +] + [[package]] name = "serai-no-std-tests" version = "0.1.0" @@ -8559,44 +8635,125 @@ dependencies = [ ] [[package]] -name = "serai-processor" +name = "serai-processor-bin" version = "0.1.0" dependencies = [ - "async-trait", - "bitcoin-serai", - "blake2", "borsh", "ciphersuite", - "const-hex", - "dalek-ff-group", "dkg", - "dockertest", - "ec-divisors", "env_logger", - "ethereum-serai", - "flexible-transcript", - "frost-schnorrkel", "hex", - "k256", "log", - "modular-frost", - "monero-simple-request-rpc", - "monero-wallet", "parity-scale-codec", - "rand_chacha", - "rand_core", - "secp256k1", "serai-client", "serai-db", - "serai-docker-tests", "serai-env", "serai-message-queue", + "serai-processor-key-gen", "serai-processor-messages", - "serde_json", - "sp-application-crypto", - "thiserror", + "serai-processor-primitives", + "serai-processor-scanner", + "serai-processor-scheduler-primitives", + "serai-processor-signers", "tokio", - "zalloc", + "zeroize", +] + +[[package]] +name = "serai-processor-ethereum-deployer" +version = "0.1.0" +dependencies = [ + "alloy-consensus", + "alloy-core", + "alloy-provider", + "alloy-rpc-types-eth", + "alloy-simple-request-transport", + "alloy-sol-macro", + "alloy-sol-types", + "alloy-transport", + "build-solidity-contracts", + "serai-processor-ethereum-primitives", +] + +[[package]] +name = "serai-processor-ethereum-erc20" +version = "0.1.0" +dependencies = [ + "alloy-core", + "alloy-provider", + "alloy-rpc-types-eth", + "alloy-simple-request-transport", + "alloy-sol-macro", + "alloy-sol-types", + "alloy-transport", + "tokio", +] + +[[package]] +name = "serai-processor-ethereum-primitives" +version = "0.1.0" +dependencies = [ + "alloy-consensus", + "alloy-core", + "group", + "k256", +] + +[[package]] +name = "serai-processor-ethereum-router" +version = "0.1.0" +dependencies = [ + "alloy-consensus", + "alloy-core", + "alloy-provider", + "alloy-rpc-types-eth", + "alloy-simple-request-transport", + "alloy-sol-macro-expander", + "alloy-sol-macro-input", + "alloy-sol-types", + "alloy-transport", + "build-solidity-contracts", + "ethereum-schnorr-contract", + "group", + "serai-client", + "serai-processor-ethereum-deployer", + "serai-processor-ethereum-erc20", + 
"serai-processor-ethereum-primitives", + "syn 2.0.77", + "syn-solidity", +] + +[[package]] +name = "serai-processor-frost-attempt-manager" +version = "0.1.0" +dependencies = [ + "borsh", + "log", + "modular-frost", + "parity-scale-codec", + "rand_core", + "serai-db", + "serai-processor-messages", + "serai-validator-sets-primitives", +] + +[[package]] +name = "serai-processor-key-gen" +version = "0.1.0" +dependencies = [ + "blake2", + "borsh", + "ciphersuite", + "dkg", + "ec-divisors", + "flexible-transcript", + "log", + "parity-scale-codec", + "rand_chacha", + "rand_core", + "serai-db", + "serai-processor-messages", + "serai-validator-sets-primitives", "zeroize", ] @@ -8606,6 +8763,7 @@ version = "0.1.0" dependencies = [ "borsh", "dkg", + "hex", "parity-scale-codec", "serai-coins-primitives", "serai-in-instructions-primitives", @@ -8613,6 +8771,86 @@ dependencies = [ "serai-validator-sets-primitives", ] +[[package]] +name = "serai-processor-primitives" +version = "0.1.0" +dependencies = [ + "borsh", + "group", + "log", + "parity-scale-codec", + "serai-coins-primitives", + "serai-primitives", + "tokio", +] + +[[package]] +name = "serai-processor-scanner" +version = "0.1.0" +dependencies = [ + "borsh", + "group", + "hex", + "log", + "parity-scale-codec", + "serai-coins-primitives", + "serai-db", + "serai-in-instructions-primitives", + "serai-primitives", + "serai-processor-messages", + "serai-processor-primitives", + "serai-processor-scheduler-primitives", + "tokio", +] + +[[package]] +name = "serai-processor-scheduler-primitives" +version = "0.1.0" +dependencies = [ + "borsh", + "ciphersuite", + "modular-frost", + "parity-scale-codec", + "serai-db", +] + +[[package]] +name = "serai-processor-signers" +version = "0.1.0" +dependencies = [ + "borsh", + "ciphersuite", + "frost-schnorrkel", + "log", + "modular-frost", + "parity-scale-codec", + "rand_core", + "serai-db", + "serai-in-instructions-primitives", + "serai-primitives", + "serai-processor-frost-attempt-manager", + "serai-processor-messages", + "serai-processor-primitives", + "serai-processor-scanner", + "serai-processor-scheduler-primitives", + "serai-validator-sets-primitives", + "tokio", + "zeroize", +] + +[[package]] +name = "serai-processor-smart-contract-scheduler" +version = "0.1.0" +dependencies = [ + "borsh", + "group", + "parity-scale-codec", + "serai-db", + "serai-processor-primitives", + "serai-processor-scanner", + "serai-processor-scheduler-primitives", +] + [[package]] name = "serai-processor-tests" version = "0.1.0" @@ -8623,7 +8861,6 @@ dependencies = [ "curve25519-dalek", "dkg", "dockertest", - "ethereum-serai", "hex", "k256", "monero-simple-request-rpc", @@ -8635,13 +8872,60 @@ dependencies = [ "serai-docker-tests", "serai-message-queue", "serai-message-queue-tests", - "serai-processor", "serai-processor-messages", "serde_json", "tokio", "zeroize", ] +[[package]] +name = "serai-processor-transaction-chaining-scheduler" +version = "0.1.0" +dependencies = [ + "borsh", + "group", + "parity-scale-codec", + "serai-db", + "serai-primitives", + "serai-processor-primitives", + "serai-processor-scanner", + "serai-processor-scheduler-primitives", + "serai-processor-utxo-scheduler-primitives", +] + +[[package]] +name = "serai-processor-utxo-scheduler" +version = "0.1.0" +dependencies = [ + "borsh", + "group", + "parity-scale-codec", + "serai-db", + "serai-primitives", + "serai-processor-primitives", + "serai-processor-scanner", + "serai-processor-scheduler-primitives", + "serai-processor-utxo-scheduler-primitives", +] + 
+[[package]]
+name = "serai-processor-utxo-scheduler-primitives"
+version = "0.1.0"
+dependencies = [
+ "borsh",
+ "serai-primitives",
+ "serai-processor-primitives",
+ "serai-processor-scanner",
+ "serai-processor-scheduler-primitives",
+]
+
+[[package]]
+name = "serai-processor-view-keys"
+version = "0.1.0"
+dependencies = [
+ "ciphersuite",
+]
+
 [[package]]
 name = "serai-reproducible-runtime-tests"
 version = "0.1.0"
diff --git a/Cargo.toml b/Cargo.toml
index bce4ebe38..d0c91a300 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -46,8 +46,9 @@ members = [
   "networks/bitcoin",
+  "networks/ethereum/build-contracts",
+  "networks/ethereum/schnorr",
   "networks/ethereum/alloy-simple-request-transport",
-  "networks/ethereum",
   "networks/ethereum/relayer",
   "networks/monero/io",
@@ -70,7 +71,28 @@ members = [
   "message-queue",
   "processor/messages",
-  "processor",
+
+  "processor/key-gen",
+  "processor/view-keys",
+  "processor/frost-attempt-manager",
+
+  "processor/primitives",
+  "processor/scanner",
+  "processor/scheduler/primitives",
+  "processor/scheduler/utxo/primitives",
+  "processor/scheduler/utxo/standard",
+  "processor/scheduler/utxo/transaction-chaining",
+  "processor/scheduler/smart-contract",
+  "processor/signers",
+
+  "processor/bin",
+  "processor/bitcoin",
+  "processor/ethereum/primitives",
+  "processor/ethereum/deployer",
+  "processor/ethereum/router",
+  "processor/ethereum/erc20",
+  "processor/ethereum",
+  "processor/monero",
   "coordinator/tributary/tendermint",
   "coordinator/tributary",
@@ -182,9 +204,6 @@ directories-next = { path = "patches/directories-next" }
 # The official pasta_curves repo doesn't support Zeroize
 pasta_curves = { git = "https://github.com/kayabaNerve/pasta_curves", rev = "a46b5be95cacbff54d06aad8d3bbcba42e05d616" }
-# https://github.com/alloy-rs/core/issues/717
-alloy-sol-type-parser = { git = "https://github.com/alloy-rs/core", rev = "446b9d2fbce12b88456152170709a3eaac929af0" }
-
 [workspace.lints.clippy]
 unwrap_or_default = "allow"
 borrow_as_ptr = "deny"
diff --git a/common/db/src/create_db.rs b/common/db/src/create_db.rs
index abd86e464..50fe51f7f 100644
--- a/common/db/src/create_db.rs
+++ b/common/db/src/create_db.rs
@@ -38,12 +38,21 @@ pub fn serai_db_key(
 #[macro_export]
 macro_rules! create_db {
   ($db_name: ident {
-    $($field_name: ident: ($($arg: ident: $arg_type: ty),*) -> $field_type: ty$(,)?)*
+    $(
+      $field_name: ident:
+      $(<$($generic_name: tt: $generic_type: tt),+>)?(
+        $($arg: ident: $arg_type: ty),*
+      ) -> $field_type: ty$(,)?
+    )*
   }) => {
     $(
       #[derive(Clone, Debug)]
-      pub(crate) struct $field_name;
-      impl $field_name {
+      pub(crate) struct $field_name$(
+        <$($generic_name: $generic_type),+>
+      )?$(
+        (core::marker::PhantomData<($($generic_name),+)>)
+      )?;
+      impl$(<$($generic_name: $generic_type),+>)? $field_name$(<$($generic_name),+>)? {
        pub(crate) fn key($($arg: $arg_type),*) -> Vec<u8> {
          use scale::Encode;
          $crate::serai_db_key(
@@ -52,18 +61,43 @@ macro_rules!
create_db { ($($arg),*).encode() ) } - pub(crate) fn set(txn: &mut impl DbTxn $(, $arg: $arg_type)*, data: &$field_type) { - let key = $field_name::key($($arg),*); + pub(crate) fn set( + txn: &mut impl DbTxn + $(, $arg: $arg_type)*, + data: &$field_type + ) { + let key = Self::key($($arg),*); txn.put(&key, borsh::to_vec(data).unwrap()); } - pub(crate) fn get(getter: &impl Get, $($arg: $arg_type),*) -> Option<$field_type> { - getter.get($field_name::key($($arg),*)).map(|data| { + pub(crate) fn get( + getter: &impl Get, + $($arg: $arg_type),* + ) -> Option<$field_type> { + getter.get(Self::key($($arg),*)).map(|data| { borsh::from_slice(data.as_ref()).unwrap() }) } + // Returns a PhantomData of all generic types so if the generic was only used in the value, + // not the keys, this doesn't have unused generic types #[allow(dead_code)] - pub(crate) fn del(txn: &mut impl DbTxn $(, $arg: $arg_type)*) { - txn.del(&$field_name::key($($arg),*)) + pub(crate) fn del( + txn: &mut impl DbTxn + $(, $arg: $arg_type)* + ) -> core::marker::PhantomData<($($($generic_name),+)?)> { + txn.del(&Self::key($($arg),*)); + core::marker::PhantomData + } + + pub(crate) fn take( + txn: &mut impl DbTxn + $(, $arg: $arg_type)* + ) -> Option<$field_type> { + let key = Self::key($($arg),*); + let res = txn.get(&key).map(|data| borsh::from_slice(data.as_ref()).unwrap()); + if res.is_some() { + txn.del(key); + } + res } } )* @@ -73,19 +107,30 @@ macro_rules! create_db { #[macro_export] macro_rules! db_channel { ($db_name: ident { - $($field_name: ident: ($($arg: ident: $arg_type: ty),*) -> $field_type: ty$(,)?)* + $($field_name: ident: + $(<$($generic_name: tt: $generic_type: tt),+>)?( + $($arg: ident: $arg_type: ty),* + ) -> $field_type: ty$(,)? + )* }) => { $( create_db! { $db_name { - $field_name: ($($arg: $arg_type,)* index: u32) -> $field_type, + $field_name: $(<$($generic_name: $generic_type),+>)?( + $($arg: $arg_type,)* + index: u32 + ) -> $field_type } } - impl $field_name { - pub(crate) fn send(txn: &mut impl DbTxn $(, $arg: $arg_type)*, value: &$field_type) { + impl$(<$($generic_name: $generic_type),+>)? $field_name$(<$($generic_name),+>)? { + pub(crate) fn send( + txn: &mut impl DbTxn + $(, $arg: $arg_type)* + , value: &$field_type + ) { // Use index 0 to store the amount of messages - let messages_sent_key = $field_name::key($($arg),*, 0); + let messages_sent_key = Self::key($($arg,)* 0); let messages_sent = txn.get(&messages_sent_key).map(|counter| { u32::from_le_bytes(counter.try_into().unwrap()) }).unwrap_or(0); @@ -96,19 +141,22 @@ macro_rules! 
db_channel {
       // at the same time
       let index_to_use = messages_sent + 2;
-        $field_name::set(txn, $($arg),*, index_to_use, value);
+        Self::set(txn, $($arg,)* index_to_use, value);
       }
-      pub(crate) fn try_recv(txn: &mut impl DbTxn $(, $arg: $arg_type)*) -> Option<$field_type> {
-        let messages_recvd_key = $field_name::key($($arg),*, 1);
+      pub(crate) fn try_recv(
+        txn: &mut impl DbTxn
+        $(, $arg: $arg_type)*
+      ) -> Option<$field_type> {
+        let messages_recvd_key = Self::key($($arg,)* 1);
         let messages_recvd = txn.get(&messages_recvd_key).map(|counter| {
           u32::from_le_bytes(counter.try_into().unwrap())
         }).unwrap_or(0);
         let index_to_read = messages_recvd + 2;
-        let res = $field_name::get(txn, $($arg),*, index_to_read);
+        let res = Self::get(txn, $($arg,)* index_to_read);
         if res.is_some() {
-          $field_name::del(txn, $($arg),*, index_to_read);
+          Self::del(txn, $($arg,)* index_to_read);
           txn.put(&messages_recvd_key, (messages_recvd + 1).to_le_bytes());
         }
         res
diff --git a/deny.toml b/deny.toml
index e5c72f0cf..d09fc8ebb 100644
--- a/deny.toml
+++ b/deny.toml
@@ -40,13 +40,31 @@ allow = [
 exceptions = [
   { allow = ["AGPL-3.0"], name = "serai-env" },
-  { allow = ["AGPL-3.0"], name = "ethereum-serai" },
+  { allow = ["AGPL-3.0"], name = "ethereum-schnorr-contract" },
   { allow = ["AGPL-3.0"], name = "serai-ethereum-relayer" },
   { allow = ["AGPL-3.0"], name = "serai-message-queue" },
   { allow = ["AGPL-3.0"], name = "serai-processor-messages" },
-  { allow = ["AGPL-3.0"], name = "serai-processor" },
+
+  { allow = ["AGPL-3.0"], name = "serai-processor-key-gen" },
+  { allow = ["AGPL-3.0"], name = "serai-processor-frost-attempt-manager" },
+
+  { allow = ["AGPL-3.0"], name = "serai-processor-scanner" },
+  { allow = ["AGPL-3.0"], name = "serai-processor-scheduler-primitives" },
+  { allow = ["AGPL-3.0"], name = "serai-processor-utxo-scheduler-primitives" },
+  { allow = ["AGPL-3.0"], name = "serai-processor-standard-scheduler" },
+  { allow = ["AGPL-3.0"], name = "serai-processor-transaction-chaining-scheduler" },
+  { allow = ["AGPL-3.0"], name = "serai-processor-smart-contract-scheduler" },
+  { allow = ["AGPL-3.0"], name = "serai-processor-signers" },
+
+  { allow = ["AGPL-3.0"], name = "serai-bitcoin-processor" },
+  { allow = ["AGPL-3.0"], name = "serai-processor-ethereum-primitives" },
+  { allow = ["AGPL-3.0"], name = "serai-processor-ethereum-deployer" },
+  { allow = ["AGPL-3.0"], name = "serai-processor-ethereum-router" },
+  { allow = ["AGPL-3.0"], name = "serai-processor-ethereum-erc20" },
+  { allow = ["AGPL-3.0"], name = "serai-ethereum-processor" },
+  { allow = ["AGPL-3.0"], name = "serai-monero-processor" },
   { allow = ["AGPL-3.0"], name = "tributary-chain" },
   { allow = ["AGPL-3.0"], name = "serai-coordinator" },
diff --git a/message-queue/src/main.rs b/message-queue/src/main.rs
index c43cc3c84..03c580ce4 100644
--- a/message-queue/src/main.rs
+++ b/message-queue/src/main.rs
@@ -72,6 +72,9 @@ pub(crate) fn queue_message(
   // Assert one, and only one of these, is the coordinator
   assert!(matches!(meta.from, Service::Coordinator) ^ matches!(meta.to, Service::Coordinator));
+  // Lock the queue
+  let queue_lock = QUEUES.read().unwrap()[&(meta.from, meta.to)].write().unwrap();
+
   // Verify (from, to, intent) hasn't been prior seen
   fn key(domain: &'static [u8], key: impl AsRef<[u8]>) -> Vec<u8> {
     [&[u8::try_from(domain.len()).unwrap()], domain, key.as_ref()].concat()
   }
@@ -93,7 +96,7 @@ pub(crate) fn queue_message(
   DbTxn::put(&mut txn, intent_key, []);
   // Queue it
-  let id = QUEUES.read().unwrap()[&(meta.from,
meta.to)].write().unwrap().queue_message( + let id = queue_lock.queue_message( &mut txn, QueuedMessage { from: meta.from, diff --git a/networks/bitcoin/src/wallet/send.rs b/networks/bitcoin/src/wallet/send.rs index ccb020b21..276f536ed 100644 --- a/networks/bitcoin/src/wallet/send.rs +++ b/networks/bitcoin/src/wallet/send.rs @@ -44,7 +44,7 @@ pub enum TransactionError { #[error("fee was too low to pass the default minimum fee rate")] TooLowFee, #[error("not enough funds for these payments")] - NotEnoughFunds, + NotEnoughFunds { inputs: u64, payments: u64, fee: u64 }, #[error("transaction was too large")] TooLargeTransaction, } @@ -213,7 +213,11 @@ impl SignableTransaction { } if input_sat < (payment_sat + needed_fee) { - Err(TransactionError::NotEnoughFunds)?; + Err(TransactionError::NotEnoughFunds { + inputs: input_sat, + payments: payment_sat, + fee: needed_fee, + })?; } // If there's a change address, check if there's change to give it @@ -258,9 +262,9 @@ impl SignableTransaction { res } - /// Returns the outputs this transaction will create. - pub fn outputs(&self) -> &[TxOut] { - &self.tx.output + /// Returns the transaction, sans witness, this will create if signed. + pub fn transaction(&self) -> &Transaction { + &self.tx } /// Create a multisig machine for this transaction. diff --git a/networks/bitcoin/tests/wallet.rs b/networks/bitcoin/tests/wallet.rs index a290122b4..45371414b 100644 --- a/networks/bitcoin/tests/wallet.rs +++ b/networks/bitcoin/tests/wallet.rs @@ -195,10 +195,10 @@ async_sequential! { Err(TransactionError::TooLowFee), ); - assert_eq!( + assert!(matches!( SignableTransaction::new(inputs.clone(), &[(addr(), inputs[0].value() * 2)], None, None, FEE), - Err(TransactionError::NotEnoughFunds), - ); + Err(TransactionError::NotEnoughFunds { .. 
}), + )); assert_eq!( SignableTransaction::new(inputs, &vec![(addr(), 1000); 10000], None, None, FEE), diff --git a/networks/ethereum/.gitignore b/networks/ethereum/.gitignore deleted file mode 100644 index 2dccdce9b..000000000 --- a/networks/ethereum/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -# Solidity build outputs -cache -artifacts diff --git a/networks/ethereum/Cargo.toml b/networks/ethereum/Cargo.toml deleted file mode 100644 index a91b83c54..000000000 --- a/networks/ethereum/Cargo.toml +++ /dev/null @@ -1,49 +0,0 @@ -[package] -name = "ethereum-serai" -version = "0.1.0" -description = "An Ethereum library supporting Schnorr signing and on-chain verification" -license = "AGPL-3.0-only" -repository = "https://github.com/serai-dex/serai/tree/develop/networks/ethereum" -authors = ["Luke Parker ", "Elizabeth Binks "] -edition = "2021" -publish = false -rust-version = "1.79" - -[package.metadata.docs.rs] -all-features = true -rustdoc-args = ["--cfg", "docsrs"] - -[lints] -workspace = true - -[dependencies] -thiserror = { version = "1", default-features = false } - -rand_core = { version = "0.6", default-features = false, features = ["std"] } - -transcript = { package = "flexible-transcript", path = "../../crypto/transcript", default-features = false, features = ["recommended"] } - -group = { version = "0.13", default-features = false } -k256 = { version = "^0.13.1", default-features = false, features = ["std", "ecdsa", "arithmetic"] } -frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false, features = ["secp256k1"] } - -alloy-core = { version = "0.8", default-features = false } -alloy-sol-types = { version = "0.8", default-features = false, features = ["json"] } -alloy-consensus = { version = "0.3", default-features = false, features = ["k256"] } -alloy-network = { version = "0.3", default-features = false } -alloy-rpc-types-eth = { version = "0.3", default-features = false } -alloy-rpc-client = { version = "0.3", default-features = false } -alloy-simple-request-transport = { path = "./alloy-simple-request-transport", default-features = false } -alloy-provider = { version = "0.3", default-features = false } - -alloy-node-bindings = { version = "0.3", default-features = false, optional = true } - -[dev-dependencies] -frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false, features = ["tests"] } - -tokio = { version = "1", features = ["macros"] } - -alloy-node-bindings = { version = "0.3", default-features = false } - -[features] -tests = ["alloy-node-bindings", "frost/tests"] diff --git a/networks/ethereum/README.md b/networks/ethereum/README.md deleted file mode 100644 index 0090b26bd..000000000 --- a/networks/ethereum/README.md +++ /dev/null @@ -1,15 +0,0 @@ -# Ethereum - -This package contains Ethereum-related functionality, specifically deploying and -interacting with Serai contracts. - -While `monero-serai` and `bitcoin-serai` are general purpose libraries, -`ethereum-serai` is Serai specific. If any of the utilities are generally -desired, please fork and maintain your own copy to ensure the desired -functionality is preserved, or open an issue to request we make this library -general purpose. 
- -### Dependencies - -- solc -- [Foundry](https://github.com/foundry-rs/foundry) diff --git a/networks/ethereum/build-contracts/Cargo.toml b/networks/ethereum/build-contracts/Cargo.toml new file mode 100644 index 000000000..41d1f993b --- /dev/null +++ b/networks/ethereum/build-contracts/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "build-solidity-contracts" +version = "0.1.1" +description = "A helper function to build Solidity contracts" +license = "MIT" +repository = "https://github.com/serai-dex/serai/tree/develop/networks/ethereum/build-contracts" +authors = ["Luke Parker "] +edition = "2021" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true diff --git a/processor/LICENSE b/networks/ethereum/build-contracts/LICENSE similarity index 94% rename from processor/LICENSE rename to networks/ethereum/build-contracts/LICENSE index c425427c8..41d5a2616 100644 --- a/processor/LICENSE +++ b/networks/ethereum/build-contracts/LICENSE @@ -1,6 +1,6 @@ AGPL-3.0-only license -Copyright (c) 2022-2023 Luke Parker +Copyright (c) 2022-2024 Luke Parker This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License Version 3 as diff --git a/networks/ethereum/build-contracts/README.md b/networks/ethereum/build-contracts/README.md new file mode 100644 index 000000000..437f15c28 --- /dev/null +++ b/networks/ethereum/build-contracts/README.md @@ -0,0 +1,4 @@ +# Build Solidity Contracts + +A helper function to build Solidity contracts. This is intended to be called +from within build scripts. diff --git a/networks/ethereum/build-contracts/src/lib.rs b/networks/ethereum/build-contracts/src/lib.rs new file mode 100644 index 000000000..b1c9c87f2 --- /dev/null +++ b/networks/ethereum/build-contracts/src/lib.rs @@ -0,0 +1,103 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +use std::{path::PathBuf, fs, process::Command}; + +/// Build contracts from the specified path, outputting the artifacts to the specified path. +/// +/// Requires solc 0.8.26. +pub fn build( + include_paths: &[&str], + contracts_path: &str, + artifacts_path: &str, +) -> Result<(), String> { + if !fs::exists(artifacts_path) + .map_err(|e| format!("couldn't check if artifacts directory already exists: {e:?}"))? + { + fs::create_dir(artifacts_path) + .map_err(|e| format!("couldn't create the non-existent artifacts directory: {e:?}"))?; + } + + println!("cargo:rerun-if-changed={contracts_path}/*"); + println!("cargo:rerun-if-changed={artifacts_path}/*"); + + for line in String::from_utf8( + Command::new("solc") + .args(["--version"]) + .output() + .map_err(|_| "couldn't fetch solc output".to_string())? + .stdout, + ) + .map_err(|_| "solc stdout wasn't UTF-8")? + .lines() + { + if let Some(version) = line.strip_prefix("Version: ") { + let version = + version.split('+').next().ok_or_else(|| "no value present on line".to_string())?; + if version != "0.8.26" { + Err(format!("version was {version}, 0.8.26 required"))? 
+      }
+    }
+  }
+
+  #[rustfmt::skip]
+  let mut args = vec![
+    "--base-path", ".",
+    "-o", artifacts_path, "--overwrite",
+    "--bin", "--bin-runtime", "--abi",
+    "--via-ir", "--optimize",
+    "--no-color",
+  ];
+  for include_path in include_paths {
+    args.push("--include-path");
+    args.push(include_path);
+  }
+  let mut args = args.into_iter().map(str::to_string).collect::<Vec<_>>();
+
+  let mut queue = vec![PathBuf::from(contracts_path)];
+  while let Some(folder) = queue.pop() {
+    for entry in fs::read_dir(folder).map_err(|e| format!("couldn't read directory: {e:?}"))? {
+      let entry = entry.map_err(|e| format!("couldn't read directory in entry: {e:?}"))?;
+      let kind = entry.file_type().map_err(|e| format!("couldn't fetch file type: {e:?}"))?;
+      if kind.is_dir() {
+        queue.push(entry.path());
+      }
+
+      if kind.is_file() &&
+        entry
+          .file_name()
+          .into_string()
+          .map_err(|_| "file name wasn't a valid UTF-8 string".to_string())?
+          .ends_with(".sol")
+      {
+        args.push(
+          entry
+            .path()
+            .into_os_string()
+            .into_string()
+            .map_err(|_| "file path wasn't a valid UTF-8 string".to_string())?,
+        );
+      }
+
+      // We purposely ignore symlinks to avoid recursive structures
+    }
+  }
+
+  let solc = Command::new("solc")
+    .args(args.clone())
+    .output()
+    .map_err(|_| "couldn't fetch solc output".to_string())?;
+  let stderr =
+    String::from_utf8(solc.stderr).map_err(|_| "solc stderr wasn't UTF-8".to_string())?;
+  if !solc.status.success() {
+    Err(format!("solc (`{}`) didn't successfully execute: {stderr}", args.join(" ")))?;
+  }
+  for line in stderr.lines() {
+    if line.contains("Error:") {
+      Err(format!("solc (`{}`) output had error: {stderr}", args.join(" ")))?;
+    }
+  }
+
+  Ok(())
+}
diff --git a/networks/ethereum/build.rs b/networks/ethereum/build.rs
deleted file mode 100644
index 38fcfe002..000000000
--- a/networks/ethereum/build.rs
+++ /dev/null
@@ -1,41 +0,0 @@
-use std::process::Command;
-
-fn main() {
-  println!("cargo:rerun-if-changed=contracts/*");
-  println!("cargo:rerun-if-changed=artifacts/*");
-
-  for line in String::from_utf8(Command::new("solc").args(["--version"]).output().unwrap().stdout)
-    .unwrap()
-    .lines()
-  {
-    if let Some(version) = line.strip_prefix("Version: ") {
-      let version = version.split('+').next().unwrap();
-      assert_eq!(version, "0.8.25");
-    }
-  }
-
-  #[rustfmt::skip]
-  let args = [
-    "--base-path", ".",
-    "-o", "./artifacts", "--overwrite",
-    "--bin", "--abi",
-    "--via-ir", "--optimize",
-
-    "./contracts/IERC20.sol",
-
-    "./contracts/Schnorr.sol",
-    "./contracts/Deployer.sol",
-    "./contracts/Sandbox.sol",
-    "./contracts/Router.sol",
-
-    "./src/tests/contracts/Schnorr.sol",
-    "./src/tests/contracts/ERC20.sol",
-
-    "--no-color",
-  ];
-  let solc = Command::new("solc").args(args).output().unwrap();
-  assert!(solc.status.success());
-  for line in String::from_utf8(solc.stderr).unwrap().lines() {
-    assert!(!line.starts_with("Error:"));
-  }
-}
diff --git a/networks/ethereum/contracts/Deployer.sol b/networks/ethereum/contracts/Deployer.sol
deleted file mode 100644
index 475be4c1b..000000000
--- a/networks/ethereum/contracts/Deployer.sol
+++ /dev/null
@@ -1,52 +0,0 @@
-// SPDX-License-Identifier: AGPLv3
-pragma solidity ^0.8.0;
-
-/*
-The expected deployment process of the Router is as follows:
-
-1) A transaction deploying Deployer is made. Then, a deterministic signature is
-  created such that an account with an unknown private key is the creator of
-  the contract. Anyone can fund this address, and once anyone does, the
-  transaction deploying Deployer can be published by anyone.
No other - transaction may be made from that account. - -2) Anyone deploys the Router through the Deployer. This uses a sequential nonce - such that meet-in-the-middle attacks, with complexity 2**80, aren't feasible. - While such attacks would still be feasible if the Deployer's address was - controllable, the usage of a deterministic signature with a NUMS method - prevents that. - -This doesn't have any denial-of-service risks and will resolve once anyone steps -forward as deployer. This does fail to guarantee an identical address across -every chain, though it enables letting anyone efficiently ask the Deployer for -the address (with the Deployer having an identical address on every chain). - -Unfortunately, guaranteeing identical addresses aren't feasible. We'd need the -Deployer contract to use a consistent salt for the Router, yet the Router must -be deployed with a specific public key for Serai. Since Ethereum isn't able to -determine a valid public key (one the result of a Serai DKG) from a dishonest -public key, we have to allow multiple deployments with Serai being the one to -determine which to use. - -The alternative would be to have a council publish the Serai key on-Ethereum, -with Serai verifying the published result. This would introduce a DoS risk in -the council not publishing the correct key/not publishing any key. -*/ - -contract Deployer { - event Deployment(bytes32 indexed init_code_hash, address created); - - error DeploymentFailed(); - - function deploy(bytes memory init_code) external { - address created; - assembly { - created := create(0, add(init_code, 0x20), mload(init_code)) - } - if (created == address(0)) { - revert DeploymentFailed(); - } - // These may be emitted out of order upon re-entrancy - emit Deployment(keccak256(init_code), created); - } -} diff --git a/networks/ethereum/contracts/Router.sol b/networks/ethereum/contracts/Router.sol deleted file mode 100644 index c5e1efa2f..000000000 --- a/networks/ethereum/contracts/Router.sol +++ /dev/null @@ -1,222 +0,0 @@ -// SPDX-License-Identifier: AGPLv3 -pragma solidity ^0.8.0; - -import "./IERC20.sol"; - -import "./Schnorr.sol"; -import "./Sandbox.sol"; - -contract Router { - // Nonce is incremented for each batch of transactions executed/key update - uint256 public nonce; - - // Current public key's x-coordinate - // This key must always have the parity defined within the Schnorr contract - bytes32 public seraiKey; - - struct OutInstruction { - address to; - Call[] calls; - - uint256 value; - } - - struct Signature { - bytes32 c; - bytes32 s; - } - - event SeraiKeyUpdated( - uint256 indexed nonce, - bytes32 indexed key, - Signature signature - ); - event InInstruction( - address indexed from, - address indexed coin, - uint256 amount, - bytes instruction - ); - // success is a uint256 representing a bitfield of transaction successes - event Executed( - uint256 indexed nonce, - bytes32 indexed batch, - uint256 success, - Signature signature - ); - - // error types - error InvalidKey(); - error InvalidSignature(); - error InvalidAmount(); - error FailedTransfer(); - error TooManyTransactions(); - - modifier _updateSeraiKeyAtEndOfFn( - uint256 _nonce, - bytes32 key, - Signature memory sig - ) { - if ( - (key == bytes32(0)) || - ((bytes32(uint256(key) % Schnorr.Q)) != key) - ) { - revert InvalidKey(); - } - - _; - - seraiKey = key; - emit SeraiKeyUpdated(_nonce, key, sig); - } - - constructor(bytes32 _seraiKey) _updateSeraiKeyAtEndOfFn( - 0, - _seraiKey, - Signature({ c: bytes32(0), s: bytes32(0) }) - ) { - 
nonce = 1; - } - - // updateSeraiKey validates the given Schnorr signature against the current - // public key, and if successful, updates the contract's public key to the - // given one. - function updateSeraiKey( - bytes32 _seraiKey, - Signature calldata sig - ) external _updateSeraiKeyAtEndOfFn(nonce, _seraiKey, sig) { - bytes memory message = - abi.encodePacked("updateSeraiKey", block.chainid, nonce, _seraiKey); - nonce++; - - if (!Schnorr.verify(seraiKey, message, sig.c, sig.s)) { - revert InvalidSignature(); - } - } - - function inInstruction( - address coin, - uint256 amount, - bytes memory instruction - ) external payable { - if (coin == address(0)) { - if (amount != msg.value) { - revert InvalidAmount(); - } - } else { - (bool success, bytes memory res) = - address(coin).call( - abi.encodeWithSelector( - IERC20.transferFrom.selector, - msg.sender, - address(this), - amount - ) - ); - - // Require there was nothing returned, which is done by some non-standard - // tokens, or that the ERC20 contract did in fact return true - bool nonStandardResOrTrue = - (res.length == 0) || abi.decode(res, (bool)); - if (!(success && nonStandardResOrTrue)) { - revert FailedTransfer(); - } - } - - /* - Due to fee-on-transfer tokens, emitting the amount directly is frowned upon. - The amount instructed to transfer may not actually be the amount - transferred. - - If we add nonReentrant to every single function which can effect the - balance, we can check the amount exactly matches. This prevents transfers of - less value than expected occurring, at least, not without an additional - transfer to top up the difference (which isn't routed through this contract - and accordingly isn't trying to artificially create events). - - If we don't add nonReentrant, a transfer can be started, and then a new - transfer for the difference can follow it up (again and again until a - rounding error is reached). This contract would believe all transfers were - done in full, despite each only being done in part (except for the last - one). - - Given fee-on-transfer tokens aren't intended to be supported, the only - token planned to be supported is Dai and it doesn't have any fee-on-transfer - logic, fee-on-transfer tokens aren't even able to be supported at this time, - we simply classify this entire class of tokens as non-standard - implementations which induce undefined behavior. It is the Serai network's - role not to add support for any non-standard implementations. - */ - emit InInstruction(msg.sender, coin, amount, instruction); - } - - // execute accepts a list of transactions to execute as well as a signature. - // if signature verification passes, the given transactions are executed. - // if signature verification fails, this function will revert. 
- function execute( - OutInstruction[] calldata transactions, - Signature calldata sig - ) external { - if (transactions.length > 256) { - revert TooManyTransactions(); - } - - bytes memory message = - abi.encode("execute", block.chainid, nonce, transactions); - uint256 executed_with_nonce = nonce; - // This prevents re-entrancy from causing double spends yet does allow - // out-of-order execution via re-entrancy - nonce++; - - if (!Schnorr.verify(seraiKey, message, sig.c, sig.s)) { - revert InvalidSignature(); - } - - uint256 successes; - for (uint256 i = 0; i < transactions.length; i++) { - bool success; - - // If there are no calls, send to `to` the value - if (transactions[i].calls.length == 0) { - (success, ) = transactions[i].to.call{ - value: transactions[i].value, - gas: 5_000 - }(""); - } else { - // If there are calls, ignore `to`. Deploy a new Sandbox and proxy the - // calls through that - // - // We could use a single sandbox in order to reduce gas costs, yet that - // risks one person creating an approval that's hooked before another - // user's intended action executes, in order to drain their coins - // - // While technically, that would be a flaw in the sandboxed flow, this - // is robust and prevents such flaws from being possible - // - // We also don't want people to set state via the Sandbox and expect it - // future available when anyone else could set a distinct value - Sandbox sandbox = new Sandbox(); - (success, ) = address(sandbox).call{ - value: transactions[i].value, - // TODO: Have the Call specify the gas up front - gas: 350_000 - }( - abi.encodeWithSelector( - Sandbox.sandbox.selector, - transactions[i].calls - ) - ); - } - - assembly { - successes := or(successes, shl(i, success)) - } - } - emit Executed( - executed_with_nonce, - keccak256(message), - successes, - sig - ); - } -} diff --git a/networks/ethereum/contracts/Sandbox.sol b/networks/ethereum/contracts/Sandbox.sol deleted file mode 100644 index a82a3afda..000000000 --- a/networks/ethereum/contracts/Sandbox.sol +++ /dev/null @@ -1,48 +0,0 @@ -// SPDX-License-Identifier: AGPLv3 -pragma solidity ^0.8.24; - -struct Call { - address to; - uint256 value; - bytes data; -} - -// A minimal sandbox focused on gas efficiency. -// -// The first call is executed if any of the calls fail, making it a fallback. -// All other calls are executed sequentially. 
-contract Sandbox { - error AlreadyCalled(); - error CallsFailed(); - - function sandbox(Call[] calldata calls) external payable { - // Prevent re-entrancy due to this executing arbitrary calls from anyone - // and anywhere - bool called; - assembly { called := tload(0) } - if (called) { - revert AlreadyCalled(); - } - assembly { tstore(0, 1) } - - // Execute the calls, starting from 1 - for (uint256 i = 1; i < calls.length; i++) { - (bool success, ) = - calls[i].to.call{ value: calls[i].value }(calls[i].data); - - // If this call failed, execute the fallback (call 0) - if (!success) { - (success, ) = - calls[0].to.call{ value: address(this).balance }(calls[0].data); - // If this call also failed, revert entirely - if (!success) { - revert CallsFailed(); - } - return; - } - } - - // We don't clear the re-entrancy guard as this contract should never be - // called again, so there's no reason to spend the effort - } -} diff --git a/networks/ethereum/contracts/Schnorr.sol b/networks/ethereum/contracts/Schnorr.sol deleted file mode 100644 index 8edcdffd6..000000000 --- a/networks/ethereum/contracts/Schnorr.sol +++ /dev/null @@ -1,44 +0,0 @@ -// SPDX-License-Identifier: AGPLv3 -pragma solidity ^0.8.0; - -// see https://github.com/noot/schnorr-verify for implementation details -library Schnorr { - // secp256k1 group order - uint256 constant public Q = - 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141; - - // Fixed parity for the public keys used in this contract - // This avoids spending a word passing the parity in a similar style to - // Bitcoin's Taproot - uint8 constant public KEY_PARITY = 27; - - error InvalidSOrA(); - error MalformedSignature(); - - // px := public key x-coord, where the public key has a parity of KEY_PARITY - // message := 32-byte hash of the message - // c := schnorr signature challenge - // s := schnorr signature - function verify( - bytes32 px, - bytes memory message, - bytes32 c, - bytes32 s - ) internal pure returns (bool) { - // ecrecover = (m, v, r, s) -> key - // We instead pass the following to obtain the nonce (not the key) - // Then we hash it and verify it matches the challenge - bytes32 sa = bytes32(Q - mulmod(uint256(s), uint256(px), Q)); - bytes32 ca = bytes32(Q - mulmod(uint256(c), uint256(px), Q)); - - // For safety, we want each input to ecrecover to be 0 (sa, px, ca) - // The ecreover precomple checks `r` and `s` (`px` and `ca`) are non-zero - // That leaves us to check `sa` are non-zero - if (sa == 0) revert InvalidSOrA(); - address R = ecrecover(sa, KEY_PARITY, px, ca); - if (R == address(0)) revert MalformedSignature(); - - // Check the signature is correct by rebuilding the challenge - return c == keccak256(abi.encodePacked(R, px, message)); - } -} diff --git a/networks/ethereum/relayer/README.md b/networks/ethereum/relayer/README.md index beed4b724..fc2d36fdd 100644 --- a/networks/ethereum/relayer/README.md +++ b/networks/ethereum/relayer/README.md @@ -1,4 +1,4 @@ # Ethereum Transaction Relayer -This server collects Ethereum router commands to be published, offering an RPC -to fetch them. +This server collects Ethereum transactions to be published, offering an RPC to +fetch them. 
diff --git a/networks/ethereum/relayer/src/main.rs b/networks/ethereum/relayer/src/main.rs index 545930040..6424c90f5 100644 --- a/networks/ethereum/relayer/src/main.rs +++ b/networks/ethereum/relayer/src/main.rs @@ -40,8 +40,8 @@ async fn main() { db }; - // Start command recipience server - // This should not be publicly exposed + // Start transaction recipience server + // This MUST NOT be publicly exposed // TODO: Add auth tokio::spawn({ let db = db.clone(); @@ -58,25 +58,27 @@ async fn main() { let mut buf = vec![0; usize::try_from(msg_len).unwrap()]; let Ok(_) = socket.read_exact(&mut buf).await else { break }; - if buf.len() < 5 { + if buf.len() < (4 + 1) { break; } let nonce = u32::from_le_bytes(buf[.. 4].try_into().unwrap()); let mut txn = db.txn(); + // Save the transaction txn.put(nonce.to_le_bytes(), &buf[4 ..]); txn.commit(); let Ok(()) = socket.write_all(&[1]).await else { break }; - log::info!("received signed command #{nonce}"); + log::info!("received transaction to publish (nonce {nonce})"); } }); } } }); - // Start command fetch server + // Start transaction fetch server // 5132 ^ ((b'E' << 8) | b'R') + 1 + // TODO: JSON-RPC server which returns this as JSON? let server = TcpListener::bind("0.0.0.0:20831").await.unwrap(); loop { let (mut socket, _) = server.accept().await.unwrap(); @@ -84,16 +86,17 @@ async fn main() { tokio::spawn(async move { let db = db.clone(); loop { - // Nonce to get the router comamnd for + // Nonce to get the unsigned transaction for let mut buf = vec![0; 4]; let Ok(_) = socket.read_exact(&mut buf).await else { break }; - let command = db.get(&buf[.. 4]).unwrap_or(vec![]); - let Ok(()) = socket.write_all(&u32::try_from(command.len()).unwrap().to_le_bytes()).await + let transaction = db.get(&buf[.. 4]).unwrap_or(vec![]); + let Ok(()) = + socket.write_all(&u32::try_from(transaction.len()).unwrap().to_le_bytes()).await else { break; }; - let Ok(()) = socket.write_all(&command).await else { break }; + let Ok(()) = socket.write_all(&transaction).await else { break }; } }); } diff --git a/networks/ethereum/schnorr/Cargo.toml b/networks/ethereum/schnorr/Cargo.toml new file mode 100644 index 000000000..2e9597c86 --- /dev/null +++ b/networks/ethereum/schnorr/Cargo.toml @@ -0,0 +1,42 @@ +[package] +name = "ethereum-schnorr-contract" +version = "0.1.0" +description = "A Solidity contract to verify Schnorr signatures" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/networks/ethereum/schnorr" +authors = ["Luke Parker ", "Elizabeth Binks "] +edition = "2021" +rust-version = "1.81" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +subtle = { version = "2", default-features = false, features = ["std"] } +sha3 = { version = "0.10", default-features = false, features = ["std"] } +group = { version = "0.13", default-features = false, features = ["alloc"] } +k256 = { version = "^0.13.1", default-features = false, features = ["std", "arithmetic"] } + +[build-dependencies] +build-solidity-contracts = { path = "../build-contracts", version = "0.1" } + +[dev-dependencies] +rand_core = { version = "0.6", default-features = false, features = ["std"] } + +k256 = { version = "^0.13.1", default-features = false, features = ["ecdsa"] } + +alloy-core = { version = "0.8", default-features = false } +alloy-sol-types = { version = "0.8", default-features = false } + +alloy-simple-request-transport = { path = 
"../../../networks/ethereum/alloy-simple-request-transport", default-features = false } +alloy-rpc-types-eth = { version = "0.3", default-features = false } +alloy-rpc-client = { version = "0.3", default-features = false } +alloy-provider = { version = "0.3", default-features = false } + +alloy-node-bindings = { version = "0.3", default-features = false } + +tokio = { version = "1", default-features = false, features = ["macros"] } diff --git a/networks/ethereum/LICENSE b/networks/ethereum/schnorr/LICENSE similarity index 94% rename from networks/ethereum/LICENSE rename to networks/ethereum/schnorr/LICENSE index c425427c8..41d5a2616 100644 --- a/networks/ethereum/LICENSE +++ b/networks/ethereum/schnorr/LICENSE @@ -1,6 +1,6 @@ AGPL-3.0-only license -Copyright (c) 2022-2023 Luke Parker +Copyright (c) 2022-2024 Luke Parker This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License Version 3 as diff --git a/networks/ethereum/schnorr/README.md b/networks/ethereum/schnorr/README.md new file mode 100644 index 000000000..410cf5205 --- /dev/null +++ b/networks/ethereum/schnorr/README.md @@ -0,0 +1,5 @@ +# Ethereum Schnorr Contract + +An Ethereum contract to verify Schnorr signatures. + +This crate will fail to build if `solc` is not installed and available. diff --git a/networks/ethereum/schnorr/build.rs b/networks/ethereum/schnorr/build.rs new file mode 100644 index 000000000..cf12f948c --- /dev/null +++ b/networks/ethereum/schnorr/build.rs @@ -0,0 +1,4 @@ +fn main() { + let artifacts_path = std::env::var("OUT_DIR").unwrap().to_string() + "/ethereum-schnorr-contract"; + build_solidity_contracts::build(&[], "contracts", &artifacts_path).unwrap(); +} diff --git a/networks/ethereum/schnorr/contracts/Schnorr.sol b/networks/ethereum/schnorr/contracts/Schnorr.sol new file mode 100644 index 000000000..7405051ac --- /dev/null +++ b/networks/ethereum/schnorr/contracts/Schnorr.sol @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: AGPL-3.0-only +pragma solidity ^0.8.26; + +// See https://github.com/noot/schnorr-verify for implementation details +library Schnorr { + // secp256k1 group order + uint256 private constant Q = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141; + + // We fix the key to have: + // 1) An even y-coordinate + // 2) An x-coordinate < Q + uint8 private constant KEY_PARITY = 27; + + // px := public key x-coordinate, where the public key has an even y-coordinate + // message := the message signed + // c := Schnorr signature challenge + // s := Schnorr signature solution + function verify(bytes32 px, bytes32 message, bytes32 c, bytes32 s) internal pure returns (bool) { + // ecrecover = (m, v, r, s) -> key + // We instead pass the following to obtain the nonce (not the key) + // Then we hash it and verify it matches the challenge + bytes32 sa = bytes32(Q - mulmod(uint256(s), uint256(px), Q)); + bytes32 ca = bytes32(Q - mulmod(uint256(c), uint256(px), Q)); + + /* + The ecrecover precompile checks `r` and `s` (`px` and `ca`) are non-zero, + banning the two keys with zero for their x-coordinate and zero challenge. + Each has negligible probability of occuring (assuming zero x-coordinates + are even on-curve in the first place). + + `sa` is not checked to be non-zero yet it does not need to be. The inverse + of it is never taken. 
+    */
+    address R = ecrecover(sa, KEY_PARITY, px, ca);
+    // The ecrecover failed
+    if (R == address(0)) return false;
+
+    // Check the signature is correct by rebuilding the challenge
+    return c == keccak256(abi.encodePacked(R, px, message));
+  }
+}
diff --git a/networks/ethereum/schnorr/contracts/tests/Schnorr.sol b/networks/ethereum/schnorr/contracts/tests/Schnorr.sol
new file mode 100644
index 000000000..412786a33
--- /dev/null
+++ b/networks/ethereum/schnorr/contracts/tests/Schnorr.sol
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: AGPL-3.0-only
+pragma solidity ^0.8.26;
+
+import "../Schnorr.sol";
+
+contract TestSchnorr {
+  function verify(bytes32 public_key, bytes calldata message, bytes32 c, bytes32 s)
+    external
+    pure
+    returns (bool)
+  {
+    return Schnorr.verify(public_key, keccak256(message), c, s);
+  }
+}
diff --git a/networks/ethereum/schnorr/src/lib.rs b/networks/ethereum/schnorr/src/lib.rs
new file mode 100644
index 000000000..3f67fbbff
--- /dev/null
+++ b/networks/ethereum/schnorr/src/lib.rs
@@ -0,0 +1,16 @@
+#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![doc = include_str!("../README.md")]
+#![deny(missing_docs)]
+#![allow(non_snake_case)]
+
+/// The initialization bytecode of the Schnorr library.
+pub const INIT_BYTECODE: &str =
+  include_str!(concat!(env!("OUT_DIR"), "/ethereum-schnorr-contract/Schnorr.bin"));
+
+mod public_key;
+pub use public_key::PublicKey;
+mod signature;
+pub use signature::Signature;
+
+#[cfg(test)]
+mod tests;
diff --git a/networks/ethereum/schnorr/src/public_key.rs b/networks/ethereum/schnorr/src/public_key.rs
new file mode 100644
index 000000000..3c39552fe
--- /dev/null
+++ b/networks/ethereum/schnorr/src/public_key.rs
@@ -0,0 +1,74 @@
+use subtle::Choice;
+use group::ff::PrimeField;
+use k256::{
+  elliptic_curve::{
+    ops::Reduce,
+    point::{AffineCoordinates, DecompressPoint},
+  },
+  AffinePoint, ProjectivePoint, Scalar, U256 as KU256,
+};
+
+/// A public key for the Schnorr Solidity library.
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+pub struct PublicKey {
+  A: ProjectivePoint,
+  x_coordinate: [u8; 32],
+}
+
+impl PublicKey {
+  /// Construct a new `PublicKey`.
+  ///
+  /// This will return None if the provided point isn't eligible to be a public key (due to
+  /// bounds such as parity).
+  #[must_use]
+  pub fn new(A: ProjectivePoint) -> Option<PublicKey> {
+    let affine = A.to_affine();
+
+    // Only allow even keys to save a word within Ethereum
+    if bool::from(affine.y_is_odd()) {
+      None?;
+    }
+
+    let x_coordinate = affine.x();
+    // Return None if the x-coordinate isn't mutual to both fields
+    // While reductions shouldn't be an issue, it's one less headache/concern to have
+    // The trivial amount of public keys this makes non-representable aren't a concern
+    if <Scalar as Reduce<KU256>>::reduce_bytes(&x_coordinate).to_repr() != x_coordinate {
+      None?;
+    }
+
+    let x_coordinate: [u8; 32] = x_coordinate.into();
+    // Returns None if the x-coordinate is 0
+    // Such keys will never have their signatures able to be verified
+    if x_coordinate == [0; 32] {
+      None?;
+    }
+    Some(PublicKey { A, x_coordinate })
+  }
+
+  /// The point for this public key.
+  #[must_use]
+  pub fn point(&self) -> ProjectivePoint {
+    self.A
+  }
+
+  /// The Ethereum representation of this public key.
+  #[must_use]
+  pub fn eth_repr(&self) -> [u8; 32] {
+    // We only encode the x-coordinate due to fixing the sign of the y-coordinate
+    self.x_coordinate
+  }
+
+  /// Construct a PublicKey from its Ethereum representation.
+ // This wouldn't be possible if the x-coordinate had been reduced + #[must_use] + pub fn from_eth_repr(repr: [u8; 32]) -> Option { + let x_coordinate = repr; + + let y_is_odd = Choice::from(0); + let A_affine = + Option::::from(AffinePoint::decompress(&x_coordinate.into(), y_is_odd))?; + let A = ProjectivePoint::from(A_affine); + Some(PublicKey { A, x_coordinate }) + } +} diff --git a/networks/ethereum/schnorr/src/signature.rs b/networks/ethereum/schnorr/src/signature.rs new file mode 100644 index 000000000..1af1d60f8 --- /dev/null +++ b/networks/ethereum/schnorr/src/signature.rs @@ -0,0 +1,95 @@ +use std::io; + +use sha3::{Digest, Keccak256}; + +use group::ff::PrimeField; +use k256::{ + elliptic_curve::{ops::Reduce, sec1::ToEncodedPoint}, + ProjectivePoint, Scalar, U256 as KU256, +}; + +use crate::PublicKey; + +/// A signature for the Schnorr Solidity library. +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub struct Signature { + c: Scalar, + s: Scalar, +} + +impl Signature { + /// Construct a new `Signature`. + #[must_use] + pub fn new(c: Scalar, s: Scalar) -> Signature { + Signature { c, s } + } + + /// The challenge for a signature. + #[must_use] + pub fn challenge(R: ProjectivePoint, key: &PublicKey, message: &[u8]) -> Scalar { + // H(R || A || m) + let mut hash = Keccak256::new(); + // We transcript the nonce as an address since ecrecover yields an address + hash.update({ + let uncompressed_encoded_point = R.to_encoded_point(false); + // Skip the prefix byte marking this as uncompressed + let x_and_y_coordinates = &uncompressed_encoded_point.as_ref()[1 ..]; + // Last 20 bytes of the hash of the x and y coordinates + &Keccak256::digest(x_and_y_coordinates)[12 ..] + }); + hash.update(key.eth_repr()); + hash.update(Keccak256::digest(message)); + >::reduce_bytes(&hash.finalize()) + } + + /// Verify a signature. + #[must_use] + pub fn verify(&self, key: &PublicKey, message: &[u8]) -> bool { + // Recover the nonce + let R = (ProjectivePoint::GENERATOR * self.s) - (key.point() * self.c); + // Check the challenge + Self::challenge(R, key, message) == self.c + } + + /// The challenge present within this signature. + pub fn c(&self) -> Scalar { + self.c + } + + /// The signature solution present within this signature. + pub fn s(&self) -> Scalar { + self.s + } + + /// Convert the signature to bytes. + #[must_use] + pub fn to_bytes(&self) -> [u8; 64] { + let mut res = [0; 64]; + res[.. 32].copy_from_slice(self.c.to_repr().as_ref()); + res[32 ..].copy_from_slice(self.s.to_repr().as_ref()); + res + } + + /// Write the signature. + pub fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { + writer.write_all(&self.to_bytes()) + } + + /// Read a signature. + pub fn read(reader: &mut impl io::Read) -> io::Result { + let mut read_F = || -> io::Result { + let mut bytes = [0; 32]; + reader.read_exact(&mut bytes)?; + Option::::from(Scalar::from_repr(bytes.into())) + .ok_or_else(|| io::Error::other("invalid scalar")) + }; + let c = read_F()?; + let s = read_F()?; + Ok(Signature { c, s }) + } + + /// Read a signature from bytes. 
+ pub fn from_bytes(bytes: [u8; 64]) -> io::Result { + Self::read(&mut bytes.as_slice()) + } +} diff --git a/networks/ethereum/schnorr/src/tests/mod.rs b/networks/ethereum/schnorr/src/tests/mod.rs new file mode 100644 index 000000000..90774e30b --- /dev/null +++ b/networks/ethereum/schnorr/src/tests/mod.rs @@ -0,0 +1,112 @@ +use std::sync::Arc; + +use rand_core::{RngCore, OsRng}; + +use group::ff::{Field, PrimeField}; +use k256::{Scalar, ProjectivePoint}; + +use alloy_core::primitives::Address; +use alloy_sol_types::SolCall; + +use alloy_simple_request_transport::SimpleRequest; +use alloy_rpc_types_eth::{TransactionInput, TransactionRequest}; +use alloy_rpc_client::ClientBuilder; +use alloy_provider::{Provider, RootProvider}; + +use alloy_node_bindings::{Anvil, AnvilInstance}; + +use crate::{PublicKey, Signature}; + +mod premise; + +#[expect(warnings)] +#[expect(needless_pass_by_value)] +#[expect(clippy::all)] +#[expect(clippy::ignored_unit_patterns)] +#[expect(clippy::redundant_closure_for_method_calls)] +mod abi { + alloy_sol_types::sol!("contracts/tests/Schnorr.sol"); + pub(crate) use TestSchnorr::*; +} + +async fn setup_test() -> (AnvilInstance, Arc>, Address) { + let anvil = Anvil::new().spawn(); + + let provider = Arc::new(RootProvider::new( + ClientBuilder::default().transport(SimpleRequest::new(anvil.endpoint()), true), + )); + + let mut address = [0; 20]; + OsRng.fill_bytes(&mut address); + let address = Address::from(address); + let _: () = provider + .raw_request( + "anvil_setCode".into(), + [ + address.to_string(), + include_str!(concat!( + env!("OUT_DIR"), + "/ethereum-schnorr-contract/TestSchnorr.bin-runtime" + )) + .to_string(), + ], + ) + .await + .unwrap(); + + (anvil, provider, address) +} + +async fn call_verify( + provider: &RootProvider, + address: Address, + public_key: &PublicKey, + message: &[u8], + signature: &Signature, +) -> bool { + let public_key: [u8; 32] = public_key.eth_repr(); + let c_bytes: [u8; 32] = signature.c().to_repr().into(); + let s_bytes: [u8; 32] = signature.s().to_repr().into(); + let call = TransactionRequest::default().to(address).input(TransactionInput::new( + abi::verifyCall::new(( + public_key.into(), + message.to_vec().into(), + c_bytes.into(), + s_bytes.into(), + )) + .abi_encode() + .into(), + )); + let bytes = provider.call(&call).await.unwrap(); + let res = abi::verifyCall::abi_decode_returns(&bytes, true).unwrap(); + + res._0 +} + +#[tokio::test] +async fn test_verify() { + let (_anvil, provider, address) = setup_test().await; + + for _ in 0 .. 
100 { + let (key, public_key) = loop { + let key = Scalar::random(&mut OsRng); + if let Some(public_key) = PublicKey::new(ProjectivePoint::GENERATOR * key) { + break (key, public_key); + } + }; + + let nonce = Scalar::random(&mut OsRng); + let mut message = vec![0; 1 + usize::try_from(OsRng.next_u32() % 256).unwrap()]; + OsRng.fill_bytes(&mut message); + + let c = Signature::challenge(ProjectivePoint::GENERATOR * nonce, &public_key, &message); + let s = nonce + (c * key); + + let sig = Signature::new(c, s); + assert!(sig.verify(&public_key, &message)); + assert!(call_verify(&provider, address, &public_key, &message, &sig).await); + // Mutate the message and make sure the signature now fails to verify + message[0] = message[0].wrapping_add(1); + assert!(!call_verify(&provider, address, &public_key, &message, &sig).await); + } +} diff --git a/networks/ethereum/schnorr/src/tests/premise.rs b/networks/ethereum/schnorr/src/tests/premise.rs new file mode 100644 index 000000000..01571a437 --- /dev/null +++ b/networks/ethereum/schnorr/src/tests/premise.rs @@ -0,0 +1,111 @@ +use rand_core::{RngCore, OsRng}; + +use sha3::{Digest, Keccak256}; +use group::ff::{Field, PrimeField}; +use k256::{ + elliptic_curve::{ops::Reduce, point::AffineCoordinates, sec1::ToEncodedPoint}, + ecdsa::{ + self, hazmat::SignPrimitive, signature::hazmat::PrehashVerifier, SigningKey, VerifyingKey, + }, + U256, Scalar, ProjectivePoint, +}; + +use alloy_core::primitives::Address; + +use crate::{PublicKey, Signature}; + +// The ecrecover opcode, yet with if the y is odd replacing v +fn ecrecover(message: Scalar, odd_y: bool, r: Scalar, s: Scalar) -> Option<[u8; 20]> { + let sig = ecdsa::Signature::from_scalars(r, s).ok()?; + let message: [u8; 32] = message.to_repr().into(); + alloy_core::primitives::Signature::from_signature_and_parity( + sig, + alloy_core::primitives::Parity::Parity(odd_y), + ) + .ok()? 
+ .recover_address_from_prehash(&alloy_core::primitives::B256::from(message)) + .ok() + .map(Into::into) +} + +// Test ecrecover behaves as expected +#[test] +fn test_ecrecover() { + let private = SigningKey::random(&mut OsRng); + let public = VerifyingKey::from(&private); + + // Sign the signature + const MESSAGE: &[u8] = b"Hello, World!"; + let (sig, recovery_id) = private + .as_nonzero_scalar() + .try_sign_prehashed(Scalar::random(&mut OsRng), &Keccak256::digest(MESSAGE)) + .unwrap(); + + // Sanity check the signature verifies + #[allow(clippy::unit_cmp)] // Intended to assert this wasn't changed to Result + { + assert_eq!(public.verify_prehash(&Keccak256::digest(MESSAGE), &sig).unwrap(), ()); + } + + // Perform the ecrecover + assert_eq!( + ecrecover( + >::reduce_bytes(&Keccak256::digest(MESSAGE)), + u8::from(recovery_id.unwrap().is_y_odd()) == 1, + *sig.r(), + *sig.s() + ) + .unwrap(), + Address::from_raw_public_key(&public.to_encoded_point(false).as_ref()[1 ..]), + ); +} + +// Test that we can recover the nonce from a Schnorr signature via a call to ecrecover, the premise +// of efficiently verifying Schnorr signatures in an Ethereum contract +#[test] +fn nonce_recovery_via_ecrecover() { + let (key, public_key) = loop { + let key = Scalar::random(&mut OsRng); + if let Some(public_key) = PublicKey::new(ProjectivePoint::GENERATOR * key) { + break (key, public_key); + } + }; + + let nonce = Scalar::random(&mut OsRng); + let R = ProjectivePoint::GENERATOR * nonce; + + let mut message = vec![0; 1 + usize::try_from(OsRng.next_u32() % 256).unwrap()]; + OsRng.fill_bytes(&mut message); + + let c = Signature::challenge(R, &public_key, &message); + let s = nonce + (c * key); + + /* + An ECDSA signature is `(r, s)` with `s = (H(m) + rx) / k`, where: + - `m` is the message + - `r` is the x-coordinate of the nonce, reduced into a scalar + - `x` is the private key + - `k` is the nonce + + We fix the recovery ID to be for the even key with an x-coordinate < the order. Accordingly, + `kG = Point::from(Even, r)`. This enables recovering the public key via + `((s Point::from(Even, r)) - H(m)G) / r`. + + We want to calculate `R` from `(c, s)` where `s = r + cx`. That means we need to calculate + `sG - cX`. + + We can calculate `sG - cX` with `((s Point::from(Even, r)) - H(m)G) / r` if: + - Latter `r` = `X.x` + - Latter `s` = `c` + - `H(m)` = former `s` + This gets us to `(cX - sG) / X.x`. If we additionally scale the latter's `s, H(m)` values (the + former's `c, s` values) by `X.x`, we get `cX - sG`. This just requires negating each to achieve + `sG - cX`. 
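+
+    Concretely, that is the call made below: `ecrecover(-(s * X.x), even, X.x, -(c * X.x))`, whose
+    recovered address is the address form of `sG - cX = R`, i.e. exactly the value
+    `Signature::challenge` transcripts as the nonce.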
+ */ + let x_scalar = >::reduce_bytes(&public_key.point().to_affine().x()); + let sa = -(s * x_scalar); + let ca = -(c * x_scalar); + + let q = ecrecover(sa, false, x_scalar, ca).unwrap(); + assert_eq!(q, Address::from_raw_public_key(&R.to_encoded_point(false).as_ref()[1 ..])); +} diff --git a/networks/ethereum/src/abi/mod.rs b/networks/ethereum/src/abi/mod.rs deleted file mode 100644 index 1ae233743..000000000 --- a/networks/ethereum/src/abi/mod.rs +++ /dev/null @@ -1,37 +0,0 @@ -use alloy_sol_types::sol; - -#[rustfmt::skip] -#[allow(warnings)] -#[allow(needless_pass_by_value)] -#[allow(clippy::all)] -#[allow(clippy::ignored_unit_patterns)] -#[allow(clippy::redundant_closure_for_method_calls)] -mod erc20_container { - use super::*; - sol!("contracts/IERC20.sol"); -} -pub use erc20_container::IERC20 as erc20; - -#[rustfmt::skip] -#[allow(warnings)] -#[allow(needless_pass_by_value)] -#[allow(clippy::all)] -#[allow(clippy::ignored_unit_patterns)] -#[allow(clippy::redundant_closure_for_method_calls)] -mod deployer_container { - use super::*; - sol!("contracts/Deployer.sol"); -} -pub use deployer_container::Deployer as deployer; - -#[rustfmt::skip] -#[allow(warnings)] -#[allow(needless_pass_by_value)] -#[allow(clippy::all)] -#[allow(clippy::ignored_unit_patterns)] -#[allow(clippy::redundant_closure_for_method_calls)] -mod router_container { - use super::*; - sol!(Router, "artifacts/Router.abi"); -} -pub use router_container::Router as router; diff --git a/networks/ethereum/src/crypto.rs b/networks/ethereum/src/crypto.rs deleted file mode 100644 index 6ea6a0b04..000000000 --- a/networks/ethereum/src/crypto.rs +++ /dev/null @@ -1,188 +0,0 @@ -use group::ff::PrimeField; -use k256::{ - elliptic_curve::{ops::Reduce, point::AffineCoordinates, sec1::ToEncodedPoint}, - ProjectivePoint, Scalar, U256 as KU256, -}; -#[cfg(test)] -use k256::{elliptic_curve::point::DecompressPoint, AffinePoint}; - -use frost::{ - algorithm::{Hram, SchnorrSignature}, - curve::{Ciphersuite, Secp256k1}, -}; - -use alloy_core::primitives::{Parity, Signature as AlloySignature}; -use alloy_consensus::{SignableTransaction, Signed, TxLegacy}; - -use crate::abi::router::{Signature as AbiSignature}; - -pub(crate) fn keccak256(data: &[u8]) -> [u8; 32] { - alloy_core::primitives::keccak256(data).into() -} - -pub(crate) fn hash_to_scalar(data: &[u8]) -> Scalar { - >::reduce_bytes(&keccak256(data).into()) -} - -pub fn address(point: &ProjectivePoint) -> [u8; 20] { - let encoded_point = point.to_encoded_point(false); - // Last 20 bytes of the hash of the concatenated x and y coordinates - // We obtain the concatenated x and y coordinates via the uncompressed encoding of the point - keccak256(&encoded_point.as_ref()[1 .. 65])[12 ..].try_into().unwrap() -} - -/// Deterministically sign a transaction. -/// -/// This function panics if passed a transaction with a non-None chain ID. 
-pub fn deterministically_sign(tx: &TxLegacy) -> Signed { - assert!( - tx.chain_id.is_none(), - "chain ID was Some when deterministically signing a TX (causing a non-deterministic signer)" - ); - - let sig_hash = tx.signature_hash().0; - let mut r = hash_to_scalar(&[sig_hash.as_slice(), b"r"].concat()); - let mut s = hash_to_scalar(&[sig_hash.as_slice(), b"s"].concat()); - loop { - let r_bytes: [u8; 32] = r.to_repr().into(); - let s_bytes: [u8; 32] = s.to_repr().into(); - let v = Parity::NonEip155(false); - let signature = - AlloySignature::from_scalars_and_parity(r_bytes.into(), s_bytes.into(), v).unwrap(); - let tx = tx.clone().into_signed(signature); - if tx.recover_signer().is_ok() { - return tx; - } - - // Re-hash until valid - r = hash_to_scalar(r_bytes.as_ref()); - s = hash_to_scalar(s_bytes.as_ref()); - } -} - -/// The public key for a Schnorr-signing account. -#[allow(non_snake_case)] -#[derive(Clone, Copy, PartialEq, Eq, Debug)] -pub struct PublicKey { - pub(crate) A: ProjectivePoint, - pub(crate) px: Scalar, -} - -impl PublicKey { - /// Construct a new `PublicKey`. - /// - /// This will return None if the provided point isn't eligible to be a public key (due to - /// bounds such as parity). - #[allow(non_snake_case)] - pub fn new(A: ProjectivePoint) -> Option { - let affine = A.to_affine(); - // Only allow even keys to save a word within Ethereum - let is_odd = bool::from(affine.y_is_odd()); - if is_odd { - None?; - } - - let x_coord = affine.x(); - let x_coord_scalar = >::reduce_bytes(&x_coord); - // Return None if a reduction would occur - // Reductions would be incredibly unlikely and shouldn't be an issue, yet it's one less - // headache/concern to have - // This does ban a trivial amoount of public keys - if x_coord_scalar.to_repr() != x_coord { - None?; - } - - Some(PublicKey { A, px: x_coord_scalar }) - } - - pub fn point(&self) -> ProjectivePoint { - self.A - } - - pub(crate) fn eth_repr(&self) -> [u8; 32] { - self.px.to_repr().into() - } - - #[cfg(test)] - pub(crate) fn from_eth_repr(repr: [u8; 32]) -> Option { - #[allow(non_snake_case)] - let A = Option::::from(AffinePoint::decompress(&repr.into(), 0.into()))?.into(); - Option::from(Scalar::from_repr(repr.into())).map(|px| PublicKey { A, px }) - } -} - -/// The HRAm to use for the Schnorr contract. -#[derive(Clone, Default)] -pub struct EthereumHram {} -impl Hram for EthereumHram { - #[allow(non_snake_case)] - fn hram(R: &ProjectivePoint, A: &ProjectivePoint, m: &[u8]) -> Scalar { - let x_coord = A.to_affine().x(); - - let mut data = address(R).to_vec(); - data.extend(x_coord.as_slice()); - data.extend(m); - - >::reduce_bytes(&keccak256(&data).into()) - } -} - -/// A signature for the Schnorr contract. -#[derive(Clone, Copy, PartialEq, Eq, Debug)] -pub struct Signature { - pub(crate) c: Scalar, - pub(crate) s: Scalar, -} -impl Signature { - pub fn verify(&self, public_key: &PublicKey, message: &[u8]) -> bool { - #[allow(non_snake_case)] - let R = (Secp256k1::generator() * self.s) - (public_key.A * self.c); - EthereumHram::hram(&R, &public_key.A, message) == self.c - } - - /// Construct a new `Signature`. - /// - /// This will return None if the signature is invalid. 
- pub fn new( - public_key: &PublicKey, - message: &[u8], - signature: SchnorrSignature, - ) -> Option { - let c = EthereumHram::hram(&signature.R, &public_key.A, message); - if !signature.verify(public_key.A, c) { - None?; - } - - let res = Signature { c, s: signature.s }; - assert!(res.verify(public_key, message)); - Some(res) - } - - pub fn c(&self) -> Scalar { - self.c - } - pub fn s(&self) -> Scalar { - self.s - } - - pub fn to_bytes(&self) -> [u8; 64] { - let mut res = [0; 64]; - res[.. 32].copy_from_slice(self.c.to_repr().as_ref()); - res[32 ..].copy_from_slice(self.s.to_repr().as_ref()); - res - } - - pub fn from_bytes(bytes: [u8; 64]) -> std::io::Result { - let mut reader = bytes.as_slice(); - let c = Secp256k1::read_F(&mut reader)?; - let s = Secp256k1::read_F(&mut reader)?; - Ok(Signature { c, s }) - } -} -impl From<&Signature> for AbiSignature { - fn from(sig: &Signature) -> AbiSignature { - let c: [u8; 32] = sig.c.to_repr().into(); - let s: [u8; 32] = sig.s.to_repr().into(); - AbiSignature { c: c.into(), s: s.into() } - } -} diff --git a/networks/ethereum/src/deployer.rs b/networks/ethereum/src/deployer.rs deleted file mode 100644 index 19aa328d2..000000000 --- a/networks/ethereum/src/deployer.rs +++ /dev/null @@ -1,113 +0,0 @@ -use std::sync::Arc; - -use alloy_core::primitives::{hex::FromHex, Address, B256, U256, Bytes, TxKind}; -use alloy_consensus::{Signed, TxLegacy}; - -use alloy_sol_types::{SolCall, SolEvent}; - -use alloy_rpc_types_eth::{BlockNumberOrTag, Filter}; -use alloy_simple_request_transport::SimpleRequest; -use alloy_provider::{Provider, RootProvider}; - -use crate::{ - Error, - crypto::{self, keccak256, PublicKey}, - router::Router, -}; -pub use crate::abi::deployer as abi; - -/// The Deployer contract for the Router contract. -/// -/// This Deployer has a deterministic address, letting it be immediately identified on any -/// compatible chain. It then supports retrieving the Router contract's address (which isn't -/// deterministic) using a single log query. -#[derive(Clone, Debug)] -pub struct Deployer; -impl Deployer { - /// Obtain the transaction to deploy this contract, already signed. - /// - /// The account this transaction is sent from (which is populated in `from`) must be sufficiently - /// funded for this transaction to be submitted. This account has no known private key to anyone, - /// so ETH sent can be neither misappropriated nor returned. - pub fn deployment_tx() -> Signed { - let bytecode = include_str!("../artifacts/Deployer.bin"); - let bytecode = - Bytes::from_hex(bytecode).expect("compiled-in Deployer bytecode wasn't valid hex"); - - let tx = TxLegacy { - chain_id: None, - nonce: 0, - gas_price: 100_000_000_000u128, - // TODO: Use a more accurate gas limit - gas_limit: 1_000_000u128, - to: TxKind::Create, - value: U256::ZERO, - input: bytecode, - }; - - crypto::deterministically_sign(&tx) - } - - /// Obtain the deterministic address for this contract. - pub fn address() -> [u8; 20] { - let deployer_deployer = - Self::deployment_tx().recover_signer().expect("deployment_tx didn't have a valid signature"); - **Address::create(&deployer_deployer, 0) - } - - /// Construct a new view of the `Deployer`. - pub async fn new(provider: Arc>) -> Result, Error> { - let address = Self::address(); - let code = provider.get_code_at(address.into()).await.map_err(|_| Error::ConnectionError)?; - // Contract has yet to be deployed - if code.is_empty() { - return Ok(None); - } - Ok(Some(Self)) - } - - /// Yield the `ContractCall` necessary to deploy the Router. 
- pub fn deploy_router(&self, key: &PublicKey) -> TxLegacy { - TxLegacy { - to: TxKind::Call(Self::address().into()), - input: abi::deployCall::new((Router::init_code(key).into(),)).abi_encode().into(), - gas_limit: 1_000_000, - ..Default::default() - } - } - - /// Find the first Router deployed with the specified key as its first key. - /// - /// This is the Router Serai will use, and is the only way to construct a `Router`. - pub async fn find_router( - &self, - provider: Arc>, - key: &PublicKey, - ) -> Result, Error> { - let init_code = Router::init_code(key); - let init_code_hash = keccak256(&init_code); - - #[cfg(not(test))] - let to_block = BlockNumberOrTag::Finalized; - #[cfg(test)] - let to_block = BlockNumberOrTag::Latest; - - // Find the first log using this init code (where the init code is binding to the key) - // TODO: Make an abstraction for event filtering (de-duplicating common code) - let filter = - Filter::new().from_block(0).to_block(to_block).address(Address::from(Self::address())); - let filter = filter.event_signature(abi::Deployment::SIGNATURE_HASH); - let filter = filter.topic1(B256::from(init_code_hash)); - let logs = provider.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?; - - let Some(first_log) = logs.first() else { return Ok(None) }; - let router = first_log - .log_decode::() - .map_err(|_| Error::ConnectionError)? - .inner - .data - .created; - - Ok(Some(Router::new(provider, router))) - } -} diff --git a/networks/ethereum/src/erc20.rs b/networks/ethereum/src/erc20.rs deleted file mode 100644 index 6a32f7cc1..000000000 --- a/networks/ethereum/src/erc20.rs +++ /dev/null @@ -1,105 +0,0 @@ -use std::{sync::Arc, collections::HashSet}; - -use alloy_core::primitives::{Address, B256, U256}; - -use alloy_sol_types::{SolInterface, SolEvent}; - -use alloy_rpc_types_eth::Filter; -use alloy_simple_request_transport::SimpleRequest; -use alloy_provider::{Provider, RootProvider}; - -use crate::Error; -pub use crate::abi::erc20 as abi; -use abi::{IERC20Calls, Transfer, transferCall, transferFromCall}; - -#[derive(Clone, Debug)] -pub struct TopLevelErc20Transfer { - pub id: [u8; 32], - pub from: [u8; 20], - pub amount: U256, - pub data: Vec, -} - -/// A view for an ERC20 contract. -#[derive(Clone, Debug)] -pub struct Erc20(Arc>, Address); -impl Erc20 { - /// Construct a new view of the specified ERC20 contract. - pub fn new(provider: Arc>, address: [u8; 20]) -> Self { - Self(provider, Address::from(&address)) - } - - pub async fn top_level_transfers( - &self, - block: u64, - to: [u8; 20], - ) -> Result, Error> { - let filter = Filter::new().from_block(block).to_block(block).address(self.1); - let filter = filter.event_signature(Transfer::SIGNATURE_HASH); - let mut to_topic = [0; 32]; - to_topic[12 ..].copy_from_slice(&to); - let filter = filter.topic2(B256::from(to_topic)); - let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?; - - let mut handled = HashSet::new(); - - let mut top_level_transfers = vec![]; - for log in logs { - // Double check the address which emitted this log - if log.address() != self.1 { - Err(Error::ConnectionError)?; - } - - let tx_id = log.transaction_hash.ok_or(Error::ConnectionError)?; - let tx = - self.0.get_transaction_by_hash(tx_id).await.ok().flatten().ok_or(Error::ConnectionError)?; - - // If this is a top-level call... - if tx.to == Some(self.1) { - // And we recognize the call... 
- // Don't validate the encoding as this can't be re-encoded to an identical bytestring due - // to the InInstruction appended - if let Ok(call) = IERC20Calls::abi_decode(&tx.input, false) { - // Extract the top-level call's from/to/value - let (from, call_to, value) = match call { - IERC20Calls::transfer(transferCall { to: call_to, value }) => (tx.from, call_to, value), - IERC20Calls::transferFrom(transferFromCall { from, to: call_to, value }) => { - (from, call_to, value) - } - // Treat any other function selectors as unrecognized - _ => continue, - }; - - let log = log.log_decode::().map_err(|_| Error::ConnectionError)?.inner.data; - - // Ensure the top-level transfer is equivalent, and this presumably isn't a log for an - // internal transfer - if (log.from != from) || (call_to != to) || (value != log.value) { - continue; - } - - // Now that the top-level transfer is confirmed to be equivalent to the log, ensure it's - // the only log we handle - if handled.contains(&tx_id) { - continue; - } - handled.insert(tx_id); - - // Read the data appended after - let encoded = call.abi_encode(); - let data = tx.input.as_ref()[encoded.len() ..].to_vec(); - - // Push the transfer - top_level_transfers.push(TopLevelErc20Transfer { - // Since we'll only handle one log for this TX, set the ID to the TX ID - id: *tx_id, - from: *log.from.0, - amount: log.value, - data, - }); - } - } - } - Ok(top_level_transfers) - } -} diff --git a/networks/ethereum/src/lib.rs b/networks/ethereum/src/lib.rs deleted file mode 100644 index 38bd79e79..000000000 --- a/networks/ethereum/src/lib.rs +++ /dev/null @@ -1,35 +0,0 @@ -use thiserror::Error; - -pub mod alloy { - pub use alloy_core::primitives; - pub use alloy_core as core; - pub use alloy_sol_types as sol_types; - - pub use alloy_consensus as consensus; - pub use alloy_network as network; - pub use alloy_rpc_types_eth as rpc_types; - pub use alloy_simple_request_transport as simple_request_transport; - pub use alloy_rpc_client as rpc_client; - pub use alloy_provider as provider; -} - -pub mod crypto; - -pub(crate) mod abi; - -pub mod erc20; -pub mod deployer; -pub mod router; - -pub mod machine; - -#[cfg(any(test, feature = "tests"))] -pub mod tests; - -#[derive(Clone, Copy, PartialEq, Eq, Debug, Error)] -pub enum Error { - #[error("failed to verify Schnorr signature")] - InvalidSignature, - #[error("couldn't make call/send TX")] - ConnectionError, -} diff --git a/networks/ethereum/src/machine.rs b/networks/ethereum/src/machine.rs deleted file mode 100644 index 0d5dc7a59..000000000 --- a/networks/ethereum/src/machine.rs +++ /dev/null @@ -1,414 +0,0 @@ -use std::{ - io::{self, Read}, - collections::HashMap, -}; - -use rand_core::{RngCore, CryptoRng}; - -use transcript::{Transcript, RecommendedTranscript}; - -use group::GroupEncoding; -use frost::{ - curve::{Ciphersuite, Secp256k1}, - Participant, ThresholdKeys, FrostError, - algorithm::Schnorr, - sign::*, -}; - -use alloy_core::primitives::U256; - -use crate::{ - crypto::{PublicKey, EthereumHram, Signature}, - router::{ - abi::{Call as AbiCall, OutInstruction as AbiOutInstruction}, - Router, - }, -}; - -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Call { - pub to: [u8; 20], - pub value: U256, - pub data: Vec, -} -impl Call { - pub fn read(reader: &mut R) -> io::Result { - let mut to = [0; 20]; - reader.read_exact(&mut to)?; - - let value = { - let mut value_bytes = [0; 32]; - reader.read_exact(&mut value_bytes)?; - U256::from_le_slice(&value_bytes) - }; - - let mut data_len = { - let mut data_len = [0; 4]; - 
reader.read_exact(&mut data_len)?; - usize::try_from(u32::from_le_bytes(data_len)).expect("u32 couldn't fit within a usize") - }; - - // A valid DoS would be to claim a 4 GB data is present for only 4 bytes - // We read this in 1 KB chunks to only read data actually present (with a max DoS of 1 KB) - let mut data = vec![]; - while data_len > 0 { - let chunk_len = data_len.min(1024); - let mut chunk = vec![0; chunk_len]; - reader.read_exact(&mut chunk)?; - data.extend(&chunk); - data_len -= chunk_len; - } - - Ok(Call { to, value, data }) - } - - fn write(&self, writer: &mut W) -> io::Result<()> { - writer.write_all(&self.to)?; - writer.write_all(&self.value.as_le_bytes())?; - - let data_len = u32::try_from(self.data.len()) - .map_err(|_| io::Error::other("call data length exceeded 2**32"))?; - writer.write_all(&data_len.to_le_bytes())?; - writer.write_all(&self.data) - } -} -impl From for AbiCall { - fn from(call: Call) -> AbiCall { - AbiCall { to: call.to.into(), value: call.value, data: call.data.into() } - } -} - -#[derive(Clone, PartialEq, Eq, Debug)] -pub enum OutInstructionTarget { - Direct([u8; 20]), - Calls(Vec), -} -impl OutInstructionTarget { - fn read(reader: &mut R) -> io::Result { - let mut kind = [0xff]; - reader.read_exact(&mut kind)?; - - match kind[0] { - 0 => { - let mut addr = [0; 20]; - reader.read_exact(&mut addr)?; - Ok(OutInstructionTarget::Direct(addr)) - } - 1 => { - let mut calls_len = [0; 4]; - reader.read_exact(&mut calls_len)?; - let calls_len = u32::from_le_bytes(calls_len); - - let mut calls = vec![]; - for _ in 0 .. calls_len { - calls.push(Call::read(reader)?); - } - Ok(OutInstructionTarget::Calls(calls)) - } - _ => Err(io::Error::other("unrecognized OutInstructionTarget"))?, - } - } - - fn write(&self, writer: &mut W) -> io::Result<()> { - match self { - OutInstructionTarget::Direct(addr) => { - writer.write_all(&[0])?; - writer.write_all(addr)?; - } - OutInstructionTarget::Calls(calls) => { - writer.write_all(&[1])?; - let call_len = u32::try_from(calls.len()) - .map_err(|_| io::Error::other("amount of calls exceeded 2**32"))?; - writer.write_all(&call_len.to_le_bytes())?; - for call in calls { - call.write(writer)?; - } - } - } - Ok(()) - } -} - -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct OutInstruction { - pub target: OutInstructionTarget, - pub value: U256, -} -impl OutInstruction { - fn read(reader: &mut R) -> io::Result { - let target = OutInstructionTarget::read(reader)?; - - let value = { - let mut value_bytes = [0; 32]; - reader.read_exact(&mut value_bytes)?; - U256::from_le_slice(&value_bytes) - }; - - Ok(OutInstruction { target, value }) - } - fn write(&self, writer: &mut W) -> io::Result<()> { - self.target.write(writer)?; - writer.write_all(&self.value.as_le_bytes()) - } -} -impl From for AbiOutInstruction { - fn from(instruction: OutInstruction) -> AbiOutInstruction { - match instruction.target { - OutInstructionTarget::Direct(addr) => { - AbiOutInstruction { to: addr.into(), calls: vec![], value: instruction.value } - } - OutInstructionTarget::Calls(calls) => AbiOutInstruction { - to: [0; 20].into(), - calls: calls.into_iter().map(Into::into).collect(), - value: instruction.value, - }, - } - } -} - -#[derive(Clone, PartialEq, Eq, Debug)] -pub enum RouterCommand { - UpdateSeraiKey { chain_id: U256, nonce: U256, key: PublicKey }, - Execute { chain_id: U256, nonce: U256, outs: Vec }, -} - -impl RouterCommand { - pub fn msg(&self) -> Vec { - match self { - RouterCommand::UpdateSeraiKey { chain_id, nonce, key } => { - 
Router::update_serai_key_message(*chain_id, *nonce, key) - } - RouterCommand::Execute { chain_id, nonce, outs } => Router::execute_message( - *chain_id, - *nonce, - outs.iter().map(|out| out.clone().into()).collect(), - ), - } - } - - pub fn read(reader: &mut R) -> io::Result { - let mut kind = [0xff]; - reader.read_exact(&mut kind)?; - - match kind[0] { - 0 => { - let mut chain_id = [0; 32]; - reader.read_exact(&mut chain_id)?; - - let mut nonce = [0; 32]; - reader.read_exact(&mut nonce)?; - - let key = PublicKey::new(Secp256k1::read_G(reader)?) - .ok_or(io::Error::other("key for RouterCommand doesn't have an eth representation"))?; - Ok(RouterCommand::UpdateSeraiKey { - chain_id: U256::from_le_slice(&chain_id), - nonce: U256::from_le_slice(&nonce), - key, - }) - } - 1 => { - let mut chain_id = [0; 32]; - reader.read_exact(&mut chain_id)?; - let chain_id = U256::from_le_slice(&chain_id); - - let mut nonce = [0; 32]; - reader.read_exact(&mut nonce)?; - let nonce = U256::from_le_slice(&nonce); - - let mut outs_len = [0; 4]; - reader.read_exact(&mut outs_len)?; - let outs_len = u32::from_le_bytes(outs_len); - - let mut outs = vec![]; - for _ in 0 .. outs_len { - outs.push(OutInstruction::read(reader)?); - } - - Ok(RouterCommand::Execute { chain_id, nonce, outs }) - } - _ => Err(io::Error::other("reading unknown type of RouterCommand"))?, - } - } - - pub fn write(&self, writer: &mut W) -> io::Result<()> { - match self { - RouterCommand::UpdateSeraiKey { chain_id, nonce, key } => { - writer.write_all(&[0])?; - writer.write_all(&chain_id.as_le_bytes())?; - writer.write_all(&nonce.as_le_bytes())?; - writer.write_all(&key.A.to_bytes()) - } - RouterCommand::Execute { chain_id, nonce, outs } => { - writer.write_all(&[1])?; - writer.write_all(&chain_id.as_le_bytes())?; - writer.write_all(&nonce.as_le_bytes())?; - writer.write_all(&u32::try_from(outs.len()).unwrap().to_le_bytes())?; - for out in outs { - out.write(writer)?; - } - Ok(()) - } - } - } - - pub fn serialize(&self) -> Vec { - let mut res = vec![]; - self.write(&mut res).unwrap(); - res - } -} - -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct SignedRouterCommand { - command: RouterCommand, - signature: Signature, -} - -impl SignedRouterCommand { - pub fn new(key: &PublicKey, command: RouterCommand, signature: &[u8; 64]) -> Option { - let c = Secp256k1::read_F(&mut &signature[.. 32]).ok()?; - let s = Secp256k1::read_F(&mut &signature[32 ..]).ok()?; - let signature = Signature { c, s }; - - if !signature.verify(key, &command.msg()) { - None? 
- } - Some(SignedRouterCommand { command, signature }) - } - - pub fn command(&self) -> &RouterCommand { - &self.command - } - - pub fn signature(&self) -> &Signature { - &self.signature - } - - pub fn read(reader: &mut R) -> io::Result { - let command = RouterCommand::read(reader)?; - - let mut sig = [0; 64]; - reader.read_exact(&mut sig)?; - let signature = Signature::from_bytes(sig)?; - - Ok(SignedRouterCommand { command, signature }) - } - - pub fn write(&self, writer: &mut W) -> io::Result<()> { - self.command.write(writer)?; - writer.write_all(&self.signature.to_bytes()) - } -} - -pub struct RouterCommandMachine { - key: PublicKey, - command: RouterCommand, - machine: AlgorithmMachine>, -} - -impl RouterCommandMachine { - pub fn new(keys: ThresholdKeys, command: RouterCommand) -> Option { - // The Schnorr algorithm should be fine without this, even when using the IETF variant - // If this is better and more comprehensive, we should do it, even if not necessary - let mut transcript = RecommendedTranscript::new(b"ethereum-serai RouterCommandMachine v0.1"); - let key = keys.group_key(); - transcript.append_message(b"key", key.to_bytes()); - transcript.append_message(b"command", command.serialize()); - - Some(Self { - key: PublicKey::new(key)?, - command, - machine: AlgorithmMachine::new(Schnorr::new(transcript), keys), - }) - } -} - -impl PreprocessMachine for RouterCommandMachine { - type Preprocess = Preprocess; - type Signature = SignedRouterCommand; - type SignMachine = RouterCommandSignMachine; - - fn preprocess( - self, - rng: &mut R, - ) -> (Self::SignMachine, Self::Preprocess) { - let (machine, preprocess) = self.machine.preprocess(rng); - - (RouterCommandSignMachine { key: self.key, command: self.command, machine }, preprocess) - } -} - -pub struct RouterCommandSignMachine { - key: PublicKey, - command: RouterCommand, - machine: AlgorithmSignMachine>, -} - -impl SignMachine for RouterCommandSignMachine { - type Params = (); - type Keys = ThresholdKeys; - type Preprocess = Preprocess; - type SignatureShare = SignatureShare; - type SignatureMachine = RouterCommandSignatureMachine; - - fn cache(self) -> CachedPreprocess { - unimplemented!( - "RouterCommand machines don't support caching their preprocesses due to {}", - "being already bound to a specific command" - ); - } - - fn from_cache( - (): (), - _: ThresholdKeys, - _: CachedPreprocess, - ) -> (Self, Self::Preprocess) { - unimplemented!( - "RouterCommand machines don't support caching their preprocesses due to {}", - "being already bound to a specific command" - ); - } - - fn read_preprocess(&self, reader: &mut R) -> io::Result { - self.machine.read_preprocess(reader) - } - - fn sign( - self, - commitments: HashMap, - msg: &[u8], - ) -> Result<(RouterCommandSignatureMachine, Self::SignatureShare), FrostError> { - if !msg.is_empty() { - panic!("message was passed to a RouterCommand machine when it generates its own"); - } - - let (machine, share) = self.machine.sign(commitments, &self.command.msg())?; - - Ok((RouterCommandSignatureMachine { key: self.key, command: self.command, machine }, share)) - } -} - -pub struct RouterCommandSignatureMachine { - key: PublicKey, - command: RouterCommand, - machine: - AlgorithmSignatureMachine>, -} - -impl SignatureMachine for RouterCommandSignatureMachine { - type SignatureShare = SignatureShare; - - fn read_share(&self, reader: &mut R) -> io::Result { - self.machine.read_share(reader) - } - - fn complete( - self, - shares: HashMap, - ) -> Result { - let sig = 
self.machine.complete(shares)?; - let signature = Signature::new(&self.key, &self.command.msg(), sig) - .expect("machine produced an invalid signature"); - Ok(SignedRouterCommand { command: self.command, signature }) - } -} diff --git a/networks/ethereum/src/router.rs b/networks/ethereum/src/router.rs deleted file mode 100644 index c569d4094..000000000 --- a/networks/ethereum/src/router.rs +++ /dev/null @@ -1,443 +0,0 @@ -use std::{sync::Arc, io, collections::HashSet}; - -use k256::{ - elliptic_curve::{group::GroupEncoding, sec1}, - ProjectivePoint, -}; - -use alloy_core::primitives::{hex::FromHex, Address, U256, Bytes, TxKind}; -#[cfg(test)] -use alloy_core::primitives::B256; -use alloy_consensus::TxLegacy; - -use alloy_sol_types::{SolValue, SolConstructor, SolCall, SolEvent}; - -use alloy_rpc_types_eth::Filter; -#[cfg(test)] -use alloy_rpc_types_eth::{BlockId, TransactionRequest, TransactionInput}; -use alloy_simple_request_transport::SimpleRequest; -use alloy_provider::{Provider, RootProvider}; - -pub use crate::{ - Error, - crypto::{PublicKey, Signature}, - abi::{erc20::Transfer, router as abi}, -}; -use abi::{SeraiKeyUpdated, InInstruction as InInstructionEvent, Executed as ExecutedEvent}; - -#[derive(Clone, PartialEq, Eq, Debug)] -pub enum Coin { - Ether, - Erc20([u8; 20]), -} - -impl Coin { - pub fn read(reader: &mut R) -> io::Result { - let mut kind = [0xff]; - reader.read_exact(&mut kind)?; - Ok(match kind[0] { - 0 => Coin::Ether, - 1 => { - let mut address = [0; 20]; - reader.read_exact(&mut address)?; - Coin::Erc20(address) - } - _ => Err(io::Error::other("unrecognized Coin type"))?, - }) - } - - pub fn write(&self, writer: &mut W) -> io::Result<()> { - match self { - Coin::Ether => writer.write_all(&[0]), - Coin::Erc20(token) => { - writer.write_all(&[1])?; - writer.write_all(token) - } - } - } -} - -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct InInstruction { - pub id: ([u8; 32], u64), - pub from: [u8; 20], - pub coin: Coin, - pub amount: U256, - pub data: Vec, - pub key_at_end_of_block: ProjectivePoint, -} - -impl InInstruction { - pub fn read(reader: &mut R) -> io::Result { - let id = { - let mut id_hash = [0; 32]; - reader.read_exact(&mut id_hash)?; - let mut id_pos = [0; 8]; - reader.read_exact(&mut id_pos)?; - let id_pos = u64::from_le_bytes(id_pos); - (id_hash, id_pos) - }; - - let mut from = [0; 20]; - reader.read_exact(&mut from)?; - - let coin = Coin::read(reader)?; - let mut amount = [0; 32]; - reader.read_exact(&mut amount)?; - let amount = U256::from_le_slice(&amount); - - let mut data_len = [0; 4]; - reader.read_exact(&mut data_len)?; - let data_len = usize::try_from(u32::from_le_bytes(data_len)) - .map_err(|_| io::Error::other("InInstruction data exceeded 2**32 in length"))?; - let mut data = vec![0; data_len]; - reader.read_exact(&mut data)?; - - let mut key_at_end_of_block = ::Repr::default(); - reader.read_exact(&mut key_at_end_of_block)?; - let key_at_end_of_block = Option::from(ProjectivePoint::from_bytes(&key_at_end_of_block)) - .ok_or(io::Error::other("InInstruction had key at end of block which wasn't valid"))?; - - Ok(InInstruction { id, from, coin, amount, data, key_at_end_of_block }) - } - - pub fn write(&self, writer: &mut W) -> io::Result<()> { - writer.write_all(&self.id.0)?; - writer.write_all(&self.id.1.to_le_bytes())?; - - writer.write_all(&self.from)?; - - self.coin.write(writer)?; - writer.write_all(&self.amount.as_le_bytes())?; - - writer.write_all( - &u32::try_from(self.data.len()) - .map_err(|_| { - io::Error::other("InInstruction 
being written had data exceeding 2**32 in length") - })? - .to_le_bytes(), - )?; - writer.write_all(&self.data)?; - - writer.write_all(&self.key_at_end_of_block.to_bytes()) - } -} - -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Executed { - pub tx_id: [u8; 32], - pub nonce: u64, - pub signature: [u8; 64], -} - -/// The contract Serai uses to manage its state. -#[derive(Clone, Debug)] -pub struct Router(Arc>, Address); -impl Router { - pub(crate) fn code() -> Vec { - let bytecode = include_str!("../artifacts/Router.bin"); - Bytes::from_hex(bytecode).expect("compiled-in Router bytecode wasn't valid hex").to_vec() - } - - pub(crate) fn init_code(key: &PublicKey) -> Vec { - let mut bytecode = Self::code(); - // Append the constructor arguments - bytecode.extend((abi::constructorCall { _seraiKey: key.eth_repr().into() }).abi_encode()); - bytecode - } - - // This isn't pub in order to force users to use `Deployer::find_router`. - pub(crate) fn new(provider: Arc>, address: Address) -> Self { - Self(provider, address) - } - - pub fn address(&self) -> [u8; 20] { - **self.1 - } - - /// Get the key for Serai at the specified block. - #[cfg(test)] - pub async fn serai_key(&self, at: [u8; 32]) -> Result { - let call = TransactionRequest::default() - .to(self.1) - .input(TransactionInput::new(abi::seraiKeyCall::new(()).abi_encode().into())); - let bytes = self - .0 - .call(&call) - .block(BlockId::Hash(B256::from(at).into())) - .await - .map_err(|_| Error::ConnectionError)?; - let res = - abi::seraiKeyCall::abi_decode_returns(&bytes, true).map_err(|_| Error::ConnectionError)?; - PublicKey::from_eth_repr(res._0.0).ok_or(Error::ConnectionError) - } - - /// Get the message to be signed in order to update the key for Serai. - pub(crate) fn update_serai_key_message(chain_id: U256, nonce: U256, key: &PublicKey) -> Vec { - let mut buffer = b"updateSeraiKey".to_vec(); - buffer.extend(&chain_id.to_be_bytes::<32>()); - buffer.extend(&nonce.to_be_bytes::<32>()); - buffer.extend(&key.eth_repr()); - buffer - } - - /// Update the key representing Serai. - pub fn update_serai_key(&self, public_key: &PublicKey, sig: &Signature) -> TxLegacy { - // TODO: Set a more accurate gas - TxLegacy { - to: TxKind::Call(self.1), - input: abi::updateSeraiKeyCall::new((public_key.eth_repr().into(), sig.into())) - .abi_encode() - .into(), - gas_limit: 100_000, - ..Default::default() - } - } - - /// Get the current nonce for the published batches. - #[cfg(test)] - pub async fn nonce(&self, at: [u8; 32]) -> Result { - let call = TransactionRequest::default() - .to(self.1) - .input(TransactionInput::new(abi::nonceCall::new(()).abi_encode().into())); - let bytes = self - .0 - .call(&call) - .block(BlockId::Hash(B256::from(at).into())) - .await - .map_err(|_| Error::ConnectionError)?; - let res = - abi::nonceCall::abi_decode_returns(&bytes, true).map_err(|_| Error::ConnectionError)?; - Ok(res._0) - } - - /// Get the message to be signed in order to update the key for Serai. - pub(crate) fn execute_message( - chain_id: U256, - nonce: U256, - outs: Vec, - ) -> Vec { - ("execute".to_string(), chain_id, nonce, outs).abi_encode_params() - } - - /// Execute a batch of `OutInstruction`s. 
- pub fn execute(&self, outs: &[abi::OutInstruction], sig: &Signature) -> TxLegacy { - TxLegacy { - to: TxKind::Call(self.1), - input: abi::executeCall::new((outs.to_vec(), sig.into())).abi_encode().into(), - // TODO - gas_limit: 100_000 + ((200_000 + 10_000) * u128::try_from(outs.len()).unwrap()), - ..Default::default() - } - } - - pub async fn key_at_end_of_block(&self, block: u64) -> Result, Error> { - let filter = Filter::new().from_block(0).to_block(block).address(self.1); - let filter = filter.event_signature(SeraiKeyUpdated::SIGNATURE_HASH); - let all_keys = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?; - if all_keys.is_empty() { - return Ok(None); - }; - - let last_key_x_coordinate_log = all_keys.last().ok_or(Error::ConnectionError)?; - let last_key_x_coordinate = last_key_x_coordinate_log - .log_decode::() - .map_err(|_| Error::ConnectionError)? - .inner - .data - .key; - - let mut compressed_point = ::Repr::default(); - compressed_point[0] = u8::from(sec1::Tag::CompressedEvenY); - compressed_point[1 ..].copy_from_slice(last_key_x_coordinate.as_slice()); - - let key = - Option::from(ProjectivePoint::from_bytes(&compressed_point)).ok_or(Error::ConnectionError)?; - Ok(Some(key)) - } - - pub async fn in_instructions( - &self, - block: u64, - allowed_tokens: &HashSet<[u8; 20]>, - ) -> Result, Error> { - let Some(key_at_end_of_block) = self.key_at_end_of_block(block).await? else { - return Ok(vec![]); - }; - - let filter = Filter::new().from_block(block).to_block(block).address(self.1); - let filter = filter.event_signature(InInstructionEvent::SIGNATURE_HASH); - let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?; - - let mut transfer_check = HashSet::new(); - let mut in_instructions = vec![]; - for log in logs { - // Double check the address which emitted this log - if log.address() != self.1 { - Err(Error::ConnectionError)?; - } - - let id = ( - log.block_hash.ok_or(Error::ConnectionError)?.into(), - log.log_index.ok_or(Error::ConnectionError)?, - ); - - let tx_hash = log.transaction_hash.ok_or(Error::ConnectionError)?; - let tx = self - .0 - .get_transaction_by_hash(tx_hash) - .await - .ok() - .flatten() - .ok_or(Error::ConnectionError)?; - - let log = - log.log_decode::().map_err(|_| Error::ConnectionError)?.inner.data; - - let coin = if log.coin.0 == [0; 20] { - Coin::Ether - } else { - let token = *log.coin.0; - - if !allowed_tokens.contains(&token) { - continue; - } - - // If this also counts as a top-level transfer via the token, drop it - // - // Necessary in order to handle a potential edge case with some theoretical token - // implementations - // - // This will either let it be handled by the top-level transfer hook or will drop it - // entirely on the side of caution - if tx.to == Some(token.into()) { - continue; - } - - // Get all logs for this TX - let receipt = self - .0 - .get_transaction_receipt(tx_hash) - .await - .map_err(|_| Error::ConnectionError)? 
- .ok_or(Error::ConnectionError)?; - let tx_logs = receipt.inner.logs(); - - // Find a matching transfer log - let mut found_transfer = false; - for tx_log in tx_logs { - let log_index = tx_log.log_index.ok_or(Error::ConnectionError)?; - // Ensure we didn't already use this transfer to check a distinct InInstruction event - if transfer_check.contains(&log_index) { - continue; - } - - // Check if this log is from the token we expected to be transferred - if tx_log.address().0 != token { - continue; - } - // Check if this is a transfer log - // https://github.com/alloy-rs/core/issues/589 - if tx_log.topics()[0] != Transfer::SIGNATURE_HASH { - continue; - } - let Ok(transfer) = Transfer::decode_log(&tx_log.inner.clone(), true) else { continue }; - // Check if this is a transfer to us for the expected amount - if (transfer.to == self.1) && (transfer.value == log.amount) { - transfer_check.insert(log_index); - found_transfer = true; - break; - } - } - if !found_transfer { - // This shouldn't be a ConnectionError - // This is an exploit, a non-conforming ERC20, or an invalid connection - // This should halt the process which is sufficient, yet this is sub-optimal - // TODO - Err(Error::ConnectionError)?; - } - - Coin::Erc20(token) - }; - - in_instructions.push(InInstruction { - id, - from: *log.from.0, - coin, - amount: log.amount, - data: log.instruction.as_ref().to_vec(), - key_at_end_of_block, - }); - } - - Ok(in_instructions) - } - - pub async fn executed_commands(&self, block: u64) -> Result, Error> { - let mut res = vec![]; - - { - let filter = Filter::new().from_block(block).to_block(block).address(self.1); - let filter = filter.event_signature(SeraiKeyUpdated::SIGNATURE_HASH); - let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?; - - for log in logs { - // Double check the address which emitted this log - if log.address() != self.1 { - Err(Error::ConnectionError)?; - } - - let tx_id = log.transaction_hash.ok_or(Error::ConnectionError)?.into(); - - let log = - log.log_decode::().map_err(|_| Error::ConnectionError)?.inner.data; - - let mut signature = [0; 64]; - signature[.. 32].copy_from_slice(log.signature.c.as_ref()); - signature[32 ..].copy_from_slice(log.signature.s.as_ref()); - res.push(Executed { - tx_id, - nonce: log.nonce.try_into().map_err(|_| Error::ConnectionError)?, - signature, - }); - } - } - - { - let filter = Filter::new().from_block(block).to_block(block).address(self.1); - let filter = filter.event_signature(ExecutedEvent::SIGNATURE_HASH); - let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?; - - for log in logs { - // Double check the address which emitted this log - if log.address() != self.1 { - Err(Error::ConnectionError)?; - } - - let tx_id = log.transaction_hash.ok_or(Error::ConnectionError)?.into(); - - let log = log.log_decode::().map_err(|_| Error::ConnectionError)?.inner.data; - - let mut signature = [0; 64]; - signature[.. 
32].copy_from_slice(log.signature.c.as_ref()); - signature[32 ..].copy_from_slice(log.signature.s.as_ref()); - res.push(Executed { - tx_id, - nonce: log.nonce.try_into().map_err(|_| Error::ConnectionError)?, - signature, - }); - } - } - - Ok(res) - } - - #[cfg(feature = "tests")] - pub fn key_updated_filter(&self) -> Filter { - Filter::new().address(self.1).event_signature(SeraiKeyUpdated::SIGNATURE_HASH) - } - #[cfg(feature = "tests")] - pub fn executed_filter(&self) -> Filter { - Filter::new().address(self.1).event_signature(ExecutedEvent::SIGNATURE_HASH) - } -} diff --git a/networks/ethereum/src/tests/abi/mod.rs b/networks/ethereum/src/tests/abi/mod.rs deleted file mode 100644 index 57ea88116..000000000 --- a/networks/ethereum/src/tests/abi/mod.rs +++ /dev/null @@ -1,13 +0,0 @@ -use alloy_sol_types::sol; - -#[rustfmt::skip] -#[allow(warnings)] -#[allow(needless_pass_by_value)] -#[allow(clippy::all)] -#[allow(clippy::ignored_unit_patterns)] -#[allow(clippy::redundant_closure_for_method_calls)] -mod schnorr_container { - use super::*; - sol!("src/tests/contracts/Schnorr.sol"); -} -pub(crate) use schnorr_container::TestSchnorr as schnorr; diff --git a/networks/ethereum/src/tests/contracts/Schnorr.sol b/networks/ethereum/src/tests/contracts/Schnorr.sol deleted file mode 100644 index 832cd2fee..000000000 --- a/networks/ethereum/src/tests/contracts/Schnorr.sol +++ /dev/null @@ -1,15 +0,0 @@ -// SPDX-License-Identifier: AGPLv3 -pragma solidity ^0.8.0; - -import "../../../contracts/Schnorr.sol"; - -contract TestSchnorr { - function verify( - bytes32 px, - bytes calldata message, - bytes32 c, - bytes32 s - ) external pure returns (bool) { - return Schnorr.verify(px, message, c, s); - } -} diff --git a/networks/ethereum/src/tests/crypto.rs b/networks/ethereum/src/tests/crypto.rs deleted file mode 100644 index a668b2d6d..000000000 --- a/networks/ethereum/src/tests/crypto.rs +++ /dev/null @@ -1,105 +0,0 @@ -use rand_core::OsRng; - -use group::ff::{Field, PrimeField}; -use k256::{ - ecdsa::{ - self, hazmat::SignPrimitive, signature::hazmat::PrehashVerifier, SigningKey, VerifyingKey, - }, - Scalar, ProjectivePoint, -}; - -use frost::{ - curve::{Ciphersuite, Secp256k1}, - algorithm::{Hram, IetfSchnorr}, - tests::{algorithm_machines, sign}, -}; - -use crate::{crypto::*, tests::key_gen}; - -// The ecrecover opcode, yet with parity replacing v -pub(crate) fn ecrecover(message: Scalar, odd_y: bool, r: Scalar, s: Scalar) -> Option<[u8; 20]> { - let sig = ecdsa::Signature::from_scalars(r, s).ok()?; - let message: [u8; 32] = message.to_repr().into(); - alloy_core::primitives::Signature::from_signature_and_parity( - sig, - alloy_core::primitives::Parity::Parity(odd_y), - ) - .ok()? 
- .recover_address_from_prehash(&alloy_core::primitives::B256::from(message)) - .ok() - .map(Into::into) -} - -#[test] -fn test_ecrecover() { - let private = SigningKey::random(&mut OsRng); - let public = VerifyingKey::from(&private); - - // Sign the signature - const MESSAGE: &[u8] = b"Hello, World!"; - let (sig, recovery_id) = private - .as_nonzero_scalar() - .try_sign_prehashed( - ::F::random(&mut OsRng), - &keccak256(MESSAGE).into(), - ) - .unwrap(); - - // Sanity check the signature verifies - #[allow(clippy::unit_cmp)] // Intended to assert this wasn't changed to Result - { - assert_eq!(public.verify_prehash(&keccak256(MESSAGE), &sig).unwrap(), ()); - } - - // Perform the ecrecover - assert_eq!( - ecrecover( - hash_to_scalar(MESSAGE), - u8::from(recovery_id.unwrap().is_y_odd()) == 1, - *sig.r(), - *sig.s() - ) - .unwrap(), - address(&ProjectivePoint::from(public.as_affine())) - ); -} - -// Run the sign test with the EthereumHram -#[test] -fn test_signing() { - let (keys, _) = key_gen(); - - const MESSAGE: &[u8] = b"Hello, World!"; - - let algo = IetfSchnorr::::ietf(); - let _sig = - sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE); -} - -#[allow(non_snake_case)] -pub fn preprocess_signature_for_ecrecover( - R: ProjectivePoint, - public_key: &PublicKey, - m: &[u8], - s: Scalar, -) -> (Scalar, Scalar) { - let c = EthereumHram::hram(&R, &public_key.A, m); - let sa = -(s * public_key.px); - let ca = -(c * public_key.px); - (sa, ca) -} - -#[test] -fn test_ecrecover_hack() { - let (keys, public_key) = key_gen(); - - const MESSAGE: &[u8] = b"Hello, World!"; - - let algo = IetfSchnorr::::ietf(); - let sig = - sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE); - - let (sa, ca) = preprocess_signature_for_ecrecover(sig.R, &public_key, MESSAGE, sig.s); - let q = ecrecover(sa, false, public_key.px, ca).unwrap(); - assert_eq!(q, address(&sig.R)); -} diff --git a/networks/ethereum/src/tests/schnorr.rs b/networks/ethereum/src/tests/schnorr.rs deleted file mode 100644 index 2c72ed19d..000000000 --- a/networks/ethereum/src/tests/schnorr.rs +++ /dev/null @@ -1,93 +0,0 @@ -use std::sync::Arc; - -use rand_core::OsRng; - -use group::ff::PrimeField; -use k256::Scalar; - -use frost::{ - curve::Secp256k1, - algorithm::IetfSchnorr, - tests::{algorithm_machines, sign}, -}; - -use alloy_core::primitives::Address; - -use alloy_sol_types::SolCall; - -use alloy_rpc_types_eth::{TransactionInput, TransactionRequest}; -use alloy_simple_request_transport::SimpleRequest; -use alloy_rpc_client::ClientBuilder; -use alloy_provider::{Provider, RootProvider}; - -use alloy_node_bindings::{Anvil, AnvilInstance}; - -use crate::{ - Error, - crypto::*, - tests::{key_gen, deploy_contract, abi::schnorr as abi}, -}; - -async fn setup_test() -> (AnvilInstance, Arc>, Address) { - let anvil = Anvil::new().spawn(); - - let provider = RootProvider::new( - ClientBuilder::default().transport(SimpleRequest::new(anvil.endpoint()), true), - ); - let wallet = anvil.keys()[0].clone().into(); - let client = Arc::new(provider); - - let address = deploy_contract(client.clone(), &wallet, "TestSchnorr").await.unwrap(); - (anvil, client, address) -} - -#[tokio::test] -async fn test_deploy_contract() { - setup_test().await; -} - -pub async fn call_verify( - provider: &RootProvider, - contract: Address, - public_key: &PublicKey, - message: &[u8], - signature: &Signature, -) -> Result<(), Error> { - let px: [u8; 32] = public_key.px.to_repr().into(); - let c_bytes: 
[u8; 32] = signature.c.to_repr().into(); - let s_bytes: [u8; 32] = signature.s.to_repr().into(); - let call = TransactionRequest::default().to(contract).input(TransactionInput::new( - abi::verifyCall::new((px.into(), message.to_vec().into(), c_bytes.into(), s_bytes.into())) - .abi_encode() - .into(), - )); - let bytes = provider.call(&call).await.map_err(|_| Error::ConnectionError)?; - let res = - abi::verifyCall::abi_decode_returns(&bytes, true).map_err(|_| Error::ConnectionError)?; - - if res._0 { - Ok(()) - } else { - Err(Error::InvalidSignature) - } -} - -#[tokio::test] -async fn test_ecrecover_hack() { - let (_anvil, client, contract) = setup_test().await; - - let (keys, public_key) = key_gen(); - - const MESSAGE: &[u8] = b"Hello, World!"; - - let algo = IetfSchnorr::::ietf(); - let sig = - sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE); - let sig = Signature::new(&public_key, MESSAGE, sig).unwrap(); - - call_verify(&client, contract, &public_key, MESSAGE, &sig).await.unwrap(); - // Test an invalid signature fails - let mut sig = sig; - sig.s += Scalar::ONE; - assert!(call_verify(&client, contract, &public_key, MESSAGE, &sig).await.is_err()); -} diff --git a/networks/monero/rpc/src/lib.rs b/networks/monero/rpc/src/lib.rs index 4c5055ccc..3c8d337a9 100644 --- a/networks/monero/rpc/src/lib.rs +++ b/networks/monero/rpc/src/lib.rs @@ -249,7 +249,7 @@ fn rpc_point(point: &str) -> Result { /// While no implementors are directly provided, [monero-simple-request-rpc]( /// https://github.com/serai-dex/serai/tree/develop/networks/monero/rpc/simple-request /// ) is recommended. -pub trait Rpc: Sync + Clone + Debug { +pub trait Rpc: Sync + Clone { /// Perform a POST request to the specified route with the specified body. /// /// The implementor is left to handle anything such as authentication. @@ -1003,10 +1003,10 @@ pub trait Rpc: Sync + Clone + Debug { /// An implementation is provided for any satisfier of `Rpc`. It is not recommended to use an `Rpc` /// object to satisfy this. This should be satisfied by a local store of the output distribution, /// both for performance and to prevent potential attacks a remote node can perform. -pub trait DecoyRpc: Sync + Clone + Debug { +pub trait DecoyRpc: Sync { /// Get the height the output distribution ends at. /// - /// This is equivalent to the hight of the blockchain it's for. This is intended to be cheaper + /// This is equivalent to the height of the blockchain it's for. This is intended to be cheaper /// than fetching the entire output distribution. fn get_output_distribution_end_height( &self, diff --git a/networks/monero/src/block.rs b/networks/monero/src/block.rs index 62a77f8b7..15a8d1fc5 100644 --- a/networks/monero/src/block.rs +++ b/networks/monero/src/block.rs @@ -79,10 +79,13 @@ pub struct Block { } impl Block { - /// The zero-index position of this block within the blockchain. + /// The zero-indexed position of this block within the blockchain. /// /// This information comes from the Block's miner transaction. If the miner transaction isn't - /// structed as expected, this will return None. + /// structed as expected, this will return None. This will return Some for any Block which would + /// pass the consensus rules. + // https://github.com/monero-project/monero/blob/a1dc85c5373a30f14aaf7dcfdd95f5a7375d3623 + // /src/cryptonote_core/blockchain.cpp#L1365-L1382 pub fn number(&self) -> Option { match &self.miner_transaction { Transaction::V1 { prefix, .. } | Transaction::V2 { prefix, .. 
} => {
diff --git a/networks/monero/wallet/src/send/mod.rs b/networks/monero/wallet/src/send/mod.rs
index 87d98d69f..3bd883dfe 100644
--- a/networks/monero/wallet/src/send/mod.rs
+++ b/networks/monero/wallet/src/send/mod.rs
@@ -100,10 +100,11 @@ impl Change {
   ///
   /// 1) The change in the TX is shunted to the fee (making it fingerprintable).
   ///
-  /// 2) If there are two outputs in the TX, Monero would create a payment ID for the non-change
-  ///    output so an observer can't tell apart TXs with a payment ID from TXs without a payment
-  ///    ID. monero-wallet will simply not create a payment ID in this case, revealing it's a
-  ///    monero-wallet TX without change.
+  /// 2) In two-output transactions, where the payment address doesn't have a payment ID, wallet2
+  ///    includes an encrypted dummy payment ID for the non-change output in order to not allow
+  ///    differentiating whether transactions send to addresses with payment IDs or not. monero-wallet
+  ///    includes a dummy payment ID which at least one recipient will identify as not the expected
+  ///    dummy payment ID, revealing to the recipient(s) the sender is using non-wallet2 software.
   pub fn fingerprintable(address: Option<MoneroAddress>) -> Change {
     if let Some(address) = address {
       Change(Some(ChangeEnum::AddressOnly(address)))
diff --git a/networks/monero/wallet/src/send/tx.rs b/networks/monero/wallet/src/send/tx.rs
index 659622115..0ebd47f10 100644
--- a/networks/monero/wallet/src/send/tx.rs
+++ b/networks/monero/wallet/src/send/tx.rs
@@ -76,10 +76,18 @@ impl SignableTransaction {
         PaymentId::Encrypted(id).write(&mut id_vec).unwrap();
         extra.push_nonce(id_vec);
       } else {
-        // If there's no payment ID, we push a dummy (as wallet2 does) if there's only one payment
-        if (self.payments.len() == 2) &&
-          self.payments.iter().any(|payment| matches!(payment, InternalPayment::Change(_)))
-        {
+        /*
+          If there's no payment ID, we push a dummy (as wallet2 does) to the first payment.
+
+          This does cause a random payment ID for the other recipient (a documented fingerprint).
+          Functionally, random payment IDs should be fine as wallet2 will trigger this same behavior
+          (a random payment ID being seen by the recipient) with a batch send if one of the recipient
+          addresses has a payment ID.
+
+          The alternative would be to not include any payment ID, fingerprinting to the entire
+          blockchain that this is non-standard wallet software (instead of just a single recipient).
+ */ + if self.payments.len() == 2 { let (_, payment_id_xor) = self .payments .iter() diff --git a/orchestration/dev/networks/bitcoin/run.sh b/orchestration/dev/networks/bitcoin/run.sh index da7c95a8b..bec89fa98 100755 --- a/orchestration/dev/networks/bitcoin/run.sh +++ b/orchestration/dev/networks/bitcoin/run.sh @@ -3,7 +3,7 @@ RPC_USER="${RPC_USER:=serai}" RPC_PASS="${RPC_PASS:=seraidex}" -bitcoind -txindex -regtest --port=8333 \ +bitcoind -regtest --port=8333 \ -rpcuser=$RPC_USER -rpcpassword=$RPC_PASS \ -rpcbind=0.0.0.0 -rpcallowip=0.0.0.0/0 -rpcport=8332 \ - $1 + $@ diff --git a/orchestration/dev/networks/ethereum-relayer/.folder b/orchestration/dev/networks/ethereum-relayer/.folder index 675d44382..e69de29bb 100644 --- a/orchestration/dev/networks/ethereum-relayer/.folder +++ b/orchestration/dev/networks/ethereum-relayer/.folder @@ -1,11 +0,0 @@ -#!/bin/sh - -RPC_USER="${RPC_USER:=serai}" -RPC_PASS="${RPC_PASS:=seraidex}" - -# Run Monero -monerod --non-interactive --regtest --offline --fixed-difficulty=1 \ - --no-zmq --rpc-bind-ip=0.0.0.0 --rpc-bind-port=18081 --confirm-external-bind \ - --rpc-access-control-origins "*" --disable-rpc-ban \ - --rpc-login=$RPC_USER:$RPC_PASS \ - $1 diff --git a/orchestration/dev/networks/monero/run.sh b/orchestration/dev/networks/monero/run.sh index 75a93e464..1186c4d17 100755 --- a/orchestration/dev/networks/monero/run.sh +++ b/orchestration/dev/networks/monero/run.sh @@ -8,4 +8,4 @@ monerod --non-interactive --regtest --offline --fixed-difficulty=1 \ --no-zmq --rpc-bind-ip=0.0.0.0 --rpc-bind-port=18081 --confirm-external-bind \ --rpc-access-control-origins "*" --disable-rpc-ban \ --rpc-login=$RPC_USER:$RPC_PASS --log-level 2 \ - $1 + $@ diff --git a/orchestration/src/processor.rs b/orchestration/src/processor.rs index 3387c4ede..00f9243dd 100644 --- a/orchestration/src/processor.rs +++ b/orchestration/src/processor.rs @@ -21,8 +21,8 @@ pub fn processor( if coin == "ethereum" { r#" RUN cargo install svm-rs -RUN svm install 0.8.25 -RUN svm use 0.8.25 +RUN svm install 0.8.26 +RUN svm use 0.8.26 "# } else { "" diff --git a/orchestration/testnet/networks/bitcoin/run.sh b/orchestration/testnet/networks/bitcoin/run.sh index dbec375ac..6544243b5 100755 --- a/orchestration/testnet/networks/bitcoin/run.sh +++ b/orchestration/testnet/networks/bitcoin/run.sh @@ -3,7 +3,7 @@ RPC_USER="${RPC_USER:=serai}" RPC_PASS="${RPC_PASS:=seraidex}" -bitcoind -txindex -testnet -port=8333 \ +bitcoind -testnet -port=8333 \ -rpcuser=$RPC_USER -rpcpassword=$RPC_PASS \ -rpcbind=0.0.0.0 -rpcallowip=0.0.0.0/0 -rpcport=8332 \ --datadir=/volume diff --git a/orchestration/testnet/networks/ethereum-relayer/.folder b/orchestration/testnet/networks/ethereum-relayer/.folder index 675d44382..e69de29bb 100644 --- a/orchestration/testnet/networks/ethereum-relayer/.folder +++ b/orchestration/testnet/networks/ethereum-relayer/.folder @@ -1,11 +0,0 @@ -#!/bin/sh - -RPC_USER="${RPC_USER:=serai}" -RPC_PASS="${RPC_PASS:=seraidex}" - -# Run Monero -monerod --non-interactive --regtest --offline --fixed-difficulty=1 \ - --no-zmq --rpc-bind-ip=0.0.0.0 --rpc-bind-port=18081 --confirm-external-bind \ - --rpc-access-control-origins "*" --disable-rpc-ban \ - --rpc-login=$RPC_USER:$RPC_PASS \ - $1 diff --git a/processor/Cargo.toml b/processor/Cargo.toml deleted file mode 100644 index fa2f643c3..000000000 --- a/processor/Cargo.toml +++ /dev/null @@ -1,97 +0,0 @@ -[package] -name = "serai-processor" -version = "0.1.0" -description = "Multichain processor premised on canonicity to reach distributed consensus 
automatically" -license = "AGPL-3.0-only" -repository = "https://github.com/serai-dex/serai/tree/develop/processor" -authors = ["Luke Parker "] -keywords = [] -edition = "2021" -publish = false - -[package.metadata.docs.rs] -all-features = true -rustdoc-args = ["--cfg", "docsrs"] - -[lints] -workspace = true - -[dependencies] -# Macros -async-trait = { version = "0.1", default-features = false } -zeroize = { version = "1", default-features = false, features = ["std"] } -thiserror = { version = "1", default-features = false } - -# Libs -rand_core = { version = "0.6", default-features = false, features = ["std", "getrandom"] } -rand_chacha = { version = "0.3", default-features = false, features = ["std"] } - -# Encoders -const-hex = { version = "1", default-features = false } -hex = { version = "0.4", default-features = false, features = ["std"] } -scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } -borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } -serde_json = { version = "1", default-features = false, features = ["std"] } - -# Cryptography -ciphersuite = { path = "../crypto/ciphersuite", default-features = false, features = ["std", "ristretto"] } - -blake2 = { version = "0.10", default-features = false, features = ["std"] } -transcript = { package = "flexible-transcript", path = "../crypto/transcript", default-features = false, features = ["std"] } -ec-divisors = { package = "ec-divisors", path = "../crypto/evrf/divisors", default-features = false } -dkg = { package = "dkg", path = "../crypto/dkg", default-features = false, features = ["std", "evrf-ristretto"] } -frost = { package = "modular-frost", path = "../crypto/frost", default-features = false, features = ["ristretto"] } -frost-schnorrkel = { path = "../crypto/schnorrkel", default-features = false } - -# Bitcoin/Ethereum -k256 = { version = "^0.13.1", default-features = false, features = ["std"], optional = true } - -# Bitcoin -secp256k1 = { version = "0.29", default-features = false, features = ["std", "global-context", "rand-std"], optional = true } -bitcoin-serai = { path = "../networks/bitcoin", default-features = false, features = ["std"], optional = true } - -# Ethereum -ethereum-serai = { path = "../networks/ethereum", default-features = false, optional = true } - -# Monero -dalek-ff-group = { path = "../crypto/dalek-ff-group", default-features = false, features = ["std"], optional = true } -monero-simple-request-rpc = { path = "../networks/monero/rpc/simple-request", default-features = false, optional = true } -monero-wallet = { path = "../networks/monero/wallet", default-features = false, features = ["std", "multisig", "compile-time-generators"], optional = true } - -# Application -log = { version = "0.4", default-features = false, features = ["std"] } -env_logger = { version = "0.10", default-features = false, features = ["humantime"], optional = true } -tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] } - -zalloc = { path = "../common/zalloc" } -serai-db = { path = "../common/db" } -serai-env = { path = "../common/env", optional = true } -# TODO: Replace with direct usage of primitives -serai-client = { path = "../substrate/client", default-features = false, features = ["serai"] } - -messages = { package = "serai-processor-messages", path = "./messages" } - -message-queue = { package = "serai-message-queue", path = "../message-queue", optional = true } - 
-[dev-dependencies] -frost = { package = "modular-frost", path = "../crypto/frost", features = ["tests"] } - -sp-application-crypto = { git = "https://github.com/serai-dex/substrate", default-features = false, features = ["std"] } - -ethereum-serai = { path = "../networks/ethereum", default-features = false, features = ["tests"] } - -dockertest = "0.5" -serai-docker-tests = { path = "../tests/docker" } - -[features] -secp256k1 = ["k256", "dkg/evrf-secp256k1", "frost/secp256k1"] -bitcoin = ["dep:secp256k1", "secp256k1", "bitcoin-serai", "serai-client/bitcoin"] - -ethereum = ["secp256k1", "ethereum-serai/tests"] - -ed25519 = ["dalek-ff-group", "dkg/evrf-ed25519", "frost/ed25519"] -monero = ["ed25519", "monero-simple-request-rpc", "monero-wallet", "serai-client/monero"] - -binaries = ["env_logger", "serai-env", "message-queue"] -parity-db = ["serai-db/parity-db"] -rocksdb = ["serai-db/rocksdb"] diff --git a/processor/README.md b/processor/README.md index 37d11e0d4..e942f5574 100644 --- a/processor/README.md +++ b/processor/README.md @@ -1,5 +1,5 @@ # Processor -The Serai processor scans a specified external network, communicating with the -coordinator. For details on its exact messaging flow, and overall policies, -please view `docs/processor`. +The Serai processors, built from the libraries here, scan an external network +and report the indexed data to the coordinator. For details on its exact +messaging flow, and overall policies, please view `docs/processor`. diff --git a/processor/TODO/main.rs b/processor/TODO/main.rs new file mode 100644 index 000000000..1458a7fc3 --- /dev/null +++ b/processor/TODO/main.rs @@ -0,0 +1,61 @@ +use messages::{ + coordinator::{ + SubstrateSignableId, PlanMeta, CoordinatorMessage as CoordinatorCoordinatorMessage, + }, + CoordinatorMessage, +}; + +use serai_env as env; + +use message_queue::{Service, client::MessageQueue}; + +mod db; +pub use db::*; + +mod coordinator; +pub use coordinator::*; + +mod multisigs; +use multisigs::{MultisigEvent, MultisigManager}; + +#[cfg(test)] +mod tests; + +async fn handle_coordinator_msg( + txn: &mut D::Transaction<'_>, + network: &N, + coordinator: &mut Co, + tributary_mutable: &mut TributaryMutable, + substrate_mutable: &mut SubstrateMutable, + msg: &Message, +) { + match msg.msg.clone() { + CoordinatorMessage::Substrate(msg) => { + match msg { + messages::substrate::CoordinatorMessage::SubstrateBlock { + context, + block: substrate_block, + burns, + batches, + } => { + // Send SubstrateBlockAck, with relevant plan IDs, before we trigger the signing of these + // plans + if !tributary_mutable.signers.is_empty() { + coordinator + .send(messages::coordinator::ProcessorMessage::SubstrateBlockAck { + block: substrate_block, + plans: to_sign + .iter() + .filter_map(|signable| { + SessionDb::get(txn, signable.0.to_bytes().as_ref()) + .map(|session| PlanMeta { session, id: signable.1 }) + }) + .collect(), + }) + .await; + } + } + } + } + } +} diff --git a/processor/src/tests/addresses.rs b/processor/TODO/tests/addresses.rs similarity index 99% rename from processor/src/tests/addresses.rs rename to processor/TODO/tests/addresses.rs index 3d4d6d4c1..1a06963a0 100644 --- a/processor/src/tests/addresses.rs +++ b/processor/TODO/tests/addresses.rs @@ -1,3 +1,5 @@ +// TODO + use core::{time::Duration, pin::Pin, future::Future}; use std::collections::HashMap; diff --git a/processor/src/tests/batch_signer.rs b/processor/TODO/tests/batch_signer.rs similarity index 99% rename from processor/src/tests/batch_signer.rs rename to 
processor/TODO/tests/batch_signer.rs index dc45ff312..cc5885fc2 100644 --- a/processor/src/tests/batch_signer.rs +++ b/processor/TODO/tests/batch_signer.rs @@ -1,3 +1,5 @@ +// TODO + use std::collections::HashMap; use rand_core::{RngCore, OsRng}; diff --git a/processor/src/tests/cosigner.rs b/processor/TODO/tests/cosigner.rs similarity index 99% rename from processor/src/tests/cosigner.rs rename to processor/TODO/tests/cosigner.rs index a66161bf7..98116bc35 100644 --- a/processor/src/tests/cosigner.rs +++ b/processor/TODO/tests/cosigner.rs @@ -1,3 +1,5 @@ +// TODO + use std::collections::HashMap; use rand_core::{RngCore, OsRng}; diff --git a/processor/src/tests/key_gen.rs b/processor/TODO/tests/key_gen.rs similarity index 99% rename from processor/src/tests/key_gen.rs rename to processor/TODO/tests/key_gen.rs index 43f0de058..116db11e5 100644 --- a/processor/src/tests/key_gen.rs +++ b/processor/TODO/tests/key_gen.rs @@ -1,3 +1,5 @@ +// TODO + use std::collections::HashMap; use zeroize::Zeroizing; diff --git a/processor/src/tests/literal/mod.rs b/processor/TODO/tests/literal/mod.rs similarity index 99% rename from processor/src/tests/literal/mod.rs rename to processor/TODO/tests/literal/mod.rs index d45649d59..b1285e634 100644 --- a/processor/src/tests/literal/mod.rs +++ b/processor/TODO/tests/literal/mod.rs @@ -1,3 +1,5 @@ +// TODO + use dockertest::{ PullPolicy, StartPolicy, LogOptions, LogAction, LogPolicy, LogSource, Image, TestBodySpecification, DockerOperations, DockerTest, diff --git a/processor/src/tests/mod.rs b/processor/TODO/tests/mod.rs similarity index 99% rename from processor/src/tests/mod.rs rename to processor/TODO/tests/mod.rs index 7ab57bdef..4691e523c 100644 --- a/processor/src/tests/mod.rs +++ b/processor/TODO/tests/mod.rs @@ -1,3 +1,5 @@ +// TODO + use std::sync::OnceLock; mod key_gen; diff --git a/processor/src/tests/scanner.rs b/processor/TODO/tests/scanner.rs similarity index 99% rename from processor/src/tests/scanner.rs rename to processor/TODO/tests/scanner.rs index 6421c499a..6ad87f785 100644 --- a/processor/src/tests/scanner.rs +++ b/processor/TODO/tests/scanner.rs @@ -1,3 +1,5 @@ +// TODO + use core::{pin::Pin, time::Duration, future::Future}; use std::sync::Arc; @@ -71,7 +73,7 @@ pub async fn test_scanner( let block_id = block.id(); // Verify the Scanner picked them up - let verify_event = |mut scanner: ScannerHandle| async { + let verify_event = |mut scanner: ScannerHandle| async move { let outputs = match timeout(Duration::from_secs(30), scanner.events.recv()).await.unwrap().unwrap() { ScannerEvent::Block { is_retirement_block, block, outputs } => { diff --git a/processor/src/tests/signer.rs b/processor/TODO/tests/signer.rs similarity index 99% rename from processor/src/tests/signer.rs rename to processor/TODO/tests/signer.rs index 77307ef26..e35a048b0 100644 --- a/processor/src/tests/signer.rs +++ b/processor/TODO/tests/signer.rs @@ -1,3 +1,5 @@ +// TODO + use core::{pin::Pin, future::Future}; use std::collections::HashMap; @@ -184,7 +186,6 @@ pub async fn test_signer( let mut scheduler = N::Scheduler::new::(&mut txn, key, N::NETWORK); let payments = vec![Payment { address: N::external_address(&network, key).await, - data: None, balance: Balance { coin: match N::NETWORK { NetworkId::Serai => panic!("test_signer called with Serai"), diff --git a/processor/src/tests/wallet.rs b/processor/TODO/tests/wallet.rs similarity index 99% rename from processor/src/tests/wallet.rs rename to processor/TODO/tests/wallet.rs index 86a27349d..f78a16f5c 100644 --- 
a/processor/src/tests/wallet.rs +++ b/processor/TODO/tests/wallet.rs @@ -1,3 +1,5 @@ +// TODO + use core::{time::Duration, pin::Pin, future::Future}; use std::collections::HashMap; @@ -88,7 +90,6 @@ pub async fn test_wallet( outputs.clone(), vec![Payment { address: N::external_address(&network, key).await, - data: None, balance: Balance { coin: match N::NETWORK { NetworkId::Serai => panic!("test_wallet called with Serai"), @@ -116,7 +117,6 @@ pub async fn test_wallet( plans[0].payments, vec![Payment { address: N::external_address(&network, key).await, - data: None, balance: Balance { coin: match N::NETWORK { NetworkId::Serai => panic!("test_wallet called with Serai"), diff --git a/processor/bin/Cargo.toml b/processor/bin/Cargo.toml new file mode 100644 index 000000000..116916ab1 --- /dev/null +++ b/processor/bin/Cargo.toml @@ -0,0 +1,50 @@ +[package] +name = "serai-processor-bin" +version = "0.1.0" +description = "Framework for Serai processor binaries" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/bin" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +publish = false + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +zeroize = { version = "1", default-features = false, features = ["std"] } + +hex = { version = "0.4", default-features = false, features = ["std"] } +scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } +borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } + +ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, features = ["std"] } +dkg = { path = "../../crypto/dkg", default-features = false, features = ["std", "evrf-ristretto"] } + +serai-client = { path = "../../substrate/client", default-features = false } + +log = { version = "0.4", default-features = false, features = ["std"] } +env_logger = { version = "0.10", default-features = false, features = ["humantime"] } +tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] } + +serai-env = { path = "../../common/env" } +serai-db = { path = "../../common/db" } + +messages = { package = "serai-processor-messages", path = "../messages" } +key-gen = { package = "serai-processor-key-gen", path = "../key-gen" } + +primitives = { package = "serai-processor-primitives", path = "../primitives" } +scheduler = { package = "serai-processor-scheduler-primitives", path = "../scheduler/primitives" } +scanner = { package = "serai-processor-scanner", path = "../scanner" } +signers = { package = "serai-processor-signers", path = "../signers" } + +message-queue = { package = "serai-message-queue", path = "../../message-queue" } + +[features] +parity-db = ["serai-db/parity-db"] +rocksdb = ["serai-db/rocksdb"] diff --git a/processor/bin/LICENSE b/processor/bin/LICENSE new file mode 100644 index 000000000..41d5a2616 --- /dev/null +++ b/processor/bin/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2022-2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . diff --git a/processor/bin/README.md b/processor/bin/README.md new file mode 100644 index 000000000..858a29252 --- /dev/null +++ b/processor/bin/README.md @@ -0,0 +1,3 @@ +# Serai Processor Bin + +The framework for Serai processor binaries, common to the Serai processors. diff --git a/processor/bin/src/coordinator.rs b/processor/bin/src/coordinator.rs new file mode 100644 index 000000000..e05712cfe --- /dev/null +++ b/processor/bin/src/coordinator.rs @@ -0,0 +1,238 @@ +use core::future::Future; +use std::sync::{LazyLock, Arc, Mutex}; + +use tokio::sync::mpsc; + +use scale::Encode; +use serai_client::{ + primitives::Signature, + validator_sets::primitives::Session, + in_instructions::primitives::{Batch, SignedBatch}, +}; + +use serai_db::{Get, DbTxn, Db, create_db, db_channel}; + +use scanner::ScannerFeed; + +use message_queue::{Service, Metadata, client::MessageQueue}; + +create_db! { + ProcessorBinCoordinator { + SavedMessages: () -> u64, + } +} + +db_channel! { + ProcessorBinCoordinator { + ReceivedCoordinatorMessages: () -> Vec, + } +} + +// A lock to access SentCoordinatorMessages::send +static SEND_LOCK: LazyLock> = LazyLock::new(|| Mutex::new(())); + +db_channel! { + ProcessorBinCoordinator { + SentCoordinatorMessages: () -> Vec, + } +} + +#[derive(Clone)] +pub(crate) struct CoordinatorSend { + db: crate::Db, + sent_message: mpsc::UnboundedSender<()>, +} + +impl CoordinatorSend { + fn send(&mut self, msg: &messages::ProcessorMessage) { + let _lock = SEND_LOCK.lock().unwrap(); + let mut txn = self.db.txn(); + SentCoordinatorMessages::send(&mut txn, &borsh::to_vec(msg).unwrap()); + txn.commit(); + self + .sent_message + .send(()) + .expect("failed to tell the Coordinator tasks there's a new message to send"); + } +} + +pub(crate) struct Coordinator { + received_message: mpsc::UnboundedReceiver<()>, + send: CoordinatorSend, +} + +impl Coordinator { + pub(crate) fn new(db: crate::Db) -> Self { + let (received_message_send, received_message_recv) = mpsc::unbounded_channel(); + let (sent_message_send, mut sent_message_recv) = mpsc::unbounded_channel(); + + let service = Service::Processor(S::NETWORK); + let message_queue = Arc::new(MessageQueue::from_env(service)); + + // Spawn a task to move messages from the message-queue to our database so we can achieve + // atomicity. 
This is the only place we read/ack messages from + tokio::spawn({ + let mut db = db.clone(); + let message_queue = message_queue.clone(); + async move { + loop { + let msg = message_queue.next(Service::Coordinator).await; + + let prior_msg = msg.id.checked_sub(1); + let saved_messages = SavedMessages::get(&db); + /* + This should either be: + A) The message after the message we just saved (as normal) + B) The message we just saved (if we rebooted and failed to ack it) + */ + assert!((saved_messages == prior_msg) || (saved_messages == Some(msg.id))); + if saved_messages < Some(msg.id) { + let mut txn = db.txn(); + ReceivedCoordinatorMessages::send(&mut txn, &msg.msg); + SavedMessages::set(&mut txn, &msg.id); + txn.commit(); + } + // Acknowledge this message + message_queue.ack(Service::Coordinator, msg.id).await; + + // Fire that there's a new message + received_message_send + .send(()) + .expect("failed to tell the Coordinator there's a new message"); + } + } + }); + + // Spawn a task to send messages to the message-queue + tokio::spawn({ + let mut db = db.clone(); + async move { + loop { + let mut txn = db.txn(); + match SentCoordinatorMessages::try_recv(&mut txn) { + Some(msg) => { + let metadata = Metadata { + from: service, + to: Service::Coordinator, + intent: borsh::from_slice::(&msg).unwrap().intent(), + }; + message_queue.queue(metadata, msg).await; + txn.commit(); + } + None => { + let _ = + tokio::time::timeout(core::time::Duration::from_secs(60), sent_message_recv.recv()) + .await; + } + } + } + } + }); + + let send = CoordinatorSend { db, sent_message: sent_message_send }; + Coordinator { received_message: received_message_recv, send } + } + + pub(crate) fn coordinator_send(&self) -> CoordinatorSend { + self.send.clone() + } + + /// Fetch the next message from the Coordinator. + /// + /// This message is guaranteed to have never been handled before, where handling is defined as + /// this `txn` being committed. 
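+  ///
+  /// If no message is currently queued, this waits until one arrives, polling the database-backed
+  /// channel at least once every 60 seconds.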
+ pub(crate) async fn next_message( + &mut self, + txn: &mut impl DbTxn, + ) -> messages::CoordinatorMessage { + loop { + match ReceivedCoordinatorMessages::try_recv(txn) { + Some(msg) => { + return borsh::from_slice(&msg) + .expect("message wasn't a borsh-encoded CoordinatorMessage") + } + None => { + let _ = + tokio::time::timeout(core::time::Duration::from_secs(60), self.received_message.recv()) + .await; + } + } + } + } + + pub(crate) fn send_message(&mut self, msg: &messages::ProcessorMessage) { + self.send.send(msg); + } +} + +impl signers::Coordinator for CoordinatorSend { + type EphemeralError = (); + + fn send( + &mut self, + msg: messages::sign::ProcessorMessage, + ) -> impl Send + Future> { + async move { + self.send(&messages::ProcessorMessage::Sign(msg)); + Ok(()) + } + } + + fn publish_cosign( + &mut self, + block_number: u64, + block: [u8; 32], + signature: Signature, + ) -> impl Send + Future> { + async move { + self.send(&messages::ProcessorMessage::Coordinator( + messages::coordinator::ProcessorMessage::CosignedBlock { + block_number, + block, + signature: signature.encode(), + }, + )); + Ok(()) + } + } + + fn publish_batch( + &mut self, + batch: Batch, + ) -> impl Send + Future> { + async move { + self.send(&messages::ProcessorMessage::Substrate( + messages::substrate::ProcessorMessage::Batch { batch }, + )); + Ok(()) + } + } + + fn publish_signed_batch( + &mut self, + batch: SignedBatch, + ) -> impl Send + Future> { + async move { + self.send(&messages::ProcessorMessage::Coordinator( + messages::coordinator::ProcessorMessage::SignedBatch { batch }, + )); + Ok(()) + } + } + + fn publish_slash_report_signature( + &mut self, + session: Session, + signature: Signature, + ) -> impl Send + Future> { + async move { + self.send(&messages::ProcessorMessage::Coordinator( + messages::coordinator::ProcessorMessage::SignedSlashReport { + session, + signature: signature.encode(), + }, + )); + Ok(()) + } + } +} diff --git a/processor/bin/src/lib.rs b/processor/bin/src/lib.rs new file mode 100644 index 000000000..7d98f8127 --- /dev/null +++ b/processor/bin/src/lib.rs @@ -0,0 +1,316 @@ +use core::cmp::Ordering; + +use zeroize::{Zeroize, Zeroizing}; + +use ciphersuite::{ + group::{ff::PrimeField, GroupEncoding}, + Ciphersuite, Ristretto, +}; +use dkg::evrf::EvrfCurve; + +use serai_client::validator_sets::primitives::Session; + +use serai_env as env; +use serai_db::{Get, DbTxn, Db as DbTrait, create_db, db_channel}; + +use primitives::EncodableG; +use ::key_gen::{KeyGenParams, KeyGen}; +use scheduler::{SignableTransaction, TransactionFor}; +use scanner::{ScannerFeed, Scanner, KeyFor, Scheduler}; +use signers::{TransactionPublisher, Signers}; + +mod coordinator; +use coordinator::Coordinator; + +create_db! { + ProcessorBin { + ExternalKeyForSessionForSigners: (session: Session) -> EncodableG, + } +} + +db_channel! { + ProcessorBin { + KeyToActivate: () -> EncodableG + } +} + +/// The type used for the database. +#[cfg(all(feature = "parity-db", not(feature = "rocksdb")))] +pub type Db = serai_db::ParityDb; +/// The type used for the database. +#[cfg(feature = "rocksdb")] +pub type Db = serai_db::RocksDB; + +/// Initialize the processor. +/// +/// Yields the database. 
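+///
+/// This also installs a panic hook which exits the process if any panic occurs, and initializes
+/// the logger from the `RUST_LOG` environment variable (defaulting to `info`).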
+#[allow(unused_variables, unreachable_code)]
+pub fn init() -> Db {
+  // Override the panic handler with one which will panic if any tokio task panics
+  {
+    let existing = std::panic::take_hook();
+    std::panic::set_hook(Box::new(move |panic| {
+      existing(panic);
+      const MSG: &str = "exiting the process due to a task panicking";
+      println!("{MSG}");
+      log::error!("{MSG}");
+      std::process::exit(1);
+    }));
+  }
+
+  if std::env::var("RUST_LOG").is_err() {
+    std::env::set_var("RUST_LOG", serai_env::var("RUST_LOG").unwrap_or_else(|| "info".to_string()));
+  }
+  env_logger::init();
+
+  #[cfg(all(feature = "parity-db", not(feature = "rocksdb")))]
+  let db =
+    serai_db::new_parity_db(&serai_env::var("DB_PATH").expect("path to DB wasn't specified"));
+  #[cfg(feature = "rocksdb")]
+  let db = serai_db::new_rocksdb(&serai_env::var("DB_PATH").expect("path to DB wasn't specified"));
+  db
+}
+
+/// The URL for the external network's node.
+pub fn url() -> String {
+  let login = env::var("NETWORK_RPC_LOGIN").expect("network RPC login wasn't specified");
+  let hostname = env::var("NETWORK_RPC_HOSTNAME").expect("network RPC hostname wasn't specified");
+  let port = env::var("NETWORK_RPC_PORT").expect("network RPC port wasn't specified");
+  "http://".to_string() + &login + "@" + &hostname + ":" + &port
+}
+
+fn key_gen<K: KeyGenParams>() -> KeyGen<K> {
+  fn read_key_from_env<C: Ciphersuite>(label: &'static str) -> Zeroizing<C::F> {
+    let key_hex =
+      Zeroizing::new(env::var(label).unwrap_or_else(|| panic!("{label} wasn't provided")));
+    let bytes = Zeroizing::new(
+      hex::decode(key_hex).unwrap_or_else(|_| panic!("{label} wasn't a valid hex string")),
+    );
+
+    let mut repr = <C::F as PrimeField>::Repr::default();
+    if repr.as_ref().len() != bytes.len() {
+      panic!("{label} wasn't the correct length");
+    }
+    repr.as_mut().copy_from_slice(bytes.as_slice());
+    let res = Zeroizing::new(
+      Option::from(<C::F as PrimeField>::from_repr(repr))
+        .unwrap_or_else(|| panic!("{label} wasn't a valid scalar")),
+    );
+    repr.as_mut().zeroize();
+    res
+  }
+  KeyGen::new(
+    read_key_from_env::<<Ristretto as EvrfCurve>::EmbeddedCurve>("SUBSTRATE_EVRF_KEY"),
+    read_key_from_env::<<K::ExternalNetworkCiphersuite as EvrfCurve>::EmbeddedCurve>(
+      "NETWORK_EVRF_KEY",
+    ),
+  )
+}
+
+async fn first_block_after_time<S: ScannerFeed>(feed: &S, serai_time: u64) -> u64 {
+  async fn first_block_after_time_iteration<S: ScannerFeed>(
+    feed: &S,
+    serai_time: u64,
+  ) -> Result<Option<u64>, S::EphemeralError> {
+    let latest = feed.latest_finalized_block_number().await?;
+    let latest_time = feed.time_of_block(latest).await?;
+    if latest_time < serai_time {
+      tokio::time::sleep(core::time::Duration::from_secs(serai_time - latest_time)).await;
+      return Ok(None);
+    }
+
+    // A finalized block has a time greater than or equal to the time we want to start at
+    // Find the first such block with a binary search
+    // start_search and end_search are inclusive
+    let mut start_search = 0;
+    let mut end_search = latest;
+    while start_search != end_search {
+      // This purposely chooses the earlier block in the case two blocks are both in the middle
+      let to_check = start_search + ((end_search - start_search) / 2);
+      let block_time = feed.time_of_block(to_check).await?;
+      match block_time.cmp(&serai_time) {
+        Ordering::Less => {
+          start_search = to_check + 1;
+          assert!(start_search <= end_search);
+        }
+        Ordering::Equal | Ordering::Greater => {
+          // This holds true since we pick the earlier block upon an even search distance
+          // If it didn't, this would cause an infinite loop
+          assert!(to_check < end_search);
+          end_search = to_check;
+        }
+      }
+    }
+    Ok(Some(start_search))
+  }
+  loop {
+    match first_block_after_time_iteration(feed, serai_time).await {
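+      // Each iteration either finds the block (Ok(Some)), observes the chain hasn't yet reached
+      // `serai_time` (Ok(None)), or hits an RPC error; the latter two cases fall through to the
+      // sleep below before retrying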
+ Ok(Some(block)) => return block, + Ok(None) => { + log::info!("waiting for block to activate at (a block with timestamp >= {serai_time})"); + } + Err(e) => { + log::error!("couldn't find the first block Serai should scan due to an RPC error: {e:?}"); + } + } + tokio::time::sleep(core::time::Duration::from_secs(5)).await; + } +} + +/// Hooks to run during the main loop. +pub trait Hooks { + /// A hook to run upon receiving a message. + fn on_message(txn: &mut impl DbTxn, msg: &messages::CoordinatorMessage); +} +impl Hooks for () { + fn on_message(_: &mut impl DbTxn, _: &messages::CoordinatorMessage) {} +} + +/// The main loop of a Processor, interacting with the Coordinator. +pub async fn main_loop< + H: Hooks, + S: ScannerFeed, + K: KeyGenParams>>, + Sch: Clone + + Scheduler< + S, + SignableTransaction: SignableTransaction, + >, +>( + mut db: Db, + feed: S, + scheduler: Sch, + publisher: impl TransactionPublisher>, +) { + let mut coordinator = Coordinator::new::(db.clone()); + + let mut key_gen = key_gen::(); + let mut scanner = Scanner::new(db.clone(), feed.clone(), scheduler.clone()).await; + let mut signers = + Signers::::new(db.clone(), coordinator.coordinator_send(), publisher); + + loop { + let db_clone = db.clone(); + let mut txn = db.txn(); + let msg = coordinator.next_message(&mut txn).await; + H::on_message(&mut txn, &msg); + let mut txn = Some(txn); + match msg { + messages::CoordinatorMessage::KeyGen(msg) => { + let txn = txn.as_mut().unwrap(); + let mut new_key = None; + // This is a computationally expensive call yet it happens infrequently + for msg in key_gen.handle(txn, msg) { + if let messages::key_gen::ProcessorMessage::GeneratedKeyPair { session, .. } = &msg { + new_key = Some(*session) + } + coordinator.send_message(&messages::ProcessorMessage::KeyGen(msg)); + } + + // If we were yielded a key, register it in the signers + if let Some(session) = new_key { + let (substrate_keys, network_keys) = KeyGen::::key_shares(txn, session) + .expect("generated key pair yet couldn't get key shares"); + signers.register_keys(txn, session, substrate_keys, network_keys); + } + } + + // These are cheap calls which are fine to be here in this loop + messages::CoordinatorMessage::Sign(msg) => { + let txn = txn.as_mut().unwrap(); + signers.queue_message(txn, &msg) + } + messages::CoordinatorMessage::Coordinator( + messages::coordinator::CoordinatorMessage::CosignSubstrateBlock { + session, + block_number, + block, + }, + ) => { + let txn = txn.take().unwrap(); + signers.cosign_block(txn, session, block_number, block) + } + messages::CoordinatorMessage::Coordinator( + messages::coordinator::CoordinatorMessage::SignSlashReport { session, report }, + ) => { + let txn = txn.take().unwrap(); + signers.sign_slash_report(txn, session, &report) + } + + messages::CoordinatorMessage::Substrate(msg) => match msg { + messages::substrate::CoordinatorMessage::SetKeys { serai_time, session, key_pair } => { + let txn = txn.as_mut().unwrap(); + let key = + EncodableG(K::decode_key(key_pair.1.as_ref()).expect("invalid key set on serai")); + + // Queue the key to be activated upon the next Batch + KeyToActivate::>::send(txn, &key); + + // Set the external key, as needed by the signers + ExternalKeyForSessionForSigners::>::set(txn, session, &key); + + // This is presumed extremely expensive, potentially blocking for several minutes, yet + // only happens for the very first set of keys + if session == Session(0) { + assert!(scanner.is_none()); + let start_block = first_block_after_time(&feed, 
serai_time).await; + scanner = Some( + Scanner::initialize(db_clone, feed.clone(), scheduler.clone(), start_block, key.0) + .await, + ); + } + } + messages::substrate::CoordinatorMessage::SlashesReported { session } => { + let txn = txn.as_mut().unwrap(); + + // Since this session had its slashes reported, it has finished all its signature + // protocols and has been fully retired. We retire it from the signers accordingly + let key = ExternalKeyForSessionForSigners::>::take(txn, session).unwrap().0; + + // This is a cheap call + signers.retire_session(txn, session, &key) + } + messages::substrate::CoordinatorMessage::Block { + serai_block_number: _, + batches, + mut burns, + } => { + let scanner = scanner.as_mut().unwrap(); + + // Substrate sets this limit to prevent DoSs from malicious validator sets + // That bound lets us consume this txn in the following loop body, as an optimization + assert!(batches.len() <= 1); + for messages::substrate::ExecutedBatch { id, in_instructions } in batches { + let key_to_activate = + KeyToActivate::>::try_recv(txn.as_mut().unwrap()).map(|key| key.0); + + // This is a cheap call as it internally just queues this to be done later + let _: () = scanner.acknowledge_batch( + txn.take().unwrap(), + id, + in_instructions, + /* + `acknowledge_batch` takes burns to optimize handling returns with standard + payments. That's why handling these with a Batch (and not waiting until the + following potential `queue_burns` call makes sense. As for which Batch, the first + is equally valid unless we want to start introspecting (and should be our only + Batch anyways). + */ + burns.drain(..).collect(), + key_to_activate, + ); + } + + // This is a cheap call as it internally just queues this to be done later + if !burns.is_empty() { + let _: () = scanner.queue_burns(txn.take().unwrap(), burns); + } + } + }, + }; + // If the txn wasn't already consumed and committed, commit it + if let Some(txn) = txn { + txn.commit(); + } + } +} diff --git a/processor/bitcoin/Cargo.toml b/processor/bitcoin/Cargo.toml new file mode 100644 index 000000000..90b9566b6 --- /dev/null +++ b/processor/bitcoin/Cargo.toml @@ -0,0 +1,54 @@ +[package] +name = "serai-bitcoin-processor" +version = "0.1.0" +description = "Serai Bitcoin Processor" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/bitcoin" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +publish = false + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +rand_core = { version = "0.6", default-features = false } + +hex = { version = "0.4", default-features = false, features = ["std"] } +scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } +borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } + +ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, features = ["std", "secp256k1"] } +dkg = { path = "../../crypto/dkg", default-features = false, features = ["std", "evrf-secp256k1"] } +frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false } + +secp256k1 = { version = "0.29", default-features = false, features = ["std", "global-context", "rand-std"] } +bitcoin-serai = { path = "../../networks/bitcoin", default-features = false, features = ["std"] } + +serai-client = { path = "../../substrate/client", default-features = false, features = 
["bitcoin"] } + +zalloc = { path = "../../common/zalloc" } +log = { version = "0.4", default-features = false, features = ["std"] } +tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] } + +serai-db = { path = "../../common/db" } + +key-gen = { package = "serai-processor-key-gen", path = "../key-gen" } + +primitives = { package = "serai-processor-primitives", path = "../primitives" } +scheduler = { package = "serai-processor-scheduler-primitives", path = "../scheduler/primitives" } +scanner = { package = "serai-processor-scanner", path = "../scanner" } +utxo-scheduler = { package = "serai-processor-utxo-scheduler-primitives", path = "../scheduler/utxo/primitives" } +transaction-chaining-scheduler = { package = "serai-processor-transaction-chaining-scheduler", path = "../scheduler/utxo/transaction-chaining" } +signers = { package = "serai-processor-signers", path = "../signers" } + +bin = { package = "serai-processor-bin", path = "../bin" } + +[features] +parity-db = ["bin/parity-db"] +rocksdb = ["bin/rocksdb"] diff --git a/processor/bitcoin/LICENSE b/processor/bitcoin/LICENSE new file mode 100644 index 000000000..41d5a2616 --- /dev/null +++ b/processor/bitcoin/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2022-2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . diff --git a/processor/bitcoin/README.md b/processor/bitcoin/README.md new file mode 100644 index 000000000..79d1cedde --- /dev/null +++ b/processor/bitcoin/README.md @@ -0,0 +1 @@ +# Serai Bitcoin Processor diff --git a/processor/bitcoin/src/db.rs b/processor/bitcoin/src/db.rs new file mode 100644 index 000000000..1d73ebfee --- /dev/null +++ b/processor/bitcoin/src/db.rs @@ -0,0 +1,8 @@ +use serai_db::{Get, DbTxn, create_db}; + +create_db! 
{ + BitcoinProcessor { + LatestBlockToYieldAsFinalized: () -> u64, + ScriptPubKey: (tx: [u8; 32], vout: u32) -> Vec, + } +} diff --git a/processor/bitcoin/src/key_gen.rs b/processor/bitcoin/src/key_gen.rs new file mode 100644 index 000000000..415441348 --- /dev/null +++ b/processor/bitcoin/src/key_gen.rs @@ -0,0 +1,28 @@ +use ciphersuite::{group::GroupEncoding, Ciphersuite, Secp256k1}; +use frost::ThresholdKeys; + +use crate::{primitives::x_coord_to_even_point, scan::scanner}; + +pub(crate) struct KeyGenParams; +impl key_gen::KeyGenParams for KeyGenParams { + const ID: &'static str = "Bitcoin"; + + type ExternalNetworkCiphersuite = Secp256k1; + + fn tweak_keys(keys: &mut ThresholdKeys) { + *keys = bitcoin_serai::wallet::tweak_keys(keys); + // Also create a scanner to assert these keys, and all expected paths, are usable + scanner(keys.group_key()); + } + + fn encode_key(key: ::G) -> Vec { + let key = key.to_bytes(); + let key: &[u8] = key.as_ref(); + // Skip the parity encoding as we know this key is even + key[1 ..].to_vec() + } + + fn decode_key(key: &[u8]) -> Option<::G> { + x_coord_to_even_point(key) + } +} diff --git a/processor/bitcoin/src/main.rs b/processor/bitcoin/src/main.rs new file mode 100644 index 000000000..5feb3e25d --- /dev/null +++ b/processor/bitcoin/src/main.rs @@ -0,0 +1,286 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +#[global_allocator] +static ALLOCATOR: zalloc::ZeroizingAlloc = + zalloc::ZeroizingAlloc(std::alloc::System); + +use bitcoin_serai::rpc::Rpc as BRpc; + +use ::primitives::task::{Task, ContinuallyRan}; + +mod primitives; +pub(crate) use crate::primitives::*; + +// Internal utilities for scanning transactions +mod scan; + +// App-logic trait satisfactions +mod key_gen; +use crate::key_gen::KeyGenParams; +mod rpc; +use rpc::Rpc; +mod scheduler; +use scheduler::{Planner, Scheduler}; + +// Our custom code for Bitcoin +mod db; +mod txindex; +use txindex::TxIndexTask; + +pub(crate) fn hash_bytes(hash: bitcoin_serai::bitcoin::hashes::sha256d::Hash) -> [u8; 32] { + use bitcoin_serai::bitcoin::hashes::Hash; + + let mut res = hash.to_byte_array(); + res.reverse(); + res +} + +#[tokio::main] +async fn main() { + let db = bin::init(); + let feed = Rpc { + db: db.clone(), + rpc: loop { + match BRpc::new(bin::url()).await { + Ok(rpc) => break rpc, + Err(e) => { + log::error!("couldn't connect to the Bitcoin node: {e:?}"); + tokio::time::sleep(core::time::Duration::from_secs(5)).await; + } + } + }, + }; + + let (index_task, index_handle) = Task::new(); + tokio::spawn(TxIndexTask(feed.clone()).continually_run(index_task, vec![])); + core::mem::forget(index_handle); + + bin::main_loop::<(), _, KeyGenParams, _>(db, feed.clone(), Scheduler::new(Planner), feed).await; +} + +/* +use bitcoin_serai::{ + bitcoin::{ + hashes::Hash as HashTrait, + key::{Parity, XOnlyPublicKey}, + consensus::{Encodable, Decodable}, + script::Instruction, + Transaction, Block, ScriptBuf, + opcodes::all::{OP_SHA256, OP_EQUALVERIFY}, + }, + wallet::{ + tweak_keys, p2tr_script_buf, ReceivedOutput, Scanner, TransactionError, + SignableTransaction as BSignableTransaction, TransactionMachine, + }, + rpc::{RpcError, Rpc}, +}; + +#[cfg(test)] +use bitcoin_serai::bitcoin::{ + secp256k1::{SECP256K1, SecretKey, Message}, + PrivateKey, PublicKey, + sighash::{EcdsaSighashType, SighashCache}, + script::PushBytesBuf, + absolute::LockTime, + Amount as BAmount, Sequence, Script, Witness, OutPoint, + transaction::Version, + 
blockdata::transaction::{TxIn, TxOut}, +}; + +use serai_client::{ + primitives::{MAX_DATA_LEN, Coin, NetworkId, Amount, Balance}, + networks::bitcoin::Address, +}; +*/ + +/* +impl TransactionTrait for Transaction { + #[cfg(test)] + async fn fee(&self, network: &Bitcoin) -> u64 { + let mut value = 0; + for input in &self.input { + let output = input.previous_output; + let mut hash = *output.txid.as_raw_hash().as_byte_array(); + hash.reverse(); + value += network.rpc.get_transaction(&hash).await.unwrap().output + [usize::try_from(output.vout).unwrap()] + .value + .to_sat(); + } + for output in &self.output { + value -= output.value.to_sat(); + } + value + } +} + +impl Bitcoin { + pub(crate) async fn new(url: String) -> Bitcoin { + let mut res = Rpc::new(url.clone()).await; + while let Err(e) = res { + log::error!("couldn't connect to Bitcoin node: {e:?}"); + sleep(Duration::from_secs(5)).await; + res = Rpc::new(url.clone()).await; + } + Bitcoin { rpc: res.unwrap() } + } + + #[cfg(test)] + pub(crate) async fn fresh_chain(&self) { + if self.rpc.get_latest_block_number().await.unwrap() > 0 { + self + .rpc + .rpc_call( + "invalidateblock", + serde_json::json!([hex::encode(self.rpc.get_block_hash(1).await.unwrap())]), + ) + .await + .unwrap() + } + } + + // This function panics on a node which doesn't follow the Bitcoin protocol, which is deemed fine + async fn median_fee(&self, block: &Block) -> Result { + let mut fees = vec![]; + if block.txdata.len() > 1 { + for tx in &block.txdata[1 ..] { + let mut in_value = 0; + for input in &tx.input { + let mut input_tx = input.previous_output.txid.to_raw_hash().to_byte_array(); + input_tx.reverse(); + in_value += self + .rpc + .get_transaction(&input_tx) + .await + .map_err(|_| NetworkError::ConnectionError)? + .output[usize::try_from(input.previous_output.vout).unwrap()] + .value + .to_sat(); + } + let out = tx.output.iter().map(|output| output.value.to_sat()).sum::(); + fees.push((in_value - out) / u64::try_from(tx.vsize()).unwrap()); + } + } + fees.sort(); + let fee = fees.get(fees.len() / 2).copied().unwrap_or(0); + + // The DUST constant documentation notes a relay rule practically enforcing a + // 1000 sat/kilo-vbyte minimum fee. 
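+    // 1000 sat/kilo-vbyte is 1 sat/vbyte, hence the floor applied to the median below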
+ Ok(Fee(fee.max(1))) + } + + #[cfg(test)] + pub(crate) fn sign_btc_input_for_p2pkh( + tx: &Transaction, + input_index: usize, + private_key: &PrivateKey, + ) -> ScriptBuf { + use bitcoin_serai::bitcoin::{Network as BNetwork, Address as BAddress}; + + let public_key = PublicKey::from_private_key(SECP256K1, private_key); + let main_addr = BAddress::p2pkh(public_key, BNetwork::Regtest); + + let mut der = SECP256K1 + .sign_ecdsa_low_r( + &Message::from_digest_slice( + SighashCache::new(tx) + .legacy_signature_hash( + input_index, + &main_addr.script_pubkey(), + EcdsaSighashType::All.to_u32(), + ) + .unwrap() + .to_raw_hash() + .as_ref(), + ) + .unwrap(), + &private_key.inner, + ) + .serialize_der() + .to_vec(); + der.push(1); + + ScriptBuf::builder() + .push_slice(PushBytesBuf::try_from(der).unwrap()) + .push_key(&public_key) + .into_script() + } +} + +impl Network for Bitcoin { + // 2 inputs should be 2 * 230 = 460 weight units + // The output should be ~36 bytes, or 144 weight units + // The overhead should be ~20 bytes at most, or 80 weight units + // 684 weight units, 171 vbytes, round up to 200 + // 200 vbytes at 1 sat/weight (our current minimum fee, 4 sat/vbyte) = 800 sat fee for the + // aggregation TX + const COST_TO_AGGREGATE: u64 = 800; + + #[cfg(test)] + async fn get_block_number(&self, id: &[u8; 32]) -> usize { + self.rpc.get_block_number(id).await.unwrap() + } + + #[cfg(test)] + async fn get_transaction_by_eventuality(&self, _: usize, id: &Eventuality) -> Transaction { + self.rpc.get_transaction(&id.0).await.unwrap() + } + + #[cfg(test)] + async fn mine_block(&self) { + use bitcoin_serai::bitcoin::{Network as BNetwork, Address as BAddress}; + + self + .rpc + .rpc_call::>( + "generatetoaddress", + serde_json::json!([1, BAddress::p2sh(Script::new(), BNetwork::Regtest).unwrap()]), + ) + .await + .unwrap(); + } + + #[cfg(test)] + async fn test_send(&self, address: Address) -> Block { + use bitcoin_serai::bitcoin::{Network as BNetwork, Address as BAddress}; + + let secret_key = SecretKey::new(&mut rand_core::OsRng); + let private_key = PrivateKey::new(secret_key, BNetwork::Regtest); + let public_key = PublicKey::from_private_key(SECP256K1, &private_key); + let main_addr = BAddress::p2pkh(public_key, BNetwork::Regtest); + + let new_block = self.get_latest_block_number().await.unwrap() + 1; + self + .rpc + .rpc_call::>("generatetoaddress", serde_json::json!([100, main_addr])) + .await + .unwrap(); + + let tx = self.get_block(new_block).await.unwrap().txdata.swap_remove(0); + let mut tx = Transaction { + version: Version(2), + lock_time: LockTime::ZERO, + input: vec![TxIn { + previous_output: OutPoint { txid: tx.compute_txid(), vout: 0 }, + script_sig: Script::new().into(), + sequence: Sequence(u32::MAX), + witness: Witness::default(), + }], + output: vec![TxOut { + value: tx.output[0].value - BAmount::from_sat(10000), + script_pubkey: address.clone().into(), + }], + }; + tx.input[0].script_sig = Self::sign_btc_input_for_p2pkh(&tx, 0, &private_key); + + let block = self.get_latest_block_number().await.unwrap() + 1; + self.rpc.send_raw_transaction(&tx).await.unwrap(); + for _ in 0 .. 
Self::CONFIRMATIONS { + self.mine_block().await; + } + self.get_block(block).await.unwrap() + } +} +*/ diff --git a/processor/bitcoin/src/primitives/block.rs b/processor/bitcoin/src/primitives/block.rs new file mode 100644 index 000000000..02b8e5957 --- /dev/null +++ b/processor/bitcoin/src/primitives/block.rs @@ -0,0 +1,80 @@ +use core::fmt; +use std::collections::HashMap; + +use ciphersuite::{Ciphersuite, Secp256k1}; + +use bitcoin_serai::bitcoin::block::{Header, Block as BBlock}; + +use serai_client::networks::bitcoin::Address; + +use serai_db::Db; +use primitives::{ReceivedOutput, EventualityTracker}; + +use crate::{hash_bytes, scan::scanner, output::Output, transaction::Eventuality}; + +#[derive(Clone, Debug)] +pub(crate) struct BlockHeader(pub(crate) Header); +impl primitives::BlockHeader for BlockHeader { + fn id(&self) -> [u8; 32] { + hash_bytes(self.0.block_hash().to_raw_hash()) + } + fn parent(&self) -> [u8; 32] { + hash_bytes(self.0.prev_blockhash.to_raw_hash()) + } +} + +#[derive(Clone)] +pub(crate) struct Block(pub(crate) D, pub(crate) BBlock); +impl fmt::Debug for Block { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt.debug_struct("Block").field("1", &self.1).finish_non_exhaustive() + } +} + +impl primitives::Block for Block { + type Header = BlockHeader; + + type Key = ::G; + type Address = Address; + type Output = Output; + type Eventuality = Eventuality; + + fn id(&self) -> [u8; 32] { + primitives::BlockHeader::id(&BlockHeader(self.1.header)) + } + + fn scan_for_outputs_unordered( + &self, + _latest_active_key: Self::Key, + key: Self::Key, + ) -> Vec { + let scanner = scanner(key); + + let mut res = vec![]; + // We skip the coinbase transaction as its burdened by maturity + for tx in &self.1.txdata[1 ..] { + for output in scanner.scan_transaction(tx) { + res.push(Output::new(&self.0, key, tx, output)); + } + } + res + } + + #[allow(clippy::type_complexity)] + fn check_for_eventuality_resolutions( + &self, + eventualities: &mut EventualityTracker, + ) -> HashMap< + >::TransactionId, + Self::Eventuality, + > { + let mut res = HashMap::new(); + for tx in &self.1.txdata[1 ..] { + let id = hash_bytes(tx.compute_txid().to_raw_hash()); + if let Some(eventuality) = eventualities.active_eventualities.remove(id.as_slice()) { + res.insert(id, eventuality); + } + } + res + } +} diff --git a/processor/bitcoin/src/primitives/mod.rs b/processor/bitcoin/src/primitives/mod.rs new file mode 100644 index 000000000..e089c623a --- /dev/null +++ b/processor/bitcoin/src/primitives/mod.rs @@ -0,0 +1,20 @@ +use ciphersuite::{Ciphersuite, Secp256k1}; + +use bitcoin_serai::bitcoin::key::{Parity, XOnlyPublicKey}; + +pub(crate) mod output; +pub(crate) mod transaction; +pub(crate) mod block; + +pub(crate) fn x_coord_to_even_point(key: &[u8]) -> Option<::G> { + if key.len() != 32 { + None? 
+ }; + + // Read the x-only public key + let key = XOnlyPublicKey::from_slice(key).ok()?; + // Convert to a full public key + let key = key.public_key(Parity::Even); + // Convert to k256 (from libsecp256k1) + Secp256k1::read_G(&mut key.serialize().as_slice()).ok() +} diff --git a/processor/bitcoin/src/primitives/output.rs b/processor/bitcoin/src/primitives/output.rs new file mode 100644 index 000000000..f1a1dc7a4 --- /dev/null +++ b/processor/bitcoin/src/primitives/output.rs @@ -0,0 +1,170 @@ +use std::io; + +use ciphersuite::{Ciphersuite, Secp256k1}; + +use bitcoin_serai::{ + bitcoin::{ + hashes::Hash as HashTrait, consensus::Encodable, script::Instruction, transaction::Transaction, + }, + wallet::ReceivedOutput as WalletOutput, +}; + +use scale::{Encode, Decode, IoReader}; +use borsh::{BorshSerialize, BorshDeserialize}; +use serai_db::Get; + +use serai_client::{ + primitives::{Coin, Amount, Balance, ExternalAddress}, + networks::bitcoin::Address, +}; + +use primitives::{OutputType, ReceivedOutput}; + +use crate::{ + primitives::x_coord_to_even_point, + scan::{offsets_for_key, presumed_origin, extract_serai_data}, +}; + +#[derive(Clone, PartialEq, Eq, Hash, Debug, Encode, Decode, BorshSerialize, BorshDeserialize)] +pub(crate) struct OutputId([u8; 36]); +impl Default for OutputId { + fn default() -> Self { + Self([0; 36]) + } +} +impl AsRef<[u8]> for OutputId { + fn as_ref(&self) -> &[u8] { + self.0.as_ref() + } +} +impl AsMut<[u8]> for OutputId { + fn as_mut(&mut self) -> &mut [u8] { + self.0.as_mut() + } +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub(crate) struct Output { + kind: OutputType, + presumed_origin: Option
, + pub(crate) output: WalletOutput, + data: Vec, +} + +impl Output { + pub(crate) fn new( + getter: &impl Get, + key: ::G, + tx: &Transaction, + output: WalletOutput, + ) -> Self { + Self { + kind: offsets_for_key(key) + .into_iter() + .find_map(|(kind, offset)| (offset == output.offset()).then_some(kind)) + .expect("scanned output for unknown offset"), + presumed_origin: presumed_origin(getter, tx), + output, + data: extract_serai_data(tx), + } + } + + pub(crate) fn new_with_presumed_origin( + key: ::G, + tx: &Transaction, + presumed_origin: Option
, + output: WalletOutput, + ) -> Self { + Self { + kind: offsets_for_key(key) + .into_iter() + .find_map(|(kind, offset)| (offset == output.offset()).then_some(kind)) + .expect("scanned output for unknown offset"), + presumed_origin, + output, + data: extract_serai_data(tx), + } + } +} + +impl ReceivedOutput<::G, Address> for Output { + type Id = OutputId; + type TransactionId = [u8; 32]; + + fn kind(&self) -> OutputType { + self.kind + } + + fn id(&self) -> Self::Id { + let mut id = OutputId::default(); + self.output.outpoint().consensus_encode(&mut id.as_mut()).unwrap(); + id + } + + fn transaction_id(&self) -> Self::TransactionId { + let mut res = self.output.outpoint().txid.to_raw_hash().to_byte_array(); + res.reverse(); + res + } + + fn key(&self) -> ::G { + // We read the key from the script pubkey so we don't have to independently store it + let script = &self.output.output().script_pubkey; + + // These assumptions are safe since it's an output we successfully scanned + assert!(script.is_p2tr()); + let Instruction::PushBytes(key) = script.instructions_minimal().last().unwrap().unwrap() else { + panic!("last item in v1 Taproot script wasn't bytes") + }; + let key = x_coord_to_even_point(key.as_ref()) + .expect("last item in scanned v1 Taproot script wasn't a valid x-only public key"); + + // The output's key minus the output's offset is the root key + key - (::G::GENERATOR * self.output.offset()) + } + + fn presumed_origin(&self) -> Option
{ + self.presumed_origin.clone() + } + + fn balance(&self) -> Balance { + Balance { coin: Coin::Bitcoin, amount: Amount(self.output.value()) } + } + + fn data(&self) -> &[u8] { + &self.data + } + + fn write(&self, writer: &mut W) -> io::Result<()> { + self.kind.write(writer)?; + let presumed_origin: Option = self.presumed_origin.clone().map(Into::into); + writer.write_all(&presumed_origin.encode())?; + self.output.write(writer)?; + writer.write_all(&u16::try_from(self.data.len()).unwrap().to_le_bytes())?; + writer.write_all(&self.data) + } + + fn read(mut reader: &mut R) -> io::Result { + Ok(Output { + kind: OutputType::read(reader)?, + presumed_origin: { + Option::::decode(&mut IoReader(&mut reader)) + .map_err(|e| io::Error::other(format!("couldn't decode ExternalAddress: {e:?}")))? + .map(|address| { + Address::try_from(address) + .map_err(|()| io::Error::other("couldn't decode Address from ExternalAddress")) + }) + .transpose()? + }, + output: WalletOutput::read(reader)?, + data: { + let mut data_len = [0; 2]; + reader.read_exact(&mut data_len)?; + + let mut data = vec![0; usize::from(u16::from_le_bytes(data_len))]; + reader.read_exact(&mut data)?; + data + }, + }) + } +} diff --git a/processor/bitcoin/src/primitives/transaction.rs b/processor/bitcoin/src/primitives/transaction.rs new file mode 100644 index 000000000..9b81d2f0c --- /dev/null +++ b/processor/bitcoin/src/primitives/transaction.rs @@ -0,0 +1,171 @@ +use std::io; + +use rand_core::{RngCore, CryptoRng}; + +use ciphersuite::Secp256k1; +use frost::{dkg::ThresholdKeys, sign::PreprocessMachine}; + +use bitcoin_serai::{ + bitcoin::{ + consensus::{Encodable, Decodable}, + ScriptBuf, Transaction as BTransaction, + }, + wallet::{ + ReceivedOutput, TransactionError, SignableTransaction as BSignableTransaction, + TransactionMachine, + }, +}; + +use borsh::{BorshSerialize, BorshDeserialize}; + +use serai_client::networks::bitcoin::Address; + +use crate::output::OutputId; + +#[derive(Clone, Debug)] +pub(crate) struct Transaction(pub(crate) BTransaction); + +impl From for Transaction { + fn from(tx: BTransaction) -> Self { + Self(tx) + } +} + +impl scheduler::Transaction for Transaction { + fn read(reader: &mut impl io::Read) -> io::Result { + let tx = + BTransaction::consensus_decode(&mut io::BufReader::new(reader)).map_err(io::Error::other)?; + Ok(Self(tx)) + } + fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { + let mut writer = io::BufWriter::new(writer); + self.0.consensus_encode(&mut writer)?; + writer.into_inner()?; + Ok(()) + } +} + +#[derive(Clone, Debug)] +pub(crate) struct SignableTransaction { + pub(crate) inputs: Vec, + pub(crate) payments: Vec<(ScriptBuf, u64)>, + pub(crate) change: Option
, + pub(crate) fee_per_vbyte: u64, +} + +impl SignableTransaction { + fn signable(self) -> Result { + BSignableTransaction::new( + self.inputs, + &self.payments, + self.change.map(ScriptBuf::from), + None, + self.fee_per_vbyte, + ) + } +} + +#[derive(Clone)] +pub(crate) struct ClonableTransctionMachine(SignableTransaction, ThresholdKeys); +impl PreprocessMachine for ClonableTransctionMachine { + type Preprocess = ::Preprocess; + type Signature = ::Signature; + type SignMachine = ::SignMachine; + + fn preprocess( + self, + rng: &mut R, + ) -> (Self::SignMachine, Self::Preprocess) { + self + .0 + .signable() + .expect("signing an invalid SignableTransaction") + .multisig(&self.1) + .expect("incorrect keys used for SignableTransaction") + .preprocess(rng) + } +} + +impl scheduler::SignableTransaction for SignableTransaction { + type Transaction = Transaction; + type Ciphersuite = Secp256k1; + type PreprocessMachine = ClonableTransctionMachine; + + fn read(reader: &mut impl io::Read) -> io::Result { + let inputs = { + let mut input_len = [0; 4]; + reader.read_exact(&mut input_len)?; + let mut inputs = vec![]; + for _ in 0 .. u32::from_le_bytes(input_len) { + inputs.push(ReceivedOutput::read(reader)?); + } + inputs + }; + + let payments = Vec::<(Vec, u64)>::deserialize_reader(reader)?; + let change = <_>::deserialize_reader(reader)?; + let fee_per_vbyte = <_>::deserialize_reader(reader)?; + + Ok(Self { + inputs, + payments: payments + .into_iter() + .map(|(address, amount)| (ScriptBuf::from_bytes(address), amount)) + .collect(), + change, + fee_per_vbyte, + }) + } + fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { + writer.write_all(&u32::try_from(self.inputs.len()).unwrap().to_le_bytes())?; + for input in &self.inputs { + input.write(writer)?; + } + + for payment in &self.payments { + (payment.0.as_script().as_bytes(), payment.1).serialize(writer)?; + } + self.change.serialize(writer)?; + self.fee_per_vbyte.serialize(writer)?; + + Ok(()) + } + + fn id(&self) -> [u8; 32] { + self.clone().signable().unwrap().txid() + } + + fn sign(self, keys: ThresholdKeys) -> Self::PreprocessMachine { + ClonableTransctionMachine(self, keys) + } +} + +#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] +pub(crate) struct Eventuality { + pub(crate) txid: [u8; 32], + pub(crate) singular_spent_output: Option, +} + +impl primitives::Eventuality for Eventuality { + type OutputId = OutputId; + + fn id(&self) -> [u8; 32] { + self.txid + } + + // We define the lookup as our ID since the resolving transaction only has a singular possible ID + fn lookup(&self) -> Vec { + self.txid.to_vec() + } + + fn singular_spent_output(&self) -> Option { + self.singular_spent_output.clone() + } + + fn read(reader: &mut impl io::Read) -> io::Result { + Self::deserialize_reader(reader) + } + fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { + self.serialize(writer) + } +} diff --git a/processor/bitcoin/src/rpc.rs b/processor/bitcoin/src/rpc.rs new file mode 100644 index 000000000..acd3be858 --- /dev/null +++ b/processor/bitcoin/src/rpc.rs @@ -0,0 +1,181 @@ +use core::future::Future; + +use bitcoin_serai::rpc::{RpcError, Rpc as BRpc}; + +use serai_client::primitives::{NetworkId, Coin, Amount}; + +use serai_db::Db; +use scanner::ScannerFeed; +use signers::TransactionPublisher; + +use crate::{ + db, + transaction::Transaction, + block::{BlockHeader, Block}, +}; + +#[derive(Clone)] +pub(crate) struct Rpc { + pub(crate) db: D, + pub(crate) rpc: BRpc, +} + +impl ScannerFeed for Rpc { + const 
NETWORK: NetworkId = NetworkId::Bitcoin; + // 6 confirmations is widely accepted as secure and shouldn't occur + const CONFIRMATIONS: u64 = 6; + // The window length should be roughly an hour + const WINDOW_LENGTH: u64 = 6; + + const TEN_MINUTES: u64 = 1; + + type Block = Block; + + type EphemeralError = RpcError; + + fn latest_finalized_block_number( + &self, + ) -> impl Send + Future> { + async move { db::LatestBlockToYieldAsFinalized::get(&self.db).ok_or(RpcError::ConnectionError) } + } + + fn time_of_block( + &self, + number: u64, + ) -> impl Send + Future> { + async move { + let number = usize::try_from(number).unwrap(); + + /* + The block time isn't guaranteed to be monotonic. It is guaranteed to be greater than the + median time of prior blocks, as detailed in BIP-0113 (a BIP which used that fact to improve + CLTV). This creates a monotonic median time which we use as the block time. + */ + // This implements `GetMedianTimePast` + let median = { + const MEDIAN_TIMESPAN: usize = 11; + let mut timestamps = Vec::with_capacity(MEDIAN_TIMESPAN); + for i in number.saturating_sub(MEDIAN_TIMESPAN) .. number { + timestamps + .push(self.rpc.get_block(&self.rpc.get_block_hash(i).await?).await?.header.time); + } + timestamps.sort(); + timestamps[timestamps.len() / 2] + }; + + /* + This block's timestamp is guaranteed to be greater than this median: + https://github.com/bitcoin/bitcoin/blob/0725a374941355349bb4bc8a79dad1affb27d3b9 + /src/validation.cpp#L4182-L4184 + + This does not guarantee the median always increases however. Take the following trivial + example, as the window is initially built: + + 0 block has time 0 // Prior blocks: [] + 1 block has time 1 // Prior blocks: [0] + 2 block has time 2 // Prior blocks: [0, 1] + 3 block has time 2 // Prior blocks: [0, 1, 2] + + These two blocks have the same time (both greater than the median of their prior blocks) and + the same median. + + The median will never decrease however. The values pushed onto the window will always be + greater than the median. If a value greater than the median is popped, the median will + remain the same (due to the counterbalance of the pushed value). If a value less than the + median is popped, the median will increase (either to another instance of the same value, + yet one closer to the end of the repeating sequence, or to a higher value). + */ + Ok(median.into()) + } + } + + fn unchecked_block_header_by_number( + &self, + number: u64, + ) -> impl Send + + Future::Header, Self::EphemeralError>> + { + async move { + Ok(BlockHeader( + self + .rpc + .get_block(&self.rpc.get_block_hash(number.try_into().unwrap()).await?) + .await? + .header, + )) + } + } + + fn unchecked_block_by_number( + &self, + number: u64, + ) -> impl Send + Future> { + async move { + Ok(Block( + self.db.clone(), + self.rpc.get_block(&self.rpc.get_block_hash(number.try_into().unwrap()).await?).await?, + )) + } + } + + fn dust(coin: Coin) -> Amount { + assert_eq!(coin, Coin::Bitcoin); + + /* + A Taproot input is: + - 36 bytes for the OutPoint + - 0 bytes for the script (+1 byte for the length) + - 4 bytes for the sequence + Per https://developer.bitcoin.org/reference/transactions.html#raw-transaction-format + + There's also: + - 1 byte for the witness length + - 1 byte for the signature length + - 64 bytes for the signature + which have the SegWit discount. + + (4 * (36 + 1 + 4)) + (1 + 1 + 64) = 164 + 66 = 230 weight units + 230 ceil div 4 = 57 vbytes + + Bitcoin defines multiple minimum feerate constants *per kilo-vbyte*. 
Currently, these are: + - 1000 sat/kilo-vbyte for a transaction to be relayed + - Each output's value must exceed the fee of the TX spending it at 3000 sat/kilo-vbyte + The DUST constant needs to be determined by the latter. + Since these are solely relay rules, and may be raised, we require all outputs be spendable + under a 5000 sat/kilo-vbyte fee rate. + + 5000 sat/kilo-vbyte = 5 sat/vbyte + 5 * 57 = 285 sats/spent-output + + Even if an output took 100 bytes (it should be just ~29-43), taking 400 weight units, adding + 100 vbytes, tripling the transaction size, then the sats/tx would be < 1000. + + Increase by an order of magnitude, in order to ensure this is actually worth our time, and we + get 10,000 satoshis. This is $5 if 1 BTC = 50,000 USD. + */ + Amount(10_000) + } + + fn cost_to_aggregate( + &self, + coin: Coin, + _reference_block: &Self::Block, + ) -> impl Send + Future> { + async move { + assert_eq!(coin, Coin::Bitcoin); + // TODO + Ok(Amount(0)) + } + } +} + +impl TransactionPublisher for Rpc { + type EphemeralError = RpcError; + + fn publish( + &self, + tx: Transaction, + ) -> impl Send + Future> { + async move { self.rpc.send_raw_transaction(&tx.0).await.map(|_| ()) } + } +} diff --git a/processor/bitcoin/src/scan.rs b/processor/bitcoin/src/scan.rs new file mode 100644 index 000000000..6d7fab887 --- /dev/null +++ b/processor/bitcoin/src/scan.rs @@ -0,0 +1,125 @@ +use std::{sync::LazyLock, collections::HashMap}; + +use ciphersuite::{Ciphersuite, Secp256k1}; + +use bitcoin_serai::{ + bitcoin::{ + blockdata::opcodes, + script::{Instruction, ScriptBuf}, + Transaction, + }, + wallet::Scanner, +}; + +use serai_client::networks::bitcoin::Address; + +use serai_db::Get; +use primitives::OutputType; + +use crate::hash_bytes; + +const KEY_DST: &[u8] = b"Serai Bitcoin Processor Key Offset"; +static BRANCH_BASE_OFFSET: LazyLock<::F> = + LazyLock::new(|| Secp256k1::hash_to_F(KEY_DST, b"branch")); +static CHANGE_BASE_OFFSET: LazyLock<::F> = + LazyLock::new(|| Secp256k1::hash_to_F(KEY_DST, b"change")); +static FORWARD_BASE_OFFSET: LazyLock<::F> = + LazyLock::new(|| Secp256k1::hash_to_F(KEY_DST, b"forward")); + +// Unfortunately, we have per-key offsets as it's the root key plus the base offset may not be +// even. While we could tweak the key until all derivations are even, that'd require significantly +// more tweaking. This algorithmic complexity is preferred. +pub(crate) fn offsets_for_key( + key: ::G, +) -> HashMap::F> { + let mut offsets = HashMap::from([(OutputType::External, ::F::ZERO)]); + + // We create an actual Bitcoin scanner as upon adding an offset, it yields the tweaked offset + // actually used + let mut scanner = Scanner::new(key).unwrap(); + let mut register = |kind, offset| { + let tweaked_offset = scanner.register_offset(offset).expect("offset collision"); + offsets.insert(kind, tweaked_offset); + }; + + register(OutputType::Branch, *BRANCH_BASE_OFFSET); + register(OutputType::Change, *CHANGE_BASE_OFFSET); + register(OutputType::Forwarded, *FORWARD_BASE_OFFSET); + + offsets +} + +pub(crate) fn scanner(key: ::G) -> Scanner { + let mut scanner = Scanner::new(key).unwrap(); + for (_, offset) in offsets_for_key(key) { + let tweaked_offset = scanner.register_offset(offset).unwrap(); + assert_eq!(tweaked_offset, offset); + } + scanner +} + +pub(crate) fn presumed_origin(getter: &impl Get, tx: &Transaction) -> Option
{ + for input in &tx.input { + let txid = hash_bytes(input.previous_output.txid.to_raw_hash()); + let vout = input.previous_output.vout; + if let Some(address) = + Address::new(crate::txindex::script_pubkey_for_on_chain_output(getter, txid, vout)) + { + return Some(address); + } + } + None? +} + +// Checks if this script matches SHA256 PUSH MSG_HASH OP_EQUALVERIFY .. +fn matches_segwit_data(script: &ScriptBuf) -> Option { + let mut ins = script.instructions(); + + // first item should be SHA256 code + if ins.next()?.ok()?.opcode()? != opcodes::all::OP_SHA256 { + return Some(false); + } + + // next should be a data push + ins.next()?.ok()?.push_bytes()?; + + // next should be a equality check + if ins.next()?.ok()?.opcode()? != opcodes::all::OP_EQUALVERIFY { + return Some(false); + } + + Some(true) +} + +// Extract the data for Serai from a transaction +pub(crate) fn extract_serai_data(tx: &Transaction) -> Vec { + // Check for an OP_RETURN output + let mut data = (|| { + for output in &tx.output { + if output.script_pubkey.is_op_return() { + match output.script_pubkey.instructions_minimal().last() { + Some(Ok(Instruction::PushBytes(data))) => return Some(data.as_bytes().to_vec()), + _ => continue, + } + } + } + None + })(); + + // Check the inputs + if data.is_none() { + for input in &tx.input { + let witness = input.witness.to_vec(); + // The witness has to have at least 2 items, msg and the redeem script + if witness.len() >= 2 { + let redeem_script = ScriptBuf::from_bytes(witness.last().unwrap().clone()); + if matches_segwit_data(&redeem_script) == Some(true) { + data = Some(witness[witness.len() - 2].clone()); // len() - 1 is the redeem_script + break; + } + } + } + } + + data.unwrap_or(vec![]) +} diff --git a/processor/bitcoin/src/scheduler.rs b/processor/bitcoin/src/scheduler.rs new file mode 100644 index 000000000..08dc508c1 --- /dev/null +++ b/processor/bitcoin/src/scheduler.rs @@ -0,0 +1,213 @@ +use core::future::Future; + +use ciphersuite::{Ciphersuite, Secp256k1}; + +use bitcoin_serai::{ + bitcoin::ScriptBuf, + wallet::{TransactionError, SignableTransaction as BSignableTransaction, p2tr_script_buf}, +}; + +use serai_client::{ + primitives::{Coin, Amount}, + networks::bitcoin::Address, +}; + +use serai_db::Db; +use primitives::{OutputType, ReceivedOutput, Payment}; +use scanner::{KeyFor, AddressFor, OutputFor, BlockFor}; +use utxo_scheduler::{PlannedTransaction, TransactionPlanner}; +use transaction_chaining_scheduler::{EffectedReceivedOutputs, Scheduler as GenericScheduler}; + +use crate::{ + scan::{offsets_for_key, scanner}, + output::Output, + transaction::{SignableTransaction, Eventuality}, + rpc::Rpc, +}; + +fn address_from_serai_key(key: ::G, kind: OutputType) -> Address { + let offset = ::G::GENERATOR * offsets_for_key(key)[&kind]; + Address::new( + p2tr_script_buf(key + offset) + .expect("creating address from Serai key which wasn't properly tweaked"), + ) + .expect("couldn't create Serai-representable address for P2TR script") +} + +fn signable_transaction( + _reference_block: &BlockFor>, + inputs: Vec>>, + payments: Vec>>>, + change: Option>>, +) -> Result<(SignableTransaction, BSignableTransaction), TransactionError> { + assert!( + inputs.len() < + , EffectedReceivedOutputs>>>::MAX_INPUTS + ); + assert!( + (payments.len() + usize::from(u8::from(change.is_some()))) < + , EffectedReceivedOutputs>>>::MAX_OUTPUTS + ); + + // TODO + let fee_per_vbyte = 1; + + let inputs = inputs.into_iter().map(|input| input.output).collect::>(); + + let mut payments = payments + 
.into_iter() + .map(|payment| { + (ScriptBuf::from(payment.address().clone()), { + let balance = payment.balance(); + assert_eq!(balance.coin, Coin::Bitcoin); + balance.amount.0 + }) + }) + .collect::>(); + /* + Push a payment to a key with a known private key which anyone can spend. If this transaction + gets stuck, this lets anyone create a child transaction spending this output, raising the fee, + getting the transaction unstuck (via CPFP). + */ + payments.push(( + // The generator is even so this is valid + p2tr_script_buf(::G::GENERATOR).unwrap(), + // This uses the minimum output value allowed, as defined as a constant in bitcoin-serai + // TODO: Add a test for this comparing to bitcoin's `minimal_non_dust` + bitcoin_serai::wallet::DUST, + )); + + let change = change + .map(, EffectedReceivedOutputs>>>::change_address); + + BSignableTransaction::new( + inputs.clone(), + &payments, + change.clone().map(ScriptBuf::from), + None, + fee_per_vbyte, + ) + .map(|bst| (SignableTransaction { inputs, payments, change, fee_per_vbyte }, bst)) +} + +#[derive(Clone)] +pub(crate) struct Planner; +impl TransactionPlanner, EffectedReceivedOutputs>> for Planner { + type EphemeralError = (); + + type SignableTransaction = SignableTransaction; + + /* + Bitcoin has a max weight of 400,000 (MAX_STANDARD_TX_WEIGHT). + + A non-SegWit TX will have 4 weight units per byte, leaving a max size of 100,000 bytes. While + our inputs are entirely SegWit, such fine tuning is not necessary and could create issues in + the future (if the size decreases or we misevaluate it). It also offers a minimal amount of + benefit when we are able to logarithmically accumulate inputs/fulfill payments. + + For 128-byte inputs (36-byte output specification, 64-byte signature, whatever overhead) and + 64-byte outputs (40-byte script, 8-byte amount, whatever overhead), they together take up 192 + bytes. + + 100,000 / 192 = 520 + 520 * 192 leaves 160 bytes of overhead for the transaction structure itself. + */ + const MAX_INPUTS: usize = 520; + // We always reserve one output to create an anyone-can-spend output enabling anyone to use CPFP + // to unstick any transactions which had too low of a fee. + const MAX_OUTPUTS: usize = 519; + + fn branch_address(key: KeyFor>) -> AddressFor> { + address_from_serai_key(key, OutputType::Branch) + } + fn change_address(key: KeyFor>) -> AddressFor> { + address_from_serai_key(key, OutputType::Change) + } + fn forwarding_address(key: KeyFor>) -> AddressFor> { + address_from_serai_key(key, OutputType::Forwarded) + } + + fn calculate_fee( + &self, + reference_block: &BlockFor>, + inputs: Vec>>, + payments: Vec>>>, + change: Option>>, + ) -> impl Send + Future> { + async move { + Ok(match signable_transaction::(reference_block, inputs, payments, change) { + Ok(tx) => Amount(tx.1.needed_fee()), + Err( + TransactionError::NoInputs | TransactionError::NoOutputs | TransactionError::DustPayment, + ) => panic!("malformed arguments to calculate_fee"), + // No data, we have a minimum fee rate, we checked the amount of inputs/outputs + Err( + TransactionError::TooMuchData | + TransactionError::TooLowFee | + TransactionError::TooLargeTransaction, + ) => unreachable!(), + Err(TransactionError::NotEnoughFunds { fee, .. 
}) => Amount(fee), + }) + } + } + + fn plan( + &self, + reference_block: &BlockFor>, + inputs: Vec>>, + payments: Vec>>>, + change: Option>>, + ) -> impl Send + + Future< + Output = Result< + PlannedTransaction, Self::SignableTransaction, EffectedReceivedOutputs>>, + Self::EphemeralError, + >, + > { + async move { + let key = inputs.first().unwrap().key(); + for input in &inputs { + assert_eq!(key, input.key()); + } + + let singular_spent_output = (inputs.len() == 1).then(|| inputs[0].id()); + match signable_transaction::(reference_block, inputs.clone(), payments, change) { + Ok(tx) => Ok(PlannedTransaction { + signable: tx.0, + eventuality: Eventuality { txid: tx.1.txid(), singular_spent_output }, + auxilliary: EffectedReceivedOutputs({ + let tx = tx.1.transaction(); + let scanner = scanner(key); + + let mut res = vec![]; + for output in scanner.scan_transaction(tx) { + res.push(Output::new_with_presumed_origin( + key, + tx, + // It shouldn't matter if this is wrong as we should never try to return these + // We still provide an accurate value to ensure a lack of discrepancies + Some(Address::new(inputs[0].output.output().script_pubkey.clone()).unwrap()), + output, + )); + } + res + }), + }), + Err( + TransactionError::NoInputs | TransactionError::NoOutputs | TransactionError::DustPayment, + ) => panic!("malformed arguments to plan"), + // No data, we have a minimum fee rate, we checked the amount of inputs/outputs + Err( + TransactionError::TooMuchData | + TransactionError::TooLowFee | + TransactionError::TooLargeTransaction, + ) => unreachable!(), + Err(TransactionError::NotEnoughFunds { .. }) => { + panic!("plan called for a transaction without enough funds") + } + } + } + } +} + +pub(crate) type Scheduler = GenericScheduler, Planner>; diff --git a/processor/bitcoin/src/txindex.rs b/processor/bitcoin/src/txindex.rs new file mode 100644 index 000000000..6a55a4c46 --- /dev/null +++ b/processor/bitcoin/src/txindex.rs @@ -0,0 +1,108 @@ +use core::future::Future; + +use bitcoin_serai::bitcoin::ScriptBuf; + +use serai_db::{Get, DbTxn, Db}; + +use primitives::task::ContinuallyRan; +use scanner::ScannerFeed; + +use crate::{db, rpc::Rpc, hash_bytes}; + +pub(crate) fn script_pubkey_for_on_chain_output( + getter: &impl Get, + txid: [u8; 32], + vout: u32, +) -> ScriptBuf { + // We index every single output on the blockchain, so this shouldn't be possible + ScriptBuf::from_bytes( + db::ScriptPubKey::get(getter, txid, vout) + .expect("requested script public key for unknown output"), + ) +} + +/* + We want to be able to return received outputs. We do that by iterating over the inputs to find an + address format we recognize, then setting that address as the address to return to. + + Since inputs only contain the script signatures, yet addresses are for script public keys, we + need to pull up the output spent by an input and read the script public key from that. While we + could use `txindex=1`, and an asynchronous call to the Bitcoin node, we: + + 1) Can maintain a much smaller index ourselves + 2) Don't want the asynchronous call (which would require the flow be async, allowed to + potentially error, and more latent) + 3) Don't want to risk Bitcoin's `txindex` corruptions (frequently observed on testnet) + + This task builds that index. 
+*/ +pub(crate) struct TxIndexTask(pub(crate) Rpc); + +impl ContinuallyRan for TxIndexTask { + fn run_iteration(&mut self) -> impl Send + Future> { + async move { + let latest_block_number = self + .0 + .rpc + .get_latest_block_number() + .await + .map_err(|e| format!("couldn't fetch latest block number: {e:?}"))?; + let latest_block_number = u64::try_from(latest_block_number).unwrap(); + // `CONFIRMATIONS - 1` as any on-chain block inherently has one confirmation (itself) + let finalized_block_number = + latest_block_number.checked_sub(Rpc::::CONFIRMATIONS - 1).ok_or(format!( + "blockchain only just started and doesn't have {} blocks yet", + Rpc::::CONFIRMATIONS + ))?; + + /* + `finalized_block_number` is the latest block number minus confirmations. The blockchain may + undetectably re-organize though, as while the scanner will maintain an index of finalized + blocks and panics on reorganization, this runs prior to the scanner and that index. + + A reorganization of `CONFIRMATIONS` blocks is still an invariant. Even if that occurs, this + saves the script public keys *by the transaction hash an output index*. Accordingly, it + isn't invalidated on reorganization. The only risk would be if the new chain reorganized to + include a transaction to Serai which we didn't index the parents of. If that happens, we'll + panic when we scan the transaction, causing the invariant to be detected. + */ + + let finalized_block_number_in_db = db::LatestBlockToYieldAsFinalized::get(&self.0.db); + let next_block = finalized_block_number_in_db.map_or(0, |block| block + 1); + + let mut iterated = false; + for b in next_block ..= finalized_block_number { + iterated = true; + + // Fetch the block + let block_hash = self + .0 + .rpc + .get_block_hash(b.try_into().unwrap()) + .await + .map_err(|e| format!("couldn't fetch block hash for block {b}: {e:?}"))?; + let block = self + .0 + .rpc + .get_block(&block_hash) + .await + .map_err(|e| format!("couldn't fetch block {b}: {e:?}"))?; + + let mut txn = self.0.db.txn(); + + for tx in &block.txdata { + let txid = hash_bytes(tx.compute_txid().to_raw_hash()); + for (o, output) in tx.output.iter().enumerate() { + let o = u32::try_from(o).unwrap(); + // Set the script public key for this transaction + db::ScriptPubKey::set(&mut txn, txid, o, &output.script_pubkey.clone().into_bytes()); + } + } + + db::LatestBlockToYieldAsFinalized::set(&mut txn, &b); + txn.commit(); + } + Ok(iterated) + } + } +} diff --git a/processor/ethereum/Cargo.toml b/processor/ethereum/Cargo.toml new file mode 100644 index 000000000..139786316 --- /dev/null +++ b/processor/ethereum/Cargo.toml @@ -0,0 +1,69 @@ +[package] +name = "serai-ethereum-processor" +version = "0.1.0" +description = "Serai Ethereum Processor" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/ethereum" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +publish = false + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +rand_core = { version = "0.6", default-features = false } + +const-hex = { version = "1", default-features = false, features = ["std"] } +hex = { version = "0.4", default-features = false, features = ["std"] } +scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } +borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } + +ciphersuite = { path = "../../crypto/ciphersuite", 
default-features = false, features = ["std", "secp256k1"] } +dkg = { path = "../../crypto/dkg", default-features = false, features = ["std", "evrf-secp256k1"] } +frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false, features = ["secp256k1"] } + +k256 = { version = "^0.13.1", default-features = false, features = ["std"] } + +alloy-core = { version = "0.8", default-features = false } +alloy-rlp = { version = "0.3", default-features = false } + +alloy-rpc-types-eth = { version = "0.3", default-features = false } +alloy-transport = { version = "0.3", default-features = false } +alloy-simple-request-transport = { path = "../../networks/ethereum/alloy-simple-request-transport", default-features = false } +alloy-rpc-client = { version = "0.3", default-features = false } +alloy-provider = { version = "0.3", default-features = false } + +serai-client = { path = "../../substrate/client", default-features = false, features = ["ethereum"] } + +zalloc = { path = "../../common/zalloc" } +log = { version = "0.4", default-features = false, features = ["std"] } +tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] } + +serai-env = { path = "../../common/env" } +serai-db = { path = "../../common/db" } + +messages = { package = "serai-processor-messages", path = "../messages" } +key-gen = { package = "serai-processor-key-gen", path = "../key-gen" } + +primitives = { package = "serai-processor-primitives", path = "../primitives" } +scheduler = { package = "serai-processor-scheduler-primitives", path = "../scheduler/primitives" } +scanner = { package = "serai-processor-scanner", path = "../scanner" } +smart-contract-scheduler = { package = "serai-processor-smart-contract-scheduler", path = "../scheduler/smart-contract" } +signers = { package = "serai-processor-signers", path = "../signers" } + +ethereum-schnorr = { package = "ethereum-schnorr-contract", path = "../../networks/ethereum/schnorr" } +ethereum-primitives = { package = "serai-processor-ethereum-primitives", path = "./primitives" } +ethereum-router = { package = "serai-processor-ethereum-router", path = "./router" } +ethereum-erc20 = { package = "serai-processor-ethereum-erc20", path = "./erc20" } + +bin = { package = "serai-processor-bin", path = "../bin" } + +[features] +parity-db = ["bin/parity-db"] +rocksdb = ["bin/rocksdb"] diff --git a/processor/ethereum/LICENSE b/processor/ethereum/LICENSE new file mode 100644 index 000000000..41d5a2616 --- /dev/null +++ b/processor/ethereum/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2022-2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . 
diff --git a/processor/ethereum/README.md b/processor/ethereum/README.md new file mode 100644 index 000000000..5301c64b9 --- /dev/null +++ b/processor/ethereum/README.md @@ -0,0 +1 @@ +# Serai Ethereum Processor diff --git a/networks/ethereum/src/tests/contracts/ERC20.sol b/processor/ethereum/TODO/contracts/tests/ERC20.sol similarity index 95% rename from networks/ethereum/src/tests/contracts/ERC20.sol rename to processor/ethereum/TODO/contracts/tests/ERC20.sol index e157974c7..9ce4bad77 100644 --- a/networks/ethereum/src/tests/contracts/ERC20.sol +++ b/processor/ethereum/TODO/contracts/tests/ERC20.sol @@ -1,5 +1,5 @@ -// SPDX-License-Identifier: AGPLv3 -pragma solidity ^0.8.0; +// SPDX-License-Identifier: AGPL-3.0-only +pragma solidity ^0.8.26; contract TestERC20 { event Transfer(address indexed from, address indexed to, uint256 value); @@ -8,9 +8,11 @@ contract TestERC20 { function name() public pure returns (string memory) { return "Test ERC20"; } + function symbol() public pure returns (string memory) { return "TEST"; } + function decimals() public pure returns (uint8) { return 18; } @@ -29,11 +31,13 @@ contract TestERC20 { function balanceOf(address owner) public view returns (uint256) { return balances[owner]; } + function transfer(address to, uint256 value) public returns (bool) { balances[msg.sender] -= value; balances[to] += value; return true; } + function transferFrom(address from, address to, uint256 value) public returns (bool) { allowances[from][msg.sender] -= value; balances[from] -= value; @@ -45,6 +49,7 @@ contract TestERC20 { allowances[msg.sender][spender] = value; return true; } + function allowance(address owner, address spender) public view returns (uint256) { return allowances[owner][spender]; } diff --git a/processor/ethereum/TODO/old_processor.rs b/processor/ethereum/TODO/old_processor.rs new file mode 100644 index 000000000..a7e85a5ce --- /dev/null +++ b/processor/ethereum/TODO/old_processor.rs @@ -0,0 +1,164 @@ +TODO + + async fn publish_completion( + &self, + completion: &::Completion, + ) -> Result<(), NetworkError> { + // Publish this to the dedicated TX server for a solver to actually publish + #[cfg(not(test))] + { + } + + // Publish this using a dummy account we fund with magic RPC commands + #[cfg(test)] + { + let router = self.router().await; + let router = router.as_ref().unwrap(); + + let mut tx = match completion.command() { + RouterCommand::UpdateSeraiKey { key, .. } => { + router.update_serai_key(key, completion.signature()) + } + RouterCommand::Execute { outs, .. 
} => router.execute( + &outs.iter().cloned().map(Into::into).collect::>(), + completion.signature(), + ), + }; + tx.gas_limit = 1_000_000u64.into(); + tx.gas_price = 1_000_000_000u64.into(); + let tx = ethereum_serai::crypto::deterministically_sign(&tx); + + if self.provider.get_transaction_by_hash(*tx.hash()).await.unwrap().is_none() { + self + .provider + .raw_request::<_, ()>( + "anvil_setBalance".into(), + [ + tx.recover_signer().unwrap().to_string(), + (U256::from(tx.tx().gas_limit) * U256::from(tx.tx().gas_price)).to_string(), + ], + ) + .await + .unwrap(); + + let (tx, sig, _) = tx.into_parts(); + let mut bytes = vec![]; + tx.encode_with_signature_fields(&sig, &mut bytes); + let pending_tx = self.provider.send_raw_transaction(&bytes).await.unwrap(); + self.mine_block().await; + assert!(pending_tx.get_receipt().await.unwrap().status()); + } + + Ok(()) + } + } + + #[cfg(test)] + async fn get_transaction_by_eventuality( + &self, + block: usize, + eventuality: &Self::Eventuality, + ) -> Self::Transaction { + // We mine 96 blocks to ensure the 32 blocks relevant are finalized + // Back-check the prior two epochs in response to this + // TODO: Review why this is sub(3) and not sub(2) + for block in block.saturating_sub(3) ..= block { + match eventuality.1 { + RouterCommand::UpdateSeraiKey { nonce, .. } | RouterCommand::Execute { nonce, .. } => { + let router = self.router().await; + let router = router.as_ref().unwrap(); + + let block = u64::try_from(block).unwrap(); + let filter = router + .key_updated_filter() + .from_block(block * 32) + .to_block(((block + 1) * 32) - 1) + .topic1(nonce); + let logs = self.provider.get_logs(&filter).await.unwrap(); + if let Some(log) = logs.first() { + return self + .provider + .get_transaction_by_hash(log.clone().transaction_hash.unwrap()) + .await + .unwrap() + .unwrap(); + }; + + let filter = router + .executed_filter() + .from_block(block * 32) + .to_block(((block + 1) * 32) - 1) + .topic1(nonce); + let logs = self.provider.get_logs(&filter).await.unwrap(); + if logs.is_empty() { + continue; + } + return self + .provider + .get_transaction_by_hash(logs[0].transaction_hash.unwrap()) + .await + .unwrap() + .unwrap(); + } + } + } + panic!("couldn't find completion in any three of checked blocks"); + } + + #[cfg(test)] + async fn mine_block(&self) { + self.provider.raw_request::<_, ()>("anvil_mine".into(), [96]).await.unwrap(); + } + + #[cfg(test)] + async fn test_send(&self, send_to: Self::Address) -> Self::Block { + use rand_core::OsRng; + use ciphersuite::group::ff::Field; + use ethereum_serai::alloy::sol_types::SolCall; + + let key = ::F::random(&mut OsRng); + let address = ethereum_serai::crypto::address(&(Secp256k1::generator() * key)); + + // Set a 1.1 ETH balance + self + .provider + .raw_request::<_, ()>( + "anvil_setBalance".into(), + [Address(address).to_string(), "1100000000000000000".into()], + ) + .await + .unwrap(); + + let value = U256::from_str_radix("1000000000000000000", 10).unwrap(); + let tx = ethereum_serai::alloy::consensus::TxLegacy { + chain_id: None, + nonce: 0, + gas_price: 1_000_000_000u128, + gas_limit: 200_000u128, + to: ethereum_serai::alloy::primitives::TxKind::Call(send_to.0.into()), + // 1 ETH + value, + input: ethereum_serai::router::abi::inInstructionCall::new(( + [0; 20].into(), + value, + vec![].into(), + )) + .abi_encode() + .into(), + }; + + use ethereum_serai::alloy::{primitives::Signature, consensus::SignableTransaction}; + let sig = 
k256::ecdsa::SigningKey::from(k256::elliptic_curve::NonZeroScalar::new(key).unwrap()) + .sign_prehash_recoverable(tx.signature_hash().as_ref()) + .unwrap(); + + let mut bytes = vec![]; + tx.encode_with_signature_fields(&Signature::from(sig), &mut bytes); + let pending_tx = self.provider.send_raw_transaction(&bytes).await.ok().unwrap(); + + // Mine an epoch containing this TX + self.mine_block().await; + assert!(pending_tx.get_receipt().await.unwrap().status()); + // Yield the freshly mined block + self.get_block(self.get_latest_block_number().await.unwrap()).await.unwrap() + } diff --git a/processor/ethereum/TODO/tests/crypto.rs b/processor/ethereum/TODO/tests/crypto.rs new file mode 100644 index 000000000..20ba40b8d --- /dev/null +++ b/processor/ethereum/TODO/tests/crypto.rs @@ -0,0 +1,31 @@ +// TODO + +use rand_core::OsRng; + +use group::ff::{Field, PrimeField}; +use k256::{ + ecdsa::{ + self, hazmat::SignPrimitive, signature::hazmat::PrehashVerifier, SigningKey, VerifyingKey, + }, + Scalar, ProjectivePoint, +}; + +use frost::{ + curve::{Ciphersuite, Secp256k1}, + algorithm::{Hram, IetfSchnorr}, + tests::{algorithm_machines, sign}, +}; + +use crate::{crypto::*, tests::key_gen}; + +// Run the sign test with the EthereumHram +#[test] +fn test_signing() { + let (keys, _) = key_gen(); + + const MESSAGE: &[u8] = b"Hello, World!"; + + let algo = IetfSchnorr::::ietf(); + let _sig = + sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE); +} diff --git a/networks/ethereum/src/tests/mod.rs b/processor/ethereum/TODO/tests/mod.rs similarity index 99% rename from networks/ethereum/src/tests/mod.rs rename to processor/ethereum/TODO/tests/mod.rs index dcdbedce8..a865868f6 100644 --- a/networks/ethereum/src/tests/mod.rs +++ b/processor/ethereum/TODO/tests/mod.rs @@ -1,3 +1,5 @@ +// TODO + use std::{sync::Arc, collections::HashMap}; use rand_core::OsRng; @@ -21,9 +23,7 @@ use crate::crypto::{address, deterministically_sign, PublicKey}; mod crypto; #[cfg(test)] -mod abi; -#[cfg(test)] -mod schnorr; +use contracts::tests as abi; #[cfg(test)] mod router; diff --git a/networks/ethereum/src/tests/router.rs b/processor/ethereum/TODO/tests/router.rs similarity index 99% rename from networks/ethereum/src/tests/router.rs rename to processor/ethereum/TODO/tests/router.rs index 724348cc3..63e5f1d53 100644 --- a/networks/ethereum/src/tests/router.rs +++ b/processor/ethereum/TODO/tests/router.rs @@ -1,3 +1,5 @@ +// TODO + use std::{convert::TryFrom, sync::Arc, collections::HashMap}; use rand_core::OsRng; diff --git a/processor/ethereum/deployer/Cargo.toml b/processor/ethereum/deployer/Cargo.toml new file mode 100644 index 000000000..9b0ed1464 --- /dev/null +++ b/processor/ethereum/deployer/Cargo.toml @@ -0,0 +1,34 @@ +[package] +name = "serai-processor-ethereum-deployer" +version = "0.1.0" +description = "The deployer for Serai's Ethereum contracts" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/ethereum/deployer" +authors = ["Luke Parker "] +edition = "2021" +publish = false +rust-version = "1.79" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +alloy-core = { version = "0.8", default-features = false } +alloy-consensus = { version = "0.3", default-features = false } + +alloy-sol-types = { version = "0.8", default-features = false } +alloy-sol-macro = { version = "0.8", default-features = false } + +alloy-rpc-types-eth = { version = "0.3", 
default-features = false } +alloy-transport = { version = "0.3", default-features = false } +alloy-simple-request-transport = { path = "../../../networks/ethereum/alloy-simple-request-transport", default-features = false } +alloy-provider = { version = "0.3", default-features = false } + +ethereum-primitives = { package = "serai-processor-ethereum-primitives", path = "../primitives", default-features = false } + +[build-dependencies] +build-solidity-contracts = { path = "../../../networks/ethereum/build-contracts", default-features = false } diff --git a/processor/ethereum/deployer/LICENSE b/processor/ethereum/deployer/LICENSE new file mode 100644 index 000000000..41d5a2616 --- /dev/null +++ b/processor/ethereum/deployer/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2022-2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . diff --git a/processor/ethereum/deployer/README.md b/processor/ethereum/deployer/README.md new file mode 100644 index 000000000..6b4396506 --- /dev/null +++ b/processor/ethereum/deployer/README.md @@ -0,0 +1,23 @@ +# Ethereum Smart Contracts Deployer + +The deployer for Serai's Ethereum contracts. + +## Goals + +It should be possible to efficiently locate the Serai Router on a blockchain with the EVM, without +relying on any centralized (or even federated) entities. While deploying and locating an instance of +the Router would be trivial by using a fixed signature for the deployment transaction, the Router +must be constructed with the correct key for the Serai network (or set to have the correct key +post-construction). Since this cannot be guaranteed to occur, the process must be retryable and the +first successful invocation must be efficiently findable. + +## Methodology + +We define a contract, the Deployer, to deploy the Router. This contract could use `CREATE2` with the +key representing Serai as the salt, yet this would be open to collision attacks with just 2**80 +complexity. Instead, we use `CREATE`, which would require 2**80 on-chain transactions (infeasible) to +use as the basis of a collision. + +In order to efficiently find the contract for a key, the Deployer contract saves the addresses of +deployed contracts (indexed by the initialization code hash). This allows using a single call to a +contract with a known address to find the proper Router.
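A minimal sketch of this lookup, assuming the `Deployer::new`/`find_deployment` API defined in `deployer/src/lib.rs` below, the `keccak256` helper from `processor/ethereum/primitives`, and the crate renames used elsewhere in this tree (`ethereum_deployer`, `ethereum_primitives`); errors are collapsed to `Option` for brevity:

```rust
use std::sync::Arc;

use alloy_core::primitives::Address;
use alloy_simple_request_transport::SimpleRequest;
use alloy_provider::RootProvider;

use ethereum_deployer::Deployer;

/// Find the Router deployed for the given init code, if the Deployer and a deployment exist.
async fn locate_router(
  provider: Arc<RootProvider<SimpleRequest>>,
  router_init_code: &[u8],
) -> Option<Address> {
  // The Deployer has the same address on every chain; `new` returns Ok(None) if it isn't deployed
  let deployer = Deployer::new(provider).await.ok()??;
  // Deployments are indexed by the hash of their initialization code
  let init_code_hash = ethereum_primitives::keccak256(router_init_code);
  // A single `deployments(init_code_hash)` call yields the deployed address, if one was recorded
  deployer.find_deployment(init_code_hash).await.ok()?
}
```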
diff --git a/processor/ethereum/deployer/build.rs b/processor/ethereum/deployer/build.rs new file mode 100644 index 000000000..1906f1df5 --- /dev/null +++ b/processor/ethereum/deployer/build.rs @@ -0,0 +1,5 @@ +fn main() { + let artifacts_path = + std::env::var("OUT_DIR").unwrap().to_string() + "/serai-processor-ethereum-deployer"; + build_solidity_contracts::build(&[], "contracts", &artifacts_path).unwrap(); +} diff --git a/processor/ethereum/deployer/contracts/Deployer.sol b/processor/ethereum/deployer/contracts/Deployer.sol new file mode 100644 index 000000000..a7dac1d39 --- /dev/null +++ b/processor/ethereum/deployer/contracts/Deployer.sol @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: AGPL-3.0-only +pragma solidity ^0.8.26; + +/* + The expected deployment process of the Router is as follows: + + 1) A transaction deploying Deployer is made. Then, a deterministic signature is + created such that an account with an unknown private key is the creator of + the contract. Anyone can fund this address, and once anyone does, the + transaction deploying Deployer can be published by anyone. No other + transaction may be made from that account. + + 2) Anyone deploys the Router through the Deployer. This uses a sequential nonce + such that meet-in-the-middle attacks, with complexity 2**80, aren't feasible. + While such attacks would still be feasible if the Deployer's address was + controllable, the usage of a deterministic signature with a NUMS method + prevents that. + + This doesn't have any denial-of-service risks and will resolve once anyone steps + forward as deployer. This does fail to guarantee an identical address across + every chain, though it lets anyone efficiently ask the Deployer for + the address (with the Deployer having an identical address on every chain). + + Unfortunately, guaranteeing identical addresses isn't feasible. We'd need the + Deployer contract to use a consistent salt for the Router, yet the Router must + be deployed with a specific public key for Serai. Since Ethereum isn't able to + determine a valid public key (one that is the result of a Serai DKG) from a dishonest + public key, we have to allow multiple deployments with Serai being the one to + determine which to use. + + The alternative would be to have a council publish the Serai key on-Ethereum, + with Serai verifying the published result. This would introduce a DoS risk should + the council not publish the correct key, or not publish any key at all. + + This design does not work with contracts expecting initialization (which may require re-deploying + the same code until the initialization successfully goes through, without being sniped). 
+*/ + +contract Deployer { + mapping(bytes32 => address) public deployments; + + error PriorDeployed(); + error DeploymentFailed(); + + function deploy(bytes memory init_code) external { + // Deploy the contract + address created_contract; + assembly { + created_contract := create(0, add(init_code, 0x20), mload(init_code)) + } + if (created_contract == address(0)) { + revert DeploymentFailed(); + } + + bytes32 init_code_hash = keccak256(init_code); + + // Check this wasn't previously deployed + // We check this *after* deploying (in violation of CEI) to handle re-entrancy-related bugs + if (deployments[init_code_hash] != address(0)) { + revert PriorDeployed(); + } + + // Write the deployment to storage + deployments[init_code_hash] = created_contract; + } +} diff --git a/processor/ethereum/deployer/src/lib.rs b/processor/ethereum/deployer/src/lib.rs new file mode 100644 index 000000000..6fa59ee3c --- /dev/null +++ b/processor/ethereum/deployer/src/lib.rs @@ -0,0 +1,103 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +use std::sync::Arc; + +use alloy_core::primitives::{hex::FromHex, Address, U256, Bytes, TxKind}; +use alloy_consensus::{Signed, TxLegacy}; + +use alloy_sol_types::SolCall; + +use alloy_rpc_types_eth::{TransactionInput, TransactionRequest}; +use alloy_transport::{TransportErrorKind, RpcError}; +use alloy_simple_request_transport::SimpleRequest; +use alloy_provider::{Provider, RootProvider}; + +#[rustfmt::skip] +#[expect(warnings)] +#[expect(needless_pass_by_value)] +#[expect(clippy::all)] +#[expect(clippy::ignored_unit_patterns)] +#[expect(clippy::redundant_closure_for_method_calls)] +mod abi { + alloy_sol_macro::sol!("contracts/Deployer.sol"); +} + +/// The Deployer contract for the Serai Router contract. +/// +/// This Deployer has a deterministic address, letting it be immediately identified on any +/// compatible chain. It then supports retrieving the Router contract's address (which isn't +/// deterministic) using a single call. +#[derive(Clone, Debug)] +pub struct Deployer(Arc<RootProvider<SimpleRequest>>); +impl Deployer { + /// Obtain the transaction to deploy this contract, already signed. + /// + /// The account this transaction is sent from (which is populated in `from`) must be sufficiently + /// funded for this transaction to be submitted. This account has no known private key to anyone + /// so ETH sent can be neither misappropriated nor returned. + pub fn deployment_tx() -> Signed<TxLegacy> { + pub const BYTECODE: &[u8] = + include_bytes!(concat!(env!("OUT_DIR"), "/serai-processor-ethereum-deployer/Deployer.bin")); + let bytecode = + Bytes::from_hex(BYTECODE).expect("compiled-in Deployer bytecode wasn't valid hex"); + + let tx = TxLegacy { + chain_id: None, + nonce: 0, + // 100 gwei + gas_price: 100_000_000_000u128, + // TODO: Use a more accurate gas limit + gas_limit: 1_000_000u128, + to: TxKind::Create, + value: U256::ZERO, + input: bytecode, + }; + + ethereum_primitives::deterministically_sign(&tx) + } + + /// Obtain the deterministic address for this contract. + pub(crate) fn address() -> Address { + let deployer_deployer = + Self::deployment_tx().recover_signer().expect("deployment_tx didn't have a valid signature"); + Address::create(&deployer_deployer, 0) + } + + /// Construct a new view of the Deployer. 
+ pub async fn new( + provider: Arc>, + ) -> Result, RpcError> { + let address = Self::address(); + let code = provider.get_code_at(address).await?; + // Contract has yet to be deployed + if code.is_empty() { + return Ok(None); + } + Ok(Some(Self(provider))) + } + + /// Find the deployment of a contract. + pub async fn find_deployment( + &self, + init_code_hash: [u8; 32], + ) -> Result, RpcError> { + let call = TransactionRequest::default().to(Self::address()).input(TransactionInput::new( + abi::Deployer::deploymentsCall::new((init_code_hash.into(),)).abi_encode().into(), + )); + let bytes = self.0.call(&call).await?; + let deployment = abi::Deployer::deploymentsCall::abi_decode_returns(&bytes, true) + .map_err(|e| { + TransportErrorKind::Custom( + format!("node returned a non-address for function returning address: {e:?}").into(), + ) + })? + ._0; + + if **deployment == [0; 20] { + return Ok(None); + } + Ok(Some(deployment)) + } +} diff --git a/processor/ethereum/erc20/Cargo.toml b/processor/ethereum/erc20/Cargo.toml new file mode 100644 index 000000000..3c7f51017 --- /dev/null +++ b/processor/ethereum/erc20/Cargo.toml @@ -0,0 +1,30 @@ +[package] +name = "serai-processor-ethereum-erc20" +version = "0.1.0" +description = "A library for the Serai Processor to interact with ERC20s" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/ethereum/erc20" +authors = ["Luke Parker "] +edition = "2021" +publish = false +rust-version = "1.79" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +alloy-core = { version = "0.8", default-features = false } + +alloy-sol-types = { version = "0.8", default-features = false } +alloy-sol-macro = { version = "0.8", default-features = false } + +alloy-rpc-types-eth = { version = "0.3", default-features = false } +alloy-transport = { version = "0.3", default-features = false } +alloy-simple-request-transport = { path = "../../../networks/ethereum/alloy-simple-request-transport", default-features = false } +alloy-provider = { version = "0.3", default-features = false } + +tokio = { version = "1", default-features = false, features = ["rt"] } diff --git a/processor/ethereum/erc20/LICENSE b/processor/ethereum/erc20/LICENSE new file mode 100644 index 000000000..41d5a2616 --- /dev/null +++ b/processor/ethereum/erc20/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2022-2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . diff --git a/processor/ethereum/erc20/README.md b/processor/ethereum/erc20/README.md new file mode 100644 index 000000000..f1e447b06 --- /dev/null +++ b/processor/ethereum/erc20/README.md @@ -0,0 +1,3 @@ +# ERC20 + +A library for the Serai Processor to interact with ERC20s. 
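A minimal usage sketch, assuming the `Erc20` view and `top_level_transfers` method defined in `src/lib.rs` below, plus the `erc20` crate rename used by the Ethereum processor's Cargo.toml; the function name here is illustrative:

```rust
use std::sync::Arc;

use alloy_core::primitives::Address;
use alloy_simple_request_transport::SimpleRequest;
use alloy_transport::{RpcError, TransportErrorKind};
use alloy_provider::RootProvider;

use erc20::{Erc20, TopLevelTransfer};

/// Collect every top-level transfer of `token` to `recipient` within `block`.
async fn transfers_in_block(
  provider: Arc<RootProvider<SimpleRequest>>,
  token: [u8; 20],
  recipient: Address,
  block: u64,
) -> Result<Vec<TopLevelTransfer>, RpcError<TransportErrorKind>> {
  // `top_level_transfers` filters this block's Transfer logs to `recipient`, then checks each
  // emitting transaction for a top-level `transfer`/`transferFrom` call to that address
  Erc20::new(provider, token).top_level_transfers(block, recipient).await
}
```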
diff --git a/networks/ethereum/contracts/IERC20.sol b/processor/ethereum/erc20/contracts/IERC20.sol similarity index 97% rename from networks/ethereum/contracts/IERC20.sol rename to processor/ethereum/erc20/contracts/IERC20.sol index 70f1f93c9..c2de5ca02 100644 --- a/networks/ethereum/contracts/IERC20.sol +++ b/processor/ethereum/erc20/contracts/IERC20.sol @@ -1,5 +1,5 @@ // SPDX-License-Identifier: CC0 -pragma solidity ^0.8.0; +pragma solidity ^0.8.26; interface IERC20 { event Transfer(address indexed from, address indexed to, uint256 value); diff --git a/processor/ethereum/erc20/src/lib.rs b/processor/ethereum/erc20/src/lib.rs new file mode 100644 index 000000000..ec33989e7 --- /dev/null +++ b/processor/ethereum/erc20/src/lib.rs @@ -0,0 +1,197 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +use std::{sync::Arc, collections::HashSet}; + +use alloy_core::primitives::{Address, B256, U256}; + +use alloy_sol_types::{SolInterface, SolEvent}; + +use alloy_rpc_types_eth::Filter; +use alloy_transport::{TransportErrorKind, RpcError}; +use alloy_simple_request_transport::SimpleRequest; +use alloy_provider::{Provider, RootProvider}; + +use tokio::task::JoinSet; + +#[rustfmt::skip] +#[expect(warnings)] +#[expect(needless_pass_by_value)] +#[expect(clippy::all)] +#[expect(clippy::ignored_unit_patterns)] +#[expect(clippy::redundant_closure_for_method_calls)] +mod abi { + alloy_sol_macro::sol!("contracts/IERC20.sol"); +} +use abi::IERC20::{IERC20Calls, transferCall, transferFromCall}; +pub use abi::IERC20::Transfer; + +/// A top-level ERC20 transfer +#[derive(Clone, Debug)] +pub struct TopLevelTransfer { + /// The ID of the event for this transfer. + pub id: ([u8; 32], u64), + /// The address which made the transfer. + pub from: [u8; 20], + /// The amount transferred. + pub amount: U256, + /// The data appended after the call itself. + pub data: Vec, +} + +/// A view for an ERC20 contract. +#[derive(Clone, Debug)] +pub struct Erc20(Arc>, Address); +impl Erc20 { + /// Construct a new view of the specified ERC20 contract. + pub fn new(provider: Arc>, address: [u8; 20]) -> Self { + Self(provider, Address::from(&address)) + } + + /// Match a transaction for its top-level transfer to the specified address (if one exists). + pub async fn match_top_level_transfer( + provider: impl AsRef>, + transaction_id: B256, + to: Address, + ) -> Result, RpcError> { + // Fetch the transaction + let transaction = + provider.as_ref().get_transaction_by_hash(transaction_id).await?.ok_or_else(|| { + TransportErrorKind::Custom( + "node didn't have the transaction which emitted a log it had".to_string().into(), + ) + })?; + + // If this is a top-level call... 
+ // Don't validate the encoding as this can't be re-encoded to an identical bytestring due + // to the `InInstruction` appended after the call itself + if let Ok(call) = IERC20Calls::abi_decode(&transaction.input, false) { + // Extract the top-level call's from/to/value + let (from, call_to, value) = match call { + IERC20Calls::transfer(transferCall { to, value }) => (transaction.from, to, value), + IERC20Calls::transferFrom(transferFromCall { from, to, value }) => (from, to, value), + // Treat any other function selectors as unrecognized + _ => return Ok(None), + }; + // If this isn't a transfer to the expected address, return None + if call_to != to { + return Ok(None); + } + + // Fetch the transaction's logs + let receipt = + provider.as_ref().get_transaction_receipt(transaction_id).await?.ok_or_else(|| { + TransportErrorKind::Custom( + "node didn't have receipt for a transaction we were matching for a top-level transfer" + .to_string() + .into(), + ) + })?; + + // Find the log for this transfer + for log in receipt.inner.logs() { + // If this log was emitted by a different contract, continue + if Some(log.address()) != transaction.to { + continue; + } + + // Check if this is actually a transfer log + // https://github.com/alloy-rs/core/issues/589 + if log.topics().first() != Some(&Transfer::SIGNATURE_HASH) { + continue; + } + + let log_index = log.log_index.ok_or_else(|| { + TransportErrorKind::Custom("log didn't have its index set".to_string().into()) + })?; + let log = log + .log_decode::() + .map_err(|e| { + TransportErrorKind::Custom(format!("failed to decode Transfer log: {e:?}").into()) + })? + .inner + .data; + + // Ensure the top-level transfer is equivalent to the transfer this log represents. Since + // we can't find the exact top-level transfer without tracing the call, we just rule the + // first equivalent transfer as THE top-level transfer + if !((log.from == from) && (log.to == to) && (log.value == value)) { + continue; + } + + // Read the data appended after + let encoded = call.abi_encode(); + let data = transaction.input.as_ref()[encoded.len() ..].to_vec(); + + return Ok(Some(TopLevelTransfer { + id: (*transaction_id, log_index), + from: *log.from.0, + amount: log.value, + data, + })); + } + } + + Ok(None) + } + + /// Fetch all top-level transfers to the specified address. + /// + /// The result of this function is unordered. 
+ pub async fn top_level_transfers( + &self, + block: u64, + to: Address, + ) -> Result, RpcError> { + // Get all transfers within this block + let filter = Filter::new().from_block(block).to_block(block).address(self.1); + let filter = filter.event_signature(Transfer::SIGNATURE_HASH); + let mut to_topic = [0; 32]; + to_topic[12 ..].copy_from_slice(to.as_ref()); + let filter = filter.topic2(B256::from(to_topic)); + let logs = self.0.get_logs(&filter).await?; + + // These logs are for all transactions which performed any transfer + // We now check each transaction for having a top-level transfer to the specified address + let tx_ids = logs + .into_iter() + .map(|log| { + // Double check the address which emitted this log + if log.address() != self.1 { + Err(TransportErrorKind::Custom( + "node returned logs for a different address than requested".to_string().into(), + ))?; + } + + log.transaction_hash.ok_or_else(|| { + TransportErrorKind::Custom("log didn't specify its transaction hash".to_string().into()) + }) + }) + .collect::, _>>()?; + + let mut join_set = JoinSet::new(); + for tx_id in tx_ids { + join_set.spawn(Self::match_top_level_transfer(self.0.clone(), tx_id, to)); + } + + let mut top_level_transfers = vec![]; + while let Some(top_level_transfer) = join_set.join_next().await { + // This is an error if a task panics or aborts + // Panicking on a task panic is desired behavior, and we haven't aborted any tasks + match top_level_transfer.unwrap() { + // Top-level transfer + Ok(Some(top_level_transfer)) => top_level_transfers.push(top_level_transfer), + // Not a top-level transfer + Ok(None) => continue, + // Failed to get this transaction's information so abort + Err(e) => { + join_set.abort_all(); + Err(e)? + } + } + } + + Ok(top_level_transfers) + } +} diff --git a/processor/ethereum/primitives/Cargo.toml b/processor/ethereum/primitives/Cargo.toml new file mode 100644 index 000000000..6c6ff886c --- /dev/null +++ b/processor/ethereum/primitives/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "serai-processor-ethereum-primitives" +version = "0.1.0" +description = "Primitives for Serai's Ethereum Processor" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/ethereum/primitives" +authors = ["Luke Parker "] +edition = "2021" +publish = false +rust-version = "1.79" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +group = { version = "0.13", default-features = false } +k256 = { version = "^0.13.1", default-features = false, features = ["std", "arithmetic"] } + +alloy-core = { version = "0.8", default-features = false } +alloy-consensus = { version = "0.3", default-features = false, features = ["k256"] } diff --git a/processor/ethereum/primitives/LICENSE b/processor/ethereum/primitives/LICENSE new file mode 100644 index 000000000..41d5a2616 --- /dev/null +++ b/processor/ethereum/primitives/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2022-2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. 
+ +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see <https://www.gnu.org/licenses/>. diff --git a/processor/ethereum/primitives/README.md b/processor/ethereum/primitives/README.md new file mode 100644 index 000000000..90da68c68 --- /dev/null +++ b/processor/ethereum/primitives/README.md @@ -0,0 +1,3 @@ +# Ethereum Processor Primitives + +This library contains miscellaneous primitives and helper functions. diff --git a/processor/ethereum/primitives/src/lib.rs b/processor/ethereum/primitives/src/lib.rs new file mode 100644 index 000000000..ccf413445 --- /dev/null +++ b/processor/ethereum/primitives/src/lib.rs @@ -0,0 +1,49 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +use group::ff::PrimeField; +use k256::{elliptic_curve::ops::Reduce, U256, Scalar}; + +use alloy_core::primitives::{Parity, Signature}; +use alloy_consensus::{SignableTransaction, Signed, TxLegacy}; + +/// The Keccak256 hash function. +pub fn keccak256(data: impl AsRef<[u8]>) -> [u8; 32] { + alloy_core::primitives::keccak256(data.as_ref()).into() +} + +/// Deterministically sign a transaction. +/// +/// This function panics if passed a transaction with a non-None chain ID. +pub fn deterministically_sign(tx: &TxLegacy) -> Signed<TxLegacy> { + pub fn hash_to_scalar(data: impl AsRef<[u8]>) -> Scalar { + <Scalar as Reduce<U256>>::reduce_bytes(&keccak256(data).into()) + } + + assert!( + tx.chain_id.is_none(), + "chain ID was Some when deterministically signing a TX (causing a non-deterministic signer)" + ); + + let sig_hash = tx.signature_hash().0; + let mut r = hash_to_scalar([sig_hash.as_slice(), b"r"].concat()); + let mut s = hash_to_scalar([sig_hash.as_slice(), b"s"].concat()); + loop { + // Create the signature + let r_bytes: [u8; 32] = r.to_repr().into(); + let s_bytes: [u8; 32] = s.to_repr().into(); + let v = Parity::NonEip155(false); + let signature = Signature::from_scalars_and_parity(r_bytes.into(), s_bytes.into(), v).unwrap(); + + // Check if this is a valid signature + let tx = tx.clone().into_signed(signature); + if tx.recover_signer().is_ok() { + return tx; + } + + // Re-hash until valid + r = hash_to_scalar(r_bytes); + s = hash_to_scalar(s_bytes); + } +} diff --git a/processor/ethereum/router/Cargo.toml b/processor/ethereum/router/Cargo.toml new file mode 100644 index 000000000..d21a26d9c --- /dev/null +++ b/processor/ethereum/router/Cargo.toml @@ -0,0 +1,47 @@ +[package] +name = "serai-processor-ethereum-router" +version = "0.1.0" +description = "The Router used by the Serai Processor for Ethereum" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/ethereum/router" +authors = ["Luke Parker "] +edition = "2021" +publish = false +rust-version = "1.79" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +group = { version = "0.13", default-features = false } + +alloy-core = { version = "0.8", default-features = false } +alloy-consensus = { version = "0.3", default-features = false } + +alloy-sol-types = { version = "0.8", default-features = false } + +alloy-rpc-types-eth = { version = "0.3", default-features = false } +alloy-transport = { version = "0.3", default-features = false } +alloy-simple-request-transport = { path = "../../../networks/ethereum/alloy-simple-request-transport", default-features = false } +alloy-provider = { version = "0.3", default-features = false } + +ethereum-schnorr = { package = 
"ethereum-schnorr-contract", path = "../../../networks/ethereum/schnorr", default-features = false } + +ethereum-primitives = { package = "serai-processor-ethereum-primitives", path = "../primitives", default-features = false } +ethereum-deployer = { package = "serai-processor-ethereum-deployer", path = "../deployer", default-features = false } +erc20 = { package = "serai-processor-ethereum-erc20", path = "../erc20", default-features = false } + +serai-client = { path = "../../../substrate/client", default-features = false, features = ["ethereum"] } + +[build-dependencies] +build-solidity-contracts = { path = "../../../networks/ethereum/build-contracts", default-features = false } + +syn = { version = "2", default-features = false, features = ["proc-macro"] } + +syn-solidity = { version = "0.8", default-features = false } +alloy-sol-macro-input = { version = "0.8", default-features = false } +alloy-sol-macro-expander = { version = "0.8", default-features = false } diff --git a/processor/ethereum/router/LICENSE b/processor/ethereum/router/LICENSE new file mode 100644 index 000000000..41d5a2616 --- /dev/null +++ b/processor/ethereum/router/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2022-2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . diff --git a/processor/ethereum/router/README.md b/processor/ethereum/router/README.md new file mode 100644 index 000000000..b93c32195 --- /dev/null +++ b/processor/ethereum/router/README.md @@ -0,0 +1 @@ +# Ethereum Router diff --git a/processor/ethereum/router/build.rs b/processor/ethereum/router/build.rs new file mode 100644 index 000000000..1ce6d4f57 --- /dev/null +++ b/processor/ethereum/router/build.rs @@ -0,0 +1,42 @@ +use std::{env, fs}; + +use alloy_sol_macro_input::SolInputKind; + +fn write(sol: syn_solidity::File, file: &str) { + let sol = alloy_sol_macro_expander::expand::expand(sol).unwrap(); + fs::write(file, sol.to_string()).unwrap(); +} + +fn sol(sol_files: &[&str], file: &str) { + let mut sol = String::new(); + for sol_file in sol_files { + sol += &fs::read_to_string(sol_file).unwrap(); + } + let SolInputKind::Sol(sol) = syn::parse_str(&sol).unwrap() else { + panic!("parsed .sols file wasn't SolInputKind::Sol"); + }; + write(sol, file); +} + +fn main() { + let artifacts_path = + env::var("OUT_DIR").unwrap().to_string() + "/serai-processor-ethereum-router"; + + if !fs::exists(&artifacts_path).unwrap() { + fs::create_dir(&artifacts_path).unwrap(); + } + + build_solidity_contracts::build( + &["../../../networks/ethereum/schnorr/contracts", "../erc20/contracts"], + "contracts", + &artifacts_path, + ) + .unwrap(); + + // This cannot be handled with the sol! macro. 
The Solidity requires an import + // https://github.com/alloy-rs/core/issues/602 + sol( + &["../../../networks/ethereum/schnorr/contracts/Schnorr.sol", "contracts/Router.sol"], + &(artifacts_path + "/router.rs"), + ); +} diff --git a/processor/ethereum/router/contracts/Router.sol b/processor/ethereum/router/contracts/Router.sol new file mode 100644 index 000000000..9100f59e6 --- /dev/null +++ b/processor/ethereum/router/contracts/Router.sol @@ -0,0 +1,240 @@ +// SPDX-License-Identifier: AGPL-3.0-only +pragma solidity ^0.8.26; + +import "IERC20.sol"; + +import "Schnorr.sol"; + +// _ is used as a prefix for internal functions and smart-contract-scoped variables +contract Router { + // Nonce is incremented for each command executed, preventing replays + uint256 private _nonce; + + // The nonce which will be used for the smart contracts we deploy, enabling + // predicting their addresses + uint256 private _smartContractNonce; + + // The current public key, defined as per the Schnorr library + bytes32 private _seraiKey; + + enum DestinationType { + Address, + Code + } + + struct AddressDestination { + address destination; + } + + struct CodeDestination { + uint32 gas_limit; + bytes code; + } + + struct OutInstruction { + DestinationType destinationType; + bytes destination; + uint256 value; + } + + struct Signature { + bytes32 c; + bytes32 s; + } + + event SeraiKeyUpdated(uint256 indexed nonce, bytes32 indexed key); + event InInstruction( + address indexed from, address indexed coin, uint256 amount, bytes instruction + ); + event Executed(uint256 indexed nonce, bytes32 indexed message_hash); + + error InvalidSignature(); + error InvalidAmount(); + error FailedTransfer(); + + // Update the Serai key at the end of the current function. + modifier _updateSeraiKeyAtEndOfFn(uint256 nonceUpdatedWith, bytes32 newSeraiKey) { + // Run the function itself. + _; + + // Update the key. + _seraiKey = newSeraiKey; + emit SeraiKeyUpdated(nonceUpdatedWith, newSeraiKey); + } + + constructor(bytes32 initialSeraiKey) _updateSeraiKeyAtEndOfFn(0, initialSeraiKey) { + // We consumed nonce 0 when setting the initial Serai key + _nonce = 1; + // Nonces are incremented by 1 upon account creation, prior to any code execution, per EIP-161 + // This is incompatible with any networks which don't have their nonces start at 0 + _smartContractNonce = 1; + } + + // updateSeraiKey validates the given Schnorr signature against the current public key, and if + // successful, updates the contract's public key to the one specified. 
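+ // As a sketch (mirroring the body below and the Rust-side `update_serai_key_message` helper),
+ // the message expected to be signed is:
+ //   keccak256(abi.encodePacked("updateSeraiKey", block.chainid, _nonce, newSeraiKey))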
+ function updateSeraiKey(bytes32 newSeraiKey, Signature calldata signature) + external + _updateSeraiKeyAtEndOfFn(_nonce, newSeraiKey) + { + // This DST needs a length prefix as well to prevent DSTs potentially being substrings of each + // other, yet this is fine for our very well-defined, limited use + bytes32 message = + keccak256(abi.encodePacked("updateSeraiKey", block.chainid, _nonce, newSeraiKey)); + _nonce++; + + if (!Schnorr.verify(_seraiKey, message, signature.c, signature.s)) { + revert InvalidSignature(); + } + } + + function inInstruction(address coin, uint256 amount, bytes memory instruction) external payable { + if (coin == address(0)) { + if (amount != msg.value) revert InvalidAmount(); + } else { + (bool success, bytes memory res) = address(coin).call( + abi.encodeWithSelector(IERC20.transferFrom.selector, msg.sender, address(this), amount) + ); + + // Require there was nothing returned, which is done by some non-standard tokens, or that the + // ERC20 contract did in fact return true + bool nonStandardResOrTrue = (res.length == 0) || abi.decode(res, (bool)); + if (!(success && nonStandardResOrTrue)) revert FailedTransfer(); + } + + /* + Due to fee-on-transfer tokens, emitting the amount directly is frowned upon. The amount + instructed to be transferred may not actually be the amount transferred. + + If we add nonReentrant to every single function which can affect the balance, we can check the + amount exactly matches. This prevents transfers of less value than expected from occurring, at + least not without an additional transfer to top up the difference (which isn't routed through + this contract and accordingly isn't trying to artificially create events from this contract). + + If we don't add nonReentrant, a transfer can be started, and then a new transfer for the + difference can follow it up (again and again until a rounding error is reached). This contract + would believe all transfers were done in full, despite each only being done in part (except + for the last one). + + Given that fee-on-transfer tokens aren't intended to be supported, that the only token actively + planned to be supported is Dai (which doesn't have any fee-on-transfer logic), and that + fee-on-transfer tokens aren't even able to be supported at this time by the larger Serai + network, we simply classify this entire class of tokens as non-standard implementations which + induce undefined behavior. + + It is the Serai network's role not to add support for any non-standard implementations. + */ + emit InInstruction(msg.sender, coin, amount, instruction); + } + + /* + We purposely do not check if these calls succeed. A call either succeeded, and there's no + problem, or the call failed due to: + A) An insolvency + B) A malicious receiver + C) A non-standard token + A is an invariant, B should be dropped, C is something out of the control of this contract.
+ It is again the Serai network's role to not add support for any non-standard tokens. + */ + + // Perform an ERC20 transfer out + function _erc20TransferOut(address to, address coin, uint256 value) private { + coin.call{ gas: 100_000 }(abi.encodeWithSelector(IERC20.transfer.selector, to, value)); + } + + // Perform an ETH/ERC20 transfer out + function _transferOut(address to, address coin, uint256 value) private { + if (coin == address(0)) { + // Enough gas to service the transfer and a minimal amount of logic + to.call{ value: value, gas: 5_000 }(""); + } else { + _erc20TransferOut(to, coin, value); + } + } + + /* + Serai supports arbitrary calls out via deploying smart contracts (with user-specified code), + letting them execute whatever calls they're coded for. Since we can't meter CREATE, we call + CREATE from this function, which we invoke not internally but via CALL (which we can meter). + */ + function arbitaryCallOut(bytes memory code) external payable { + // Because we're creating a contract, increment our nonce + _smartContractNonce += 1; + + uint256 msg_value = msg.value; + address contractAddress; + assembly { + contractAddress := create(msg_value, add(code, 0x20), mload(code)) + } + } + + // Execute a list of transactions if they were signed by the current key with the current nonce + function execute( + address coin, + uint256 fee, + OutInstruction[] calldata transactions, + Signature calldata signature + ) external { + // Verify the signature + // We hash the message here as we need the message's hash for the Executed event + // Since we're already going to hash it, hashing it prior to verifying the signature reduces the + // amount of words hashed by its challenge function (reducing our gas costs) + bytes32 message = + keccak256(abi.encode("execute", block.chainid, _nonce, coin, fee, transactions)); + if (!Schnorr.verify(_seraiKey, message, signature.c, signature.s)) { + revert InvalidSignature(); + } + + // Since the signature was verified, perform execution + emit Executed(_nonce, message); + // While this is sufficient to prevent replays, it's still technically possible for instructions + // from later batches to be executed before these instructions upon re-entrancy + _nonce++; + + for (uint256 i = 0; i < transactions.length; i++) { + // If the destination is an address, we perform a direct transfer + if (transactions[i].destinationType == DestinationType.Address) { + // This may cause a panic and the contract to become stuck if the destination isn't actually + // 20 bytes. Serai is trusted to not pass a malformed destination + (AddressDestination memory destination) = + abi.decode(transactions[i].destination, (AddressDestination)); + _transferOut(destination.destination, coin, transactions[i].value); + } else { + // Prepare for the transfer + uint256 eth_value = 0; + if (coin == address(0)) { + // If it's ETH, we transfer the value with the call + eth_value = transactions[i].value; + } else { + // If it's an ERC20, we calculate the hash of the will-be contract and transfer to it + // before deployment.
This avoids needing to deploy, then call again, offering a few + // optimizations + address nextAddress = + address(uint160(uint256(keccak256(abi.encode(address(this), _smartContractNonce))))); + _erc20TransferOut(nextAddress, coin, transactions[i].value); + } + + // Perform the deployment with the defined gas budget + (CodeDestination memory destination) = + abi.decode(transactions[i].destination, (CodeDestination)); + address(this).call{ gas: destination.gas_limit, value: eth_value }( + abi.encodeWithSelector(Router.arbitaryCallOut.selector, destination.code) + ); + } + } + + // Transfer to the caller the fee + _transferOut(msg.sender, coin, fee); + } + + function nonce() external view returns (uint256) { + return _nonce; + } + + function smartContractNonce() external view returns (uint256) { + return _smartContractNonce; + } + + function seraiKey() external view returns (bytes32) { + return _seraiKey; + } +} diff --git a/processor/ethereum/router/src/lib.rs b/processor/ethereum/router/src/lib.rs new file mode 100644 index 000000000..7a7cffd82 --- /dev/null +++ b/processor/ethereum/router/src/lib.rs @@ -0,0 +1,554 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +use std::{sync::Arc, io, collections::HashSet}; + +use group::ff::PrimeField; + +use alloy_core::primitives::{hex::FromHex, Address, U256, Bytes, TxKind}; +use alloy_consensus::TxLegacy; + +use alloy_sol_types::{SolValue, SolConstructor, SolCall, SolEvent}; + +use alloy_rpc_types_eth::Filter; +use alloy_transport::{TransportErrorKind, RpcError}; +use alloy_simple_request_transport::SimpleRequest; +use alloy_provider::{Provider, RootProvider}; + +use ethereum_schnorr::{PublicKey, Signature}; +use ethereum_deployer::Deployer; +use erc20::{Transfer, Erc20}; + +use serai_client::networks::ethereum::Address as SeraiAddress; + +#[rustfmt::skip] +#[expect(warnings)] +#[expect(needless_pass_by_value)] +#[expect(clippy::all)] +#[expect(clippy::ignored_unit_patterns)] +#[expect(clippy::redundant_closure_for_method_calls)] +mod _abi { + include!(concat!(env!("OUT_DIR"), "/serai-processor-ethereum-router/router.rs")); +} +use _abi::Router as abi; +use abi::{ + SeraiKeyUpdated as SeraiKeyUpdatedEvent, InInstruction as InInstructionEvent, + Executed as ExecutedEvent, +}; + +impl From<&Signature> for abi::Signature { + fn from(signature: &Signature) -> Self { + Self { + c: <[u8; 32]>::from(signature.c().to_repr()).into(), + s: <[u8; 32]>::from(signature.s().to_repr()).into(), + } + } +} + +/// A coin on Ethereum. +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub enum Coin { + /// Ether, the native coin of Ethereum. + Ether, + /// An ERC20 token. + Erc20([u8; 20]), +} + +impl Coin { + fn address(&self) -> Address { + (match self { + Coin::Ether => [0; 20], + Coin::Erc20(address) => *address, + }) + .into() + } + + /// Read a `Coin`. + pub fn read(reader: &mut R) -> io::Result { + let mut kind = [0xff]; + reader.read_exact(&mut kind)?; + Ok(match kind[0] { + 0 => Coin::Ether, + 1 => { + let mut address = [0; 20]; + reader.read_exact(&mut address)?; + Coin::Erc20(address) + } + _ => Err(io::Error::other("unrecognized Coin type"))?, + }) + } + + /// Write the `Coin`. + pub fn write(&self, writer: &mut W) -> io::Result<()> { + match self { + Coin::Ether => writer.write_all(&[0]), + Coin::Erc20(token) => { + writer.write_all(&[1])?; + writer.write_all(token) + } + } + } +} + +/// An InInstruction from the Router. 
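+///
+/// `read`/`write` use a fixed little-endian layout (a sketch derived from the implementations
+/// below): the 32-byte id hash, the id position as a `u64`, the 20-byte `from` address, the
+/// `Coin`, the amount as a 32-byte `U256`, and the `data` prefixed with its length as a `u32`.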
+#[derive(Clone, PartialEq, Eq, Debug)] +pub struct InInstruction { + /// The ID for this `InInstruction`. + pub id: ([u8; 32], u64), + /// The address which transferred these coins to Serai. + pub from: [u8; 20], + /// The coin transferred. + pub coin: Coin, + /// The amount transferred. + pub amount: U256, + /// The data associated with the transfer. + pub data: Vec, +} + +impl InInstruction { + /// Read an `InInstruction`. + pub fn read(reader: &mut R) -> io::Result { + let id = { + let mut id_hash = [0; 32]; + reader.read_exact(&mut id_hash)?; + let mut id_pos = [0; 8]; + reader.read_exact(&mut id_pos)?; + let id_pos = u64::from_le_bytes(id_pos); + (id_hash, id_pos) + }; + + let mut from = [0; 20]; + reader.read_exact(&mut from)?; + + let coin = Coin::read(reader)?; + let mut amount = [0; 32]; + reader.read_exact(&mut amount)?; + let amount = U256::from_le_slice(&amount); + + let mut data_len = [0; 4]; + reader.read_exact(&mut data_len)?; + let data_len = usize::try_from(u32::from_le_bytes(data_len)) + .map_err(|_| io::Error::other("InInstruction data exceeded 2**32 in length"))?; + let mut data = vec![0; data_len]; + reader.read_exact(&mut data)?; + + Ok(InInstruction { id, from, coin, amount, data }) + } + + /// Write the `InInstruction`. + pub fn write(&self, writer: &mut W) -> io::Result<()> { + writer.write_all(&self.id.0)?; + writer.write_all(&self.id.1.to_le_bytes())?; + + writer.write_all(&self.from)?; + + self.coin.write(writer)?; + writer.write_all(&self.amount.as_le_bytes())?; + + writer.write_all( + &u32::try_from(self.data.len()) + .map_err(|_| { + io::Error::other("InInstruction being written had data exceeding 2**32 in length") + })? + .to_le_bytes(), + )?; + writer.write_all(&self.data) + } +} + +/// A list of `OutInstruction`s. +#[derive(Clone)] +pub struct OutInstructions(Vec); +impl From<&[(SeraiAddress, U256)]> for OutInstructions { + fn from(outs: &[(SeraiAddress, U256)]) -> Self { + Self( + outs + .iter() + .map(|(address, amount)| { + #[allow(non_snake_case)] + let (destinationType, destination) = match address { + SeraiAddress::Address(address) => ( + abi::DestinationType::Address, + (abi::AddressDestination { destination: Address::from(address) }).abi_encode(), + ), + SeraiAddress::Contract(contract) => ( + abi::DestinationType::Code, + (abi::CodeDestination { + gas_limit: contract.gas_limit(), + code: contract.code().to_vec().into(), + }) + .abi_encode(), + ), + }; + abi::OutInstruction { destinationType, destination: destination.into(), value: *amount } + }) + .collect(), + ) + } +} + +/// An action which was executed by the Router. +#[derive(Clone, PartialEq, Eq, Debug)] +pub enum Executed { + /// Set a new key. + SetKey { + /// The nonce this was done with. + nonce: u64, + /// The key set. + key: [u8; 32], + }, + /// Executed Batch. + Batch { + /// The nonce this was done with. + nonce: u64, + /// The hash of the signed message for the Batch executed. + message_hash: [u8; 32], + }, +} + +impl Executed { + /// The nonce consumed by this executed event. + pub fn nonce(&self) -> u64 { + match self { + Executed::SetKey { nonce, .. } | Executed::Batch { nonce, .. } => *nonce, + } + } + + /// Write the Executed. 
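+  ///
+  /// A sketch of the encoding, as implemented below: a 1-byte kind (0 for `SetKey`, 1 for
+  /// `Batch`), the nonce as 8 little-endian bytes, then a 32-byte payload (the key or the
+  /// message hash respectively).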
+ pub fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { + match self { + Self::SetKey { nonce, key } => { + writer.write_all(&[0])?; + writer.write_all(&nonce.to_le_bytes())?; + writer.write_all(key) + } + Self::Batch { nonce, message_hash } => { + writer.write_all(&[1])?; + writer.write_all(&nonce.to_le_bytes())?; + writer.write_all(message_hash) + } + } + } + + /// Read an Executed. + pub fn read(reader: &mut impl io::Read) -> io::Result { + let mut kind = [0xff]; + reader.read_exact(&mut kind)?; + if kind[0] >= 2 { + Err(io::Error::other("unrecognized type of Executed"))?; + } + + let mut nonce = [0; 8]; + reader.read_exact(&mut nonce)?; + let nonce = u64::from_le_bytes(nonce); + + let mut payload = [0; 32]; + reader.read_exact(&mut payload)?; + + Ok(match kind[0] { + 0 => Self::SetKey { nonce, key: payload }, + 1 => Self::Batch { nonce, message_hash: payload }, + _ => unreachable!(), + }) + } +} + +/// A view of the Router for Serai. +#[derive(Clone, Debug)] +pub struct Router(Arc>, Address); +impl Router { + fn code() -> Vec { + const BYTECODE: &[u8] = + include_bytes!(concat!(env!("OUT_DIR"), "/serai-processor-ethereum-router/Router.bin")); + Bytes::from_hex(BYTECODE).expect("compiled-in Router bytecode wasn't valid hex").to_vec() + } + + fn init_code(key: &PublicKey) -> Vec { + let mut bytecode = Self::code(); + // Append the constructor arguments + bytecode.extend((abi::constructorCall { initialSeraiKey: key.eth_repr().into() }).abi_encode()); + bytecode + } + + /// Create a new view of the Router. + /// + /// This performs an on-chain lookup for the first deployed Router constructed with this public + /// key. This lookup is of a constant amount of calls and does not read any logs. + pub async fn new( + provider: Arc>, + initial_serai_key: &PublicKey, + ) -> Result, RpcError> { + let Some(deployer) = Deployer::new(provider.clone()).await? else { + return Ok(None); + }; + let Some(deployment) = deployer + .find_deployment(ethereum_primitives::keccak256(Self::init_code(initial_serai_key))) + .await? + else { + return Ok(None); + }; + Ok(Some(Self(provider, deployment))) + } + + /// The address of the router. + pub fn address(&self) -> Address { + self.1 + } + + /// Get the message to be signed in order to update the key for Serai. + pub fn update_serai_key_message(chain_id: U256, nonce: u64, key: &PublicKey) -> Vec { + ( + "updateSeraiKey", + chain_id, + U256::try_from(nonce).expect("couldn't convert u64 to u256"), + key.eth_repr(), + ) + .abi_encode_packed() + } + + /// Construct a transaction to update the key representing Serai. + pub fn update_serai_key(&self, public_key: &PublicKey, sig: &Signature) -> TxLegacy { + // TODO: Set a more accurate gas + TxLegacy { + to: TxKind::Call(self.1), + input: abi::updateSeraiKeyCall::new((public_key.eth_repr().into(), sig.into())) + .abi_encode() + .into(), + gas_limit: 100_000, + ..Default::default() + } + } + + /// Get the message to be signed in order to execute a series of `OutInstruction`s. + pub fn execute_message( + chain_id: U256, + nonce: u64, + coin: Coin, + fee: U256, + outs: OutInstructions, + ) -> Vec { + ("execute", chain_id, U256::try_from(nonce).unwrap(), coin.address(), fee, outs.0).abi_encode() + } + + /// Construct a transaction to execute a batch of `OutInstruction`s. 
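+ ///
+ /// The transaction's gas limit is currently a rough heuristic (see the TODO in the body): a
+ /// base of 100,000 gas plus (200,000 + 10,000) gas per `OutInstruction`.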
+ pub fn execute(&self, coin: Coin, fee: U256, outs: OutInstructions, sig: &Signature) -> TxLegacy { + let outs_len = outs.0.len(); + TxLegacy { + to: TxKind::Call(self.1), + input: abi::executeCall::new((coin.address(), fee, outs.0, sig.into())).abi_encode().into(), + // TODO + gas_limit: 100_000 + ((200_000 + 10_000) * u128::try_from(outs_len).unwrap()), + ..Default::default() + } + } + + /// Fetch the `InInstruction`s emitted by the Router from this block. + pub async fn in_instructions( + &self, + block: u64, + allowed_tokens: &HashSet<[u8; 20]>, + ) -> Result, RpcError> { + // The InInstruction events for this block + let filter = Filter::new().from_block(block).to_block(block).address(self.1); + let filter = filter.event_signature(InInstructionEvent::SIGNATURE_HASH); + let logs = self.0.get_logs(&filter).await?; + + /* + We check that for all InInstructions for ERC20s emitted, a corresponding transfer occurred. + In order to prevent a transfer from being used to justify multiple distinct InInstructions, + we insert the transfer's log index into this HashSet. + */ + let mut transfer_check = HashSet::new(); + + let mut in_instructions = vec![]; + for log in logs { + // Double check the address which emitted this log + if log.address() != self.1 { + Err(TransportErrorKind::Custom( + "node returned a log from a different address than requested".to_string().into(), + ))?; + } + + let id = ( + log + .block_hash + .ok_or_else(|| { + TransportErrorKind::Custom("log didn't have its block hash set".to_string().into()) + })? + .into(), + log.log_index.ok_or_else(|| { + TransportErrorKind::Custom("log didn't have its index set".to_string().into()) + })?, + ); + + let tx_hash = log.transaction_hash.ok_or_else(|| { + TransportErrorKind::Custom("log didn't have its transaction hash set".to_string().into()) + })?; + + let log = log + .log_decode::() + .map_err(|e| { + TransportErrorKind::Custom( + format!("filtered to InInstructionEvent yet couldn't decode log: {e:?}").into(), + ) + })? + .inner + .data; + + let coin = if log.coin.0 == [0; 20] { + Coin::Ether + } else { + let token = *log.coin.0; + + if !allowed_tokens.contains(&token) { + continue; + } + + // Get all logs for this TX + let receipt = self.0.get_transaction_receipt(tx_hash).await?.ok_or_else(|| { + TransportErrorKind::Custom( + "node didn't have the receipt for a transaction it had".to_string().into(), + ) + })?; + let tx_logs = receipt.inner.logs(); + + /* + The transfer which causes an InInstruction event won't be a top-level transfer. + Accordingly, when looking for the matching transfer, disregard the top-level transfer (if + one exists). + */ + if let Some(matched) = Erc20::match_top_level_transfer(&self.0, tx_hash, self.1).await? 
{ + // Mark this log index as used so it isn't used again + transfer_check.insert(matched.id.1); + } + + // Find a matching transfer log + let mut found_transfer = false; + for tx_log in tx_logs { + let log_index = tx_log.log_index.ok_or_else(|| { + TransportErrorKind::Custom( + "log in transaction receipt didn't have its log index set".to_string().into(), + ) + })?; + + // Ensure we didn't already use this transfer to check a distinct InInstruction event + if transfer_check.contains(&log_index) { + continue; + } + + // Check if this log is from the token we expected to be transferred + if tx_log.address().0 != token { + continue; + } + // Check if this is a transfer log + // https://github.com/alloy-rs/core/issues/589 + if tx_log.topics().first() != Some(&Transfer::SIGNATURE_HASH) { + continue; + } + let Ok(transfer) = Transfer::decode_log(&tx_log.inner.clone(), true) else { continue }; + // Check if this is a transfer to us for the expected amount + if (transfer.to == self.1) && (transfer.value == log.amount) { + transfer_check.insert(log_index); + found_transfer = true; + break; + } + } + if !found_transfer { + // This shouldn't be a simple error + // This is an exploit, a non-conforming ERC20, or a malicious connection + // This should halt the process. While this is sufficient, it's sub-optimal + // TODO + Err(TransportErrorKind::Custom( + "ERC20 InInstruction with no matching transfer log".to_string().into(), + ))?; + } + + Coin::Erc20(token) + }; + + in_instructions.push(InInstruction { + id, + from: *log.from.0, + coin, + amount: log.amount, + data: log.instruction.as_ref().to_vec(), + }); + } + + Ok(in_instructions) + } + + /// Fetch the executed actions from this block. + pub async fn executed(&self, block: u64) -> Result, RpcError> { + let mut res = vec![]; + + { + let filter = Filter::new().from_block(block).to_block(block).address(self.1); + let filter = filter.event_signature(SeraiKeyUpdatedEvent::SIGNATURE_HASH); + let logs = self.0.get_logs(&filter).await?; + + for log in logs { + // Double check the address which emitted this log + if log.address() != self.1 { + Err(TransportErrorKind::Custom( + "node returned a log from a different address than requested".to_string().into(), + ))?; + } + + let log = log + .log_decode::() + .map_err(|e| { + TransportErrorKind::Custom( + format!("filtered to SeraiKeyUpdatedEvent yet couldn't decode log: {e:?}").into(), + ) + })? + .inner + .data; + + res.push(Executed::SetKey { + nonce: log.nonce.try_into().map_err(|e| { + TransportErrorKind::Custom(format!("filtered to convert nonce to u64: {e:?}").into()) + })?, + key: log.key.into(), + }); + } + } + + { + let filter = Filter::new().from_block(block).to_block(block).address(self.1); + let filter = filter.event_signature(ExecutedEvent::SIGNATURE_HASH); + let logs = self.0.get_logs(&filter).await?; + + for log in logs { + // Double check the address which emitted this log + if log.address() != self.1 { + Err(TransportErrorKind::Custom( + "node returned a log from a different address than requested".to_string().into(), + ))?; + } + + let log = log + .log_decode::() + .map_err(|e| { + TransportErrorKind::Custom( + format!("filtered to ExecutedEvent yet couldn't decode log: {e:?}").into(), + ) + })? 
+ .inner + .data; + + res.push(Executed::Batch { + nonce: log.nonce.try_into().map_err(|e| { + TransportErrorKind::Custom(format!("filtered to convert nonce to u64: {e:?}").into()) + })?, + message_hash: log.message_hash.into(), + }); + } + } + + res.sort_by_key(Executed::nonce); + + Ok(res) + } +} diff --git a/processor/ethereum/src/key_gen.rs b/processor/ethereum/src/key_gen.rs new file mode 100644 index 000000000..581684efc --- /dev/null +++ b/processor/ethereum/src/key_gen.rs @@ -0,0 +1,25 @@ +use ciphersuite::{Ciphersuite, Secp256k1}; +use dkg::ThresholdKeys; + +use ethereum_schnorr::PublicKey; + +pub(crate) struct KeyGenParams; +impl key_gen::KeyGenParams for KeyGenParams { + const ID: &'static str = "Ethereum"; + + type ExternalNetworkCiphersuite = Secp256k1; + + fn tweak_keys(keys: &mut ThresholdKeys) { + while PublicKey::new(keys.group_key()).is_none() { + *keys = keys.offset(::F::ONE); + } + } + + fn encode_key(key: ::G) -> Vec { + PublicKey::new(key).unwrap().eth_repr().to_vec() + } + + fn decode_key(key: &[u8]) -> Option<::G> { + PublicKey::from_eth_repr(key.try_into().ok()?).map(|key| key.point()) + } +} diff --git a/processor/ethereum/src/main.rs b/processor/ethereum/src/main.rs new file mode 100644 index 000000000..7acdffdbd --- /dev/null +++ b/processor/ethereum/src/main.rs @@ -0,0 +1,90 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +#[global_allocator] +static ALLOCATOR: zalloc::ZeroizingAlloc = + zalloc::ZeroizingAlloc(std::alloc::System); + +use std::sync::Arc; + +use alloy_core::primitives::U256; +use alloy_simple_request_transport::SimpleRequest; +use alloy_rpc_client::ClientBuilder; +use alloy_provider::{Provider, RootProvider}; + +use serai_client::validator_sets::primitives::Session; + +use serai_env as env; +use serai_db::{Get, DbTxn, create_db}; + +use ::primitives::EncodableG; +use ::key_gen::KeyGenParams as KeyGenParamsTrait; + +mod primitives; +pub(crate) use crate::primitives::*; + +mod key_gen; +use crate::key_gen::KeyGenParams; +mod rpc; +use rpc::Rpc; +mod scheduler; +use scheduler::{SmartContract, Scheduler}; +mod publisher; +use publisher::TransactionPublisher; + +create_db! { + EthereumProcessor { + // The initial key for Serai on Ethereum + InitialSeraiKey: () -> EncodableG, + } +} + +struct SetInitialKey; +impl bin::Hooks for SetInitialKey { + fn on_message(txn: &mut impl DbTxn, msg: &messages::CoordinatorMessage) { + if let messages::CoordinatorMessage::Substrate( + messages::substrate::CoordinatorMessage::SetKeys { session, key_pair, .. 
}, + ) = msg + { + assert_eq!(*session, Session(0)); + let key = KeyGenParams::decode_key(key_pair.1.as_ref()) + .expect("invalid Ethereum key confirmed on Substrate"); + InitialSeraiKey::set(txn, &EncodableG(key)); + } + } +} + +#[tokio::main] +async fn main() { + let db = bin::init(); + + let provider = Arc::new(RootProvider::new( + ClientBuilder::default().transport(SimpleRequest::new(bin::url()), true), + )); + + let chain_id = loop { + match provider.get_chain_id().await { + Ok(chain_id) => break U256::try_from(chain_id).unwrap(), + Err(e) => { + log::error!("couldn't connect to the Ethereum node for the chain ID: {e:?}"); + tokio::time::sleep(core::time::Duration::from_secs(5)).await; + } + } + }; + + bin::main_loop::( + db.clone(), + Rpc { db: db.clone(), provider: provider.clone() }, + Scheduler::::new(SmartContract { chain_id }), + TransactionPublisher::new(db, provider, { + let relayer_hostname = env::var("ETHEREUM_RELAYER_HOSTNAME") + .expect("ethereum relayer hostname wasn't specified") + .to_string(); + let relayer_port = + env::var("ETHEREUM_RELAYER_PORT").expect("ethereum relayer port wasn't specified"); + relayer_hostname + ":" + &relayer_port + }), + ) + .await; +} diff --git a/processor/ethereum/src/primitives/block.rs b/processor/ethereum/src/primitives/block.rs new file mode 100644 index 000000000..780837fae --- /dev/null +++ b/processor/ethereum/src/primitives/block.rs @@ -0,0 +1,128 @@ +use std::collections::HashMap; + +use ciphersuite::{Ciphersuite, Secp256k1}; + +use serai_client::networks::ethereum::Address; + +use primitives::{ReceivedOutput, EventualityTracker}; + +use ethereum_router::{InInstruction as EthereumInInstruction, Executed}; + +use crate::{output::Output, transaction::Eventuality}; + +// We interpret 32-block Epochs as singular blocks. +// There's no reason for further accuracy when these will all finalize at the same time. +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub(crate) struct Epoch { + // The hash of the block which ended the prior Epoch. + pub(crate) prior_end_hash: [u8; 32], + // The hash of the last block within this Epoch. + pub(crate) end_hash: [u8; 32], +} + +impl primitives::BlockHeader for Epoch { + fn id(&self) -> [u8; 32] { + self.end_hash + } + fn parent(&self) -> [u8; 32] { + self.prior_end_hash + } +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub(crate) struct FullEpoch { + pub(crate) epoch: Epoch, + pub(crate) instructions: Vec, + pub(crate) executed: Vec, +} + +impl primitives::Block for FullEpoch { + type Header = Epoch; + + type Key = ::G; + type Address = Address; + type Output = Output; + type Eventuality = Eventuality; + + fn id(&self) -> [u8; 32] { + self.epoch.end_hash + } + + fn scan_for_outputs_unordered( + &self, + latest_active_key: Self::Key, + key: Self::Key, + ) -> Vec { + // Only return these outputs for the latest key + if latest_active_key != key { + return vec![]; + } + + // Associate all outputs with the latest active key + // We don't associate these with the current key within the SC as that'll cause outputs to be + // marked for forwarding if the SC is delayed to actually rotate + let mut outputs: Vec<_> = self + .instructions + .iter() + .cloned() + .map(|instruction| Output::Output { key, instruction }) + .collect(); + + /* + The scanner requires a change output be associated with every Eventuality that came from + fulfilling payments, unless said Eventuality descends from an Eventuality meeting that + requirement from the same fulfillment. 
This ensures we have a fully populated Eventualities + set by the time we process the block which has an Eventuality. + + Accordingly, for any block with an Eventuality completion, we claim there's a Change output + so that the block is flagged. Ethereum doesn't actually have Change outputs, yet the scanner + won't report them to Substrate, and the Smart Contract scheduler will drop any/all outputs + passed to it (handwaving their balances as present within the Smart Contract). + */ + if !self.executed.is_empty() { + outputs.push(Output::Eventuality { key, nonce: self.executed.first().unwrap().nonce() }); + } + + outputs + } + + #[allow(clippy::type_complexity)] + fn check_for_eventuality_resolutions( + &self, + eventualities: &mut EventualityTracker, + ) -> HashMap< + >::TransactionId, + Self::Eventuality, + > { + let mut res = HashMap::new(); + for executed in &self.executed { + let Some(expected) = + eventualities.active_eventualities.remove(executed.nonce().to_le_bytes().as_slice()) + else { + continue; + }; + assert_eq!( + executed, + &expected.0, + "Router emitted distinct event for nonce {}", + executed.nonce() + ); + + /* + The transaction ID is used to determine how internal outputs from this transaction should + be handled (if they were actually internal or if they were just to an internal address). + The Ethereum integration doesn't use internal addresses, and only uses internal outputs to + flag a block as having an Eventuality. Those internal outputs will always be scanned, and + while they may be dropped/kept by this ID, the scheduler will then always drop them. + Accordingly, we have free reign as to what to set the transaction ID to. + + We set the ID to the nonce as it's the most helpful value and unique barring someone + finding the premise for this as a hash. + */ + let mut tx_id = [0; 32]; + tx_id[.. 8].copy_from_slice(executed.nonce().to_le_bytes().as_slice()); + res.insert(tx_id, expected); + } + res + } +} diff --git a/processor/ethereum/src/primitives/machine.rs b/processor/ethereum/src/primitives/machine.rs new file mode 100644 index 000000000..1762eb283 --- /dev/null +++ b/processor/ethereum/src/primitives/machine.rs @@ -0,0 +1,146 @@ +use std::{io, collections::HashMap}; + +use rand_core::{RngCore, CryptoRng}; + +use ciphersuite::{Ciphersuite, Secp256k1}; +use frost::{ + dkg::{Participant, ThresholdKeys}, + FrostError, + algorithm::*, + sign::*, +}; + +use ethereum_schnorr::{PublicKey, Signature}; + +use crate::transaction::{Action, Transaction}; + +/// The HRAm to use for the Schnorr Solidity library. +/// +/// This will panic if the public key being signed for is not representable within the Schnorr +/// Solidity library. +#[derive(Clone, Default, Debug)] +pub struct EthereumHram; +impl Hram for EthereumHram { + #[allow(non_snake_case)] + fn hram( + R: &::G, + A: &::G, + m: &[u8], + ) -> ::F { + Signature::challenge(*R, &PublicKey::new(*A).unwrap(), m) + } +} + +/// A clonable machine to sign an action. +/// +/// This will panic if the public key being signed with is not representable within the Schnorr +/// Solidity library. 
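+///
+/// Internally, this wraps an `AlgorithmMachine` running the IETF Schnorr algorithm with
+/// `EthereumHram`, so the produced signature verifies under the on-chain Schnorr library.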
+#[derive(Clone)] +pub(crate) struct ClonableTransctionMachine { + pub(crate) keys: ThresholdKeys, + pub(crate) action: Action, +} + +type LiteralAlgorithmMachine = AlgorithmMachine>; +type LiteralAlgorithmSignMachine = + AlgorithmSignMachine>; + +pub(crate) struct ActionSignMachine { + key: PublicKey, + action: Action, + machine: LiteralAlgorithmSignMachine, +} + +type LiteralAlgorithmSignatureMachine = + AlgorithmSignatureMachine>; + +pub(crate) struct ActionSignatureMachine { + key: PublicKey, + action: Action, + machine: LiteralAlgorithmSignatureMachine, +} + +impl PreprocessMachine for ClonableTransctionMachine { + type Preprocess = ::Preprocess; + type Signature = Transaction; + type SignMachine = ActionSignMachine; + + fn preprocess( + self, + rng: &mut R, + ) -> (Self::SignMachine, Self::Preprocess) { + let (machine, preprocess) = + AlgorithmMachine::new(IetfSchnorr::::ietf(), self.keys.clone()) + .preprocess(rng); + ( + ActionSignMachine { + key: PublicKey::new(self.keys.group_key()).expect("signing with non-representable key"), + action: self.action, + machine, + }, + preprocess, + ) + } +} + +impl SignMachine for ActionSignMachine { + type Params = ::Signature, + >>::Params; + type Keys = ::Signature, + >>::Keys; + type Preprocess = ::Signature, + >>::Preprocess; + type SignatureShare = ::Signature, + >>::SignatureShare; + type SignatureMachine = ActionSignatureMachine; + + fn cache(self) -> CachedPreprocess { + unimplemented!() + } + fn from_cache( + _params: Self::Params, + _keys: Self::Keys, + _cache: CachedPreprocess, + ) -> (Self, Self::Preprocess) { + unimplemented!() + } + + fn read_preprocess(&self, reader: &mut R) -> io::Result { + self.machine.read_preprocess(reader) + } + fn sign( + self, + commitments: HashMap, + msg: &[u8], + ) -> Result<(Self::SignatureMachine, Self::SignatureShare), FrostError> { + assert!(msg.is_empty()); + self.machine.sign(commitments, &self.action.message()).map(|(machine, shares)| { + (ActionSignatureMachine { key: self.key, action: self.action, machine }, shares) + }) + } +} + +impl SignatureMachine for ActionSignatureMachine { + type SignatureShare = ::Signature, + >>::SignatureShare; + + fn read_share(&self, reader: &mut R) -> io::Result { + self.machine.read_share(reader) + } + + fn complete( + self, + shares: HashMap, + ) -> Result { + self.machine.complete(shares).map(|signature| { + let s = signature.s; + let c = Signature::challenge(signature.R, &self.key, &self.action.message()); + Transaction(self.action, Signature::new(c, s)) + }) + } +} diff --git a/processor/ethereum/src/primitives/mod.rs b/processor/ethereum/src/primitives/mod.rs new file mode 100644 index 000000000..197acf8f5 --- /dev/null +++ b/processor/ethereum/src/primitives/mod.rs @@ -0,0 +1,21 @@ +use serai_client::primitives::Amount; + +pub(crate) mod output; +pub(crate) mod transaction; +pub(crate) mod machine; +pub(crate) mod block; + +pub(crate) const DAI: [u8; 20] = + match const_hex::const_decode_to_array(b"0x6B175474E89094C44Da98b954EedeAC495271d0F") { + Ok(res) => res, + Err(_) => panic!("invalid non-test DAI hex address"), + }; + +pub(crate) const TOKENS: [[u8; 20]; 1] = [DAI]; + +// 8 decimals, so 1_000_000_00 would be 1 ETH. This is 0.0015 ETH (5 USD if Ether is ~3300 USD). 
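+// As a worked check: 0.0015 ETH at 8 decimals is 0.0015 * 10**8 = 150_000 units, i.e. the
+// 1_500_00 below (grouped to mirror the 1_000_000_00-per-ETH convention above).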
+#[allow(clippy::inconsistent_digit_grouping)] +pub(crate) const ETHER_DUST: Amount = Amount(1_500_00); +// 5 DAI +#[allow(clippy::inconsistent_digit_grouping)] +pub(crate) const DAI_DUST: Amount = Amount(5_000_000_00); diff --git a/processor/ethereum/src/primitives/output.rs b/processor/ethereum/src/primitives/output.rs new file mode 100644 index 000000000..2215c29da --- /dev/null +++ b/processor/ethereum/src/primitives/output.rs @@ -0,0 +1,180 @@ +use std::io; + +use ciphersuite::{group::GroupEncoding, Ciphersuite, Secp256k1}; + +use alloy_core::primitives::U256; + +use scale::{Encode, Decode}; +use borsh::{BorshSerialize, BorshDeserialize}; + +use serai_client::{ + primitives::{NetworkId, Coin, Amount, Balance}, + networks::ethereum::Address, +}; + +use primitives::{OutputType, ReceivedOutput}; +use ethereum_router::{Coin as EthereumCoin, InInstruction as EthereumInInstruction}; + +use crate::{DAI, ETHER_DUST}; + +fn coin_to_serai_coin(coin: &EthereumCoin) -> Option { + match coin { + EthereumCoin::Ether => Some(Coin::Ether), + EthereumCoin::Erc20(token) => { + if *token == DAI { + return Some(Coin::Dai); + } + None + } + } +} + +fn amount_to_serai_amount(coin: Coin, amount: U256) -> Amount { + assert_eq!(coin.network(), NetworkId::Ethereum); + assert_eq!(coin.decimals(), 8); + // Remove 10 decimals so we go from 18 decimals to 8 decimals + let divisor = U256::from(10_000_000_000u64); + // This is valid up to 184b, which is assumed for the coins allowed + Amount(u64::try_from(amount / divisor).unwrap()) +} + +#[derive( + Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode, BorshSerialize, BorshDeserialize, +)] +pub(crate) struct OutputId(pub(crate) [u8; 40]); +impl Default for OutputId { + fn default() -> Self { + Self([0; 40]) + } +} +impl AsRef<[u8]> for OutputId { + fn as_ref(&self) -> &[u8] { + self.0.as_ref() + } +} +impl AsMut<[u8]> for OutputId { + fn as_mut(&mut self) -> &mut [u8] { + self.0.as_mut() + } +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub(crate) enum Output { + Output { key: ::G, instruction: EthereumInInstruction }, + Eventuality { key: ::G, nonce: u64 }, +} +impl ReceivedOutput<::G, Address> for Output { + type Id = OutputId; + type TransactionId = [u8; 32]; + + fn kind(&self) -> OutputType { + match self { + // All outputs received are External + Output::Output { .. } => OutputType::External, + // Yet upon Eventuality completions, we report a Change output to ensure synchrony per the + // scanner's documented bounds + Output::Eventuality { .. } => OutputType::Change, + } + } + + fn id(&self) -> Self::Id { + match self { + Output::Output { key: _, instruction } => { + let mut id = [0; 40]; + id[.. 32].copy_from_slice(&instruction.id.0); + id[32 ..].copy_from_slice(&instruction.id.1.to_le_bytes()); + OutputId(id) + } + // Yet upon Eventuality completions, we report a Change output to ensure synchrony per the + // scanner's documented bounds + Output::Eventuality { key: _, nonce } => { + let mut id = [0; 40]; + id[.. 8].copy_from_slice(&nonce.to_le_bytes()); + OutputId(id) + } + } + } + + fn transaction_id(&self) -> Self::TransactionId { + match self { + Output::Output { key: _, instruction } => instruction.id.0, + Output::Eventuality { key: _, nonce } => { + let mut id = [0; 32]; + id[.. 8].copy_from_slice(&nonce.to_le_bytes()); + id + } + } + } + + fn key(&self) -> ::G { + match self { + Output::Output { key, .. } | Output::Eventuality { key, .. } => *key, + } + } + + fn presumed_origin(&self) -> Option
{ + match self { + Output::Output { key: _, instruction } => Some(Address::from(instruction.from)), + Output::Eventuality { .. } => None, + } + } + + fn balance(&self) -> Balance { + match self { + Output::Output { key: _, instruction } => { + let coin = coin_to_serai_coin(&instruction.coin).unwrap_or_else(|| { + panic!( + "mapping coin from an EthereumInInstruction with coin {}, which we don't handle.", + "this never should have been yielded" + ) + }); + Balance { coin, amount: amount_to_serai_amount(coin, instruction.amount) } + } + Output::Eventuality { .. } => Balance { coin: Coin::Ether, amount: ETHER_DUST }, + } + } + fn data(&self) -> &[u8] { + match self { + Output::Output { key: _, instruction } => &instruction.data, + Output::Eventuality { .. } => &[], + } + } + + fn write(&self, writer: &mut W) -> io::Result<()> { + match self { + Output::Output { key, instruction } => { + writer.write_all(&[0])?; + writer.write_all(key.to_bytes().as_ref())?; + instruction.write(writer) + } + Output::Eventuality { key, nonce } => { + writer.write_all(&[1])?; + writer.write_all(key.to_bytes().as_ref())?; + writer.write_all(&nonce.to_le_bytes()) + } + } + } + fn read(reader: &mut R) -> io::Result { + let mut kind = [0xff]; + reader.read_exact(&mut kind)?; + if kind[0] >= 2 { + Err(io::Error::other("unknown Output type"))?; + } + + Ok(match kind[0] { + 0 => { + let key = Secp256k1::read_G(reader)?; + let instruction = EthereumInInstruction::read(reader)?; + Self::Output { key, instruction } + } + 1 => { + let key = Secp256k1::read_G(reader)?; + let mut nonce = [0; 8]; + reader.read_exact(&mut nonce)?; + let nonce = u64::from_le_bytes(nonce); + Self::Eventuality { key, nonce } + } + _ => unreachable!(), + }) + } +} diff --git a/processor/ethereum/src/primitives/transaction.rs b/processor/ethereum/src/primitives/transaction.rs new file mode 100644 index 000000000..6730e7a99 --- /dev/null +++ b/processor/ethereum/src/primitives/transaction.rs @@ -0,0 +1,189 @@ +use std::io; + +use ciphersuite::Secp256k1; +use frost::dkg::ThresholdKeys; + +use alloy_core::primitives::U256; + +use serai_client::networks::ethereum::Address; + +use scheduler::SignableTransaction; + +use ethereum_primitives::keccak256; +use ethereum_schnorr::{PublicKey, Signature}; +use ethereum_router::{Coin, OutInstructions, Executed, Router}; + +use crate::{output::OutputId, machine::ClonableTransctionMachine}; + +#[derive(Clone, PartialEq, Debug)] +pub(crate) enum Action { + SetKey { chain_id: U256, nonce: u64, key: PublicKey }, + Batch { chain_id: U256, nonce: u64, coin: Coin, fee: U256, outs: Vec<(Address, U256)> }, +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub(crate) struct Eventuality(pub(crate) Executed); + +impl Action { + pub(crate) fn nonce(&self) -> u64 { + match self { + Action::SetKey { nonce, .. } | Action::Batch { nonce, .. } => *nonce, + } + } + + pub(crate) fn message(&self) -> Vec { + match self { + Action::SetKey { chain_id, nonce, key } => { + Router::update_serai_key_message(*chain_id, *nonce, key) + } + Action::Batch { chain_id, nonce, coin, fee, outs } => Router::execute_message( + *chain_id, + *nonce, + *coin, + *fee, + OutInstructions::from(outs.as_ref()), + ), + } + } + + pub(crate) fn eventuality(&self) -> Eventuality { + Eventuality(match self { + Self::SetKey { chain_id: _, nonce, key } => { + Executed::SetKey { nonce: *nonce, key: key.eth_repr() } + } + Self::Batch { nonce, .. 
} => { + Executed::Batch { nonce: *nonce, message_hash: keccak256(self.message()) } + } + }) + } +} + +#[derive(Clone, PartialEq, Debug)] +pub(crate) struct Transaction(pub(crate) Action, pub(crate) Signature); +impl scheduler::Transaction for Transaction { + fn read(reader: &mut impl io::Read) -> io::Result { + let action = Action::read(reader)?; + let signature = Signature::read(reader)?; + Ok(Transaction(action, signature)) + } + fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { + self.0.write(writer)?; + self.1.write(writer)?; + Ok(()) + } +} + +impl SignableTransaction for Action { + type Transaction = Transaction; + type Ciphersuite = Secp256k1; + type PreprocessMachine = ClonableTransctionMachine; + + fn read(reader: &mut impl io::Read) -> io::Result { + let mut kind = [0xff]; + reader.read_exact(&mut kind)?; + if kind[0] >= 2 { + Err(io::Error::other("unrecognized Action type"))?; + } + + let mut chain_id = [0; 32]; + reader.read_exact(&mut chain_id)?; + let chain_id = U256::from_le_bytes(chain_id); + + let mut nonce = [0; 8]; + reader.read_exact(&mut nonce)?; + let nonce = u64::from_le_bytes(nonce); + + Ok(match kind[0] { + 0 => { + let mut key = [0; 32]; + reader.read_exact(&mut key)?; + let key = + PublicKey::from_eth_repr(key).ok_or_else(|| io::Error::other("invalid key in Action"))?; + + Action::SetKey { chain_id, nonce, key } + } + 1 => { + let coin = Coin::read(reader)?; + + let mut fee = [0; 32]; + reader.read_exact(&mut fee)?; + let fee = U256::from_le_bytes(fee); + + let mut outs_len = [0; 4]; + reader.read_exact(&mut outs_len)?; + let outs_len = usize::try_from(u32::from_le_bytes(outs_len)).unwrap(); + + let mut outs = vec![]; + for _ in 0 .. outs_len { + let address = borsh::from_reader(reader)?; + + let mut amount = [0; 32]; + reader.read_exact(&mut amount)?; + let amount = U256::from_le_bytes(amount); + + outs.push((address, amount)); + } + Action::Batch { chain_id, nonce, coin, fee, outs } + } + _ => unreachable!(), + }) + } + fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { + match self { + Self::SetKey { chain_id, nonce, key } => { + writer.write_all(&[0])?; + writer.write_all(&chain_id.as_le_bytes())?; + writer.write_all(&nonce.to_le_bytes())?; + writer.write_all(&key.eth_repr()) + } + Self::Batch { chain_id, nonce, coin, fee, outs } => { + writer.write_all(&[1])?; + writer.write_all(&chain_id.as_le_bytes())?; + writer.write_all(&nonce.to_le_bytes())?; + coin.write(writer)?; + writer.write_all(&fee.as_le_bytes())?; + writer.write_all(&u32::try_from(outs.len()).unwrap().to_le_bytes())?; + for (address, amount) in outs { + borsh::BorshSerialize::serialize(address, writer)?; + writer.write_all(&amount.as_le_bytes())?; + } + Ok(()) + } + } + } + + fn id(&self) -> [u8; 32] { + let mut res = [0; 32]; + res[.. 8].copy_from_slice(&self.nonce().to_le_bytes()); + res + } + + fn sign(self, keys: ThresholdKeys) -> Self::PreprocessMachine { + ClonableTransctionMachine { keys, action: self } + } +} + +impl primitives::Eventuality for Eventuality { + type OutputId = OutputId; + + fn id(&self) -> [u8; 32] { + let mut res = [0; 32]; + res[.. 
8].copy_from_slice(&self.0.nonce().to_le_bytes()); + res + } + + fn lookup(&self) -> Vec { + self.0.nonce().to_le_bytes().to_vec() + } + + fn singular_spent_output(&self) -> Option { + None + } + + fn read(reader: &mut impl io::Read) -> io::Result { + Executed::read(reader).map(Self) + } + fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { + self.0.write(writer) + } +} diff --git a/processor/ethereum/src/publisher.rs b/processor/ethereum/src/publisher.rs new file mode 100644 index 000000000..3d18a6efe --- /dev/null +++ b/processor/ethereum/src/publisher.rs @@ -0,0 +1,126 @@ +use core::future::Future; +use std::sync::Arc; + +use alloy_rlp::Encodable; + +use alloy_transport::{TransportErrorKind, RpcError}; +use alloy_simple_request_transport::SimpleRequest; +use alloy_provider::RootProvider; + +use tokio::{ + sync::{RwLockReadGuard, RwLock}, + io::{AsyncReadExt, AsyncWriteExt}, + net::TcpStream, +}; + +use serai_db::Db; + +use ethereum_schnorr::PublicKey; +use ethereum_router::{OutInstructions, Router}; + +use crate::{ + InitialSeraiKey, + transaction::{Action, Transaction}, +}; + +#[derive(Clone)] +pub(crate) struct TransactionPublisher { + db: D, + rpc: Arc>, + router: Arc>>, + relayer_url: String, +} + +impl TransactionPublisher { + pub(crate) fn new(db: D, rpc: Arc>, relayer_url: String) -> Self { + Self { db, rpc, router: Arc::new(RwLock::new(None)), relayer_url } + } + + // This will always return Ok(Some(_)) or Err(_), never Ok(None) + async fn router( + &self, + ) -> Result>, RpcError> { + let router = self.router.read().await; + + // If the router is None, find it on-chain + if router.is_none() { + drop(router); + let mut router = self.router.write().await; + // Check again if it's None in case a different task already did this + if router.is_none() { + let Some(router_actual) = Router::new( + self.rpc.clone(), + &PublicKey::new( + InitialSeraiKey::get(&self.db) + .expect("publishing a transaction yet never confirmed a key") + .0, + ) + .expect("initial key used by Serai wasn't representable on Ethereum"), + ) + .await? + else { + Err(TransportErrorKind::Custom( + "publishing transaction yet couldn't find router on chain. was our node reset?" + .to_string() + .into(), + ))? + }; + *router = Some(router_actual); + } + return Ok(router.downgrade()); + } + + Ok(router) + } +} + +impl signers::TransactionPublisher for TransactionPublisher { + type EphemeralError = RpcError; + + fn publish( + &self, + tx: Transaction, + ) -> impl Send + Future> { + async move { + let router = self.router().await?; + let router = router.as_ref().unwrap(); + + let nonce = tx.0.nonce(); + // Convert from an Action (an internal representation of a signable event) to a TxLegacy + let tx = match tx.0 { + Action::SetKey { chain_id: _, nonce: _, key } => router.update_serai_key(&key, &tx.1), + Action::Batch { chain_id: _, nonce: _, coin, fee, outs } => { + router.execute(coin, fee, OutInstructions::from(outs.as_ref()), &tx.1) + } + }; + + // Nonce + let mut msg = nonce.to_le_bytes().to_vec(); + // Transaction + tx.encode(&mut msg); + + let Ok(mut socket) = TcpStream::connect(&self.relayer_url).await else { + Err(TransportErrorKind::Custom( + "couldn't connect to the relayer server".to_string().into(), + ))? + }; + let Ok(()) = socket.write_all(&u32::try_from(msg.len()).unwrap().to_le_bytes()).await else { + Err(TransportErrorKind::Custom( + "couldn't send the message's len to the relayer server".to_string().into(), + ))? 
+ }; + let Ok(()) = socket.write_all(&msg).await else { + Err(TransportErrorKind::Custom( + "couldn't write the message to the relayer server".to_string().into(), + ))? + }; + if socket.read_u8().await.ok() != Some(1) { + Err(TransportErrorKind::Custom( + "didn't get the ack from the relayer server".to_string().into(), + ))?; + } + + Ok(()) + } + } +} diff --git a/processor/ethereum/src/rpc.rs b/processor/ethereum/src/rpc.rs new file mode 100644 index 000000000..7f8a422b4 --- /dev/null +++ b/processor/ethereum/src/rpc.rs @@ -0,0 +1,248 @@ +use core::future::Future; +use std::{sync::Arc, collections::HashSet}; + +use alloy_core::primitives::B256; +use alloy_rpc_types_eth::{Header, BlockTransactionsKind, BlockNumberOrTag}; +use alloy_transport::{RpcError, TransportErrorKind}; +use alloy_simple_request_transport::SimpleRequest; +use alloy_provider::{Provider, RootProvider}; + +use serai_client::primitives::{NetworkId, Coin, Amount}; + +use tokio::task::JoinSet; + +use serai_db::Db; + +use scanner::ScannerFeed; + +use ethereum_schnorr::PublicKey; +use ethereum_erc20::{TopLevelTransfer, Erc20}; +#[rustfmt::skip] +use ethereum_router::{Coin as EthereumCoin, InInstruction as EthereumInInstruction, Executed, Router}; + +use crate::{ + TOKENS, ETHER_DUST, DAI_DUST, InitialSeraiKey, + block::{Epoch, FullEpoch}, +}; + +#[derive(Clone)] +pub(crate) struct Rpc { + pub(crate) db: D, + pub(crate) provider: Arc>, +} + +impl ScannerFeed for Rpc { + const NETWORK: NetworkId = NetworkId::Ethereum; + + // We only need one confirmation as Ethereum properly finalizes + const CONFIRMATIONS: u64 = 1; + // The window length should be roughly an hour + const WINDOW_LENGTH: u64 = 10; + + const TEN_MINUTES: u64 = 2; + + type Block = FullEpoch; + + type EphemeralError = RpcError; + + fn latest_finalized_block_number( + &self, + ) -> impl Send + Future> { + async move { + let actual_number = self + .provider + .get_block(BlockNumberOrTag::Finalized.into(), BlockTransactionsKind::Hashes) + .await? + .ok_or_else(|| { + TransportErrorKind::Custom("there was no finalized block".to_string().into()) + })? + .header + .number; + // Error if there hasn't been a full epoch yet + if actual_number < 32 { + Err(TransportErrorKind::Custom( + "there has not been a completed epoch yet".to_string().into(), + ))? + } + // The divison by 32 returns the amount of completed epochs + // Converting from amount of completed epochs to the latest completed epoch requires + // subtracting 1 + let latest_full_epoch = (actual_number / 32) - 1; + Ok(latest_full_epoch) + } + } + + fn time_of_block( + &self, + number: u64, + ) -> impl Send + Future> { + async move { + let header = self + .provider + .get_block(BlockNumberOrTag::Number(number).into(), BlockTransactionsKind::Hashes) + .await? + .ok_or_else(|| { + TransportErrorKind::Custom( + "asked for time of a block our node doesn't have".to_string().into(), + ) + })? + .header; + // This is monotonic ever since the merge + // https://github.com/ethereum/consensus-specs/blob/4afe39822c9ad9747e0f5635cca117c18441ec1b + // /specs/bellatrix/beacon-chain.md?plain=1#L393-L394 + Ok(header.timestamp) + } + } + + fn unchecked_block_header_by_number( + &self, + number: u64, + ) -> impl Send + + Future::Header, Self::EphemeralError>> + { + async move { + let start = number * 32; + let prior_end_hash = if start == 0 { + [0; 32] + } else { + self + .provider + .get_block((start - 1).into(), BlockTransactionsKind::Hashes) + .await? 
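+ // (Epochs are 32 blocks: epoch `number` spans blocks `number * 32 ..= (number * 32) + 31`,
+ // so the prior epoch ended at block `start - 1`.)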
+ .ok_or_else(|| { + TransportErrorKind::Custom( + format!("ethereum node didn't have requested block: {number:?}. was the node reset?") + .into(), + ) + })? + .header + .hash + .into() + }; + + let end_header = self + .provider + .get_block((start + 31).into(), BlockTransactionsKind::Hashes) + .await? + .ok_or_else(|| { + TransportErrorKind::Custom( + format!("ethereum node didn't have requested block: {number:?}. was the node reset?") + .into(), + ) + })? + .header; + + let end_hash = end_header.hash.into(); + + Ok(Epoch { prior_end_hash, end_hash }) + } + } + + fn unchecked_block_by_number( + &self, + number: u64, + ) -> impl Send + Future> { + async move { + let epoch = self.unchecked_block_header_by_number(number).await?; + + let Some(router) = Router::new( + self.provider.clone(), + &PublicKey::new( + InitialSeraiKey::get(&self.db).expect("fetching a block yet never confirmed a key").0, + ) + .expect("initial key used by Serai wasn't representable on Ethereum"), + ) + .await? + else { + Err(TransportErrorKind::Custom("router wasn't deployed on-chain yet".to_string().into()))? + }; + + async fn sync_block( + provider: Arc>, + router: Router, + block: Header, + ) -> Result<(Vec, Vec), RpcError> { + let mut instructions = router.in_instructions(block.number, &HashSet::from(TOKENS)).await?; + + for token in TOKENS { + for TopLevelTransfer { id, from, amount, data } in Erc20::new(provider.clone(), token) + .top_level_transfers(block.number, router.address()) + .await? + { + instructions.push(EthereumInInstruction { + id, + from, + coin: EthereumCoin::Erc20(token), + amount, + data, + }); + } + } + + let executed = router.executed(block.number).await?; + + Ok((instructions, executed)) + } + + // We use JoinSet here to minimize the latency of the variety of requests we make. For each + // JoinError that may occur, we unwrap it as no underlying tasks should panic + let mut join_set = JoinSet::new(); + let mut to_check = epoch.end_hash; + // TODO: This makes 32 sequential requests. We should run them in parallel using block + // nunbers + while to_check != epoch.prior_end_hash { + let to_check_block = self + .provider + .get_block(B256::from(to_check).into(), BlockTransactionsKind::Hashes) + .await? + .ok_or_else(|| { + TransportErrorKind::Custom( + format!( + "ethereum node didn't have requested block: {}. was the node reset?", + hex::encode(to_check) + ) + .into(), + ) + })? 
+ .header; + + // Update the next block to check + to_check = *to_check_block.parent_hash; + + // Spawn a task to sync this block + join_set.spawn(sync_block(self.provider.clone(), router.clone(), to_check_block)); + } + + let mut instructions = vec![]; + let mut executed = vec![]; + while let Some(instructions_and_executed) = join_set.join_next().await { + let (mut these_instructions, mut these_executed) = instructions_and_executed.unwrap()?; + instructions.append(&mut these_instructions); + executed.append(&mut these_executed); + } + + Ok(FullEpoch { epoch, instructions, executed }) + } + } + + fn dust(coin: Coin) -> Amount { + assert_eq!(coin.network(), NetworkId::Ethereum); + match coin { + Coin::Ether => ETHER_DUST, + Coin::Dai => DAI_DUST, + _ => unreachable!(), + } + } + + fn cost_to_aggregate( + &self, + coin: Coin, + _reference_block: &Self::Block, + ) -> impl Send + Future> { + async move { + assert_eq!(coin.network(), NetworkId::Ethereum); + // There is no cost to aggregate as we receive to an account + Ok(Amount(0)) + } + } +} diff --git a/processor/ethereum/src/scheduler.rs b/processor/ethereum/src/scheduler.rs new file mode 100644 index 000000000..e8a437c1f --- /dev/null +++ b/processor/ethereum/src/scheduler.rs @@ -0,0 +1,157 @@ +use std::collections::HashMap; + +use alloy_core::primitives::U256; + +use serai_client::{ + primitives::{NetworkId, Coin, Balance}, + networks::ethereum::Address, +}; + +use serai_db::Db; + +use primitives::Payment; +use scanner::{KeyFor, AddressFor, EventualityFor}; + +use ethereum_schnorr::PublicKey; +use ethereum_router::Coin as EthereumCoin; + +use crate::{DAI, transaction::Action, rpc::Rpc}; + +fn coin_to_ethereum_coin(coin: Coin) -> EthereumCoin { + assert_eq!(coin.network(), NetworkId::Ethereum); + match coin { + Coin::Ether => EthereumCoin::Ether, + Coin::Dai => EthereumCoin::Erc20(DAI), + _ => unreachable!(), + } +} + +fn balance_to_ethereum_amount(balance: Balance) -> U256 { + assert_eq!(balance.coin.network(), NetworkId::Ethereum); + assert_eq!(balance.coin.decimals(), 8); + // Restore 10 decimals so we go from 8 decimals to 18 decimals + // TODO: Document the expectation all integrated coins have 18 decimals + let factor = U256::from(10_000_000_000u64); + U256::from(balance.amount.0) * factor +} + +#[derive(Clone)] +pub(crate) struct SmartContract { + pub(crate) chain_id: U256, +} +impl smart_contract_scheduler::SmartContract> for SmartContract { + type SignableTransaction = Action; + + fn rotate( + &self, + nonce: u64, + _retiring_key: KeyFor>, + new_key: KeyFor>, + ) -> (Self::SignableTransaction, EventualityFor>) { + let action = Action::SetKey { + chain_id: self.chain_id, + nonce, + key: PublicKey::new(new_key).expect("rotating to an invald key"), + }; + (action.clone(), action.eventuality()) + } + + fn fulfill( + &self, + mut nonce: u64, + _key: KeyFor>, + payments: Vec>>>, + ) -> Vec<(Self::SignableTransaction, EventualityFor>)> { + // Sort by coin + let mut outs = HashMap::<_, _>::new(); + for payment in payments { + let coin = payment.balance().coin; + outs + .entry(coin) + .or_insert_with(|| Vec::with_capacity(1)) + .push((payment.address().clone(), balance_to_ethereum_amount(payment.balance()))); + } + + let mut res = vec![]; + for coin in [Coin::Ether, Coin::Dai] { + let Some(outs) = outs.remove(&coin) else { continue }; + assert!(!outs.is_empty()); + + let fee_per_gas = match coin { + // 10 gwei + Coin::Ether => { + U256::try_from(10u64).unwrap() * alloy_core::primitives::utils::Unit::GWEI.wei() + } + // 0.0003 DAI + 
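+ // (i.e. a flat price per unit of gas, denominated in DAI's 18-decimal base units)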
Coin::Dai => { + U256::try_from(30u64).unwrap() * alloy_core::primitives::utils::Unit::TWEI.wei() + } + _ => unreachable!(), + }; + + // The gas required to perform any interaction with the Router. + const BASE_GAS: u32 = 0; // TODO + + // The gas required to handle an additional payment to an address, in the worst case. + const ADDRESS_PAYMENT_GAS: u32 = 0; // TODO + + // The gas required to handle an additional payment to an smart contract, in the worst case. + // This does not include the explicit gas budget defined within the address specification. + const CONTRACT_PAYMENT_GAS: u32 = 0; // TODO + + // The maximum amount of gas for a batch. + const BATCH_GAS_LIMIT: u32 = 10_000_000; + + // Split these outs into batches, respecting BATCH_GAS_LIMIT + let mut batches = vec![vec![]]; + let mut current_gas = BASE_GAS; + for out in outs { + let payment_gas = match &out.0 { + Address::Address(_) => ADDRESS_PAYMENT_GAS, + Address::Contract(deployment) => CONTRACT_PAYMENT_GAS + deployment.gas_limit(), + }; + if (current_gas + payment_gas) > BATCH_GAS_LIMIT { + assert!(!batches.last().unwrap().is_empty()); + batches.push(vec![]); + current_gas = BASE_GAS; + } + batches.last_mut().unwrap().push(out); + current_gas += payment_gas; + } + + // Push each batch onto the result + for mut outs in batches { + let mut total_gas = 0; + + let base_gas_per_payment = BASE_GAS.div_ceil(u32::try_from(outs.len()).unwrap()); + // Deduce the fee from each out + for out in &mut outs { + let payment_gas = base_gas_per_payment + + match &out.0 { + Address::Address(_) => ADDRESS_PAYMENT_GAS, + Address::Contract(deployment) => CONTRACT_PAYMENT_GAS + deployment.gas_limit(), + }; + total_gas += payment_gas; + + let payment_gas_cost = U256::try_from(payment_gas).unwrap() * fee_per_gas; + out.1 -= payment_gas_cost; + } + + res.push(Action::Batch { + chain_id: self.chain_id, + nonce, + coin: coin_to_ethereum_coin(coin), + fee: U256::try_from(total_gas).unwrap() * fee_per_gas, + outs, + }); + nonce += 1; + } + } + // Ensure we handled all payments we're supposed to + assert!(outs.is_empty()); + + res.into_iter().map(|action| (action.clone(), action.eventuality())).collect() + } +} + +pub(crate) type Scheduler = smart_contract_scheduler::Scheduler, SmartContract>; diff --git a/processor/frost-attempt-manager/Cargo.toml b/processor/frost-attempt-manager/Cargo.toml new file mode 100644 index 000000000..ad8d2a4c9 --- /dev/null +++ b/processor/frost-attempt-manager/Cargo.toml @@ -0,0 +1,35 @@ +[package] +name = "serai-processor-frost-attempt-manager" +version = "0.1.0" +description = "A manager of multiple attempts of FROST signing protocols" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/frost-attempt-manager" +authors = ["Luke Parker "] +keywords = ["frost", "multisig", "threshold"] +edition = "2021" +publish = false + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[package.metadata.cargo-machete] +ignored = ["borsh", "scale"] + +[lints] +workspace = true + +[dependencies] +rand_core = { version = "0.6", default-features = false, features = ["std", "getrandom"] } + +frost = { package = "modular-frost", path = "../../crypto/frost", version = "^0.8.1", default-features = false } + +serai-validator-sets-primitives = { path = "../../substrate/validator-sets/primitives", default-features = false, features = ["std"] } + +log = { version = "0.4", default-features = false, features = ["std"] } + +scale = { package = "parity-scale-codec", 
version = "3", default-features = false, features = ["std"] } +borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } +serai-db = { path = "../../common/db" } + +messages = { package = "serai-processor-messages", path = "../messages" } diff --git a/processor/frost-attempt-manager/LICENSE b/processor/frost-attempt-manager/LICENSE new file mode 100644 index 000000000..41d5a2616 --- /dev/null +++ b/processor/frost-attempt-manager/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2022-2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . diff --git a/processor/frost-attempt-manager/README.md b/processor/frost-attempt-manager/README.md new file mode 100644 index 000000000..08a61398a --- /dev/null +++ b/processor/frost-attempt-manager/README.md @@ -0,0 +1,6 @@ +# FROST Attempt Manager + +A library for helper structures to manage various attempts of a FROST signing +protocol. + +This library is interacted with via the `serai_processor_messages::sign` API. diff --git a/processor/frost-attempt-manager/src/individual.rs b/processor/frost-attempt-manager/src/individual.rs new file mode 100644 index 000000000..6a8b33526 --- /dev/null +++ b/processor/frost-attempt-manager/src/individual.rs @@ -0,0 +1,284 @@ +use std::collections::HashMap; + +use rand_core::OsRng; + +use frost::{ + Participant, FrostError, + sign::{Writable, PreprocessMachine, SignMachine, SignatureMachine}, +}; + +use serai_validator_sets_primitives::Session; + +use serai_db::{Get, DbTxn, Db, create_db}; +use messages::sign::{VariantSignId, SignId, ProcessorMessage}; + +create_db!( + FrostAttemptManager { + Attempted: (id: VariantSignId) -> u32, + } +); + +/// An instance of a signing protocol with re-attempts handled internally. +#[allow(clippy::type_complexity)] +pub(crate) struct SigningProtocol { + db: D, + // The session this signing protocol is being conducted by. + session: Session, + // The `i` of our first, or starting, set of key shares we will be signing with. + // The key shares we sign with are expected to be continguous from this position. + start_i: Participant, + // The ID of this signing protocol. + id: VariantSignId, + // This accepts a vector of `root` machines in order to support signing with multiple key shares. + root: Vec, + preprocessed: HashMap, HashMap>)>, + // Here, we drop to a single machine as we only need one to complete the signature. + shared: HashMap< + u32, + ( + >::SignatureMachine, + HashMap>, + ), + >, +} + +impl SigningProtocol { + /// Create a new signing protocol. + pub(crate) fn new( + db: D, + session: Session, + start_i: Participant, + id: VariantSignId, + root: Vec, + ) -> Self { + log::info!("starting signing protocol {id:?}"); + + Self { + db, + session, + start_i, + id, + root, + preprocessed: HashMap::with_capacity(1), + shared: HashMap::with_capacity(1), + } + } + + /// Start a new attempt of the signing protocol. + /// + /// Returns the (serialized) preprocesses for the attempt. 
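+ ///
+ /// If this attempt (or a later one) was already recorded in the database, this returns an
+ /// empty Vec.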
+ pub(crate) fn attempt(&mut self, attempt: u32) -> Vec { + /* + We'd get slashed as malicious if we: + 1) Preprocessed + 2) Rebooted + 3) On reboot, preprocessed again, sending new preprocesses which would be deduplicated by + the message-queue + 4) Got sent preprocesses + 5) Sent a share based on our new preprocesses, yet with everyone else expecting it to be + based on our old preprocesses + + We avoid this by saving to the DB we preprocessed before sending our preprocessed, and only + keeping our preprocesses for this instance of the processor. Accordingly, on reboot, we will + flag the prior preprocess and not send new preprocesses. This does require our own DB + transaction (to ensure we save to the DB we preprocessed before yielding the preprocess + messages). + + We also won't send the share we were supposed to, unfortunately, yet caching/reloading the + preprocess has enough safety issues it isn't worth the headache. + + Since we bind a signing attempt to the lifetime of the application, we're also safe against + nonce reuse (as the state machines enforce single-use and we never reuse a preprocess). + */ + { + let mut txn = self.db.txn(); + let prior_attempted = Attempted::get(&txn, self.id); + if Some(attempt) <= prior_attempted { + return vec![]; + } + Attempted::set(&mut txn, self.id, &attempt); + txn.commit(); + } + + log::debug!("attemting a new instance of signing protocol {:?}", self.id); + + let mut our_preprocesses = HashMap::with_capacity(self.root.len()); + let mut preprocessed = Vec::with_capacity(self.root.len()); + let mut preprocesses = Vec::with_capacity(self.root.len()); + for (i, machine) in self.root.iter().enumerate() { + let (machine, preprocess) = machine.clone().preprocess(&mut OsRng); + preprocessed.push(machine); + + let mut this_preprocess = Vec::with_capacity(64); + preprocess.write(&mut this_preprocess).unwrap(); + + our_preprocesses.insert( + Participant::new( + u16::from(self.start_i) + u16::try_from(i).expect("signing with 2**16 machines"), + ) + .expect("start_i + i exceeded the valid indexes for a Participant"), + this_preprocess.clone(), + ); + preprocesses.push(this_preprocess); + } + assert!(self.preprocessed.insert(attempt, (preprocessed, our_preprocesses)).is_none()); + + vec![ProcessorMessage::Preprocesses { + id: SignId { session: self.session, id: self.id, attempt }, + preprocesses, + }] + } + + /// Handle preprocesses for the signing protocol. + /// + /// Returns the (serialized) shares for the attempt. 
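+ ///
+ /// If this instance of the processor never preprocessed for this attempt, or its preprocesses
+ /// were already consumed, this returns an empty Vec.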
+ pub(crate) fn preprocesses( + &mut self, + attempt: u32, + serialized_preprocesses: HashMap>, + ) -> Vec { + log::debug!("handling preprocesses for signing protocol {:?}", self.id); + + let Some((machines, our_serialized_preprocesses)) = self.preprocessed.remove(&attempt) else { + return vec![]; + }; + + let mut msgs = Vec::with_capacity(1); + + let mut preprocesses = + HashMap::with_capacity(serialized_preprocesses.len() + our_serialized_preprocesses.len()); + for (i, serialized_preprocess) in + serialized_preprocesses.into_iter().chain(our_serialized_preprocesses) + { + let mut serialized_preprocess = serialized_preprocess.as_slice(); + let Ok(preprocess) = machines[0].read_preprocess(&mut serialized_preprocess) else { + msgs.push(ProcessorMessage::InvalidParticipant { session: self.session, participant: i }); + continue; + }; + if !serialized_preprocess.is_empty() { + msgs.push(ProcessorMessage::InvalidParticipant { session: self.session, participant: i }); + continue; + } + preprocesses.insert(i, preprocess); + } + // We throw out our preprocessed machines here, despite the fact they haven't been invalidated + // We could reuse them with a new set of valid preprocesses + // https://github.com/serai-dex/serai/issues/588 + if !msgs.is_empty() { + return msgs; + } + + let mut our_shares = HashMap::with_capacity(self.root.len()); + let mut shared = Vec::with_capacity(machines.len()); + let mut shares = Vec::with_capacity(machines.len()); + for (i, machine) in machines.into_iter().enumerate() { + let i = Participant::new( + u16::from(self.start_i) + u16::try_from(i).expect("signing with 2**16 machines"), + ) + .expect("start_i + i exceeded the valid indexes for a Participant"); + + let mut preprocesses = preprocesses.clone(); + assert!(preprocesses.remove(&i).is_some()); + + // TODO: Replace this with `()`, which requires making the message type part of the trait + let (machine, share) = match machine.sign(preprocesses, &[]) { + Ok((machine, share)) => (machine, share), + Err(e) => match e { + FrostError::InternalError(_) | + FrostError::InvalidParticipant(_, _) | + FrostError::InvalidSigningSet(_) | + FrostError::InvalidParticipantQuantity(_, _) | + FrostError::DuplicatedParticipant(_) | + FrostError::MissingParticipant(_) | + FrostError::InvalidShare(_) => { + panic!("FROST had an error which shouldn't be reachable: {e:?}"); + } + FrostError::InvalidPreprocess(i) => { + msgs + .push(ProcessorMessage::InvalidParticipant { session: self.session, participant: i }); + return msgs; + } + }, + }; + shared.push(machine); + + let mut this_share = Vec::with_capacity(32); + share.write(&mut this_share).unwrap(); + + our_shares.insert(i, this_share.clone()); + shares.push(this_share); + } + + assert!(self.shared.insert(attempt, (shared.swap_remove(0), our_shares)).is_none()); + log::debug!( + "successfully handled preprocesses for signing protocol {:?}, sending shares", + self.id, + ); + msgs.push(ProcessorMessage::Shares { + id: SignId { session: self.session, id: self.id, attempt }, + shares, + }); + msgs + } + + /// Process shares for the signing protocol. + /// + /// Returns the signature produced by the protocol. + pub(crate) fn shares( + &mut self, + attempt: u32, + serialized_shares: HashMap>, + ) -> Result> { + log::debug!("handling shares for signing protocol {:?}", self.id); + + let Some((machine, our_serialized_shares)) = self.shared.remove(&attempt) else { Err(vec![])? 
}; + + let mut msgs = Vec::with_capacity(1); + + let mut shares = HashMap::with_capacity(serialized_shares.len() + our_serialized_shares.len()); + for (i, serialized_share) in our_serialized_shares.into_iter().chain(serialized_shares) { + let mut serialized_share = serialized_share.as_slice(); + let Ok(share) = machine.read_share(&mut serialized_share) else { + msgs.push(ProcessorMessage::InvalidParticipant { session: self.session, participant: i }); + continue; + }; + if !serialized_share.is_empty() { + msgs.push(ProcessorMessage::InvalidParticipant { session: self.session, participant: i }); + continue; + } + shares.insert(i, share); + } + if !msgs.is_empty() { + Err(msgs)?; + } + + assert!(shares.remove(&self.start_i).is_some()); + + let signature = match machine.complete(shares) { + Ok(signature) => signature, + Err(e) => match e { + FrostError::InternalError(_) | + FrostError::InvalidParticipant(_, _) | + FrostError::InvalidSigningSet(_) | + FrostError::InvalidParticipantQuantity(_, _) | + FrostError::DuplicatedParticipant(_) | + FrostError::MissingParticipant(_) | + FrostError::InvalidPreprocess(_) => { + panic!("FROST had an error which shouldn't be reachable: {e:?}"); + } + FrostError::InvalidShare(i) => { + Err(vec![ProcessorMessage::InvalidParticipant { session: self.session, participant: i }])? + } + }, + }; + + log::info!("finished signing for protocol {:?}", self.id); + + Ok(signature) + } + + /// Cleanup the database entries for a specified signing protocol. + pub(crate) fn cleanup(txn: &mut impl DbTxn, id: VariantSignId) { + Attempted::del(txn, id); + } +} diff --git a/processor/frost-attempt-manager/src/lib.rs b/processor/frost-attempt-manager/src/lib.rs new file mode 100644 index 000000000..db8b08617 --- /dev/null +++ b/processor/frost-attempt-manager/src/lib.rs @@ -0,0 +1,114 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +use std::collections::HashMap; + +use frost::{Participant, sign::PreprocessMachine}; + +use serai_validator_sets_primitives::Session; + +use serai_db::{DbTxn, Db}; +use messages::sign::{VariantSignId, ProcessorMessage, CoordinatorMessage}; + +mod individual; +use individual::SigningProtocol; + +/// A response to handling a message from the coordinator. +pub enum Response { + /// Messages to send to the coordinator. + Messages(Vec), + /// A produced signature. + Signature { + /// The ID of the protocol this is for. + id: VariantSignId, + /// The signature. + signature: M::Signature, + }, +} + +/// A manager of attempts for a variety of signing protocols. +pub struct AttemptManager { + db: D, + session: Session, + start_i: Participant, + active: HashMap>, +} + +impl AttemptManager { + /// Create a new attempt manager. + /// + /// This will not restore any signing sessions from the database. Those must be re-registered. + pub fn new(db: D, session: Session, start_i: Participant) -> Self { + AttemptManager { db, session, start_i, active: HashMap::new() } + } + + /// Register a signing protocol to attempt. + /// + /// This ID must be unique across all sessions, attempt managers, protocols, etc. + pub fn register(&mut self, id: VariantSignId, machines: Vec) -> Vec { + let mut protocol = + SigningProtocol::new(self.db.clone(), self.session, self.start_i, id, machines); + let messages = protocol.attempt(0); + self.active.insert(id, protocol); + messages + } + + /// Retire a signing protocol. + /// + /// This frees all memory used for it and means no further messages will be handled for it. 
+ /// This does not stop the protocol from being re-registered and further worked on (with + /// undefined behavior) then. The higher-level context must never call `register` again with this + /// ID accordingly. + pub fn retire(&mut self, txn: &mut impl DbTxn, id: VariantSignId) { + if self.active.remove(&id).is_none() { + log::info!("retiring protocol {id:?}, which we didn't register/already retired"); + } else { + log::info!("retired signing protocol {id:?}"); + } + SigningProtocol::::cleanup(txn, id); + } + + /// Handle a message for a signing protocol. + /// + /// Handling a message multiple times is safe and will cause subsequent calls to return + /// `Response::Messages(vec![])`. Handling a message for a signing protocol which isn't being + /// worked on (potentially due to rebooting) will also return `Response::Messages(vec![])`. + pub fn handle(&mut self, msg: CoordinatorMessage) -> Response { + match msg { + CoordinatorMessage::Preprocesses { id, preprocesses } => { + let Some(protocol) = self.active.get_mut(&id.id) else { + log::trace!( + "handling preprocesses for signing protocol {:?}, which we're not actively running", + id.id, + ); + return Response::Messages(vec![]); + }; + Response::Messages(protocol.preprocesses(id.attempt, preprocesses)) + } + CoordinatorMessage::Shares { id, shares } => { + let Some(protocol) = self.active.get_mut(&id.id) else { + log::trace!( + "handling shares for signing protocol {:?}, which we're not actively running", + id.id, + ); + return Response::Messages(vec![]); + }; + match protocol.shares(id.attempt, shares) { + Ok(signature) => Response::Signature { id: id.id, signature }, + Err(messages) => Response::Messages(messages), + } + } + CoordinatorMessage::Reattempt { id } => { + let Some(protocol) = self.active.get_mut(&id.id) else { + log::trace!( + "reattempting signing protocol {:?}, which we're not actively running", + id.id, + ); + return Response::Messages(vec![]); + }; + Response::Messages(protocol.attempt(id.attempt)) + } + } + } +} diff --git a/processor/key-gen/Cargo.toml b/processor/key-gen/Cargo.toml new file mode 100644 index 000000000..f1f005647 --- /dev/null +++ b/processor/key-gen/Cargo.toml @@ -0,0 +1,47 @@ +[package] +name = "serai-processor-key-gen" +version = "0.1.0" +description = "Key generation for the Serai processor" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/key-gen" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +publish = false + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[package.metadata.cargo-machete] +ignored = ["scale"] + +[lints] +workspace = true + +[dependencies] +# Macros +zeroize = { version = "1", default-features = false, features = ["std"] } + +# Libs +rand_core = { version = "0.6", default-features = false, features = ["std", "getrandom"] } +rand_chacha = { version = "0.3", default-features = false, features = ["std"] } + +# Cryptography +blake2 = { version = "0.10", default-features = false, features = ["std"] } +transcript = { package = "flexible-transcript", path = "../../crypto/transcript", default-features = false, features = ["std"] } +ec-divisors = { package = "ec-divisors", path = "../../crypto/evrf/divisors", default-features = false } +ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, features = ["std", "ristretto"] } +dkg = { package = "dkg", path = "../../crypto/dkg", default-features = false, features = ["std", "evrf-ristretto"] } + +# Substrate 
+serai-validator-sets-primitives = { path = "../../substrate/validator-sets/primitives", default-features = false, features = ["std"] } + +# Encoders +scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } +borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } + +# Application +log = { version = "0.4", default-features = false, features = ["std"] } +serai-db = { path = "../../common/db" } +messages = { package = "serai-processor-messages", path = "../messages" } diff --git a/processor/key-gen/LICENSE b/processor/key-gen/LICENSE new file mode 100644 index 000000000..41d5a2616 --- /dev/null +++ b/processor/key-gen/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2022-2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . diff --git a/processor/key-gen/README.md b/processor/key-gen/README.md new file mode 100644 index 000000000..566d10354 --- /dev/null +++ b/processor/key-gen/README.md @@ -0,0 +1,8 @@ +# Key Generation + +This library implements the Distributed Key Generation (DKG) for the Serai +protocol. Two invocations of the eVRF-based DKG are performed, one for Ristretto +(to have a key to oraclize values onto the Serai blockchain with) and one for +the external network's curve. + +This library is interacted with via the `serai_processor_messages::key_gen` API. diff --git a/processor/key-gen/src/db.rs b/processor/key-gen/src/db.rs new file mode 100644 index 000000000..149fe1a2f --- /dev/null +++ b/processor/key-gen/src/db.rs @@ -0,0 +1,152 @@ +use core::marker::PhantomData; +use std::collections::HashMap; + +use zeroize::Zeroizing; + +use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto}; +use dkg::{Participant, ThresholdCore, ThresholdKeys, evrf::EvrfCurve}; + +use serai_validator_sets_primitives::Session; + +use borsh::{BorshSerialize, BorshDeserialize}; +use serai_db::{Get, DbTxn}; + +use crate::KeyGenParams; + +pub(crate) struct Params { + pub(crate) t: u16, + pub(crate) n: u16, + pub(crate) substrate_evrf_public_keys: + Vec<<::EmbeddedCurve as Ciphersuite>::G>, + pub(crate) network_evrf_public_keys: + Vec<<::EmbeddedCurve as Ciphersuite>::G>, +} + +#[derive(BorshSerialize, BorshDeserialize)] +struct RawParams { + t: u16, + substrate_evrf_public_keys: Vec<[u8; 32]>, + network_evrf_public_keys: Vec>, +} + +#[derive(BorshSerialize, BorshDeserialize)] +pub(crate) struct Participations { + pub(crate) substrate_participations: HashMap>, + pub(crate) network_participations: HashMap>, +} + +mod _db { + use serai_validator_sets_primitives::Session; + + use serai_db::{Get, DbTxn, create_db}; + + create_db!( + KeyGen { + Params: (session: &Session) -> super::RawParams, + Participations: (session: &Session) -> super::Participations, + KeyShares: (session: &Session) -> Vec, + } + ); +} + +pub(crate) struct KeyGenDb(PhantomData
<P>
);
+impl<P: KeyGenParams> KeyGenDb<P>
{ + pub(crate) fn set_params(txn: &mut impl DbTxn, session: Session, params: Params
<P>
) { + assert_eq!(params.substrate_evrf_public_keys.len(), params.network_evrf_public_keys.len()); + + _db::Params::set( + txn, + &session, + &RawParams { + t: params.t, + substrate_evrf_public_keys: params + .substrate_evrf_public_keys + .into_iter() + .map(|key| key.to_bytes()) + .collect(), + network_evrf_public_keys: params + .network_evrf_public_keys + .into_iter() + .map(|key| key.to_bytes().as_ref().to_vec()) + .collect(), + }, + ) + } + + pub(crate) fn params(getter: &impl Get, session: Session) -> Option> { + _db::Params::get(getter, &session).map(|params| Params { + t: params.t, + n: params + .network_evrf_public_keys + .len() + .try_into() + .expect("amount of keys exceeded the amount allowed during a DKG"), + substrate_evrf_public_keys: params + .substrate_evrf_public_keys + .into_iter() + .map(|key| { + <::EmbeddedCurve as Ciphersuite>::read_G(&mut key.as_slice()) + .unwrap() + }) + .collect(), + network_evrf_public_keys: params + .network_evrf_public_keys + .into_iter() + .map(|key| { + <::EmbeddedCurve as Ciphersuite>::read_G::< + &[u8], + >(&mut key.as_ref()) + .unwrap() + }) + .collect(), + }) + } + + pub(crate) fn set_participations( + txn: &mut impl DbTxn, + session: Session, + participations: &Participations, + ) { + _db::Participations::set(txn, &session, participations) + } + pub(crate) fn participations(getter: &impl Get, session: Session) -> Option { + _db::Participations::get(getter, &session) + } + + // Set the key shares for a session. + pub(crate) fn set_key_shares( + txn: &mut impl DbTxn, + session: Session, + substrate_keys: &[ThresholdKeys], + network_keys: &[ThresholdKeys], + ) { + assert_eq!(substrate_keys.len(), network_keys.len()); + + let mut keys = Zeroizing::new(vec![]); + for (substrate_keys, network_keys) in substrate_keys.iter().zip(network_keys) { + keys.extend(substrate_keys.serialize().as_slice()); + keys.extend(network_keys.serialize().as_slice()); + } + _db::KeyShares::set(txn, &session, &keys); + } + + #[allow(clippy::type_complexity)] + pub(crate) fn key_shares( + getter: &impl Get, + session: Session, + ) -> Option<(Vec>, Vec>)> + { + let keys = _db::KeyShares::get(getter, &session)?; + let mut keys: &[u8] = keys.as_ref(); + + let mut substrate_keys = vec![]; + let mut network_keys = vec![]; + while !keys.is_empty() { + substrate_keys.push(ThresholdKeys::new(ThresholdCore::read(&mut keys).unwrap())); + let mut these_network_keys = ThresholdKeys::new(ThresholdCore::read(&mut keys).unwrap()); + P::tweak_keys(&mut these_network_keys); + network_keys.push(these_network_keys); + } + Some((substrate_keys, network_keys)) + } +} diff --git a/processor/key-gen/src/generators.rs b/processor/key-gen/src/generators.rs new file mode 100644 index 000000000..3570ca6e6 --- /dev/null +++ b/processor/key-gen/src/generators.rs @@ -0,0 +1,38 @@ +use core::any::{TypeId, Any}; +use std::{ + sync::{LazyLock, Mutex}, + collections::HashMap, +}; + +use dkg::evrf::*; + +use serai_validator_sets_primitives::MAX_KEY_SHARES_PER_SET; + +/// A cache of the generators used by the eVRF DKG. +/// +/// This performs a lookup of the Ciphersuite to its generators. Since the Ciphersuite is a +/// generic, this takes advantage of `Any`. This static is isolated in a module to ensure +/// correctness can be evaluated solely by reviewing these few lines of code. +/// +/// This is arguably over-engineered as of right now, as we only need generators for Ristretto +/// and N::Curve. 
By having this HashMap, we enable de-duplication of the Ristretto == N::Curve +/// case, and we automatically support the n-curve case (rather than hard-coding to the 2-curve +/// case). +static GENERATORS: LazyLock>> = + LazyLock::new(|| Mutex::new(HashMap::new())); + +pub(crate) fn generators() -> &'static EvrfGenerators { + GENERATORS + .lock() + .unwrap() + .entry(TypeId::of::()) + .or_insert_with(|| { + // If we haven't prior needed generators for this Ciphersuite, generate new ones + Box::leak(Box::new(EvrfGenerators::::new( + ((MAX_KEY_SHARES_PER_SET * 2 / 3) + 1).try_into().unwrap(), + MAX_KEY_SHARES_PER_SET.try_into().unwrap(), + ))) + }) + .downcast_ref() + .unwrap() +} diff --git a/processor/src/key_gen.rs b/processor/key-gen/src/lib.rs similarity index 60% rename from processor/src/key_gen.rs rename to processor/key-gen/src/lib.rs index a059c350f..4db87b201 100644 --- a/processor/src/key_gen.rs +++ b/processor/key-gen/src/lib.rs @@ -1,7 +1,8 @@ -use std::{ - io, - collections::{HashSet, HashMap}, -}; +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +use std::{io, collections::HashMap}; use zeroize::Zeroizing; @@ -14,156 +15,56 @@ use ciphersuite::{ group::{Group, GroupEncoding}, Ciphersuite, Ristretto, }; -use dkg::{Participant, ThresholdCore, ThresholdKeys, evrf::*}; +use dkg::{Participant, ThresholdKeys, evrf::*}; -use log::info; - -use serai_client::validator_sets::primitives::{Session, KeyPair}; +use serai_validator_sets_primitives::Session; use messages::key_gen::*; -use crate::{Get, DbTxn, Db, create_db, networks::Network}; - -mod generators { - use core::any::{TypeId, Any}; - use std::{ - sync::{LazyLock, Mutex}, - collections::HashMap, - }; +use serai_db::{Get, DbTxn}; - use frost::dkg::evrf::*; - - use serai_client::validator_sets::primitives::MAX_KEY_SHARES_PER_SET; - - /// A cache of the generators used by the eVRF DKG. - /// - /// This performs a lookup of the Ciphersuite to its generators. Since the Ciphersuite is a - /// generic, this takes advantage of `Any`. This static is isolated in a module to ensure - /// correctness can be evaluated solely by reviewing these few lines of code. - /// - /// This is arguably over-engineered as of right now, as we only need generators for Ristretto - /// and N::Curve. By having this HashMap, we enable de-duplication of the Ristretto == N::Curve - /// case, and we automatically support the n-curve case (rather than hard-coding to the 2-curve - /// case). 
- static GENERATORS: LazyLock>> = - LazyLock::new(|| Mutex::new(HashMap::new())); - - pub(crate) fn generators() -> &'static EvrfGenerators { - GENERATORS - .lock() - .unwrap() - .entry(TypeId::of::()) - .or_insert_with(|| { - // If we haven't prior needed generators for this Ciphersuite, generate new ones - Box::leak(Box::new(EvrfGenerators::::new( - ((MAX_KEY_SHARES_PER_SET * 2 / 3) + 1).try_into().unwrap(), - MAX_KEY_SHARES_PER_SET.try_into().unwrap(), - ))) - }) - .downcast_ref() - .unwrap() - } -} +mod generators; use generators::generators; -#[derive(Debug)] -pub struct KeyConfirmed { - pub substrate_keys: Vec>, - pub network_keys: Vec>, -} - -create_db!( - KeyGenDb { - ParamsDb: (session: &Session) -> (u16, Vec<[u8; 32]>, Vec>), - ParticipationDb: (session: &Session) -> ( - HashMap>, - HashMap>, - ), - // GeneratedKeysDb, KeysDb use `()` for their value as we manually serialize their values - // TODO: Don't do that - GeneratedKeysDb: (session: &Session) -> (), - // These do assume a key is only used once across sets, which holds true if the threshold is - // honest - // TODO: Remove this assumption - KeysDb: (network_key: &[u8]) -> (), - SessionDb: (network_key: &[u8]) -> Session, - NetworkKeyDb: (session: Session) -> Vec, - } -); +mod db; +use db::{Params, Participations, KeyGenDb}; -impl GeneratedKeysDb { - #[allow(clippy::type_complexity)] - fn read_keys( - getter: &impl Get, - key: &[u8], - ) -> Option<(Vec, (Vec>, Vec>))> { - let keys_vec = getter.get(key)?; - let mut keys_ref: &[u8] = keys_vec.as_ref(); - - let mut substrate_keys = vec![]; - let mut network_keys = vec![]; - while !keys_ref.is_empty() { - substrate_keys.push(ThresholdKeys::new(ThresholdCore::read(&mut keys_ref).unwrap())); - let mut these_network_keys = ThresholdKeys::new(ThresholdCore::read(&mut keys_ref).unwrap()); - N::tweak_keys(&mut these_network_keys); - network_keys.push(these_network_keys); - } - Some((keys_vec, (substrate_keys, network_keys))) - } +/// Parameters for a key generation. +pub trait KeyGenParams { + /// The ID for this instantiation. + const ID: &'static str; - fn save_keys( - txn: &mut impl DbTxn, - session: &Session, - substrate_keys: &[ThresholdKeys], - network_keys: &[ThresholdKeys], - ) { - let mut keys = Zeroizing::new(vec![]); - for (substrate_keys, network_keys) in substrate_keys.iter().zip(network_keys) { - keys.extend(substrate_keys.serialize().as_slice()); - keys.extend(network_keys.serialize().as_slice()); - } - txn.put(Self::key(session), keys); - } -} + /// The curve used for the external network. + type ExternalNetworkCiphersuite: EvrfCurve< + EmbeddedCurve: Ciphersuite< + G: ec_divisors::DivisorCurve< + FieldElement = ::F, + >, + >, + >; -impl KeysDb { - fn confirm_keys( - txn: &mut impl DbTxn, - session: Session, - key_pair: &KeyPair, - ) -> (Vec>, Vec>) { - let (keys_vec, keys) = - GeneratedKeysDb::read_keys::(txn, &GeneratedKeysDb::key(&session)).unwrap(); - assert_eq!(key_pair.0 .0, keys.0[0].group_key().to_bytes()); - assert_eq!( - { - let network_key: &[u8] = key_pair.1.as_ref(); - network_key - }, - keys.1[0].group_key().to_bytes().as_ref(), - ); - txn.put(Self::key(key_pair.1.as_ref()), keys_vec); - NetworkKeyDb::set(txn, session, &key_pair.1.clone().into_inner()); - SessionDb::set(txn, key_pair.1.as_ref(), &session); - keys + /// Tweaks keys as necessary/beneficial. + /// + /// A default implementation which doesn't perform any tweaking is provided. 
+ fn tweak_keys(keys: &mut ThresholdKeys) { + let _ = keys; } - #[allow(clippy::type_complexity)] - fn keys( - getter: &impl Get, - network_key: &::G, - ) -> Option<(Session, (Vec>, Vec>))> { - let res = - GeneratedKeysDb::read_keys::(getter, &Self::key(network_key.to_bytes().as_ref()))?.1; - assert_eq!(&res.1[0].group_key(), network_key); - Some((SessionDb::get(getter, network_key.to_bytes().as_ref()).unwrap(), res)) + /// Encode keys as optimal. + /// + /// A default implementation is provided which calls the traditional `to_bytes`. + fn encode_key(key: ::G) -> Vec { + key.to_bytes().as_ref().to_vec() } - pub fn substrate_keys_by_session( - getter: &impl Get, - session: Session, - ) -> Option>> { - let network_key = NetworkKeyDb::get(getter, session)?; - Some(GeneratedKeysDb::read_keys::(getter, &Self::key(&network_key))?.1 .0) + /// Decode keys from their optimal encoding. + /// + /// A default implementation is provided which calls the traditional `from_bytes`. + fn decode_key(mut key: &[u8]) -> Option<::G> { + let res = ::read_G(&mut key).ok()?; + if !key.is_empty() { + None?; + } + Some(res) } } @@ -242,60 +143,50 @@ fn coerce_keys( (keys, faulty) } +/// An instance of the Serai key generation protocol. #[derive(Debug)] -pub struct KeyGen { - db: D, +pub struct KeyGen { substrate_evrf_private_key: Zeroizing<<::EmbeddedCurve as Ciphersuite>::F>, - network_evrf_private_key: Zeroizing<<::EmbeddedCurve as Ciphersuite>::F>, + network_evrf_private_key: + Zeroizing<<::EmbeddedCurve as Ciphersuite>::F>, } -impl KeyGen { +impl KeyGen
<P>
{ + /// Create a new key generation instance. #[allow(clippy::new_ret_no_self)] pub fn new( - db: D, substrate_evrf_private_key: Zeroizing< <::EmbeddedCurve as Ciphersuite>::F, >, - network_evrf_private_key: Zeroizing<<::EmbeddedCurve as Ciphersuite>::F>, - ) -> KeyGen { - KeyGen { db, substrate_evrf_private_key, network_evrf_private_key } - } - - pub fn in_set(&self, session: &Session) -> bool { - // We determine if we're in set using if we have the parameters for a session's key generation - // We only have these if we were told to generate a key for this session - ParamsDb::get(&self.db, session).is_some() + network_evrf_private_key: Zeroizing< + <::EmbeddedCurve as Ciphersuite>::F, + >, + ) -> KeyGen
<P>
{ + KeyGen { substrate_evrf_private_key, network_evrf_private_key } } + /// Fetch the key shares for a specific session. #[allow(clippy::type_complexity)] - pub fn keys( - &self, - key: &::G, - ) -> Option<(Session, (Vec>, Vec>))> { + pub fn key_shares( + getter: &impl Get, + session: Session, + ) -> Option<(Vec>, Vec>)> + { // This is safe, despite not having a txn, since it's a static value // It doesn't change over time/in relation to other operations - KeysDb::keys::(&self.db, key) + // It is solely set or unset + KeyGenDb::
<P>
::key_shares(getter, session) } - pub fn substrate_keys_by_session( - &self, - session: Session, - ) -> Option>> { - KeysDb::substrate_keys_by_session::(&self.db, session) - } - - pub fn handle( - &mut self, - txn: &mut D::Transaction<'_>, - msg: CoordinatorMessage, - ) -> Vec { + /// Handle a message from the coordinator. + pub fn handle(&mut self, txn: &mut impl DbTxn, msg: CoordinatorMessage) -> Vec { const SUBSTRATE_KEY_CONTEXT: &[u8] = b"substrate"; const NETWORK_KEY_CONTEXT: &[u8] = b"network"; - fn context(session: Session, key_context: &[u8]) -> [u8; 32] { + fn context(session: Session, key_context: &[u8]) -> [u8; 32] { // TODO2: Also embed the chain ID/genesis block let mut transcript = RecommendedTranscript::new(b"Serai eVRF Key Gen"); - transcript.append_message(b"network", N::ID); + transcript.append_message(b"network", P::ID.as_bytes()); transcript.append_message(b"session", session.0.to_le_bytes()); transcript.append_message(b"key", key_context); (&(&transcript.challenge(b"context"))[.. 32]).try_into().unwrap() @@ -303,69 +194,73 @@ impl KeyGen { match msg { CoordinatorMessage::GenerateKey { session, threshold, evrf_public_keys } => { - info!("Generating new key. Session: {session:?}"); + log::info!("generating new key, session: {session:?}"); // Unzip the vector of eVRF keys let substrate_evrf_public_keys = evrf_public_keys.iter().map(|(key, _)| *key).collect::>(); + let (substrate_evrf_public_keys, mut faulty) = + coerce_keys::(&substrate_evrf_public_keys); + let network_evrf_public_keys = evrf_public_keys.into_iter().map(|(_, key)| key).collect::>(); - - let mut participation = Vec::with_capacity(2048); - let mut faulty = HashSet::new(); + let (network_evrf_public_keys, additional_faulty) = + coerce_keys::(&network_evrf_public_keys); + faulty.extend(additional_faulty); // Participate for both Substrate and the network fn participate( context: [u8; 32], threshold: u16, - evrf_public_keys: &[impl AsRef<[u8]>], + evrf_public_keys: &[::G], evrf_private_key: &Zeroizing<::F>, - faulty: &mut HashSet, output: &mut impl io::Write, ) { - let (coerced_keys, faulty_is) = coerce_keys::(evrf_public_keys); - for faulty_i in faulty_is { - faulty.insert(faulty_i); - } let participation = EvrfDkg::::participate( &mut OsRng, generators(), context, threshold, - &coerced_keys, + evrf_public_keys, evrf_private_key, ); participation.unwrap().write(output).unwrap(); } + + let mut participation = Vec::with_capacity(2048); participate::( - context::(session, SUBSTRATE_KEY_CONTEXT), + context::
<P>
(session, SUBSTRATE_KEY_CONTEXT), threshold, &substrate_evrf_public_keys, &self.substrate_evrf_private_key, - &mut faulty, &mut participation, ); - participate::( - context::(session, NETWORK_KEY_CONTEXT), + participate::( + context::
<P>
(session, NETWORK_KEY_CONTEXT), threshold, &network_evrf_public_keys, &self.network_evrf_private_key, - &mut faulty, &mut participation, ); // Save the params - ParamsDb::set( + KeyGenDb::
<P>
::set_params( txn, - &session, - &(threshold, substrate_evrf_public_keys, network_evrf_public_keys), + session, + Params { + t: threshold, + n: substrate_evrf_public_keys + .len() + .try_into() + .expect("amount of keys exceeded the amount allowed during a DKG"), + substrate_evrf_public_keys, + network_evrf_public_keys, + }, ); // Send back our Participation and all faulty parties - let mut faulty = faulty.into_iter().collect::>(); - faulty.sort(); - let mut res = Vec::with_capacity(faulty.len() + 1); + faulty.sort_unstable(); for faulty in faulty { res.push(ProcessorMessage::Blame { session, participant: faulty }); } @@ -375,15 +270,10 @@ impl KeyGen { } CoordinatorMessage::Participation { session, participant, participation } => { - info!("received participation from {:?} for {:?}", participant, session); - - let (threshold, substrate_evrf_public_keys, network_evrf_public_keys) = - ParamsDb::get(txn, &session).unwrap(); + log::debug!("received participation from {:?} for {:?}", participant, session); - let n = substrate_evrf_public_keys - .len() - .try_into() - .expect("performing a key gen with more than u16::MAX participants"); + let Params { t: threshold, n, substrate_evrf_public_keys, network_evrf_public_keys } = + KeyGenDb::
<P>
::params(txn, session).unwrap(); // Read these `Participation`s // If they fail basic sanity checks, fail fast @@ -399,7 +289,8 @@ impl KeyGen { return blame; }; let len_at_network_participation_start_pos = participation.len(); - let Ok(network_participation) = Participation::::read(&mut participation, n) + let Ok(network_participation) = + Participation::::read(&mut participation, n) else { return blame; }; @@ -413,16 +304,15 @@ impl KeyGen { // If we've already generated these keys, we don't actually need to save these // participations and continue. We solely have to verify them, as to identify malicious // participants and prevent DoSs, before returning - if txn.get(GeneratedKeysDb::key(&session)).is_some() { - info!("already finished generating a key for {:?}", session); + if Self::key_shares(txn, session).is_some() { + log::debug!("already finished generating a key for {:?}", session); match EvrfDkg::::verify( &mut OsRng, generators(), - context::(session, SUBSTRATE_KEY_CONTEXT), + context::
<P>
(session, SUBSTRATE_KEY_CONTEXT), threshold, - // Ignores the list of participants who were faulty, as they were prior blamed - &coerce_keys::(&substrate_evrf_public_keys).0, + &substrate_evrf_public_keys, &HashMap::from([(participant, substrate_participation)]), ) .unwrap() @@ -434,13 +324,12 @@ impl KeyGen { } } - match EvrfDkg::::verify( + match EvrfDkg::::verify( &mut OsRng, generators(), - context::(session, NETWORK_KEY_CONTEXT), + context::
<P>
(session, NETWORK_KEY_CONTEXT), threshold, - // Ignores the list of participants who were faulty, as they were prior blamed - &coerce_keys::(&network_evrf_public_keys).0, + &network_evrf_public_keys, &HashMap::from([(participant, network_participation)]), ) .unwrap() @@ -467,17 +356,22 @@ impl KeyGen { // Since these are valid `Participation`s, save them let (mut substrate_participations, mut network_participations) = - ParticipationDb::get(txn, &session) - .unwrap_or((HashMap::with_capacity(1), HashMap::with_capacity(1))); + KeyGenDb::
<P>
::participations(txn, session).map_or_else( + || (HashMap::with_capacity(1), HashMap::with_capacity(1)), + |p| (p.substrate_participations, p.network_participations), + ); assert!( substrate_participations.insert(participant, substrate_participation).is_none() && network_participations.insert(participant, network_participation).is_none(), "received participation for someone multiple times" ); - ParticipationDb::set( + KeyGenDb::
<P>
::set_participations( txn, - &session, - &(substrate_participations.clone(), network_participations.clone()), + session, + &Participations { + substrate_participations: substrate_participations.clone(), + network_participations: network_participations.clone(), + }, ); // This block is taken from the eVRF DKG itself to evaluate the amount participating @@ -510,12 +404,12 @@ impl KeyGen { } // If we now have the threshold participating, verify their `Participation`s - fn verify_dkg( + fn verify_dkg( txn: &mut impl DbTxn, session: Session, true_if_substrate_false_if_network: bool, threshold: u16, - evrf_public_keys: &[impl AsRef<[u8]>], + evrf_public_keys: &[::G], substrate_participations: &mut HashMap>, network_participations: &mut HashMap>, ) -> Result, Vec> { @@ -542,7 +436,7 @@ impl KeyGen { match EvrfDkg::::verify( &mut OsRng, generators(), - context::( + context::
<P>
( session, if true_if_substrate_false_if_network { SUBSTRATE_KEY_CONTEXT @@ -551,8 +445,7 @@ impl KeyGen { }, ), threshold, - // Ignores the list of participants who were faulty, as they were prior blamed - &coerce_keys::(evrf_public_keys).0, + evrf_public_keys, &participations, ) .unwrap() @@ -570,10 +463,13 @@ impl KeyGen { blames.push(ProcessorMessage::Blame { session, participant }); } // Since we removed `Participation`s, write the updated versions to the database - ParticipationDb::set( + KeyGenDb::
<P>
::set_participations( txn, - &session, - &(substrate_participations.clone(), network_participations.clone()), + session, + &Participations { + substrate_participations: substrate_participations.clone(), + network_participations: network_participations.clone(), + }, ); Err(blames)? } @@ -586,7 +482,7 @@ impl KeyGen { } } - let substrate_dkg = match verify_dkg::( + let substrate_dkg = match verify_dkg::( txn, session, true, @@ -601,7 +497,7 @@ impl KeyGen { Err(blames) => return blames, }; - let network_dkg = match verify_dkg::( + let network_dkg = match verify_dkg::( txn, session, false, @@ -623,38 +519,19 @@ impl KeyGen { let mut network_keys = network_dkg.keys(&self.network_evrf_private_key); // Tweak the keys for the network for network_keys in &mut network_keys { - N::tweak_keys(network_keys); + P::tweak_keys(network_keys); } - GeneratedKeysDb::save_keys::(txn, &session, &substrate_keys, &network_keys); + KeyGenDb::
<P>
::set_key_shares(txn, session, &substrate_keys, &network_keys); + + log::info!("generated key, session: {session:?}"); // Since no one we verified was invalid, and we had the threshold, yield the new keys vec![ProcessorMessage::GeneratedKeyPair { session, substrate_key: substrate_keys[0].group_key().to_bytes(), - // TODO: This can be made more efficient since tweaked keys may be a subset of keys - network_key: network_keys[0].group_key().to_bytes().as_ref().to_vec(), + network_key: P::encode_key(network_keys[0].group_key()), }] } } } - - // This should only be called if we're participating, hence taking our instance - #[allow(clippy::unused_self)] - pub fn confirm( - &mut self, - txn: &mut D::Transaction<'_>, - session: Session, - key_pair: &KeyPair, - ) -> KeyConfirmed { - info!( - "Confirmed key pair {} {} for {:?}", - hex::encode(key_pair.0), - hex::encode(&key_pair.1), - session, - ); - - let (substrate_keys, network_keys) = KeysDb::confirm_keys::(txn, session, key_pair); - - KeyConfirmed { substrate_keys, network_keys } - } } diff --git a/processor/messages/Cargo.toml b/processor/messages/Cargo.toml index 0eba999df..dbadd9db7 100644 --- a/processor/messages/Cargo.toml +++ b/processor/messages/Cargo.toml @@ -17,6 +17,8 @@ rustdoc-args = ["--cfg", "docsrs"] workspace = true [dependencies] +hex = { version = "0.4", default-features = false, features = ["std"] } + scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } diff --git a/processor/messages/src/lib.rs b/processor/messages/src/lib.rs index 98af97ce7..659491d49 100644 --- a/processor/messages/src/lib.rs +++ b/processor/messages/src/lib.rs @@ -1,3 +1,4 @@ +use core::fmt; use std::collections::HashMap; use scale::{Encode, Decode}; @@ -6,9 +7,9 @@ use borsh::{BorshSerialize, BorshDeserialize}; use dkg::Participant; use serai_primitives::BlockHash; -use in_instructions_primitives::{Batch, SignedBatch}; +use validator_sets_primitives::{Session, KeyPair, Slash}; use coins_primitives::OutInstructionWithBalance; -use validator_sets_primitives::{Session, KeyPair}; +use in_instructions_primitives::{Batch, SignedBatch}; #[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] pub struct SubstrateContext { @@ -22,7 +23,6 @@ pub mod key_gen { #[derive(Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)] pub enum CoordinatorMessage { // Instructs the Processor to begin the key generation process. - // TODO: Should this be moved under Substrate? GenerateKey { session: Session, threshold: u16, evrf_public_keys: Vec<([u8; 32], Vec)> }, // Received participations for the specified key generation protocol. Participation { session: Session, participant: Participant, participation: Vec }, @@ -46,12 +46,6 @@ pub mod key_gen { } } - impl CoordinatorMessage { - pub fn required_block(&self) -> Option { - None - } - } - #[derive(Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)] pub enum ProcessorMessage { // Participated in the specified key generation protocol. 
@@ -86,10 +80,36 @@ pub mod key_gen { pub mod sign { use super::*; - #[derive(Clone, PartialEq, Eq, Hash, Debug, Encode, Decode, BorshSerialize, BorshDeserialize)] + #[derive(Clone, Copy, PartialEq, Eq, Hash, Encode, Decode, BorshSerialize, BorshDeserialize)] + pub enum VariantSignId { + Cosign(u64), + Batch(u32), + SlashReport(Session), + Transaction([u8; 32]), + } + impl fmt::Debug for VariantSignId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { + match self { + Self::Cosign(cosign) => { + f.debug_struct("VariantSignId::Cosign").field("0", &cosign).finish() + } + Self::Batch(batch) => f.debug_struct("VariantSignId::Batch").field("0", &batch).finish(), + Self::SlashReport(session) => { + f.debug_struct("VariantSignId::SlashReport").field("0", &session).finish() + } + Self::Transaction(tx) => { + f.debug_struct("VariantSignId::Transaction").field("0", &hex::encode(tx)).finish() + } + } + } + } + + #[derive( + Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode, BorshSerialize, BorshDeserialize, + )] pub struct SignId { pub session: Session, - pub id: [u8; 32], + pub id: VariantSignId, pub attempt: u32, } @@ -101,21 +121,14 @@ pub mod sign { Shares { id: SignId, shares: HashMap> }, // Re-attempt a signing protocol. Reattempt { id: SignId }, - // Completed a signing protocol already. - Completed { session: Session, id: [u8; 32], tx: Vec }, } impl CoordinatorMessage { - pub fn required_block(&self) -> Option { - None - } - - pub fn session(&self) -> Session { + pub fn sign_id(&self) -> &SignId { match self { CoordinatorMessage::Preprocesses { id, .. } | CoordinatorMessage::Shares { id, .. } | - CoordinatorMessage::Reattempt { id } => id.session, - CoordinatorMessage::Completed { session, .. } => *session, + CoordinatorMessage::Reattempt { id, .. } => id, } } } @@ -123,19 +136,18 @@ pub mod sign { #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] pub enum ProcessorMessage { // Participant sent an invalid message during the sign protocol. - InvalidParticipant { id: SignId, participant: Participant }, - // Created preprocess for the specified signing protocol. - Preprocess { id: SignId, preprocesses: Vec> }, - // Signed share for the specified signing protocol. - Share { id: SignId, shares: Vec> }, - // Completed a signing protocol already. - Completed { session: Session, id: [u8; 32], tx: Vec }, + InvalidParticipant { session: Session, participant: Participant }, + // Created preprocesses for the specified signing protocol. + Preprocesses { id: SignId, preprocesses: Vec> }, + // Signed shares for the specified signing protocol. + Shares { id: SignId, shares: Vec> }, } } pub mod coordinator { use super::*; + // TODO: Why does this not simply take the block hash? 
pub fn cosign_block_msg(block_number: u64, block: [u8; 32]) -> Vec { const DST: &[u8] = b"Cosign"; let mut res = vec![u8::try_from(DST.len()).unwrap()]; @@ -145,40 +157,10 @@ pub mod coordinator { res } - #[derive( - Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode, BorshSerialize, BorshDeserialize, - )] - pub enum SubstrateSignableId { - CosigningSubstrateBlock([u8; 32]), - Batch(u32), - SlashReport, - } - - #[derive(Clone, PartialEq, Eq, Hash, Debug, Encode, Decode, BorshSerialize, BorshDeserialize)] - pub struct SubstrateSignId { - pub session: Session, - pub id: SubstrateSignableId, - pub attempt: u32, - } - #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] pub enum CoordinatorMessage { - CosignSubstrateBlock { id: SubstrateSignId, block_number: u64 }, - SignSlashReport { id: SubstrateSignId, report: Vec<([u8; 32], u32)> }, - SubstratePreprocesses { id: SubstrateSignId, preprocesses: HashMap }, - SubstrateShares { id: SubstrateSignId, shares: HashMap }, - // Re-attempt a batch signing protocol. - BatchReattempt { id: SubstrateSignId }, - } - - impl CoordinatorMessage { - // The Coordinator will only send Batch messages once the Batch ID has been recognized - // The ID will only be recognized when the block is acknowledged by a super-majority of the - // network *and the local node* - // This synchrony obtained lets us ignore the synchrony requirement offered here - pub fn required_block(&self) -> Option { - None - } + CosignSubstrateBlock { session: Session, block_number: u64, block: [u8; 32] }, + SignSlashReport { session: Session, report: Vec }, } #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] @@ -189,14 +171,9 @@ pub mod coordinator { #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] pub enum ProcessorMessage { - SubstrateBlockAck { block: u64, plans: Vec }, - InvalidParticipant { id: SubstrateSignId, participant: Participant }, - CosignPreprocess { id: SubstrateSignId, preprocesses: Vec<[u8; 64]> }, - BatchPreprocess { id: SubstrateSignId, block: BlockHash, preprocesses: Vec<[u8; 64]> }, - SlashReportPreprocess { id: SubstrateSignId, preprocesses: Vec<[u8; 64]> }, - SubstrateShare { id: SubstrateSignId, shares: Vec<[u8; 32]> }, - // TODO: Make these signatures [u8; 64]? CosignedBlock { block_number: u64, block: [u8; 32], signature: Vec }, + SignedBatch { batch: SignedBatch }, + SubstrateBlockAck { block: u64, plans: Vec }, SignedSlashReport { session: Session, signature: Vec }, } } @@ -204,35 +181,34 @@ pub mod coordinator { pub mod substrate { use super::*; + #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] + pub enum InInstructionResult { + Succeeded, + Failed, + } + #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] + pub struct ExecutedBatch { + pub id: u32, + pub in_instructions: Vec, + } + #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] pub enum CoordinatorMessage { - ConfirmKeyPair { - context: SubstrateContext, - session: Session, - key_pair: KeyPair, - }, - SubstrateBlock { - context: SubstrateContext, - block: u64, + /// Keys set on the Serai blockchain. + SetKeys { serai_time: u64, session: Session, key_pair: KeyPair }, + /// Slashes reported on the Serai blockchain OR the process timed out. + SlashesReported { session: Session }, + /// A block from Serai with relevance to this processor. 
+ Block { + serai_block_number: u64, + batches: Vec, burns: Vec, - batches: Vec, }, } - impl CoordinatorMessage { - pub fn required_block(&self) -> Option { - let context = match self { - CoordinatorMessage::ConfirmKeyPair { context, .. } | - CoordinatorMessage::SubstrateBlock { context, .. } => context, - }; - Some(context.network_latest_finalized_block) - } - } - #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] pub enum ProcessorMessage { Batch { batch: Batch }, - SignedBatch { batch: SignedBatch }, } } @@ -259,24 +235,6 @@ impl_from!(sign, CoordinatorMessage, Sign); impl_from!(coordinator, CoordinatorMessage, Coordinator); impl_from!(substrate, CoordinatorMessage, Substrate); -impl CoordinatorMessage { - pub fn required_block(&self) -> Option { - let required = match self { - CoordinatorMessage::KeyGen(msg) => msg.required_block(), - CoordinatorMessage::Sign(msg) => msg.required_block(), - CoordinatorMessage::Coordinator(msg) => msg.required_block(), - CoordinatorMessage::Substrate(msg) => msg.required_block(), - }; - - // 0 is used when Serai hasn't acknowledged *any* block for this network, which also means - // there's no need to wait for the block in question - if required == Some(BlockHash([0; 32])) { - return None; - } - required - } -} - #[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] pub enum ProcessorMessage { KeyGen(key_gen::ProcessorMessage), @@ -295,10 +253,10 @@ impl_from!(substrate, ProcessorMessage, Substrate); const COORDINATOR_UID: u8 = 0; const PROCESSOR_UID: u8 = 1; -const TYPE_KEY_GEN_UID: u8 = 2; -const TYPE_SIGN_UID: u8 = 3; -const TYPE_COORDINATOR_UID: u8 = 4; -const TYPE_SUBSTRATE_UID: u8 = 5; +const TYPE_KEY_GEN_UID: u8 = 0; +const TYPE_SIGN_UID: u8 = 1; +const TYPE_COORDINATOR_UID: u8 = 2; +const TYPE_SUBSTRATE_UID: u8 = 3; impl CoordinatorMessage { /// The intent for this message, which should be unique across the validator's entire system, @@ -327,32 +285,24 @@ impl CoordinatorMessage { } CoordinatorMessage::Sign(msg) => { let (sub, id) = match msg { - // Unique since SignId includes a hash of the network, and specific transaction info - sign::CoordinatorMessage::Preprocesses { id, .. } => (0, id.encode()), - sign::CoordinatorMessage::Shares { id, .. } => (1, id.encode()), - sign::CoordinatorMessage::Reattempt { id } => (2, id.encode()), - // The coordinator should report all reported completions to the processor - // Accordingly, the intent is a combination of plan ID and actual TX - // While transaction alone may suffice, that doesn't cover cross-chain TX ID conflicts, - // which are possible - sign::CoordinatorMessage::Completed { id, tx, .. } => (3, (id, tx).encode()), + // Unique since SignId + sign::CoordinatorMessage::Preprocesses { id, .. } => (0, id), + sign::CoordinatorMessage::Shares { id, .. } => (1, id), + sign::CoordinatorMessage::Reattempt { id, .. } => (2, id), }; let mut res = vec![COORDINATOR_UID, TYPE_SIGN_UID, sub]; - res.extend(&id); + res.extend(id.encode()); res } CoordinatorMessage::Coordinator(msg) => { let (sub, id) = match msg { - // Unique since this ID contains the hash of the block being cosigned - coordinator::CoordinatorMessage::CosignSubstrateBlock { id, .. } => (0, id.encode()), - // Unique since there's only one of these per session/attempt, and ID is inclusive to - // both - coordinator::CoordinatorMessage::SignSlashReport { id, .. 
} => (1, id.encode()), - // Unique since this embeds the batch ID (including its network) and attempt - coordinator::CoordinatorMessage::SubstratePreprocesses { id, .. } => (2, id.encode()), - coordinator::CoordinatorMessage::SubstrateShares { id, .. } => (3, id.encode()), - coordinator::CoordinatorMessage::BatchReattempt { id, .. } => (4, id.encode()), + // We only cosign a block once, and Reattempt is a separate message + coordinator::CoordinatorMessage::CosignSubstrateBlock { block_number, .. } => { + (0, block_number.encode()) + } + // We only sign one slash report, and Reattempt is a separate message + coordinator::CoordinatorMessage::SignSlashReport { session, .. } => (1, session.encode()), }; let mut res = vec![COORDINATOR_UID, TYPE_COORDINATOR_UID, sub]; @@ -361,9 +311,11 @@ } CoordinatorMessage::Substrate(msg) => { let (sub, id) = match msg { - // Unique since there's only one key pair for a session - substrate::CoordinatorMessage::ConfirmKeyPair { session, .. } => (0, session.encode()), - substrate::CoordinatorMessage::SubstrateBlock { block, .. } => (1, block.encode()), + substrate::CoordinatorMessage::SetKeys { session, .. } => (0, session.encode()), + substrate::CoordinatorMessage::SlashesReported { session } => (1, session.encode()), + substrate::CoordinatorMessage::Block { serai_block_number, .. } => { + (2, serai_block_number.encode()) + } }; let mut res = vec![COORDINATOR_UID, TYPE_SUBSTRATE_UID, sub]; @@ -404,12 +356,13 @@ impl ProcessorMessage { } ProcessorMessage::Sign(msg) => { let (sub, id) = match msg { + // Unique since we'll only fatally slash a participant once + sign::ProcessorMessage::InvalidParticipant { session, participant } => { + (0, (session, u16::from(*participant)).encode()) + } // Unique since SignId - sign::ProcessorMessage::InvalidParticipant { id, .. } => (0, id.encode()), - sign::ProcessorMessage::Preprocess { id, .. } => (1, id.encode()), - sign::ProcessorMessage::Share { id, .. } => (2, id.encode()), - // Unique since a processor will only sign a TX once - sign::ProcessorMessage::Completed { id, .. } => (3, id.to_vec()), + sign::ProcessorMessage::Preprocesses { id, .. } => (1, id.encode()), + sign::ProcessorMessage::Shares { id, .. } => (2, id.encode()), }; let mut res = vec![PROCESSOR_UID, TYPE_SIGN_UID, sub]; @@ -418,16 +371,10 @@ } ProcessorMessage::Coordinator(msg) => { let (sub, id) = match msg { - coordinator::ProcessorMessage::SubstrateBlockAck { block, .. } => (0, block.encode()), - // Unique since SubstrateSignId - coordinator::ProcessorMessage::InvalidParticipant { id, .. } => (1, id.encode()), - coordinator::ProcessorMessage::CosignPreprocess { id, .. } => (2, id.encode()), - coordinator::ProcessorMessage::BatchPreprocess { id, .. } => (3, id.encode()), - coordinator::ProcessorMessage::SlashReportPreprocess { id, .. } => (4, id.encode()), - coordinator::ProcessorMessage::SubstrateShare { id, .. } => (5, id.encode()), - // Unique since only one instance of a signature matters - coordinator::ProcessorMessage::CosignedBlock { block, .. } => (6, block.encode()), - coordinator::ProcessorMessage::SignedSlashReport { .. } => (7, vec![]), + coordinator::ProcessorMessage::CosignedBlock { block, .. } => (0, block.encode()), + coordinator::ProcessorMessage::SignedBatch { batch, .. } => (1, batch.batch.id.encode()), + coordinator::ProcessorMessage::SubstrateBlockAck { block, .. } => (2, block.encode()), + coordinator::ProcessorMessage::SignedSlashReport { session, .. 
} => (3, session.encode()), }; let mut res = vec![PROCESSOR_UID, TYPE_COORDINATOR_UID, sub]; @@ -436,11 +383,7 @@ impl ProcessorMessage { } ProcessorMessage::Substrate(msg) => { let (sub, id) = match msg { - // Unique since network and ID binding - substrate::ProcessorMessage::Batch { batch } => (0, (batch.network, batch.id).encode()), - substrate::ProcessorMessage::SignedBatch { batch, .. } => { - (1, (batch.batch.network, batch.batch.id).encode()) - } + substrate::ProcessorMessage::Batch { batch } => (0, batch.id.encode()), }; let mut res = vec![PROCESSOR_UID, TYPE_SUBSTRATE_UID, sub]; diff --git a/processor/monero/Cargo.toml b/processor/monero/Cargo.toml new file mode 100644 index 000000000..6ea49a0ca --- /dev/null +++ b/processor/monero/Cargo.toml @@ -0,0 +1,55 @@ +[package] +name = "serai-monero-processor" +version = "0.1.0" +description = "Serai Monero Processor" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/monero" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +publish = false + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +rand_core = { version = "0.6", default-features = false } +rand_chacha = { version = "0.3", default-features = false, features = ["std"] } +zeroize = { version = "1", default-features = false, features = ["std"] } + +scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } +borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } + +dalek-ff-group = { path = "../../crypto/dalek-ff-group", default-features = false, features = ["std"] } +ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, features = ["std", "ed25519"] } +dkg = { path = "../../crypto/dkg", default-features = false, features = ["std", "evrf-ed25519"] } +frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false } + +monero-wallet = { path = "../../networks/monero/wallet", default-features = false, features = ["std", "multisig"] } +monero-simple-request-rpc = { path = "../../networks/monero/rpc/simple-request", default-features = false } + +serai-client = { path = "../../substrate/client", default-features = false, features = ["monero"] } + +zalloc = { path = "../../common/zalloc" } +log = { version = "0.4", default-features = false, features = ["std"] } +tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] } + +key-gen = { package = "serai-processor-key-gen", path = "../key-gen" } +view-keys = { package = "serai-processor-view-keys", path = "../view-keys" } + +primitives = { package = "serai-processor-primitives", path = "../primitives" } +scheduler = { package = "serai-processor-scheduler-primitives", path = "../scheduler/primitives" } +scanner = { package = "serai-processor-scanner", path = "../scanner" } +utxo-scheduler = { package = "serai-processor-utxo-scheduler-primitives", path = "../scheduler/utxo/primitives" } +utxo-standard-scheduler = { package = "serai-processor-utxo-scheduler", path = "../scheduler/utxo/standard" } +signers = { package = "serai-processor-signers", path = "../signers" } + +bin = { package = "serai-processor-bin", path = "../bin" } + +[features] +parity-db = ["bin/parity-db"] +rocksdb = ["bin/rocksdb"] diff --git a/processor/monero/LICENSE b/processor/monero/LICENSE new file mode 100644 index 000000000..41d5a2616 --- 
/dev/null +++ b/processor/monero/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2022-2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . diff --git a/processor/monero/README.md b/processor/monero/README.md new file mode 100644 index 000000000..564c83a02 --- /dev/null +++ b/processor/monero/README.md @@ -0,0 +1 @@ +# Serai Monero Processor diff --git a/processor/monero/src/key_gen.rs b/processor/monero/src/key_gen.rs new file mode 100644 index 000000000..6e30d7bf3 --- /dev/null +++ b/processor/monero/src/key_gen.rs @@ -0,0 +1,8 @@ +use ciphersuite::Ed25519; + +pub(crate) struct KeyGenParams; +impl key_gen::KeyGenParams for KeyGenParams { + const ID: &'static str = "Monero"; + + type ExternalNetworkCiphersuite = Ed25519; +} diff --git a/processor/monero/src/main.rs b/processor/monero/src/main.rs new file mode 100644 index 000000000..b5c67f12a --- /dev/null +++ b/processor/monero/src/main.rs @@ -0,0 +1,189 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +#[global_allocator] +static ALLOCATOR: zalloc::ZeroizingAlloc = + zalloc::ZeroizingAlloc(std::alloc::System); + +use monero_simple_request_rpc::SimpleRequestRpc; + +mod primitives; +pub(crate) use crate::primitives::*; + +mod key_gen; +use crate::key_gen::KeyGenParams; +mod rpc; +use rpc::Rpc; +mod scheduler; +use scheduler::{Planner, Scheduler}; + +#[tokio::main] +async fn main() { + let db = bin::init(); + let feed = Rpc { + rpc: loop { + match SimpleRequestRpc::new(bin::url()).await { + Ok(rpc) => break rpc, + Err(e) => { + log::error!("couldn't connect to the Monero node: {e:?}"); + tokio::time::sleep(core::time::Duration::from_secs(5)).await; + } + } + }, + }; + + bin::main_loop::<(), _, KeyGenParams, _>( + db, + feed.clone(), + Scheduler::new(Planner(feed.clone())), + feed, + ) + .await; +} + +/* +#[async_trait] +impl TransactionTrait for Transaction { + #[cfg(test)] + async fn fee(&self, _: &Monero) -> u64 { + match self { + Transaction::V1 { .. } => panic!("v1 TX in test-only function"), + Transaction::V2 { ref proofs, .. } => proofs.as_ref().unwrap().base.fee, + } + } +} + +impl Monero { + async fn median_fee(&self, block: &Block) -> Result { + let mut fees = vec![]; + for tx_hash in &block.transactions { + let tx = + self.rpc.get_transaction(*tx_hash).await.map_err(|_| NetworkError::ConnectionError)?; + // Only consider fees from RCT transactions, else the fee property read wouldn't be accurate + let fee = match &tx { + Transaction::V2 { proofs: Some(proofs), .. 
} => proofs.base.fee, + _ => continue, + }; + fees.push(fee / u64::try_from(tx.weight()).unwrap()); + } + fees.sort(); + let fee = fees.get(fees.len() / 2).copied().unwrap_or(0); + + // TODO: Set a sane minimum fee + const MINIMUM_FEE: u64 = 1_500_000; + Ok(FeeRate::new(fee.max(MINIMUM_FEE), 10000).unwrap()) + } + + #[cfg(test)] + fn test_view_pair() -> ViewPair { + ViewPair::new(*EdwardsPoint::generator(), Zeroizing::new(Scalar::ONE.0)).unwrap() + } + + #[cfg(test)] + fn test_scanner() -> Scanner { + Scanner::new(Self::test_view_pair()) + } + + #[cfg(test)] + fn test_address() -> Address { + Address::new(Self::test_view_pair().legacy_address(MoneroNetwork::Mainnet)).unwrap() + } +} + +#[async_trait] +impl Network for Monero { + #[cfg(test)] + async fn get_block_number(&self, id: &[u8; 32]) -> usize { + self.rpc.get_block(*id).await.unwrap().number().unwrap() + } + + #[cfg(test)] + async fn get_transaction_by_eventuality( + &self, + block: usize, + eventuality: &Eventuality, + ) -> Transaction { + let block = self.rpc.get_block_by_number(block).await.unwrap(); + for tx in &block.transactions { + let tx = self.rpc.get_transaction(*tx).await.unwrap(); + if eventuality.matches(&tx.clone().into()) { + return tx; + } + } + panic!("block didn't have a transaction for this eventuality") + } + + #[cfg(test)] + async fn mine_block(&self) { + // https://github.com/serai-dex/serai/issues/198 + sleep(std::time::Duration::from_millis(100)).await; + self.rpc.generate_blocks(&Self::test_address().into(), 1).await.unwrap(); + } + + #[cfg(test)] + async fn test_send(&self, address: Address) -> Block { + use zeroize::Zeroizing; + use rand_core::{RngCore, OsRng}; + use monero_wallet::rpc::FeePriority; + + let new_block = self.get_latest_block_number().await.unwrap() + 1; + for _ in 0 .. 80 { + self.mine_block().await; + } + + let new_block = self.rpc.get_block_by_number(new_block).await.unwrap(); + let mut outputs = Self::test_scanner() + .scan(self.rpc.get_scannable_block(new_block.clone()).await.unwrap()) + .unwrap() + .ignore_additional_timelock(); + let output = outputs.swap_remove(0); + + let amount = output.commitment().amount; + // The dust should always be sufficient for the fee + let fee = Monero::DUST; + + let rct_type = match new_block.header.hardfork_version { + 14 => RctType::ClsagBulletproof, + 15 | 16 => RctType::ClsagBulletproofPlus, + _ => panic!("Monero hard forked and the processor wasn't updated for it"), + }; + + let output = OutputWithDecoys::fingerprintable_deterministic_new( + &mut OsRng, + &self.rpc, + match rct_type { + RctType::ClsagBulletproof => 11, + RctType::ClsagBulletproofPlus => 16, + _ => panic!("selecting decoys for an unsupported RctType"), + }, + self.rpc.get_height().await.unwrap(), + output, + ) + .await + .unwrap(); + + let mut outgoing_view_key = Zeroizing::new([0; 32]); + OsRng.fill_bytes(outgoing_view_key.as_mut()); + let tx = MSignableTransaction::new( + rct_type, + outgoing_view_key, + vec![output], + vec![(address.into(), amount - fee)], + Change::fingerprintable(Some(Self::test_address().into())), + vec![], + self.rpc.get_fee_rate(FeePriority::Unimportant).await.unwrap(), + ) + .unwrap() + .sign(&mut OsRng, &Zeroizing::new(Scalar::ONE.0)) + .unwrap(); + + let block = self.get_latest_block_number().await.unwrap() + 1; + self.rpc.publish_transaction(&tx).await.unwrap(); + for _ in 0 .. 
10 { + self.mine_block().await; + } + self.get_block(block).await.unwrap() + } +} +*/ diff --git a/processor/monero/src/primitives/block.rs b/processor/monero/src/primitives/block.rs new file mode 100644 index 000000000..6afae4291 --- /dev/null +++ b/processor/monero/src/primitives/block.rs @@ -0,0 +1,83 @@ +use std::collections::HashMap; + +use ciphersuite::{Ciphersuite, Ed25519}; + +use monero_wallet::{ + block::Block as MBlock, rpc::ScannableBlock as MScannableBlock, ScanError, GuaranteedScanner, +}; + +use serai_client::networks::monero::Address; + +use primitives::{ReceivedOutput, EventualityTracker}; +use crate::{ + EXTERNAL_SUBADDRESS, BRANCH_SUBADDRESS, CHANGE_SUBADDRESS, FORWARDED_SUBADDRESS, view_pair, + output::Output, transaction::Eventuality, +}; + +#[derive(Clone, Debug)] +pub(crate) struct BlockHeader(pub(crate) MBlock); +impl primitives::BlockHeader for BlockHeader { + fn id(&self) -> [u8; 32] { + self.0.hash() + } + fn parent(&self) -> [u8; 32] { + self.0.header.previous + } +} + +#[derive(Clone, Debug)] +pub(crate) struct Block(pub(crate) MScannableBlock); + +impl primitives::Block for Block { + type Header = BlockHeader; + + type Key = ::G; + type Address = Address; + type Output = Output; + type Eventuality = Eventuality; + + fn id(&self) -> [u8; 32] { + self.0.block.hash() + } + + fn scan_for_outputs_unordered( + &self, + _latest_active_key: Self::Key, + key: Self::Key, + ) -> Vec { + let mut scanner = GuaranteedScanner::new(view_pair(key)); + scanner.register_subaddress(EXTERNAL_SUBADDRESS); + scanner.register_subaddress(BRANCH_SUBADDRESS); + scanner.register_subaddress(CHANGE_SUBADDRESS); + scanner.register_subaddress(FORWARDED_SUBADDRESS); + match scanner.scan(self.0.clone()) { + Ok(outputs) => outputs.not_additionally_locked().into_iter().map(Output).collect(), + Err(ScanError::UnsupportedProtocol(version)) => { + panic!("Monero unexpectedly hard-forked (version {version})") + } + Err(ScanError::InvalidScannableBlock(reason)) => { + panic!("fetched an invalid scannable block from the RPC: {reason}") + } + } + } + + #[allow(clippy::type_complexity)] + fn check_for_eventuality_resolutions( + &self, + eventualities: &mut EventualityTracker, + ) -> HashMap< + >::TransactionId, + Self::Eventuality, + > { + let mut res = HashMap::new(); + assert_eq!(self.0.block.transactions.len(), self.0.transactions.len()); + for (hash, tx) in self.0.block.transactions.iter().zip(&self.0.transactions) { + if let Some(eventuality) = eventualities.active_eventualities.get(&tx.prefix().extra) { + if eventuality.eventuality.matches(tx) { + res.insert(*hash, eventualities.active_eventualities.remove(&tx.prefix().extra).unwrap()); + } + } + } + res + } +} diff --git a/processor/monero/src/primitives/mod.rs b/processor/monero/src/primitives/mod.rs new file mode 100644 index 000000000..317cae280 --- /dev/null +++ b/processor/monero/src/primitives/mod.rs @@ -0,0 +1,37 @@ +use zeroize::Zeroizing; + +use ciphersuite::{Ciphersuite, Ed25519}; + +use monero_wallet::{address::SubaddressIndex, ViewPairError, GuaranteedViewPair}; + +use view_keys::view_key; + +pub(crate) mod output; +pub(crate) mod transaction; +pub(crate) mod block; + +pub(crate) const EXTERNAL_SUBADDRESS: SubaddressIndex = match SubaddressIndex::new(1, 0) { + Some(index) => index, + None => panic!("SubaddressIndex for EXTERNAL_SUBADDRESS was None"), +}; +pub(crate) const BRANCH_SUBADDRESS: SubaddressIndex = match SubaddressIndex::new(2, 0) { + Some(index) => index, + None => panic!("SubaddressIndex for BRANCH_SUBADDRESS was None"), 
+}; +pub(crate) const CHANGE_SUBADDRESS: SubaddressIndex = match SubaddressIndex::new(2, 1) { + Some(index) => index, + None => panic!("SubaddressIndex for CHANGE_SUBADDRESS was None"), +}; +pub(crate) const FORWARDED_SUBADDRESS: SubaddressIndex = match SubaddressIndex::new(2, 2) { + Some(index) => index, + None => panic!("SubaddressIndex for FORWARDED_SUBADDRESS was None"), +}; + +pub(crate) fn view_pair(key: ::G) -> GuaranteedViewPair { + match GuaranteedViewPair::new(key.0, Zeroizing::new(*view_key::(0))) { + Ok(view_pair) => view_pair, + Err(ViewPairError::TorsionedSpendKey) => { + unreachable!("dalek_ff_group::EdwardsPoint had torsion") + } + } +} diff --git a/processor/monero/src/primitives/output.rs b/processor/monero/src/primitives/output.rs new file mode 100644 index 000000000..201e75c99 --- /dev/null +++ b/processor/monero/src/primitives/output.rs @@ -0,0 +1,94 @@ +use std::io; + +use ciphersuite::{group::Group, Ciphersuite, Ed25519}; + +use monero_wallet::WalletOutput; + +use scale::{Encode, Decode}; +use borsh::{BorshSerialize, BorshDeserialize}; + +use serai_client::{ + primitives::{Coin, Amount, Balance}, + networks::monero::Address, +}; + +use primitives::{OutputType, ReceivedOutput}; + +use crate::{EXTERNAL_SUBADDRESS, BRANCH_SUBADDRESS, CHANGE_SUBADDRESS, FORWARDED_SUBADDRESS}; + +#[rustfmt::skip] +#[derive( + Clone, Copy, PartialEq, Eq, Default, Hash, Debug, Encode, Decode, BorshSerialize, BorshDeserialize, +)] +pub(crate) struct OutputId(pub(crate) [u8; 32]); +impl AsRef<[u8]> for OutputId { + fn as_ref(&self) -> &[u8] { + self.0.as_ref() + } +} +impl AsMut<[u8]> for OutputId { + fn as_mut(&mut self) -> &mut [u8] { + self.0.as_mut() + } +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub(crate) struct Output(pub(crate) WalletOutput); +impl ReceivedOutput<::G, Address> for Output { + type Id = OutputId; + type TransactionId = [u8; 32]; + + fn kind(&self) -> OutputType { + let subaddress = self.0.subaddress().unwrap(); + if subaddress == EXTERNAL_SUBADDRESS { + return OutputType::External; + } + if subaddress == BRANCH_SUBADDRESS { + return OutputType::Branch; + } + if subaddress == CHANGE_SUBADDRESS { + return OutputType::Change; + } + if subaddress == FORWARDED_SUBADDRESS { + return OutputType::Forwarded; + } + unreachable!("scanned output to unknown subaddress"); + } + + fn id(&self) -> Self::Id { + OutputId(self.0.key().compress().to_bytes()) + } + + fn transaction_id(&self) -> Self::TransactionId { + self.0.transaction() + } + + fn key(&self) -> ::G { + // The spend key will be a key we generated, so it'll be in the prime-order subgroup + // The output's key is the spend key + (key_offset * G), so it's in the prime-order subgroup if + // the spend key is + dalek_ff_group::EdwardsPoint( + self.0.key() - (*::G::generator() * self.0.key_offset()), + ) + } + + fn presumed_origin(&self) -> Option
<Address>
{ + None + } + + fn balance(&self) -> Balance { + Balance { coin: Coin::Monero, amount: Amount(self.0.commitment().amount) } + } + + fn data(&self) -> &[u8] { + self.0.arbitrary_data().first().map_or(&[], Vec::as_slice) + } + + fn write(&self, writer: &mut W) -> io::Result<()> { + self.0.write(writer) + } + + fn read(reader: &mut R) -> io::Result { + WalletOutput::read(reader).map(Self) + } +} diff --git a/processor/monero/src/primitives/transaction.rs b/processor/monero/src/primitives/transaction.rs new file mode 100644 index 000000000..eeeef81dc --- /dev/null +++ b/processor/monero/src/primitives/transaction.rs @@ -0,0 +1,137 @@ +use std::io; + +use rand_core::{RngCore, CryptoRng}; + +use ciphersuite::Ed25519; +use frost::{dkg::ThresholdKeys, sign::PreprocessMachine}; + +use monero_wallet::{ + transaction::Transaction as MTransaction, + send::{ + SignableTransaction as MSignableTransaction, TransactionMachine, Eventuality as MEventuality, + }, +}; + +use crate::output::OutputId; + +#[derive(Clone, Debug)] +pub(crate) struct Transaction(pub(crate) MTransaction); + +impl From for Transaction { + fn from(tx: MTransaction) -> Self { + Self(tx) + } +} + +impl scheduler::Transaction for Transaction { + fn read(reader: &mut impl io::Read) -> io::Result { + MTransaction::read(reader).map(Self) + } + fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { + self.0.write(writer) + } +} + +#[derive(Clone, Debug)] +pub(crate) struct SignableTransaction { + pub(crate) id: [u8; 32], + pub(crate) signable: MSignableTransaction, +} + +#[derive(Clone)] +pub(crate) struct ClonableTransctionMachine(MSignableTransaction, ThresholdKeys); +impl PreprocessMachine for ClonableTransctionMachine { + type Preprocess = ::Preprocess; + type Signature = ::Signature; + type SignMachine = ::SignMachine; + + fn preprocess( + self, + rng: &mut R, + ) -> (Self::SignMachine, Self::Preprocess) { + self.0.multisig(self.1).expect("incorrect keys used for SignableTransaction").preprocess(rng) + } +} + +impl scheduler::SignableTransaction for SignableTransaction { + type Transaction = Transaction; + type Ciphersuite = Ed25519; + type PreprocessMachine = ClonableTransctionMachine; + + fn read(reader: &mut impl io::Read) -> io::Result { + let mut id = [0; 32]; + reader.read_exact(&mut id)?; + + let signable = MSignableTransaction::read(reader)?; + Ok(SignableTransaction { id, signable }) + } + fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { + writer.write_all(&self.id)?; + self.signable.write(writer) + } + + fn id(&self) -> [u8; 32] { + self.id + } + + fn sign(self, keys: ThresholdKeys) -> Self::PreprocessMachine { + ClonableTransctionMachine(self.signable, keys) + } +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub(crate) struct Eventuality { + pub(crate) id: [u8; 32], + pub(crate) singular_spent_output: Option, + pub(crate) eventuality: MEventuality, +} + +impl primitives::Eventuality for Eventuality { + type OutputId = OutputId; + + fn id(&self) -> [u8; 32] { + self.id + } + + // We define the lookup as our ID since the resolving transaction only has a singular possible ID + fn lookup(&self) -> Vec { + self.eventuality.extra() + } + + fn singular_spent_output(&self) -> Option { + self.singular_spent_output + } + + fn read(reader: &mut impl io::Read) -> io::Result { + let mut id = [0; 32]; + reader.read_exact(&mut id)?; + + let singular_spent_output = { + let mut singular_spent_output_opt = [0xff]; + reader.read_exact(&mut singular_spent_output_opt)?; + assert!(singular_spent_output_opt[0] <= 1); + 
(singular_spent_output_opt[0] == 1) + .then(|| -> io::Result<_> { + let mut singular_spent_output = [0; 32]; + reader.read_exact(&mut singular_spent_output)?; + Ok(OutputId(singular_spent_output)) + }) + .transpose()? + }; + + let eventuality = MEventuality::read(reader)?; + Ok(Self { id, singular_spent_output, eventuality }) + } + fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { + writer.write_all(&self.id)?; + + if let Some(singular_spent_output) = self.singular_spent_output { + writer.write_all(&[1])?; + writer.write_all(singular_spent_output.as_ref())?; + } else { + writer.write_all(&[0])?; + } + + self.eventuality.write(writer) + } +} diff --git a/processor/monero/src/rpc.rs b/processor/monero/src/rpc.rs new file mode 100644 index 000000000..9244b23f3 --- /dev/null +++ b/processor/monero/src/rpc.rs @@ -0,0 +1,139 @@ +use core::future::Future; + +use monero_wallet::rpc::{RpcError, Rpc as RpcTrait}; +use monero_simple_request_rpc::SimpleRequestRpc; + +use serai_client::primitives::{NetworkId, Coin, Amount}; + +use scanner::ScannerFeed; +use signers::TransactionPublisher; + +use crate::{ + transaction::Transaction, + block::{BlockHeader, Block}, +}; + +#[derive(Clone)] +pub(crate) struct Rpc { + pub(crate) rpc: SimpleRequestRpc, +} + +impl ScannerFeed for Rpc { + const NETWORK: NetworkId = NetworkId::Monero; + // Outputs aren't spendable until 10 blocks later due to the 10-block lock + // Since we assumed scanned outputs are spendable, that sets a minimum confirmation depth of 10 + // A 10-block reorganization hasn't been observed in years and shouldn't occur + const CONFIRMATIONS: u64 = 10; + // The window length should be roughly an hour + const WINDOW_LENGTH: u64 = 30; + + const TEN_MINUTES: u64 = 5; + + type Block = Block; + + type EphemeralError = RpcError; + + fn latest_finalized_block_number( + &self, + ) -> impl Send + Future> { + async move { + Ok( + self + .rpc + .get_height() + .await? + .checked_sub(1) + .expect("connected to an invalid Monero RPC") + .try_into() + .unwrap(), + ) + } + } + + fn time_of_block( + &self, + number: u64, + ) -> impl Send + Future> { + async move { + // Constant from Monero + const BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW: u64 = 60; + + // If Monero doesn't have enough blocks to build a window, it doesn't define a network time + if (number + 1) < BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW { + return Ok(0); + } + + // Fetch all the timestamps within the window + let block_for_time_of = self.rpc.get_block_by_number(number.try_into().unwrap()).await?; + let mut timestamps = vec![block_for_time_of.header.timestamp]; + let mut parent = block_for_time_of.header.previous; + for _ in 1 .. 
BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW { + let parent_block = self.rpc.get_block(parent).await?; + timestamps.push(parent_block.header.timestamp); + parent = parent_block.header.previous; + } + timestamps.sort(); + + // Because there are two timestamps equidistant from the ends, Monero's epee picks the + // in-between value, calculated by the following formula (from the "get_mid" function) + let n = timestamps.len() / 2; + let a = timestamps[n - 1]; + let b = timestamps[n]; + #[rustfmt::skip] // Enables Ctrl+F'ing for everything after the `= ` + let res = (a/2) + (b/2) + ((a - 2*(a/2)) + (b - 2*(b/2)))/2; + + // Monero does check that the new block's time is greater than the median, causing the median + // to be monotonic + Ok(res) + } + } + + fn unchecked_block_header_by_number( + &self, + number: u64, + ) -> impl Send + + Future::Header, Self::EphemeralError>> + { + async move { Ok(BlockHeader(self.rpc.get_block_by_number(number.try_into().unwrap()).await?)) } + } + + #[rustfmt::skip] // It wants to improperly format the `async move` to a single line + fn unchecked_block_by_number( + &self, + number: u64, + ) -> impl Send + Future> { + async move { + Ok(Block(self.rpc.get_scannable_block_by_number(number.try_into().unwrap()).await?)) + } + } + + fn dust(coin: Coin) -> Amount { + assert_eq!(coin, Coin::Monero); + + // 0.01 XMR + Amount(10_000_000_000) + } + + fn cost_to_aggregate( + &self, + coin: Coin, + _reference_block: &Self::Block, + ) -> impl Send + Future> { + async move { + assert_eq!(coin, Coin::Monero); + // TODO + Ok(Amount(0)) + } + } +} + +impl TransactionPublisher for Rpc { + type EphemeralError = RpcError; + + fn publish( + &self, + tx: Transaction, + ) -> impl Send + Future> { + async move { self.rpc.publish_transaction(&tx.0).await } + } +} diff --git a/processor/monero/src/scheduler.rs b/processor/monero/src/scheduler.rs new file mode 100644 index 000000000..489db8105 --- /dev/null +++ b/processor/monero/src/scheduler.rs @@ -0,0 +1,269 @@ +use core::future::Future; + +use zeroize::Zeroizing; +use rand_core::SeedableRng; +use rand_chacha::ChaCha20Rng; + +use ciphersuite::{Ciphersuite, Ed25519}; + +use monero_wallet::rpc::{FeeRate, RpcError}; + +use serai_client::{ + primitives::{Coin, Amount}, + networks::monero::Address, +}; + +use primitives::{OutputType, ReceivedOutput, Payment}; +use scanner::{KeyFor, AddressFor, OutputFor, BlockFor}; +use utxo_scheduler::{PlannedTransaction, TransactionPlanner}; + +use monero_wallet::{ + ringct::RctType, + address::{Network, AddressType, MoneroAddress}, + OutputWithDecoys, + send::{ + Change, SendError, SignableTransaction as MSignableTransaction, Eventuality as MEventuality, + }, +}; + +use crate::{ + EXTERNAL_SUBADDRESS, BRANCH_SUBADDRESS, CHANGE_SUBADDRESS, FORWARDED_SUBADDRESS, view_pair, + transaction::{SignableTransaction, Eventuality}, + rpc::Rpc, +}; + +fn address_from_serai_key(key: ::G, kind: OutputType) -> Address { + view_pair(key) + .address( + Network::Mainnet, + Some(match kind { + OutputType::External => EXTERNAL_SUBADDRESS, + OutputType::Branch => BRANCH_SUBADDRESS, + OutputType::Change => CHANGE_SUBADDRESS, + OutputType::Forwarded => FORWARDED_SUBADDRESS, + }), + None, + ) + .try_into() + .expect("created address which wasn't representable") +} + +async fn signable_transaction( + rpc: &Rpc, + reference_block: &BlockFor, + inputs: Vec>, + payments: Vec>>, + change: Option>, +) -> Result, RpcError> { + assert!(inputs.len() < >::MAX_INPUTS); + assert!( + (payments.len() + usize::from(u8::from(change.is_some()))) < + 
>::MAX_OUTPUTS + ); + + // TODO: Set a sane minimum fee + const MINIMUM_FEE: u64 = 1_500_000; + // TODO: Set a fee rate based on the reference block + let fee_rate = FeeRate::new(MINIMUM_FEE, 10000).unwrap(); + + // Determine the RCT proofs to make based off the hard fork + let rct_type = match reference_block.0.block.header.hardfork_version { + 14 => RctType::ClsagBulletproof, + 15 | 16 => RctType::ClsagBulletproofPlus, + _ => panic!("Monero hard forked and the processor wasn't updated for it"), + }; + + // We need a unique ID to distinguish this transaction from another transaction with an identical + // set of payments (as our Eventualities only match over the payments). The output's ID is + // guaranteed to be unique, making it satisfactory + let id = inputs.first().unwrap().id().0; + + let mut inputs_actual = Vec::with_capacity(inputs.len()); + for input in inputs { + inputs_actual.push( + OutputWithDecoys::fingerprintable_deterministic_new( + // We need a deterministic RNG here with *some* seed + // The unique ID means we don't pick some static seed + // It is a public value, yet that's fine as this is assumed fully transparent + // It is a reused value (with later code), but that's not an issue. Just an oddity + &mut ChaCha20Rng::from_seed(id), + &rpc.rpc, + match rct_type { + RctType::ClsagBulletproof => 11, + RctType::ClsagBulletproofPlus => 16, + _ => panic!("selecting decoys for an unsupported RctType"), + }, + reference_block.0.block.number().unwrap() + 1, + input.0.clone(), + ) + .await?, + ); + } + let inputs = inputs_actual; + + let mut payments = payments + .into_iter() + .map(|payment| { + (MoneroAddress::from(*payment.address()), { + let balance = payment.balance(); + assert_eq!(balance.coin, Coin::Monero); + balance.amount.0 + }) + }) + .collect::>(); + if (payments.len() + usize::from(u8::from(change.is_some()))) == 1 { + // Monero requires at least two outputs, so add a dummy payment + payments.push(( + MoneroAddress::new( + Network::Mainnet, + AddressType::Legacy, + ::generator().0, + ::generator().0, + ), + 0, + )); + } + + let change = if let Some(change) = change { + Change::guaranteed(view_pair(change), Some(CHANGE_SUBADDRESS)) + } else { + Change::fingerprintable(None) + }; + + Ok( + MSignableTransaction::new( + rct_type, + Zeroizing::new(id), + inputs, + payments, + change, + vec![], + fee_rate, + ) + .map(|signable| (SignableTransaction { id, signable: signable.clone() }, signable)), + ) +} + +#[derive(Clone)] +pub(crate) struct Planner(pub(crate) Rpc); +impl TransactionPlanner for Planner { + type EphemeralError = RpcError; + + type SignableTransaction = SignableTransaction; + + // wallet2 will not create a transaction larger than 100 KB, and Monero won't relay a transaction + // larger than 150 KB. This fits within the 100 KB mark to fit in and not poke the bear. 
+ // Technically, it can be ~124, yet a small bit of buffer is appreciated + // TODO: Test creating a TX this big + const MAX_INPUTS: usize = 120; + const MAX_OUTPUTS: usize = 16; + + fn branch_address(key: KeyFor) -> AddressFor { + address_from_serai_key(key, OutputType::Branch) + } + fn change_address(key: KeyFor) -> AddressFor { + address_from_serai_key(key, OutputType::Change) + } + fn forwarding_address(key: KeyFor) -> AddressFor { + address_from_serai_key(key, OutputType::Forwarded) + } + + fn calculate_fee( + &self, + reference_block: &BlockFor, + inputs: Vec>, + payments: Vec>>, + change: Option>, + ) -> impl Send + Future> { + async move { + Ok(match signable_transaction(&self.0, reference_block, inputs, payments, change).await? { + Ok(tx) => Amount(tx.1.necessary_fee()), + Err(SendError::NotEnoughFunds { necessary_fee, .. }) => { + Amount(necessary_fee.expect("outputs value exceeded inputs value")) + } + Err(SendError::UnsupportedRctType) => { + panic!("tried to use an RctType monero-wallet doesn't support") + } + Err(SendError::NoInputs | SendError::NoOutputs | SendError::TooManyOutputs) => { + panic!("malformed plan passed to calculate_fee") + } + Err(SendError::InvalidDecoyQuantity) => panic!("selected the wrong amount of decoys"), + Err(SendError::NoChange) => { + panic!("didn't add a dummy payment to satisfy the 2-output minimum") + } + Err(SendError::MultiplePaymentIds) => { + panic!("included multiple payment IDs despite not supporting addresses with payment IDs") + } + Err(SendError::TooMuchArbitraryData) => { + panic!("included too much arbitrary data despite not including any") + } + Err(SendError::TooLargeTransaction) => { + panic!("too large transaction despite MAX_INPUTS/MAX_OUTPUTS") + } + Err( + SendError::WrongPrivateKey | + SendError::MaliciousSerialization | + SendError::ClsagError(_) | + SendError::FrostError(_), + ) => unreachable!("signing/serialization error when not signing/serializing"), + }) + } + } + + fn plan( + &self, + reference_block: &BlockFor, + inputs: Vec>, + payments: Vec>>, + change: Option>, + ) -> impl Send + + Future, RpcError>> + { + let singular_spent_output = (inputs.len() == 1).then(|| inputs[0].id()); + + async move { + Ok(match signable_transaction(&self.0, reference_block, inputs, payments, change).await? { + Ok(tx) => { + let id = tx.0.id; + PlannedTransaction { + signable: tx.0, + eventuality: Eventuality { + id, + singular_spent_output, + eventuality: MEventuality::from(tx.1), + }, + auxilliary: (), + } + } + Err(SendError::NotEnoughFunds { .. 
}) => panic!("failed to successfully amortize the fee"), + Err(SendError::UnsupportedRctType) => { + panic!("tried to use an RctType monero-wallet doesn't support") + } + Err(SendError::NoInputs | SendError::NoOutputs | SendError::TooManyOutputs) => { + panic!("malformed plan passed to calculate_fee") + } + Err(SendError::InvalidDecoyQuantity) => panic!("selected the wrong amount of decoys"), + Err(SendError::NoChange) => { + panic!("didn't add a dummy payment to satisfy the 2-output minimum") + } + Err(SendError::MultiplePaymentIds) => { + panic!("included multiple payment IDs despite not supporting addresses with payment IDs") + } + Err(SendError::TooMuchArbitraryData) => { + panic!("included too much arbitrary data despite not including any") + } + Err(SendError::TooLargeTransaction) => { + panic!("too large transaction despite MAX_INPUTS/MAX_OUTPUTS") + } + Err( + SendError::WrongPrivateKey | + SendError::MaliciousSerialization | + SendError::ClsagError(_) | + SendError::FrostError(_), + ) => unreachable!("signing/serialization error when not signing/serializing"), + }) + } + } +} + +pub(crate) type Scheduler = utxo_standard_scheduler::Scheduler; diff --git a/processor/primitives/Cargo.toml b/processor/primitives/Cargo.toml new file mode 100644 index 000000000..6dd3082b5 --- /dev/null +++ b/processor/primitives/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "serai-processor-primitives" +version = "0.1.0" +description = "Primitives for the Serai processor" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/primitives" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +publish = false + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +group = { version = "0.13", default-features = false } + +serai-primitives = { path = "../../substrate/primitives", default-features = false, features = ["std"] } +serai-coins-primitives = { path = "../../substrate/coins/primitives", default-features = false, features = ["std"] } + +scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } +borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } + +log = { version = "0.4", default-features = false, features = ["std"] } +tokio = { version = "1", default-features = false, features = ["macros", "sync", "time"] } diff --git a/processor/primitives/LICENSE b/processor/primitives/LICENSE new file mode 100644 index 000000000..41d5a2616 --- /dev/null +++ b/processor/primitives/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2022-2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . diff --git a/processor/primitives/README.md b/processor/primitives/README.md new file mode 100644 index 000000000..d616993cb --- /dev/null +++ b/processor/primitives/README.md @@ -0,0 +1,3 @@ +# Primitives + +Primitive types/traits/structs used by the Processor. 
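As an aside on the Monero `ScannerFeed::time_of_block` implementation above: the expression `(a/2) + (b/2) + ((a - 2*(a/2)) + (b - 2*(b/2)))/2` is Monero's epee `get_mid`, which takes the floor of the average of the two middle timestamps without ever forming `a + b` (which could overflow a u64). A minimal, self-contained sketch of that equivalence follows; the `epee_mid` helper is purely illustrative and not part of this codebase.

// Illustrative helper: the same arithmetic as in `time_of_block`, rewritten with `%`,
// since `a - 2*(a/2)` is just `a % 2`.
fn epee_mid(a: u64, b: u64) -> u64 {
  // Halve both operands, then add back the unit lost to the two truncations (if any)
  (a / 2) + (b / 2) + ((a % 2) + (b % 2)) / 2
}

#[test]
fn epee_mid_is_the_overflow_free_floor_average() {
  for (a, b) in [(0, 1), (1_700_000_000, 1_700_000_120), (u64::MAX - 1, u64::MAX)] {
    // Matches floor((a + b) / 2) computed in a wider integer type
    assert_eq!(u128::from(epee_mid(a, b)), (u128::from(a) + u128::from(b)) / 2);
  }
}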
diff --git a/processor/primitives/src/block.rs b/processor/primitives/src/block.rs new file mode 100644 index 000000000..a3dec40bb --- /dev/null +++ b/processor/primitives/src/block.rs @@ -0,0 +1,66 @@ +use core::fmt::Debug; +use std::collections::HashMap; + +use group::{Group, GroupEncoding}; + +use crate::{Address, ReceivedOutput, Eventuality, EventualityTracker}; + +/// A block header from an external network. +pub trait BlockHeader: Send + Sync + Sized + Clone + Debug { + /// The ID of this block. + /// + /// This is fixed to 32-bytes and is expected to be cryptographically binding with 128-bit + /// security. This is not required to be the ID used natively by the external network. + fn id(&self) -> [u8; 32]; + /// The ID of the parent block. + fn parent(&self) -> [u8; 32]; +} + +/// A block from an external network. +/// +/// A block is defined as a consensus event associated with a set of transactions. It is not +/// necessary to literally define it as whatever the external network defines as a block. For +/// external networks which finalize block(s), this block type should be a representation of all +/// transactions within a period finalization (whether block or epoch). +pub trait Block: Send + Sync + Sized + Clone + Debug { + /// The type used for this block's header. + type Header: BlockHeader; + + /// The type used to represent keys on this external network. + type Key: Group + GroupEncoding; + /// The type used to represent addresses on this external network. + type Address: Address; + /// The type used to represent received outputs on this external network. + type Output: ReceivedOutput; + /// The type used to represent an Eventuality for a transaction on this external network. + type Eventuality: Eventuality< + OutputId = >::Id, + >; + + /// The ID of this block. + fn id(&self) -> [u8; 32]; + + /// Scan all outputs within this block to find the outputs spendable by this key. + /// + /// No assumption on the order of the returned outputs is made. + fn scan_for_outputs_unordered( + &self, + latest_active_key: Self::Key, + key: Self::Key, + ) -> Vec; + + /// Check if this block resolved any Eventualities. + /// + /// This MUST mutate `eventualities` to no longer contain the resolved Eventualities. + /// + /// Returns the resolved Eventualities, indexed by the ID of the transactions which resolved + /// them. + #[allow(clippy::type_complexity)] + fn check_for_eventuality_resolutions( + &self, + eventualities: &mut EventualityTracker, + ) -> HashMap< + >::TransactionId, + Self::Eventuality, + >; +} diff --git a/processor/primitives/src/eventuality.rs b/processor/primitives/src/eventuality.rs new file mode 100644 index 000000000..f68ceeae9 --- /dev/null +++ b/processor/primitives/src/eventuality.rs @@ -0,0 +1,59 @@ +use std::{io, collections::HashMap}; + +use crate::Id; + +/// A description of a transaction which will eventually happen. +pub trait Eventuality: Sized + Send + Sync { + /// The type used to identify a received output. + type OutputId: Id; + + /// The ID of the SignableTransaction this Eventuality is for. + /// + /// This is an internal ID arbitrarily definable so long as it's unique. + fn id(&self) -> [u8; 32]; + + /// A unique byte sequence which can be used to identify potentially resolving transactions. + /// + /// Both a transaction and an Eventuality are expected to be able to yield lookup sequences. + /// Lookup sequences MUST be unique to the Eventuality and identical to any transaction's which + /// satisfies this Eventuality. 
Transactions which don't satisfy this Eventuality MAY also have + /// an identical lookup sequence. + /// + /// This is used to find the Eventuality a transaction MAY resolve so we don't have to check all + /// transactions against all Eventualities. Once the potential resolved Eventuality is + /// identified, the full check is performed. + fn lookup(&self) -> Vec; + + /// The output the resolution of this Eventuality was supposed to spend. + /// + /// If the resolution of this Eventuality has multiple inputs, there is no singular spent output + /// so this MUST return None. + fn singular_spent_output(&self) -> Option; + + /// Read an Eventuality. + fn read(reader: &mut impl io::Read) -> io::Result; + /// Write an Eventuality. + fn write(&self, writer: &mut impl io::Write) -> io::Result<()>; +} + +/// A tracker of unresolved Eventualities. +#[derive(Debug)] +pub struct EventualityTracker { + /// The active Eventualities. + /// + /// These are keyed by their lookups. + pub active_eventualities: HashMap, E>, +} + +impl Default for EventualityTracker { + fn default() -> Self { + EventualityTracker { active_eventualities: HashMap::new() } + } +} + +impl EventualityTracker { + /// Insert an Eventuality into the tracker. + pub fn insert(&mut self, eventuality: E) { + self.active_eventualities.insert(eventuality.lookup(), eventuality); + } +} diff --git a/processor/primitives/src/lib.rs b/processor/primitives/src/lib.rs new file mode 100644 index 000000000..cc915ca2f --- /dev/null +++ b/processor/primitives/src/lib.rs @@ -0,0 +1,89 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +use core::{hash::Hash, fmt::Debug}; + +use group::GroupEncoding; + +use scale::{Encode, Decode}; +use borsh::{BorshSerialize, BorshDeserialize}; + +/// A module for task-related structs and functionality. +pub mod task; + +mod output; +pub use output::*; + +mod eventuality; +pub use eventuality::*; + +mod block; +pub use block::*; + +mod payment; +pub use payment::*; + +/// An ID for an output/transaction/block/etc. +/// +/// IDs don't need to implement `Copy`, enabling `[u8; 33]`, `[u8; 64]` to be used. IDs are still +/// bound to being of a constant-size, where `Default::default()` returns an instance of such size +/// (making `Vec` invalid as an `Id`). +pub trait Id: + Send + + Sync + + Clone + + Default + + PartialEq + + Eq + + Hash + + AsRef<[u8]> + + AsMut<[u8]> + + Debug + + Encode + + Decode + + BorshSerialize + + BorshDeserialize +{ +} +impl< + I: Send + + Sync + + Clone + + Default + + PartialEq + + Eq + + Hash + + AsRef<[u8]> + + AsMut<[u8]> + + Debug + + Encode + + Decode + + BorshSerialize + + BorshDeserialize, + > Id for I +{ +} + +/// A wrapper for a group element which implements the scale/borsh traits. 
+#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub struct EncodableG(pub G); +impl Encode for EncodableG { + fn using_encoded R>(&self, f: F) -> R { + f(self.0.to_bytes().as_ref()) + } +} +impl BorshSerialize for EncodableG { + fn serialize(&self, writer: &mut W) -> borsh::io::Result<()> { + writer.write_all(self.0.to_bytes().as_ref()) + } +} +impl BorshDeserialize for EncodableG { + fn deserialize_reader(reader: &mut R) -> borsh::io::Result { + let mut repr = G::Repr::default(); + reader.read_exact(repr.as_mut())?; + Ok(Self( + Option::::from(G::from_bytes(&repr)).ok_or(borsh::io::Error::other("invalid point"))?, + )) + } +} diff --git a/processor/primitives/src/output.rs b/processor/primitives/src/output.rs new file mode 100644 index 000000000..76acde600 --- /dev/null +++ b/processor/primitives/src/output.rs @@ -0,0 +1,144 @@ +use core::fmt::Debug; +use std::io; + +use group::GroupEncoding; + +use borsh::{BorshSerialize, BorshDeserialize}; + +use serai_primitives::{ExternalAddress, Balance}; + +use crate::Id; + +/// An address on the external network. +pub trait Address: + Send + + Sync + + Clone + + Into + + TryFrom + + BorshSerialize + + BorshDeserialize +{ +} +// This casts a wide net, yet it only implements `Address` for things `Into` so +// it should only implement this for addresses +impl< + A: Send + + Sync + + Clone + + Into + + TryFrom + + BorshSerialize + + BorshDeserialize, + > Address for A +{ +} + +/// The type of the output. +#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] +pub enum OutputType { + /// An output received to the address external payments use. + /// + /// This is reported to Substrate in a `Batch`. + External, + + /// A branch output. + /// + /// Given a known output set, and a known series of outbound transactions, we should be able to + /// form a completely deterministic schedule S. The issue is when S has TXs which spend prior TXs + /// in S (which is needed for our logarithmic scheduling). In order to have the descendant TX, + /// say S[1], build off S[0], we need to observe when S[0] is included on-chain. + /// + /// We cannot. + /// + /// Monero (and other privacy coins) do not expose their UTXO graphs. Even if we know how to + /// create S[0], and the actual payment info behind it, we cannot observe it on the blockchain + /// unless we participated in creating it. Locking the entire schedule, when we cannot sign for + /// the entire schedule at once, to a single signing set isn't feasible. + /// + /// While any member of the active signing set can provide data enabling other signers to + /// participate, it's several KB of data which we then have to code communication for. + /// The other option is to simply not observe S[0]. Instead, observe a TX with an identical + /// output to the one in S[0] we intended to use for S[1]. It's either from S[0], or Eve, a + /// malicious actor, has sent us a forged TX which is... equally as usable? So who cares? + /// + /// The only issue is if we have multiple outputs on-chain with identical amounts and purposes. + /// Accordingly, when the scheduler makes a plan for when a specific output is available, it + /// shouldn't set that plan. It should *push* that plan to a queue of plans to perform when + /// instances of that output occur. + Branch, + + /// A change output. + /// + /// This should be added to the available UTXO pool with no further action taken. It does not + /// need to be reported (though we do still need synchrony on the block it's in). 
There's no + /// explicit expectation for the usage of this output at time of recipience. + Change, + + /// A forwarded output from the prior multisig. + /// + /// This is distinguished for technical reasons around detecting when a multisig should be + /// retired. + Forwarded, +} + +impl OutputType { + /// Write the OutputType. + pub fn write(&self, writer: &mut W) -> io::Result<()> { + writer.write_all(&[match self { + OutputType::External => 0, + OutputType::Branch => 1, + OutputType::Change => 2, + OutputType::Forwarded => 3, + }]) + } + + /// Read an OutputType. + pub fn read(reader: &mut R) -> io::Result { + let mut byte = [0; 1]; + reader.read_exact(&mut byte)?; + Ok(match byte[0] { + 0 => OutputType::External, + 1 => OutputType::Branch, + 2 => OutputType::Change, + 3 => OutputType::Forwarded, + _ => Err(io::Error::other("invalid OutputType"))?, + }) + } +} + +/// A received output. +pub trait ReceivedOutput: + Send + Sync + Sized + Clone + PartialEq + Eq + Debug +{ + /// The type used to identify this output. + type Id: 'static + Id; + /// The type used to identify the transaction which created this output. + type TransactionId: 'static + Id; + + /// The type of this output. + fn kind(&self) -> OutputType; + + /// The ID of this output. + fn id(&self) -> Self::Id; + /// The ID of the transaction which created this output. + fn transaction_id(&self) -> Self::TransactionId; + /// The key this output was received by. + fn key(&self) -> K; + + /// The presumed origin for this output. + /// + /// This is used as the address to refund coins to if we can't handle the output as desired + /// (unless overridden). + fn presumed_origin(&self) -> Option; + + /// The balance associated with this output. + fn balance(&self) -> Balance; + /// The arbitrary data (presumably an InInstruction) associated with this output. + fn data(&self) -> &[u8]; + + /// Write this output. + fn write(&self, writer: &mut W) -> io::Result<()>; + /// Read an output. + fn read(reader: &mut R) -> io::Result; +} diff --git a/processor/primitives/src/payment.rs b/processor/primitives/src/payment.rs new file mode 100644 index 000000000..59b10f7f9 --- /dev/null +++ b/processor/primitives/src/payment.rs @@ -0,0 +1,56 @@ +use std::io; + +use scale::{Encode, Decode, IoReader}; +use borsh::{BorshSerialize, BorshDeserialize}; + +use serai_primitives::Balance; +use serai_coins_primitives::OutInstructionWithBalance; + +use crate::Address; + +/// A payment to fulfill. +#[derive(Clone, BorshSerialize, BorshDeserialize)] +pub struct Payment { + address: A, + balance: Balance, +} + +impl TryFrom for Payment { + type Error = (); + fn try_from(out_instruction_with_balance: OutInstructionWithBalance) -> Result { + Ok(Payment { + address: out_instruction_with_balance.instruction.address.try_into().map_err(|_| ())?, + balance: out_instruction_with_balance.balance, + }) + } +} + +impl Payment { + /// Create a new Payment. + pub fn new(address: A, balance: Balance) -> Self { + Payment { address, balance } + } + + /// The address to pay. + pub fn address(&self) -> &A { + &self.address + } + /// The balance to transfer. + pub fn balance(&self) -> Balance { + self.balance + } + + /// Read a Payment. + pub fn read(reader: &mut impl io::Read) -> io::Result { + let address = A::deserialize_reader(reader)?; + let reader = &mut IoReader(reader); + let balance = Balance::decode(reader).map_err(io::Error::other)?; + Ok(Self { address, balance }) + } + /// Write the Payment. 
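+  ///
+  /// Below is a minimal, non-compiled sketch of a write/read round-trip; `address` and
+  /// `balance` are hypothetical values of some `Address` type `A` and a `Balance`.
+  ///
+  /// ```ignore
+  /// let payment = Payment::new(address, balance);
+  /// let mut buf = vec![];
+  /// payment.write(&mut buf).unwrap();
+  /// let read_back = Payment::<A>::read(&mut buf.as_slice()).unwrap();
+  /// assert_eq!(read_back.balance(), payment.balance());
+  /// ```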
+ pub fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { + self.address.serialize(writer)?; + self.balance.encode_to(writer); + Ok(()) + } +} diff --git a/processor/primitives/src/task.rs b/processor/primitives/src/task.rs new file mode 100644 index 000000000..e8efc64ca --- /dev/null +++ b/processor/primitives/src/task.rs @@ -0,0 +1,155 @@ +use core::{future::Future, time::Duration}; +use std::sync::Arc; + +use tokio::sync::{mpsc, oneshot, Mutex}; + +enum Closed { + NotClosed(Option>), + Closed, +} + +/// A handle for a task. +#[derive(Clone)] +pub struct TaskHandle { + run_now: mpsc::Sender<()>, + close: mpsc::Sender<()>, + closed: Arc>, +} +/// A task's internal structures. +pub struct Task { + run_now: mpsc::Receiver<()>, + close: mpsc::Receiver<()>, + closed: oneshot::Sender<()>, +} + +impl Task { + /// Create a new task definition. + pub fn new() -> (Self, TaskHandle) { + // Uses a capacity of 1 as any call to run as soon as possible satisfies all calls to run as + // soon as possible + let (run_now_send, run_now_recv) = mpsc::channel(1); + // And any call to close satisfies all calls to close + let (close_send, close_recv) = mpsc::channel(1); + let (closed_send, closed_recv) = oneshot::channel(); + ( + Self { run_now: run_now_recv, close: close_recv, closed: closed_send }, + TaskHandle { + run_now: run_now_send, + close: close_send, + closed: Arc::new(Mutex::new(Closed::NotClosed(Some(closed_recv)))), + }, + ) + } +} + +impl TaskHandle { + /// Tell the task to run now (and not whenever its next iteration on a timer is). + /// + /// Panics if the task has been dropped. + pub fn run_now(&self) { + #[allow(clippy::match_same_arms)] + match self.run_now.try_send(()) { + Ok(()) => {} + // NOP on full, as this task will already be ran as soon as possible + Err(mpsc::error::TrySendError::Full(())) => {} + Err(mpsc::error::TrySendError::Closed(())) => { + panic!("task was unexpectedly closed when calling run_now") + } + } + } + + /// Close the task. + /// + /// Returns once the task shuts down after it finishes its current iteration (which may be of + /// unbounded time). + pub async fn close(self) { + // If another instance of the handle called tfhis, don't error + let _ = self.close.send(()).await; + // Wait until we receive the closed message + let mut closed = self.closed.lock().await; + match &mut *closed { + Closed::NotClosed(ref mut recv) => { + assert_eq!(recv.take().unwrap().await, Ok(()), "continually ran task dropped itself?"); + *closed = Closed::Closed; + } + Closed::Closed => {} + } + } +} + +/// A task to be continually ran. +pub trait ContinuallyRan: Sized + Send { + /// The amount of seconds before this task should be polled again. + const DELAY_BETWEEN_ITERATIONS: u64 = 5; + /// The maximum amount of seconds before this task should be run again. + /// + /// Upon error, the amount of time waited will be linearly increased until this limit. + const MAX_DELAY_BETWEEN_ITERATIONS: u64 = 120; + + /// Run an iteration of the task. + /// + /// If this returns `true`, all dependents of the task will immediately have a new iteration ran + /// (without waiting for whatever timer they were already on). + fn run_iteration(&mut self) -> impl Send + Future>; + + /// Continually run the task. 
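+  ///
+  /// Below is a minimal, non-compiled sketch of wiring a task up; `MyTask` is a hypothetical
+  /// type implementing `ContinuallyRan`.
+  ///
+  /// ```ignore
+  /// let (task, handle) = Task::new();
+  /// // Drive the task in the background, with no dependent tasks to wake on progress.
+  /// tokio::spawn(MyTask::new().continually_run(task, vec![]));
+  /// // Request an immediate iteration instead of waiting for the next timer tick.
+  /// handle.run_now();
+  /// // Shut the task down, waiting for its current iteration to finish.
+  /// handle.close().await;
+  /// ```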
+ fn continually_run( + mut self, + mut task: Task, + dependents: Vec, + ) -> impl Send + Future { + async move { + // The default number of seconds to sleep before running the task again + let default_sleep_before_next_task = Self::DELAY_BETWEEN_ITERATIONS; + // The current number of seconds to sleep before running the task again + // We increment this upon errors in order to not flood the logs with errors + let mut current_sleep_before_next_task = default_sleep_before_next_task; + let increase_sleep_before_next_task = |current_sleep_before_next_task: &mut u64| { + let new_sleep = *current_sleep_before_next_task + default_sleep_before_next_task; + // Set a limit of sleeping for two minutes + *current_sleep_before_next_task = new_sleep.max(Self::MAX_DELAY_BETWEEN_ITERATIONS); + }; + + loop { + // If we were told to close/all handles were dropped, drop it + { + let should_close = task.close.try_recv(); + match should_close { + Ok(()) | Err(mpsc::error::TryRecvError::Disconnected) => break, + Err(mpsc::error::TryRecvError::Empty) => {} + } + } + + match self.run_iteration().await { + Ok(run_dependents) => { + // Upon a successful (error-free) loop iteration, reset the amount of time we sleep + current_sleep_before_next_task = default_sleep_before_next_task; + + if run_dependents { + for dependent in &dependents { + dependent.run_now(); + } + } + } + Err(e) => { + log::warn!("{}", e); + increase_sleep_before_next_task(&mut current_sleep_before_next_task); + } + } + + // Don't run the task again for another few seconds UNLESS told to run now + tokio::select! { + () = tokio::time::sleep(Duration::from_secs(current_sleep_before_next_task)) => {}, + msg = task.run_now.recv() => { + // Check if this is firing because the handle was dropped + if msg.is_none() { + break; + } + }, + } + } + + task.closed.send(()).unwrap(); + } + } +} diff --git a/processor/scanner/Cargo.toml b/processor/scanner/Cargo.toml new file mode 100644 index 000000000..09a6a937c --- /dev/null +++ b/processor/scanner/Cargo.toml @@ -0,0 +1,41 @@ +[package] +name = "serai-processor-scanner" +version = "0.1.0" +description = "Scanner of abstract blockchains for Serai" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/scanner" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +publish = false + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +# Encoders +hex = { version = "0.4", default-features = false, features = ["std"] } +scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } +borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } + +# Cryptography +group = { version = "0.13", default-features = false } + +# Application +log = { version = "0.4", default-features = false, features = ["std"] } +tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] } + +serai-db = { path = "../../common/db" } + +messages = { package = "serai-processor-messages", path = "../messages" } + +serai-primitives = { path = "../../substrate/primitives", default-features = false, features = ["std"] } +serai-in-instructions-primitives = { path = "../../substrate/in-instructions/primitives", default-features = false, features = ["std", "borsh"] } +serai-coins-primitives = { path = "../../substrate/coins/primitives", default-features = false, features = ["std", "borsh"] } 
+ +primitives = { package = "serai-processor-primitives", path = "../primitives" } +scheduler-primitives = { package = "serai-processor-scheduler-primitives", path = "../scheduler/primitives" } diff --git a/processor/scanner/LICENSE b/processor/scanner/LICENSE new file mode 100644 index 000000000..41d5a2616 --- /dev/null +++ b/processor/scanner/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2022-2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . diff --git a/processor/scanner/README.md b/processor/scanner/README.md new file mode 100644 index 000000000..f6c6ccc64 --- /dev/null +++ b/processor/scanner/README.md @@ -0,0 +1,12 @@ +# Scanner + +A scanner of arbitrary blockchains for Serai. + +This scanner has two distinct roles: + +1) Scanning blocks for received outputs contained within them +2) Scanning blocks for the completion of eventualities + +While these can be optimized into a single structure, they are written as two +distinct structures (with the associated overhead) for clarity and simplicity +reasons. diff --git a/processor/scanner/src/db.rs b/processor/scanner/src/db.rs new file mode 100644 index 000000000..a985ba43c --- /dev/null +++ b/processor/scanner/src/db.rs @@ -0,0 +1,630 @@ +use core::marker::PhantomData; +use std::io::{self, Read, Write}; + +use group::GroupEncoding; + +use scale::{Encode, Decode, IoReader}; +use borsh::{BorshSerialize, BorshDeserialize}; +use serai_db::{Get, DbTxn, create_db, db_channel}; + +use serai_in_instructions_primitives::{InInstructionWithBalance, Batch}; +use serai_coins_primitives::OutInstructionWithBalance; + +use primitives::{EncodableG, ReceivedOutput}; + +use crate::{ + lifetime::{LifetimeStage, Lifetime}, + ScannerFeed, KeyFor, AddressFor, OutputFor, Return, + scan::next_to_scan_for_outputs_block, +}; + +// The DB macro doesn't support `BorshSerialize + BorshDeserialize` as a bound, hence this. +trait Borshy: BorshSerialize + BorshDeserialize {} +impl Borshy for T {} + +#[derive(BorshSerialize, BorshDeserialize)] +struct SeraiKeyDbEntry { + activation_block_number: u64, + key: K, +} + +#[derive(Clone)] +pub(crate) struct SeraiKey { + pub(crate) key: K, + pub(crate) stage: LifetimeStage, + pub(crate) activation_block_number: u64, + pub(crate) block_at_which_reporting_starts: u64, + pub(crate) block_at_which_forwarding_starts: Option, +} + +pub(crate) struct OutputWithInInstruction { + pub(crate) output: OutputFor, + pub(crate) return_address: Option>, + pub(crate) in_instruction: InInstructionWithBalance, +} + +impl OutputWithInInstruction { + pub(crate) fn read(reader: &mut impl io::Read) -> io::Result { + let output = OutputFor::::read(reader)?; + let return_address = { + let mut opt = [0xff]; + reader.read_exact(&mut opt)?; + assert!((opt[0] == 0) || (opt[0] == 1)); + (opt[0] == 1).then(|| AddressFor::::deserialize_reader(reader)).transpose()? 
+ }; + let in_instruction = + InInstructionWithBalance::decode(&mut IoReader(reader)).map_err(io::Error::other)?; + Ok(Self { output, return_address, in_instruction }) + } + pub(crate) fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { + self.output.write(writer)?; + if let Some(return_address) = &self.return_address { + writer.write_all(&[1])?; + return_address.serialize(writer)?; + } else { + writer.write_all(&[0])?; + } + self.in_instruction.encode_to(writer); + Ok(()) + } +} + +create_db!( + ScannerGlobal { + StartBlock: () -> u64, + + QueuedKey: (key: K) -> (), + + ActiveKeys: () -> Vec>, + RetireAt: (key: K) -> u64, + + // The next block to potentially report + NextToPotentiallyReportBlock: () -> u64, + // Highest acknowledged block + HighestAcknowledgedBlock: () -> u64, + + // If a block was notable + /* + A block is notable if one of three conditions are met: + + 1) We activated a key within this block (or explicitly forward to an activated key). + 2) We retired a key within this block. + 3) We received outputs within this block. + + The first two conditions, and the reasoning for them, is extensively documented in + `spec/processor/Multisig Rotation.md`. The third is obvious (as any block we receive outputs + in needs synchrony so that we can spend the received outputs). + + We save if a block is notable here by either the scan for received outputs task or the + check for eventuality completion task. Once a block has been processed by both, the reporting + task will report any notable blocks. Finally, the task which sets the block safe to scan to + makes its decision based on the notable blocks and the acknowledged blocks. + */ + // This collapses from `bool` to `()`, using if the value was set for true and false otherwise + NotableBlock: (number: u64) -> (), + + SerializedForwardedOutput: (id: &[u8]) -> Vec, + } +); + +pub(crate) struct ScannerGlobalDb(PhantomData); +impl ScannerGlobalDb { + pub(crate) fn start_block(getter: &impl Get) -> Option { + StartBlock::get(getter) + } + pub(crate) fn set_start_block(txn: &mut impl DbTxn, block: u64) { + StartBlock::set(txn, &block) + } + + fn tidy_keys(txn: &mut impl DbTxn) { + let mut keys: Vec>>> = + ActiveKeys::get(txn).expect("retiring key yet no active keys"); + let Some(key) = keys.first() else { return }; + + // Get the block we're scanning for next + let block_number = next_to_scan_for_outputs_block::(txn).expect( + "tidying keys despite never setting the next to scan for block (done on initialization)", + ); + // If this key is scheduled for retiry... + if let Some(retire_at) = RetireAt::get(txn, key.key) { + // And is retired by/at this block... + if retire_at <= block_number { + // Remove it from the list of keys + let key = keys.remove(0); + ActiveKeys::set(txn, &keys); + // Also clean up the retiry block + RetireAt::del(txn, key.key); + } + } + } + + /// Queue a key. + /// + /// Keys may be queued whenever, so long as they're scheduled to activate `WINDOW_LENGTH` blocks + /// after the next block acknowledged after they've been set. There is no requirement that any + /// prior keys have had their processing completed (meaning what should be a length-2 vector may + /// be a length-n vector). + /// + /// A new key MUST NOT be queued to activate a block preceding the finishing of the key prior to + /// its prior. There MUST only be two keys active at one time. + /// + /// `activation_block_number` is inclusive, so the key will be scanned for starting at the + /// specified block. 
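+  ///
+  /// A minimal, non-compiled sketch, assuming a `ScannerFeed` implementation `S`, a database
+  /// handle `db`, and an already-generated `key`:
+  ///
+  /// ```ignore
+  /// let mut txn = db.txn();
+  /// // The key will be scanned for from block 1_000 onwards (inclusive).
+  /// ScannerGlobalDb::<S>::queue_key(&mut txn, 1_000, key);
+  /// txn.commit();
+  /// ```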
+ pub(crate) fn queue_key(txn: &mut impl DbTxn, activation_block_number: u64, key: KeyFor) { + // Set the block which has a key activate as notable + NotableBlock::set(txn, activation_block_number, &()); + + // Check this key has never been queued before + // This should only happen if a malicious supermajority collude, and breaks indexing by the key + assert!(QueuedKey::get(txn, EncodableG(key)).is_none(), "key being queued was prior queued"); + QueuedKey::set(txn, EncodableG(key), &()); + + // Fetch the existing keys + let mut keys: Vec>>> = + ActiveKeys::get(txn).unwrap_or(vec![]); + + // If this new key retires a key, mark the block at which forwarding explicitly occurs notable + // This lets us obtain synchrony over the transactions we'll make to accomplish this + if let Some(key_retired_by_this) = keys.last() { + NotableBlock::set( + txn, + Lifetime::calculate::( + // The 'current block number' used for this calculation + activation_block_number, + // The activation block of the key we're getting the lifetime of + key_retired_by_this.activation_block_number, + // The activation block of the key which will retire this key + Some(activation_block_number), + ) + .block_at_which_forwarding_starts + .expect( + "didn't calculate the block forwarding starts at despite passing the next key's info", + ), + &(), + ); + } + + // Push and save the next key + keys.push(SeraiKeyDbEntry { activation_block_number, key: EncodableG(key) }); + ActiveKeys::set(txn, &keys); + + // Now tidy the keys, ensuring this has a maximum length of 2 + Self::tidy_keys(txn); + } + /// Retire a key. + /// + /// The key retired must be the oldest key. There must be another key actively tracked. + pub(crate) fn retire_key(txn: &mut impl DbTxn, at_block: u64, key: KeyFor) { + // Set the block which has a key retire as notable + NotableBlock::set(txn, at_block, &()); + + let keys: Vec>>> = + ActiveKeys::get(txn).expect("retiring key yet no active keys"); + + assert!(keys.len() > 1, "retiring our only key"); + assert_eq!(keys[0].key.0, key, "not retiring the oldest key"); + + RetireAt::set(txn, EncodableG(key), &at_block); + } + /// Fetch the active keys, as of the next-to-scan-for-outputs Block. + /// + /// This means the scan task should scan for all keys returned by this. + pub(crate) fn active_keys_as_of_next_to_scan_for_outputs_block( + getter: &impl Get, + ) -> Option>>> { + // We don't take this as an argument as we don't keep all historical keys in memory + // If we've scanned block 1,000,000, we can't answer the active keys as of block 0 + let block_number = next_to_scan_for_outputs_block::(getter)?; + + let raw_keys: Vec>>> = ActiveKeys::get(getter)?; + let mut keys = Vec::with_capacity(2); + for i in 0 .. 
raw_keys.len() { + // Ensure this key isn't retired + if let Some(retire_at) = RetireAt::get(getter, raw_keys[i].key) { + if retire_at <= block_number { + continue; + } + } + // Ensure this key isn't yet to activate + if block_number < raw_keys[i].activation_block_number { + continue; + } + let Lifetime { stage, block_at_which_reporting_starts, block_at_which_forwarding_starts } = + Lifetime::calculate::( + block_number, + raw_keys[i].activation_block_number, + raw_keys.get(i + 1).map(|key| key.activation_block_number), + ); + keys.push(SeraiKey { + key: raw_keys[i].key.0, + stage, + activation_block_number: raw_keys[i].activation_block_number, + block_at_which_reporting_starts, + block_at_which_forwarding_starts, + }); + } + assert!(keys.len() <= 2, "more than two keys active"); + Some(keys) + } + + pub(crate) fn set_highest_acknowledged_block( + txn: &mut impl DbTxn, + highest_acknowledged_block: u64, + ) { + HighestAcknowledgedBlock::set(txn, &highest_acknowledged_block); + } + pub(crate) fn highest_acknowledged_block(getter: &impl Get) -> Option { + HighestAcknowledgedBlock::get(getter) + } + + /* + This is so verbosely named as the DB itself already flags upon external outputs. Specifically, + if any block yields External outputs to accumulate, we flag it as notable. + + There is the slight edge case where some External outputs are queued for accumulation later. We + consider those outputs received as of the block they're queued to (maintaining the policy any + blocks in which we receive outputs is notable). + */ + pub(crate) fn flag_notable_due_to_non_external_output(txn: &mut impl DbTxn, block_number: u64) { + assert!( + NextToPotentiallyReportBlock::get(txn).unwrap() <= block_number, + "already potentially reported a block we're only now flagging as notable" + ); + NotableBlock::set(txn, block_number, &()); + } + + pub(crate) fn is_block_notable(getter: &impl Get, number: u64) -> bool { + NotableBlock::get(getter, number).is_some() + } + + pub(crate) fn return_address_and_in_instruction_for_forwarded_output( + getter: &impl Get, + output: & as ReceivedOutput, AddressFor>>::Id, + ) -> Option<(Option>, InInstructionWithBalance)> { + let buf = SerializedForwardedOutput::get(getter, output.as_ref())?; + let mut buf = buf.as_slice(); + + let mut opt = [0xff]; + buf.read_exact(&mut opt).unwrap(); + assert!((opt[0] == 0) || (opt[0] == 1)); + + let address = (opt[0] == 1).then(|| AddressFor::::deserialize_reader(&mut buf).unwrap()); + Some((address, InInstructionWithBalance::decode(&mut IoReader(buf)).unwrap())) + } +} + +/// The data produced by scanning a block. +/// +/// This is the sender's version which includes the forwarded outputs with their InInstructions, +/// which need to be saved to the database for later retrieval. +pub(crate) struct SenderScanData { + /// The block number. + pub(crate) block_number: u64, + /// The received outputs which should be accumulated into the scheduler. + pub(crate) received_external_outputs: Vec>, + /// The outputs which need to be forwarded. + pub(crate) forwards: Vec>, + /// The outputs which need to be returned. + pub(crate) returns: Vec>, +} + +/// The data produced by scanning a block. +/// +/// This is the receiver's version which doesn't include the forwarded outputs' InInstructions, as +/// the Eventuality task doesn't need it to process this block. +pub(crate) struct ReceiverScanData { + /// The block number. + pub(crate) block_number: u64, + /// The received outputs which should be accumulated into the scheduler. 
+ pub(crate) received_external_outputs: Vec>, + /// The outputs which need to be forwarded. + pub(crate) forwards: Vec>, + /// The outputs which need to be returned. + pub(crate) returns: Vec>, +} + +db_channel! { + ScannerScanEventuality { + ScannedBlock: () -> Vec, + } +} + +pub(crate) struct ScanToEventualityDb(PhantomData); +impl ScanToEventualityDb { + pub(crate) fn send_scan_data(txn: &mut impl DbTxn, block_number: u64, data: &SenderScanData) { + // If we received an External output to accumulate, or have an External output to forward + // (meaning we received an External output), or have an External output to return (again + // meaning we received an External output), set this block as notable due to receiving outputs + // The non-External output case is covered with `flag_notable_due_to_non_external_output` + if !(data.received_external_outputs.is_empty() && + data.forwards.is_empty() && + data.returns.is_empty()) + { + NotableBlock::set(txn, block_number, &()); + } + + // Save all the forwarded outputs' data + for forward in &data.forwards { + let mut buf = vec![]; + if let Some(address) = &forward.return_address { + buf.write_all(&[1]).unwrap(); + address.serialize(&mut buf).unwrap(); + } else { + buf.write_all(&[0]).unwrap(); + } + forward.in_instruction.encode_to(&mut buf); + + SerializedForwardedOutput::set(txn, forward.output.id().as_ref(), &buf); + } + + let mut buf = vec![]; + buf.write_all(&data.block_number.to_le_bytes()).unwrap(); + buf + .write_all(&u32::try_from(data.received_external_outputs.len()).unwrap().to_le_bytes()) + .unwrap(); + for output in &data.received_external_outputs { + output.write(&mut buf).unwrap(); + } + buf.write_all(&u32::try_from(data.forwards.len()).unwrap().to_le_bytes()).unwrap(); + for output_with_in_instruction in &data.forwards { + // Only write the output, as we saved the InInstruction above as needed + output_with_in_instruction.output.write(&mut buf).unwrap(); + } + buf.write_all(&u32::try_from(data.returns.len()).unwrap().to_le_bytes()).unwrap(); + for output in &data.returns { + output.write(&mut buf).unwrap(); + } + ScannedBlock::send(txn, &buf); + } + pub(crate) fn recv_scan_data( + txn: &mut impl DbTxn, + expected_block_number: u64, + ) -> ReceiverScanData { + let data = + ScannedBlock::try_recv(txn).expect("receiving data for a scanned block not yet sent"); + let mut data = data.as_slice(); + + let block_number = { + let mut block_number = [0; 8]; + data.read_exact(&mut block_number).unwrap(); + u64::from_le_bytes(block_number) + }; + assert_eq!( + block_number, expected_block_number, + "received data for a scanned block distinct than expected" + ); + + let received_external_outputs = { + let mut len = [0; 4]; + data.read_exact(&mut len).unwrap(); + let len = usize::try_from(u32::from_le_bytes(len)).unwrap(); + + let mut received_external_outputs = Vec::with_capacity(len); + for _ in 0 .. len { + received_external_outputs.push(OutputFor::::read(&mut data).unwrap()); + } + received_external_outputs + }; + + let forwards = { + let mut len = [0; 4]; + data.read_exact(&mut len).unwrap(); + let len = usize::try_from(u32::from_le_bytes(len)).unwrap(); + + let mut forwards = Vec::with_capacity(len); + for _ in 0 .. len { + forwards.push(OutputFor::::read(&mut data).unwrap()); + } + forwards + }; + + let returns = { + let mut len = [0; 4]; + data.read_exact(&mut len).unwrap(); + let len = usize::try_from(u32::from_le_bytes(len)).unwrap(); + + let mut returns = Vec::with_capacity(len); + for _ in 0 .. 
len { + returns.push(Return::::read(&mut data).unwrap()); + } + returns + }; + + ReceiverScanData { block_number, received_external_outputs, forwards, returns } + } +} + +pub(crate) struct Returnable { + pub(crate) return_address: Option>, + pub(crate) in_instruction: InInstructionWithBalance, +} + +impl Returnable { + fn read(reader: &mut impl io::Read) -> io::Result { + let mut opt = [0xff]; + reader.read_exact(&mut opt).unwrap(); + assert!((opt[0] == 0) || (opt[0] == 1)); + + let return_address = + (opt[0] == 1).then(|| AddressFor::::deserialize_reader(reader)).transpose()?; + + let in_instruction = + InInstructionWithBalance::decode(&mut IoReader(reader)).map_err(io::Error::other)?; + Ok(Returnable { return_address, in_instruction }) + } + fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { + if let Some(return_address) = &self.return_address { + writer.write_all(&[1])?; + return_address.serialize(writer)?; + } else { + writer.write_all(&[0])?; + } + self.in_instruction.encode_to(writer); + Ok(()) + } +} + +#[derive(BorshSerialize, BorshDeserialize)] +struct BlockBoundInInstructions { + block_number: u64, + returnable_in_instructions: Vec, +} + +db_channel! { + ScannerScanReport { + InInstructions: () -> BlockBoundInInstructions, + } +} + +pub(crate) struct InInstructionData { + pub(crate) external_key_for_session_to_sign_batch: KeyFor, + pub(crate) returnable_in_instructions: Vec>, +} + +pub(crate) struct ScanToReportDb(PhantomData); +impl ScanToReportDb { + pub(crate) fn send_in_instructions( + txn: &mut impl DbTxn, + block_number: u64, + data: &InInstructionData, + ) { + let mut buf = data.external_key_for_session_to_sign_batch.to_bytes().as_ref().to_vec(); + for returnable_in_instruction in &data.returnable_in_instructions { + returnable_in_instruction.write(&mut buf).unwrap(); + } + InInstructions::send( + txn, + &BlockBoundInInstructions { block_number, returnable_in_instructions: buf }, + ); + } + + pub(crate) fn recv_in_instructions( + txn: &mut impl DbTxn, + block_number: u64, + ) -> InInstructionData { + let data = InInstructions::try_recv(txn) + .expect("receiving InInstructions for a scanned block not yet sent"); + assert_eq!( + block_number, data.block_number, + "received InInstructions for a scanned block distinct than expected" + ); + let mut buf = data.returnable_in_instructions.as_slice(); + + let external_key_for_session_to_sign_batch = { + let mut external_key_for_session_to_sign_batch = + as GroupEncoding>::Repr::default(); + let key_len = external_key_for_session_to_sign_batch.as_ref().len(); + external_key_for_session_to_sign_batch.as_mut().copy_from_slice(&buf[.. key_len]); + buf = &buf[key_len ..]; + KeyFor::::from_bytes(&external_key_for_session_to_sign_batch).unwrap() + }; + + let mut returnable_in_instructions = vec![]; + while !buf.is_empty() { + returnable_in_instructions.push(Returnable::read(&mut buf).unwrap()); + } + InInstructionData { external_key_for_session_to_sign_batch, returnable_in_instructions } + } +} + +db_channel! 
{ + ScannerSubstrateEventuality { + Burns: (acknowledged_block: u64) -> Vec, + } +} + +pub(crate) struct SubstrateToEventualityDb; +impl SubstrateToEventualityDb { + pub(crate) fn send_burns( + txn: &mut impl DbTxn, + acknowledged_block: u64, + burns: Vec, + ) { + // Drop burns less than the dust + let burns = burns + .into_iter() + .filter(|burn| burn.balance.amount.0 >= S::dust(burn.balance.coin).0) + .collect::>(); + if !burns.is_empty() { + Burns::send(txn, acknowledged_block, &burns); + } + } + + pub(crate) fn try_recv_burns( + txn: &mut impl DbTxn, + acknowledged_block: u64, + ) -> Option> { + Burns::try_recv(txn, acknowledged_block) + } +} + +mod _public_db { + use serai_in_instructions_primitives::Batch; + + use serai_db::{Get, DbTxn, create_db, db_channel}; + + db_channel! { + ScannerPublic { + Batches: () -> Batch, + BatchesToSign: (key: &[u8]) -> Batch, + AcknowledgedBatches: (key: &[u8]) -> u32, + CompletedEventualities: (key: &[u8]) -> [u8; 32], + } + } +} + +/// The batches to publish. +/// +/// This is used for auditing the Batches published to Serai. +pub struct Batches; +impl Batches { + pub(crate) fn send(txn: &mut impl DbTxn, batch: &Batch) { + _public_db::Batches::send(txn, batch); + } + + /// Receive a batch to publish. + pub fn try_recv(txn: &mut impl DbTxn) -> Option { + _public_db::Batches::try_recv(txn) + } +} + +/// The batches to sign and publish. +/// +/// This is used for publishing Batches onto Serai. +pub struct BatchesToSign(PhantomData); +impl BatchesToSign { + pub(crate) fn send(txn: &mut impl DbTxn, key: &K, batch: &Batch) { + _public_db::BatchesToSign::send(txn, key.to_bytes().as_ref(), batch); + } + + /// Receive a batch to sign and publish. + pub fn try_recv(txn: &mut impl DbTxn, key: &K) -> Option { + _public_db::BatchesToSign::try_recv(txn, key.to_bytes().as_ref()) + } +} + +/// The batches which were acknowledged on-chain. +pub struct AcknowledgedBatches(PhantomData); +impl AcknowledgedBatches { + pub(crate) fn send(txn: &mut impl DbTxn, key: &K, batch: u32) { + _public_db::AcknowledgedBatches::send(txn, key.to_bytes().as_ref(), &batch); + } + + /// Receive the ID of a batch which was acknowledged. + pub fn try_recv(txn: &mut impl DbTxn, key: &K) -> Option { + _public_db::AcknowledgedBatches::try_recv(txn, key.to_bytes().as_ref()) + } +} + +/// The IDs of completed Eventualities found on-chain, within a finalized block. +pub struct CompletedEventualities(PhantomData); +impl CompletedEventualities { + pub(crate) fn send(txn: &mut impl DbTxn, key: &K, id: [u8; 32]) { + _public_db::CompletedEventualities::send(txn, key.to_bytes().as_ref(), &id); + } + + /// Receive the ID of a completed Eventuality. 
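+  ///
+  /// A minimal, non-compiled sketch of draining this channel, assuming a database handle `db`
+  /// and the external `key` whose completions are of interest:
+  ///
+  /// ```ignore
+  /// let mut txn = db.txn();
+  /// while let Some(id) = CompletedEventualities::try_recv(&mut txn, &key) {
+  ///   log::info!("eventuality {} was completed on-chain", hex::encode(id));
+  /// }
+  /// txn.commit();
+  /// ```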
+ pub fn try_recv(txn: &mut impl DbTxn, key: &K) -> Option<[u8; 32]> { + _public_db::CompletedEventualities::try_recv(txn, key.to_bytes().as_ref()) + } +} diff --git a/processor/scanner/src/eventuality/db.rs b/processor/scanner/src/eventuality/db.rs new file mode 100644 index 000000000..3e5088d1c --- /dev/null +++ b/processor/scanner/src/eventuality/db.rs @@ -0,0 +1,83 @@ +use core::marker::PhantomData; + +use scale::Encode; +use serai_db::{Get, DbTxn, create_db}; + +use primitives::{EncodableG, ReceivedOutput, Eventuality, EventualityTracker}; + +use crate::{ScannerFeed, KeyFor, AddressFor, OutputFor, EventualityFor}; + +create_db!( + ScannerEventuality { + // The next block to check for resolving eventualities + NextToCheckForEventualitiesBlock: () -> u64, + // The latest block this task has handled which was notable + LatestHandledNotableBlock: () -> u64, + + SerializedEventualities: (key: K) -> Vec, + + AccumulatedOutput: (id: &[u8]) -> (), + } +); + +pub(crate) struct EventualityDb(PhantomData); +impl EventualityDb { + pub(crate) fn set_next_to_check_for_eventualities_block( + txn: &mut impl DbTxn, + next_to_check_for_eventualities_block: u64, + ) { + NextToCheckForEventualitiesBlock::set(txn, &next_to_check_for_eventualities_block); + } + pub(crate) fn next_to_check_for_eventualities_block(getter: &impl Get) -> Option { + NextToCheckForEventualitiesBlock::get(getter) + } + + pub(crate) fn set_latest_handled_notable_block( + txn: &mut impl DbTxn, + latest_handled_notable_block: u64, + ) { + LatestHandledNotableBlock::set(txn, &latest_handled_notable_block); + } + pub(crate) fn latest_handled_notable_block(getter: &impl Get) -> Option { + LatestHandledNotableBlock::get(getter) + } + + pub(crate) fn set_eventualities( + txn: &mut impl DbTxn, + key: KeyFor, + eventualities: &EventualityTracker>, + ) { + let mut serialized = Vec::with_capacity(eventualities.active_eventualities.len() * 128); + for eventuality in eventualities.active_eventualities.values() { + eventuality.write(&mut serialized).unwrap(); + } + SerializedEventualities::set(txn, EncodableG(key), &serialized); + } + pub(crate) fn eventualities( + getter: &impl Get, + key: KeyFor, + ) -> EventualityTracker> { + let serialized = SerializedEventualities::get(getter, EncodableG(key)).unwrap_or(vec![]); + let mut serialized = serialized.as_slice(); + + let mut res = EventualityTracker::default(); + while !serialized.is_empty() { + let eventuality = EventualityFor::::read(&mut serialized).unwrap(); + res.insert(eventuality); + } + res + } + + pub(crate) fn prior_accumulated_output( + getter: &impl Get, + id: & as ReceivedOutput, AddressFor>>::Id, + ) -> bool { + AccumulatedOutput::get(getter, id.as_ref()).is_some() + } + pub(crate) fn accumulated_output( + txn: &mut impl DbTxn, + id: & as ReceivedOutput, AddressFor>>::Id, + ) { + AccumulatedOutput::set(txn, id.as_ref(), &()); + } +} diff --git a/processor/scanner/src/eventuality/mod.rs b/processor/scanner/src/eventuality/mod.rs new file mode 100644 index 000000000..bb3e4b7e4 --- /dev/null +++ b/processor/scanner/src/eventuality/mod.rs @@ -0,0 +1,529 @@ +use core::future::Future; +use std::collections::{HashSet, HashMap}; + +use group::GroupEncoding; + +use serai_db::{Get, DbTxn, Db}; + +use primitives::{task::ContinuallyRan, OutputType, ReceivedOutput, Eventuality, Block, Payment}; + +use crate::{ + lifetime::LifetimeStage, + db::{ + SeraiKey, OutputWithInInstruction, ReceiverScanData, ScannerGlobalDb, SubstrateToEventualityDb, + ScanToEventualityDb, + }, + BlockExt, ScannerFeed, 
KeyFor, AddressFor, OutputFor, EventualityFor, SchedulerUpdate, Scheduler, + CompletedEventualities, sort_outputs, + scan::{next_to_scan_for_outputs_block, queue_output_until_block}, +}; + +mod db; +use db::EventualityDb; + +/// The latest scannable block, which is determined by this task. +/// +/// This task decides when a key retires, which impacts the scan task. Accordingly, the scanner is +/// only allowed to scan `S::WINDOW_LENGTH - 1` blocks ahead so we can safely schedule keys to +/// retire `S::WINDOW_LENGTH` blocks out. +pub(crate) fn latest_scannable_block(getter: &impl Get) -> Option { + assert!(S::WINDOW_LENGTH > 0); + EventualityDb::::next_to_check_for_eventualities_block(getter) + .map(|b| b + S::WINDOW_LENGTH - 1) +} + +/// Intake a set of Eventualities into the DB. +/// +/// The HashMap is keyed by the key these Eventualities are for. +fn intake_eventualities( + txn: &mut impl DbTxn, + to_intake: HashMap, Vec>>, +) { + for (key, new_eventualities) in to_intake { + let key = { + let mut key_repr = as GroupEncoding>::Repr::default(); + assert_eq!(key.len(), key_repr.as_ref().len()); + key_repr.as_mut().copy_from_slice(&key); + KeyFor::::from_bytes(&key_repr).unwrap() + }; + + let mut eventualities = EventualityDb::::eventualities(txn, key); + for new_eventuality in new_eventualities { + eventualities.insert(new_eventuality); + } + EventualityDb::::set_eventualities(txn, key, &eventualities); + } +} + +/* + When we scan a block, we receive outputs. When this block is acknowledged, we accumulate those + outputs into some scheduler, potentially causing certain transactions to begin their signing + protocol. + + Despite only scanning blocks with `CONFIRMATIONS`, we cannot assume that these transactions (in + their signed form) will only appear after `CONFIRMATIONS`. For `CONFIRMATIONS = 10`, the scanned + block's number being `1`, the blockchain will have blocks with numbers `0 ..= 10`. While this + implies the earliest the transaction will appear is when the block number is `11`, which is + `1 + CONFIRMATIONS` (the number of the scanned block, plus the confirmations), this isn't + guaranteed. + + A reorganization could occur which causes all unconfirmed blocks to be replaced, with the new + blockchain having the signed transaction present immediately. + + This means that in order to detect Eventuality completions, we can only check block `b+1` once + we've acknowledged block `b`, accumulated its outputs, triggered any transactions, and prepared + for their Eventualities. This is important as both the completion of Eventualities, and the scan + process, may cause a block to be considered notable (where notable blocks must be perfectly + ordered). + + We do not want to fully serialize the scan flow solely because the Eventuality flow must be. If + the time to scan, acknowledge, and intake a block ever exceeded the block time, we'd form a + backlog. + + The solution is to form a window of blocks we can scan/acknowledge/intake, safely, such that we + only form a backlog if the latency for a block exceeds the duration of the entire window (the + amount of blocks in the window * the block time). + + By considering the block an Eventuality resolves not as the block it does, yet the block a window + later, we enable the following flow: + + - The scanner scans within its window, submitting blocks for acknowledgement. + - We have the blocks acknowledged (the consensus protocol handling this in parallel). + - The scanner checks for Eventualities completed following acknowledged blocks. 
+ - If all Eventualities for a retiring multisig have been cleared, the notable block is one window + later. + - The start of the window shifts to the last block we've checked for Eventualities. This means + the end of the window is the block we just set as notable, and yes, once that's scanned we can + successfully publish a batch for it in a canonical fashion. + + This forms a backlog only if the latency of scanning, acknowledgement, and intake (including + checking Eventualities) exceeds the window duration (the desired property). +*/ +pub(crate) struct EventualityTask> { + db: D, + feed: S, + scheduler: Sch, +} + +impl> EventualityTask { + pub(crate) fn new(mut db: D, feed: S, scheduler: Sch, start_block: u64) -> Self { + if EventualityDb::::next_to_check_for_eventualities_block(&db).is_none() { + // Initialize the DB + let mut txn = db.txn(); + EventualityDb::::set_next_to_check_for_eventualities_block(&mut txn, start_block); + txn.commit(); + } + + Self { db, feed, scheduler } + } + + #[allow(clippy::type_complexity)] + fn keys_and_keys_with_stages( + &self, + block_number: u64, + ) -> (Vec>>, Vec<(KeyFor, LifetimeStage)>) { + /* + This is proper as the keys for the next-to-scan block (at most `WINDOW_LENGTH` ahead) will be + the keys to use here, with only minor edge cases. + + This may include a key which has yet to activate by our perception. We can simply drop + those. + + This may not include a key which has retired by the next-to-scan block. This task is the + one which decides when to retire a key, and when it marks a key to be retired, it is done + with it. Accordingly, it's not an issue if such a key was dropped. + + This also may include a key we've retired which has yet to officially retire. That's fine as + we'll do nothing with it, and the Scheduler traits document this behavior. + */ + let mut keys = ScannerGlobalDb::::active_keys_as_of_next_to_scan_for_outputs_block(&self.db) + .expect("scanning for a blockchain without any keys set"); + // Since the next-to-scan block is ahead of us, drop keys which have yet to actually activate + keys.retain(|key| block_number <= key.activation_block_number); + let keys_with_stages = keys.iter().map(|key| (key.key, key.stage)).collect::>(); + + (keys, keys_with_stages) + } + + // Returns a boolean of if we intaked any Burns. 
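+  //
+  // Burns are drained from the `SubstrateToEventualityDb` channel keyed by the latest handled
+  // notable block, converted into `Payment`s, and handed to the scheduler's `fulfill`, with any
+  // resulting Eventualities intaken into the database.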
+ async fn intake_burns(&mut self) -> Result { + let mut intaked_any = false; + + // If we've handled an notable block, we may have Burns being queued with it as the reference + if let Some(latest_handled_notable_block) = + EventualityDb::::latest_handled_notable_block(&self.db) + { + // We always intake Burns per this block as it's the block we have consensus on + // We would have a consensus failure if some thought the change should be the old key and + // others the new key + let (_keys, keys_with_stages) = self.keys_and_keys_with_stages(latest_handled_notable_block); + + let block = self.feed.block_by_number(&self.db, latest_handled_notable_block).await?; + + let mut txn = self.db.txn(); + // Drain the entire channel + while let Some(burns) = + SubstrateToEventualityDb::try_recv_burns(&mut txn, latest_handled_notable_block) + { + intaked_any = true; + + let new_eventualities = self + .scheduler + .fulfill( + &mut txn, + &block, + &keys_with_stages, + burns + .into_iter() + .filter_map(|burn| Payment::>::try_from(burn).ok()) + .collect(), + ) + .await + .map_err(|e| format!("failed to queue fulfilling payments: {e:?}"))?; + intake_eventualities::(&mut txn, new_eventualities); + } + txn.commit(); + } + + Ok(intaked_any) + } +} + +impl> ContinuallyRan for EventualityTask { + fn run_iteration(&mut self) -> impl Send + Future> { + async move { + // Fetch the highest acknowledged block + let Some(highest_acknowledged) = ScannerGlobalDb::::highest_acknowledged_block(&self.db) + else { + // If we've never acknowledged a block, return + return Ok(false); + }; + + // A boolean of if we've made any progress to return at the end of the function + let mut made_progress = false; + + // Start by intaking any Burns we have sitting around + // It's important we run this regardless of if we have a new block to handle + made_progress |= self.intake_burns().await?; + + /* + Eventualities increase upon one of two cases: + + 1) We're fulfilling Burns + 2) We acknowledged a block + + We can't know the processor has intaked all Burns it should have when we process block `b`. + We solve this by executing a consensus protocol whenever a resolution for an Eventuality + created to fulfill Burns occurs. Accordingly, we force ourselves to obtain synchrony on + such blocks (and all preceding Burns). + + This means we can only iterate up to the block currently pending acknowledgement. + + We only know blocks will need acknowledgement *for sure* if they were scanned. The only + other causes are key activation and retirement (both scheduled outside the scan window). + This makes the exclusive upper bound the *next block to scan*. + */ + let exclusive_upper_bound = { + // Fetch the next to scan block + let next_to_scan = next_to_scan_for_outputs_block::(&self.db) + .expect("EventualityTask run before writing the start block"); + // If we haven't done any work, return + if next_to_scan == 0 { + return Ok(false); + } + next_to_scan + }; + + // Fetch the next block to check + let next_to_check = EventualityDb::::next_to_check_for_eventualities_block(&self.db) + .expect("EventualityTask run before writing the start block"); + + // Check all blocks + for b in next_to_check .. exclusive_upper_bound { + let is_block_notable = ScannerGlobalDb::::is_block_notable(&self.db, b); + if is_block_notable { + /* + If this block is notable *and* not acknowledged, break. + + This is so if Burns queued prior to this block's acknowledgement caused any + Eventualities (which may resolve this block), we have them. 
If it wasn't for that, it'd + be so if this block's acknowledgement caused any Eventualities, we have them, though + those would only potentially resolve in the next block (letting us scan this block + without delay). + */ + if b > highest_acknowledged { + break; + } + + // Since this block is notable, ensure we've intaked all the Burns preceding it + // We can know with certainty that the channel is fully populated at this time since + // we've acknowledged a newer block (so we've handled the state up to this point and any + // new state will be for the newer block) + #[allow(unused_assignments)] + { + made_progress |= self.intake_burns().await?; + } + } + + // Since we're handling this block, we are making progress + made_progress = true; + + let block = self.feed.block_by_number(&self.db, b).await?; + + log::debug!("checking eventuality completions in block: {} ({b})", hex::encode(block.id())); + + let (keys, keys_with_stages) = self.keys_and_keys_with_stages(b); + let latest_active_key = { + let mut keys_with_stages = keys_with_stages.clone(); + loop { + // Use the most recent key + let (key, stage) = keys_with_stages.pop().unwrap(); + // Unless this key is active, but not yet reporting + if stage == LifetimeStage::ActiveYetNotReporting { + continue; + } + break key; + } + }; + + let mut txn = self.db.txn(); + + // Fetch the data from the scanner + let scan_data = ScanToEventualityDb::recv_scan_data(&mut txn, b); + assert_eq!(scan_data.block_number, b); + let ReceiverScanData { block_number: _, received_external_outputs, forwards, returns } = + scan_data; + let mut outputs = received_external_outputs; + + for key in &keys { + // If this is the key's activation block, activate it + if key.activation_block_number == b { + Sch::activate_key(&mut txn, key.key); + } + + let completed_eventualities = { + let mut eventualities = EventualityDb::::eventualities(&txn, key.key); + let completed_eventualities = + block.check_for_eventuality_resolutions(&mut eventualities); + EventualityDb::::set_eventualities(&mut txn, key.key, &eventualities); + completed_eventualities + }; + + for (tx, eventuality) in &completed_eventualities { + log::info!( + "eventuality {} resolved by {}", + hex::encode(eventuality.id()), + hex::encode(tx.as_ref()) + ); + CompletedEventualities::send(&mut txn, &key.key, eventuality.id()); + } + + // Fetch all non-External outputs + let mut non_external_outputs = block.scan_for_outputs(latest_active_key, key.key); + non_external_outputs.retain(|output| output.kind() != OutputType::External); + // Drop any outputs less than the dust limit + non_external_outputs.retain(|output| { + let balance = output.balance(); + balance.amount.0 >= S::dust(balance.coin).0 + }); + + /* + Now that we have all non-External outputs, we filter them to be only the outputs which + are from transactions which resolve our own Eventualities *if* the multisig is retiring. + This implements step 6 of `spec/processor/Multisig Rotation.md`. + + We may receive a Change output. The only issue with accumulating this would be if it + extends the multisig's lifetime (by increasing the amount of outputs yet to be + forwarded). 
By checking it's one we made, either: + 1) It's a legitimate Change output to be forwarded + 2) It's a Change output created by a user burning coins (specifying the Change address), + which can only be created while the multisig is actively handling `Burn`s (therefore + ensuring this multisig cannot be kept alive ad-infinitum) + + The commentary on Change outputs also applies to Branch/Forwarded. They'll presumably + get ignored if not usable however. + */ + if key.stage == LifetimeStage::Finishing { + non_external_outputs + .retain(|output| completed_eventualities.contains_key(&output.transaction_id())); + } + + // Finally, for non-External outputs we didn't make, we check they're worth more than the + // cost to aggregate them to avoid some profitable spam attacks by malicious miners + { + // Fetch and cache the costs to aggregate as this call may be expensive + let coins = non_external_outputs + .iter() + .map(|output| output.balance().coin) + .collect::>(); + let mut costs_to_aggregate = HashMap::new(); + for coin in coins { + costs_to_aggregate.insert( + coin, + self.feed.cost_to_aggregate(coin, &block).await.map_err(|e| { + format!("EventualityTask couldn't fetch cost to aggregate {coin:?} at {b}: {e:?}") + })?, + ); + } + + // Only retain out outputs/outputs sufficiently worthwhile + non_external_outputs.retain(|output| { + completed_eventualities.contains_key(&output.transaction_id()) || { + let balance = output.balance(); + balance.amount.0 >= (2 * costs_to_aggregate[&balance.coin].0) + } + }); + } + + // Now, we iterate over all Forwarded outputs and queue their InInstructions + for output in + non_external_outputs.iter().filter(|output| output.kind() == OutputType::Forwarded) + { + let Some(eventuality) = completed_eventualities.get(&output.transaction_id()) else { + // Output sent to the forwarding address yet not one we made + continue; + }; + let Some(forwarded) = eventuality.singular_spent_output() else { + // This was a TX made by us, yet someone burned to the forwarding address as it + // doesn't follow the structure of forwarding transactions + continue; + }; + + let Some((return_address, mut in_instruction)) = + ScannerGlobalDb::::return_address_and_in_instruction_for_forwarded_output( + &txn, &forwarded, + ) + else { + // This was a TX made by us, coincidentally with the necessary structure, yet wasn't + // forwarding an output + continue; + }; + + // We use the original amount, minus twice the cost to aggregate + // If the fees we paid to forward this now (less than the cost to aggregate now, yet not + // necessarily the cost to aggregate historically) caused this amount to be less, reduce + // it accordingly + in_instruction.balance.amount.0 = + in_instruction.balance.amount.0.min(output.balance().amount.0); + + queue_output_until_block::( + &mut txn, + b + S::WINDOW_LENGTH, + &OutputWithInInstruction { output: output.clone(), return_address, in_instruction }, + ); + } + + // Accumulate all of these outputs + outputs.extend(non_external_outputs); + } + + // Update the scheduler + { + let mut scheduler_update = SchedulerUpdate { outputs, forwards, returns }; + scheduler_update.outputs.sort_by(sort_outputs); + scheduler_update.forwards.sort_by(sort_outputs); + scheduler_update.returns.sort_by(|a, b| sort_outputs(&a.output, &b.output)); + + let empty = { + let a: core::slice::Iter<'_, OutputFor> = scheduler_update.outputs.iter(); + let b: core::slice::Iter<'_, OutputFor> = scheduler_update.forwards.iter(); + let c = + 
scheduler_update.returns.iter().map(|output_to_return| &output_to_return.output); + let mut all_outputs = a.chain(b).chain(c).peekable(); + + // If we received any output, sanity check this block is notable + let empty = all_outputs.peek().is_none(); + if !empty { + assert!(is_block_notable, "accumulating output(s) in non-notable block"); + } + + // Sanity check we've never accumulated these outputs before + for output in all_outputs { + assert!( + !EventualityDb::::prior_accumulated_output(&txn, &output.id()), + "prior accumulated an output with this ID" + ); + EventualityDb::::accumulated_output(&mut txn, &output.id()); + } + + empty + }; + + if !empty { + // Accumulate the outputs + /* + This uses the `keys_with_stages` for the current block, yet this block is notable. + Accordingly, all future intaked Burns will use at least this block when determining + what LifetimeStage a key is. That makes the LifetimeStage monotonically incremented. + If this block wasn't notable, we'd potentially intake Burns with the LifetimeStage + determined off an earlier block than this (enabling an earlier LifetimeStage to be + used after a later one was already used). + */ + let new_eventualities = self + .scheduler + .update(&mut txn, &block, &keys_with_stages, scheduler_update) + .await + .map_err(|e| format!("failed to update scheduler: {e:?}"))?; + // Intake the new Eventualities + for key in new_eventualities.keys() { + keys + .iter() + .find(|serai_key| serai_key.key.to_bytes().as_ref() == key.as_slice()) + .expect("intaking Eventuality for key which isn't active"); + } + intake_eventualities::(&mut txn, new_eventualities); + } + } + + for key in &keys { + // If this is the block at which forwarding starts for this key, flush it + // We do this after we issue the above update for any efficiencies gained by doing so + if key.block_at_which_forwarding_starts == Some(b) { + assert!( + key.key != keys.last().unwrap().key, + "key which was forwarding was the last key (which has no key after it to forward to)" + ); + let new_eventualities = self + .scheduler + .flush_key(&mut txn, &block, key.key, keys.last().unwrap().key) + .await + .map_err(|e| format!("failed to flush key from scheduler: {e:?}"))?; + intake_eventualities::(&mut txn, new_eventualities); + } + + // Now that we've intaked any Eventualities caused, check if we're retiring any keys + if key.stage == LifetimeStage::Finishing { + let eventualities = EventualityDb::::eventualities(&txn, key.key); + if eventualities.active_eventualities.is_empty() { + log::info!( + "key {} has finished and is being retired", + hex::encode(key.key.to_bytes().as_ref()) + ); + + // Retire this key `WINDOW_LENGTH` blocks in the future to ensure the scan task never + // has a malleable view of the keys. 
+ ScannerGlobalDb::::retire_key(&mut txn, b + S::WINDOW_LENGTH, key.key); + + // We tell the scheduler to retire it now as we're done with it, and this fn doesn't + // require it be called with a canonical order + Sch::retire_key(&mut txn, key.key); + } + } + } + + // Update the next-to-check block + EventualityDb::::set_next_to_check_for_eventualities_block(&mut txn, next_to_check); + + // If this block was notable, update the latest-handled notable block + if is_block_notable { + EventualityDb::::set_latest_handled_notable_block(&mut txn, b); + } + + txn.commit(); + } + + // Run dependents if we successfully checked any blocks + Ok(made_progress) + } + } +} diff --git a/processor/scanner/src/index/db.rs b/processor/scanner/src/index/db.rs new file mode 100644 index 000000000..9254f9bcb --- /dev/null +++ b/processor/scanner/src/index/db.rs @@ -0,0 +1,28 @@ +use serai_db::{Get, DbTxn, create_db}; + +create_db!( + ScannerIndex { + // A lookup of a block's number to its ID + BlockId: (number: u64) -> [u8; 32], + + // The latest finalized block to appear on the blockchain + LatestFinalizedBlock: () -> u64, + } +); + +pub(crate) struct IndexDb; +impl IndexDb { + pub(crate) fn set_block(txn: &mut impl DbTxn, number: u64, id: [u8; 32]) { + BlockId::set(txn, number, &id); + } + pub(crate) fn block_id(getter: &impl Get, number: u64) -> Option<[u8; 32]> { + BlockId::get(getter, number) + } + + pub(crate) fn set_latest_finalized_block(txn: &mut impl DbTxn, latest_finalized_block: u64) { + LatestFinalizedBlock::set(txn, &latest_finalized_block); + } + pub(crate) fn latest_finalized_block(getter: &impl Get) -> Option { + LatestFinalizedBlock::get(getter) + } +} diff --git a/processor/scanner/src/index/mod.rs b/processor/scanner/src/index/mod.rs new file mode 100644 index 000000000..03abc8a81 --- /dev/null +++ b/processor/scanner/src/index/mod.rs @@ -0,0 +1,115 @@ +use core::future::Future; + +use serai_db::{Get, DbTxn, Db}; +use primitives::{task::ContinuallyRan, BlockHeader}; + +use crate::ScannerFeed; + +mod db; +use db::IndexDb; + +/// Panics if an unindexed block's ID is requested. +pub(crate) fn block_id(getter: &impl Get, block_number: u64) -> [u8; 32] { + IndexDb::block_id(getter, block_number) + .unwrap_or_else(|| panic!("requested block ID for unindexed block {block_number}")) +} + +/* + This processor should build its own index of the blockchain, yet only for finalized blocks which + are safe to process. For Proof of Work blockchains, which only have probabilistic finality, these + are the set of sufficiently confirmed blocks. For blockchains with finality, these are the + finalized blocks. + + This task finds the finalized blocks, verifies they're continguous, and saves their IDs. 
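+
+  If a newly fetched block doesn't build on the block previously indexed for the prior number,
+  this task panics, as that implies a reorganization of supposedly finalized blocks.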
+*/ +pub(crate) struct IndexTask { + db: D, + feed: S, +} + +impl IndexTask { + pub(crate) async fn new(mut db: D, feed: S, start_block: u64) -> Self { + if IndexDb::block_id(&db, start_block).is_none() { + // Fetch the block for its ID + let block = { + let mut delay = Self::DELAY_BETWEEN_ITERATIONS; + loop { + match feed.unchecked_block_header_by_number(start_block).await { + Ok(block) => break block, + Err(e) => { + log::warn!("IndexTask couldn't fetch start block {start_block}: {e:?}"); + tokio::time::sleep(core::time::Duration::from_secs(delay)).await; + delay += Self::DELAY_BETWEEN_ITERATIONS; + delay = delay.min(Self::MAX_DELAY_BETWEEN_ITERATIONS); + } + }; + } + }; + + // Initialize the DB + let mut txn = db.txn(); + IndexDb::set_block(&mut txn, start_block, block.id()); + IndexDb::set_latest_finalized_block(&mut txn, start_block); + txn.commit(); + } + + Self { db, feed } + } +} + +impl ContinuallyRan for IndexTask { + fn run_iteration(&mut self) -> impl Send + Future> { + async move { + // Fetch the latest finalized block + let our_latest_finalized = IndexDb::latest_finalized_block(&self.db) + .expect("IndexTask run before writing the start block"); + let latest_finalized = match self.feed.latest_finalized_block_number().await { + Ok(latest_finalized) => latest_finalized, + Err(e) => Err(format!("couldn't fetch the latest finalized block number: {e:?}"))?, + }; + + if latest_finalized < our_latest_finalized { + // Explicitly log this as an error as returned ephemeral errors are logged with debug + // This doesn't panic as the node should sync along our indexed chain, and if it doesn't, + // we'll panic at that point in time + log::error!( + "node is out of sync, latest finalized {} is behind our indexed {}", + latest_finalized, + our_latest_finalized + ); + Err("node is out of sync".to_string())?; + } + + // Index the hashes of all blocks until the latest finalized block + for b in (our_latest_finalized + 1) ..= latest_finalized { + let block = match self.feed.unchecked_block_header_by_number(b).await { + Ok(block) => block, + Err(e) => Err(format!("couldn't fetch block {b}: {e:?}"))?, + }; + + // Check this descends from our indexed chain + { + let expected_parent = + IndexDb::block_id(&self.db, b - 1).expect("didn't have the ID of the prior block"); + if block.parent() != expected_parent { + panic!( + "current finalized block (#{b}, {}) doesn't build off finalized block (#{}, {})", + hex::encode(block.parent()), + b - 1, + hex::encode(expected_parent) + ); + } + } + + // Update the latest finalized block + let mut txn = self.db.txn(); + IndexDb::set_block(&mut txn, b, block.id()); + IndexDb::set_latest_finalized_block(&mut txn, b); + txn.commit(); + } + + // Have dependents run if we updated the latest finalized block + Ok(our_latest_finalized != latest_finalized) + } + } +} diff --git a/processor/scanner/src/lib.rs b/processor/scanner/src/lib.rs new file mode 100644 index 000000000..5046753cf --- /dev/null +++ b/processor/scanner/src/lib.rs @@ -0,0 +1,507 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +use core::{marker::PhantomData, future::Future, fmt::Debug}; +use std::{io, collections::HashMap}; + +use group::GroupEncoding; + +use borsh::{BorshSerialize, BorshDeserialize}; +use serai_db::{Get, DbTxn, Db}; + +use serai_primitives::{NetworkId, Coin, Amount}; +use serai_coins_primitives::OutInstructionWithBalance; + +use primitives::{task::*, Address, ReceivedOutput, Block, Payment}; + +// Logic for deciding 
where in its lifetime a multisig is. +mod lifetime; +pub use lifetime::LifetimeStage; + +// Database schema definition and associated functions. +mod db; +use db::ScannerGlobalDb; +pub use db::{Batches, BatchesToSign, AcknowledgedBatches, CompletedEventualities}; +// Task to index the blockchain, ensuring we don't reorganize finalized blocks. +mod index; +// Scans blocks for received coins. +mod scan; +/// Task which reports Batches to Substrate. +mod report; +/// Task which handles events from Substrate once we can. +mod substrate; +/// Check blocks for transactions expected to eventually occur. +mod eventuality; + +pub(crate) fn sort_outputs>( + a: &O, + b: &O, +) -> core::cmp::Ordering { + use core::cmp::{Ordering, Ord}; + let res = a.id().as_ref().cmp(b.id().as_ref()); + assert!(res != Ordering::Equal, "two outputs within a collection had the same ID"); + res +} + +/// Extension traits around Block. +pub(crate) trait BlockExt: Block { + fn scan_for_outputs(&self, latest_active_key: Self::Key, key: Self::Key) -> Vec; +} +impl BlockExt for B { + fn scan_for_outputs(&self, latest_active_key: Self::Key, key: Self::Key) -> Vec { + let mut outputs = self.scan_for_outputs_unordered(latest_active_key, key); + outputs.sort_by(sort_outputs); + outputs + } +} + +/// A feed usable to scan a blockchain. +/// +/// This defines the primitive types used, along with various getters necessary for indexing. +pub trait ScannerFeed: 'static + Send + Sync + Clone { + /// The ID of the network being scanned for. + const NETWORK: NetworkId; + + /// The amount of confirmations a block must have to be considered finalized. + /// + /// This value must be at least `1`. + // This is distinct from `WINDOW_LENGTH` as it's only used for determining the lifetime of the + // key. The key switches to various stages of its lifetime depending on when user transactions + // will hit the Serai network (relative to the time they're made) and when outputs created by + // Serai become available again. If we set a long WINDOW_LENGTH, say two hours, that doesn't mean + // we expect user transactions made within a few minutes of a new key being declared to only + // appear in finalized blocks two hours later. + const CONFIRMATIONS: u64; + + /// The amount of blocks to process in parallel. + /// + /// This must be at least `1`. This value MUST be at least the worst-case latency to publish a + /// Batch for a block divided by the expected block time. Setting this value too low will risk a + /// backlog forming. Setting this value too high will only delay key rotation and forwarded + /// outputs. + // The latency to publish a Batch for a block is the latency of a provided transaction + // (1 minute), the latency of a signing protocol (1 minute), the latency of Serai to finalize a + // block (1 minute), and the latency to cosign such a block (5 minutes for the cosign distance + // plus 1 minute). Accordingly, this should be at least ~30 minutes, ideally 60 minutes. + const WINDOW_LENGTH: u64; + + /// The amount of blocks which will occur in 10 minutes (approximate). + /// + /// This value must be at least `1`. + const TEN_MINUTES: u64; + + /// The representation of a block for this blockchain. + /// + /// A block is defined as a consensus event associated with a set of transactions. It is not + /// necessary to literally define it as whatever the external network defines as a block. For + /// external networks which finalize block(s), this block type should be a representation of all + /// transactions within a finalization event. 
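`sort_outputs` above gives outputs a deterministic order by their IDs and treats a duplicate ID as a logic error. A self-contained sketch of the same comparator over a toy output type (the `Output` struct here is illustrative only):

```rust
use core::cmp::Ordering;

/// Toy output carrying only an ID; the real type is the network's received-output type.
struct Output {
  id: [u8; 4],
}

/// Deterministically order outputs by ID, treating duplicate IDs as a logic error.
fn sort_outputs(a: &Output, b: &Output) -> Ordering {
  let res = a.id.as_ref().cmp(b.id.as_ref());
  assert!(res != Ordering::Equal, "two outputs within a collection had the same ID");
  res
}

fn main() {
  let mut outputs = vec![Output { id: *b"bbbb" }, Output { id: *b"aaaa" }];
  // Sort into a canonical order regardless of how the outputs were discovered
  outputs.sort_by(sort_outputs);
  assert_eq!(outputs[0].id, *b"aaaa");
}
```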
+ type Block: Block; + + /// An error encountered when fetching data from the blockchain. + /// + /// This MUST be an ephemeral error. Retrying fetching data from the blockchain MUST eventually + /// resolve without manual intervention/changing the arguments. + type EphemeralError: Debug; + + /// Fetch the number of the latest finalized block. + /// + /// The block number is its zero-indexed position within a linear view of the external network's + /// consensus. The genesis block accordingly has block number 0. + fn latest_finalized_block_number( + &self, + ) -> impl Send + Future>; + + /// Fetch the timestamp of a block (represented in seconds since the epoch). + /// + /// This must be monotonically incrementing. Two blocks may share a timestamp. + fn time_of_block( + &self, + number: u64, + ) -> impl Send + Future>; + + /// Fetch a block header by its number. + /// + /// This does not check the returned BlockHeader is the header for the block we indexed. + fn unchecked_block_header_by_number( + &self, + number: u64, + ) -> impl Send + Future::Header, Self::EphemeralError>>; + + /// Fetch a block by its number. + /// + /// This does not check the returned Block is the block we indexed. + fn unchecked_block_by_number( + &self, + number: u64, + ) -> impl Send + Future>; + + /// Fetch a block by its number. + /// + /// Panics if the block requested wasn't indexed. + fn block_by_number( + &self, + getter: &(impl Send + Sync + Get), + number: u64, + ) -> impl Send + Future> { + async move { + let block = match self.unchecked_block_by_number(number).await { + Ok(block) => block, + Err(e) => Err(format!("couldn't fetch block {number}: {e:?}"))?, + }; + + // Check the ID of this block is the expected ID + { + let expected = crate::index::block_id(getter, number); + if block.id() != expected { + panic!( + "finalized chain reorganized from {} to {} at {}", + hex::encode(expected), + hex::encode(block.id()), + number, + ); + } + } + + Ok(block) + } + } + + /// The dust threshold for the specified coin. + /// + /// This MUST be constant. Serai MUST NOT create internal outputs worth less than this. This + /// SHOULD be a value worth handling at a human level. + fn dust(coin: Coin) -> Amount; + + /// The cost to aggregate an input as of the specified block. + /// + /// This is defined as the transaction fee for a 2-input, 1-output transaction. + fn cost_to_aggregate( + &self, + coin: Coin, + reference_block: &Self::Block, + ) -> impl Send + Future>; +} + +/// The key type for this ScannerFeed. +pub type KeyFor = <::Block as Block>::Key; +/// The address type for this ScannerFeed. +pub type AddressFor = <::Block as Block>::Address; +/// The output type for this ScannerFeed. +pub type OutputFor = <::Block as Block>::Output; +/// The eventuality type for this ScannerFeed. +pub type EventualityFor = <::Block as Block>::Eventuality; +/// The block type for this ScannerFeed. +pub type BlockFor = ::Block; + +/// A return to occur. +pub struct Return { + address: AddressFor, + output: OutputFor, +} + +impl Return { + pub(crate) fn write(&self, writer: &mut impl io::Write) -> io::Result<()> { + self.address.serialize(writer)?; + self.output.write(writer) + } + + pub(crate) fn read(reader: &mut impl io::Read) -> io::Result { + let address = AddressFor::::deserialize_reader(reader)?; + let output = OutputFor::::read(reader)?; + Ok(Return { address, output }) + } + + /// The address to return the output to. + pub fn address(&self) -> &AddressFor { + &self.address + } + + /// The output to return. 
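The `KeyFor`/`AddressFor`/`OutputFor`/`EventualityFor` aliases above project the per-network types out of a `ScannerFeed`'s `Block`, so callers never have to spell out the full associated-type path. Below is a minimal sketch of that projection pattern with toy traits; the traits and bounds are simplified stand-ins, not the crate's definitions.

```rust
/// Toy stand-ins for the crate's `Block` and `ScannerFeed` traits.
trait Block {
  type Key;
  type Address;
  type Output;
}

trait ScannerFeed {
  type Block: Block;
}

// Aliases projecting the per-network types out of the feed
type KeyFor<S: ScannerFeed> = <<S as ScannerFeed>::Block as Block>::Key;
type AddressFor<S: ScannerFeed> = <<S as ScannerFeed>::Block as Block>::Address;
type OutputFor<S: ScannerFeed> = <<S as ScannerFeed>::Block as Block>::Output;

// Example instantiation for one hypothetical network
struct ToyBlock;
impl Block for ToyBlock {
  type Key = [u8; 32];
  type Address = String;
  type Output = Vec<u8>;
}

struct ToyFeed;
impl ScannerFeed for ToyFeed {
  type Block = ToyBlock;
}

fn main() {
  // `KeyFor<ToyFeed>` resolves to `[u8; 32]` without callers spelling out the projection
  let _key: KeyFor<ToyFeed> = [0; 32];
  let _address: AddressFor<ToyFeed> = String::from("toy");
  let _output: OutputFor<ToyFeed> = vec![];
}
```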
+ pub fn output(&self) -> &OutputFor { + &self.output + } +} + +/// An update for the scheduler. +pub struct SchedulerUpdate { + outputs: Vec>, + forwards: Vec>, + returns: Vec>, +} + +impl SchedulerUpdate { + /// The outputs to accumulate. + /// + /// These MUST be accumulated. + pub fn outputs(&self) -> &[OutputFor] { + &self.outputs + } + + /// The outputs to forward to the latest multisig. + /// + /// These MUST be forwarded in a 1-input 1-output transaction or dropped (if the fees are too + /// high to make the forwarding transaction). + pub fn forwards(&self) -> &[OutputFor] { + &self.forwards + } + + /// The outputs to return. + /// + /// These SHOULD be returned as specified (potentially in batch). They MAY be dropped if the fees + /// are too high to make the return transaction. + pub fn returns(&self) -> &[Return] { + &self.returns + } +} + +/// Eventualities, keyed by the encoding of the key the Eventualities are for. +pub type KeyScopedEventualities = HashMap, Vec>>; + +/// The object responsible for accumulating outputs and planning new transactions. +// TODO: Move this to Scheduler primitives +pub trait Scheduler: 'static + Send { + /// An error encountered when handling updates/payments. + /// + /// This MUST be an ephemeral error. Retrying handling updates/payments MUST eventually + /// resolve without manual intervention/changing the arguments. + type EphemeralError: Debug; + + /// The type for a signable transaction. + type SignableTransaction: scheduler_primitives::SignableTransaction; + + /// Activate a key. + /// + /// This SHOULD setup any necessary database structures. This SHOULD NOT cause the new key to + /// be used as the primary key. The multisig rotation time clearly establishes its steps. + fn activate_key(txn: &mut impl DbTxn, key: KeyFor); + + /// Flush all outputs within a retiring key to the new key. + /// + /// When a key is activated, the existing multisig should retain its outputs and utility for a + /// certain time period. With `flush_key`, all outputs should be directed towards fulfilling some + /// obligation or the `new_key`. Every output held by the retiring key MUST be connected to an + /// Eventuality. If a key no longer has active Eventualities, it MUST be able to be retired + /// without losing any coins. + /// + /// If the retiring key has any unfulfilled payments associated with it, those MUST be made + /// the responsibility of the new key. + fn flush_key( + &self, + txn: &mut impl DbTxn, + block: &BlockFor, + retiring_key: KeyFor, + new_key: KeyFor, + ) -> impl Send + Future, Self::EphemeralError>>; + + /// Retire a key as it'll no longer be used. + /// + /// Any key retired MUST NOT still have outputs associated with it. This SHOULD be a NOP other + /// than any assertions and database cleanup. This MUST NOT be expected to be called in a fashion + /// ordered to any other calls. + fn retire_key(txn: &mut impl DbTxn, key: KeyFor); + + /// Accumulate outputs into the scheduler, yielding the Eventualities now to be scanned for. + /// + /// `active_keys` is the list of active keys, potentially including a key for which we've already + /// called `retire_key` on. If so, its stage will be `Finishing` and no further operations will + /// be expected for it. Nonetheless, it may be present. + /// + /// The `Vec` used as the key in the returned HashMap should be the encoded key the + /// Eventualities are for. 
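`KeyScopedEventualities` above keys Eventualities by the encoding of the key they're for, and the Eventuality task only intakes them for keys it considers active. A small sketch of that merge-and-check, with a toy `Eventuality` and in-memory storage in place of the database (all names illustrative):

```rust
use std::collections::HashMap;

/// Toy eventuality; the real type is the network's expected-transaction representation.
struct Eventuality {
  id: [u8; 32],
}

/// Eventualities keyed by the encoding of the key they're for.
type KeyScopedEventualities = HashMap<Vec<u8>, Vec<Eventuality>>;

/// Intake newly created Eventualities, requiring every key they're under is an active key.
fn intake_eventualities(
  active_keys: &[Vec<u8>],
  existing: &mut KeyScopedEventualities,
  new: KeyScopedEventualities,
) {
  for (key, eventualities) in new {
    // Panic if the scheduler returned Eventualities for a key we don't consider active
    assert!(
      active_keys.iter().any(|active| *active == key),
      "intaking Eventuality for key which isn't active"
    );
    existing.entry(key).or_default().extend(eventualities);
  }
}

fn main() {
  let active_keys = vec![vec![1u8; 32]];
  let mut existing = KeyScopedEventualities::new();
  let mut new = KeyScopedEventualities::new();
  new.insert(vec![1u8; 32], vec![Eventuality { id: [0; 32] }]);
  intake_eventualities(&active_keys, &mut existing, new);
  assert_eq!(existing[&vec![1u8; 32]][0].id, [0; 32]);
}
```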
+ fn update( + &self, + txn: &mut impl DbTxn, + block: &BlockFor, + active_keys: &[(KeyFor, LifetimeStage)], + update: SchedulerUpdate, + ) -> impl Send + Future, Self::EphemeralError>>; + + /// Fulfill a series of payments, yielding the Eventualities now to be scanned for. + /// + /// Any Eventualities returned by this function must include an output-to-Serai (such as a Branch + /// or Change), unless they descend from a transaction returned by this function which satisfies + /// that requirement. This ensures when we scan outputs from transactions we made, we report the + /// block up to Substrate, and obtain synchrony on all prior blocks (allowing us to identify our + /// own transactions, which we may be prior unaware of due to a lagging view of Substrate). + /// + /// `active_keys` is the list of active keys, potentially including a key for which we've already + /// called `retire_key` on. If so, its stage will be `Finishing` and no further operations will + /// be expected for it. Nonetheless, it may be present. + /// + /// The `Vec` used as the key in the returned HashMap should be the encoded key the + /// Eventualities are for. + /* + We need an output-to-Serai so we can detect a block with an Eventuality completion with regards + to Burns, forcing us to ensure we have accumulated all the Burns we should by the time we + handle that block. We explicitly don't require children have this requirement as by detecting + the first resolution, we ensure we'll accumulate the Burns (therefore becoming aware of the + childrens' Eventualities, enabling recognizing their resolutions). + + This carve out enables the following: + + ------------------ Fulfillment TX ---------------------- + | Primary Output | ---------------> | New Primary Output | + ------------------ | ---------------------- + | + | ------------------------------ + |------> | Branching Output for Burns | + ------------------------------ + + Without wasting pointless Change outputs on every transaction (as there's a single parent which + has an output-to-Serai, the new primary output). + */ + fn fulfill( + &self, + txn: &mut impl DbTxn, + block: &BlockFor, + active_keys: &[(KeyFor, LifetimeStage)], + payments: Vec>>, + ) -> impl Send + Future, Self::EphemeralError>>; +} + +/// A representation of a scanner. +#[allow(non_snake_case)] +pub struct Scanner { + substrate_handle: TaskHandle, + _S: PhantomData, +} +impl Scanner { + /// Create a new scanner. + /// + /// This will begin its execution, spawning several asynchronous tasks. + /// + /// This will return None if the Scanner was never initialized. 
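`Scanner::new` below wires the tasks together so that a task which made progress wakes its dependents (index wakes scan, scan wakes report, and so on). The following is a rough sketch of that "continually run, wake dependents on progress" loop, assuming the tokio runtime this crate already uses; `TaskHandle`, `run_now`, and `continually_run` are simplified stand-ins, not the real task primitives (which also back off on repeated errors).

```rust
use std::sync::Arc;
use tokio::sync::Notify;

/// Illustrative handle to a task: `run_now` wakes it without waiting for its polling interval.
#[derive(Clone)]
struct TaskHandle(Arc<Notify>);
impl TaskHandle {
  fn run_now(&self) {
    self.0.notify_one();
  }
}

/// Continually run one iteration of work, waking dependents whenever progress was made.
async fn continually_run<F, Fut>(mut iteration: F, handle: TaskHandle, dependents: Vec<TaskHandle>)
where
  F: FnMut() -> Fut,
  Fut: core::future::Future<Output = Result<bool, String>>,
{
  loop {
    match iteration().await {
      // If this iteration made progress, the tasks downstream of us have new work to do
      Ok(true) => {
        for dependent in &dependents {
          dependent.run_now();
        }
      }
      Ok(false) => {}
      Err(e) => eprintln!("task iteration errored: {e}"),
    }
    // Sleep until either our polling interval elapses or someone explicitly wakes us
    tokio::select! {
      _ = handle.0.notified() => {},
      _ = tokio::time::sleep(core::time::Duration::from_secs(5)) => {},
    }
  }
}

#[tokio::main]
async fn main() {
  let scan = TaskHandle(Arc::new(Notify::new()));
  let report = TaskHandle(Arc::new(Notify::new()));
  // Upon the scan task making progress, wake the report task (and so on down the chain)
  tokio::spawn(continually_run(|| async { Ok::<bool, String>(true) }, scan.clone(), vec![report.clone()]));
}
```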
+ pub async fn new(db: impl Db, feed: S, scheduler: impl Scheduler) -> Option { + let start_block = ScannerGlobalDb::::start_block(&db)?; + + let index_task = index::IndexTask::new(db.clone(), feed.clone(), start_block).await; + let scan_task = scan::ScanTask::new(db.clone(), feed.clone(), start_block); + let report_task = report::ReportTask::<_, S>::new(db.clone(), start_block); + let substrate_task = substrate::SubstrateTask::<_, S>::new(db.clone()); + let eventuality_task = + eventuality::EventualityTask::<_, _, _>::new(db, feed, scheduler, start_block); + + let (index_task_def, _index_handle) = Task::new(); + let (scan_task_def, scan_handle) = Task::new(); + let (report_task_def, report_handle) = Task::new(); + let (substrate_task_def, substrate_handle) = Task::new(); + let (eventuality_task_def, eventuality_handle) = Task::new(); + + // Upon indexing a new block, scan it + tokio::spawn(index_task.continually_run(index_task_def, vec![scan_handle.clone()])); + // Upon scanning a block, report it + tokio::spawn(scan_task.continually_run(scan_task_def, vec![report_handle])); + // Upon reporting a block, we do nothing (as the burden is on Substrate which won't be + // immediately ready) + tokio::spawn(report_task.continually_run(report_task_def, vec![])); + // Upon handling an event from Substrate, we run the Eventuality task (as it's what's affected) + tokio::spawn(substrate_task.continually_run(substrate_task_def, vec![eventuality_handle])); + // Upon handling the Eventualities in a block, we run the scan task as we've advanced the + // window its allowed to scan + tokio::spawn(eventuality_task.continually_run(eventuality_task_def, vec![scan_handle])); + + Some(Self { substrate_handle, _S: PhantomData }) + } + + /// Initialize the scanner. + /// + /// This will begin its execution, spawning several asynchronous tasks. + /// + /// This passes through to `Scanner::new` if prior called. + pub async fn initialize( + mut db: impl Db, + feed: S, + scheduler: impl Scheduler, + start_block: u64, + start_key: KeyFor, + ) -> Self { + if ScannerGlobalDb::::start_block(&db).is_none() { + let mut txn = db.txn(); + ScannerGlobalDb::::set_start_block(&mut txn, start_block); + ScannerGlobalDb::::queue_key(&mut txn, start_block, start_key); + txn.commit(); + } + + Self::new(db, feed, scheduler).await.unwrap() + } + + /// Acknowledge a Batch having been published on Serai. + /// + /// This means the specified Batch was ordered on Serai in relation to Burn events, and all + /// validators have achieved synchrony on it. + /// + /// `burns` is a list of Burns to queue with the acknowledgement of this Batch for efficiency's + /// sake. Any Burns passed here MUST NOT be passed into any other call of `acknowledge_batch` nor + /// `queue_burns`. Doing so will cause them to be executed multiple times. + /// + /// The calls to this function must be ordered with regards to `queue_burns`. + pub fn acknowledge_batch( + &mut self, + mut txn: impl DbTxn, + batch_id: u32, + in_instruction_results: Vec, + burns: Vec, + key_to_activate: Option>, + ) { + log::info!("acknowledging batch {batch_id}"); + + // Queue acknowledging this block via the Substrate task + substrate::queue_acknowledge_batch::( + &mut txn, + batch_id, + in_instruction_results, + burns, + key_to_activate, + ); + // Commit this txn so this data is flushed + txn.commit(); + // Then run the Substrate task + self.substrate_handle.run_now(); + } + + /// Queue Burns. 
+ /// + /// The scanner only updates the scheduler with new outputs upon acknowledging a block. The + /// ability to fulfill Burns, and therefore their order, is dependent on the current output + /// state. This immediately sets a bound that this function is ordered with regards to + /// `acknowledge_batch`. + /// + /// The Burns specified here MUST NOT also be passed to `acknowledge_batch`. + /* + The fact Burns can be queued during any Substrate block is problematic. The scanner is allowed + to scan anything within the window set by the Eventuality task. The Eventuality task is allowed + to handle all blocks until it reaches a block needing acknowledgement. + + This means we may queue Burns when the latest acknowledged block is 1, yet we've already + scanned 101. Such Burns may complete back in block 2, and we simply wouldn't have noticed due + to not having yet generated the Eventualities. + + We solve this by mandating all transactions made as the result of an Eventuality include a + output-to-Serai worth at least `DUST`. If that occurs, the scanner will force a consensus + protocol on block 2. Accordingly, we won't scan all the way to block 101 (missing the + resolution of the Eventuality) as we'll obtain synchrony on block 2 and all Burns queued prior + to it. + + Another option would be to re-check historical blocks, yet this would potentially redo an + unbounded amount of work. It would also not allow us to safely detect if received outputs were + in fact the result of Eventualities or not. + + Another option would be to schedule Burns after the next-acknowledged block, yet this would add + latency and likely practically require we add regularly scheduled notable blocks (which may be + unnecessary). + */ + pub fn queue_burns(&mut self, mut txn: impl DbTxn, burns: Vec) { + if burns.is_empty() { + return; + } + + // Queue queueing these burns via the Substrate task + substrate::queue_queue_burns::(&mut txn, burns); + // Commit this txn so this data is flushed + txn.commit(); + // Then run the Substrate task + self.substrate_handle.run_now(); + } +} diff --git a/processor/scanner/src/lifetime.rs b/processor/scanner/src/lifetime.rs new file mode 100644 index 000000000..e07f5f420 --- /dev/null +++ b/processor/scanner/src/lifetime.rs @@ -0,0 +1,134 @@ +use crate::ScannerFeed; + +/// An enum representing the stage of a multisig within its lifetime. +/// +/// This corresponds to `spec/processor/Multisig Rotation.md`, which details steps 1-8 of the +/// rotation process. Steps 7-8 regard a multisig which isn't retiring yet retired, and +/// accordingly, no longer exists, so they are not modelled here (as this only models active +/// multisigs. Inactive multisigs aren't represented in the first place). +#[derive(Clone, Copy, PartialEq, Debug)] +pub enum LifetimeStage { + /// A new multisig, once active, shouldn't actually start receiving coins until several blocks + /// later. If any UI is premature in sending to this multisig, we delay to report the outputs to + /// prevent some DoS concerns. + /// + /// This represents steps 1-3 for a new multisig. + ActiveYetNotReporting, + /// Active with all outputs being reported on-chain. + /// + /// This represents step 4 onwards for a new multisig. + Active, + /// Retiring with all outputs being reported on-chain. + /// + /// This represents step 4 for a retiring multisig. + UsingNewForChange, + /// Retiring with outputs being forwarded, reported on-chain once forwarded. + /// + /// This represents step 5 for a retiring multisig. 
+ Forwarding, + /// Retiring with only existing obligations being handled. + /// + /// This represents step 6 for a retiring multisig. + /// + /// Steps 7 and 8 are represented by the retiring multisig no longer existing, and these states + /// are only for multisigs which actively exist. + Finishing, +} + +/// The lifetime of the multisig, including various block numbers. +pub(crate) struct Lifetime { + pub(crate) stage: LifetimeStage, + pub(crate) block_at_which_reporting_starts: u64, + // This is only Some if the next key's activation block number is passed to calculate, and the + // stage is at least `LifetimeStage::Active`. + pub(crate) block_at_which_forwarding_starts: Option<u64>, +} + +impl Lifetime { + /// Get the lifetime of this multisig. + /// + /// Panics if the multisig being calculated for isn't actually active and a variety of other + /// insane cases. + pub(crate) fn calculate<S: ScannerFeed>( + block_number: u64, + activation_block_number: u64, + next_keys_activation_block_number: Option<u64>, + ) -> Self { + assert!( + activation_block_number <= block_number, + "calculating lifetime stage for an inactive multisig" + ); + // This is exclusive, not inclusive, since we want a CONFIRMATIONS + 10 minutes window and the + // activation block itself is the first block within this window + let active_yet_not_reporting_end_block = + activation_block_number + S::CONFIRMATIONS + S::TEN_MINUTES; + // The exclusive end block is the inclusive start block + let block_at_which_reporting_starts = active_yet_not_reporting_end_block; + if block_number < active_yet_not_reporting_end_block { + return Lifetime { + stage: LifetimeStage::ActiveYetNotReporting, + block_at_which_reporting_starts, + block_at_which_forwarding_starts: None, + }; + } + + let Some(next_keys_activation_block_number) = next_keys_activation_block_number else { + // If there is no next multisig, this is the active multisig + return Lifetime { + stage: LifetimeStage::Active, + block_at_which_reporting_starts, + block_at_which_forwarding_starts: None, + }; + }; + + assert!( + next_keys_activation_block_number > active_yet_not_reporting_end_block, + "next set of keys activated before this multisig activated" + ); + + let new_active_yet_not_reporting_end_block = + next_keys_activation_block_number + S::CONFIRMATIONS + S::TEN_MINUTES; + let new_active_and_used_for_change_end_block = + new_active_yet_not_reporting_end_block + S::CONFIRMATIONS; + // The exclusive end block is the inclusive start block + let block_at_which_forwarding_starts = Some(new_active_and_used_for_change_end_block); + + // If the new multisig is still having its activation block finalized on-chain, this multisig + // is still active (step 3) + if block_number < new_active_yet_not_reporting_end_block { + return Lifetime { + stage: LifetimeStage::Active, + block_at_which_reporting_starts, + block_at_which_forwarding_starts, + }; + } + + // Step 4 details a further CONFIRMATIONS + if block_number < new_active_and_used_for_change_end_block { + return Lifetime { + stage: LifetimeStage::UsingNewForChange, + block_at_which_reporting_starts, + block_at_which_forwarding_starts, + }; + } + + // Step 5 details a further 6 hours + // 6 hours = 6 * 60 minutes = 6 * 6 * 10 minutes + let new_active_and_forwarded_to_end_block = + new_active_and_used_for_change_end_block + (6 * 6 * S::TEN_MINUTES); + if block_number < new_active_and_forwarded_to_end_block { + return Lifetime { + stage: LifetimeStage::Forwarding, + block_at_which_reporting_starts, + block_at_which_forwarding_starts, + }; + } + + //
Step 6 + Lifetime { + stage: LifetimeStage::Finishing, + block_at_which_reporting_starts, + block_at_which_forwarding_starts, + } + } +} diff --git a/processor/scanner/src/report/db.rs b/processor/scanner/src/report/db.rs new file mode 100644 index 000000000..186accacd --- /dev/null +++ b/processor/scanner/src/report/db.rs @@ -0,0 +1,121 @@ +use core::marker::PhantomData; +use std::io::{Read, Write}; + +use group::GroupEncoding; + +use scale::{Encode, Decode, IoReader}; +use borsh::{BorshSerialize, BorshDeserialize}; +use serai_db::{Get, DbTxn, create_db}; + +use serai_primitives::Balance; + +use crate::{ScannerFeed, KeyFor, AddressFor}; + +create_db!( + ScannerReport { + // The next block to potentially report + NextToPotentiallyReportBlock: () -> u64, + // The next Batch ID to use + NextBatchId: () -> u32, + + // The block number which caused a batch + BlockNumberForBatch: (batch: u32) -> u64, + + // The external key for the session which should sign a batch + ExternalKeyForSessionToSignBatch: (batch: u32) -> Vec, + + // The return addresses for the InInstructions within a Batch + SerializedReturnAddresses: (batch: u32) -> Vec, + } +); + +pub(crate) struct ReturnInformation { + pub(crate) address: AddressFor, + pub(crate) balance: Balance, +} + +pub(crate) struct ReportDb(PhantomData); +impl ReportDb { + pub(crate) fn set_next_to_potentially_report_block( + txn: &mut impl DbTxn, + next_to_potentially_report_block: u64, + ) { + NextToPotentiallyReportBlock::set(txn, &next_to_potentially_report_block); + } + pub(crate) fn next_to_potentially_report_block(getter: &impl Get) -> Option { + NextToPotentiallyReportBlock::get(getter) + } + + pub(crate) fn acquire_batch_id(txn: &mut impl DbTxn, block_number: u64) -> u32 { + let id = NextBatchId::get(txn).unwrap_or(0); + NextBatchId::set(txn, &(id + 1)); + BlockNumberForBatch::set(txn, id, &block_number); + id + } + + pub(crate) fn take_block_number_for_batch(txn: &mut impl DbTxn, id: u32) -> Option { + BlockNumberForBatch::take(txn, id) + } + + pub(crate) fn save_external_key_for_session_to_sign_batch( + txn: &mut impl DbTxn, + id: u32, + external_key_for_session_to_sign_batch: &KeyFor, + ) { + ExternalKeyForSessionToSignBatch::set( + txn, + id, + &external_key_for_session_to_sign_batch.to_bytes().as_ref().to_vec(), + ); + } + + pub(crate) fn take_external_key_for_session_to_sign_batch( + txn: &mut impl DbTxn, + id: u32, + ) -> Option> { + ExternalKeyForSessionToSignBatch::get(txn, id).map(|key_vec| { + let mut key = as GroupEncoding>::Repr::default(); + key.as_mut().copy_from_slice(&key_vec); + KeyFor::::from_bytes(&key).unwrap() + }) + } + + pub(crate) fn save_return_information( + txn: &mut impl DbTxn, + id: u32, + return_information: &Vec>>, + ) { + let mut buf = Vec::with_capacity(return_information.len() * (32 + 1 + 8)); + for return_information in return_information { + if let Some(ReturnInformation { address, balance }) = return_information { + buf.write_all(&[1]).unwrap(); + address.serialize(&mut buf).unwrap(); + balance.encode_to(&mut buf); + } else { + buf.write_all(&[0]).unwrap(); + } + } + SerializedReturnAddresses::set(txn, id, &buf); + } + pub(crate) fn take_return_information( + txn: &mut impl DbTxn, + id: u32, + ) -> Option>>> { + let buf = SerializedReturnAddresses::take(txn, id)?; + let mut buf = buf.as_slice(); + + let mut res = Vec::with_capacity(buf.len() / (32 + 1 + 8)); + while !buf.is_empty() { + let mut opt = [0xff]; + buf.read_exact(&mut opt).unwrap(); + assert!((opt[0] == 0) || (opt[0] == 1)); + + res.push((opt[0] == 
1).then(|| { + let address = AddressFor::::deserialize_reader(&mut buf).unwrap(); + let balance = Balance::decode(&mut IoReader(&mut buf)).unwrap(); + ReturnInformation { address, balance } + })); + } + Some(res) + } +} diff --git a/processor/scanner/src/report/mod.rs b/processor/scanner/src/report/mod.rs new file mode 100644 index 000000000..afb1b6720 --- /dev/null +++ b/processor/scanner/src/report/mod.rs @@ -0,0 +1,183 @@ +use core::{marker::PhantomData, future::Future}; + +use scale::Encode; +use serai_db::{DbTxn, Db}; + +use serai_primitives::BlockHash; +use serai_in_instructions_primitives::{MAX_BATCH_SIZE, Batch}; + +use primitives::task::ContinuallyRan; +use crate::{ + db::{Returnable, ScannerGlobalDb, InInstructionData, ScanToReportDb, Batches, BatchesToSign}, + index, + scan::next_to_scan_for_outputs_block, + ScannerFeed, KeyFor, +}; + +mod db; +pub(crate) use db::ReturnInformation; +use db::ReportDb; + +pub(crate) fn take_block_number_for_batch( + txn: &mut impl DbTxn, + id: u32, +) -> Option { + ReportDb::::take_block_number_for_batch(txn, id) +} + +pub(crate) fn take_external_key_for_session_to_sign_batch( + txn: &mut impl DbTxn, + id: u32, +) -> Option> { + ReportDb::::take_external_key_for_session_to_sign_batch(txn, id) +} + +pub(crate) fn take_return_information( + txn: &mut impl DbTxn, + id: u32, +) -> Option>>> { + ReportDb::::take_return_information(txn, id) +} + +/* + This task produces Batches for notable blocks, with all InInstructions, in an ordered fashion. + + We only report blocks once both tasks, scanning for received outputs and checking for resolved + Eventualities, have processed the block. This ensures we know if this block is notable, and have + the InInstructions for it. +*/ +#[allow(non_snake_case)] +pub(crate) struct ReportTask { + db: D, + _S: PhantomData, +} + +impl ReportTask { + pub(crate) fn new(mut db: D, start_block: u64) -> Self { + if ReportDb::::next_to_potentially_report_block(&db).is_none() { + // Initialize the DB + let mut txn = db.txn(); + ReportDb::::set_next_to_potentially_report_block(&mut txn, start_block); + txn.commit(); + } + + Self { db, _S: PhantomData } + } +} + +impl ContinuallyRan for ReportTask { + fn run_iteration(&mut self) -> impl Send + Future> { + async move { + let highest_reportable = { + // Fetch the next to scan block + let next_to_scan = next_to_scan_for_outputs_block::(&self.db) + .expect("ReportTask run before writing the start block"); + // If we haven't done any work, return + if next_to_scan == 0 { + return Ok(false); + } + // The last scanned block is the block prior to this + #[allow(clippy::let_and_return)] + let last_scanned = next_to_scan - 1; + // The last scanned block is the highest reportable block as we only scan blocks within a + // window where it's safe to immediately report the block + // See `eventuality.rs` for more info + last_scanned + }; + + let next_to_potentially_report = ReportDb::::next_to_potentially_report_block(&self.db) + .expect("ReportTask run before writing the start block"); + + for b in next_to_potentially_report ..= highest_reportable { + let mut txn = self.db.txn(); + + // Receive the InInstructions for this block + // We always do this as we can't trivially tell if we should recv InInstructions before we + // do + let InInstructionData { + external_key_for_session_to_sign_batch, + returnable_in_instructions: in_instructions, + } = ScanToReportDb::::recv_in_instructions(&mut txn, b); + let notable = ScannerGlobalDb::::is_block_notable(&txn, b); + if !notable { + 
assert!(in_instructions.is_empty(), "block wasn't notable yet had InInstructions"); + } + // If this block is notable, create the Batch(s) for it + if notable { + let network = S::NETWORK; + let block_hash = index::block_id(&txn, b); + let mut batch_id = ReportDb::::acquire_batch_id(&mut txn, b); + + // start with empty batch + let mut batches = vec![Batch { + network, + id: batch_id, + block: BlockHash(block_hash), + instructions: vec![], + }]; + // We also track the return information for the InInstructions within a Batch in case + // they error + let mut return_information = vec![vec![]]; + + for Returnable { return_address, in_instruction } in in_instructions { + let balance = in_instruction.balance; + + let batch = batches.last_mut().unwrap(); + batch.instructions.push(in_instruction); + + // check if batch is over-size + if batch.encode().len() > MAX_BATCH_SIZE { + // pop the last instruction so it's back in size + let in_instruction = batch.instructions.pop().unwrap(); + + // bump the id for the new batch + batch_id = ReportDb::::acquire_batch_id(&mut txn, b); + + // make a new batch with this instruction included + batches.push(Batch { + network, + id: batch_id, + block: BlockHash(block_hash), + instructions: vec![in_instruction], + }); + // Since we're allocating a new batch, allocate a new set of return addresses for it + return_information.push(vec![]); + } + + // For the set of return addresses for the InInstructions for the batch we just pushed + // onto, push this InInstruction's return addresses + return_information + .last_mut() + .unwrap() + .push(return_address.map(|address| ReturnInformation { address, balance })); + } + + // Save the return addresses to the database + assert_eq!(batches.len(), return_information.len()); + for (batch, return_information) in batches.iter().zip(&return_information) { + assert_eq!(batch.instructions.len(), return_information.len()); + ReportDb::::save_external_key_for_session_to_sign_batch( + &mut txn, + batch.id, + &external_key_for_session_to_sign_batch, + ); + ReportDb::::save_return_information(&mut txn, batch.id, return_information); + } + + for batch in batches { + Batches::send(&mut txn, &batch); + BatchesToSign::send(&mut txn, &external_key_for_session_to_sign_batch, &batch); + } + } + + // Update the next to potentially report block + ReportDb::::set_next_to_potentially_report_block(&mut txn, b + 1); + + txn.commit(); + } + + // Run dependents if we decided to report any blocks + Ok(next_to_potentially_report <= highest_reportable) + } + } +} diff --git a/processor/scanner/src/scan/db.rs b/processor/scanner/src/scan/db.rs new file mode 100644 index 000000000..44023bc86 --- /dev/null +++ b/processor/scanner/src/scan/db.rs @@ -0,0 +1,68 @@ +use core::marker::PhantomData; + +use serai_db::{Get, DbTxn, create_db}; + +use primitives::ReceivedOutput; + +use crate::{db::OutputWithInInstruction, ScannerFeed, KeyFor, AddressFor, OutputFor}; + +create_db!( + ScannerScan { + // The next block to scan for received outputs + NextToScanForOutputsBlock: () -> u64, + + SerializedQueuedOutputs: (block_number: u64) -> Vec, + + ReportedInInstructionForOutput: (id: &[u8]) -> (), + } +); + +pub(crate) struct ScanDb(PhantomData); +impl ScanDb { + pub(crate) fn set_next_to_scan_for_outputs_block( + txn: &mut impl DbTxn, + next_to_scan_for_outputs_block: u64, + ) { + NextToScanForOutputsBlock::set(txn, &next_to_scan_for_outputs_block); + } + pub(crate) fn next_to_scan_for_outputs_block(getter: &impl Get) -> Option { + 
NextToScanForOutputsBlock::get(getter) + } + + pub(crate) fn take_queued_outputs( + txn: &mut impl DbTxn, + block_number: u64, + ) -> Vec> { + let serialized = SerializedQueuedOutputs::get(txn, block_number).unwrap_or(vec![]); + let mut serialized = serialized.as_slice(); + + let mut res = Vec::with_capacity(serialized.len() / 128); + while !serialized.is_empty() { + res.push(OutputWithInInstruction::::read(&mut serialized).unwrap()); + } + res + } + pub(crate) fn queue_output_until_block( + txn: &mut impl DbTxn, + queue_for_block: u64, + output: &OutputWithInInstruction, + ) { + let mut outputs = + SerializedQueuedOutputs::get(txn, queue_for_block).unwrap_or(Vec::with_capacity(128)); + output.write(&mut outputs).unwrap(); + SerializedQueuedOutputs::set(txn, queue_for_block, &outputs); + } + + pub(crate) fn prior_reported_in_instruction_for_output( + getter: &impl Get, + id: & as ReceivedOutput, AddressFor>>::Id, + ) -> bool { + ReportedInInstructionForOutput::get(getter, id.as_ref()).is_some() + } + pub(crate) fn reported_in_instruction_for_output( + txn: &mut impl DbTxn, + id: & as ReceivedOutput, AddressFor>>::Id, + ) { + ReportedInInstructionForOutput::set(txn, id.as_ref(), &()); + } +} diff --git a/processor/scanner/src/scan/mod.rs b/processor/scanner/src/scan/mod.rs new file mode 100644 index 000000000..0ebdf9925 --- /dev/null +++ b/processor/scanner/src/scan/mod.rs @@ -0,0 +1,368 @@ +use core::future::Future; +use std::collections::HashMap; + +use scale::Decode; +use serai_db::{Get, DbTxn, Db}; + +use serai_in_instructions_primitives::{ + Shorthand, RefundableInInstruction, InInstruction, InInstructionWithBalance, +}; + +use primitives::{task::ContinuallyRan, OutputType, ReceivedOutput, Block}; + +use crate::{ + lifetime::LifetimeStage, + db::{ + OutputWithInInstruction, Returnable, SenderScanData, ScannerGlobalDb, InInstructionData, + ScanToReportDb, ScanToEventualityDb, + }, + BlockExt, ScannerFeed, AddressFor, OutputFor, Return, sort_outputs, + eventuality::latest_scannable_block, +}; + +mod db; +use db::ScanDb; + +pub(crate) fn next_to_scan_for_outputs_block(getter: &impl Get) -> Option { + ScanDb::::next_to_scan_for_outputs_block(getter) +} + +pub(crate) fn queue_output_until_block( + txn: &mut impl DbTxn, + queue_for_block: u64, + output: &OutputWithInInstruction, +) { + // This isn't a perfect assertion as by the time this txn commits, we may have already started + // scanning this block. That doesn't change it should never trip as we queue outside the window + // we'll scan + assert!( + queue_for_block >= + next_to_scan_for_outputs_block::(txn) + .expect("queueing an output despite no next-to-scan-for-outputs block"), + "queueing an output for a block already scanned" + ); + ScanDb::::queue_output_until_block(txn, queue_for_block, output) +} + +// Construct an InInstruction from an external output. +// +// Also returns the address to return the coins to upon error. 
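`SerializedQueuedOutputs` above stores every output queued for a block as one concatenated buffer, which `take_queued_outputs` drains by reading entries until the slice is empty. A minimal sketch of that append-then-drain pattern, with a toy entry type and an in-memory map in place of the database (names illustrative):

```rust
use std::{
  collections::HashMap,
  io::{self, Read, Write},
};

/// Toy queued entry; the real code queues outputs alongside their InInstructions.
struct Entry {
  id: [u8; 4],
}

impl Entry {
  fn write(&self, writer: &mut impl Write) -> io::Result<()> {
    writer.write_all(&self.id)
  }
  fn read(reader: &mut impl Read) -> io::Result<Self> {
    let mut id = [0; 4];
    reader.read_exact(&mut id)?;
    Ok(Entry { id })
  }
}

/// Stand-in for the DB: block number -> concatenated serialization of all queued entries.
#[derive(Default)]
struct Queue {
  queued: HashMap<u64, Vec<u8>>,
}

impl Queue {
  /// Append an entry to the buffer queued for this block.
  fn queue_until_block(&mut self, block: u64, entry: &Entry) {
    let buf = self.queued.entry(block).or_default();
    entry.write(buf).unwrap();
  }

  /// Drain every entry queued for this block, reading until the buffer is empty.
  fn take_queued(&mut self, block: u64) -> Vec<Entry> {
    let serialized = self.queued.remove(&block).unwrap_or(vec![]);
    let mut serialized = serialized.as_slice();
    let mut res = vec![];
    while !serialized.is_empty() {
      res.push(Entry::read(&mut serialized).unwrap());
    }
    res
  }
}

fn main() {
  let mut queue = Queue::default();
  queue.queue_until_block(100, &Entry { id: *b"out1" });
  queue.queue_until_block(100, &Entry { id: *b"out2" });
  assert_eq!(queue.take_queued(100).len(), 2);
  assert!(queue.take_queued(100).is_empty());
}
```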
+fn in_instruction_from_output( + output: &OutputFor, +) -> (Option>, Option) { + assert_eq!(output.kind(), OutputType::External); + + let presumed_origin = output.presumed_origin(); + + let mut data = output.data(); + let shorthand = match Shorthand::decode(&mut data) { + Ok(shorthand) => shorthand, + Err(e) => { + log::info!("data in output {} wasn't valid shorthand: {e:?}", hex::encode(output.id())); + return (presumed_origin, None); + } + }; + let instruction = match RefundableInInstruction::try_from(shorthand) { + Ok(instruction) => instruction, + Err(e) => { + log::info!( + "shorthand in output {} wasn't convertible to a RefundableInInstruction: {e:?}", + hex::encode(output.id()) + ); + return (presumed_origin, None); + } + }; + + ( + instruction.origin.and_then(|addr| AddressFor::::try_from(addr).ok()).or(presumed_origin), + Some(instruction.instruction), + ) +} + +pub(crate) struct ScanTask { + db: D, + feed: S, +} + +impl ScanTask { + pub(crate) fn new(mut db: D, feed: S, start_block: u64) -> Self { + if ScanDb::::next_to_scan_for_outputs_block(&db).is_none() { + // Initialize the DB + let mut txn = db.txn(); + ScanDb::::set_next_to_scan_for_outputs_block(&mut txn, start_block); + txn.commit(); + } + + Self { db, feed } + } +} + +impl ContinuallyRan for ScanTask { + fn run_iteration(&mut self) -> impl Send + Future> { + async move { + // Fetch the safe to scan block + let latest_scannable = + latest_scannable_block::(&self.db).expect("ScanTask run before writing the start block"); + // Fetch the next block to scan + let next_to_scan = ScanDb::::next_to_scan_for_outputs_block(&self.db) + .expect("ScanTask run before writing the start block"); + + for b in next_to_scan ..= latest_scannable { + let block = self.feed.block_by_number(&self.db, b).await?; + + log::info!("scanning block: {} ({b})", hex::encode(block.id())); + + let mut txn = self.db.txn(); + + assert_eq!(ScanDb::::next_to_scan_for_outputs_block(&txn).unwrap(), b); + + let keys = ScannerGlobalDb::::active_keys_as_of_next_to_scan_for_outputs_block(&txn) + .expect("scanning for a blockchain without any keys set"); + + let latest_active_key = { + let mut keys = keys.clone(); + loop { + // Use the most recent key + let key = keys.pop().unwrap(); + // Unless this key is active, but not yet reporting + if key.stage == LifetimeStage::ActiveYetNotReporting { + continue; + } + break key.key; + } + }; + + // The scan data for this block + let mut scan_data = SenderScanData { + block_number: b, + received_external_outputs: vec![], + forwards: vec![], + returns: vec![], + }; + // The InInstructions for this block + let mut in_instructions = vec![]; + + // The outputs queued for this block + let queued_outputs = { + let mut queued_outputs = ScanDb::::take_queued_outputs(&mut txn, b); + // Sort the queued outputs in case they weren't queued in a deterministic fashion + queued_outputs.sort_by(|a, b| sort_outputs(&a.output, &b.output)); + queued_outputs + }; + for queued_output in queued_outputs { + in_instructions.push(( + queued_output.output.id(), + Returnable { + return_address: queued_output.return_address, + in_instruction: queued_output.in_instruction, + }, + )); + scan_data.received_external_outputs.push(queued_output.output); + } + + // We subtract the cost to aggregate from some outputs we scan + // This cost is fetched with an asynchronous function which may be non-trivial + // We cache the result of this function here to avoid calling it multiple times + let mut costs_to_aggregate = HashMap::with_capacity(1); + + // Scan for 
each key + for key in &keys { + for output in block.scan_for_outputs(latest_active_key, key.key) { + assert_eq!(output.key(), key.key); + + /* + The scan task runs ahead of time, obtaining ordering on the external network's blocks + with relation to events on the Serai network. This is done via publishing a Batch + which contains the InInstructions from External outputs. Accordingly, the scan + process only has to yield External outputs. + + It'd appear to make sense to scan for all outputs, and after scanning for all + outputs, yield all outputs. The issue is we can't identify outputs we created here. + We can only identify the outputs we receive and their *declared intention*. + + We only want to handle Change/Branch/Forwarded outputs we made ourselves. For + Forwarded, the reasoning is obvious (retiring multisigs should only downsize, yet + accepting new outputs solely because they claim to be Forwarded would increase the + size of the multisig). For Change/Branch, it's because such outputs which aren't ours + are pointless. They wouldn't hurt to accumulate though. + + The issue is they would hurt to accumulate. We want to filter outputs which are less + than their cost to aggregate, a variable itself variable to the current blockchain. + We can filter such outputs here, yet if we drop a Change output, we create an + insolvency. We'd need to track the loss and offset it later. That means we can't + filter such outputs, as we expect any Change output we make. + + The issue is the Change outputs we don't make. Someone can create an output declaring + to be Change, yet not actually Change. If we don't filter it, it'd be queued for + accumulation, yet it may cost more to accumulate than it's worth. + + The solution is to let the Eventuality task, which does know if we made an output or + not (or rather, if a transaction is identical to a transaction which should exist + regarding effects) decide to keep/yield the outputs which we should only keep if we + made them (as Serai itself should not make worthless outputs, so we can assume + they're worthwhile, and even if they're not economically, they are technically). + + The alternative, we drop outputs here with a generic filter rule and then report back + the insolvency created, still doesn't work as we'd only be creating an insolvency if + the output was actually made by us (and not simply someone else sending in). We can + have the Eventuality task report the insolvency, yet that requires the scanner be + responsible for such filter logic. It's more flexible, and has a cleaner API, + to do so at a higher level. 
+ */ + if output.kind() != OutputType::External { + // While we don't report these outputs, we still need consensus on this block and + // accordingly still need to set it as notable + let balance = output.balance(); + // We ensure it's over the dust limit to prevent people sending 1 satoshi from + // causing an invocation of a consensus/signing protocol + if balance.amount.0 >= S::dust(balance.coin).0 { + ScannerGlobalDb::::flag_notable_due_to_non_external_output(&mut txn, b); + } + continue; + } + + // Check this isn't dust + let balance_to_use = { + let mut balance = output.balance(); + + // First, subtract 2 * the cost to aggregate, as detailed in + // `spec/processor/UTXO Management.md` + + // We cache this, so if it isn't yet cached, insert it into the cache + if let std::collections::hash_map::Entry::Vacant(e) = + costs_to_aggregate.entry(balance.coin) + { + e.insert(self.feed.cost_to_aggregate(balance.coin, &block).await.map_err(|e| { + format!( + "ScanTask couldn't fetch cost to aggregate {:?} at {b}: {e:?}", + balance.coin + ) + })?); + } + let cost_to_aggregate = costs_to_aggregate[&balance.coin]; + balance.amount.0 -= 2 * cost_to_aggregate.0; + + // Now, check it's still past the dust threshold + if balance.amount.0 < S::dust(balance.coin).0 { + continue; + } + + balance + }; + + // Fetch the InInstruction/return addr for this output + let output_with_in_instruction = match in_instruction_from_output::(&output) { + (return_address, Some(instruction)) => OutputWithInInstruction { + output, + return_address, + in_instruction: InInstructionWithBalance { instruction, balance: balance_to_use }, + }, + (Some(address), None) => { + // Since there was no instruction here, return this since we parsed a return + // address + if key.stage != LifetimeStage::Finishing { + scan_data.returns.push(Return { address, output }); + } + continue; + } + // Since we didn't receive an instruction nor can we return this, queue this for + // accumulation and move on + (None, None) => { + if key.stage != LifetimeStage::Finishing { + scan_data.received_external_outputs.push(output); + } + continue; + } + }; + + // Drop External outputs if they're to a multisig which won't report them + // This means we should report any External output we save to disk here + #[allow(clippy::match_same_arms)] + match key.stage { + // This multisig isn't yet reporting its External outputs to avoid a DoS + // Queue the output to be reported when this multisig starts reporting + LifetimeStage::ActiveYetNotReporting => { + ScanDb::::queue_output_until_block( + &mut txn, + key.block_at_which_reporting_starts, + &output_with_in_instruction, + ); + continue; + } + // We should report External outputs in these cases + LifetimeStage::Active | LifetimeStage::UsingNewForChange => {} + // We should report External outputs only once forwarded, where they'll appear as + // OutputType::Forwarded. 
We save them now for when they appear + LifetimeStage::Forwarding => { + // When the forwarded output appears, we can see which Plan it's associated with + // and from there recover this output + scan_data.forwards.push(output_with_in_instruction); + continue; + } + // We should drop these as we should not be handling new External outputs at this + // time + LifetimeStage::Finishing => { + continue; + } + } + // Ensures we didn't miss a `continue` above + assert!(matches!(key.stage, LifetimeStage::Active | LifetimeStage::UsingNewForChange)); + + in_instructions.push(( + output_with_in_instruction.output.id(), + Returnable { + return_address: output_with_in_instruction.return_address, + in_instruction: output_with_in_instruction.in_instruction, + }, + )); + scan_data.received_external_outputs.push(output_with_in_instruction.output); + } + } + + // Sort the InInstructions by the output ID + in_instructions.sort_by(|(output_id_a, _), (output_id_b, _)| { + use core::cmp::{Ordering, Ord}; + let res = output_id_a.as_ref().cmp(output_id_b.as_ref()); + assert!(res != Ordering::Equal, "two outputs within a collection had the same ID"); + res + }); + // Check we haven't prior reported an InInstruction for this output + // This is a sanity check which is intended to prevent multiple instances of sriXYZ + // on-chain due to a single output + for (id, _) in &in_instructions { + assert!( + !ScanDb::::prior_reported_in_instruction_for_output(&txn, id), + "prior reported an InInstruction for an output with this ID" + ); + ScanDb::::reported_in_instruction_for_output(&mut txn, id); + } + // Reformat the InInstructions to just the InInstructions + let in_instructions = in_instructions + .into_iter() + .map(|(_id, in_instruction)| in_instruction) + .collect::>(); + // Send the InInstructions to the report task + // We need to also specify which key is responsible for signing the Batch for these, which + // will always be the oldest key (as the new key signing the Batch signifies handover + // acceptance) + ScanToReportDb::::send_in_instructions( + &mut txn, + b, + &InInstructionData { + external_key_for_session_to_sign_batch: keys[0].key, + returnable_in_instructions: in_instructions, + }, + ); + + // Send the scan data to the eventuality task + ScanToEventualityDb::::send_scan_data(&mut txn, b, &scan_data); + // Update the next to scan block + ScanDb::::set_next_to_scan_for_outputs_block(&mut txn, b + 1); + txn.commit(); + } + + // Run dependents if we successfully scanned any blocks + Ok(next_to_scan <= latest_scannable) + } + } +} diff --git a/processor/scanner/src/substrate/db.rs b/processor/scanner/src/substrate/db.rs new file mode 100644 index 000000000..c1a1b0e22 --- /dev/null +++ b/processor/scanner/src/substrate/db.rs @@ -0,0 +1,88 @@ +use core::marker::PhantomData; + +use group::GroupEncoding; + +use borsh::{BorshSerialize, BorshDeserialize}; +use serai_db::{Get, DbTxn, create_db, db_channel}; + +use serai_coins_primitives::OutInstructionWithBalance; + +use crate::{ScannerFeed, KeyFor}; + +#[derive(BorshSerialize, BorshDeserialize)] +struct AcknowledgeBatchEncodable { + batch_id: u32, + in_instruction_results: Vec, + burns: Vec, + key_to_activate: Option>, +} + +#[derive(BorshSerialize, BorshDeserialize)] +enum ActionEncodable { + AcknowledgeBatch(AcknowledgeBatchEncodable), + QueueBurns(Vec), +} + +pub(crate) struct AcknowledgeBatch { + pub(crate) batch_id: u32, + pub(crate) in_instruction_results: Vec, + pub(crate) burns: Vec, + pub(crate) key_to_activate: Option>, +} + +pub(crate) enum Action { 
+ AcknowledgeBatch(AcknowledgeBatch), + QueueBurns(Vec), +} + +db_channel!( + ScannerSubstrate { + Actions: () -> ActionEncodable, + } +); + +pub(crate) struct SubstrateDb(PhantomData); +impl SubstrateDb { + pub(crate) fn queue_acknowledge_batch( + txn: &mut impl DbTxn, + batch_id: u32, + in_instruction_results: Vec, + burns: Vec, + key_to_activate: Option>, + ) { + Actions::send( + txn, + &ActionEncodable::AcknowledgeBatch(AcknowledgeBatchEncodable { + batch_id, + in_instruction_results, + burns, + key_to_activate: key_to_activate.map(|key| key.to_bytes().as_ref().to_vec()), + }), + ); + } + pub(crate) fn queue_queue_burns(txn: &mut impl DbTxn, burns: Vec) { + Actions::send(txn, &ActionEncodable::QueueBurns(burns)); + } + + pub(crate) fn next_action(txn: &mut impl DbTxn) -> Option> { + let action_encodable = Actions::try_recv(txn)?; + Some(match action_encodable { + ActionEncodable::AcknowledgeBatch(AcknowledgeBatchEncodable { + batch_id, + in_instruction_results, + burns, + key_to_activate, + }) => Action::AcknowledgeBatch(AcknowledgeBatch { + batch_id, + in_instruction_results, + burns, + key_to_activate: key_to_activate.map(|key| { + let mut repr = as GroupEncoding>::Repr::default(); + repr.as_mut().copy_from_slice(&key); + KeyFor::::from_bytes(&repr).unwrap() + }), + }), + ActionEncodable::QueueBurns(burns) => Action::QueueBurns(burns), + }) + } +} diff --git a/processor/scanner/src/substrate/mod.rs b/processor/scanner/src/substrate/mod.rs new file mode 100644 index 000000000..ce28470d4 --- /dev/null +++ b/processor/scanner/src/substrate/mod.rs @@ -0,0 +1,172 @@ +use core::{marker::PhantomData, future::Future}; + +use serai_db::{DbTxn, Db}; + +use serai_coins_primitives::{OutInstruction, OutInstructionWithBalance}; + +use primitives::task::ContinuallyRan; +use crate::{ + db::{ScannerGlobalDb, SubstrateToEventualityDb, AcknowledgedBatches}, + report, ScannerFeed, KeyFor, +}; + +mod db; +use db::*; + +pub(crate) fn queue_acknowledge_batch( + txn: &mut impl DbTxn, + batch_id: u32, + in_instruction_results: Vec, + burns: Vec, + key_to_activate: Option>, +) { + SubstrateDb::::queue_acknowledge_batch( + txn, + batch_id, + in_instruction_results, + burns, + key_to_activate, + ) +} +pub(crate) fn queue_queue_burns( + txn: &mut impl DbTxn, + burns: Vec, +) { + SubstrateDb::::queue_queue_burns(txn, burns) +} + +/* + When Serai acknowledges a Batch, we can only handle it once we've scanned the chain and generated + the same Batch ourselves. This takes the `acknowledge_batch`, `queue_burns` arguments and sits on + them until we're able to process them. 
+*/ +#[allow(non_snake_case)] +pub(crate) struct SubstrateTask { + db: D, + _S: PhantomData, +} + +impl SubstrateTask { + pub(crate) fn new(db: D) -> Self { + Self { db, _S: PhantomData } + } +} + +impl ContinuallyRan for SubstrateTask { + fn run_iteration(&mut self) -> impl Send + Future> { + async move { + let mut made_progress = false; + loop { + // Fetch the next action to handle + let mut txn = self.db.txn(); + let Some(action) = SubstrateDb::::next_action(&mut txn) else { + drop(txn); + return Ok(made_progress); + }; + + match action { + Action::AcknowledgeBatch(AcknowledgeBatch { + batch_id, + in_instruction_results, + mut burns, + key_to_activate, + }) => { + // Check if we have the information for this batch + let Some(block_number) = report::take_block_number_for_batch::(&mut txn, batch_id) + else { + // If we don't, drop this txn (restoring the action to the database) + drop(txn); + return Ok(made_progress); + }; + + { + let external_key_for_session_to_sign_batch = + report::take_external_key_for_session_to_sign_batch::(&mut txn, batch_id) + .unwrap(); + AcknowledgedBatches::send( + &mut txn, + &external_key_for_session_to_sign_batch, + batch_id, + ); + } + + // Mark we made progress and handle this + made_progress = true; + + assert!( + ScannerGlobalDb::::is_block_notable(&txn, block_number), + "acknowledging a block which wasn't notable" + ); + if let Some(prior_highest_acknowledged_block) = + ScannerGlobalDb::::highest_acknowledged_block(&txn) + { + // If a single block produced multiple Batches, the block number won't increment + assert!( + block_number >= prior_highest_acknowledged_block, + "acknowledging blocks out-of-order" + ); + for b in (prior_highest_acknowledged_block + 1) .. block_number { + assert!( + !ScannerGlobalDb::::is_block_notable(&txn, b), + "skipped acknowledging a block which was notable" + ); + } + } + + ScannerGlobalDb::::set_highest_acknowledged_block(&mut txn, block_number); + if let Some(key_to_activate) = key_to_activate { + ScannerGlobalDb::::queue_key( + &mut txn, + block_number + S::WINDOW_LENGTH, + key_to_activate, + ); + } + + // Return the balances for any InInstructions which failed to execute + { + let return_information = report::take_return_information::(&mut txn, batch_id) + .expect("didn't save the return information for Batch we published"); + assert_eq!( + in_instruction_results.len(), + return_information.len(), + "amount of InInstruction succeededs differed from amount of return information saved" + ); + + // We map these into standard Burns + for (result, return_information) in + in_instruction_results.into_iter().zip(return_information) + { + if result == messages::substrate::InInstructionResult::Succeeded { + continue; + } + + if let Some(report::ReturnInformation { address, balance }) = return_information { + burns.push(OutInstructionWithBalance { + instruction: OutInstruction { address: address.into() }, + balance, + }); + } + } + } + + // We send these Burns as stemming from this block we just acknowledged + // This causes them to be acted on after we accumulate the outputs from this block + SubstrateToEventualityDb::send_burns::(&mut txn, block_number, burns); + } + + Action::QueueBurns(burns) => { + // We can instantly handle this so long as we've handled all prior actions + made_progress = true; + + let queue_as_of = ScannerGlobalDb::::highest_acknowledged_block(&txn) + .expect("queueing Burns yet never acknowledged a block"); + + SubstrateToEventualityDb::send_burns::(&mut txn, queue_as_of, burns); + } + } + + txn.commit(); 
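The acknowledgement path above turns every failed InInstruction which has saved return information into a Burn refunding the origin address. A small, self-contained sketch of that mapping, with toy types standing in for `InInstructionResult`, `ReturnInformation`, and `OutInstructionWithBalance` (none of these are the crate's actual types):

```rust
/// Whether an InInstruction succeeded when Serai executed the Batch.
#[derive(Clone, Copy, PartialEq)]
enum InInstructionResult {
  Succeeded,
  Failed,
}

/// Toy return information saved when the Batch was created.
struct ReturnInformation {
  address: String,
  amount: u64,
}

/// Toy refund instruction standing in for a Burn.
struct Refund {
  address: String,
  amount: u64,
}

/// For each failed InInstruction with a known return address, queue a refund.
fn refunds_for_failures(
  results: &[InInstructionResult],
  return_information: Vec<Option<ReturnInformation>>,
) -> Vec<Refund> {
  // The amount of results must match the amount of return information saved for the Batch
  assert_eq!(results.len(), return_information.len());
  let mut refunds = vec![];
  for (result, return_information) in results.iter().zip(return_information) {
    if *result == InInstructionResult::Succeeded {
      continue;
    }
    // Only refundable if a return address was parsed/presumed when scanning the output
    if let Some(ReturnInformation { address, amount }) = return_information {
      refunds.push(Refund { address, amount });
    }
  }
  refunds
}

fn main() {
  let refunds = refunds_for_failures(
    &[InInstructionResult::Succeeded, InInstructionResult::Failed],
    vec![None, Some(ReturnInformation { address: "addr".to_string(), amount: 5 })],
  );
  for refund in &refunds {
    println!("refunding {} to {}", refund.amount, refund.address);
  }
  assert_eq!(refunds.len(), 1);
}
```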
+ } + } + } +} diff --git a/processor/scheduler/primitives/Cargo.toml b/processor/scheduler/primitives/Cargo.toml new file mode 100644 index 000000000..f847300a8 --- /dev/null +++ b/processor/scheduler/primitives/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "serai-processor-scheduler-primitives" +version = "0.1.0" +description = "Primitives for schedulers for the Serai processor" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/scheduler/primitives" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +publish = false + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[package.metadata.cargo-machete] +ignored = ["scale", "borsh"] + +[lints] +workspace = true + +[dependencies] +ciphersuite = { path = "../../../crypto/ciphersuite", default-features = false, features = ["std"] } +frost = { package = "modular-frost", path = "../../../crypto/frost", default-features = false } + +scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } +borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } + +serai-db = { path = "../../../common/db" } diff --git a/processor/scheduler/primitives/LICENSE b/processor/scheduler/primitives/LICENSE new file mode 100644 index 000000000..e091b1498 --- /dev/null +++ b/processor/scheduler/primitives/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . diff --git a/processor/scheduler/primitives/README.md b/processor/scheduler/primitives/README.md new file mode 100644 index 000000000..6e81249d9 --- /dev/null +++ b/processor/scheduler/primitives/README.md @@ -0,0 +1,3 @@ +# Scheduler Primitives + +Primitives for schedulers. diff --git a/processor/scheduler/primitives/src/lib.rs b/processor/scheduler/primitives/src/lib.rs new file mode 100644 index 000000000..3c214d159 --- /dev/null +++ b/processor/scheduler/primitives/src/lib.rs @@ -0,0 +1,77 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +use core::marker::PhantomData; +use std::io; + +use ciphersuite::{group::GroupEncoding, Ciphersuite}; +use frost::{dkg::ThresholdKeys, sign::PreprocessMachine}; + +use serai_db::DbTxn; + +/// A transaction. +pub trait Transaction: Sized + Send { + /// Read a `Transaction`. + fn read(reader: &mut impl io::Read) -> io::Result; + /// Write a `Transaction`. + fn write(&self, writer: &mut impl io::Write) -> io::Result<()>; +} + +/// A signable transaction. +pub trait SignableTransaction: 'static + Sized + Send + Sync + Clone { + /// The underlying transaction type. + type Transaction: Transaction; + /// The ciphersuite used to sign this transaction. + type Ciphersuite: Ciphersuite; + /// The preprocess machine for the signing protocol for this transaction. + type PreprocessMachine: Clone + PreprocessMachine>; + + /// Read a `SignableTransaction`. 
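The `Transaction` trait defined above only asks for a self-describing binary encoding. The following is a minimal sketch of a conforming implementation and a read/write round-trip; `DummyTransaction` and its wire format are invented for illustration and are not part of this PR, and the trait is re-stated locally so the sketch compiles on its own.

```rust
use std::io::{self, Read, Write};

// Re-stated locally so the sketch is self-contained; mirrors the trait defined above.
trait Transaction: Sized + Send {
  fn read(reader: &mut impl Read) -> io::Result<Self>;
  fn write(&self, writer: &mut impl Write) -> io::Result<()>;
}

// A hypothetical transaction which is just a 32-byte ID and an amount.
#[derive(Debug, PartialEq)]
struct DummyTransaction {
  id: [u8; 32],
  amount: u64,
}

impl Transaction for DummyTransaction {
  fn read(reader: &mut impl Read) -> io::Result<Self> {
    let mut id = [0; 32];
    reader.read_exact(&mut id)?;
    let mut amount = [0; 8];
    reader.read_exact(&mut amount)?;
    Ok(Self { id, amount: u64::from_le_bytes(amount) })
  }
  fn write(&self, writer: &mut impl Write) -> io::Result<()> {
    writer.write_all(&self.id)?;
    writer.write_all(&self.amount.to_le_bytes())
  }
}

fn main() {
  let tx = DummyTransaction { id: [1; 32], amount: 100 };
  let mut buf = vec![];
  tx.write(&mut buf).unwrap();
  // Reading back what was written yields the original transaction
  assert_eq!(DummyTransaction::read(&mut buf.as_slice()).unwrap(), tx);
}
```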
+ fn read(reader: &mut impl io::Read) -> io::Result; + /// Write a `SignableTransaction`. + fn write(&self, writer: &mut impl io::Write) -> io::Result<()>; + + /// The ID for this transaction. + /// + /// This is an internal ID arbitrarily definable so long as it's unique. + /// + /// This same ID MUST be returned by the Eventuality for this transaction. + fn id(&self) -> [u8; 32]; + + /// Sign this transaction. + fn sign(self, keys: ThresholdKeys) -> Self::PreprocessMachine; +} + +/// The transaction type for a SignableTransaction. +pub type TransactionFor = ::Transaction; + +mod db { + use serai_db::{Get, DbTxn, create_db, db_channel}; + + db_channel! { + SchedulerPrimitives { + TransactionsToSign: (key: &[u8]) -> Vec, + } + } +} + +/// The transactions to sign, as scheduled by a Scheduler. +pub struct TransactionsToSign(PhantomData); +impl TransactionsToSign { + /// Send a transaction to sign. + pub fn send(txn: &mut impl DbTxn, key: &impl GroupEncoding, tx: &T) { + let mut buf = Vec::with_capacity(128); + tx.write(&mut buf).unwrap(); + db::TransactionsToSign::send(txn, key.to_bytes().as_ref(), &buf); + } + + /// Try to receive a transaction to sign. + pub fn try_recv(txn: &mut impl DbTxn, key: &impl GroupEncoding) -> Option { + let tx = db::TransactionsToSign::try_recv(txn, key.to_bytes().as_ref())?; + let mut tx = tx.as_slice(); + let res = T::read(&mut tx).unwrap(); + assert!(tx.is_empty()); + Some(res) + } +} diff --git a/processor/scheduler/smart-contract/Cargo.toml b/processor/scheduler/smart-contract/Cargo.toml new file mode 100644 index 000000000..c43569fb6 --- /dev/null +++ b/processor/scheduler/smart-contract/Cargo.toml @@ -0,0 +1,32 @@ +[package] +name = "serai-processor-smart-contract-scheduler" +version = "0.1.0" +description = "Scheduler for a smart contract representing the Serai processor" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/scheduler/smart-contract" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +publish = false + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[package.metadata.cargo-machete] +ignored = ["scale", "borsh"] + +[lints] +workspace = true + +[dependencies] +group = { version = "0.13", default-features = false } + +scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } +borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } + +serai-db = { path = "../../../common/db" } + +primitives = { package = "serai-processor-primitives", path = "../../primitives" } +scanner = { package = "serai-processor-scanner", path = "../../scanner" } +scheduler-primitives = { package = "serai-processor-scheduler-primitives", path = "../primitives" } diff --git a/processor/scheduler/smart-contract/LICENSE b/processor/scheduler/smart-contract/LICENSE new file mode 100644 index 000000000..e091b1498 --- /dev/null +++ b/processor/scheduler/smart-contract/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. 
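The `TransactionsToSign` channel defined in the scheduler primitives above is effectively a per-key FIFO: a scheduler `send`s each serialized signable transaction under the key which must sign it, and the signing task `try_recv`s them in order. A standalone, in-memory sketch of those semantics (the real channel is the `db_channel!` shown above, keyed by the key's group encoding; the map-based queue here is purely illustrative):

```rust
use std::collections::{HashMap, VecDeque};

// In-memory stand-in for the per-key database channel.
#[derive(Default)]
struct TransactionsToSign {
  queues: HashMap<Vec<u8>, VecDeque<Vec<u8>>>,
}

impl TransactionsToSign {
  // Mirrors `send`: push the serialized transaction onto the queue for this key.
  fn send(&mut self, key: &[u8], serialized_tx: Vec<u8>) {
    self.queues.entry(key.to_vec()).or_default().push_back(serialized_tx);
  }
  // Mirrors `try_recv`: pop the next transaction queued for this key, if any.
  fn try_recv(&mut self, key: &[u8]) -> Option<Vec<u8>> {
    self.queues.get_mut(key)?.pop_front()
  }
}

fn main() {
  let mut channel = TransactionsToSign::default();
  channel.send(b"key-a", vec![1, 2, 3]);
  channel.send(b"key-a", vec![4, 5, 6]);
  // Each key has its own ordered queue
  assert_eq!(channel.try_recv(b"key-a"), Some(vec![1, 2, 3]));
  assert_eq!(channel.try_recv(b"key-b"), None);
}
```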
+ +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . diff --git a/processor/scheduler/smart-contract/README.md b/processor/scheduler/smart-contract/README.md new file mode 100644 index 000000000..0be94d20f --- /dev/null +++ b/processor/scheduler/smart-contract/README.md @@ -0,0 +1,3 @@ +# Smart Contract Scheduler + +A scheduler for a smart contract representing the Serai processor. diff --git a/processor/scheduler/smart-contract/src/lib.rs b/processor/scheduler/smart-contract/src/lib.rs new file mode 100644 index 000000000..0c9c690b4 --- /dev/null +++ b/processor/scheduler/smart-contract/src/lib.rs @@ -0,0 +1,150 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +use core::{marker::PhantomData, future::Future}; +use std::collections::HashMap; + +use group::GroupEncoding; + +use serai_db::{Get, DbTxn, create_db}; + +use primitives::{ReceivedOutput, Payment}; +use scanner::{ + LifetimeStage, ScannerFeed, KeyFor, AddressFor, EventualityFor, BlockFor, SchedulerUpdate, + KeyScopedEventualities, Scheduler as SchedulerTrait, +}; +use scheduler_primitives::*; + +create_db! { + SmartContractScheduler { + NextNonce: () -> u64, + } +} + +/// A smart contract. +pub trait SmartContract: 'static + Send { + /// The type representing a signable transaction. + type SignableTransaction: SignableTransaction; + + /// Rotate from the retiring key to the new key. + fn rotate( + &self, + nonce: u64, + retiring_key: KeyFor, + new_key: KeyFor, + ) -> (Self::SignableTransaction, EventualityFor); + + /// Fulfill the set of payments, dropping any not worth handling. + fn fulfill( + &self, + starting_nonce: u64, + key: KeyFor, + payments: Vec>>, + ) -> Vec<(Self::SignableTransaction, EventualityFor)>; +} + +/// A scheduler for a smart contract representing the Serai processor. +#[allow(non_snake_case)] +#[derive(Clone)] +pub struct Scheduler> { + smart_contract: SC, + _S: PhantomData, +} + +impl> Scheduler { + /// Create a new scheduler. 
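`SmartContract::fulfill` receives a `starting_nonce` and may emit several transactions; the scheduler then persists the advanced counter via `NextNonce`, as seen in `fulfill_payments` and `flush_key` below. A small sketch of that sequential-nonce discipline, with an in-memory counter standing in for the database entry (names here are illustrative only):

```rust
// In-memory stand-in for the `NextNonce` database entry.
struct NextNonce(u64);

impl NextNonce {
  // Allocate `count` sequential nonces, returning them and advancing the counter, as the
  // scheduler does after calling `fulfill`/`rotate`.
  fn allocate(&mut self, count: u64) -> std::ops::Range<u64> {
    let start = self.0;
    self.0 += count;
    start .. self.0
  }
}

fn main() {
  let mut next_nonce = NextNonce(0);
  // A fulfillment producing three transactions consumes nonces 0, 1, 2
  assert_eq!(next_nonce.allocate(3).collect::<Vec<_>>(), vec![0, 1, 2]);
  // A subsequent key rotation consumes nonce 3
  assert_eq!(next_nonce.allocate(1).collect::<Vec<_>>(), vec![3]);
  assert_eq!(next_nonce.0, 4);
}
```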
+ pub fn new(smart_contract: SC) -> Self { + Self { smart_contract, _S: PhantomData } + } + + fn fulfill_payments( + &self, + txn: &mut impl DbTxn, + active_keys: &[(KeyFor, LifetimeStage)], + payments: Vec>>, + ) -> KeyScopedEventualities { + let key = match active_keys[0].1 { + LifetimeStage::ActiveYetNotReporting | + LifetimeStage::Active | + LifetimeStage::UsingNewForChange => active_keys[0].0, + LifetimeStage::Forwarding | LifetimeStage::Finishing => active_keys[1].0, + }; + + let mut nonce = NextNonce::get(txn).unwrap_or(0); + let mut eventualities = Vec::with_capacity(1); + for (signable, eventuality) in self.smart_contract.fulfill(nonce, key, payments) { + TransactionsToSign::::send(txn, &key, &signable); + nonce += 1; + eventualities.push(eventuality); + } + NextNonce::set(txn, &nonce); + HashMap::from([(key.to_bytes().as_ref().to_vec(), eventualities)]) + } +} + +impl> SchedulerTrait for Scheduler { + type EphemeralError = (); + type SignableTransaction = SC::SignableTransaction; + + fn activate_key(_txn: &mut impl DbTxn, _key: KeyFor) {} + + fn flush_key( + &self, + txn: &mut impl DbTxn, + _block: &BlockFor, + retiring_key: KeyFor, + new_key: KeyFor, + ) -> impl Send + Future, Self::EphemeralError>> { + async move { + let nonce = NextNonce::get(txn).unwrap_or(0); + let (signable, eventuality) = self.smart_contract.rotate(nonce, retiring_key, new_key); + NextNonce::set(txn, &(nonce + 1)); + TransactionsToSign::::send(txn, &retiring_key, &signable); + Ok(HashMap::from([(retiring_key.to_bytes().as_ref().to_vec(), vec![eventuality])])) + } + } + + fn retire_key(_txn: &mut impl DbTxn, _key: KeyFor) {} + + fn update( + &self, + txn: &mut impl DbTxn, + _block: &BlockFor, + active_keys: &[(KeyFor, LifetimeStage)], + update: SchedulerUpdate, + ) -> impl Send + Future, Self::EphemeralError>> { + async move { + // We ignore the outputs as we don't need to know our current state as it never suffers + // partial availability + + // We shouldn't have any forwards though + assert!(update.forwards().is_empty()); + + // Create the transactions for the returns + Ok( + self.fulfill_payments( + txn, + active_keys, + update + .returns() + .iter() + .map(|to_return| { + Payment::new(to_return.address().clone(), to_return.output().balance()) + }) + .collect::>(), + ), + ) + } + } + + fn fulfill( + &self, + txn: &mut impl DbTxn, + _block: &BlockFor, + active_keys: &[(KeyFor, LifetimeStage)], + payments: Vec>>, + ) -> impl Send + Future, Self::EphemeralError>> { + async move { Ok(self.fulfill_payments(txn, active_keys, payments)) } + } +} diff --git a/processor/scheduler/utxo/primitives/Cargo.toml b/processor/scheduler/utxo/primitives/Cargo.toml new file mode 100644 index 000000000..80b1f22e5 --- /dev/null +++ b/processor/scheduler/utxo/primitives/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "serai-processor-utxo-scheduler-primitives" +version = "0.1.0" +description = "Primitives for UTXO schedulers for the Serai processor" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/scheduler/utxo/primitives" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +publish = false + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } + +serai-primitives = { path = "../../../../substrate/primitives", default-features = false, features = ["std"] } + +primitives = { package = 
"serai-processor-primitives", path = "../../../primitives" } +scanner = { package = "serai-processor-scanner", path = "../../../scanner" } +scheduler-primitives = { package = "serai-processor-scheduler-primitives", path = "../../primitives" } diff --git a/processor/scheduler/utxo/primitives/LICENSE b/processor/scheduler/utxo/primitives/LICENSE new file mode 100644 index 000000000..e091b1498 --- /dev/null +++ b/processor/scheduler/utxo/primitives/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . diff --git a/processor/scheduler/utxo/primitives/README.md b/processor/scheduler/utxo/primitives/README.md new file mode 100644 index 000000000..81bc954a7 --- /dev/null +++ b/processor/scheduler/utxo/primitives/README.md @@ -0,0 +1,3 @@ +# UTXO Scheduler Primitives + +Primitives for UTXO schedulers. diff --git a/processor/scheduler/utxo/primitives/src/lib.rs b/processor/scheduler/utxo/primitives/src/lib.rs new file mode 100644 index 000000000..c01baf021 --- /dev/null +++ b/processor/scheduler/utxo/primitives/src/lib.rs @@ -0,0 +1,279 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +use core::{fmt::Debug, future::Future}; + +use serai_primitives::Amount; + +use primitives::{ReceivedOutput, Payment}; +use scanner::{ScannerFeed, KeyFor, AddressFor, OutputFor, EventualityFor, BlockFor}; +use scheduler_primitives::*; + +mod tree; +pub use tree::*; + +/// A planned transaction. +pub struct PlannedTransaction { + /// The signable transaction. + pub signable: ST, + /// The Eventuality to watch for. + pub eventuality: EventualityFor, + /// The auxilliary data for this transaction. + pub auxilliary: A, +} + +/// A planned transaction which was created via amortizing the fee. +pub struct AmortizePlannedTransaction { + /// The amounts the included payments were worth. + /// + /// If the payments passed as an argument are sorted from highest to lowest valued, these `n` + /// amounts will be for the first `n` payments. + pub effected_payments: Vec, + /// Whether or not the planned transaction had a change output. + pub has_change: bool, + /// The signable transaction. + pub signable: ST, + /// The Eventuality to watch for. + pub eventuality: EventualityFor, + /// The auxilliary data for this transaction. + pub auxilliary: A, +} + +/// An object able to plan a transaction. +pub trait TransactionPlanner: 'static + Send + Sync { + /// An error encountered when handling planning transactions. + /// + /// This MUST be an ephemeral error. Retrying planning transactions MUST eventually resolve + /// resolve manual intervention/changing the arguments. + type EphemeralError: Debug; + + /// The type representing a signable transaction. + type SignableTransaction: SignableTransaction; + + /// The maximum amount of inputs allowed in a transaction. + const MAX_INPUTS: usize; + /// The maximum amount of outputs allowed in a transaction, including the change output. 
+ const MAX_OUTPUTS: usize; + + /// The branch address for this key of Serai's. + fn branch_address(key: KeyFor) -> AddressFor; + /// The change address for this key of Serai's. + fn change_address(key: KeyFor) -> AddressFor; + /// The forwarding address for this key of Serai's. + fn forwarding_address(key: KeyFor) -> AddressFor; + + /// Calculate the for a tansaction with this structure. + /// + /// The fee rate, inputs, and payments, will all be for the same coin. The returned fee is + /// denominated in this coin. + fn calculate_fee( + &self, + reference_block: &BlockFor, + inputs: Vec>, + payments: Vec>>, + change: Option>, + ) -> impl Send + Future>; + + /// Plan a transaction. + /// + /// This must only require the same fee as would be returned by `calculate_fee`. The caller is + /// trusted to maintain `sum(inputs) - sum(payments) >= if change.is_some() { DUST } else { 0 }`. + /// + /// `change` will always be an address belonging to the Serai network. If it is `Some`, a change + /// output must be created. + fn plan( + &self, + reference_block: &BlockFor, + inputs: Vec>, + payments: Vec>>, + change: Option>, + ) -> impl Send + + Future< + Output = Result, Self::EphemeralError>, + >; + + /// Obtain a PlannedTransaction via amortizing the fee over the payments. + /// + /// `operating_costs` is accrued to if Serai faces the burden of a fee or drops inputs not worth + /// accumulating. `operating_costs` will be amortized along with this transaction's fee as + /// possible, if there is a change output. Please see `spec/processor/UTXO Management.md` for + /// more information. + /// + /// Returns `None` if the fee exceeded the inputs, or `Some` otherwise. + // TODO: Enum for Change of None, Some, Mandatory + fn plan_transaction_with_fee_amortization( + &self, + operating_costs: &mut u64, + reference_block: &BlockFor, + inputs: Vec>, + mut payments: Vec>>, + mut change: Option>, + ) -> impl Send + + Future< + Output = Result< + Option>, + Self::EphemeralError, + >, + > { + async move { + // If there's no change output, we can't recoup any operating costs we would amortize + // We also don't have any losses if the inputs are written off/the change output is reduced + let mut operating_costs_if_no_change = 0; + let operating_costs_in_effect = + if change.is_none() { &mut operating_costs_if_no_change } else { operating_costs }; + + // Sanity checks + { + assert!(!inputs.is_empty()); + assert!((!payments.is_empty()) || change.is_some()); + let coin = inputs.first().unwrap().balance().coin; + for input in &inputs { + assert_eq!(coin, input.balance().coin); + } + for payment in &payments { + assert_eq!(coin, payment.balance().coin); + } + assert!( + (inputs.iter().map(|input| input.balance().amount.0).sum::() + + *operating_costs_in_effect) >= + payments.iter().map(|payment| payment.balance().amount.0).sum::(), + "attempted to fulfill payments without a sufficient input set" + ); + } + + let coin = inputs.first().unwrap().balance().coin; + + // Amortization + { + // Sort payments from high amount to low amount + payments.sort_by(|a, b| a.balance().amount.0.cmp(&b.balance().amount.0).reverse()); + + let mut fee = + self.calculate_fee(reference_block, inputs.clone(), payments.clone(), change).await?.0; + let mut amortized = 0; + while !payments.is_empty() { + // We need to pay the fee, and any accrued operating costs, minus what we've already + // amortized + let adjusted_fee = (*operating_costs_in_effect + fee).saturating_sub(amortized); + + /* + Ideally, we wouldn't use a ceil div yet would be 
accurate about it. Any remainder could + be amortized over the largest outputs, which wouldn't be relevant here as we only work + with the smallest output. The issue is the theoretical edge case where all outputs have + the same value and are of the minimum value. In that case, none would be able to have + the remainder amortized as it'd cause them to need to be dropped. Using a ceil div + avoids this. + */ + let per_payment_fee = adjusted_fee.div_ceil(u64::try_from(payments.len()).unwrap()); + // Pop the last payment if it can't pay the fee, remaining about the dust limit as it does + if payments.last().unwrap().balance().amount.0 <= (per_payment_fee + S::dust(coin).0) { + amortized += payments.pop().unwrap().balance().amount.0; + // Recalculate the fee and try again + fee = self + .calculate_fee(reference_block, inputs.clone(), payments.clone(), change) + .await? + .0; + continue; + } + // Break since all of these payments shouldn't be dropped + break; + } + + // If we couldn't amortize the fee over the payments, check if we even have enough to pay it + if payments.is_empty() { + // If we don't have a change output, we simply return here + // We no longer have anything to do here, nor any expectations + if change.is_none() { + return Ok(None); + } + + let inputs = inputs.iter().map(|input| input.balance().amount.0).sum::(); + // Checks not just if we can pay for it, yet that the would-be change output is at least + // dust + if inputs < (fee + S::dust(coin).0) { + // Write off these inputs + *operating_costs_in_effect += inputs; + // Yet also claw back the payments we dropped, as we only lost the change + // The dropped payments will be worth less than the inputs + operating_costs we started + // with, so this shouldn't use `saturating_sub` + *operating_costs_in_effect -= amortized; + return Ok(None); + } + } else { + // Since we have payments which can pay the fee we ended up with, amortize it + let adjusted_fee = (*operating_costs_in_effect + fee).saturating_sub(amortized); + let per_payment_base_fee = adjusted_fee / u64::try_from(payments.len()).unwrap(); + let payments_paying_one_atomic_unit_more = + usize::try_from(adjusted_fee % u64::try_from(payments.len()).unwrap()).unwrap(); + + for (i, payment) in payments.iter_mut().enumerate() { + let per_payment_fee = + per_payment_base_fee + u64::from(u8::from(i < payments_paying_one_atomic_unit_more)); + payment.balance().amount.0 -= per_payment_fee; + amortized += per_payment_fee; + } + assert!(amortized >= (*operating_costs_in_effect + fee)); + + // If the change is less than the dust, drop it + let would_be_change = inputs.iter().map(|input| input.balance().amount.0).sum::() - + payments.iter().map(|payment| payment.balance().amount.0).sum::() - + fee; + if would_be_change < S::dust(coin).0 { + change = None; + *operating_costs_in_effect += would_be_change; + } + } + + // Update the amount of operating costs + *operating_costs_in_effect = (*operating_costs_in_effect + fee).saturating_sub(amortized); + } + + // Because we amortized, or accrued as operating costs, the fee, make the transaction + let effected_payments = payments.iter().map(|payment| payment.balance().amount).collect(); + let has_change = change.is_some(); + + let PlannedTransaction { signable, eventuality, auxilliary } = + self.plan(reference_block, inputs, payments, change).await?; + Ok(Some(AmortizePlannedTransaction { + effected_payments, + has_change, + signable, + eventuality, + auxilliary, + })) + } + } + + /// Create a tree to fulfill a set of payments. 
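A simplified, standalone sketch of the amortization loop above, operating on plain `u64` amounts: sort descending, drop from the tail any payment which can't cover its ceil-divided share of the fee while staying above the dust limit, then split the fee across the survivors with the first `fee % n` payments paying one extra unit. Unlike the real code, the fee is held constant as payments are dropped (the real code recalculates it via `calculate_fee`), and operating costs and change handling are omitted.

```rust
fn amortize(mut amounts: Vec<u64>, fee: u64, dust: u64) -> Vec<u64> {
  // Sort from the highest amount to the lowest
  amounts.sort_unstable_by(|a, b| b.cmp(a));

  // Drop, from the tail, any payment which can't cover its (ceil-divided) share of the fee
  // while remaining above the dust limit
  while let Some(smallest) = amounts.last().copied() {
    let per_payment_fee = fee.div_ceil(u64::try_from(amounts.len()).unwrap());
    if smallest <= (per_payment_fee + dust) {
      amounts.pop();
      continue;
    }
    break;
  }
  if amounts.is_empty() {
    return amounts;
  }

  // Split the fee over the survivors, with the first `fee % n` payments paying one extra unit
  let n = u64::try_from(amounts.len()).unwrap();
  let base = fee / n;
  let extra = usize::try_from(fee % n).unwrap();
  for (i, amount) in amounts.iter_mut().enumerate() {
    *amount -= base + u64::from(i < extra);
  }
  amounts
}

fn main() {
  // The 10-unit payment is dropped (it can't cover its share plus dust), leaving the 100- and
  // 50-unit payments to pay 7 and 6 respectively
  assert_eq!(amortize(vec![100, 10, 50], 13, 5), vec![93, 44]);
}
```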
+ /// + /// Returns a `TreeTransaction` whose children (and arbitrary children of children) fulfill all + /// these payments. This tree root will be able to be made with a change output. + fn tree(payments: &[Payment>]) -> TreeTransaction> { + // This variable is for the current layer of the tree being built + let mut tree = Vec::with_capacity(payments.len().div_ceil(Self::MAX_OUTPUTS)); + + // Push the branches for the leaves (the payments out) + for payments in payments.chunks(Self::MAX_OUTPUTS) { + let value = payments.iter().map(|payment| payment.balance().amount.0).sum::(); + tree.push(TreeTransaction::>::Leaves { payments: payments.to_vec(), value }); + } + + // While we haven't calculated a tree root, or the tree root doesn't support a change output, + // keep working + while (tree.len() != 1) || (tree[0].children() == Self::MAX_OUTPUTS) { + let mut branch_layer = vec![]; + for children in tree.chunks(Self::MAX_OUTPUTS) { + branch_layer.push(TreeTransaction::>::Branch { + children: children.to_vec(), + value: children.iter().map(TreeTransaction::value).sum(), + }); + } + tree = branch_layer; + } + assert_eq!(tree.len(), 1); + let tree_root = tree.remove(0); + assert!((tree_root.children() + 1) <= Self::MAX_OUTPUTS); + tree_root + } +} diff --git a/processor/scheduler/utxo/primitives/src/tree.rs b/processor/scheduler/utxo/primitives/src/tree.rs new file mode 100644 index 000000000..d5b47309e --- /dev/null +++ b/processor/scheduler/utxo/primitives/src/tree.rs @@ -0,0 +1,142 @@ +use borsh::{BorshSerialize, BorshDeserialize}; + +use serai_primitives::{Coin, Amount, Balance}; + +use primitives::{Address, Payment}; +use scanner::ScannerFeed; + +/// A transaction within a tree to fulfill payments. +#[derive(Clone, BorshSerialize, BorshDeserialize)] +pub enum TreeTransaction { + /// A transaction for the leaves (payments) of the tree. + Leaves { + /// The payments within this transaction. + payments: Vec>, + /// The sum value of the payments. + value: u64, + }, + /// A transaction for the branches of the tree. + Branch { + /// The child transactions. + children: Vec, + /// The sum value of the child transactions. + value: u64, + }, +} +impl TreeTransaction { + /// How many children this transaction has. + /// + /// A child is defined as any dependent, whether payment or transaction. + pub fn children(&self) -> usize { + match self { + Self::Leaves { payments, .. } => payments.len(), + Self::Branch { children, .. } => children.len(), + } + } + + /// The value this transaction wants to spend. + pub fn value(&self) -> u64 { + match self { + Self::Leaves { value, .. } | Self::Branch { value, .. } => *value, + } + } + + /// The payments to make to enable this transaction's children. + /// + /// A child is defined as any dependent, whether payment or transaction. + /// + /// The input value given to this transaction MUST be less than or equal to the desired value. + /// The difference will be amortized over all dependents. + /// + /// Returns None if no payments should be made. Returns Some containing a non-empty Vec if any + /// payments should be made. + pub fn payments( + &self, + coin: Coin, + branch_address: &A, + input_value: u64, + ) -> Option>> { + // Fetch the amounts for the payments we'll make + let mut amounts: Vec<_> = match self { + Self::Leaves { payments, .. } => payments + .iter() + .map(|payment| { + assert_eq!(payment.balance().coin, coin); + Some(payment.balance().amount.0) + }) + .collect(), + Self::Branch { children, .. 
} => children.iter().map(|child| Some(child.value())).collect(), + }; + + // We need to reduce them so their sum is our input value + assert!(input_value <= self.value()); + let amount_to_amortize = self.value() - input_value; + + // If any payments won't survive the reduction, set them to None + let mut amortized = 0; + 'outer: while amounts.iter().any(Option::is_some) && (amortized < amount_to_amortize) { + let adjusted_fee = amount_to_amortize - amortized; + let amounts_len = + u64::try_from(amounts.iter().filter(|amount| amount.is_some()).count()).unwrap(); + let per_payment_fee_check = adjusted_fee.div_ceil(amounts_len); + + // Check each amount to see if it's not viable + let mut i = 0; + while i < amounts.len() { + if let Some(amount) = amounts[i] { + if amount.saturating_sub(per_payment_fee_check) < S::dust(coin).0 { + amounts[i] = None; + amortized += amount; + // If this amount wasn't viable, re-run with the new fee/amortization amounts + continue 'outer; + } + } + i += 1; + } + + // Now that we have the payments which will survive, reduce them + for (i, amount) in amounts.iter_mut().enumerate() { + if let Some(amount) = amount { + *amount -= adjusted_fee / amounts_len; + if i < usize::try_from(adjusted_fee % amounts_len).unwrap() { + *amount -= 1; + } + } + } + break; + } + + // Now that we have the reduced amounts, create the payments + let payments: Vec<_> = match self { + Self::Leaves { payments, .. } => { + payments + .iter() + .zip(amounts) + .filter_map(|(payment, amount)| { + amount.map(|amount| { + // The existing payment, with the new amount + Payment::new(payment.address().clone(), Balance { coin, amount: Amount(amount) }) + }) + }) + .collect() + } + Self::Branch { .. } => { + amounts + .into_iter() + .filter_map(|amount| { + amount.map(|amount| { + // A branch output with the new amount + Payment::new(branch_address.clone(), Balance { coin, amount: Amount(amount) }) + }) + }) + .collect() + } + }; + + // Use None for vec![] so we never actually use vec![] + if payments.is_empty() { + None?; + } + Some(payments) + } +} diff --git a/processor/scheduler/utxo/standard/Cargo.toml b/processor/scheduler/utxo/standard/Cargo.toml new file mode 100644 index 000000000..d6c16161d --- /dev/null +++ b/processor/scheduler/utxo/standard/Cargo.toml @@ -0,0 +1,35 @@ +[package] +name = "serai-processor-utxo-scheduler" +version = "0.1.0" +description = "Scheduler for UTXO networks for the Serai processor" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/scheduler/utxo/standard" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +publish = false + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[package.metadata.cargo-machete] +ignored = ["scale", "borsh"] + +[lints] +workspace = true + +[dependencies] +group = { version = "0.13", default-features = false } + +scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } +borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } + +serai-primitives = { path = "../../../../substrate/primitives", default-features = false, features = ["std"] } + +serai-db = { path = "../../../../common/db" } + +primitives = { package = "serai-processor-primitives", path = "../../../primitives" } +scanner = { package = "serai-processor-scanner", path = "../../../scanner" } +scheduler-primitives = { package = "serai-processor-scheduler-primitives", path = "../../primitives" } 
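The `tree` function above chunks payments into leaf transactions of at most `MAX_OUTPUTS` payments, then repeatedly chunks each layer into branches of at most `MAX_OUTPUTS` children until a single root remains with room spare for a change output; this is what bounds the on-chain chain length to `O(log n)`, as the scheduler READMEs describe. A standalone sketch counting the layers such a tree needs (`MAX_OUTPUTS = 16` is an arbitrary value for illustration, not one taken from this PR):

```rust
// An arbitrary illustrative limit, not a value from this PR.
const MAX_OUTPUTS: usize = 16;

// How many transaction layers (leaves included) the fulfillment tree needs for this many
// payments, mirroring the chunking loop in `tree` and its requirement that the root keep room
// for a change output.
fn chain_length(payments: usize) -> usize {
  assert!(payments != 0);
  // The leaf layer: each leaf transaction pays out up to MAX_OUTPUTS payments
  let mut layer = payments.div_ceil(MAX_OUTPUTS);
  let mut root_children = payments.min(MAX_OUTPUTS);
  let mut depth = 1;
  // Add branch layers until a single transaction remains whose children leave room for change
  while (layer != 1) || (root_children == MAX_OUTPUTS) {
    root_children = layer.min(MAX_OUTPUTS);
    layer = layer.div_ceil(MAX_OUTPUTS);
    depth += 1;
  }
  depth
}

fn main() {
  assert_eq!(chain_length(5), 1); // one leaf transaction, the change output still fits
  assert_eq!(chain_length(17), 2); // two leaves plus a root branch
  assert_eq!(chain_length(256), 3); // a full layer of 16 leaves needs two branch layers above it
}
```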
+utxo-scheduler-primitives = { package = "serai-processor-utxo-scheduler-primitives", path = "../primitives" } diff --git a/processor/scheduler/utxo/standard/LICENSE b/processor/scheduler/utxo/standard/LICENSE new file mode 100644 index 000000000..e091b1498 --- /dev/null +++ b/processor/scheduler/utxo/standard/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . diff --git a/processor/scheduler/utxo/standard/README.md b/processor/scheduler/utxo/standard/README.md new file mode 100644 index 000000000..8e5360f06 --- /dev/null +++ b/processor/scheduler/utxo/standard/README.md @@ -0,0 +1,17 @@ +# UTXO Scheduler + +A scheduler of transactions for networks premised on the UTXO model. + +### Design + +The scheduler is designed to achieve fulfillment of all expected payments with +an `O(1)` delay (regardless of prior scheduler state), `O(log n)` time, and +`O(log(n) + n)` computational complexity. + +For the time/computational complexity, we use a tree to fulfill payments. +This quickly gives us the ability to make as many outputs as necessary +(regardless of per-transaction output limits) and only has the latency of +including a chain of `O(log n)` transactions on-chain. The only computational +overhead is in creating the transactions which are branches in the tree. +Since we split off the root of the tree from a master output, the delay to start +fulfillment is the delay for the master output to re-appear on-chain. diff --git a/processor/scheduler/utxo/standard/src/db.rs b/processor/scheduler/utxo/standard/src/db.rs new file mode 100644 index 000000000..007615950 --- /dev/null +++ b/processor/scheduler/utxo/standard/src/db.rs @@ -0,0 +1,113 @@ +use core::marker::PhantomData; + +use group::GroupEncoding; + +use serai_primitives::{Coin, Amount, Balance}; + +use borsh::BorshDeserialize; +use serai_db::{Get, DbTxn, create_db, db_channel}; + +use primitives::{Payment, ReceivedOutput}; +use utxo_scheduler_primitives::TreeTransaction; +use scanner::{ScannerFeed, KeyFor, AddressFor, OutputFor}; + +create_db! { + UtxoScheduler { + OperatingCosts: (coin: Coin) -> Amount, + SerializedOutputs: (key: &[u8], coin: Coin) -> Vec, + SerializedQueuedPayments: (key: &[u8], coin: Coin) -> Vec, + } +} + +db_channel! 
{ + UtxoScheduler { + PendingBranch: (key: &[u8], balance: Balance) -> Vec, + } +} + +pub(crate) struct Db(PhantomData); +impl Db { + pub(crate) fn operating_costs(getter: &impl Get, coin: Coin) -> Amount { + OperatingCosts::get(getter, coin).unwrap_or(Amount(0)) + } + pub(crate) fn set_operating_costs(txn: &mut impl DbTxn, coin: Coin, amount: Amount) { + OperatingCosts::set(txn, coin, &amount) + } + + pub(crate) fn outputs( + getter: &impl Get, + key: KeyFor, + coin: Coin, + ) -> Option>> { + let buf = SerializedOutputs::get(getter, key.to_bytes().as_ref(), coin)?; + let mut buf = buf.as_slice(); + + let mut res = Vec::with_capacity(buf.len() / 128); + while !buf.is_empty() { + res.push(OutputFor::::read(&mut buf).unwrap()); + } + Some(res) + } + pub(crate) fn set_outputs( + txn: &mut impl DbTxn, + key: KeyFor, + coin: Coin, + outputs: &[OutputFor], + ) { + let mut buf = Vec::with_capacity(outputs.len() * 128); + for output in outputs { + output.write(&mut buf).unwrap(); + } + SerializedOutputs::set(txn, key.to_bytes().as_ref(), coin, &buf); + } + pub(crate) fn del_outputs(txn: &mut impl DbTxn, key: KeyFor, coin: Coin) { + SerializedOutputs::del(txn, key.to_bytes().as_ref(), coin); + } + + pub(crate) fn queued_payments( + getter: &impl Get, + key: KeyFor, + coin: Coin, + ) -> Option>>> { + let buf = SerializedQueuedPayments::get(getter, key.to_bytes().as_ref(), coin)?; + let mut buf = buf.as_slice(); + + let mut res = Vec::with_capacity(buf.len() / 128); + while !buf.is_empty() { + res.push(Payment::read(&mut buf).unwrap()); + } + Some(res) + } + pub(crate) fn set_queued_payments( + txn: &mut impl DbTxn, + key: KeyFor, + coin: Coin, + queued: &[Payment>], + ) { + let mut buf = Vec::with_capacity(queued.len() * 128); + for queued in queued { + queued.write(&mut buf).unwrap(); + } + SerializedQueuedPayments::set(txn, key.to_bytes().as_ref(), coin, &buf); + } + pub(crate) fn del_queued_payments(txn: &mut impl DbTxn, key: KeyFor, coin: Coin) { + SerializedQueuedPayments::del(txn, key.to_bytes().as_ref(), coin); + } + + pub(crate) fn queue_pending_branch( + txn: &mut impl DbTxn, + key: KeyFor, + balance: Balance, + child: &TreeTransaction>, + ) { + PendingBranch::send(txn, key.to_bytes().as_ref(), balance, &borsh::to_vec(child).unwrap()) + } + pub(crate) fn take_pending_branch( + txn: &mut impl DbTxn, + key: KeyFor, + balance: Balance, + ) -> Option>> { + PendingBranch::try_recv(txn, key.to_bytes().as_ref(), balance) + .map(|bytes| TreeTransaction::>::deserialize(&mut bytes.as_slice()).unwrap()) + } +} diff --git a/processor/scheduler/utxo/standard/src/lib.rs b/processor/scheduler/utxo/standard/src/lib.rs new file mode 100644 index 000000000..e826c300f --- /dev/null +++ b/processor/scheduler/utxo/standard/src/lib.rs @@ -0,0 +1,568 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +use core::{marker::PhantomData, future::Future}; +use std::collections::HashMap; + +use group::GroupEncoding; + +use serai_primitives::{Coin, Amount, Balance}; + +use serai_db::DbTxn; + +use primitives::{ReceivedOutput, Payment}; +use scanner::{ + LifetimeStage, ScannerFeed, KeyFor, AddressFor, OutputFor, EventualityFor, BlockFor, + SchedulerUpdate, KeyScopedEventualities, Scheduler as SchedulerTrait, +}; +use scheduler_primitives::*; +use utxo_scheduler_primitives::*; + +mod db; +use db::Db; + +/// A scheduler of transactions for networks premised on the UTXO model. 
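The `PendingBranch` channel above queues each not-yet-executable branch under the exact `(key, balance)` its funding output will carry (the post-amortization amount its parent pays it). When the scanner later reports an output with that key and balance, `take_pending_branch` claims it and the branch is planned, as `update` does further below. A standalone sketch of that exact-match lookup, with hypothetical `Key`/`Balance` stand-ins for the scanner's types:

```rust
use std::collections::{HashMap, VecDeque};

// Hypothetical stand-ins for the scanner's types, just for this sketch.
type Key = [u8; 32];
type Balance = (u8, u64); // (coin, amount)

// In-memory model of the `PendingBranch` channel: branch plans queued under the exact
// (key, balance) their funding output is expected to have.
#[derive(Default)]
struct PendingBranches {
  queued: HashMap<(Key, Balance), VecDeque<&'static str>>,
}

impl PendingBranches {
  fn queue(&mut self, key: Key, balance: Balance, branch: &'static str) {
    self.queued.entry((key, balance)).or_default().push_back(branch);
  }
  // When the scanner reports an output, an exactly-matching pending branch (if any) claims it.
  fn take(&mut self, key: Key, balance: Balance) -> Option<&'static str> {
    self.queued.get_mut(&(key, balance))?.pop_front()
  }
}

fn main() {
  let mut pending = PendingBranches::default();
  pending.queue([0; 32], (0, 1_000), "branch #1");
  // An output of a different amount doesn't trigger the branch
  assert!(pending.take([0; 32], (0, 999)).is_none());
  // The exactly-valued output does
  assert_eq!(pending.take([0; 32], (0, 1_000)), Some("branch #1"));
}
```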
+#[allow(non_snake_case)] +#[derive(Clone)] +pub struct Scheduler> { + planner: P, + _S: PhantomData, +} + +impl> Scheduler { + /// Create a new scheduler. + pub fn new(planner: P) -> Self { + Self { planner, _S: PhantomData } + } + + async fn aggregate_inputs( + &self, + txn: &mut impl DbTxn, + block: &BlockFor, + key_for_change: KeyFor, + key: KeyFor, + coin: Coin, + ) -> Result>, >::EphemeralError> { + let mut eventualities = vec![]; + + let mut operating_costs = Db::::operating_costs(txn, coin).0; + let mut outputs = Db::::outputs(txn, key, coin).unwrap(); + outputs.sort_by_key(|output| output.balance().amount.0); + while outputs.len() > P::MAX_INPUTS { + let to_aggregate = outputs.drain(.. P::MAX_INPUTS).collect::>(); + + let Some(planned) = self + .planner + .plan_transaction_with_fee_amortization( + &mut operating_costs, + block, + to_aggregate, + vec![], + Some(key_for_change), + ) + .await? + else { + continue; + }; + + TransactionsToSign::::send(txn, &key, &planned.signable); + eventualities.push(planned.eventuality); + } + + Db::::set_outputs(txn, key, coin, &outputs); + Db::::set_operating_costs(txn, coin, Amount(operating_costs)); + Ok(eventualities) + } + + fn fulfillable_payments( + txn: &mut impl DbTxn, + operating_costs: &mut u64, + key: KeyFor, + coin: Coin, + value_of_outputs: u64, + ) -> Vec>> { + // Fetch all payments for this key + let mut payments = Db::::queued_payments(txn, key, coin).unwrap(); + if payments.is_empty() { + return vec![]; + } + + loop { + // inputs must be >= (payments - operating costs) + // Accordingly, (inputs + operating costs) must be >= payments + let value_fulfillable = value_of_outputs + *operating_costs; + + // Drop to just the payments we can currently fulfill + { + let mut can_handle = 0; + let mut value_used = 0; + for payment in &payments { + value_used += payment.balance().amount.0; + if value_fulfillable < value_used { + break; + } + can_handle += 1; + } + + let remaining_payments = payments.drain(can_handle ..).collect::>(); + // Restore the rest to the database + Db::::set_queued_payments(txn, key, coin, &remaining_payments); + } + + // If these payments are worth less than the operating costs, immediately drop them + let payments_value = payments.iter().map(|payment| payment.balance().amount.0).sum::(); + if payments_value <= *operating_costs { + *operating_costs -= payments_value; + Db::::set_operating_costs(txn, coin, Amount(*operating_costs)); + + // Reset payments to the queued payments + payments = Db::::queued_payments(txn, key, coin).unwrap(); + // If there's no more payments, stop looking for which payments we should fulfill + if payments.is_empty() { + return vec![]; + } + // Find which of these we should handle + continue; + } + + return payments; + } + } + + fn queue_branches( + txn: &mut impl DbTxn, + key: KeyFor, + coin: Coin, + effected_payments: Vec, + tx: TreeTransaction>, + ) { + match tx { + TreeTransaction::Leaves { .. } => {} + TreeTransaction::Branch { mut children, .. } => { + children.sort_by_key(TreeTransaction::value); + children.reverse(); + + /* + This may only be a subset of payments but it'll be the originally-highest-valued + payments. `zip` will truncate to the first children which will be the highest-valued + children thanks to our sort. 
+ */ + for (amount, child) in effected_payments.into_iter().zip(children) { + Db::::queue_pending_branch(txn, key, Balance { coin, amount }, &child); + } + } + } + } + + async fn handle_branch( + &self, + txn: &mut impl DbTxn, + block: &BlockFor, + eventualities: &mut Vec>, + output: OutputFor, + tx: TreeTransaction>, + ) -> Result>::EphemeralError> { + let key = output.key(); + let coin = output.balance().coin; + let Some(payments) = tx.payments::(coin, &P::branch_address(key), output.balance().amount.0) + else { + // If this output has become too small to satisfy this branch, drop it + return Ok(false); + }; + + let Some(planned) = self + .planner + .plan_transaction_with_fee_amortization( + // Uses 0 as there's no operating costs to incur/amortize here + &mut 0, + block, + vec![output], + payments, + None, + ) + .await? + else { + // This Branch isn't viable, so drop it (and its children) + return Ok(false); + }; + + TransactionsToSign::::send(txn, &key, &planned.signable); + eventualities.push(planned.eventuality); + + Self::queue_branches(txn, key, coin, planned.effected_payments, tx); + + Ok(true) + } + + async fn step( + &self, + txn: &mut impl DbTxn, + active_keys: &[(KeyFor, LifetimeStage)], + block: &BlockFor, + key: KeyFor, + ) -> Result>, >::EphemeralError> { + let mut eventualities = vec![]; + + let key_for_change = match active_keys[0].1 { + LifetimeStage::ActiveYetNotReporting => { + panic!("expected to fulfill payments despite not reporting for the oldest key") + } + LifetimeStage::Active => active_keys[0].0, + LifetimeStage::UsingNewForChange | LifetimeStage::Forwarding | LifetimeStage::Finishing => { + active_keys[1].0 + } + }; + let branch_address = P::branch_address(key); + + 'coin: for coin in S::NETWORK.coins() { + let coin = *coin; + + // Perform any input aggregation we should + eventualities + .append(&mut self.aggregate_inputs(txn, block, key_for_change, key, coin).await?); + + // Fetch the operating costs/outputs + let mut operating_costs = Db::::operating_costs(txn, coin).0; + let outputs = Db::::outputs(txn, key, coin).unwrap(); + if outputs.is_empty() { + continue; + } + + // Fetch the fulfillable payments + let payments = Self::fulfillable_payments( + txn, + &mut operating_costs, + key, + coin, + outputs.iter().map(|output| output.balance().amount.0).sum(), + ); + if payments.is_empty() { + continue; + } + + // Create a tree to fulfill the payments + let mut tree = vec![P::tree(&payments)]; + + // Create the transaction for the root of the tree + // Try creating this transaction twice, once with a change output and once with increased + // operating costs to ensure a change output (as necessary to meet the requirements of the + // scanner API) + let mut planned_outer = None; + for i in 0 .. 2 { + let Some(planned) = self + .planner + .plan_transaction_with_fee_amortization( + &mut operating_costs, + block, + outputs.clone(), + tree[0] + .payments::(coin, &branch_address, tree[0].value()) + .expect("payments were dropped despite providing an input of the needed value"), + Some(key_for_change), + ) + .await? 
+ else { + // This should trip on the first iteration or not at all + assert_eq!(i, 0); + // This doesn't have inputs even worth aggregating so drop the entire tree + Db::::set_operating_costs(txn, coin, Amount(operating_costs)); + continue 'coin; + }; + + // If this doesn't have a change output, increase operating costs and try again + if !planned.has_change { + /* + Since we'll create a change output if it's worth at least dust, amortizing dust from + the payments should solve this. If the new transaction can't afford those operating + costs, then the payments should be amortized out, causing there to be a change or no + transaction at all. + */ + operating_costs += S::dust(coin).0; + continue; + } + + // Since this had a change output, move forward with it + planned_outer = Some(planned); + break; + } + let Some(planned) = planned_outer else { + panic!("couldn't create a tree root with a change output") + }; + Db::::set_operating_costs(txn, coin, Amount(operating_costs)); + TransactionsToSign::::send(txn, &key, &planned.signable); + eventualities.push(planned.eventuality); + + // Now save the next layer of the tree to the database + // We'll execute it when it appears + Self::queue_branches(txn, key, coin, planned.effected_payments, tree.remove(0)); + } + + Ok(eventualities) + } + + async fn flush_outputs( + &self, + txn: &mut impl DbTxn, + eventualities: &mut KeyScopedEventualities, + block: &BlockFor, + from: KeyFor, + to: KeyFor, + coin: Coin, + ) -> Result<(), >::EphemeralError> { + let from_bytes = from.to_bytes().as_ref().to_vec(); + // Ensure our inputs are aggregated + eventualities + .entry(from_bytes.clone()) + .or_insert(vec![]) + .append(&mut self.aggregate_inputs(txn, block, to, from, coin).await?); + + // Now that our inputs are aggregated, transfer all of them to the new key + let mut operating_costs = Db::::operating_costs(txn, coin).0; + let outputs = Db::::outputs(txn, from, coin).unwrap(); + if outputs.is_empty() { + return Ok(()); + } + let planned = self + .planner + .plan_transaction_with_fee_amortization( + &mut operating_costs, + block, + outputs, + vec![], + Some(to), + ) + .await?; + Db::::set_operating_costs(txn, coin, Amount(operating_costs)); + let Some(planned) = planned else { return Ok(()) }; + + TransactionsToSign::::send(txn, &from, &planned.signable); + eventualities.get_mut(&from_bytes).unwrap().push(planned.eventuality); + + Ok(()) + } +} + +impl> SchedulerTrait for Scheduler { + type EphemeralError = P::EphemeralError; + type SignableTransaction = P::SignableTransaction; + + fn activate_key(txn: &mut impl DbTxn, key: KeyFor) { + for coin in S::NETWORK.coins() { + assert!(Db::::outputs(txn, key, *coin).is_none()); + Db::::set_outputs(txn, key, *coin, &[]); + assert!(Db::::queued_payments(txn, key, *coin).is_none()); + Db::::set_queued_payments(txn, key, *coin, &[]); + } + } + + fn flush_key( + &self, + txn: &mut impl DbTxn, + block: &BlockFor, + retiring_key: KeyFor, + new_key: KeyFor, + ) -> impl Send + Future, Self::EphemeralError>> { + async move { + let mut eventualities = HashMap::new(); + for coin in S::NETWORK.coins() { + // Move the payments to the new key + { + let still_queued = Db::::queued_payments(txn, retiring_key, *coin).unwrap(); + let mut new_queued = Db::::queued_payments(txn, new_key, *coin).unwrap(); + + let mut queued = still_queued; + queued.append(&mut new_queued); + + Db::::set_queued_payments(txn, retiring_key, *coin, &[]); + Db::::set_queued_payments(txn, new_key, *coin, &queued); + } + + // Move the outputs to the new key + 
self.flush_outputs(txn, &mut eventualities, block, retiring_key, new_key, *coin).await?; + } + Ok(eventualities) + } + } + + fn retire_key(txn: &mut impl DbTxn, key: KeyFor) { + for coin in S::NETWORK.coins() { + assert!(Db::::outputs(txn, key, *coin).unwrap().is_empty()); + Db::::del_outputs(txn, key, *coin); + assert!(Db::::queued_payments(txn, key, *coin).unwrap().is_empty()); + Db::::del_queued_payments(txn, key, *coin); + } + } + + fn update( + &self, + txn: &mut impl DbTxn, + block: &BlockFor, + active_keys: &[(KeyFor, LifetimeStage)], + update: SchedulerUpdate, + ) -> impl Send + Future, Self::EphemeralError>> { + async move { + let mut eventualities = HashMap::new(); + + // Accumulate the new outputs + { + let mut outputs_by_key = HashMap::new(); + for output in update.outputs() { + // If this aligns for a branch, handle it + if let Some(branch) = Db::::take_pending_branch(txn, output.key(), output.balance()) { + if self + .handle_branch( + txn, + block, + eventualities.entry(output.key().to_bytes().as_ref().to_vec()).or_insert(vec![]), + output.clone(), + branch, + ) + .await? + { + // If we could use it for a branch, we do and move on + // Else, we let it be accumulated by the standard accumulation code + continue; + } + } + + let coin = output.balance().coin; + outputs_by_key + // Index by key and coin + .entry((output.key().to_bytes().as_ref().to_vec(), coin)) + // If we haven't accumulated here prior, read the outputs from the database + .or_insert_with(|| (output.key(), Db::::outputs(txn, output.key(), coin).unwrap())) + .1 + .push(output.clone()); + } + // Write the outputs back to the database + for ((_key_vec, coin), (key, outputs)) in outputs_by_key { + Db::::set_outputs(txn, key, coin, &outputs); + } + } + + // Fulfill the payments we prior couldn't + for (key, _stage) in active_keys { + eventualities + .entry(key.to_bytes().as_ref().to_vec()) + .or_insert(vec![]) + .append(&mut self.step(txn, active_keys, block, *key).await?); + } + + // If this key has been flushed, forward all outputs + match active_keys[0].1 { + LifetimeStage::ActiveYetNotReporting | + LifetimeStage::Active | + LifetimeStage::UsingNewForChange => {} + LifetimeStage::Forwarding | LifetimeStage::Finishing => { + for coin in S::NETWORK.coins() { + self + .flush_outputs( + txn, + &mut eventualities, + block, + active_keys[0].0, + active_keys[1].0, + *coin, + ) + .await?; + } + } + } + + // Create the transactions for the forwards/returns + { + let mut planned_txs = vec![]; + for forward in update.forwards() { + let key = forward.key(); + + assert_eq!(active_keys.len(), 2); + assert_eq!(active_keys[0].1, LifetimeStage::Forwarding); + assert_eq!(active_keys[1].1, LifetimeStage::Active); + let forward_to_key = active_keys[1].0; + + let Some(plan) = self + .planner + .plan_transaction_with_fee_amortization( + // This uses 0 for the operating costs as we don't incur any here + // If the output can't pay for itself to be forwarded, we simply drop it + &mut 0, + block, + vec![forward.clone()], + vec![Payment::new(P::forwarding_address(forward_to_key), forward.balance())], + None, + ) + .await? 
+ else { + continue; + }; + planned_txs.push((key, plan)); + } + for to_return in update.returns() { + let key = to_return.output().key(); + let out_instruction = + Payment::new(to_return.address().clone(), to_return.output().balance()); + let Some(plan) = self + .planner + .plan_transaction_with_fee_amortization( + // This uses 0 for the operating costs as we don't incur any here + // If the output can't pay for itself to be returned, we simply drop it + &mut 0, + block, + vec![to_return.output().clone()], + vec![out_instruction], + None, + ) + .await? + else { + continue; + }; + planned_txs.push((key, plan)); + } + + for (key, planned_tx) in planned_txs { + // Send the transactions off for signing + TransactionsToSign::::send(txn, &key, &planned_tx.signable); + + // Insert the Eventualities into the result + eventualities.get_mut(key.to_bytes().as_ref()).unwrap().push(planned_tx.eventuality); + } + + Ok(eventualities) + } + } + } + + fn fulfill( + &self, + txn: &mut impl DbTxn, + block: &BlockFor, + active_keys: &[(KeyFor, LifetimeStage)], + payments: Vec>>, + ) -> impl Send + Future, Self::EphemeralError>> { + async move { + // Find the key to filfill these payments with + let fulfillment_key = match active_keys[0].1 { + LifetimeStage::ActiveYetNotReporting => { + panic!("expected to fulfill payments despite not reporting for the oldest key") + } + LifetimeStage::Active | LifetimeStage::UsingNewForChange => active_keys[0].0, + LifetimeStage::Forwarding | LifetimeStage::Finishing => active_keys[1].0, + }; + + // Queue the payments for this key + for coin in S::NETWORK.coins() { + let mut queued_payments = Db::::queued_payments(txn, fulfillment_key, *coin).unwrap(); + queued_payments + .extend(payments.iter().filter(|payment| payment.balance().coin == *coin).cloned()); + Db::::set_queued_payments(txn, fulfillment_key, *coin, &queued_payments); + } + + // Handle the queued payments + Ok(HashMap::from([( + fulfillment_key.to_bytes().as_ref().to_vec(), + self.step(txn, active_keys, block, fulfillment_key).await?, + )])) + } + } +} diff --git a/processor/scheduler/utxo/transaction-chaining/Cargo.toml b/processor/scheduler/utxo/transaction-chaining/Cargo.toml new file mode 100644 index 000000000..0b1eb155b --- /dev/null +++ b/processor/scheduler/utxo/transaction-chaining/Cargo.toml @@ -0,0 +1,35 @@ +[package] +name = "serai-processor-transaction-chaining-scheduler" +version = "0.1.0" +description = "Scheduler for UTXO networks with transaction chaining for the Serai processor" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/scheduler/utxo/transaction-chaining" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +publish = false + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[package.metadata.cargo-machete] +ignored = ["scale", "borsh"] + +[lints] +workspace = true + +[dependencies] +group = { version = "0.13", default-features = false } + +scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } +borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } + +serai-primitives = { path = "../../../../substrate/primitives", default-features = false, features = ["std"] } + +serai-db = { path = "../../../../common/db" } + +primitives = { package = "serai-processor-primitives", path = "../../../primitives" } +scanner = { package = "serai-processor-scanner", path = "../../../scanner" } +scheduler-primitives = { 
package = "serai-processor-scheduler-primitives", path = "../../primitives" } +utxo-scheduler-primitives = { package = "serai-processor-utxo-scheduler-primitives", path = "../primitives" } diff --git a/processor/scheduler/utxo/transaction-chaining/LICENSE b/processor/scheduler/utxo/transaction-chaining/LICENSE new file mode 100644 index 000000000..e091b1498 --- /dev/null +++ b/processor/scheduler/utxo/transaction-chaining/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . diff --git a/processor/scheduler/utxo/transaction-chaining/README.md b/processor/scheduler/utxo/transaction-chaining/README.md new file mode 100644 index 000000000..a129b6693 --- /dev/null +++ b/processor/scheduler/utxo/transaction-chaining/README.md @@ -0,0 +1,19 @@ +# Transaction Chaining Scheduler + +A scheduler of transactions for networks premised on the UTXO model which +support transaction chaining. Transaction chaining refers to the ability to +obtain an identifier for an output within a transaction not yet signed usable +to build and sign a transaction spending it. + +### Design + +The scheduler is designed to achieve fulfillment of all expected payments with +an `O(1)` delay (regardless of prior scheduler state), `O(log n)` time, and +`O(log(n) + n)` computational complexity. + +Due to the ability to chain transactions, we can immediately plan/sign dependent +transactions. For the time/computational complexity, we use a tree to fulfill +payments. This quickly gives us the ability to make as many outputs as necessary +(regardless of per-transaction output limits) and only has the latency of +including a chain of `O(log n)` transactions on-chain. The only computational +overhead is in creating the transactions which are branches in the tree. diff --git a/processor/scheduler/utxo/transaction-chaining/src/db.rs b/processor/scheduler/utxo/transaction-chaining/src/db.rs new file mode 100644 index 000000000..11bcd78db --- /dev/null +++ b/processor/scheduler/utxo/transaction-chaining/src/db.rs @@ -0,0 +1,104 @@ +use core::marker::PhantomData; + +use group::GroupEncoding; + +use serai_primitives::{Coin, Amount}; + +use serai_db::{Get, DbTxn, create_db}; + +use primitives::{Payment, ReceivedOutput}; +use scanner::{ScannerFeed, KeyFor, AddressFor, OutputFor}; + +create_db! 
{ + TransactionChainingScheduler { + OperatingCosts: (coin: Coin) -> Amount, + SerializedOutputs: (key: &[u8], coin: Coin) -> Vec, + AlreadyAccumulatedOutput: (id: &[u8]) -> (), + // We should be immediately able to schedule the fulfillment of payments, yet this may not be + // possible if we're in the middle of a multisig rotation (as our output set will be split) + SerializedQueuedPayments: (key: &[u8], coin: Coin) -> Vec, + } +} + +pub(crate) struct Db(PhantomData); +impl Db { + pub(crate) fn operating_costs(getter: &impl Get, coin: Coin) -> Amount { + OperatingCosts::get(getter, coin).unwrap_or(Amount(0)) + } + pub(crate) fn set_operating_costs(txn: &mut impl DbTxn, coin: Coin, amount: Amount) { + OperatingCosts::set(txn, coin, &amount) + } + + pub(crate) fn outputs( + getter: &impl Get, + key: KeyFor, + coin: Coin, + ) -> Option>> { + let buf = SerializedOutputs::get(getter, key.to_bytes().as_ref(), coin)?; + let mut buf = buf.as_slice(); + + let mut res = Vec::with_capacity(buf.len() / 128); + while !buf.is_empty() { + res.push(OutputFor::::read(&mut buf).unwrap()); + } + Some(res) + } + pub(crate) fn set_outputs( + txn: &mut impl DbTxn, + key: KeyFor, + coin: Coin, + outputs: &[OutputFor], + ) { + let mut buf = Vec::with_capacity(outputs.len() * 128); + for output in outputs { + output.write(&mut buf).unwrap(); + } + SerializedOutputs::set(txn, key.to_bytes().as_ref(), coin, &buf); + } + pub(crate) fn del_outputs(txn: &mut impl DbTxn, key: KeyFor, coin: Coin) { + SerializedOutputs::del(txn, key.to_bytes().as_ref(), coin); + } + + pub(crate) fn set_already_accumulated_output( + txn: &mut impl DbTxn, + output: & as ReceivedOutput, AddressFor>>::Id, + ) { + AlreadyAccumulatedOutput::set(txn, output.as_ref(), &()); + } + pub(crate) fn take_if_already_accumulated_output( + txn: &mut impl DbTxn, + output: & as ReceivedOutput, AddressFor>>::Id, + ) -> bool { + AlreadyAccumulatedOutput::take(txn, output.as_ref()).is_some() + } + + pub(crate) fn queued_payments( + getter: &impl Get, + key: KeyFor, + coin: Coin, + ) -> Option>>> { + let buf = SerializedQueuedPayments::get(getter, key.to_bytes().as_ref(), coin)?; + let mut buf = buf.as_slice(); + + let mut res = Vec::with_capacity(buf.len() / 128); + while !buf.is_empty() { + res.push(Payment::read(&mut buf).unwrap()); + } + Some(res) + } + pub(crate) fn set_queued_payments( + txn: &mut impl DbTxn, + key: KeyFor, + coin: Coin, + queued: &[Payment>], + ) { + let mut buf = Vec::with_capacity(queued.len() * 128); + for queued in queued { + queued.write(&mut buf).unwrap(); + } + SerializedQueuedPayments::set(txn, key.to_bytes().as_ref(), coin, &buf); + } + pub(crate) fn del_queued_payments(txn: &mut impl DbTxn, key: KeyFor, coin: Coin) { + SerializedQueuedPayments::del(txn, key.to_bytes().as_ref(), coin); + } +} diff --git a/processor/scheduler/utxo/transaction-chaining/src/lib.rs b/processor/scheduler/utxo/transaction-chaining/src/lib.rs new file mode 100644 index 000000000..bb39dcd30 --- /dev/null +++ b/processor/scheduler/utxo/transaction-chaining/src/lib.rs @@ -0,0 +1,586 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +use core::{marker::PhantomData, future::Future}; +use std::collections::HashMap; + +use group::GroupEncoding; + +use serai_primitives::{Coin, Amount}; + +use serai_db::DbTxn; + +use primitives::{OutputType, ReceivedOutput, Payment}; +use scanner::{ + LifetimeStage, ScannerFeed, KeyFor, AddressFor, OutputFor, EventualityFor, BlockFor, + SchedulerUpdate, 
KeyScopedEventualities, Scheduler as SchedulerTrait, +}; +use scheduler_primitives::*; +use utxo_scheduler_primitives::*; + +mod db; +use db::Db; + +/// The outputs which will be effected by a PlannedTransaction and received by Serai. +pub struct EffectedReceivedOutputs(pub Vec>); + +/// A scheduler of transactions for networks premised on the UTXO model which support +/// transaction chaining. +#[allow(non_snake_case)] +#[derive(Clone)] +pub struct Scheduler>> { + planner: P, + _S: PhantomData, +} + +impl>> Scheduler { + /// Create a new scheduler. + pub fn new(planner: P) -> Self { + Self { planner, _S: PhantomData } + } + + fn accumulate_outputs(txn: &mut impl DbTxn, outputs: Vec>, from_scanner: bool) { + let mut outputs_by_key = HashMap::new(); + for output in outputs { + if !from_scanner { + // Since this isn't being reported by the scanner, flag it so when the scanner does report + // it, we don't accumulate it again + Db::::set_already_accumulated_output(txn, &output.id()); + } else if Db::::take_if_already_accumulated_output(txn, &output.id()) { + continue; + } + + let coin = output.balance().coin; + outputs_by_key + // Index by key and coin + .entry((output.key().to_bytes().as_ref().to_vec(), coin)) + // If we haven't accumulated here prior, read the outputs from the database + .or_insert_with(|| (output.key(), Db::::outputs(txn, output.key(), coin).unwrap())) + .1 + .push(output); + } + // Write the outputs back to the database + for ((_key_vec, coin), (key, outputs)) in outputs_by_key { + Db::::set_outputs(txn, key, coin, &outputs); + } + } + + async fn aggregate_inputs( + &self, + txn: &mut impl DbTxn, + block: &BlockFor, + key_for_change: KeyFor, + key: KeyFor, + coin: Coin, + ) -> Result>, >::EphemeralError> { + let mut eventualities = vec![]; + + let mut operating_costs = Db::::operating_costs(txn, coin).0; + let mut outputs = Db::::outputs(txn, key, coin).unwrap(); + while outputs.len() > P::MAX_INPUTS { + let to_aggregate = outputs.drain(.. P::MAX_INPUTS).collect::>(); + Db::::set_outputs(txn, key, coin, &outputs); + + let Some(planned) = self + .planner + .plan_transaction_with_fee_amortization( + &mut operating_costs, + block, + to_aggregate, + vec![], + Some(key_for_change), + ) + .await? 
+ else { + continue; + }; + + TransactionsToSign::::send(txn, &key, &planned.signable); + eventualities.push(planned.eventuality); + Self::accumulate_outputs(txn, planned.auxilliary.0, false); + + // Reload the outputs for the next loop iteration + outputs = Db::::outputs(txn, key, coin).unwrap(); + } + + Db::::set_operating_costs(txn, coin, Amount(operating_costs)); + Ok(eventualities) + } + + fn fulfillable_payments( + txn: &mut impl DbTxn, + operating_costs: &mut u64, + key: KeyFor, + coin: Coin, + value_of_outputs: u64, + ) -> Vec>> { + // Fetch all payments for this key + let mut payments = Db::::queued_payments(txn, key, coin).unwrap(); + if payments.is_empty() { + return vec![]; + } + + loop { + // inputs must be >= (payments - operating costs) + // Accordingly, (inputs + operating costs) must be >= payments + let value_fulfillable = value_of_outputs + *operating_costs; + + // Drop to just the payments we can currently fulfill + { + let mut can_handle = 0; + let mut value_used = 0; + for payment in &payments { + value_used += payment.balance().amount.0; + if value_fulfillable < value_used { + break; + } + can_handle += 1; + } + + let remaining_payments = payments.drain(can_handle ..).collect::>(); + // Restore the rest to the database + Db::::set_queued_payments(txn, key, coin, &remaining_payments); + } + + // If these payments are worth less than the operating costs, immediately drop them + let payments_value = payments.iter().map(|payment| payment.balance().amount.0).sum::(); + if payments_value <= *operating_costs { + *operating_costs -= payments_value; + Db::::set_operating_costs(txn, coin, Amount(*operating_costs)); + + // Reset payments to the queued payments + payments = Db::::queued_payments(txn, key, coin).unwrap(); + // If there's no more payments, stop looking for which payments we should fulfill + if payments.is_empty() { + return vec![]; + } + // Find which of these we should handle + continue; + } + + return payments; + } + } + + async fn step( + &self, + txn: &mut impl DbTxn, + active_keys: &[(KeyFor, LifetimeStage)], + block: &BlockFor, + key: KeyFor, + ) -> Result>, >::EphemeralError> { + let mut eventualities = vec![]; + + let key_for_change = match active_keys[0].1 { + LifetimeStage::ActiveYetNotReporting => { + panic!("expected to fulfill payments despite not reporting for the oldest key") + } + LifetimeStage::Active => active_keys[0].0, + LifetimeStage::UsingNewForChange | LifetimeStage::Forwarding | LifetimeStage::Finishing => { + active_keys[1].0 + } + }; + let branch_address = P::branch_address(key); + + 'coin: for coin in S::NETWORK.coins() { + let coin = *coin; + + // Perform any input aggregation we should + eventualities + .append(&mut self.aggregate_inputs(txn, block, key_for_change, key, coin).await?); + + // Fetch the operating costs/outputs + let mut operating_costs = Db::::operating_costs(txn, coin).0; + let outputs = Db::::outputs(txn, key, coin).unwrap(); + if outputs.is_empty() { + continue; + } + + // Fetch the fulfillable payments + let payments = Self::fulfillable_payments( + txn, + &mut operating_costs, + key, + coin, + outputs.iter().map(|output| output.balance().amount.0).sum(), + ); + if payments.is_empty() { + continue; + } + + // If this is our only key, we should be able to fulfill all payments + // Else, we'd be insolvent + if active_keys.len() == 1 { + assert!(Db::::queued_payments(txn, key, coin).unwrap().is_empty()); + } + + // Create a tree to fulfill the payments + let mut tree = vec![P::tree(&payments)]; + + // Create the 
transaction for the root of the tree + let mut branch_outputs = { + // Try creating this transaction twice, once with a change output and once with increased + // operating costs to ensure a change output (as necessary to meet the requirements of the + // scanner API) + let mut planned_outer = None; + for i in 0 .. 2 { + let Some(planned) = self + .planner + .plan_transaction_with_fee_amortization( + &mut operating_costs, + block, + outputs.clone(), + tree[0] + .payments::(coin, &branch_address, tree[0].value()) + .expect("payments were dropped despite providing an input of the needed value"), + Some(key_for_change), + ) + .await? + else { + // This should trip on the first iteration or not at all + assert_eq!(i, 0); + // This doesn't have inputs even worth aggregating so drop the entire tree + Db::::set_operating_costs(txn, coin, Amount(operating_costs)); + continue 'coin; + }; + + // If this doesn't have a change output, increase operating costs and try again + if !planned.has_change { + /* + Since we'll create a change output if it's worth at least dust, amortizing dust from + the payments should solve this. If the new transaction can't afford those operating + costs, then the payments should be amortized out, causing there to be a change or no + transaction at all. + */ + operating_costs += S::dust(coin).0; + continue; + } + + // Since this had a change output, move forward with it + planned_outer = Some(planned); + break; + } + let Some(mut planned) = planned_outer else { + panic!("couldn't create a tree root with a change output") + }; + Db::::set_operating_costs(txn, coin, Amount(operating_costs)); + TransactionsToSign::::send(txn, &key, &planned.signable); + eventualities.push(planned.eventuality); + + // We accumulate the change output, but not the branches as we'll consume them momentarily + Self::accumulate_outputs( + txn, + planned + .auxilliary + .0 + .iter() + .filter(|output| output.kind() == OutputType::Change) + .cloned() + .collect(), + false, + ); + planned.auxilliary.0.retain(|output| output.kind() == OutputType::Branch); + planned.auxilliary.0 + }; + + // Now execute each layer of the tree + tree = match tree.remove(0) { + TreeTransaction::Leaves { .. } => vec![], + TreeTransaction::Branch { children, .. } => children, + }; + while !tree.is_empty() { + // Sort the branch outputs by their value (high to low) + branch_outputs.sort_by_key(|a| a.balance().amount.0); + branch_outputs.reverse(); + // Sort the transactions we should create by their value so they share an order with the + // branch outputs + tree.sort_by_key(TreeTransaction::value); + tree.reverse(); + + // If we dropped any Branch outputs, drop the associated children + tree.truncate(branch_outputs.len()); + assert_eq!(branch_outputs.len(), tree.len()); + + let branch_outputs_for_this_layer = branch_outputs; + let this_layer = tree; + branch_outputs = vec![]; + tree = vec![]; + + for (branch_output, tx) in branch_outputs_for_this_layer.into_iter().zip(this_layer) { + assert_eq!(branch_output.kind(), OutputType::Branch); + + let Some(payments) = + tx.payments::(coin, &branch_address, branch_output.balance().amount.0) + else { + // If this output has become too small to satisfy this branch, drop it + continue; + }; + + let branch_output_id = branch_output.id(); + let Some(mut planned) = self + .planner + .plan_transaction_with_fee_amortization( + // Uses 0 as there's no operating costs to incur/amortize here + &mut 0, + block, + vec![branch_output], + payments, + None, + ) + .await? 
+ else { + // This Branch isn't viable, so drop it (and its children) + continue; + }; + // Since we've made a TX spending this output, don't accumulate it later + Db::::set_already_accumulated_output(txn, &branch_output_id); + TransactionsToSign::::send(txn, &key, &planned.signable); + eventualities.push(planned.eventuality); + + match tx { + TreeTransaction::Leaves { .. } => {} + // If this was a branch, handle its children + TreeTransaction::Branch { mut children, .. } => { + branch_outputs.append(&mut planned.auxilliary.0); + tree.append(&mut children); + } + } + } + } + } + + Ok(eventualities) + } + + async fn flush_outputs( + &self, + txn: &mut impl DbTxn, + eventualities: &mut KeyScopedEventualities, + block: &BlockFor, + from: KeyFor, + to: KeyFor, + coin: Coin, + ) -> Result<(), >::EphemeralError> { + let from_bytes = from.to_bytes().as_ref().to_vec(); + // Ensure our inputs are aggregated + eventualities + .entry(from_bytes.clone()) + .or_insert(vec![]) + .append(&mut self.aggregate_inputs(txn, block, to, from, coin).await?); + + // Now that our inputs are aggregated, transfer all of them to the new key + let mut operating_costs = Db::::operating_costs(txn, coin).0; + let outputs = Db::::outputs(txn, from, coin).unwrap(); + if outputs.is_empty() { + return Ok(()); + } + let planned = self + .planner + .plan_transaction_with_fee_amortization( + &mut operating_costs, + block, + outputs, + vec![], + Some(to), + ) + .await?; + Db::::set_operating_costs(txn, coin, Amount(operating_costs)); + let Some(planned) = planned else { return Ok(()) }; + + TransactionsToSign::::send(txn, &from, &planned.signable); + eventualities.get_mut(&from_bytes).unwrap().push(planned.eventuality); + Self::accumulate_outputs(txn, planned.auxilliary.0, false); + + Ok(()) + } +} + +impl>> SchedulerTrait + for Scheduler +{ + type EphemeralError = P::EphemeralError; + type SignableTransaction = P::SignableTransaction; + + fn activate_key(txn: &mut impl DbTxn, key: KeyFor) { + for coin in S::NETWORK.coins() { + assert!(Db::::outputs(txn, key, *coin).is_none()); + Db::::set_outputs(txn, key, *coin, &[]); + assert!(Db::::queued_payments(txn, key, *coin).is_none()); + Db::::set_queued_payments(txn, key, *coin, &[]); + } + } + + fn flush_key( + &self, + txn: &mut impl DbTxn, + block: &BlockFor, + retiring_key: KeyFor, + new_key: KeyFor, + ) -> impl Send + Future, Self::EphemeralError>> { + async move { + let mut eventualities = HashMap::new(); + for coin in S::NETWORK.coins() { + // Move the payments to the new key + { + let still_queued = Db::::queued_payments(txn, retiring_key, *coin).unwrap(); + let mut new_queued = Db::::queued_payments(txn, new_key, *coin).unwrap(); + + let mut queued = still_queued; + queued.append(&mut new_queued); + + Db::::set_queued_payments(txn, retiring_key, *coin, &[]); + Db::::set_queued_payments(txn, new_key, *coin, &queued); + } + + // Move the outputs to the new key + self.flush_outputs(txn, &mut eventualities, block, retiring_key, new_key, *coin).await?; + } + Ok(eventualities) + } + } + + fn retire_key(txn: &mut impl DbTxn, key: KeyFor) { + for coin in S::NETWORK.coins() { + assert!(Db::::outputs(txn, key, *coin).unwrap().is_empty()); + Db::::del_outputs(txn, key, *coin); + assert!(Db::::queued_payments(txn, key, *coin).unwrap().is_empty()); + Db::::del_queued_payments(txn, key, *coin); + } + } + + fn update( + &self, + txn: &mut impl DbTxn, + block: &BlockFor, + active_keys: &[(KeyFor, LifetimeStage)], + update: SchedulerUpdate, + ) -> impl Send + Future, Self::EphemeralError>> { 
+ async move { + Self::accumulate_outputs(txn, update.outputs().to_vec(), true); + + // Fulfill the payments we prior couldn't + let mut eventualities = HashMap::new(); + for (key, _stage) in active_keys { + assert!(eventualities + .insert(key.to_bytes().as_ref().to_vec(), self.step(txn, active_keys, block, *key).await?) + .is_none()); + } + + // If this key has been flushed, forward all outputs + match active_keys[0].1 { + LifetimeStage::ActiveYetNotReporting | + LifetimeStage::Active | + LifetimeStage::UsingNewForChange => {} + LifetimeStage::Forwarding | LifetimeStage::Finishing => { + for coin in S::NETWORK.coins() { + self + .flush_outputs( + txn, + &mut eventualities, + block, + active_keys[0].0, + active_keys[1].0, + *coin, + ) + .await?; + } + } + } + + // Create the transactions for the forwards/returns + { + let mut planned_txs = vec![]; + for forward in update.forwards() { + let key = forward.key(); + + assert_eq!(active_keys.len(), 2); + assert_eq!(active_keys[0].1, LifetimeStage::Forwarding); + assert_eq!(active_keys[1].1, LifetimeStage::Active); + let forward_to_key = active_keys[1].0; + + let Some(plan) = self + .planner + .plan_transaction_with_fee_amortization( + // This uses 0 for the operating costs as we don't incur any here + // If the output can't pay for itself to be forwarded, we simply drop it + &mut 0, + block, + vec![forward.clone()], + vec![Payment::new(P::forwarding_address(forward_to_key), forward.balance())], + None, + ) + .await? + else { + continue; + }; + planned_txs.push((key, plan)); + } + for to_return in update.returns() { + let key = to_return.output().key(); + let out_instruction = + Payment::new(to_return.address().clone(), to_return.output().balance()); + let Some(plan) = self + .planner + .plan_transaction_with_fee_amortization( + // This uses 0 for the operating costs as we don't incur any here + // If the output can't pay for itself to be returned, we simply drop it + &mut 0, + block, + vec![to_return.output().clone()], + vec![out_instruction], + None, + ) + .await? 
+ else { + continue; + }; + planned_txs.push((key, plan)); + } + + for (key, planned_tx) in planned_txs { + // Send the transactions off for signing + TransactionsToSign::::send(txn, &key, &planned_tx.signable); + + // Insert the Eventualities into the result + eventualities.get_mut(key.to_bytes().as_ref()).unwrap().push(planned_tx.eventuality); + } + + Ok(eventualities) + } + } + } + + fn fulfill( + &self, + txn: &mut impl DbTxn, + block: &BlockFor, + active_keys: &[(KeyFor, LifetimeStage)], + payments: Vec>>, + ) -> impl Send + Future, Self::EphemeralError>> { + async move { + // Find the key to filfill these payments with + let fulfillment_key = match active_keys[0].1 { + LifetimeStage::ActiveYetNotReporting => { + panic!("expected to fulfill payments despite not reporting for the oldest key") + } + LifetimeStage::Active | LifetimeStage::UsingNewForChange => active_keys[0].0, + LifetimeStage::Forwarding | LifetimeStage::Finishing => active_keys[1].0, + }; + + // Queue the payments for this key + for coin in S::NETWORK.coins() { + let mut queued_payments = Db::::queued_payments(txn, fulfillment_key, *coin).unwrap(); + queued_payments + .extend(payments.iter().filter(|payment| payment.balance().coin == *coin).cloned()); + Db::::set_queued_payments(txn, fulfillment_key, *coin, &queued_payments); + } + + // Handle the queued payments + Ok(HashMap::from([( + fulfillment_key.to_bytes().as_ref().to_vec(), + self.step(txn, active_keys, block, fulfillment_key).await?, + )])) + } + } +} diff --git a/processor/signers/Cargo.toml b/processor/signers/Cargo.toml new file mode 100644 index 000000000..652228969 --- /dev/null +++ b/processor/signers/Cargo.toml @@ -0,0 +1,46 @@ +[package] +name = "serai-processor-signers" +version = "0.1.0" +description = "Signers for the Serai processor" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/signers" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +publish = false + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[package.metadata.cargo-machete] +ignored = ["borsh"] + +[lints] +workspace = true + +[dependencies] +rand_core = { version = "0.6", default-features = false } +zeroize = { version = "1", default-features = false, features = ["std"] } + +ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, features = ["std"] } +frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false } +frost-schnorrkel = { path = "../../crypto/schnorrkel", default-features = false } + +scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } +borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } + +serai-primitives = { path = "../../substrate/primitives", default-features = false, features = ["std"] } +serai-validator-sets-primitives = { path = "../../substrate/validator-sets/primitives", default-features = false, features = ["std"] } +serai-in-instructions-primitives = { path = "../../substrate/in-instructions/primitives", default-features = false, features = ["std"] } + +serai-db = { path = "../../common/db" } +log = { version = "0.4", default-features = false, features = ["std"] } +tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] } + +messages = { package = "serai-processor-messages", path = "../messages" } +primitives = { package = "serai-processor-primitives", path = 
"../primitives" } +scanner = { package = "serai-processor-scanner", path = "../scanner" } +scheduler = { package = "serai-processor-scheduler-primitives", path = "../scheduler/primitives" } + +frost-attempt-manager = { package = "serai-processor-frost-attempt-manager", path = "../frost-attempt-manager" } diff --git a/processor/signers/LICENSE b/processor/signers/LICENSE new file mode 100644 index 000000000..e091b1498 --- /dev/null +++ b/processor/signers/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . diff --git a/processor/signers/README.md b/processor/signers/README.md new file mode 100644 index 000000000..b6eddd56a --- /dev/null +++ b/processor/signers/README.md @@ -0,0 +1,6 @@ +# Processor Signers + +Implementations of the tree signers used by a processor (the transaction signer, +the Substrate signer, and the cosigner). + +This library is interacted with via the `serai_processor_messages::sign` API. diff --git a/processor/signers/src/batch/db.rs b/processor/signers/src/batch/db.rs new file mode 100644 index 000000000..a895e0bbb --- /dev/null +++ b/processor/signers/src/batch/db.rs @@ -0,0 +1,13 @@ +use serai_validator_sets_primitives::Session; +use serai_in_instructions_primitives::{Batch, SignedBatch}; + +use serai_db::{Get, DbTxn, create_db}; + +create_db! { + SignersBatch { + ActiveSigningProtocols: (session: Session) -> Vec, + Batches: (id: u32) -> Batch, + SignedBatches: (id: u32) -> SignedBatch, + LastAcknowledgedBatch: () -> u32, + } +} diff --git a/processor/signers/src/batch/mod.rs b/processor/signers/src/batch/mod.rs new file mode 100644 index 000000000..b8ad7ccb4 --- /dev/null +++ b/processor/signers/src/batch/mod.rs @@ -0,0 +1,190 @@ +use core::future::Future; +use std::collections::HashSet; + +use ciphersuite::{group::GroupEncoding, Ristretto}; +use frost::dkg::ThresholdKeys; + +use serai_validator_sets_primitives::Session; +use serai_in_instructions_primitives::{SignedBatch, batch_message}; + +use serai_db::{Get, DbTxn, Db}; + +use messages::sign::VariantSignId; + +use primitives::task::ContinuallyRan; +use scanner::{BatchesToSign, AcknowledgedBatches}; + +use frost_attempt_manager::*; + +use crate::{ + db::{CoordinatorToBatchSignerMessages, BatchSignerToCoordinatorMessages}, + WrappedSchnorrkelMachine, +}; + +mod db; +use db::*; + +pub(crate) fn last_acknowledged_batch(getter: &impl Get) -> Option { + LastAcknowledgedBatch::get(getter) +} + +pub(crate) fn signed_batch(getter: &impl Get, id: u32) -> Option { + SignedBatches::get(getter, id) +} + +// Fetches batches to sign and signs them. 
+pub(crate) struct BatchSignerTask { + db: D, + + session: Session, + external_key: E, + keys: Vec>, + + active_signing_protocols: HashSet, + attempt_manager: AttemptManager, +} + +impl BatchSignerTask { + pub(crate) fn new( + db: D, + session: Session, + external_key: E, + keys: Vec>, + ) -> Self { + let mut active_signing_protocols = HashSet::new(); + let mut attempt_manager = AttemptManager::new( + db.clone(), + session, + keys.first().expect("creating a batch signer with 0 keys").params().i(), + ); + + // Re-register all active signing protocols + for id in ActiveSigningProtocols::get(&db, session).unwrap_or(vec![]) { + active_signing_protocols.insert(id); + + let batch = Batches::get(&db, id).unwrap(); + assert_eq!(batch.id, id); + + let mut machines = Vec::with_capacity(keys.len()); + for keys in &keys { + machines.push(WrappedSchnorrkelMachine::new(keys.clone(), batch_message(&batch))); + } + attempt_manager.register(VariantSignId::Batch(id), machines); + } + + Self { db, session, external_key, keys, active_signing_protocols, attempt_manager } + } +} + +impl ContinuallyRan for BatchSignerTask { + fn run_iteration(&mut self) -> impl Send + Future> { + async move { + let mut iterated = false; + + // Check for new batches to sign + loop { + let mut txn = self.db.txn(); + let Some(batch) = BatchesToSign::try_recv(&mut txn, &self.external_key) else { + break; + }; + iterated = true; + + // Save this to the database as a transaction to sign + self.active_signing_protocols.insert(batch.id); + ActiveSigningProtocols::set( + &mut txn, + self.session, + &self.active_signing_protocols.iter().copied().collect(), + ); + Batches::set(&mut txn, batch.id, &batch); + + let mut machines = Vec::with_capacity(self.keys.len()); + for keys in &self.keys { + machines.push(WrappedSchnorrkelMachine::new(keys.clone(), batch_message(&batch))); + } + for msg in self.attempt_manager.register(VariantSignId::Batch(batch.id), machines) { + BatchSignerToCoordinatorMessages::send(&mut txn, self.session, &msg); + } + + txn.commit(); + } + + // Check for acknowledged Batches (meaning we should no longer sign for these Batches) + loop { + let mut txn = self.db.txn(); + let Some(id) = AcknowledgedBatches::try_recv(&mut txn, &self.external_key) else { + break; + }; + + { + let last_acknowledged = LastAcknowledgedBatch::get(&txn); + if Some(id) > last_acknowledged { + LastAcknowledgedBatch::set(&mut txn, &id); + } + } + + /* + We may have yet to register this signing protocol. + + While `BatchesToSign` is populated before `AcknowledgedBatches`, we could theoretically + have `BatchesToSign` populated with a new batch _while iterating over + `AcknowledgedBatches`_, and then have `AcknowledgedBatched` populated. In that edge case, + we will see the acknowledgement notification before we see the transaction. + + In such a case, we break (dropping the txn, re-queueing the acknowledgement notification). + On the task's next iteration, we'll process the Batch from `BatchesToSign` and be + able to make progress. 
+ */ + if !self.active_signing_protocols.remove(&id) { + break; + } + iterated = true; + + // Since it was, remove this as an active signing protocol + ActiveSigningProtocols::set( + &mut txn, + self.session, + &self.active_signing_protocols.iter().copied().collect(), + ); + // Clean up the database + Batches::del(&mut txn, id); + SignedBatches::del(&mut txn, id); + + // We retire with a txn so we either successfully flag this Batch as acknowledged, and + // won't re-register it (making this retire safe), or we don't flag it, meaning we will + // re-register it, yet that's safe as we have yet to retire it + self.attempt_manager.retire(&mut txn, VariantSignId::Batch(id)); + + txn.commit(); + } + + // Handle any messages sent to us + loop { + let mut txn = self.db.txn(); + let Some(msg) = CoordinatorToBatchSignerMessages::try_recv(&mut txn, self.session) else { + break; + }; + iterated = true; + + match self.attempt_manager.handle(msg) { + Response::Messages(msgs) => { + for msg in msgs { + BatchSignerToCoordinatorMessages::send(&mut txn, self.session, &msg); + } + } + Response::Signature { id, signature } => { + let VariantSignId::Batch(id) = id else { panic!("BatchSignerTask signed a non-Batch") }; + let batch = + Batches::get(&txn, id).expect("signed a Batch we didn't save to the database"); + let signed_batch = SignedBatch { batch, signature: signature.into() }; + SignedBatches::set(&mut txn, signed_batch.batch.id, &signed_batch); + } + } + + txn.commit(); + } + + Ok(iterated) + } + } +} diff --git a/processor/signers/src/coordinator/db.rs b/processor/signers/src/coordinator/db.rs new file mode 100644 index 000000000..c8235ede0 --- /dev/null +++ b/processor/signers/src/coordinator/db.rs @@ -0,0 +1,7 @@ +use serai_db::{Get, DbTxn, create_db}; + +create_db! { + SignersCoordinator { + LastPublishedBatch: () -> u32, + } +} diff --git a/processor/signers/src/coordinator/mod.rs b/processor/signers/src/coordinator/mod.rs new file mode 100644 index 000000000..1e3c84d2a --- /dev/null +++ b/processor/signers/src/coordinator/mod.rs @@ -0,0 +1,177 @@ +use core::future::Future; + +use scale::Decode; +use serai_db::{DbTxn, Db}; + +use primitives::task::ContinuallyRan; + +use crate::{db::*, Coordinator}; + +mod db; + +// Fetches messages to send the coordinator and sends them. 
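The message-handling loops above (and in the coordinator task below) all drain their DB-backed channels the same way: open a transaction, `try_recv` one item, process it, commit, and loop until the channel is empty, breaking without committing if the item can't be handled yet so it is redelivered on the next iteration. A self-contained sketch of that drain-per-transaction shape, where `Queue` and `Txn` are toy stand-ins for the `db_channel!` types and a real database transaction:

```rust
use std::collections::VecDeque;

// Toy stand-ins for a DB-backed channel and a transaction over it.
struct Queue(VecDeque<String>);

struct Txn<'a> {
  queue: &'a mut Queue,
  popped: Option<String>,
}

impl Queue {
  fn txn(&mut self) -> Txn<'_> {
    Txn { queue: self, popped: None }
  }
}

impl Txn<'_> {
  // Take the next message, if any; until `commit`, the take can still be rolled back.
  fn try_recv(&mut self) -> Option<String> {
    self.popped = self.queue.0.pop_front();
    self.popped.clone()
  }
  // Make the removal permanent.
  fn commit(mut self) {
    self.popped = None;
  }
}

impl Drop for Txn<'_> {
  fn drop(&mut self) {
    // A transaction dropped without `commit` re-queues its message (rollback).
    if let Some(msg) = self.popped.take() {
      self.queue.0.push_front(msg);
    }
  }
}

// The draining pattern: one transaction per message, committed only once it's been handled.
fn drain(queue: &mut Queue, mut handle: impl FnMut(&str) -> bool) -> bool {
  let mut iterated = false;
  loop {
    let mut txn = queue.txn();
    let Some(msg) = txn.try_recv() else { break };
    // If this message can't be handled yet, drop the txn so it's redelivered next time.
    if !handle(&msg) {
      break;
    }
    iterated = true;
    txn.commit();
  }
  iterated
}

fn main() {
  let mut queue = Queue(VecDeque::from(["preprocess".to_string(), "share".to_string()]));
  assert!(drain(&mut queue, |msg| {
    println!("handled {msg}");
    true
  }));
  assert!(queue.0.is_empty());
}
```

Committing per message keeps a crash from losing or double-processing work: anything not committed is simply received again on the next iteration.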
+pub(crate) struct CoordinatorTask { + db: D, + coordinator: C, +} + +impl CoordinatorTask { + pub(crate) fn new(db: D, coordinator: C) -> Self { + Self { db, coordinator } + } +} + +impl ContinuallyRan for CoordinatorTask { + fn run_iteration(&mut self) -> impl Send + Future> { + async move { + let mut iterated = false; + + for session in RegisteredKeys::get(&self.db).unwrap_or(vec![]) { + // Publish the messages generated by this key's signers + loop { + let mut txn = self.db.txn(); + let Some(msg) = CosignerToCoordinatorMessages::try_recv(&mut txn, session) else { + break; + }; + iterated = true; + + self + .coordinator + .send(msg) + .await + .map_err(|e| format!("couldn't send sign message to the coordinator: {e:?}"))?; + + txn.commit(); + } + + loop { + let mut txn = self.db.txn(); + let Some(msg) = BatchSignerToCoordinatorMessages::try_recv(&mut txn, session) else { + break; + }; + iterated = true; + + self + .coordinator + .send(msg) + .await + .map_err(|e| format!("couldn't send sign message to the coordinator: {e:?}"))?; + + txn.commit(); + } + + loop { + let mut txn = self.db.txn(); + let Some(msg) = SlashReportSignerToCoordinatorMessages::try_recv(&mut txn, session) + else { + break; + }; + iterated = true; + + self + .coordinator + .send(msg) + .await + .map_err(|e| format!("couldn't send sign message to the coordinator: {e:?}"))?; + + txn.commit(); + } + + loop { + let mut txn = self.db.txn(); + let Some(msg) = TransactionSignerToCoordinatorMessages::try_recv(&mut txn, session) + else { + break; + }; + iterated = true; + + self + .coordinator + .send(msg) + .await + .map_err(|e| format!("couldn't send sign message to the coordinator: {e:?}"))?; + + txn.commit(); + } + + // Publish the cosigns from this session + { + let mut txn = self.db.txn(); + while let Some(((block_number, block_id), signature)) = + Cosign::try_recv(&mut txn, session) + { + iterated = true; + self + .coordinator + .publish_cosign( + block_number, + block_id, + <_>::decode(&mut signature.as_slice()).unwrap(), + ) + .await + .map_err(|e| format!("couldn't publish Cosign: {e:?}"))?; + } + txn.commit(); + } + + // If this session signed its slash report, publish its signature + { + let mut txn = self.db.txn(); + if let Some(slash_report_signature) = SlashReportSignature::try_recv(&mut txn, session) { + iterated = true; + + self + .coordinator + .publish_slash_report_signature( + session, + <_>::decode(&mut slash_report_signature.as_slice()).unwrap(), + ) + .await + .map_err(|e| { + format!("couldn't send slash report signature to the coordinator: {e:?}") + })?; + + txn.commit(); + } + } + } + + // Publish the Batches + { + let mut txn = self.db.txn(); + while let Some(batch) = scanner::Batches::try_recv(&mut txn) { + iterated = true; + self + .coordinator + .publish_batch(batch) + .await + .map_err(|e| format!("couldn't publish Batch: {e:?}"))?; + } + txn.commit(); + } + + // Publish the signed Batches + { + let mut txn = self.db.txn(); + // The last acknowledged Batch may exceed the last Batch we published if we didn't sign for + // the prior Batch(es) (and accordingly didn't publish them) + let last_batch = + crate::batch::last_acknowledged_batch(&txn).max(db::LastPublishedBatch::get(&txn)); + let mut next_batch = last_batch.map_or(0, |id| id + 1); + while let Some(batch) = crate::batch::signed_batch(&txn, next_batch) { + iterated = true; + db::LastPublishedBatch::set(&mut txn, &batch.batch.id); + self + .coordinator + .publish_signed_batch(batch) + .await + .map_err(|e| format!("couldn't publish Batch: 
{e:?}"))?; + next_batch += 1; + } + txn.commit(); + } + + Ok(iterated) + } + } +} diff --git a/processor/signers/src/cosign/db.rs b/processor/signers/src/cosign/db.rs new file mode 100644 index 000000000..01a42446a --- /dev/null +++ b/processor/signers/src/cosign/db.rs @@ -0,0 +1,9 @@ +use serai_validator_sets_primitives::Session; + +use serai_db::{Get, DbTxn, create_db}; + +create_db! { + SignersCosigner { + LatestCosigned: (session: Session) -> u64, + } +} diff --git a/processor/signers/src/cosign/mod.rs b/processor/signers/src/cosign/mod.rs new file mode 100644 index 000000000..2de18e868 --- /dev/null +++ b/processor/signers/src/cosign/mod.rs @@ -0,0 +1,125 @@ +use core::future::Future; + +use ciphersuite::Ristretto; +use frost::dkg::ThresholdKeys; + +use scale::Encode; +use serai_primitives::Signature; +use serai_validator_sets_primitives::Session; + +use serai_db::{DbTxn, Db}; + +use messages::{sign::VariantSignId, coordinator::cosign_block_msg}; + +use primitives::task::ContinuallyRan; + +use frost_attempt_manager::*; + +use crate::{ + db::{ToCosign, Cosign, CoordinatorToCosignerMessages, CosignerToCoordinatorMessages}, + WrappedSchnorrkelMachine, +}; + +mod db; +use db::LatestCosigned; + +/// Fetches the latest cosign information and works on it. +/// +/// Only the latest cosign attempt is kept. We don't work on historical attempts as later cosigns +/// supersede them. +#[allow(non_snake_case)] +pub(crate) struct CosignerTask { + db: D, + + session: Session, + keys: Vec>, + + current_cosign: Option<(u64, [u8; 32])>, + attempt_manager: AttemptManager, +} + +impl CosignerTask { + pub(crate) fn new(db: D, session: Session, keys: Vec>) -> Self { + let attempt_manager = AttemptManager::new( + db.clone(), + session, + keys.first().expect("creating a cosigner with 0 keys").params().i(), + ); + + Self { db, session, keys, current_cosign: None, attempt_manager } + } +} + +impl ContinuallyRan for CosignerTask { + fn run_iteration(&mut self) -> impl Send + Future> { + async move { + let mut iterated = false; + + // Check the cosign to work on + { + let mut txn = self.db.txn(); + if let Some(cosign) = ToCosign::get(&txn, self.session) { + // If this wasn't already signed for... 
+ if LatestCosigned::get(&txn, self.session) < Some(cosign.0) { + // If this isn't the cosign we're currently working on, meaning it's fresh + if self.current_cosign != Some(cosign) { + // Retire the current cosign + if let Some(current_cosign) = self.current_cosign { + assert!(current_cosign.0 < cosign.0); + self.attempt_manager.retire(&mut txn, VariantSignId::Cosign(current_cosign.0)); + } + + // Set the cosign being worked on + self.current_cosign = Some(cosign); + + let mut machines = Vec::with_capacity(self.keys.len()); + { + let message = cosign_block_msg(cosign.0, cosign.1); + for keys in &self.keys { + machines.push(WrappedSchnorrkelMachine::new(keys.clone(), message.clone())); + } + } + for msg in self.attempt_manager.register(VariantSignId::Cosign(cosign.0), machines) { + CosignerToCoordinatorMessages::send(&mut txn, self.session, &msg); + } + + txn.commit(); + } + } + } + } + + // Handle any messages sent to us + loop { + let mut txn = self.db.txn(); + let Some(msg) = CoordinatorToCosignerMessages::try_recv(&mut txn, self.session) else { + break; + }; + iterated = true; + + match self.attempt_manager.handle(msg) { + Response::Messages(msgs) => { + for msg in msgs { + CosignerToCoordinatorMessages::send(&mut txn, self.session, &msg); + } + } + Response::Signature { id, signature } => { + let VariantSignId::Cosign(block_number) = id else { + panic!("CosignerTask signed a non-Cosign") + }; + assert_eq!(Some(block_number), self.current_cosign.map(|cosign| cosign.0)); + + let cosign = self.current_cosign.take().unwrap(); + LatestCosigned::set(&mut txn, self.session, &cosign.0); + // Send the cosign + Cosign::send(&mut txn, self.session, &(cosign, Signature::from(signature).encode())); + } + } + + txn.commit(); + } + + Ok(iterated) + } + } +} diff --git a/processor/signers/src/db.rs b/processor/signers/src/db.rs new file mode 100644 index 000000000..2c13ddba3 --- /dev/null +++ b/processor/signers/src/db.rs @@ -0,0 +1,50 @@ +use serai_validator_sets_primitives::{Session, Slash}; + +use serai_db::{Get, DbTxn, create_db, db_channel}; + +use messages::sign::{ProcessorMessage, CoordinatorMessage}; + +create_db! { + SignersGlobal { + RegisteredKeys: () -> Vec, + SerializedKeys: (session: Session) -> Vec, + LatestRetiredSession: () -> Session, + ToCleanup: () -> Vec<(Session, Vec)>, + + ToCosign: (session: Session) -> (u64, [u8; 32]), + } +} + +db_channel! { + SignersGlobal { + Cosign: (session: Session) -> ((u64, [u8; 32]), Vec), + + SlashReport: (session: Session) -> Vec, + SlashReportSignature: (session: Session) -> Vec, + + /* + TODO: Most of these are pointless? We drop all active signing sessions on reboot. It's + accordingly not valuable to use a DB-backed channel to communicate messages for signing + sessions (Preprocess/Shares). + + Transactions, Batches, Slash Reports, and Cosigns all have their own mechanisms/DB entries + and don't use the following channels. The only questions are: + + 1) If it's safe to drop Reattempt? Or if we need tweaks to enable that + 2) If we reboot with a pending Reattempt, we'll participate on reboot. If we drop that + Reattempt, we won't. Accordingly, we have degraded performance in that edge case in + exchange for less disk IO in the majority of cases. Is that work it? 
+ */ + CoordinatorToCosignerMessages: (session: Session) -> CoordinatorMessage, + CosignerToCoordinatorMessages: (session: Session) -> ProcessorMessage, + + CoordinatorToBatchSignerMessages: (session: Session) -> CoordinatorMessage, + BatchSignerToCoordinatorMessages: (session: Session) -> ProcessorMessage, + + CoordinatorToSlashReportSignerMessages: (session: Session) -> CoordinatorMessage, + SlashReportSignerToCoordinatorMessages: (session: Session) -> ProcessorMessage, + + CoordinatorToTransactionSignerMessages: (session: Session) -> CoordinatorMessage, + TransactionSignerToCoordinatorMessages: (session: Session) -> ProcessorMessage, + } +} diff --git a/processor/signers/src/lib.rs b/processor/signers/src/lib.rs new file mode 100644 index 000000000..a6714fdf6 --- /dev/null +++ b/processor/signers/src/lib.rs @@ -0,0 +1,442 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +use core::{future::Future, fmt::Debug, marker::PhantomData}; +use std::collections::HashMap; + +use zeroize::Zeroizing; + +use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto}; +use frost::dkg::{ThresholdCore, ThresholdKeys}; + +use serai_primitives::Signature; +use serai_validator_sets_primitives::{Session, Slash}; +use serai_in_instructions_primitives::{Batch, SignedBatch}; + +use serai_db::{DbTxn, Db}; + +use messages::sign::{VariantSignId, ProcessorMessage, CoordinatorMessage}; + +use primitives::task::{Task, TaskHandle, ContinuallyRan}; +use scheduler::{Transaction, SignableTransaction, TransactionFor}; +use scanner::{ScannerFeed, Scheduler}; + +mod wrapped_schnorrkel; +pub(crate) use wrapped_schnorrkel::WrappedSchnorrkelMachine; + +pub(crate) mod db; + +mod coordinator; +use coordinator::CoordinatorTask; + +mod cosign; +use cosign::CosignerTask; + +mod batch; +use batch::BatchSignerTask; + +mod slash_report; +use slash_report::SlashReportSignerTask; + +mod transaction; +use transaction::TransactionSignerTask; + +/// A connection to the Coordinator which messages can be published with. +pub trait Coordinator: 'static + Send + Sync { + /// An error encountered when interacting with a coordinator. + /// + /// This MUST be an ephemeral error. Retrying an interaction MUST eventually resolve without + /// manual intervention/changing the arguments. + type EphemeralError: Debug; + + /// Send a `messages::sign::ProcessorMessage`. + fn send( + &mut self, + message: ProcessorMessage, + ) -> impl Send + Future>; + + /// Publish a cosign. + fn publish_cosign( + &mut self, + block_number: u64, + block_id: [u8; 32], + signature: Signature, + ) -> impl Send + Future>; + + /// Publish a `Batch`. + fn publish_batch( + &mut self, + batch: Batch, + ) -> impl Send + Future>; + + /// Publish a `SignedBatch`. + fn publish_signed_batch( + &mut self, + batch: SignedBatch, + ) -> impl Send + Future>; + + /// Publish a slash report's signature. + fn publish_slash_report_signature( + &mut self, + session: Session, + signature: Signature, + ) -> impl Send + Future>; +} + +/// An object capable of publishing a transaction. +pub trait TransactionPublisher: 'static + Send + Sync + Clone { + /// An error encountered when publishing a transaction. + /// + /// This MUST be an ephemeral error. Retrying publication MUST eventually resolve without manual + /// intervention/changing the arguments. + type EphemeralError: Debug; + + /// Publish a transaction. 
+ /// + /// This will be called multiple times, with the same transaction, until the transaction is + /// confirmed on-chain. + /// + /// The transaction already being present in the mempool/on-chain MUST NOT be considered an + /// error. + fn publish(&self, tx: T) -> impl Send + Future>; +} + +struct Tasks { + cosigner: TaskHandle, + batch: TaskHandle, + slash_report: TaskHandle, + transaction: TaskHandle, +} + +/// The signers used by a processor. +#[allow(non_snake_case)] +pub struct Signers< + D: Db, + S: ScannerFeed, + Sch: Scheduler, + P: TransactionPublisher>>, +> { + db: D, + publisher: P, + coordinator_handle: TaskHandle, + tasks: HashMap, + _Sch: PhantomData, + _S: PhantomData, +} + +type CiphersuiteFor = + <>::SignableTransaction as SignableTransaction>::Ciphersuite; +type SignableTransactionFor = >::SignableTransaction; + +/* + This is completely outside of consensus, so the worst that can happen is: + + 1) Leakage of a private key, hence the usage of frost-attempt-manager which has an API to ensure + that doesn't happen + 2) The database isn't perfectly cleaned up (leaving some bytes on disk wasted) + 3) The state isn't perfectly cleaned up (leaving some bytes in RAM wasted) + + The last two are notably possible via a series of race conditions. For example, if an Eventuality + completion comes in *before* we registered a key, the signer will hold the signing protocol in + memory until the session is retired entirely. +*/ +impl< + D: Db, + S: ScannerFeed, + Sch: Scheduler, + P: TransactionPublisher>>, + > Signers +{ + fn tasks( + db: D, + publisher: P, + coordinator_handle: TaskHandle, + session: Session, + substrate_keys: Vec>, + external_keys: Vec>>, + ) -> Tasks { + let (cosign_task, cosign_handle) = Task::new(); + tokio::spawn( + CosignerTask::new(db.clone(), session, substrate_keys.clone()) + .continually_run(cosign_task, vec![coordinator_handle.clone()]), + ); + + let (batch_task, batch_handle) = Task::new(); + tokio::spawn( + BatchSignerTask::new( + db.clone(), + session, + external_keys[0].group_key(), + substrate_keys.clone(), + ) + .continually_run(batch_task, vec![coordinator_handle.clone()]), + ); + + let (slash_report_task, slash_report_handle) = Task::new(); + tokio::spawn( + SlashReportSignerTask::<_, S>::new(db.clone(), session, substrate_keys) + .continually_run(slash_report_task, vec![coordinator_handle.clone()]), + ); + + let (transaction_task, transaction_handle) = Task::new(); + tokio::spawn( + TransactionSignerTask::<_, SignableTransactionFor, _>::new( + db, + publisher, + session, + external_keys, + ) + .continually_run(transaction_task, vec![coordinator_handle]), + ); + + Tasks { + cosigner: cosign_handle, + batch: batch_handle, + slash_report: slash_report_handle, + transaction: transaction_handle, + } + } + /// Initialize the signers. + /// + /// This will spawn tasks for any historically registered keys. + pub fn new(mut db: D, coordinator: impl Coordinator, publisher: P) -> Self { + /* + On boot, perform any database cleanup which was queued. + + We don't do this cleanup at time of dropping the task as we'd need to wait an unbounded + amount of time for the task to stop (requiring an async task), then we'd have to drain the + channels (which would be on a distinct DB transaction and risk not occurring if we rebooted + while waiting for the task to stop). This is the easiest way to handle this. 
+ */ + { + let mut txn = db.txn(); + for (session, external_key_bytes) in db::ToCleanup::get(&txn).unwrap_or(vec![]) { + let mut external_key_bytes = external_key_bytes.as_slice(); + let external_key = CiphersuiteFor::::read_G(&mut external_key_bytes).unwrap(); + assert!(external_key_bytes.is_empty()); + + // Drain the Batches to sign + // This will be fully populated by the scanner before retiry occurs, making this perfect + // in not leaving any pending blobs behind + while scanner::BatchesToSign::try_recv(&mut txn, &external_key).is_some() {} + // Drain the acknowledged batches to no longer sign + while scanner::AcknowledgedBatches::try_recv(&mut txn, &external_key).is_some() {} + + // Drain the transactions to sign + // This will be fully populated by the scheduler before retiry + while scheduler::TransactionsToSign::>::try_recv( + &mut txn, + &external_key, + ) + .is_some() + {} + + // Drain the completed Eventualities + while scanner::CompletedEventualities::try_recv(&mut txn, &external_key).is_some() {} + + // Delete the cosign this session should be working on + db::ToCosign::del(&mut txn, session); + // Drain our DB channels + while db::Cosign::try_recv(&mut txn, session).is_some() {} + while db::SlashReport::try_recv(&mut txn, session).is_some() {} + while db::CoordinatorToCosignerMessages::try_recv(&mut txn, session).is_some() {} + while db::CosignerToCoordinatorMessages::try_recv(&mut txn, session).is_some() {} + while db::CoordinatorToBatchSignerMessages::try_recv(&mut txn, session).is_some() {} + while db::BatchSignerToCoordinatorMessages::try_recv(&mut txn, session).is_some() {} + while db::CoordinatorToSlashReportSignerMessages::try_recv(&mut txn, session).is_some() {} + while db::SlashReportSignerToCoordinatorMessages::try_recv(&mut txn, session).is_some() {} + while db::CoordinatorToTransactionSignerMessages::try_recv(&mut txn, session).is_some() {} + while db::TransactionSignerToCoordinatorMessages::try_recv(&mut txn, session).is_some() {} + } + db::ToCleanup::del(&mut txn); + txn.commit(); + } + + let mut tasks = HashMap::new(); + + let (coordinator_task, coordinator_handle) = Task::new(); + tokio::spawn( + CoordinatorTask::new(db.clone(), coordinator).continually_run(coordinator_task, vec![]), + ); + + for session in db::RegisteredKeys::get(&db).unwrap_or(vec![]) { + let buf = db::SerializedKeys::get(&db, session).unwrap(); + let mut buf = buf.as_slice(); + + let mut substrate_keys = vec![]; + let mut external_keys = vec![]; + while !buf.is_empty() { + substrate_keys + .push(ThresholdKeys::from(ThresholdCore::::read(&mut buf).unwrap())); + external_keys.push(ThresholdKeys::from( + ThresholdCore::>::read(&mut buf).unwrap(), + )); + } + + tasks.insert( + session, + Self::tasks( + db.clone(), + publisher.clone(), + coordinator_handle.clone(), + session, + substrate_keys, + external_keys, + ), + ); + } + + Self { db, publisher, coordinator_handle, tasks, _Sch: PhantomData, _S: PhantomData } + } + + /// Register a set of keys to sign with. + /// + /// If this session (or a session after it) has already been retired, this is a NOP. 
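`Signers::new` above rebuilds each session's key shares by reading `SerializedKeys` back as alternating substrate/external serializations until the buffer is empty, and `register_keys` below writes them in that same order. A toy round-trip of that layout, with a length-prefixed `Record` standing in for `ThresholdCore` (assumed here to be self-describing on read, as the read loop above implies):

```rust
use std::io::{self, Read, Write};

// Stand-in for a key share which can write itself and read itself back.
#[derive(Clone, Debug, PartialEq)]
struct Record(Vec<u8>);

impl Record {
  fn write(&self, writer: &mut impl Write) -> io::Result<()> {
    writer.write_all(&u32::try_from(self.0.len()).unwrap().to_le_bytes())?;
    writer.write_all(&self.0)
  }
  fn read(reader: &mut impl Read) -> io::Result<Self> {
    let mut len = [0; 4];
    reader.read_exact(&mut len)?;
    let mut buf = vec![0; usize::try_from(u32::from_le_bytes(len)).unwrap()];
    reader.read_exact(&mut buf)?;
    Ok(Self(buf))
  }
}

fn main() {
  // Write alternating (substrate, external) records, as register_keys does.
  let pairs = vec![
    (Record(b"substrate-0".to_vec()), Record(b"external-0".to_vec())),
    (Record(b"substrate-1".to_vec()), Record(b"external-1".to_vec())),
  ];
  let mut buf = Vec::new();
  for (substrate, external) in &pairs {
    substrate.write(&mut buf).unwrap();
    external.write(&mut buf).unwrap();
  }

  // Read them back until the buffer is empty, as the signers do on boot.
  let mut slice = buf.as_slice();
  let mut read_back = Vec::new();
  while !slice.is_empty() {
    read_back.push((Record::read(&mut slice).unwrap(), Record::read(&mut slice).unwrap()));
  }
  assert_eq!(pairs, read_back);
}
```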
+ pub fn register_keys( + &mut self, + txn: &mut impl DbTxn, + session: Session, + substrate_keys: Vec>, + external_keys: Vec>>, + ) { + // Don't register already retired keys + if Some(session.0) <= db::LatestRetiredSession::get(txn).map(|session| session.0) { + return; + } + + { + let mut sessions = db::RegisteredKeys::get(txn).unwrap_or_else(|| Vec::with_capacity(1)); + sessions.push(session); + db::RegisteredKeys::set(txn, &sessions); + } + + { + let mut buf = Zeroizing::new(Vec::with_capacity(2 * substrate_keys.len() * 128)); + for (substrate_keys, external_keys) in substrate_keys.iter().zip(&external_keys) { + buf.extend(&*substrate_keys.serialize()); + buf.extend(&*external_keys.serialize()); + } + db::SerializedKeys::set(txn, session, &buf); + } + + // Spawn the tasks + self.tasks.insert( + session, + Self::tasks( + self.db.clone(), + self.publisher.clone(), + self.coordinator_handle.clone(), + session, + substrate_keys, + external_keys, + ), + ); + } + + /// Retire the signers for a session. + /// + /// This MUST be called in order, for every session (even if we didn't register keys for this + /// session). This MUST only be called after slash report publication, or after that process + /// times out (not once the key is done with regards to the external network). + pub fn retire_session( + &mut self, + txn: &mut impl DbTxn, + session: Session, + external_key: &impl GroupEncoding, + ) { + // Update the latest retired session + { + let next_to_retire = + db::LatestRetiredSession::get(txn).map_or(Session(0), |session| Session(session.0 + 1)); + assert_eq!(session, next_to_retire); + db::LatestRetiredSession::set(txn, &session); + } + + // Update RegisteredKeys/SerializedKeys + if let Some(registered) = db::RegisteredKeys::get(txn) { + db::RegisteredKeys::set( + txn, + ®istered.into_iter().filter(|session_i| *session_i != session).collect(), + ); + } + db::SerializedKeys::del(txn, session); + + // Queue the session for clean up + let mut to_cleanup = db::ToCleanup::get(txn).unwrap_or(vec![]); + to_cleanup.push((session, external_key.to_bytes().as_ref().to_vec())); + db::ToCleanup::set(txn, &to_cleanup); + + // Drop the task handles, which will cause the tasks to close + self.tasks.remove(&session); + } + + /// Queue handling a message. + /// + /// This is a cheap call and able to be done inline from a higher-level loop. + pub fn queue_message(&mut self, txn: &mut impl DbTxn, message: &CoordinatorMessage) { + let sign_id = message.sign_id(); + let tasks = self.tasks.get(&sign_id.session); + match sign_id.id { + VariantSignId::Cosign(_) => { + db::CoordinatorToCosignerMessages::send(txn, sign_id.session, message); + if let Some(tasks) = tasks { + tasks.cosigner.run_now(); + } + } + VariantSignId::Batch(_) => { + db::CoordinatorToBatchSignerMessages::send(txn, sign_id.session, message); + if let Some(tasks) = tasks { + tasks.batch.run_now(); + } + } + VariantSignId::SlashReport(_) => { + db::CoordinatorToSlashReportSignerMessages::send(txn, sign_id.session, message); + if let Some(tasks) = tasks { + tasks.slash_report.run_now(); + } + } + VariantSignId::Transaction(_) => { + db::CoordinatorToTransactionSignerMessages::send(txn, sign_id.session, message); + if let Some(tasks) = tasks { + tasks.transaction.run_now(); + } + } + } + } + + /// Cosign a block. + /// + /// This is a cheap call and able to be done inline from a higher-level loop. 
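`queue_message` above is pure dispatch: the message is always written to the durable channel for the matching `VariantSignId`, and the corresponding task is only nudged if this session's tasks are still running. A self-contained sketch of that persist-then-wake routing, with toy queue and session types standing in for the `db_channel!` tables and `TaskHandle::run_now`:

```rust
use std::collections::{HashMap, VecDeque};

#[derive(Clone, Copy, PartialEq, Eq, Hash)]
struct Session(u32);

// The kinds of signing protocols a message may be for.
enum SignId {
  Cosign(u64),
  Batch(u32),
  SlashReport,
  Transaction([u8; 32]),
}

// Durable per-variant queues for one session (the DB-backed channels in the real code).
#[derive(Default)]
struct Channels {
  cosigner: VecDeque<Vec<u8>>,
  batch: VecDeque<Vec<u8>>,
  slash_report: VecDeque<Vec<u8>>,
  transaction: VecDeque<Vec<u8>>,
}

struct SignerQueues {
  channels: HashMap<Session, Channels>,
  // Sessions whose tasks are running; `true` means the task has been asked to run now.
  running: HashMap<Session, bool>,
}

impl SignerQueues {
  fn queue_message(&mut self, session: Session, id: &SignId, msg: Vec<u8>) {
    // The message is always persisted to the channel for the matching signer...
    let channels = self.channels.entry(session).or_default();
    match id {
      SignId::Cosign(_) => channels.cosigner.push_back(msg),
      SignId::Batch(_) => channels.batch.push_back(msg),
      SignId::SlashReport => channels.slash_report.push_back(msg),
      SignId::Transaction(_) => channels.transaction.push_back(msg),
    }
    // ... but the task is only woken if this session's tasks are still running.
    if let Some(run_now) = self.running.get_mut(&session) {
      *run_now = true;
    }
  }
}

fn main() {
  let mut queues =
    SignerQueues { channels: HashMap::new(), running: HashMap::from([(Session(0), false)]) };
  queues.queue_message(Session(0), &SignId::Batch(3), b"preprocess".to_vec());
  assert_eq!(queues.channels[&Session(0)].batch.len(), 1);
  assert!(queues.running[&Session(0)]);
}
```

Persisting unconditionally is what makes the wake-up optional: a session whose tasks aren't currently running still has the message waiting for it once they are.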
+ pub fn cosign_block( + &mut self, + mut txn: impl DbTxn, + session: Session, + block_number: u64, + block: [u8; 32], + ) { + db::ToCosign::set(&mut txn, session, &(block_number, block)); + txn.commit(); + + if let Some(tasks) = self.tasks.get(&session) { + tasks.cosigner.run_now(); + } + } + + /// Sign a slash report. + /// + /// This is a cheap call and able to be done inline from a higher-level loop. + pub fn sign_slash_report( + &mut self, + mut txn: impl DbTxn, + session: Session, + slash_report: &Vec, + ) { + db::SlashReport::send(&mut txn, session, slash_report); + txn.commit(); + + if let Some(tasks) = self.tasks.get(&session) { + tasks.slash_report.run_now(); + } + } +} diff --git a/processor/signers/src/slash_report.rs b/processor/signers/src/slash_report.rs new file mode 100644 index 000000000..e040798cd --- /dev/null +++ b/processor/signers/src/slash_report.rs @@ -0,0 +1,122 @@ +use core::{marker::PhantomData, future::Future}; + +use ciphersuite::Ristretto; +use frost::dkg::ThresholdKeys; + +use scale::Encode; +use serai_primitives::Signature; +use serai_validator_sets_primitives::{ + Session, ValidatorSet, SlashReport as SlashReportStruct, report_slashes_message, +}; + +use serai_db::{DbTxn, Db}; + +use messages::sign::VariantSignId; + +use primitives::task::ContinuallyRan; +use scanner::ScannerFeed; + +use frost_attempt_manager::*; + +use crate::{ + db::{ + SlashReport, SlashReportSignature, CoordinatorToSlashReportSignerMessages, + SlashReportSignerToCoordinatorMessages, + }, + WrappedSchnorrkelMachine, +}; + +// Fetches slash reports to sign and signs them. +#[allow(non_snake_case)] +pub(crate) struct SlashReportSignerTask { + db: D, + _S: PhantomData, + + session: Session, + keys: Vec>, + + has_slash_report: bool, + attempt_manager: AttemptManager, +} + +impl SlashReportSignerTask { + pub(crate) fn new(db: D, session: Session, keys: Vec>) -> Self { + let attempt_manager = AttemptManager::new( + db.clone(), + session, + keys.first().expect("creating a slash report signer with 0 keys").params().i(), + ); + + Self { db, _S: PhantomData, session, keys, has_slash_report: false, attempt_manager } + } +} + +impl ContinuallyRan for SlashReportSignerTask { + fn run_iteration(&mut self) -> impl Send + Future> { + async move { + let mut iterated = false; + + // Check for the slash report to sign + if !self.has_slash_report { + let mut txn = self.db.txn(); + let Some(slash_report) = SlashReport::try_recv(&mut txn, self.session) else { + return Ok(false); + }; + // We only commit this upon successfully signing this slash report + drop(txn); + iterated = true; + + self.has_slash_report = true; + + let mut machines = Vec::with_capacity(self.keys.len()); + { + let message = report_slashes_message( + &ValidatorSet { network: S::NETWORK, session: self.session }, + &SlashReportStruct(slash_report.try_into().unwrap()), + ); + for keys in &self.keys { + machines.push(WrappedSchnorrkelMachine::new(keys.clone(), message.clone())); + } + } + let mut txn = self.db.txn(); + for msg in self.attempt_manager.register(VariantSignId::SlashReport(self.session), machines) + { + SlashReportSignerToCoordinatorMessages::send(&mut txn, self.session, &msg); + } + txn.commit(); + } + + // Handle any messages sent to us + loop { + let mut txn = self.db.txn(); + let Some(msg) = CoordinatorToSlashReportSignerMessages::try_recv(&mut txn, self.session) + else { + break; + }; + iterated = true; + + match self.attempt_manager.handle(msg) { + Response::Messages(msgs) => { + for msg in msgs { + 
SlashReportSignerToCoordinatorMessages::send(&mut txn, self.session, &msg); + } + } + Response::Signature { id, signature } => { + let VariantSignId::SlashReport(session) = id else { + panic!("SlashReportSignerTask signed a non-SlashReport") + }; + assert_eq!(session, self.session); + // Drain the channel + SlashReport::try_recv(&mut txn, self.session).unwrap(); + // Send the signature + SlashReportSignature::send(&mut txn, session, &Signature::from(signature).encode()); + } + } + + txn.commit(); + } + + Ok(iterated) + } + } +} diff --git a/processor/signers/src/transaction/db.rs b/processor/signers/src/transaction/db.rs new file mode 100644 index 000000000..a91881e71 --- /dev/null +++ b/processor/signers/src/transaction/db.rs @@ -0,0 +1,11 @@ +use serai_validator_sets_primitives::Session; + +use serai_db::{Get, DbTxn, create_db}; + +create_db! { + SignersTransaction { + ActiveSigningProtocols: (session: Session) -> Vec<[u8; 32]>, + SerializedSignableTransactions: (id: [u8; 32]) -> Vec, + SerializedTransactions: (id: [u8; 32]) -> Vec, + } +} diff --git a/processor/signers/src/transaction/mod.rs b/processor/signers/src/transaction/mod.rs new file mode 100644 index 000000000..efb202173 --- /dev/null +++ b/processor/signers/src/transaction/mod.rs @@ -0,0 +1,238 @@ +use core::future::Future; +use std::{ + collections::HashSet, + time::{Duration, Instant}, +}; + +use frost::dkg::ThresholdKeys; + +use serai_validator_sets_primitives::Session; + +use serai_db::{DbTxn, Db}; + +use messages::sign::VariantSignId; + +use primitives::task::ContinuallyRan; +use scheduler::{Transaction, SignableTransaction, TransactionFor, TransactionsToSign}; +use scanner::CompletedEventualities; + +use frost_attempt_manager::*; + +use crate::{ + db::{CoordinatorToTransactionSignerMessages, TransactionSignerToCoordinatorMessages}, + TransactionPublisher, +}; + +mod db; +use db::*; + +// Fetches transactions to sign and signs them. 
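Like the batch signer, the transaction signer below persists the set of active protocol ids alongside each protocol's serialized payload, so its constructor can re-register every in-flight signing protocol after a reboot and retirement can make completion permanent. A stripped-down sketch of that persist/reload/retire cycle, with an in-memory map standing in for the database tables and byte blobs standing in for `SignableTransaction`s:

```rust
use std::collections::{HashMap, HashSet};

// In-memory stand-in for the database tables:
//   ActiveSigningProtocols: () -> Vec<[u8; 32]>
//   SerializedSignableTransactions: (id: [u8; 32]) -> Vec<u8>
#[derive(Default)]
struct Db {
  active: Vec<[u8; 32]>,
  serialized: HashMap<[u8; 32], Vec<u8>>,
}

#[derive(Default)]
struct SignerTask {
  active_signing_protocols: HashSet<[u8; 32]>,
  registered: Vec<(/* id */ [u8; 32], /* payload */ Vec<u8>)>,
}

impl SignerTask {
  // On boot, re-register every protocol which was active when we last ran.
  fn new(db: &Db) -> Self {
    let mut task = Self::default();
    for id in &db.active {
      task.active_signing_protocols.insert(*id);
      let payload = db.serialized[id].clone();
      task.registered.push((*id, payload));
    }
    task
  }

  // When a new protocol starts, persist it before registering it.
  fn start(&mut self, db: &mut Db, id: [u8; 32], payload: Vec<u8>) {
    self.active_signing_protocols.insert(id);
    db.active = self.active_signing_protocols.iter().copied().collect();
    db.serialized.insert(id, payload.clone());
    self.registered.push((id, payload));
  }

  // When the protocol completes, remove it everywhere so it isn't re-registered on reboot.
  fn retire(&mut self, db: &mut Db, id: [u8; 32]) {
    if self.active_signing_protocols.remove(&id) {
      db.active = self.active_signing_protocols.iter().copied().collect();
      db.serialized.remove(&id);
      self.registered.retain(|(registered, _)| *registered != id);
    }
  }
}

fn main() {
  let mut db = Db::default();
  let mut task = SignerTask::new(&db);
  task.start(&mut db, [1; 32], b"tx-1".to_vec());

  // Simulate a reboot: the in-flight protocol is restored from the database.
  let rebooted = SignerTask::new(&db);
  assert_eq!(rebooted.registered.len(), 1);

  task.retire(&mut db, [1; 32]);
  assert!(SignerTask::new(&db).registered.is_empty());
}
```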
+pub(crate) struct TransactionSignerTask< + D: Db, + ST: SignableTransaction, + P: TransactionPublisher>, +> { + db: D, + publisher: P, + + session: Session, + keys: Vec>, + + active_signing_protocols: HashSet<[u8; 32]>, + attempt_manager: AttemptManager::PreprocessMachine>, + + last_publication: Instant, +} + +impl>> + TransactionSignerTask +{ + pub(crate) fn new( + db: D, + publisher: P, + session: Session, + keys: Vec>, + ) -> Self { + let mut active_signing_protocols = HashSet::new(); + let mut attempt_manager = AttemptManager::new( + db.clone(), + session, + keys.first().expect("creating a transaction signer with 0 keys").params().i(), + ); + + // Re-register all active signing protocols + for tx in ActiveSigningProtocols::get(&db, session).unwrap_or(vec![]) { + active_signing_protocols.insert(tx); + + let signable_transaction_buf = SerializedSignableTransactions::get(&db, tx).unwrap(); + let mut signable_transaction_buf = signable_transaction_buf.as_slice(); + let signable_transaction = ST::read(&mut signable_transaction_buf).unwrap(); + assert!(signable_transaction_buf.is_empty()); + assert_eq!(signable_transaction.id(), tx); + + let mut machines = Vec::with_capacity(keys.len()); + for keys in &keys { + machines.push(signable_transaction.clone().sign(keys.clone())); + } + attempt_manager.register(VariantSignId::Transaction(tx), machines); + } + + Self { + db, + publisher, + session, + keys, + active_signing_protocols, + attempt_manager, + last_publication: Instant::now(), + } + } +} + +impl>> ContinuallyRan + for TransactionSignerTask +{ + fn run_iteration(&mut self) -> impl Send + Future> { + async { + let mut iterated = false; + + // Check for new transactions to sign + loop { + let mut txn = self.db.txn(); + let Some(tx) = TransactionsToSign::::try_recv(&mut txn, &self.keys[0].group_key()) + else { + break; + }; + iterated = true; + + // Save this to the database as a transaction to sign + self.active_signing_protocols.insert(tx.id()); + ActiveSigningProtocols::set( + &mut txn, + self.session, + &self.active_signing_protocols.iter().copied().collect(), + ); + { + let mut buf = Vec::with_capacity(256); + tx.write(&mut buf).unwrap(); + SerializedSignableTransactions::set(&mut txn, tx.id(), &buf); + } + + let mut machines = Vec::with_capacity(self.keys.len()); + for keys in &self.keys { + machines.push(tx.clone().sign(keys.clone())); + } + for msg in self.attempt_manager.register(VariantSignId::Transaction(tx.id()), machines) { + TransactionSignerToCoordinatorMessages::send(&mut txn, self.session, &msg); + } + + txn.commit(); + } + + // Check for completed Eventualities (meaning we should no longer sign for these transactions) + loop { + let mut txn = self.db.txn(); + let Some(id) = CompletedEventualities::try_recv(&mut txn, &self.keys[0].group_key()) else { + break; + }; + + /* + We may have yet to register this signing protocol. + + While `TransactionsToSign` is populated before `CompletedEventualities`, we could + theoretically have `TransactionsToSign` populated with a new transaction _while iterating + over `CompletedEventualities`_, and then have `CompletedEventualities` populated. In that + edge case, we will see the completion notification before we see the transaction. + + In such a case, we break (dropping the txn, re-queueing the completion notification). On + the task's next iteration, we'll process the transaction from `TransactionsToSign` and be + able to make progress. 
+        */
+        if !self.active_signing_protocols.remove(&id) {
+          break;
+        }
+        iterated = true;
+
+        // Since it was, remove this as an active signing protocol
+        ActiveSigningProtocols::set(
+          &mut txn,
+          self.session,
+          &self.active_signing_protocols.iter().copied().collect(),
+        );
+        // Clean up the database
+        SerializedSignableTransactions::del(&mut txn, id);
+        SerializedTransactions::del(&mut txn, id);
+
+        // We retire with a txn so we either successfully flag this Eventuality as completed, and
+        // won't re-register it (making this retire safe), or we don't flag it, meaning we will
+        // re-register it, yet that's safe as we have yet to retire it
+        self.attempt_manager.retire(&mut txn, VariantSignId::Transaction(id));
+
+        txn.commit();
+      }
+
+      // Handle any messages sent to us
+      loop {
+        let mut txn = self.db.txn();
+        let Some(msg) = CoordinatorToTransactionSignerMessages::try_recv(&mut txn, self.session)
+        else {
+          break;
+        };
+        iterated = true;
+
+        match self.attempt_manager.handle(msg) {
+          Response::Messages(msgs) => {
+            for msg in msgs {
+              TransactionSignerToCoordinatorMessages::send(&mut txn, self.session, &msg);
+            }
+          }
+          Response::Signature { id, signature: signed_tx } => {
+            let signed_tx: TransactionFor<ST> = signed_tx.into();
+
+            // Save this transaction to the database
+            {
+              let mut buf = Vec::with_capacity(256);
+              signed_tx.write(&mut buf).unwrap();
+              SerializedTransactions::set(
+                &mut txn,
+                match id {
+                  VariantSignId::Transaction(id) => id,
+                  _ => panic!("TransactionSignerTask signed a non-transaction"),
+                },
+                &buf,
+              );
+            }
+
+            match self.publisher.publish(signed_tx).await {
+              Ok(()) => {}
+              Err(e) => log::warn!("couldn't broadcast transaction: {e:?}"),
+            }
+          }
+        }
+
+        txn.commit();
+      }
+
+      // If it's been five minutes since the last publication, republish the transactions for all
+      // active signing protocols
+      if Instant::now().duration_since(self.last_publication) > Duration::from_secs(5 * 60) {
+        for tx in &self.active_signing_protocols {
+          let Some(tx_buf) = SerializedTransactions::get(&self.db, *tx) else { continue };
+          let mut tx_buf = tx_buf.as_slice();
+          let tx = TransactionFor::<ST>::read(&mut tx_buf).unwrap();
+          assert!(tx_buf.is_empty());
+
+          self
+            .publisher
+            .publish(tx)
+            .await
+            .map_err(|e| format!("couldn't re-broadcast transactions: {e:?}"))?;
+        }
+
+        self.last_publication = Instant::now();
+      }
+
+      Ok(iterated)
+    }
+  }
+}
diff --git a/processor/signers/src/wrapped_schnorrkel.rs b/processor/signers/src/wrapped_schnorrkel.rs
new file mode 100644
index 000000000..d81eaa705
--- /dev/null
+++ b/processor/signers/src/wrapped_schnorrkel.rs
@@ -0,0 +1,86 @@
+use std::{
+  collections::HashMap,
+  io::{self, Read},
+};
+
+use rand_core::{RngCore, CryptoRng};
+
+use ciphersuite::Ristretto;
+use frost::{
+  dkg::{Participant, ThresholdKeys},
+  FrostError,
+  algorithm::Algorithm,
+  sign::*,
+};
+use frost_schnorrkel::Schnorrkel;
+
+// This wraps a Schnorrkel sign machine into one with a preset message.
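The point of the wrapper (again illustrative, not lines from this diff): the message is bound when the machine is constructed, so generic signing drivers such as the attempt manager never need to carry the message alongside the machine; `sign` is later invoked with an empty slice and the preset message is what actually gets signed. A rough usage sketch, where `keys` and `message` are assumed caller-supplied values:

```rust
// Illustrative sketch of driving the wrapper. The multi-party exchange of
// preprocesses/shares is elided; `keys` and `message` are assumed inputs.
use rand_core::OsRng;
use ciphersuite::Ristretto;
use frost::{dkg::ThresholdKeys, sign::PreprocessMachine};

fn start_preset_message_signing(keys: ThresholdKeys<Ristretto>, message: Vec<u8>) {
  // The message is fixed here, at construction time
  let machine = WrappedSchnorrkelMachine::new(keys, message);
  // Preprocessing needs no message at all
  let (_sign_machine, _preprocess) = machine.preprocess(&mut OsRng);
  // Once the other signers' preprocesses arrive, `sign` is called with an empty
  // message slice (asserted in the implementation below) and the message bound in
  // `new` is what's signed.
}
```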
+#[derive(Clone)] +pub(crate) struct WrappedSchnorrkelMachine(ThresholdKeys, Vec); +impl WrappedSchnorrkelMachine { + pub(crate) fn new(keys: ThresholdKeys, msg: Vec) -> Self { + Self(keys, msg) + } +} + +pub(crate) struct WrappedSchnorrkelSignMachine( + as PreprocessMachine>::SignMachine, + Vec, +); + +type Signature = as PreprocessMachine>::Signature; +impl PreprocessMachine for WrappedSchnorrkelMachine { + type Preprocess = as PreprocessMachine>::Preprocess; + type Signature = Signature; + type SignMachine = WrappedSchnorrkelSignMachine; + + fn preprocess( + self, + rng: &mut R, + ) -> (Self::SignMachine, Preprocess>::Addendum>) + { + let WrappedSchnorrkelMachine(keys, batch) = self; + let (machine, preprocess) = + AlgorithmMachine::new(Schnorrkel::new(b"substrate"), keys).preprocess(rng); + (WrappedSchnorrkelSignMachine(machine, batch), preprocess) + } +} + +impl SignMachine for WrappedSchnorrkelSignMachine { + type Params = as SignMachine>::Params; + type Keys = as SignMachine>::Keys; + type Preprocess = + as SignMachine>::Preprocess; + type SignatureShare = + as SignMachine>::SignatureShare; + type SignatureMachine = + as SignMachine>::SignatureMachine; + + fn cache(self) -> CachedPreprocess { + unimplemented!() + } + + fn from_cache( + _algorithm: Schnorrkel, + _keys: ThresholdKeys, + _cache: CachedPreprocess, + ) -> (Self, Self::Preprocess) { + unimplemented!() + } + + fn read_preprocess(&self, reader: &mut R) -> io::Result { + self.0.read_preprocess(reader) + } + + fn sign( + self, + preprocesses: HashMap< + Participant, + Preprocess>::Addendum>, + >, + msg: &[u8], + ) -> Result<(Self::SignatureMachine, SignatureShare), FrostError> { + assert!(msg.is_empty()); + self.0.sign(preprocesses, &self.1) + } +} diff --git a/processor/src/additional_key.rs b/processor/src/additional_key.rs deleted file mode 100644 index f875950d7..000000000 --- a/processor/src/additional_key.rs +++ /dev/null @@ -1,14 +0,0 @@ -use ciphersuite::Ciphersuite; - -use crate::networks::Network; - -// Generate a static additional key for a given chain in a globally consistent manner -// Doesn't consider the current group key to increase the simplicity of verifying Serai's status -// Takes an index, k, to support protocols which use multiple secondary keys -// Presumably a view key -pub fn additional_key(k: u64) -> ::F { - ::hash_to_F( - b"Serai DEX Additional Key", - &[N::ID.as_bytes(), &k.to_le_bytes()].concat(), - ) -} diff --git a/processor/src/batch_signer.rs b/processor/src/batch_signer.rs deleted file mode 100644 index 41f50322c..000000000 --- a/processor/src/batch_signer.rs +++ /dev/null @@ -1,421 +0,0 @@ -use core::{marker::PhantomData, fmt}; -use std::collections::HashMap; - -use rand_core::OsRng; - -use frost::{ - curve::Ristretto, - ThresholdKeys, FrostError, - algorithm::Algorithm, - sign::{ - Writable, PreprocessMachine, SignMachine, SignatureMachine, AlgorithmMachine, - AlgorithmSignMachine, AlgorithmSignatureMachine, - }, -}; -use frost_schnorrkel::Schnorrkel; - -use log::{info, debug, warn}; - -use serai_client::{ - primitives::{NetworkId, BlockHash}, - in_instructions::primitives::{Batch, SignedBatch, batch_message}, - validator_sets::primitives::Session, -}; - -use messages::coordinator::*; -use crate::{Get, DbTxn, Db, create_db}; - -create_db!( - BatchSignerDb { - CompletedDb: (id: u32) -> (), - AttemptDb: (id: u32, attempt: u32) -> (), - BatchDb: (block: BlockHash) -> SignedBatch - } -); - -type Preprocess = as PreprocessMachine>::Preprocess; -type SignatureShare = as SignMachine< - 
>::Signature, ->>::SignatureShare; - -pub struct BatchSigner { - db: PhantomData, - - network: NetworkId, - session: Session, - keys: Vec>, - - signable: HashMap, - attempt: HashMap, - #[allow(clippy::type_complexity)] - preprocessing: HashMap>, Vec)>, - #[allow(clippy::type_complexity)] - signing: HashMap, Vec)>, -} - -impl fmt::Debug for BatchSigner { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt - .debug_struct("BatchSigner") - .field("signable", &self.signable) - .field("attempt", &self.attempt) - .finish_non_exhaustive() - } -} - -impl BatchSigner { - pub fn new( - network: NetworkId, - session: Session, - keys: Vec>, - ) -> BatchSigner { - assert!(!keys.is_empty()); - BatchSigner { - db: PhantomData, - - network, - session, - keys, - - signable: HashMap::new(), - attempt: HashMap::new(), - preprocessing: HashMap::new(), - signing: HashMap::new(), - } - } - - fn verify_id(&self, id: &SubstrateSignId) -> Result<(Session, u32, u32), ()> { - let SubstrateSignId { session, id, attempt } = id; - let SubstrateSignableId::Batch(id) = id else { panic!("BatchSigner handed non-Batch") }; - - assert_eq!(session, &self.session); - - // Check the attempt lines up - match self.attempt.get(id) { - // If we don't have an attempt logged, it's because the coordinator is faulty OR because we - // rebooted OR we detected the signed batch on chain - // The latter is the expected flow for batches not actively being participated in - None => { - warn!("not attempting batch {id} #{attempt}"); - Err(())?; - } - Some(our_attempt) => { - if attempt != our_attempt { - warn!("sent signing data for batch {id} #{attempt} yet we have attempt #{our_attempt}"); - Err(())?; - } - } - } - - Ok((*session, *id, *attempt)) - } - - #[must_use] - fn attempt( - &mut self, - txn: &mut D::Transaction<'_>, - id: u32, - attempt: u32, - ) -> Option { - // See above commentary for why this doesn't emit SignedBatch - if CompletedDb::get(txn, id).is_some() { - return None; - } - - // Check if we're already working on this attempt - if let Some(curr_attempt) = self.attempt.get(&id) { - if curr_attempt >= &attempt { - warn!("told to attempt {id} #{attempt} yet we're already working on {curr_attempt}"); - return None; - } - } - - // Start this attempt - let block = if let Some(batch) = self.signable.get(&id) { - batch.block - } else { - warn!("told to attempt signing a batch we aren't currently signing for"); - return None; - }; - - // Delete any existing machines - self.preprocessing.remove(&id); - self.signing.remove(&id); - - // Update the attempt number - self.attempt.insert(id, attempt); - - info!("signing batch {id} #{attempt}"); - - // If we reboot mid-sign, the current design has us abort all signs and wait for latter - // attempts/new signing protocols - // This is distinct from the DKG which will continue DKG sessions, even on reboot - // This is because signing is tolerant of failures of up to 1/3rd of the group - // The DKG requires 100% participation - // While we could apply similar tricks as the DKG (a seeded RNG) to achieve support for - // reboots, it's not worth the complexity when messing up here leaks our secret share - // - // Despite this, on reboot, we'll get told of active signing items, and may be in this - // branch again for something we've already attempted - // - // Only run if this hasn't already been attempted - // TODO: This isn't complete as this txn may not be committed with the expected timing - if AttemptDb::get(txn, id, attempt).is_some() { - warn!( - "already attempted batch 
{id}, attempt #{attempt}. this is an error if we didn't reboot" - ); - return None; - } - AttemptDb::set(txn, id, attempt, &()); - - let mut machines = vec![]; - let mut preprocesses = vec![]; - let mut serialized_preprocesses = vec![]; - for keys in &self.keys { - // b"substrate" is a literal from sp-core - let machine = AlgorithmMachine::new(Schnorrkel::new(b"substrate"), keys.clone()); - - let (machine, preprocess) = machine.preprocess(&mut OsRng); - machines.push(machine); - serialized_preprocesses.push(preprocess.serialize().try_into().unwrap()); - preprocesses.push(preprocess); - } - self.preprocessing.insert(id, (machines, preprocesses)); - - let id = SubstrateSignId { session: self.session, id: SubstrateSignableId::Batch(id), attempt }; - - // Broadcast our preprocesses - Some(ProcessorMessage::BatchPreprocess { id, block, preprocesses: serialized_preprocesses }) - } - - #[must_use] - pub fn sign(&mut self, txn: &mut D::Transaction<'_>, batch: Batch) -> Option { - debug_assert_eq!(self.network, batch.network); - let id = batch.id; - if CompletedDb::get(txn, id).is_some() { - debug!("Sign batch order for ID we've already completed signing"); - // See batch_signed for commentary on why this simply returns - return None; - } - - self.signable.insert(id, batch); - self.attempt(txn, id, 0) - } - - #[must_use] - pub fn handle( - &mut self, - txn: &mut D::Transaction<'_>, - msg: CoordinatorMessage, - ) -> Option { - match msg { - CoordinatorMessage::CosignSubstrateBlock { .. } => { - panic!("BatchSigner passed CosignSubstrateBlock") - } - - CoordinatorMessage::SignSlashReport { .. } => { - panic!("Cosigner passed SignSlashReport") - } - - CoordinatorMessage::SubstratePreprocesses { id, preprocesses } => { - let (session, id, attempt) = self.verify_id(&id).ok()?; - - let substrate_sign_id = - SubstrateSignId { session, id: SubstrateSignableId::Batch(id), attempt }; - - let (machines, our_preprocesses) = match self.preprocessing.remove(&id) { - // Either rebooted or RPC error, or some invariant - None => { - warn!("not preprocessing for {id}. 
this is an error if we didn't reboot"); - return None; - } - Some(preprocess) => preprocess, - }; - - let mut parsed = HashMap::new(); - for l in { - let mut keys = preprocesses.keys().copied().collect::>(); - keys.sort(); - keys - } { - let mut preprocess_ref = preprocesses.get(&l).unwrap().as_slice(); - let Ok(res) = machines[0].read_preprocess(&mut preprocess_ref) else { - return Some( - (ProcessorMessage::InvalidParticipant { id: substrate_sign_id, participant: l }) - .into(), - ); - }; - if !preprocess_ref.is_empty() { - return Some( - (ProcessorMessage::InvalidParticipant { id: substrate_sign_id, participant: l }) - .into(), - ); - } - parsed.insert(l, res); - } - let preprocesses = parsed; - - // Only keep a single machine as we only need one to get the signature - let mut signature_machine = None; - let mut shares = vec![]; - let mut serialized_shares = vec![]; - for (m, machine) in machines.into_iter().enumerate() { - let mut preprocesses = preprocesses.clone(); - for (i, our_preprocess) in our_preprocesses.clone().into_iter().enumerate() { - if i != m { - assert!(preprocesses.insert(self.keys[i].params().i(), our_preprocess).is_none()); - } - } - - let (machine, share) = match machine - .sign(preprocesses, &batch_message(&self.signable[&id])) - { - Ok(res) => res, - Err(e) => match e { - FrostError::InternalError(_) | - FrostError::InvalidParticipant(_, _) | - FrostError::InvalidSigningSet(_) | - FrostError::InvalidParticipantQuantity(_, _) | - FrostError::DuplicatedParticipant(_) | - FrostError::MissingParticipant(_) => unreachable!(), - - FrostError::InvalidPreprocess(l) | FrostError::InvalidShare(l) => { - return Some( - (ProcessorMessage::InvalidParticipant { id: substrate_sign_id, participant: l }) - .into(), - ) - } - }, - }; - if m == 0 { - signature_machine = Some(machine); - } - - let mut share_bytes = [0; 32]; - share_bytes.copy_from_slice(&share.serialize()); - serialized_shares.push(share_bytes); - - shares.push(share); - } - self.signing.insert(id, (signature_machine.unwrap(), shares)); - - // Broadcast our shares - Some( - (ProcessorMessage::SubstrateShare { id: substrate_sign_id, shares: serialized_shares }) - .into(), - ) - } - - CoordinatorMessage::SubstrateShares { id, shares } => { - let (session, id, attempt) = self.verify_id(&id).ok()?; - - let substrate_sign_id = - SubstrateSignId { session, id: SubstrateSignableId::Batch(id), attempt }; - - let (machine, our_shares) = match self.signing.remove(&id) { - // Rebooted, RPC error, or some invariant - None => { - // If preprocessing has this ID, it means we were never sent the preprocess by the - // coordinator - if self.preprocessing.contains_key(&id) { - panic!("never preprocessed yet signing?"); - } - - warn!("not preprocessing for {id}. 
this is an error if we didn't reboot"); - return None; - } - Some(signing) => signing, - }; - - let mut parsed = HashMap::new(); - for l in { - let mut keys = shares.keys().copied().collect::>(); - keys.sort(); - keys - } { - let mut share_ref = shares.get(&l).unwrap().as_slice(); - let Ok(res) = machine.read_share(&mut share_ref) else { - return Some( - (ProcessorMessage::InvalidParticipant { id: substrate_sign_id, participant: l }) - .into(), - ); - }; - if !share_ref.is_empty() { - return Some( - (ProcessorMessage::InvalidParticipant { id: substrate_sign_id, participant: l }) - .into(), - ); - } - parsed.insert(l, res); - } - let mut shares = parsed; - - for (i, our_share) in our_shares.into_iter().enumerate().skip(1) { - assert!(shares.insert(self.keys[i].params().i(), our_share).is_none()); - } - - let sig = match machine.complete(shares) { - Ok(res) => res, - Err(e) => match e { - FrostError::InternalError(_) | - FrostError::InvalidParticipant(_, _) | - FrostError::InvalidSigningSet(_) | - FrostError::InvalidParticipantQuantity(_, _) | - FrostError::DuplicatedParticipant(_) | - FrostError::MissingParticipant(_) => unreachable!(), - - FrostError::InvalidPreprocess(l) | FrostError::InvalidShare(l) => { - return Some( - (ProcessorMessage::InvalidParticipant { id: substrate_sign_id, participant: l }) - .into(), - ) - } - }, - }; - - info!("signed batch {id} with attempt #{attempt}"); - - let batch = - SignedBatch { batch: self.signable.remove(&id).unwrap(), signature: sig.into() }; - - // Save the batch in case it's needed for recovery - BatchDb::set(txn, batch.batch.block, &batch); - CompletedDb::set(txn, id, &()); - - // Stop trying to sign for this batch - assert!(self.attempt.remove(&id).is_some()); - assert!(self.preprocessing.remove(&id).is_none()); - assert!(self.signing.remove(&id).is_none()); - - Some((messages::substrate::ProcessorMessage::SignedBatch { batch }).into()) - } - - CoordinatorMessage::BatchReattempt { id } => { - let SubstrateSignableId::Batch(batch_id) = id.id else { - panic!("BatchReattempt passed non-Batch ID") - }; - self.attempt(txn, batch_id, id.attempt).map(Into::into) - } - } - } - - pub fn batch_signed(&mut self, txn: &mut D::Transaction<'_>, id: u32) { - // Stop trying to sign for this batch - CompletedDb::set(txn, id, &()); - - self.signable.remove(&id); - self.attempt.remove(&id); - self.preprocessing.remove(&id); - self.signing.remove(&id); - - // This doesn't emit SignedBatch because it doesn't have access to the SignedBatch - // This function is expected to only be called once Substrate acknowledges this block, - // which means its batch must have been signed - // While a successive batch's signing would also cause this block to be acknowledged, Substrate - // guarantees a batch's ordered inclusion - - // This also doesn't return any messages since all mutation from the Batch being signed happens - // on the substrate::CoordinatorMessage::SubstrateBlock message (which SignedBatch is meant to - // end up triggering) - } -} diff --git a/processor/src/coordinator.rs b/processor/src/coordinator.rs deleted file mode 100644 index 26786e30c..000000000 --- a/processor/src/coordinator.rs +++ /dev/null @@ -1,43 +0,0 @@ -use messages::{ProcessorMessage, CoordinatorMessage}; - -use message_queue::{Service, Metadata, client::MessageQueue}; - -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Message { - pub id: u64, - pub msg: CoordinatorMessage, -} - -#[async_trait::async_trait] -pub trait Coordinator { - async fn send(&mut self, msg: impl Send + Into); - async 
fn recv(&mut self) -> Message; - async fn ack(&mut self, msg: Message); -} - -#[async_trait::async_trait] -impl Coordinator for MessageQueue { - async fn send(&mut self, msg: impl Send + Into) { - let msg: ProcessorMessage = msg.into(); - let metadata = Metadata { from: self.service, to: Service::Coordinator, intent: msg.intent() }; - let msg = borsh::to_vec(&msg).unwrap(); - - self.queue(metadata, msg).await; - } - - async fn recv(&mut self) -> Message { - let msg = self.next(Service::Coordinator).await; - - let id = msg.id; - - // Deserialize it into a CoordinatorMessage - let msg: CoordinatorMessage = - borsh::from_slice(&msg.msg).expect("message wasn't a borsh-encoded CoordinatorMessage"); - - return Message { id, msg }; - } - - async fn ack(&mut self, msg: Message) { - MessageQueue::ack(self, Service::Coordinator, msg.id).await - } -} diff --git a/processor/src/cosigner.rs b/processor/src/cosigner.rs deleted file mode 100644 index a9fb6cccc..000000000 --- a/processor/src/cosigner.rs +++ /dev/null @@ -1,296 +0,0 @@ -use core::fmt; -use std::collections::HashMap; - -use rand_core::OsRng; - -use frost::{ - curve::Ristretto, - ThresholdKeys, FrostError, - algorithm::Algorithm, - sign::{ - Writable, PreprocessMachine, SignMachine, SignatureMachine, AlgorithmMachine, - AlgorithmSignMachine, AlgorithmSignatureMachine, - }, -}; -use frost_schnorrkel::Schnorrkel; - -use log::{info, warn}; - -use serai_client::validator_sets::primitives::Session; - -use messages::coordinator::*; -use crate::{Get, DbTxn, create_db}; - -create_db! { - CosignerDb { - Completed: (id: [u8; 32]) -> (), - Attempt: (id: [u8; 32], attempt: u32) -> (), - } -} - -type Preprocess = as PreprocessMachine>::Preprocess; -type SignatureShare = as SignMachine< - >::Signature, ->>::SignatureShare; - -pub struct Cosigner { - session: Session, - keys: Vec>, - - block_number: u64, - id: [u8; 32], - attempt: u32, - #[allow(clippy::type_complexity)] - preprocessing: Option<(Vec>, Vec)>, - #[allow(clippy::type_complexity)] - signing: Option<(AlgorithmSignatureMachine, Vec)>, -} - -impl fmt::Debug for Cosigner { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt - .debug_struct("Cosigner") - .field("session", &self.session) - .field("block_number", &self.block_number) - .field("id", &self.id) - .field("attempt", &self.attempt) - .field("preprocessing", &self.preprocessing.is_some()) - .field("signing", &self.signing.is_some()) - .finish_non_exhaustive() - } -} - -impl Cosigner { - pub fn new( - txn: &mut impl DbTxn, - session: Session, - keys: Vec>, - block_number: u64, - id: [u8; 32], - attempt: u32, - ) -> Option<(Cosigner, ProcessorMessage)> { - assert!(!keys.is_empty()); - - if Completed::get(txn, id).is_some() { - return None; - } - - if Attempt::get(txn, id, attempt).is_some() { - warn!( - "already attempted cosigning {}, attempt #{}. 
this is an error if we didn't reboot", - hex::encode(id), - attempt, - ); - return None; - } - Attempt::set(txn, id, attempt, &()); - - info!("cosigning block {} with attempt #{}", hex::encode(id), attempt); - - let mut machines = vec![]; - let mut preprocesses = vec![]; - let mut serialized_preprocesses = vec![]; - for keys in &keys { - // b"substrate" is a literal from sp-core - let machine = AlgorithmMachine::new(Schnorrkel::new(b"substrate"), keys.clone()); - - let (machine, preprocess) = machine.preprocess(&mut OsRng); - machines.push(machine); - serialized_preprocesses.push(preprocess.serialize().try_into().unwrap()); - preprocesses.push(preprocess); - } - let preprocessing = Some((machines, preprocesses)); - - let substrate_sign_id = - SubstrateSignId { session, id: SubstrateSignableId::CosigningSubstrateBlock(id), attempt }; - - Some(( - Cosigner { session, keys, block_number, id, attempt, preprocessing, signing: None }, - ProcessorMessage::CosignPreprocess { - id: substrate_sign_id, - preprocesses: serialized_preprocesses, - }, - )) - } - - #[must_use] - pub fn handle( - &mut self, - txn: &mut impl DbTxn, - msg: CoordinatorMessage, - ) -> Option { - match msg { - CoordinatorMessage::CosignSubstrateBlock { .. } => { - panic!("Cosigner passed CosignSubstrateBlock") - } - - CoordinatorMessage::SignSlashReport { .. } => { - panic!("Cosigner passed SignSlashReport") - } - - CoordinatorMessage::SubstratePreprocesses { id, preprocesses } => { - assert_eq!(id.session, self.session); - let SubstrateSignableId::CosigningSubstrateBlock(block) = id.id else { - panic!("cosigner passed Batch") - }; - if block != self.id { - panic!("given preprocesses for a distinct block than cosigner is signing") - } - if id.attempt != self.attempt { - panic!("given preprocesses for a distinct attempt than cosigner is signing") - } - - let (machines, our_preprocesses) = match self.preprocessing.take() { - // Either rebooted or RPC error, or some invariant - None => { - warn!( - "not preprocessing for {}. 
this is an error if we didn't reboot", - hex::encode(block), - ); - return None; - } - Some(preprocess) => preprocess, - }; - - let mut parsed = HashMap::new(); - for l in { - let mut keys = preprocesses.keys().copied().collect::>(); - keys.sort(); - keys - } { - let mut preprocess_ref = preprocesses.get(&l).unwrap().as_slice(); - let Ok(res) = machines[0].read_preprocess(&mut preprocess_ref) else { - return Some(ProcessorMessage::InvalidParticipant { id, participant: l }); - }; - if !preprocess_ref.is_empty() { - return Some(ProcessorMessage::InvalidParticipant { id, participant: l }); - } - parsed.insert(l, res); - } - let preprocesses = parsed; - - // Only keep a single machine as we only need one to get the signature - let mut signature_machine = None; - let mut shares = vec![]; - let mut serialized_shares = vec![]; - for (m, machine) in machines.into_iter().enumerate() { - let mut preprocesses = preprocesses.clone(); - for (i, our_preprocess) in our_preprocesses.clone().into_iter().enumerate() { - if i != m { - assert!(preprocesses.insert(self.keys[i].params().i(), our_preprocess).is_none()); - } - } - - let (machine, share) = - match machine.sign(preprocesses, &cosign_block_msg(self.block_number, self.id)) { - Ok(res) => res, - Err(e) => match e { - FrostError::InternalError(_) | - FrostError::InvalidParticipant(_, _) | - FrostError::InvalidSigningSet(_) | - FrostError::InvalidParticipantQuantity(_, _) | - FrostError::DuplicatedParticipant(_) | - FrostError::MissingParticipant(_) => unreachable!(), - - FrostError::InvalidPreprocess(l) | FrostError::InvalidShare(l) => { - return Some(ProcessorMessage::InvalidParticipant { id, participant: l }) - } - }, - }; - if m == 0 { - signature_machine = Some(machine); - } - - let mut share_bytes = [0; 32]; - share_bytes.copy_from_slice(&share.serialize()); - serialized_shares.push(share_bytes); - - shares.push(share); - } - self.signing = Some((signature_machine.unwrap(), shares)); - - // Broadcast our shares - Some(ProcessorMessage::SubstrateShare { id, shares: serialized_shares }) - } - - CoordinatorMessage::SubstrateShares { id, shares } => { - assert_eq!(id.session, self.session); - let SubstrateSignableId::CosigningSubstrateBlock(block) = id.id else { - panic!("cosigner passed Batch") - }; - if block != self.id { - panic!("given preprocesses for a distinct block than cosigner is signing") - } - if id.attempt != self.attempt { - panic!("given preprocesses for a distinct attempt than cosigner is signing") - } - - let (machine, our_shares) = match self.signing.take() { - // Rebooted, RPC error, or some invariant - None => { - // If preprocessing has this ID, it means we were never sent the preprocess by the - // coordinator - if self.preprocessing.is_some() { - panic!("never preprocessed yet signing?"); - } - - warn!( - "not preprocessing for {}. 
this is an error if we didn't reboot", - hex::encode(block) - ); - return None; - } - Some(signing) => signing, - }; - - let mut parsed = HashMap::new(); - for l in { - let mut keys = shares.keys().copied().collect::>(); - keys.sort(); - keys - } { - let mut share_ref = shares.get(&l).unwrap().as_slice(); - let Ok(res) = machine.read_share(&mut share_ref) else { - return Some(ProcessorMessage::InvalidParticipant { id, participant: l }); - }; - if !share_ref.is_empty() { - return Some(ProcessorMessage::InvalidParticipant { id, participant: l }); - } - parsed.insert(l, res); - } - let mut shares = parsed; - - for (i, our_share) in our_shares.into_iter().enumerate().skip(1) { - assert!(shares.insert(self.keys[i].params().i(), our_share).is_none()); - } - - let sig = match machine.complete(shares) { - Ok(res) => res, - Err(e) => match e { - FrostError::InternalError(_) | - FrostError::InvalidParticipant(_, _) | - FrostError::InvalidSigningSet(_) | - FrostError::InvalidParticipantQuantity(_, _) | - FrostError::DuplicatedParticipant(_) | - FrostError::MissingParticipant(_) => unreachable!(), - - FrostError::InvalidPreprocess(l) | FrostError::InvalidShare(l) => { - return Some(ProcessorMessage::InvalidParticipant { id, participant: l }) - } - }, - }; - - info!("cosigned {} with attempt #{}", hex::encode(block), id.attempt); - - Completed::set(txn, block, &()); - - Some(ProcessorMessage::CosignedBlock { - block_number: self.block_number, - block, - signature: sig.to_bytes().to_vec(), - }) - } - CoordinatorMessage::BatchReattempt { .. } => panic!("BatchReattempt passed to Cosigner"), - } - } -} diff --git a/processor/src/db.rs b/processor/src/db.rs deleted file mode 100644 index ffd7c43ad..000000000 --- a/processor/src/db.rs +++ /dev/null @@ -1,43 +0,0 @@ -use std::io::Read; - -use scale::{Encode, Decode}; -use serai_client::validator_sets::primitives::{Session, KeyPair}; - -pub use serai_db::*; - -use crate::networks::{Block, Network}; - -create_db!( - MainDb { - HandledMessageDb: (id: u64) -> (), - PendingActivationsDb: () -> Vec - } -); - -impl PendingActivationsDb { - pub fn pending_activation( - getter: &impl Get, - ) -> Option<(>::Id, Session, KeyPair)> { - if let Some(bytes) = Self::get(getter) { - if !bytes.is_empty() { - let mut slice = bytes.as_slice(); - let (session, key_pair) = <(Session, KeyPair)>::decode(&mut slice).unwrap(); - let mut block_before_queue_block = >::Id::default(); - slice.read_exact(block_before_queue_block.as_mut()).unwrap(); - assert!(slice.is_empty()); - return Some((block_before_queue_block, session, key_pair)); - } - } - None - } - pub fn set_pending_activation( - txn: &mut impl DbTxn, - block_before_queue_block: &>::Id, - session: Session, - key_pair: KeyPair, - ) { - let mut buf = (session, key_pair).encode(); - buf.extend(block_before_queue_block.as_ref()); - Self::set(txn, &buf); - } -} diff --git a/processor/src/lib.rs b/processor/src/lib.rs deleted file mode 100644 index 19f67508b..000000000 --- a/processor/src/lib.rs +++ /dev/null @@ -1,15 +0,0 @@ -#![allow(dead_code)] - -mod plan; -pub use plan::*; - -mod db; -pub(crate) use db::*; - -mod key_gen; - -pub mod networks; -pub(crate) mod multisigs; - -mod additional_key; -pub use additional_key::additional_key; diff --git a/processor/src/main.rs b/processor/src/main.rs deleted file mode 100644 index 2d05ad4dc..000000000 --- a/processor/src/main.rs +++ /dev/null @@ -1,759 +0,0 @@ -use std::{time::Duration, collections::HashMap}; - -use zeroize::{Zeroize, Zeroizing}; - -use ciphersuite::{ - 
group::{ff::PrimeField, GroupEncoding}, - Ciphersuite, Ristretto, -}; -use dkg::evrf::EvrfCurve; - -use log::{info, warn}; -use tokio::time::sleep; - -use serai_client::{ - primitives::{BlockHash, NetworkId}, - validator_sets::primitives::{Session, KeyPair}, -}; - -use messages::{ - coordinator::{ - SubstrateSignableId, PlanMeta, CoordinatorMessage as CoordinatorCoordinatorMessage, - }, - CoordinatorMessage, -}; - -use serai_env as env; - -use message_queue::{Service, client::MessageQueue}; - -mod plan; -pub use plan::*; - -mod networks; -use networks::{Block, Network}; -#[cfg(feature = "bitcoin")] -use networks::Bitcoin; -#[cfg(feature = "ethereum")] -use networks::Ethereum; -#[cfg(feature = "monero")] -use networks::Monero; - -mod additional_key; -pub use additional_key::additional_key; - -mod db; -pub use db::*; - -mod coordinator; -pub use coordinator::*; - -mod key_gen; -use key_gen::{SessionDb, KeyConfirmed, KeyGen}; - -mod signer; -use signer::Signer; - -mod cosigner; -use cosigner::Cosigner; - -mod batch_signer; -use batch_signer::BatchSigner; - -mod slash_report_signer; -use slash_report_signer::SlashReportSigner; - -mod multisigs; -use multisigs::{MultisigEvent, MultisigManager}; - -#[cfg(test)] -mod tests; - -#[global_allocator] -static ALLOCATOR: zalloc::ZeroizingAlloc = - zalloc::ZeroizingAlloc(std::alloc::System); - -// Items which are mutably borrowed by Tributary. -// Any exceptions to this have to be carefully monitored in order to ensure consistency isn't -// violated. -struct TributaryMutable { - // The following are actually mutably borrowed by Substrate as well. - // - Substrate triggers key gens, and determines which to use. - // - SubstrateBlock events cause scheduling which causes signing. - // - // This is still considered Tributary-mutable as most mutation (preprocesses/shares) happens by - // the Tributary. - // - // Creation of tasks is by Substrate, yet this is safe since the mutable borrow is transferred to - // Tributary. - // - // Tributary stops mutating a key gen attempt before Substrate is made aware of it, ensuring - // Tributary drops its mutable borrow before Substrate acquires it. Tributary will maintain a - // mutable borrow on the *key gen task*, yet the finalization code can successfully run for any - // attempt. - // - // The only other note is how the scanner may cause a signer task to be dropped, effectively - // invalidating the Tributary's mutable borrow. The signer is coded to allow for attempted usage - // of a dropped task. - key_gen: KeyGen, - signers: HashMap>, - - // This is also mutably borrowed by the Scanner. - // The Scanner starts new sign tasks. - // The Tributary mutates already-created signed tasks, potentially completing them. - // Substrate may mark tasks as completed, invalidating any existing mutable borrows. - // The safety of this follows as written above. - - // There should only be one BatchSigner at a time (see #277) - batch_signer: Option>, - - // Solely mutated by the tributary. - cosigner: Option, - slash_report_signer: Option, -} - -// Items which are mutably borrowed by Substrate. -// Any exceptions to this have to be carefully monitored in order to ensure consistency isn't -// violated. - -/* - The MultisigManager contains the Scanner and Schedulers. - - The scanner is expected to autonomously operate, scanning blocks as they appear. When a block is - sufficiently confirmed, the scanner causes the Substrate signer to sign a batch. 
It itself only - mutates its list of finalized blocks, to protect against re-orgs, and its in-memory state though. - - Disk mutations to the scan-state only happens once the relevant `Batch` is included on Substrate. - It can't be mutated as soon as the `Batch` is signed as we need to know the order of `Batch`s - relevant to `Burn`s. - - Schedulers take in new outputs, confirmed in `Batch`s, and outbound payments, triggered by - `Burn`s. - - Substrate also decides when to move to a new multisig, hence why this entire object is - Substrate-mutable. - - Since MultisigManager should always be verifiable, and the Tributary is temporal, MultisigManager - being entirely SubstrateMutable shows proper data pipe-lining. -*/ - -type SubstrateMutable = MultisigManager; - -async fn handle_coordinator_msg( - txn: &mut D::Transaction<'_>, - network: &N, - coordinator: &mut Co, - tributary_mutable: &mut TributaryMutable, - substrate_mutable: &mut SubstrateMutable, - msg: &Message, -) { - // If this message expects a higher block number than we have, halt until synced - async fn wait( - txn: &D::Transaction<'_>, - substrate_mutable: &SubstrateMutable, - block_hash: &BlockHash, - ) { - let mut needed_hash = >::Id::default(); - needed_hash.as_mut().copy_from_slice(&block_hash.0); - - loop { - // Ensure our scanner has scanned this block, which means our daemon has this block at - // a sufficient depth - if substrate_mutable.block_number(txn, &needed_hash).await.is_none() { - warn!( - "node is desynced. we haven't scanned {} which should happen after {} confirms", - hex::encode(&needed_hash), - N::CONFIRMATIONS, - ); - sleep(Duration::from_secs(10)).await; - continue; - }; - break; - } - - // TODO2: Sanity check we got an AckBlock (or this is the AckBlock) for the block in question - - /* - let synced = |context: &SubstrateContext, key| -> Result<(), ()> { - // Check that we've synced this block and can actually operate on it ourselves - let latest = scanner.latest_scanned(key); - if usize::try_from(context.network_latest_finalized_block).unwrap() < latest { - log::warn!( - "external network node disconnected/desynced from rest of the network. 
\ - our block: {latest:?}, network's acknowledged: {}", - context.network_latest_finalized_block, - ); - Err(())?; - } - Ok(()) - }; - */ - } - - if let Some(required) = msg.msg.required_block() { - // wait only reads from, it doesn't mutate, substrate_mutable - wait(txn, substrate_mutable, &required).await; - } - - async fn activate_key( - network: &N, - substrate_mutable: &mut SubstrateMutable, - tributary_mutable: &mut TributaryMutable, - txn: &mut D::Transaction<'_>, - session: Session, - key_pair: KeyPair, - activation_number: usize, - ) { - info!("activating {session:?}'s keys at {activation_number}"); - - let network_key = ::Curve::read_G::<&[u8]>(&mut key_pair.1.as_ref()) - .expect("Substrate finalized invalid point as a network's key"); - - if tributary_mutable.key_gen.in_set(&session) { - // See TributaryMutable's struct definition for why this block is safe - let KeyConfirmed { substrate_keys, network_keys } = - tributary_mutable.key_gen.confirm(txn, session, &key_pair); - if session.0 == 0 { - tributary_mutable.batch_signer = - Some(BatchSigner::new(N::NETWORK, session, substrate_keys)); - } - tributary_mutable - .signers - .insert(session, Signer::new(network.clone(), session, network_keys)); - } - - substrate_mutable.add_key(txn, activation_number, network_key).await; - } - - match msg.msg.clone() { - CoordinatorMessage::KeyGen(msg) => { - for msg in tributary_mutable.key_gen.handle(txn, msg) { - coordinator.send(msg).await; - } - } - - CoordinatorMessage::Sign(msg) => { - if let Some(msg) = tributary_mutable - .signers - .get_mut(&msg.session()) - .expect("coordinator told us to sign with a signer we don't have") - .handle(txn, msg) - .await - { - coordinator.send(msg).await; - } - } - - CoordinatorMessage::Coordinator(msg) => match msg { - CoordinatorCoordinatorMessage::CosignSubstrateBlock { id, block_number } => { - let SubstrateSignableId::CosigningSubstrateBlock(block) = id.id else { - panic!("CosignSubstrateBlock id didn't have a CosigningSubstrateBlock") - }; - let Some(keys) = tributary_mutable.key_gen.substrate_keys_by_session(id.session) else { - panic!("didn't have key shares for the key we were told to cosign with"); - }; - if let Some((cosigner, msg)) = - Cosigner::new(txn, id.session, keys, block_number, block, id.attempt) - { - tributary_mutable.cosigner = Some(cosigner); - coordinator.send(msg).await; - } else { - log::warn!("Cosigner::new returned None"); - } - } - CoordinatorCoordinatorMessage::SignSlashReport { id, report } => { - assert_eq!(id.id, SubstrateSignableId::SlashReport); - let Some(keys) = tributary_mutable.key_gen.substrate_keys_by_session(id.session) else { - panic!("didn't have key shares for the key we were told to perform a slash report with"); - }; - if let Some((slash_report_signer, msg)) = - SlashReportSigner::new(txn, N::NETWORK, id.session, keys, report, id.attempt) - { - tributary_mutable.slash_report_signer = Some(slash_report_signer); - coordinator.send(msg).await; - } else { - log::warn!("SlashReportSigner::new returned None"); - } - } - _ => { - let (is_cosign, is_batch, is_slash_report) = match msg { - CoordinatorCoordinatorMessage::CosignSubstrateBlock { .. } | - CoordinatorCoordinatorMessage::SignSlashReport { .. } => (false, false, false), - CoordinatorCoordinatorMessage::SubstratePreprocesses { ref id, .. } | - CoordinatorCoordinatorMessage::SubstrateShares { ref id, .. 
} => ( - matches!(&id.id, SubstrateSignableId::CosigningSubstrateBlock(_)), - matches!(&id.id, SubstrateSignableId::Batch(_)), - matches!(&id.id, SubstrateSignableId::SlashReport), - ), - CoordinatorCoordinatorMessage::BatchReattempt { .. } => (false, true, false), - }; - - if is_cosign { - if let Some(cosigner) = tributary_mutable.cosigner.as_mut() { - if let Some(msg) = cosigner.handle(txn, msg) { - coordinator.send(msg).await; - } - } else { - log::warn!( - "received message for cosigner yet didn't have a cosigner. {}", - "this is an error if we didn't reboot", - ); - } - } else if is_batch { - if let Some(msg) = tributary_mutable - .batch_signer - .as_mut() - .expect( - "coordinator told us to sign a batch when we don't currently have a Substrate signer", - ) - .handle(txn, msg) - { - coordinator.send(msg).await; - } - } else if is_slash_report { - if let Some(slash_report_signer) = tributary_mutable.slash_report_signer.as_mut() { - if let Some(msg) = slash_report_signer.handle(txn, msg) { - coordinator.send(msg).await; - } - } else { - log::warn!( - "received message for slash report signer yet didn't have {}", - "a slash report signer. this is an error if we didn't reboot", - ); - } - } - } - }, - - CoordinatorMessage::Substrate(msg) => { - match msg { - messages::substrate::CoordinatorMessage::ConfirmKeyPair { context, session, key_pair } => { - // This is the first key pair for this network so no block has been finalized yet - // TODO: Write documentation for this in docs/ - // TODO: Use an Option instead of a magic? - if context.network_latest_finalized_block.0 == [0; 32] { - assert!(tributary_mutable.signers.is_empty()); - assert!(tributary_mutable.batch_signer.is_none()); - assert!(tributary_mutable.cosigner.is_none()); - // We can't check this as existing is no longer pub - // assert!(substrate_mutable.existing.as_ref().is_none()); - - // Wait until a network's block's time exceeds Serai's time - // These time calls are extremely expensive for what they do, yet they only run when - // confirming the first key pair, before any network activity has occurred, so they - // should be fine - - // If the latest block number is 10, then the block indexed by 1 has 10 confirms - // 10 + 1 - 10 = 1 - let mut block_i; - while { - block_i = (network.get_latest_block_number_with_retries().await + 1) - .saturating_sub(N::CONFIRMATIONS); - network.get_block_with_retries(block_i).await.time(network).await < context.serai_time - } { - info!( - "serai confirmed the first key pair for a set. {} {}", - "we're waiting for a network's finalized block's time to exceed unix time ", - context.serai_time, - ); - sleep(Duration::from_secs(5)).await; - } - - // Find the first block to do so - let mut earliest = block_i; - // earliest > 0 prevents a panic if Serai creates keys before the genesis block - // which... 
should be impossible - // Yet a prevented panic is a prevented panic - while (earliest > 0) && - (network.get_block_with_retries(earliest - 1).await.time(network).await >= - context.serai_time) - { - earliest -= 1; - } - - // Use this as the activation block - let activation_number = earliest; - - activate_key( - network, - substrate_mutable, - tributary_mutable, - txn, - session, - key_pair, - activation_number, - ) - .await; - } else { - let mut block_before_queue_block = >::Id::default(); - block_before_queue_block - .as_mut() - .copy_from_slice(&context.network_latest_finalized_block.0); - // We can't set these keys for activation until we know their queue block, which we - // won't until the next Batch is confirmed - // Set this variable so when we get the next Batch event, we can handle it - PendingActivationsDb::set_pending_activation::( - txn, - &block_before_queue_block, - session, - key_pair, - ); - } - } - - messages::substrate::CoordinatorMessage::SubstrateBlock { - context, - block: substrate_block, - burns, - batches, - } => { - if let Some((block, session, key_pair)) = - PendingActivationsDb::pending_activation::(txn) - { - // Only run if this is a Batch belonging to a distinct block - if context.network_latest_finalized_block.as_ref() != block.as_ref() { - let mut queue_block = >::Id::default(); - queue_block.as_mut().copy_from_slice(context.network_latest_finalized_block.as_ref()); - - let activation_number = substrate_mutable - .block_number(txn, &queue_block) - .await - .expect("KeyConfirmed from context we haven't synced") + - N::CONFIRMATIONS; - - activate_key( - network, - substrate_mutable, - tributary_mutable, - txn, - session, - key_pair, - activation_number, - ) - .await; - //clear pending activation - txn.del(PendingActivationsDb::key()); - } - } - - // Since this block was acknowledged, we no longer have to sign the batches within it - if let Some(batch_signer) = tributary_mutable.batch_signer.as_mut() { - for batch_id in batches { - batch_signer.batch_signed(txn, batch_id); - } - } - - let (acquired_lock, to_sign) = - substrate_mutable.substrate_block(txn, network, context, burns).await; - - // Send SubstrateBlockAck, with relevant plan IDs, before we trigger the signing of these - // plans - if !tributary_mutable.signers.is_empty() { - coordinator - .send(messages::coordinator::ProcessorMessage::SubstrateBlockAck { - block: substrate_block, - plans: to_sign - .iter() - .filter_map(|signable| { - SessionDb::get(txn, signable.0.to_bytes().as_ref()) - .map(|session| PlanMeta { session, id: signable.1 }) - }) - .collect(), - }) - .await; - } - - // See commentary in TributaryMutable for why this is safe - let signers = &mut tributary_mutable.signers; - for (key, id, tx, eventuality) in to_sign { - if let Some(session) = SessionDb::get(txn, key.to_bytes().as_ref()) { - let signer = signers.get_mut(&session).unwrap(); - if let Some(msg) = signer.sign_transaction(txn, id, tx, &eventuality).await { - coordinator.send(msg).await; - } - } - } - - // This is not premature, even if this block had multiple `Batch`s created, as the first - // `Batch` alone will trigger all Plans/Eventualities/Signs - if acquired_lock { - substrate_mutable.release_scanner_lock().await; - } - } - } - } - } -} - -async fn boot( - raw_db: &mut D, - network: &N, - coordinator: &mut Co, -) -> (D, TributaryMutable, SubstrateMutable) { - fn read_key_from_env(label: &'static str) -> Zeroizing { - let key_hex = - Zeroizing::new(env::var(label).unwrap_or_else(|| panic!("{label} wasn't provided"))); - 
let bytes = Zeroizing::new( - hex::decode(key_hex).unwrap_or_else(|_| panic!("{label} wasn't a valid hex string")), - ); - - let mut repr = ::Repr::default(); - if repr.as_ref().len() != bytes.len() { - panic!("{label} wasn't the correct length"); - } - repr.as_mut().copy_from_slice(bytes.as_slice()); - let res = Zeroizing::new( - Option::from(::from_repr(repr)) - .unwrap_or_else(|| panic!("{label} wasn't a valid scalar")), - ); - repr.as_mut().zeroize(); - res - } - - let key_gen = KeyGen::::new( - raw_db.clone(), - read_key_from_env::<::EmbeddedCurve>("SUBSTRATE_EVRF_KEY"), - read_key_from_env::<::EmbeddedCurve>("NETWORK_EVRF_KEY"), - ); - - let (multisig_manager, current_keys, actively_signing) = - MultisigManager::new(raw_db, network).await; - - let mut batch_signer = None; - let mut signers = HashMap::new(); - - for (i, key) in current_keys.iter().enumerate() { - let Some((session, (substrate_keys, network_keys))) = key_gen.keys(key) else { continue }; - let network_key = network_keys[0].group_key(); - - // If this is the oldest key, load the BatchSigner for it as the active BatchSigner - // The new key only takes responsibility once the old key is fully deprecated - // - // We don't have to load any state for this since the Scanner will re-fire any events - // necessary, only no longer scanning old blocks once Substrate acks them - if i == 0 { - batch_signer = Some(BatchSigner::new(N::NETWORK, session, substrate_keys)); - } - - // The Scanner re-fires events as needed for batch_signer yet not signer - // This is due to the transactions which we start signing from due to a block not being - // guaranteed to be signed before we stop scanning the block on reboot - // We could simplify the Signer flow by delaying when it acks a block, yet that'd: - // 1) Increase the startup time - // 2) Cause re-emission of Batch events, which we'd need to check the safety of - // (TODO: Do anyways?) - // 3) Violate the attempt counter (TODO: Is this already being violated?) - let mut signer = Signer::new(network.clone(), session, network_keys); - - // Sign any TXs being actively signed - for (plan, tx, eventuality) in &actively_signing { - if plan.key == network_key { - let mut txn = raw_db.txn(); - if let Some(msg) = - signer.sign_transaction(&mut txn, plan.id(), tx.clone(), eventuality).await - { - coordinator.send(msg).await; - } - // This should only have re-writes of existing data - drop(txn); - } - } - - signers.insert(session, signer); - } - - // Spawn a task to rebroadcast signed TXs yet to be mined into a finalized block - // This hedges against being dropped due to full mempools, temporarily too low of a fee... 
- tokio::spawn(Signer::::rebroadcast_task(raw_db.clone(), network.clone())); - - ( - raw_db.clone(), - TributaryMutable { key_gen, batch_signer, cosigner: None, slash_report_signer: None, signers }, - multisig_manager, - ) -} - -#[allow(clippy::await_holding_lock)] // Needed for txn, unfortunately can't be down-scoped -async fn run(mut raw_db: D, network: N, mut coordinator: Co) { - // We currently expect a contextless bidirectional mapping between these two values - // (which is that any value of A can be interpreted as B and vice versa) - // While we can write a contextual mapping, we have yet to do so - // This check ensures no network which doesn't have a bidirectional mapping is defined - assert_eq!(>::Id::default().as_ref().len(), BlockHash([0u8; 32]).0.len()); - - let (main_db, mut tributary_mutable, mut substrate_mutable) = - boot(&mut raw_db, &network, &mut coordinator).await; - - // We can't load this from the DB as we can't guarantee atomic increments with the ack function - // TODO: Load with a slight tolerance - let mut last_coordinator_msg = None; - - loop { - let mut txn = raw_db.txn(); - - log::trace!("new db txn in run"); - - let mut outer_msg = None; - - tokio::select! { - // This blocks the entire processor until it finishes handling this message - // KeyGen specifically may take a notable amount of processing time - // While that shouldn't be an issue in practice, as after processing an attempt it'll handle - // the other messages in the queue, it may be beneficial to parallelize these - // They could potentially be parallelized by type (KeyGen, Sign, Substrate) without issue - msg = coordinator.recv() => { - if let Some(last_coordinator_msg) = last_coordinator_msg { - assert_eq!(msg.id, last_coordinator_msg + 1); - } - last_coordinator_msg = Some(msg.id); - - // Only handle this if we haven't already - if HandledMessageDb::get(&main_db, msg.id).is_none() { - HandledMessageDb::set(&mut txn, msg.id, &()); - - // This is isolated to better think about how its ordered, or rather, about how the other - // cases aren't ordered - // - // While the coordinator messages are ordered, they're not deterministically ordered - // Tributary-caused messages are deterministically ordered, and Substrate-caused messages - // are deterministically-ordered, yet they're both shoved into a singular queue - // The order at which they're shoved in together isn't deterministic - // - // This is safe so long as Tributary and Substrate messages don't both expect mutable - // references over the same data - handle_coordinator_msg( - &mut txn, - &network, - &mut coordinator, - &mut tributary_mutable, - &mut substrate_mutable, - &msg, - ).await; - } - - outer_msg = Some(msg); - }, - - scanner_event = substrate_mutable.next_scanner_event() => { - let msg = substrate_mutable.scanner_event_to_multisig_event( - &mut txn, - &network, - scanner_event - ).await; - - match msg { - MultisigEvent::Batches(retired_key_new_key, batches) => { - // Start signing this batch - for batch in batches { - info!("created batch {} ({} instructions)", batch.id, batch.instructions.len()); - - // The coordinator expects BatchPreprocess to immediately follow Batch - coordinator.send( - messages::substrate::ProcessorMessage::Batch { batch: batch.clone() } - ).await; - - if let Some(batch_signer) = tributary_mutable.batch_signer.as_mut() { - if let Some(msg) = batch_signer.sign(&mut txn, batch) { - coordinator.send(msg).await; - } - } - } - - if let Some((retired_key, new_key)) = retired_key_new_key { - // Safe to mutate since 
all signing operations are done and no more will be added - if let Some(retired_session) = SessionDb::get(&txn, retired_key.to_bytes().as_ref()) { - tributary_mutable.signers.remove(&retired_session); - } - tributary_mutable.batch_signer.take(); - let keys = tributary_mutable.key_gen.keys(&new_key); - if let Some((session, (substrate_keys, _))) = keys { - tributary_mutable.batch_signer = - Some(BatchSigner::new(N::NETWORK, session, substrate_keys)); - } - } - }, - MultisigEvent::Completed(key, id, tx) => { - if let Some(session) = SessionDb::get(&txn, &key) { - let signer = tributary_mutable.signers.get_mut(&session).unwrap(); - if let Some(msg) = signer.completed(&mut txn, id, &tx) { - coordinator.send(msg).await; - } - } - } - } - }, - } - - txn.commit(); - if let Some(msg) = outer_msg { - coordinator.ack(msg).await; - } - } -} - -#[tokio::main] -async fn main() { - // Override the panic handler with one which will panic if any tokio task panics - { - let existing = std::panic::take_hook(); - std::panic::set_hook(Box::new(move |panic| { - existing(panic); - const MSG: &str = "exiting the process due to a task panicking"; - println!("{MSG}"); - log::error!("{MSG}"); - std::process::exit(1); - })); - } - - if std::env::var("RUST_LOG").is_err() { - std::env::set_var("RUST_LOG", serai_env::var("RUST_LOG").unwrap_or_else(|| "info".to_string())); - } - env_logger::init(); - - #[allow(unused_variables, unreachable_code)] - let db = { - #[cfg(all(feature = "parity-db", feature = "rocksdb"))] - panic!("built with parity-db and rocksdb"); - #[cfg(all(feature = "parity-db", not(feature = "rocksdb")))] - let db = - serai_db::new_parity_db(&serai_env::var("DB_PATH").expect("path to DB wasn't specified")); - #[cfg(feature = "rocksdb")] - let db = - serai_db::new_rocksdb(&serai_env::var("DB_PATH").expect("path to DB wasn't specified")); - db - }; - - // Network configuration - let url = { - let login = env::var("NETWORK_RPC_LOGIN").expect("network RPC login wasn't specified"); - let hostname = env::var("NETWORK_RPC_HOSTNAME").expect("network RPC hostname wasn't specified"); - let port = env::var("NETWORK_RPC_PORT").expect("network port domain wasn't specified"); - "http://".to_string() + &login + "@" + &hostname + ":" + &port - }; - let network_id = match env::var("NETWORK").expect("network wasn't specified").as_str() { - "bitcoin" => NetworkId::Bitcoin, - "ethereum" => NetworkId::Ethereum, - "monero" => NetworkId::Monero, - _ => panic!("unrecognized network"), - }; - - let coordinator = MessageQueue::from_env(Service::Processor(network_id)); - - match network_id { - #[cfg(feature = "bitcoin")] - NetworkId::Bitcoin => run(db, Bitcoin::new(url).await, coordinator).await, - #[cfg(feature = "ethereum")] - NetworkId::Ethereum => { - let relayer_hostname = env::var("ETHEREUM_RELAYER_HOSTNAME") - .expect("ethereum relayer hostname wasn't specified") - .to_string(); - let relayer_port = - env::var("ETHEREUM_RELAYER_PORT").expect("ethereum relayer port wasn't specified"); - let relayer_url = relayer_hostname + ":" + &relayer_port; - run(db.clone(), Ethereum::new(db, url, relayer_url).await, coordinator).await - } - #[cfg(feature = "monero")] - NetworkId::Monero => run(db, Monero::new(url).await, coordinator).await, - _ => panic!("spawning a processor for an unsupported network"), - } -} diff --git a/processor/src/multisigs/db.rs b/processor/src/multisigs/db.rs deleted file mode 100644 index 3d1d13bdf..000000000 --- a/processor/src/multisigs/db.rs +++ /dev/null @@ -1,260 +0,0 @@ -use std::io; - -use 
ciphersuite::Ciphersuite; -pub use serai_db::*; - -use scale::{Encode, Decode}; -use serai_client::{primitives::Balance, in_instructions::primitives::InInstructionWithBalance}; - -use crate::{ - Get, Plan, - networks::{Output, Transaction, Network}, -}; - -#[derive(Clone, PartialEq, Eq, Debug)] -pub enum PlanFromScanning { - Refund(N::Output, N::Address), - Forward(N::Output), -} - -impl PlanFromScanning { - fn read(reader: &mut R) -> io::Result { - let mut kind = [0xff]; - reader.read_exact(&mut kind)?; - match kind[0] { - 0 => { - let output = N::Output::read(reader)?; - - let mut address_vec_len = [0; 4]; - reader.read_exact(&mut address_vec_len)?; - let mut address_vec = - vec![0; usize::try_from(u32::from_le_bytes(address_vec_len)).unwrap()]; - reader.read_exact(&mut address_vec)?; - let address = - N::Address::try_from(address_vec).map_err(|_| "invalid address saved to disk").unwrap(); - - Ok(PlanFromScanning::Refund(output, address)) - } - 1 => { - let output = N::Output::read(reader)?; - Ok(PlanFromScanning::Forward(output)) - } - _ => panic!("reading unrecognized PlanFromScanning"), - } - } - fn write(&self, writer: &mut W) -> io::Result<()> { - match self { - PlanFromScanning::Refund(output, address) => { - writer.write_all(&[0])?; - output.write(writer)?; - - let address_vec: Vec = - address.clone().try_into().map_err(|_| "invalid address being refunded to").unwrap(); - writer.write_all(&u32::try_from(address_vec.len()).unwrap().to_le_bytes())?; - writer.write_all(&address_vec) - } - PlanFromScanning::Forward(output) => { - writer.write_all(&[1])?; - output.write(writer) - } - } - } -} - -create_db!( - MultisigsDb { - NextBatchDb: () -> u32, - PlanDb: (id: &[u8]) -> Vec, - PlansFromScanningDb: (block_number: u64) -> Vec, - OperatingCostsDb: () -> u64, - ResolvedDb: (tx: &[u8]) -> [u8; 32], - SigningDb: (key: &[u8]) -> Vec, - ForwardedOutputDb: (balance: Balance) -> Vec, - DelayedOutputDb: () -> Vec - } -); - -impl PlanDb { - pub fn save_active_plan( - txn: &mut impl DbTxn, - key: &[u8], - block_number: usize, - plan: &Plan, - operating_costs_at_time: u64, - ) { - let id = plan.id(); - - { - let mut signing = SigningDb::get(txn, key).unwrap_or_default(); - - // If we've already noted we're signing this, return - assert_eq!(signing.len() % 32, 0); - for i in 0 .. (signing.len() / 32) { - if signing[(i * 32) .. ((i + 1) * 32)] == id { - return; - } - } - - signing.extend(&id); - SigningDb::set(txn, key, &signing); - } - - { - let mut buf = block_number.to_le_bytes().to_vec(); - plan.write(&mut buf).unwrap(); - buf.extend(&operating_costs_at_time.to_le_bytes()); - Self::set(txn, &id, &buf); - } - } - - pub fn active_plans(getter: &impl Get, key: &[u8]) -> Vec<(u64, Plan, u64)> { - let signing = SigningDb::get(getter, key).unwrap_or_default(); - let mut res = vec![]; - - assert_eq!(signing.len() % 32, 0); - for i in 0 .. (signing.len() / 32) { - let id = &signing[(i * 32) .. ((i + 1) * 32)]; - let buf = Self::get(getter, id).unwrap(); - - let block_number = u64::from_le_bytes(buf[.. 
8].try_into().unwrap()); - let plan = Plan::::read::<&[u8]>(&mut &buf[8 ..]).unwrap(); - assert_eq!(id, &plan.id()); - let operating_costs = u64::from_le_bytes(buf[(buf.len() - 8) ..].try_into().unwrap()); - res.push((block_number, plan, operating_costs)); - } - res - } - - pub fn plan_by_key_with_self_change( - getter: &impl Get, - key: ::G, - id: [u8; 32], - ) -> bool { - let plan = Plan::::read::<&[u8]>(&mut &Self::get(getter, &id).unwrap()[8 ..]).unwrap(); - assert_eq!(plan.id(), id); - if let Some(change) = N::change_address(plan.key) { - (key == plan.key) && (Some(change) == plan.change) - } else { - false - } - } -} - -impl OperatingCostsDb { - pub fn take_operating_costs(txn: &mut impl DbTxn) -> u64 { - let existing = Self::get(txn).unwrap_or_default(); - txn.del(Self::key()); - existing - } - pub fn set_operating_costs(txn: &mut impl DbTxn, amount: u64) { - if amount != 0 { - Self::set(txn, &amount); - } - } -} - -impl ResolvedDb { - pub fn resolve_plan( - txn: &mut impl DbTxn, - key: &[u8], - plan: [u8; 32], - resolution: &>::Id, - ) { - let mut signing = SigningDb::get(txn, key).unwrap_or_default(); - assert_eq!(signing.len() % 32, 0); - - let mut found = false; - for i in 0 .. (signing.len() / 32) { - let start = i * 32; - let end = i + 32; - if signing[start .. end] == plan { - found = true; - signing = [&signing[.. start], &signing[end ..]].concat(); - break; - } - } - - if !found { - log::warn!("told to finish signing {} yet wasn't actively signing it", hex::encode(plan)); - } - SigningDb::set(txn, key, &signing); - Self::set(txn, resolution.as_ref(), &plan); - } -} - -impl PlansFromScanningDb { - pub fn set_plans_from_scanning( - txn: &mut impl DbTxn, - block_number: usize, - plans: Vec>, - ) { - let mut buf = vec![]; - for plan in plans { - plan.write(&mut buf).unwrap(); - } - Self::set(txn, block_number.try_into().unwrap(), &buf); - } - - pub fn take_plans_from_scanning( - txn: &mut impl DbTxn, - block_number: usize, - ) -> Option>> { - let block_number = u64::try_from(block_number).unwrap(); - let res = Self::get(txn, block_number).map(|plans| { - let mut plans_ref = plans.as_slice(); - let mut res = vec![]; - while !plans_ref.is_empty() { - res.push(PlanFromScanning::::read(&mut plans_ref).unwrap()); - } - res - }); - if res.is_some() { - txn.del(Self::key(block_number)); - } - res - } -} - -impl ForwardedOutputDb { - pub fn save_forwarded_output(txn: &mut impl DbTxn, instruction: &InInstructionWithBalance) { - let mut existing = Self::get(txn, instruction.balance).unwrap_or_default(); - existing.extend(instruction.encode()); - Self::set(txn, instruction.balance, &existing); - } - - pub fn take_forwarded_output( - txn: &mut impl DbTxn, - balance: Balance, - ) -> Option { - let outputs = Self::get(txn, balance)?; - let mut outputs_ref = outputs.as_slice(); - let res = InInstructionWithBalance::decode(&mut outputs_ref).unwrap(); - assert!(outputs_ref.len() < outputs.len()); - if outputs_ref.is_empty() { - txn.del(Self::key(balance)); - } else { - Self::set(txn, balance, &outputs); - } - Some(res) - } -} - -impl DelayedOutputDb { - pub fn save_delayed_output(txn: &mut impl DbTxn, instruction: &InInstructionWithBalance) { - let mut existing = Self::get(txn).unwrap_or_default(); - existing.extend(instruction.encode()); - Self::set(txn, &existing); - } - - pub fn take_delayed_outputs(txn: &mut impl DbTxn) -> Vec { - let Some(outputs) = Self::get(txn) else { return vec![] }; - txn.del(Self::key()); - - let mut outputs_ref = outputs.as_slice(); - let mut res = vec![]; - while 
!outputs_ref.is_empty() { - res.push(InInstructionWithBalance::decode(&mut outputs_ref).unwrap()); - } - res - } -} diff --git a/processor/src/multisigs/mod.rs b/processor/src/multisigs/mod.rs deleted file mode 100644 index 12f017151..000000000 --- a/processor/src/multisigs/mod.rs +++ /dev/null @@ -1,1068 +0,0 @@ -use core::time::Duration; -use std::collections::HashSet; - -use ciphersuite::{group::GroupEncoding, Ciphersuite}; - -use scale::{Encode, Decode}; -use messages::SubstrateContext; - -use serai_client::{ - primitives::{MAX_DATA_LEN, ExternalAddress, BlockHash, Data}, - in_instructions::primitives::{ - InInstructionWithBalance, Batch, RefundableInInstruction, Shorthand, MAX_BATCH_SIZE, - }, - coins::primitives::{OutInstruction, OutInstructionWithBalance}, -}; - -use log::{info, error}; - -use tokio::time::sleep; - -#[cfg(not(test))] -mod scanner; -#[cfg(test)] -pub mod scanner; - -use scanner::{ScannerEvent, ScannerHandle, Scanner}; - -mod db; -use db::*; - -pub(crate) mod scheduler; -use scheduler::Scheduler; - -use crate::{ - Get, Db, Payment, Plan, - networks::{OutputType, Output, SignableTransaction, Eventuality, Block, PreparedSend, Network}, -}; - -// InInstructionWithBalance from an external output -fn instruction_from_output( - output: &N::Output, -) -> (Option, Option) { - assert_eq!(output.kind(), OutputType::External); - - let presumed_origin = output.presumed_origin().map(|address| { - ExternalAddress::new( - address - .try_into() - .map_err(|_| ()) - .expect("presumed origin couldn't be converted to a Vec"), - ) - .expect("presumed origin exceeded address limits") - }); - - let mut data = output.data(); - let max_data_len = usize::try_from(MAX_DATA_LEN).unwrap(); - if data.len() > max_data_len { - error!( - "data in output {} exceeded MAX_DATA_LEN ({MAX_DATA_LEN}): {}. 
skipping", - hex::encode(output.id()), - data.len(), - ); - return (presumed_origin, None); - } - - let shorthand = match Shorthand::decode(&mut data) { - Ok(shorthand) => shorthand, - Err(e) => { - info!("data in output {} wasn't valid shorthand: {e:?}", hex::encode(output.id())); - return (presumed_origin, None); - } - }; - let instruction = match RefundableInInstruction::try_from(shorthand) { - Ok(instruction) => instruction, - Err(e) => { - info!( - "shorthand in output {} wasn't convertible to a RefundableInInstruction: {e:?}", - hex::encode(output.id()) - ); - return (presumed_origin, None); - } - }; - - let mut balance = output.balance(); - // Deduct twice the cost to aggregate to prevent economic attacks by malicious miners against - // other users - balance.amount.0 -= 2 * N::COST_TO_AGGREGATE; - - ( - instruction.origin.or(presumed_origin), - Some(InInstructionWithBalance { instruction: instruction.instruction, balance }), - ) -} - -#[derive(Clone, Copy, PartialEq, Eq, Debug)] -enum RotationStep { - // Use the existing multisig for all actions (steps 1-3) - UseExisting, - // Use the new multisig as change (step 4) - NewAsChange, - // The existing multisig is expected to solely forward transactions at this point (step 5) - ForwardFromExisting, - // The existing multisig is expected to finish its own transactions and do nothing more - // (step 6) - ClosingExisting, -} - -// This explicitly shouldn't take the database as we prepare Plans we won't execute for fee -// estimates -async fn prepare_send( - network: &N, - block_number: usize, - plan: Plan, - operating_costs: u64, -) -> PreparedSend { - loop { - match network.prepare_send(block_number, plan.clone(), operating_costs).await { - Ok(prepared) => { - return prepared; - } - Err(e) => { - error!("couldn't prepare a send for plan {}: {e}", hex::encode(plan.id())); - // The processor is either trying to create an invalid TX (fatal) or the node went - // offline - // The former requires a patch, the latter is a connection issue - // If the latter, this is an appropriate sleep. 
If the former, we should panic, yet - // this won't flood the console ad infinitum - sleep(Duration::from_secs(60)).await; - } - } - } -} - -pub struct MultisigViewer { - activation_block: usize, - key: ::G, - scheduler: N::Scheduler, -} - -#[allow(clippy::type_complexity)] -#[derive(Clone, Debug)] -pub enum MultisigEvent { - // Batches to publish - Batches(Option<(::G, ::G)>, Vec), - // Eventuality completion found on-chain - Completed(Vec, [u8; 32], ::Completion), -} - -pub struct MultisigManager { - scanner: ScannerHandle, - existing: Option>, - new: Option>, -} - -impl MultisigManager { - pub async fn new( - raw_db: &D, - network: &N, - ) -> ( - Self, - Vec<::G>, - Vec<(Plan, N::SignableTransaction, N::Eventuality)>, - ) { - // The scanner has no long-standing orders to re-issue - let (mut scanner, current_keys) = Scanner::new(network.clone(), raw_db.clone()); - - let mut schedulers = vec![]; - - assert!(current_keys.len() <= 2); - let mut actively_signing = vec![]; - for (_, key) in ¤t_keys { - schedulers.push(N::Scheduler::from_db(raw_db, *key, N::NETWORK).unwrap()); - - // Load any TXs being actively signed - let key = key.to_bytes(); - for (block_number, plan, operating_costs) in PlanDb::active_plans::(raw_db, key.as_ref()) { - let block_number = block_number.try_into().unwrap(); - - let id = plan.id(); - info!("reloading plan {}: {:?}", hex::encode(id), plan); - - let key_bytes = plan.key.to_bytes(); - - let Some((tx, eventuality)) = - prepare_send(network, block_number, plan.clone(), operating_costs).await.tx - else { - panic!("previously created transaction is no longer being created") - }; - - scanner - .register_eventuality(key_bytes.as_ref(), block_number, id, eventuality.clone()) - .await; - actively_signing.push((plan, tx, eventuality)); - } - } - - ( - MultisigManager { - scanner, - existing: current_keys.first().copied().map(|(activation_block, key)| MultisigViewer { - activation_block, - key, - scheduler: schedulers.remove(0), - }), - new: current_keys.get(1).copied().map(|(activation_block, key)| MultisigViewer { - activation_block, - key, - scheduler: schedulers.remove(0), - }), - }, - current_keys.into_iter().map(|(_, key)| key).collect(), - actively_signing, - ) - } - - /// Returns the block number for a block hash, if it's known and all keys have scanned the block. - // This is guaranteed to atomically increment so long as no new keys are added to the scanner - // which activate at a block before the currently highest scanned block. This is prevented by - // the processor waiting for `Batch` inclusion before scanning too far ahead, and activation only - // happening after the "too far ahead" window. 
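For reference, the retry behaviour in `prepare_send` above (keep re-attempting the preparation, sleeping between failures so a node outage doesn't flood the console) can be sketched in isolation. The call and error type below are placeholders, not code from this repository.

```rust
use core::time::Duration;
use tokio::time::sleep;

// Placeholder for the real network call; it always fails here purely to
// illustrate the control flow.
async fn try_prepare() -> Result<u64, String> {
  Err("node offline".to_string())
}

// Retry until the call succeeds, sleeping between attempts so a transient
// node outage doesn't flood the logs (mirroring the loop in prepare_send).
async fn prepare_with_retry() -> u64 {
  loop {
    match try_prepare().await {
      Ok(prepared) => return prepared,
      Err(e) => {
        log::error!("couldn't prepare a send: {e}");
        sleep(Duration::from_secs(60)).await;
      }
    }
  }
}
```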
- pub async fn block_number( - &self, - getter: &G, - hash: &>::Id, - ) -> Option { - let latest = ScannerHandle::::block_number(getter, hash)?; - - // While the scanner has cemented this block, that doesn't mean it's been scanned for all - // keys - // ram_scanned will return the lowest scanned block number out of all keys - if latest > self.scanner.ram_scanned().await { - return None; - } - Some(latest) - } - - pub async fn add_key( - &mut self, - txn: &mut D::Transaction<'_>, - activation_block: usize, - external_key: ::G, - ) { - self.scanner.register_key(txn, activation_block, external_key).await; - let viewer = Some(MultisigViewer { - activation_block, - key: external_key, - scheduler: N::Scheduler::new::(txn, external_key, N::NETWORK), - }); - - if self.existing.is_none() { - self.existing = viewer; - return; - } - self.new = viewer; - } - - fn current_rotation_step(&self, block_number: usize) -> RotationStep { - let Some(new) = self.new.as_ref() else { return RotationStep::UseExisting }; - - // Period numbering here has no meaning other than these are the time values useful here, and - // the order they're calculated in. They have no reference/shared marker with anything else - - // ESTIMATED_BLOCK_TIME_IN_SECONDS is fine to use here. While inaccurate, it shouldn't be - // drastically off, and even if it is, it's a hiccup to latency handling only possible when - // rotating. The error rate wouldn't be acceptable if it was allowed to accumulate over time, - // yet rotation occurs on Serai's clock, disconnecting any errors here from any prior. - - // N::CONFIRMATIONS + 10 minutes - let period_1_start = new.activation_block + - N::CONFIRMATIONS + - (10usize * 60).div_ceil(N::ESTIMATED_BLOCK_TIME_IN_SECONDS); - - // N::CONFIRMATIONS - let period_2_start = period_1_start + N::CONFIRMATIONS; - - // 6 hours after period 2 - // Also ensure 6 hours is greater than the amount of CONFIRMATIONS, for sanity purposes - let period_3_start = - period_2_start + ((6 * 60 * 60) / N::ESTIMATED_BLOCK_TIME_IN_SECONDS).max(N::CONFIRMATIONS); - - if block_number < period_1_start { - RotationStep::UseExisting - } else if block_number < period_2_start { - RotationStep::NewAsChange - } else if block_number < period_3_start { - RotationStep::ForwardFromExisting - } else { - RotationStep::ClosingExisting - } - } - - // Convert new Burns to Payments. - // - // Also moves payments from the old Scheduler to the new multisig if the step calls for it. 
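To make the period arithmetic in `current_rotation_step` above concrete, here is a standalone calculation under assumed parameters (CONFIRMATIONS = 10, a 600-second block time, activation at block 1000). These constants are illustrative only; the real values come from the `Network` implementation.

```rust
// Assumed, Bitcoin-like parameters (hypothetical; not taken from this patch).
const CONFIRMATIONS: usize = 10;
const ESTIMATED_BLOCK_TIME_IN_SECONDS: usize = 600;

fn rotation_thresholds(activation_block: usize) -> (usize, usize, usize) {
  // CONFIRMATIONS plus roughly ten minutes of blocks after activation
  let period_1_start = activation_block +
    CONFIRMATIONS +
    (10usize * 60).div_ceil(ESTIMATED_BLOCK_TIME_IN_SECONDS);
  // Another CONFIRMATIONS
  let period_2_start = period_1_start + CONFIRMATIONS;
  // Roughly six hours later, but never less than another CONFIRMATIONS
  let period_3_start =
    period_2_start + ((6 * 60 * 60) / ESTIMATED_BLOCK_TIME_IN_SECONDS).max(CONFIRMATIONS);
  (period_1_start, period_2_start, period_3_start)
}

fn main() {
  // Activation at block 1000: blocks below 1011 use the existing multisig,
  // below 1021 use the new key as change, below 1057 forward from the
  // existing multisig, and from 1057 onward the existing multisig closes.
  assert_eq!(rotation_thresholds(1000), (1011, 1021, 1057));
}
```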
- fn burns_to_payments( - &mut self, - txn: &mut D::Transaction<'_>, - step: RotationStep, - burns: Vec, - ) -> (Vec>, Vec>) { - let mut payments = vec![]; - for out in burns { - let OutInstructionWithBalance { instruction: OutInstruction { address, data }, balance } = - out; - assert_eq!(balance.coin.network(), N::NETWORK); - - if let Ok(address) = N::Address::try_from(address.consume()) { - payments.push(Payment { address, data: data.map(Data::consume), balance }); - } - } - - let payments = payments; - match step { - RotationStep::UseExisting | RotationStep::NewAsChange => (payments, vec![]), - RotationStep::ForwardFromExisting | RotationStep::ClosingExisting => { - // Consume any payments the prior scheduler was unable to complete - // This should only actually matter once - let mut new_payments = self.existing.as_mut().unwrap().scheduler.consume_payments::(txn); - // Add the new payments - new_payments.extend(payments); - (vec![], new_payments) - } - } - } - - fn split_outputs_by_key(&self, outputs: Vec) -> (Vec, Vec) { - let mut existing_outputs = Vec::with_capacity(outputs.len()); - let mut new_outputs = vec![]; - - let existing_key = self.existing.as_ref().unwrap().key; - let new_key = self.new.as_ref().map(|new| new.key); - for output in outputs { - if output.key() == existing_key { - existing_outputs.push(output); - } else { - assert_eq!(Some(output.key()), new_key); - new_outputs.push(output); - } - } - - (existing_outputs, new_outputs) - } - - fn refund_plan( - scheduler: &mut N::Scheduler, - txn: &mut D::Transaction<'_>, - output: N::Output, - refund_to: N::Address, - ) -> Plan { - log::info!("creating refund plan for {}", hex::encode(output.id())); - assert_eq!(output.kind(), OutputType::External); - scheduler.refund_plan::(txn, output, refund_to) - } - - // Returns the plan for forwarding if one is needed. - // Returns None if one is not needed to forward this output. - fn forward_plan(&mut self, txn: &mut D::Transaction<'_>, output: &N::Output) -> Option> { - log::info!("creating forwarding plan for {}", hex::encode(output.id())); - let res = self.existing.as_mut().unwrap().scheduler.forward_plan::( - txn, - output.clone(), - self.new.as_ref().expect("forwarding plan yet no new multisig").key, - ); - if res.is_none() { - log::info!("no forwarding plan was necessary for {}", hex::encode(output.id())); - } - res - } - - // Filter newly received outputs due to the step being RotationStep::ClosingExisting. - // - // Returns the Plans for the `Branch`s which should be created off outputs which passed the - // filter. - fn filter_outputs_due_to_closing( - &mut self, - txn: &mut D::Transaction<'_>, - existing_outputs: &mut Vec, - ) -> Vec> { - /* - The document says to only handle outputs we created. We don't know what outputs we - created. We do have an ordered view of equivalent outputs however, and can assume the - first (and likely only) ones are the ones we created. - - Accordingly, only handling outputs we created should be definable as only handling - outputs from the resolution of Eventualities. - - This isn't feasible. It requires knowing what Eventualities were completed in this block, - when we handle this block, which we don't know without fully serialized scanning + Batch - publication. - - Take the following scenario: - 1) A network uses 10 confirmations. Block x is scanned, meaning x+9a exists. - 2) 67% of nodes process x, create, sign, and publish a TX, creating an Eventuality. - 3) A reorganization to a shorter chain occurs, including the published TX in x+1b. 
- 4) The 33% of nodes which are latent will be allowed to scan x+1b as soon as x+10b - exists. They won't wait for Serai to include the Batch for x until they try to scan - x+10b. - 5) These latent nodes will handle x+1b, post-create an Eventuality, post-learn x+1b - contained resolutions, changing how x+1b should've been interpreted. - - We either have to: - A) Fully serialize scanning (removing the ability to utilize throughput to allow higher - latency, at least while the step is `ClosingExisting`). - B) Create Eventualities immediately, which we can't do as then both the external - network's clock AND Serai's clock can trigger Eventualities, removing ordering. - We'd need to shift entirely to the external network's clock, only handling Burns - outside the parallelization window (which would be extremely latent). - C) Use a different mechanism to determine if we created an output. - D) Re-define which outputs are still to be handled after the 6 hour period expires, such - that the multisig's lifetime cannot be further extended yet it does fulfill its - responsibility. - - External outputs to the existing multisig will be: - - Scanned before the rotation and unused (as used External outputs become Change) - - Forwarded immediately upon scanning - - Not scanned before the cut off time (and accordingly dropped) - - For the first case, since they're scanned before the rotation and unused, they'll be - forwarded with all other available outputs (since they'll be available when scanned). - - Change outputs will be: - - Scanned before the rotation and forwarded with all other available outputs - - Forwarded immediately upon scanning - - Not scanned before the cut off time, requiring an extension exclusive to these outputs - - The important thing to note about honest Change outputs to the existing multisig is that - they'll only be created within `CONFIRMATIONS+1` blocks of the activation block. Also - important to note is that there's another explicit window of `CONFIRMATIONS` before the - 6 hour window. - - Eventualities are not guaranteed to be known before we scan the block containing their - resolution. They are guaranteed to be known within `CONFIRMATIONS-1` blocks however, due - to the limitation on how far we'll scan ahead. - - This means we will know of all Eventualities related to Change outputs we need to forward - before the 6 hour period begins (as forwarding outputs will not create any Change outputs - to the existing multisig). - - This means a definition of complete can be defined as: - 1) Handled all Branch outputs - 2) Forwarded all External outputs received before the end of 6 hour window - 3) Forwarded the results of all Eventualities with Change, which will have been created - before the 6 hour window - - How can we track and ensure this without needing to check if an output is from the - resolution of an Eventuality? - - 1) We only create Branch outputs before the 6 hour window starts. These are guaranteed - to appear within `CONFIRMATIONS` blocks. They will exist with arbitrary depth however, - meaning that upon completion they will spawn several more Eventualities. The further - created Eventualities re-risk being present after the 6 hour period ends. - - We can: - 1) Build a queue for Branch outputs, delaying their handling until relevant - Eventualities are guaranteed to be present. 
- - This solution would theoretically work for all outputs and allow collapsing this - problem to simply: - - > Accordingly, only handling outputs we created should be definable as only - handling outputs from the resolution of Eventualities. - - 2) Create all Eventualities under a Branch at time of Branch creation. - This idea fails as Plans are tightly bound to outputs. - - 3) Don't track Branch outputs by Eventualities, yet by the amount of Branch outputs - remaining. Any Branch output received, of a useful amount, is assumed to be our - own and handled. All other Branch outputs, even if they're the completion of some - Eventuality, are dropped. - - This avoids needing any additional queue, avoiding additional pipelining/latency. - - 2) External outputs are self-evident. We simply stop handling them at the cut-off point, - and only start checking after `CONFIRMATIONS` blocks if all Eventualities are - complete. - - 3) Since all Change Eventualities will be known prior to the 6 hour window's beginning, - we can safely check if a received Change output is the resolution of an Eventuality. - We only need to forward it if so. Forwarding it simply requires only checking if - Eventualities are complete after `CONFIRMATIONS` blocks, same as for straggling - External outputs. - */ - - let mut plans = vec![]; - existing_outputs.retain(|output| { - match output.kind() { - OutputType::External | OutputType::Forwarded => false, - OutputType::Branch => { - let scheduler = &mut self.existing.as_mut().unwrap().scheduler; - // There *would* be a race condition here due to the fact we only mark a `Branch` output - // as needed when we process the block (and handle scheduling), yet actual `Branch` - // outputs may appear as soon as the next block (and we scan the next block before we - // process the prior block) - // - // Unlike Eventuality checking, which happens on scanning and is therefore asynchronous, - // all scheduling (and this check against the scheduler) happens on processing, which is - // synchronous - // - // While we could move Eventuality checking into the block processing, removing its - // asynchonicity, we could only check data the Scanner deems important. The Scanner won't - // deem important Eventuality resolutions which don't create an output to Serai unless - // it knows of the Eventuality. Accordingly, at best we could have a split role (the - // Scanner noting completion of Eventualities which don't have relevant outputs, the - // processing noting completion of ones which do) - // - // This is unnecessary, due to the current flow around Eventuality resolutions and the - // current bounds naturally found being sufficiently amenable, yet notable for the future - if scheduler.can_use_branch(output.balance()) { - // We could simply call can_use_branch, yet it'd have an edge case where if we receive - // two outputs for 100, and we could use one such output, we'd handle both. - // - // Individually schedule each output once confirming they're usable in order to avoid - // this. - let mut plan = scheduler.schedule::( - txn, - vec![output.clone()], - vec![], - self.new.as_ref().unwrap().key, - false, - ); - assert_eq!(plan.len(), 1); - let plan = plan.remove(0); - plans.push(plan); - } - false - } - OutputType::Change => { - // If the TX containing this output resolved an Eventuality... - if let Some(plan) = ResolvedDb::get(txn, output.tx_id().as_ref()) { - // And the Eventuality had change... 
- // We need this check as Eventualities have a race condition and can't be relied - // on, as extensively detailed above. Eventualities explicitly with change do have - // a safe timing window however - if PlanDb::plan_by_key_with_self_change::( - txn, - // Pass the key so the DB checks the Plan's key is this multisig's, preventing a - // potential issue where the new multisig creates a Plan with change *and a - // payment to the existing multisig's change address* - self.existing.as_ref().unwrap().key, - plan, - ) { - // Then this is an honest change output we need to forward - // (or it's a payment to the change address in the same transaction as an honest - // change output, which is fine to let slip in) - return true; - } - } - false - } - } - }); - plans - } - - // Returns the Plans caused from a block being acknowledged. - // - // Will rotate keys if the block acknowledged is the retirement block. - async fn plans_from_block( - &mut self, - txn: &mut D::Transaction<'_>, - block_number: usize, - block_id: >::Id, - step: &mut RotationStep, - burns: Vec, - ) -> (bool, Vec>, HashSet<[u8; 32]>) { - let (mut existing_payments, mut new_payments) = self.burns_to_payments(txn, *step, burns); - - let mut plans = vec![]; - let mut plans_from_scanning = HashSet::new(); - - // We now have to acknowledge the acknowledged block, if it's new - // It won't be if this block's `InInstruction`s were split into multiple `Batch`s - let (acquired_lock, (mut existing_outputs, new_outputs)) = { - let (acquired_lock, mut outputs) = if ScannerHandle::::db_scanned(txn) - .expect("published a Batch despite never scanning a block") < - block_number - { - // Load plans crated when we scanned the block - let scanning_plans = - PlansFromScanningDb::take_plans_from_scanning::(txn, block_number).unwrap(); - // Expand into actual plans - plans = scanning_plans - .into_iter() - .map(|plan| match plan { - PlanFromScanning::Refund(output, refund_to) => { - let existing = self.existing.as_mut().unwrap(); - if output.key() == existing.key { - Self::refund_plan(&mut existing.scheduler, txn, output, refund_to) - } else { - let new = self - .new - .as_mut() - .expect("new multisig didn't expect yet output wasn't for existing multisig"); - assert_eq!(output.key(), new.key, "output wasn't for existing nor new multisig"); - Self::refund_plan(&mut new.scheduler, txn, output, refund_to) - } - } - PlanFromScanning::Forward(output) => self - .forward_plan(txn, &output) - .expect("supposed to forward an output yet no forwarding plan"), - }) - .collect(); - - for plan in &plans { - plans_from_scanning.insert(plan.id()); - } - - let (is_retirement_block, outputs) = self.scanner.ack_block(txn, block_id.clone()).await; - if is_retirement_block { - let existing = self.existing.take().unwrap(); - assert!(existing.scheduler.empty()); - self.existing = self.new.take(); - *step = RotationStep::UseExisting; - assert!(existing_payments.is_empty()); - existing_payments = new_payments; - new_payments = vec![]; - } - (true, outputs) - } else { - (false, vec![]) - }; - - // Remove all outputs already present in plans - let mut output_set = HashSet::new(); - for plan in &plans { - for input in &plan.inputs { - output_set.insert(input.id().as_ref().to_vec()); - } - } - outputs.retain(|output| !output_set.remove(output.id().as_ref())); - assert_eq!(output_set.len(), 0); - - (acquired_lock, self.split_outputs_by_key(outputs)) - }; - - // If we're closing the existing multisig, filter its outputs down - if *step == RotationStep::ClosingExisting { - 
plans.extend(self.filter_outputs_due_to_closing(txn, &mut existing_outputs)); - } - - // Now that we've done all our filtering, schedule the existing multisig's outputs - plans.extend({ - let existing = self.existing.as_mut().unwrap(); - let existing_key = existing.key; - self.existing.as_mut().unwrap().scheduler.schedule::( - txn, - existing_outputs, - existing_payments, - match *step { - RotationStep::UseExisting => existing_key, - RotationStep::NewAsChange | - RotationStep::ForwardFromExisting | - RotationStep::ClosingExisting => self.new.as_ref().unwrap().key, - }, - match *step { - RotationStep::UseExisting | RotationStep::NewAsChange => false, - RotationStep::ForwardFromExisting | RotationStep::ClosingExisting => true, - }, - ) - }); - - for plan in &plans { - // This first equality should 'never meaningfully' be false - // All created plans so far are by the existing multisig EXCEPT: - // A) If we created a refund plan from the new multisig (yet that wouldn't have change) - // B) The existing Scheduler returned a Plan for the new key (yet that happens with the SC - // scheduler, yet that doesn't have change) - // Despite being 'unnecessary' now, it's better to explicitly ensure and be robust - if plan.key == self.existing.as_ref().unwrap().key { - if let Some(change) = N::change_address(plan.key) { - if plan.change == Some(change) { - // Assert these (self-change) are only created during the expected step - match *step { - RotationStep::UseExisting => {} - RotationStep::NewAsChange | - RotationStep::ForwardFromExisting | - RotationStep::ClosingExisting => panic!("change was set to self despite rotating"), - } - } - } - } - } - - // Schedule the new multisig's outputs too - if let Some(new) = self.new.as_mut() { - plans.extend(new.scheduler.schedule::(txn, new_outputs, new_payments, new.key, false)); - } - - (acquired_lock, plans, plans_from_scanning) - } - - /// Handle a SubstrateBlock event, building the relevant Plans. 
- pub async fn substrate_block( - &mut self, - txn: &mut D::Transaction<'_>, - network: &N, - context: SubstrateContext, - burns: Vec, - ) -> (bool, Vec<(::G, [u8; 32], N::SignableTransaction, N::Eventuality)>) - { - let mut block_id = >::Id::default(); - block_id.as_mut().copy_from_slice(context.network_latest_finalized_block.as_ref()); - let block_number = ScannerHandle::::block_number(txn, &block_id) - .expect("SubstrateBlock with context we haven't synced"); - - // Determine what step of rotation we're currently in - let mut step = self.current_rotation_step(block_number); - - // Get the Plans from this block - let (acquired_lock, plans, plans_from_scanning) = - self.plans_from_block(txn, block_number, block_id, &mut step, burns).await; - - let res = { - let mut res = Vec::with_capacity(plans.len()); - - for plan in plans { - let id = plan.id(); - info!("preparing plan {}: {:?}", hex::encode(id), plan); - - let key = plan.key; - let key_bytes = key.to_bytes(); - - let (tx, post_fee_branches) = { - let running_operating_costs = OperatingCostsDb::take_operating_costs(txn); - - PlanDb::save_active_plan::( - txn, - key_bytes.as_ref(), - block_number, - &plan, - running_operating_costs, - ); - - // If this Plan is from the scanner handler below, don't take the opportunity to amortze - // operating costs - // It operates with limited context, and on a different clock, making it nable to react - // to operating costs - // Despite this, in order to properly save forwarded outputs' instructions, it needs to - // know the actual value forwarded outputs will be created with - // Including operating costs prevents that - let from_scanning = plans_from_scanning.contains(&plan.id()); - let to_use_operating_costs = if from_scanning { 0 } else { running_operating_costs }; - - let PreparedSend { tx, post_fee_branches, mut operating_costs } = - prepare_send(network, block_number, plan, to_use_operating_costs).await; - - // Restore running_operating_costs to operating_costs - if from_scanning { - // If we're forwarding (or refunding) this output, operating_costs should still be 0 - // Either this TX wasn't created, causing no operating costs, or it was yet it'd be - // amortized - assert_eq!(operating_costs, 0); - - operating_costs += running_operating_costs; - } - - OperatingCostsDb::set_operating_costs(txn, operating_costs); - - (tx, post_fee_branches) - }; - - for branch in post_fee_branches { - let existing = self.existing.as_mut().unwrap(); - let to_use = if key == existing.key { - existing - } else { - let new = self - .new - .as_mut() - .expect("plan wasn't for existing multisig yet there wasn't a new multisig"); - assert_eq!(key, new.key); - new - }; - - to_use.scheduler.created_output::(txn, branch.expected, branch.actual); - } - - if let Some((tx, eventuality)) = tx { - // The main function we return to will send an event to the coordinator which must be - // fired before these registered Eventualities have their Completions fired - // Safety is derived from a mutable lock on the Scanner being preserved, preventing - // scanning (and detection of Eventuality resolutions) before it's released - // It's only released by the main function after it does what it will - self - .scanner - .register_eventuality(key_bytes.as_ref(), block_number, id, eventuality.clone()) - .await; - - res.push((key, id, tx, eventuality)); - } - - // TODO: If the TX is None, restore its inputs to the scheduler for efficiency's sake - // If this TODO is removed, also reduce the operating costs - } - res - }; - 
(acquired_lock, res) - } - - pub async fn release_scanner_lock(&mut self) { - self.scanner.release_lock().await; - } - - pub async fn scanner_event_to_multisig_event( - &self, - txn: &mut D::Transaction<'_>, - network: &N, - msg: ScannerEvent, - ) -> MultisigEvent { - let (block_number, event) = match msg { - ScannerEvent::Block { is_retirement_block, block, mut outputs } => { - // Since the Scanner is asynchronous, the following is a concern for race conditions - // We safely know the step of a block since keys are declared, and the Scanner is safe - // with respect to the declaration of keys - // Accordingly, the following calls regarding new keys and step should be safe - let block_number = ScannerHandle::::block_number(txn, &block) - .expect("didn't have the block number for a block we just scanned"); - let step = self.current_rotation_step(block_number); - - // Instructions created from this block - let mut instructions = vec![]; - - // If any of these outputs were forwarded, create their instruction now - for output in &outputs { - if output.kind() != OutputType::Forwarded { - continue; - } - - if let Some(instruction) = ForwardedOutputDb::take_forwarded_output(txn, output.balance()) - { - instructions.push(instruction); - } - } - - // If the remaining outputs aren't externally received funds, don't handle them as - // instructions - outputs.retain(|output| output.kind() == OutputType::External); - - // These plans are of limited context. They're only allowed the outputs newly received - // within this block and are intended to handle forwarding transactions/refunds - let mut plans = vec![]; - - // If the old multisig is explicitly only supposed to forward, create all such plans now - if step == RotationStep::ForwardFromExisting { - let mut i = 0; - while i < outputs.len() { - let output = &outputs[i]; - let plans = &mut plans; - let txn = &mut *txn; - - #[allow(clippy::redundant_closure_call)] - let should_retain = (|| async move { - // If this output doesn't belong to the existing multisig, it shouldn't be forwarded - if output.key() != self.existing.as_ref().unwrap().key { - return true; - } - - let plans_at_start = plans.len(); - let (refund_to, instruction) = instruction_from_output::(output); - if let Some(mut instruction) = instruction { - let Some(shimmed_plan) = N::Scheduler::shim_forward_plan( - output.clone(), - self.new.as_ref().expect("forwarding from existing yet no new multisig").key, - ) else { - // If this network doesn't need forwarding, report the output now - return true; - }; - plans.push(PlanFromScanning::::Forward(output.clone())); - - // Set the instruction for this output to be returned - // We need to set it under the amount it's forwarded with, so prepare its forwarding - // TX to determine the fees involved - let PreparedSend { tx, post_fee_branches: _, operating_costs } = - prepare_send(network, block_number, shimmed_plan, 0).await; - // operating_costs should not increase in a forwarding TX - assert_eq!(operating_costs, 0); - - // If this actually forwarded any coins, save the output as forwarded - // If this didn't create a TX, we don't bother saving the output as forwarded - // The fact we already created and pushed a plan still using this output will cause - // it to not be retained here, and later the plan will be dropped as this did here, - // letting it die out - if let Some(tx) = &tx { - instruction.balance.amount.0 -= tx.0.fee(); - - /* - Sending a Plan, with arbitrary data proxying the InInstruction, would require - adding a flow for networks 
which drop their data to still embed arbitrary data. - It'd also have edge cases causing failures (we'd need to manually provide the - origin if it was implied, which may exceed the encoding limit). - - Instead, we save the InInstruction as we scan this output. Then, when the - output is successfully forwarded, we simply read it from the local database. - This also saves the costs of embedding arbitrary data. - - Since we can't rely on the Eventuality system to detect if it's a forwarded - transaction, due to the asynchonicity of the Eventuality system, we instead - interpret an Forwarded output which has an amount associated with an - InInstruction which was forwarded as having been forwarded. - */ - ForwardedOutputDb::save_forwarded_output(txn, &instruction); - } - } else if let Some(refund_to) = refund_to { - if let Ok(refund_to) = refund_to.consume().try_into() { - // Build a dedicated Plan refunding this - plans.push(PlanFromScanning::Refund(output.clone(), refund_to)); - } - } - - // Only keep if we didn't make a Plan consuming it - plans_at_start == plans.len() - })() - .await; - if should_retain { - i += 1; - continue; - } - outputs.remove(i); - } - } - - for output in outputs { - // If this is an External transaction to the existing multisig, and we're either solely - // forwarding or closing the existing multisig, drop it - // In the case of the forwarding case, we'll report it once it hits the new multisig - if (match step { - RotationStep::UseExisting | RotationStep::NewAsChange => false, - RotationStep::ForwardFromExisting | RotationStep::ClosingExisting => true, - }) && (output.key() == self.existing.as_ref().unwrap().key) - { - continue; - } - - let (refund_to, instruction) = instruction_from_output::(&output); - let Some(instruction) = instruction else { - if let Some(refund_to) = refund_to { - if let Ok(refund_to) = refund_to.consume().try_into() { - plans.push(PlanFromScanning::Refund(output.clone(), refund_to)); - } - } - continue; - }; - - // Delay External outputs received to new multisig earlier than expected - if Some(output.key()) == self.new.as_ref().map(|new| new.key) { - match step { - RotationStep::UseExisting => { - DelayedOutputDb::save_delayed_output(txn, &instruction); - continue; - } - RotationStep::NewAsChange | - RotationStep::ForwardFromExisting | - RotationStep::ClosingExisting => {} - } - } - - instructions.push(instruction); - } - - // Save the plans created while scanning - // TODO: Should we combine all of these plans to reduce the fees incurred from their - // execution? They're refunds and forwards. Neither should need isolate Plan/Eventualities. 
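The forwarding bookkeeping above, where the `InInstruction` is saved under the post-fee balance the forwarded output will actually carry and is retrieved once a Forwarded output with that balance is scanned at the new multisig, can be sketched with plain in-memory types. The structs below are simplified stand-ins for the real database tables and encodings, not part of this patch.

```rust
use std::collections::{HashMap, VecDeque};

// Simplified stand-ins for Balance and InInstructionWithBalance.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct Balance {
  coin: u8,
  amount: u64,
}
#[derive(Clone, Debug)]
struct Instruction {
  data: Vec<u8>,
  balance: Balance,
}

#[derive(Default)]
struct ForwardedOutputs(HashMap<Balance, VecDeque<Instruction>>);

impl ForwardedOutputs {
  // Called when the forwarding TX is created: the instruction is stored under
  // the amount it will actually be forwarded with (original amount minus fee).
  fn save(&mut self, mut instruction: Instruction, fee: u64) {
    instruction.balance.amount -= fee;
    self.0.entry(instruction.balance).or_default().push_back(instruction);
  }

  // Called when a Forwarded output of this exact balance is later scanned.
  fn take(&mut self, balance: Balance) -> Option<Instruction> {
    let queue = self.0.get_mut(&balance)?;
    let res = queue.pop_front();
    if queue.is_empty() {
      self.0.remove(&balance);
    }
    res
  }
}
```

Keying by the exact post-fee balance is what lets the Forwarded output be matched back to its instruction without embedding any data in the forwarding transaction itself.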
- PlansFromScanningDb::set_plans_from_scanning(txn, block_number, plans); - - // If any outputs were delayed, append them into this block - match step { - RotationStep::UseExisting => {} - RotationStep::NewAsChange | - RotationStep::ForwardFromExisting | - RotationStep::ClosingExisting => { - instructions.extend(DelayedOutputDb::take_delayed_outputs(txn)); - } - } - - let mut block_hash = [0; 32]; - block_hash.copy_from_slice(block.as_ref()); - let mut batch_id = NextBatchDb::get(txn).unwrap_or_default(); - - // start with empty batch - let mut batches = vec![Batch { - network: N::NETWORK, - id: batch_id, - block: BlockHash(block_hash), - instructions: vec![], - }]; - - for instruction in instructions { - let batch = batches.last_mut().unwrap(); - batch.instructions.push(instruction); - - // check if batch is over-size - if batch.encode().len() > MAX_BATCH_SIZE { - // pop the last instruction so it's back in size - let instruction = batch.instructions.pop().unwrap(); - - // bump the id for the new batch - batch_id += 1; - - // make a new batch with this instruction included - batches.push(Batch { - network: N::NETWORK, - id: batch_id, - block: BlockHash(block_hash), - instructions: vec![instruction], - }); - } - } - - // Save the next batch ID - NextBatchDb::set(txn, &(batch_id + 1)); - - ( - block_number, - MultisigEvent::Batches( - if is_retirement_block { - Some((self.existing.as_ref().unwrap().key, self.new.as_ref().unwrap().key)) - } else { - None - }, - batches, - ), - ) - } - - // This must be emitted before ScannerEvent::Block for all completions of known Eventualities - // within the block. Unknown Eventualities may have their Completed events emitted after - // ScannerEvent::Block however. - ScannerEvent::Completed(key, block_number, id, tx_id, completion) => { - ResolvedDb::resolve_plan::(txn, &key, id, &tx_id); - (block_number, MultisigEvent::Completed(key, id, completion)) - } - }; - - // If we either received a Block event (which will be the trigger when we have no - // Plans/Eventualities leading into ClosingExisting), or we received the last Completed for - // this multisig, set its retirement block - let existing = self.existing.as_ref().unwrap(); - - // This multisig is closing - let closing = self.current_rotation_step(block_number) == RotationStep::ClosingExisting; - // There's nothing left in its Scheduler. This call is safe as: - // 1) When ClosingExisting, all outputs should've been already forwarded, preventing - // new UTXOs from accumulating. - // 2) No new payments should be issued. - // 3) While there may be plans, they'll be dropped to create Eventualities. - // If this Eventuality is resolved, the Plan has already been dropped. - // 4) If this Eventuality will trigger a Plan, it'll still be in the plans HashMap. - let scheduler_is_empty = closing && existing.scheduler.empty(); - // Nothing is still being signed - let no_active_plans = scheduler_is_empty && - PlanDb::active_plans::(txn, existing.key.to_bytes().as_ref()).is_empty(); - - self - .scanner - .multisig_completed - // The above explicitly included their predecessor to ensure short-circuiting, yet their - // names aren't defined as an aggregate check. 
Still including all three here ensures all are - // used in the final value - .send(closing && scheduler_is_empty && no_active_plans) - .unwrap(); - - event - } - - pub async fn next_scanner_event(&mut self) -> ScannerEvent { - self.scanner.events.recv().await.unwrap() - } -} diff --git a/processor/src/multisigs/scanner.rs b/processor/src/multisigs/scanner.rs deleted file mode 100644 index 1b25e1086..000000000 --- a/processor/src/multisigs/scanner.rs +++ /dev/null @@ -1,739 +0,0 @@ -use core::marker::PhantomData; -use std::{ - sync::Arc, - io::Read, - time::Duration, - collections::{VecDeque, HashSet, HashMap}, -}; - -use ciphersuite::group::GroupEncoding; -use frost::curve::Ciphersuite; - -use log::{info, debug, warn}; -use tokio::{ - sync::{RwLockReadGuard, RwLockWriteGuard, RwLock, mpsc}, - time::sleep, -}; - -use crate::{ - Get, DbTxn, Db, - networks::{Output, Transaction, Eventuality, EventualitiesTracker, Block, Network}, -}; - -#[derive(Clone, Debug)] -pub enum ScannerEvent { - // Block scanned - Block { - is_retirement_block: bool, - block: >::Id, - outputs: Vec, - }, - // Eventuality completion found on-chain - // TODO: Move this from a tuple - Completed( - Vec, - usize, - [u8; 32], - >::Id, - ::Completion, - ), -} - -pub type ScannerEventChannel = mpsc::UnboundedReceiver>; - -#[derive(Clone, Debug)] -struct ScannerDb(PhantomData, PhantomData); -impl ScannerDb { - fn scanner_key(dst: &'static [u8], key: impl AsRef<[u8]>) -> Vec { - D::key(b"SCANNER", dst, key) - } - - fn block_key(number: usize) -> Vec { - Self::scanner_key(b"block_id", u64::try_from(number).unwrap().to_le_bytes()) - } - fn block_number_key(id: &>::Id) -> Vec { - Self::scanner_key(b"block_number", id) - } - fn save_block(txn: &mut D::Transaction<'_>, number: usize, id: &>::Id) { - txn.put(Self::block_number_key(id), u64::try_from(number).unwrap().to_le_bytes()); - txn.put(Self::block_key(number), id); - } - fn block(getter: &G, number: usize) -> Option<>::Id> { - getter.get(Self::block_key(number)).map(|id| { - let mut res = >::Id::default(); - res.as_mut().copy_from_slice(&id); - res - }) - } - fn block_number(getter: &G, id: &>::Id) -> Option { - getter - .get(Self::block_number_key(id)) - .map(|number| u64::from_le_bytes(number.try_into().unwrap()).try_into().unwrap()) - } - - fn keys_key() -> Vec { - Self::scanner_key(b"keys", b"") - } - fn register_key( - txn: &mut D::Transaction<'_>, - activation_number: usize, - key: ::G, - ) { - let mut keys = txn.get(Self::keys_key()).unwrap_or(vec![]); - - let key_bytes = key.to_bytes(); - - let key_len = key_bytes.as_ref().len(); - assert_eq!(keys.len() % (8 + key_len), 0); - - // Sanity check this key isn't already present - let mut i = 0; - while i < keys.len() { - if &keys[(i + 8) .. 
((i + 8) + key_len)] == key_bytes.as_ref() { - panic!("adding {} as a key yet it was already present", hex::encode(key_bytes)); - } - i += 8 + key_len; - } - - keys.extend(u64::try_from(activation_number).unwrap().to_le_bytes()); - keys.extend(key_bytes.as_ref()); - txn.put(Self::keys_key(), keys); - } - fn keys(getter: &G) -> Vec<(usize, ::G)> { - let bytes_vec = getter.get(Self::keys_key()).unwrap_or(vec![]); - let mut bytes: &[u8] = bytes_vec.as_ref(); - - // Assumes keys will be 32 bytes when calculating the capacity - // If keys are larger, this may allocate more memory than needed - // If keys are smaller, this may require additional allocations - // Either are fine - let mut res = Vec::with_capacity(bytes.len() / (8 + 32)); - while !bytes.is_empty() { - let mut activation_number = [0; 8]; - bytes.read_exact(&mut activation_number).unwrap(); - let activation_number = u64::from_le_bytes(activation_number).try_into().unwrap(); - - res.push((activation_number, N::Curve::read_G(&mut bytes).unwrap())); - } - res - } - fn retire_key(txn: &mut D::Transaction<'_>) { - let keys = Self::keys(txn); - assert_eq!(keys.len(), 2); - txn.del(Self::keys_key()); - Self::register_key(txn, keys[1].0, keys[1].1); - } - - fn seen_key(id: &>::Id) -> Vec { - Self::scanner_key(b"seen", id) - } - fn seen(getter: &G, id: &>::Id) -> bool { - getter.get(Self::seen_key(id)).is_some() - } - - fn outputs_key(block: &>::Id) -> Vec { - Self::scanner_key(b"outputs", block.as_ref()) - } - fn save_outputs( - txn: &mut D::Transaction<'_>, - block: &>::Id, - outputs: &[N::Output], - ) { - let mut bytes = Vec::with_capacity(outputs.len() * 64); - for output in outputs { - output.write(&mut bytes).unwrap(); - } - txn.put(Self::outputs_key(block), bytes); - } - fn outputs( - txn: &D::Transaction<'_>, - block: &>::Id, - ) -> Option> { - let bytes_vec = txn.get(Self::outputs_key(block))?; - let mut bytes: &[u8] = bytes_vec.as_ref(); - - let mut res = vec![]; - while !bytes.is_empty() { - res.push(N::Output::read(&mut bytes).unwrap()); - } - Some(res) - } - - fn scanned_block_key() -> Vec { - Self::scanner_key(b"scanned_block", []) - } - - fn save_scanned_block(txn: &mut D::Transaction<'_>, block: usize) -> Vec { - let id = Self::block(txn, block); // It may be None for the first key rotated to - let outputs = - if let Some(id) = id.as_ref() { Self::outputs(txn, id).unwrap_or(vec![]) } else { vec![] }; - - // Mark all the outputs from this block as seen - for output in &outputs { - txn.put(Self::seen_key(&output.id()), b""); - } - - txn.put(Self::scanned_block_key(), u64::try_from(block).unwrap().to_le_bytes()); - - // Return this block's outputs so they can be pruned from the RAM cache - outputs - } - fn latest_scanned_block(getter: &G) -> Option { - getter - .get(Self::scanned_block_key()) - .map(|bytes| u64::from_le_bytes(bytes.try_into().unwrap()).try_into().unwrap()) - } - - fn retirement_block_key(key: &::G) -> Vec { - Self::scanner_key(b"retirement_block", key.to_bytes()) - } - fn save_retirement_block( - txn: &mut D::Transaction<'_>, - key: &::G, - block: usize, - ) { - txn.put(Self::retirement_block_key(key), u64::try_from(block).unwrap().to_le_bytes()); - } - fn retirement_block(getter: &G, key: &::G) -> Option { - getter - .get(Self::retirement_block_key(key)) - .map(|bytes| usize::try_from(u64::from_le_bytes(bytes.try_into().unwrap())).unwrap()) - } -} - -/// The Scanner emits events relating to the blockchain, notably received outputs. 
-/// -/// It WILL NOT fail to emit an event, even if it reboots at selected moments. -/// -/// It MAY fire the same event multiple times. -#[derive(Debug)] -pub struct Scanner { - _db: PhantomData, - - keys: Vec<(usize, ::G)>, - - eventualities: HashMap, EventualitiesTracker>, - - ram_scanned: Option, - ram_outputs: HashSet>, - - need_ack: VecDeque, - - events: mpsc::UnboundedSender>, -} - -#[derive(Clone, Debug)] -struct ScannerHold { - scanner: Arc>>>, -} -impl ScannerHold { - async fn read(&self) -> RwLockReadGuard<'_, Option>> { - loop { - let lock = self.scanner.read().await; - if lock.is_none() { - drop(lock); - tokio::task::yield_now().await; - continue; - } - return lock; - } - } - async fn write(&self) -> RwLockWriteGuard<'_, Option>> { - loop { - let lock = self.scanner.write().await; - if lock.is_none() { - drop(lock); - tokio::task::yield_now().await; - continue; - } - return lock; - } - } - // This is safe to not check if something else already acquired the Scanner as the only caller is - // sequential. - async fn long_term_acquire(&self) -> Scanner { - self.scanner.write().await.take().unwrap() - } - async fn restore(&self, scanner: Scanner) { - let _ = self.scanner.write().await.insert(scanner); - } -} - -#[derive(Debug)] -pub struct ScannerHandle { - scanner: ScannerHold, - held_scanner: Option>, - pub events: ScannerEventChannel, - pub multisig_completed: mpsc::UnboundedSender, -} - -impl ScannerHandle { - pub async fn ram_scanned(&self) -> usize { - self.scanner.read().await.as_ref().unwrap().ram_scanned.unwrap_or(0) - } - - /// Register a key to scan for. - pub async fn register_key( - &mut self, - txn: &mut D::Transaction<'_>, - activation_number: usize, - key: ::G, - ) { - info!("Registering key {} in scanner at {activation_number}", hex::encode(key.to_bytes())); - - let mut scanner_lock = self.scanner.write().await; - let scanner = scanner_lock.as_mut().unwrap(); - assert!( - activation_number > scanner.ram_scanned.unwrap_or(0), - "activation block of new keys was already scanned", - ); - - if scanner.keys.is_empty() { - assert!(scanner.ram_scanned.is_none()); - scanner.ram_scanned = Some(activation_number); - assert!(ScannerDb::::save_scanned_block(txn, activation_number).is_empty()); - } - - ScannerDb::::register_key(txn, activation_number, key); - scanner.keys.push((activation_number, key)); - #[cfg(not(test))] // TODO: A test violates this. Improve the test with a better flow - assert!(scanner.keys.len() <= 2); - - scanner.eventualities.insert(key.to_bytes().as_ref().to_vec(), EventualitiesTracker::new()); - } - - pub fn db_scanned(getter: &G) -> Option { - ScannerDb::::latest_scanned_block(getter) - } - - // This perform a database read which isn't safe with regards to if the value is set or not - // It may be set, when it isn't expected to be set, or not set, when it is expected to be set - // Since the value is static, if it's set, it's correctly set - pub fn block_number(getter: &G, id: &>::Id) -> Option { - ScannerDb::::block_number(getter, id) - } - - /// Acknowledge having handled a block. - /// - /// Creates a lock over the Scanner, preventing its independent scanning operations until - /// released. - /// - /// This must only be called on blocks which have been scanned in-memory. 
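The `ScannerHold` construction above, an `Arc<RwLock<Option<_>>>` whose readers and writers spin while the value is checked out and which supports taking the value out for a long-lived exclusive hold, is a general pattern. Below is a minimal sketch of it with a placeholder state type, assuming only tokio; it is not the project's own API.

```rust
use std::sync::Arc;
use tokio::sync::{RwLock, RwLockReadGuard};

// Placeholder for the actual scanner state.
struct State(u64);

#[derive(Clone)]
struct Hold {
  inner: Arc<RwLock<Option<State>>>,
}

impl Hold {
  // Readers spin (yielding to the executor) while the state is checked out.
  async fn read(&self) -> RwLockReadGuard<'_, Option<State>> {
    loop {
      let lock = self.inner.read().await;
      if lock.is_some() {
        return lock;
      }
      drop(lock);
      tokio::task::yield_now().await;
    }
  }

  // Take the state out entirely; read() callers will spin until restore()
  // puts it back, giving the caller an exclusive, long-lived hold without
  // keeping a guard alive across await points.
  async fn long_term_acquire(&self) -> State {
    self.inner.write().await.take().unwrap()
  }

  async fn restore(&self, state: State) {
    let _ = self.inner.write().await.insert(state);
  }
}
```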
- pub async fn ack_block( - &mut self, - txn: &mut D::Transaction<'_>, - id: >::Id, - ) -> (bool, Vec) { - debug!("block {} acknowledged", hex::encode(&id)); - - let mut scanner = self.scanner.long_term_acquire().await; - - // Get the number for this block - let number = ScannerDb::::block_number(txn, &id) - .expect("main loop trying to operate on data we haven't scanned"); - log::trace!("block {} was {number}", hex::encode(&id)); - - let outputs = ScannerDb::::save_scanned_block(txn, number); - // This has a race condition if we try to ack a block we scanned on a prior boot, and we have - // yet to scan it on this boot - assert!(number <= scanner.ram_scanned.unwrap()); - for output in &outputs { - assert!(scanner.ram_outputs.remove(output.id().as_ref())); - } - - assert_eq!(scanner.need_ack.pop_front().unwrap(), number); - - self.held_scanner = Some(scanner); - - // Load the key from the DB, as it will have already been removed from RAM if retired - let key = ScannerDb::::keys(txn)[0].1; - let is_retirement_block = ScannerDb::::retirement_block(txn, &key) == Some(number); - if is_retirement_block { - ScannerDb::::retire_key(txn); - } - (is_retirement_block, outputs) - } - - pub async fn register_eventuality( - &mut self, - key: &[u8], - block_number: usize, - id: [u8; 32], - eventuality: N::Eventuality, - ) { - let mut lock; - // We won't use held_scanner if we're re-registering on boot - (if let Some(scanner) = self.held_scanner.as_mut() { - scanner - } else { - lock = Some(self.scanner.write().await); - lock.as_mut().unwrap().as_mut().unwrap() - }) - .eventualities - .get_mut(key) - .unwrap() - .register(block_number, id, eventuality) - } - - pub async fn release_lock(&mut self) { - self.scanner.restore(self.held_scanner.take().unwrap()).await - } -} - -impl Scanner { - #[allow(clippy::type_complexity, clippy::new_ret_no_self)] - pub fn new( - network: N, - db: D, - ) -> (ScannerHandle, Vec<(usize, ::G)>) { - let (events_send, events_recv) = mpsc::unbounded_channel(); - let (multisig_completed_send, multisig_completed_recv) = mpsc::unbounded_channel(); - - let keys = ScannerDb::::keys(&db); - let mut eventualities = HashMap::new(); - for key in &keys { - eventualities.insert(key.1.to_bytes().as_ref().to_vec(), EventualitiesTracker::new()); - } - - let ram_scanned = ScannerDb::::latest_scanned_block(&db); - - let scanner = ScannerHold { - scanner: Arc::new(RwLock::new(Some(Scanner { - _db: PhantomData, - - keys: keys.clone(), - - eventualities, - - ram_scanned, - ram_outputs: HashSet::new(), - - need_ack: VecDeque::new(), - - events: events_send, - }))), - }; - tokio::spawn(Scanner::run(db, network, scanner.clone(), multisig_completed_recv)); - - ( - ScannerHandle { - scanner, - held_scanner: None, - events: events_recv, - multisig_completed: multisig_completed_send, - }, - keys, - ) - } - - fn emit(&mut self, event: ScannerEvent) -> bool { - if self.events.send(event).is_err() { - info!("Scanner handler was dropped. 
Shutting down?"); - return false; - } - true - } - - // An async function, to be spawned on a task, to discover and report outputs - async fn run( - mut db: D, - network: N, - scanner_hold: ScannerHold, - mut multisig_completed: mpsc::UnboundedReceiver, - ) { - loop { - let (ram_scanned, latest_block_to_scan) = { - // Sleep 5 seconds to prevent hammering the node/scanner lock - sleep(Duration::from_secs(5)).await; - - let ram_scanned = { - let scanner_lock = scanner_hold.read().await; - let scanner = scanner_lock.as_ref().unwrap(); - - // If we're not scanning for keys yet, wait until we are - if scanner.keys.is_empty() { - continue; - } - - let ram_scanned = scanner.ram_scanned.unwrap(); - // If a Batch has taken too long to be published, start waiting until it is before - // continuing scanning - // Solves a race condition around multisig rotation, documented in the relevant doc - // and demonstrated with mini - if let Some(needing_ack) = scanner.need_ack.front() { - let next = ram_scanned + 1; - let limit = needing_ack + N::CONFIRMATIONS; - assert!(next <= limit); - if next == limit { - continue; - } - }; - - ram_scanned - }; - - ( - ram_scanned, - loop { - break match network.get_latest_block_number().await { - // Only scan confirmed blocks, which we consider effectively finalized - // CONFIRMATIONS - 1 as whatever's in the latest block already has 1 confirm - Ok(latest) => latest.saturating_sub(N::CONFIRMATIONS.saturating_sub(1)), - Err(_) => { - warn!("couldn't get latest block number"); - sleep(Duration::from_secs(60)).await; - continue; - } - }; - }, - ) - }; - - for block_being_scanned in (ram_scanned + 1) ..= latest_block_to_scan { - // Redo the checks for if we're too far ahead - { - let needing_ack = { - let scanner_lock = scanner_hold.read().await; - let scanner = scanner_lock.as_ref().unwrap(); - scanner.need_ack.front().copied() - }; - - if let Some(needing_ack) = needing_ack { - let limit = needing_ack + N::CONFIRMATIONS; - assert!(block_being_scanned <= limit); - if block_being_scanned == limit { - break; - } - } - } - - let Ok(block) = network.get_block(block_being_scanned).await else { - warn!("couldn't get block {block_being_scanned}"); - break; - }; - let block_id = block.id(); - - info!("scanning block: {} ({block_being_scanned})", hex::encode(&block_id)); - - // These DB calls are safe, despite not having a txn, since they're static values - // There's no issue if they're written in advance of expected (such as on reboot) - // They're also only expected here - if let Some(id) = ScannerDb::::block(&db, block_being_scanned) { - if id != block_id { - panic!("reorg'd from finalized {} to {}", hex::encode(id), hex::encode(block_id)); - } - } else { - // TODO: Move this to an unwrap - if let Some(id) = ScannerDb::::block(&db, block_being_scanned.saturating_sub(1)) { - if id != block.parent() { - panic!( - "block {} doesn't build off expected parent {}", - hex::encode(block_id), - hex::encode(id), - ); - } - } - - let mut txn = db.txn(); - ScannerDb::::save_block(&mut txn, block_being_scanned, &block_id); - txn.commit(); - } - - // Scan new blocks - // TODO: This lock acquisition may be long-lived... 
- let mut scanner_lock = scanner_hold.write().await; - let scanner = scanner_lock.as_mut().unwrap(); - - let mut has_activation = false; - let mut outputs = vec![]; - let mut completion_block_numbers = vec![]; - for (activation_number, key) in scanner.keys.clone() { - if activation_number > block_being_scanned { - continue; - } - - if activation_number == block_being_scanned { - has_activation = true; - } - - let key_vec = key.to_bytes().as_ref().to_vec(); - - // TODO: These lines are the ones which will cause a really long-lived lock acquisition - for output in network.get_outputs(&block, key).await { - assert_eq!(output.key(), key); - if output.balance().amount.0 >= N::DUST { - outputs.push(output); - } - } - - for (id, (block_number, tx, completion)) in network - .get_eventuality_completions(scanner.eventualities.get_mut(&key_vec).unwrap(), &block) - .await - { - info!( - "eventuality {} resolved by {}, as found on chain", - hex::encode(id), - hex::encode(tx.as_ref()) - ); - - completion_block_numbers.push(block_number); - // This must be before the mission of ScannerEvent::Block, per commentary in mod.rs - if !scanner.emit(ScannerEvent::Completed( - key_vec.clone(), - block_number, - id, - tx, - completion, - )) { - return; - } - } - } - - // Panic if we've already seen these outputs - for output in &outputs { - let id = output.id(); - info!( - "block {} had output {} worth {:?}", - hex::encode(&block_id), - hex::encode(&id), - output.balance(), - ); - - // On Bitcoin, the output ID should be unique for a given chain - // On Monero, it's trivial to make an output sharing an ID with another - // We should only scan outputs with valid IDs however, which will be unique - - /* - The safety of this code must satisfy the following conditions: - 1) seen is not set for the first occurrence - 2) seen is set for any future occurrence - - seen is only written to after this code completes. Accordingly, it cannot be set - before the first occurrence UNLESSS it's set, yet the last scanned block isn't. - They are both written in the same database transaction, preventing this. - - As for future occurrences, the RAM entry ensures they're handled properly even if - the database has yet to be set. - - On reboot, which will clear the RAM, if seen wasn't set, neither was latest scanned - block. Accordingly, this will scan from some prior block, re-populating the RAM. - - If seen was set, then this will be successfully read. - - There's also no concern ram_outputs was pruned, yet seen wasn't set, as pruning - from ram_outputs will acquire a write lock (preventing this code from acquiring - its own write lock and running), and during its holding of the write lock, it - commits the transaction setting seen and the latest scanned block. - - This last case isn't true. Committing seen/latest_scanned_block happens after - relinquishing the write lock. - - TODO2: Only update ram_outputs after committing the TXN in question. - */ - let seen = ScannerDb::::seen(&db, &id); - let id = id.as_ref().to_vec(); - if seen || scanner.ram_outputs.contains(&id) { - panic!("scanned an output multiple times"); - } - scanner.ram_outputs.insert(id); - } - - // We could remove this, if instead of doing the first block which passed - // requirements + CONFIRMATIONS, we simply emitted an event for every block where - // `number % CONFIRMATIONS == 0` (once at the final stage for the existing multisig) - // There's no need at this point, yet the latter may be more suitable for modeling... 
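// An illustrative sketch, with assumed CONFIRMATIONS and block numbers, of the scan-window
// arithmetic used in the loop above: the scanner only walks up to latest - (CONFIRMATIONS - 1),
// and stalls once the next block to scan would reach needing_ack + CONFIRMATIONS while a Batch
// still awaits acknowledgement.
fn scan_window_sketch() {
  const CONFIRMATIONS: usize = 6; // assumed value for illustration
  let latest = 100_usize;
  let latest_block_to_scan = latest.saturating_sub(CONFIRMATIONS.saturating_sub(1));
  assert_eq!(latest_block_to_scan, 95);

  // A block still needing acknowledgement at 90 halts scanning once block 96 would be scanned
  let needing_ack = 90_usize;
  assert_eq!(needing_ack + CONFIRMATIONS, 96);
}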
- async fn check_multisig_completed( - db: &mut D, - multisig_completed: &mut mpsc::UnboundedReceiver, - block_number: usize, - ) -> bool { - match multisig_completed.recv().await { - None => { - info!("Scanner handler was dropped. Shutting down?"); - false - } - Some(completed) => { - // Set the retirement block as block_number + CONFIRMATIONS - if completed { - let mut txn = db.txn(); - // The retiring key is the earliest one still around - let retiring_key = ScannerDb::::keys(&txn)[0].1; - // This value is static w.r.t. the key - ScannerDb::::save_retirement_block( - &mut txn, - &retiring_key, - block_number + N::CONFIRMATIONS, - ); - txn.commit(); - } - true - } - } - } - - drop(scanner_lock); - // Now that we've dropped the Scanner lock, we need to handle the multisig_completed - // channel before we decide if this block should be fired or not - // (holding the Scanner risks a deadlock) - for block_number in completion_block_numbers { - if !check_multisig_completed::(&mut db, &mut multisig_completed, block_number).await - { - return; - }; - } - - // Reacquire the scanner - let mut scanner_lock = scanner_hold.write().await; - let scanner = scanner_lock.as_mut().unwrap(); - - // Only emit an event if any of the following is true: - // - This is an activation block - // - This is a retirement block - // - There's outputs - // as only those blocks are meaningful and warrant obtaining synchrony over - let is_retirement_block = - ScannerDb::::retirement_block(&db, &scanner.keys[0].1) == Some(block_being_scanned); - let sent_block = if has_activation || is_retirement_block || (!outputs.is_empty()) { - // Save the outputs to disk - let mut txn = db.txn(); - ScannerDb::::save_outputs(&mut txn, &block_id, &outputs); - txn.commit(); - - // Send all outputs - if !scanner.emit(ScannerEvent::Block { is_retirement_block, block: block_id, outputs }) { - return; - } - - // Since we're creating a Batch, mark it as needing ack - scanner.need_ack.push_back(block_being_scanned); - true - } else { - false - }; - - // Remove it from memory - if is_retirement_block { - let retired = scanner.keys.remove(0).1; - scanner.eventualities.remove(retired.to_bytes().as_ref()); - } - - // Update ram_scanned - scanner.ram_scanned = Some(block_being_scanned); - - drop(scanner_lock); - // If we sent a Block event, once again check multisig_completed - if sent_block && - (!check_multisig_completed::( - &mut db, - &mut multisig_completed, - block_being_scanned, - ) - .await) - { - return; - } - } - } - } -} diff --git a/processor/src/multisigs/scheduler/mod.rs b/processor/src/multisigs/scheduler/mod.rs deleted file mode 100644 index 26c940fe8..000000000 --- a/processor/src/multisigs/scheduler/mod.rs +++ /dev/null @@ -1,96 +0,0 @@ -use core::fmt::Debug; -use std::io; - -use ciphersuite::Ciphersuite; - -use serai_client::primitives::{NetworkId, Balance}; - -use crate::{networks::Network, Db, Payment, Plan}; - -pub(crate) mod utxo; -pub(crate) mod smart_contract; - -pub trait SchedulerAddendum: Send + Clone + PartialEq + Debug { - fn read(reader: &mut R) -> io::Result; - fn write(&self, writer: &mut W) -> io::Result<()>; -} - -impl SchedulerAddendum for () { - fn read(_: &mut R) -> io::Result { - Ok(()) - } - fn write(&self, _: &mut W) -> io::Result<()> { - Ok(()) - } -} - -pub trait Scheduler: Sized + Clone + PartialEq + Debug { - type Addendum: SchedulerAddendum; - - /// Check if this Scheduler is empty. - fn empty(&self) -> bool; - - /// Create a new Scheduler. 
- fn new( - txn: &mut D::Transaction<'_>, - key: ::G, - network: NetworkId, - ) -> Self; - - /// Load a Scheduler from the DB. - fn from_db( - db: &D, - key: ::G, - network: NetworkId, - ) -> io::Result; - - /// Check if a branch is usable. - fn can_use_branch(&self, balance: Balance) -> bool; - - /// Schedule a series of outputs/payments. - fn schedule( - &mut self, - txn: &mut D::Transaction<'_>, - utxos: Vec, - payments: Vec>, - // TODO: Tighten this to multisig_for_any_change - key_for_any_change: ::G, - force_spend: bool, - ) -> Vec>; - - /// Consume all payments still pending within this Scheduler, without scheduling them. - fn consume_payments(&mut self, txn: &mut D::Transaction<'_>) -> Vec>; - - /// Note a branch output as having been created, with the amount it was actually created with, - /// or not having been created due to being too small. - fn created_output( - &mut self, - txn: &mut D::Transaction<'_>, - expected: u64, - actual: Option, - ); - - /// Refund a specific output. - fn refund_plan( - &mut self, - txn: &mut D::Transaction<'_>, - output: N::Output, - refund_to: N::Address, - ) -> Plan; - - /// Shim the forwarding Plan as necessary to obtain a fee estimate. - /// - /// If this Scheduler is for a Network which requires forwarding, this must return Some with a - /// plan with identical fee behavior. If forwarding isn't necessary, returns None. - fn shim_forward_plan(output: N::Output, to: ::G) -> Option>; - - /// Forward a specific output to the new multisig. - /// - /// Returns None if no forwarding is necessary. Must return Some if forwarding is necessary. - fn forward_plan( - &mut self, - txn: &mut D::Transaction<'_>, - output: N::Output, - to: ::G, - ) -> Option>; -} diff --git a/processor/src/multisigs/scheduler/smart_contract.rs b/processor/src/multisigs/scheduler/smart_contract.rs deleted file mode 100644 index 3da8acf48..000000000 --- a/processor/src/multisigs/scheduler/smart_contract.rs +++ /dev/null @@ -1,208 +0,0 @@ -use std::{io, collections::HashSet}; - -use ciphersuite::{group::GroupEncoding, Ciphersuite}; - -use serai_client::primitives::{NetworkId, Coin, Balance}; - -use crate::{ - Get, DbTxn, Db, Payment, Plan, create_db, - networks::{Output, Network}, - multisigs::scheduler::{SchedulerAddendum, Scheduler as SchedulerTrait}, -}; - -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Scheduler { - key: ::G, - coins: HashSet, - rotated: bool, -} - -#[derive(Clone, Copy, PartialEq, Eq, Debug)] -pub enum Addendum { - Nonce(u64), - RotateTo { nonce: u64, new_key: ::G }, -} - -impl SchedulerAddendum for Addendum { - fn read(reader: &mut R) -> io::Result { - let mut kind = [0xff]; - reader.read_exact(&mut kind)?; - match kind[0] { - 0 => { - let mut nonce = [0; 8]; - reader.read_exact(&mut nonce)?; - Ok(Addendum::Nonce(u64::from_le_bytes(nonce))) - } - 1 => { - let mut nonce = [0; 8]; - reader.read_exact(&mut nonce)?; - let nonce = u64::from_le_bytes(nonce); - - let new_key = N::Curve::read_G(reader)?; - Ok(Addendum::RotateTo { nonce, new_key }) - } - _ => Err(io::Error::other("reading unknown Addendum type"))?, - } - } - fn write(&self, writer: &mut W) -> io::Result<()> { - match self { - Addendum::Nonce(nonce) => { - writer.write_all(&[0])?; - writer.write_all(&nonce.to_le_bytes()) - } - Addendum::RotateTo { nonce, new_key } => { - writer.write_all(&[1])?; - writer.write_all(&nonce.to_le_bytes())?; - writer.write_all(new_key.to_bytes().as_ref()) - } - } - } -} - -create_db! 
{ - SchedulerDb { - LastNonce: () -> u64, - RotatedTo: (key: &[u8]) -> Vec, - } -} - -impl> SchedulerTrait for Scheduler { - type Addendum = Addendum; - - /// Check if this Scheduler is empty. - fn empty(&self) -> bool { - self.rotated - } - - /// Create a new Scheduler. - fn new( - _txn: &mut D::Transaction<'_>, - key: ::G, - network: NetworkId, - ) -> Self { - assert!(N::branch_address(key).is_none()); - assert!(N::change_address(key).is_none()); - assert!(N::forward_address(key).is_none()); - - Scheduler { key, coins: network.coins().iter().copied().collect(), rotated: false } - } - - /// Load a Scheduler from the DB. - fn from_db( - db: &D, - key: ::G, - network: NetworkId, - ) -> io::Result { - Ok(Scheduler { - key, - coins: network.coins().iter().copied().collect(), - rotated: RotatedTo::get(db, key.to_bytes().as_ref()).is_some(), - }) - } - - fn can_use_branch(&self, _balance: Balance) -> bool { - false - } - - fn schedule( - &mut self, - txn: &mut D::Transaction<'_>, - utxos: Vec, - payments: Vec>, - key_for_any_change: ::G, - force_spend: bool, - ) -> Vec> { - for utxo in utxos { - assert!(self.coins.contains(&utxo.balance().coin)); - } - - let mut nonce = LastNonce::get(txn).unwrap_or(1); - let mut plans = vec![]; - for chunk in payments.as_slice().chunks(N::MAX_OUTPUTS) { - // Once we rotate, all further payments should be scheduled via the new multisig - assert!(!self.rotated); - plans.push(Plan { - key: self.key, - inputs: vec![], - payments: chunk.to_vec(), - change: None, - scheduler_addendum: Addendum::Nonce(nonce), - }); - nonce += 1; - } - - // If we're supposed to rotate to the new key, create an empty Plan which will signify the key - // update - if force_spend && (!self.rotated) { - plans.push(Plan { - key: self.key, - inputs: vec![], - payments: vec![], - change: None, - scheduler_addendum: Addendum::RotateTo { nonce, new_key: key_for_any_change }, - }); - nonce += 1; - self.rotated = true; - RotatedTo::set( - txn, - self.key.to_bytes().as_ref(), - &key_for_any_change.to_bytes().as_ref().to_vec(), - ); - } - - LastNonce::set(txn, &nonce); - - plans - } - - fn consume_payments(&mut self, _txn: &mut D::Transaction<'_>) -> Vec> { - vec![] - } - - fn created_output( - &mut self, - _txn: &mut D::Transaction<'_>, - _expected: u64, - _actual: Option, - ) { - panic!("Smart Contract Scheduler created a Branch output") - } - - /// Refund a specific output. - fn refund_plan( - &mut self, - txn: &mut D::Transaction<'_>, - output: N::Output, - refund_to: N::Address, - ) -> Plan { - let current_key = RotatedTo::get(txn, self.key.to_bytes().as_ref()) - .and_then(|key_bytes| ::read_G(&mut key_bytes.as_slice()).ok()) - .unwrap_or(self.key); - - let nonce = LastNonce::get(txn).map_or(1, |nonce| nonce + 1); - LastNonce::set(txn, &(nonce + 1)); - Plan { - key: current_key, - inputs: vec![], - payments: vec![Payment { address: refund_to, data: None, balance: output.balance() }], - change: None, - scheduler_addendum: Addendum::Nonce(nonce), - } - } - - fn shim_forward_plan(_output: N::Output, _to: ::G) -> Option> { - None - } - - /// Forward a specific output to the new multisig. - /// - /// Returns None if no forwarding is necessary. 
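// Worked example of the nonce assignment performed by schedule() above (numbers assumed for
// illustration): with MAX_OUTPUTS = 16, LastNonce unset (so the nonce starts at 1), and 35
// payments, the payments split into chunks of 16, 16, and 3, producing three Plans with
// Addendum::Nonce(1), Nonce(2), and Nonce(3). If force_spend is set, a fourth, empty Plan with
// Addendum::RotateTo { nonce: 4, .. } is appended and LastNonce is persisted as 5.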
- fn forward_plan( - &mut self, - _txn: &mut D::Transaction<'_>, - _output: N::Output, - _to: ::G, - ) -> Option> { - None - } -} diff --git a/processor/src/multisigs/scheduler/utxo.rs b/processor/src/multisigs/scheduler/utxo.rs deleted file mode 100644 index 1865cab91..000000000 --- a/processor/src/multisigs/scheduler/utxo.rs +++ /dev/null @@ -1,631 +0,0 @@ -use std::{ - io::{self, Read}, - collections::{VecDeque, HashMap}, -}; - -use ciphersuite::{group::GroupEncoding, Ciphersuite}; - -use serai_client::primitives::{NetworkId, Coin, Amount, Balance}; - -use crate::{ - DbTxn, Db, Payment, Plan, - networks::{OutputType, Output, Network, UtxoNetwork}, - multisigs::scheduler::Scheduler as SchedulerTrait, -}; - -/// Deterministic output/payment manager. -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Scheduler { - key: ::G, - coin: Coin, - - // Serai, when it has more outputs expected than it can handle in a single transaction, will - // schedule the outputs to be handled later. Immediately, it just creates additional outputs - // which will eventually handle those outputs - // - // These maps map output amounts, which we'll receive in the future, to the payments they should - // be used on - // - // When those output amounts appear, their payments should be scheduled - // The Vec is for all payments that should be done per output instance - // The VecDeque allows multiple sets of payments with the same sum amount to properly co-exist - // - // queued_plans are for outputs which we will create, yet when created, will have their amount - // reduced by the fee it cost to be created. The Scheduler will then be told how what amount the - // output actually has, and it'll be moved into plans - queued_plans: HashMap>>>, - plans: HashMap>>>, - - // UTXOs available - utxos: Vec, - - // Payments awaiting scheduling due to the output availability problem - payments: VecDeque>, -} - -fn scheduler_key(key: &G) -> Vec { - D::key(b"SCHEDULER", b"scheduler", key.to_bytes()) -} - -impl> Scheduler { - pub fn empty(&self) -> bool { - self.queued_plans.is_empty() && - self.plans.is_empty() && - self.utxos.is_empty() && - self.payments.is_empty() - } - - fn read( - key: ::G, - coin: Coin, - reader: &mut R, - ) -> io::Result { - let mut read_plans = || -> io::Result<_> { - let mut all_plans = HashMap::new(); - let mut all_plans_len = [0; 4]; - reader.read_exact(&mut all_plans_len)?; - for _ in 0 .. u32::from_le_bytes(all_plans_len) { - let mut amount = [0; 8]; - reader.read_exact(&mut amount)?; - let amount = u64::from_le_bytes(amount); - - let mut plans = VecDeque::new(); - let mut plans_len = [0; 4]; - reader.read_exact(&mut plans_len)?; - for _ in 0 .. u32::from_le_bytes(plans_len) { - let mut payments = vec![]; - let mut payments_len = [0; 4]; - reader.read_exact(&mut payments_len)?; - - for _ in 0 .. u32::from_le_bytes(payments_len) { - payments.push(Payment::read(reader)?); - } - plans.push_back(payments); - } - all_plans.insert(amount, plans); - } - Ok(all_plans) - }; - let queued_plans = read_plans()?; - let plans = read_plans()?; - - let mut utxos = vec![]; - let mut utxos_len = [0; 4]; - reader.read_exact(&mut utxos_len)?; - for _ in 0 .. u32::from_le_bytes(utxos_len) { - utxos.push(N::Output::read(reader)?); - } - - let mut payments = VecDeque::new(); - let mut payments_len = [0; 4]; - reader.read_exact(&mut payments_len)?; - for _ in 0 .. 
u32::from_le_bytes(payments_len) { - payments.push_back(Payment::read(reader)?); - } - - Ok(Scheduler { key, coin, queued_plans, plans, utxos, payments }) - } - - // TODO2: Get rid of this - // We reserialize the entire scheduler on any mutation to save it to the DB which is horrible - // We should have an incremental solution - fn serialize(&self) -> Vec { - let mut res = Vec::with_capacity(4096); - - let mut write_plans = |plans: &HashMap>>>| { - res.extend(u32::try_from(plans.len()).unwrap().to_le_bytes()); - for (amount, list_of_plans) in plans { - res.extend(amount.to_le_bytes()); - res.extend(u32::try_from(list_of_plans.len()).unwrap().to_le_bytes()); - for plan in list_of_plans { - res.extend(u32::try_from(plan.len()).unwrap().to_le_bytes()); - for payment in plan { - payment.write(&mut res).unwrap(); - } - } - } - }; - write_plans(&self.queued_plans); - write_plans(&self.plans); - - res.extend(u32::try_from(self.utxos.len()).unwrap().to_le_bytes()); - for utxo in &self.utxos { - utxo.write(&mut res).unwrap(); - } - - res.extend(u32::try_from(self.payments.len()).unwrap().to_le_bytes()); - for payment in &self.payments { - payment.write(&mut res).unwrap(); - } - - debug_assert_eq!(&Self::read(self.key, self.coin, &mut res.as_slice()).unwrap(), self); - res - } - - pub fn new( - txn: &mut D::Transaction<'_>, - key: ::G, - network: NetworkId, - ) -> Self { - assert!(N::branch_address(key).is_some()); - assert!(N::change_address(key).is_some()); - assert!(N::forward_address(key).is_some()); - - let coin = { - let coins = network.coins(); - assert_eq!(coins.len(), 1); - coins[0] - }; - - let res = Scheduler { - key, - coin, - queued_plans: HashMap::new(), - plans: HashMap::new(), - utxos: vec![], - payments: VecDeque::new(), - }; - // Save it to disk so from_db won't panic if we don't mutate it before rebooting - txn.put(scheduler_key::(&res.key), res.serialize()); - res - } - - pub fn from_db( - db: &D, - key: ::G, - network: NetworkId, - ) -> io::Result { - let coin = { - let coins = network.coins(); - assert_eq!(coins.len(), 1); - coins[0] - }; - - let scheduler = db.get(scheduler_key::(&key)).unwrap_or_else(|| { - panic!("loading scheduler from DB without scheduler for {}", hex::encode(key.to_bytes())) - }); - let mut reader_slice = scheduler.as_slice(); - let reader = &mut reader_slice; - - Self::read(key, coin, reader) - } - - pub fn can_use_branch(&self, balance: Balance) -> bool { - assert_eq!(balance.coin, self.coin); - self.plans.contains_key(&balance.amount.0) - } - - fn execute( - &mut self, - inputs: Vec, - mut payments: Vec>, - key_for_any_change: ::G, - ) -> Plan { - let mut change = false; - let mut max = N::MAX_OUTPUTS; - - let payment_amounts = |payments: &Vec>| { - payments.iter().map(|payment| payment.balance.amount.0).sum::() - }; - - // Requires a change output - if inputs.iter().map(|output| output.balance().amount.0).sum::() != - payment_amounts(&payments) - { - change = true; - max -= 1; - } - - let mut add_plan = |payments| { - let amount = payment_amounts(&payments); - self.queued_plans.entry(amount).or_insert(VecDeque::new()).push_back(payments); - amount - }; - - let branch_address = N::branch_address(self.key).unwrap(); - - // If we have more payments than we can handle in a single TX, create plans for them - // TODO2: This isn't perfect. 
For 258 outputs, and a MAX_OUTPUTS of 16, this will create: - // 15 branches of 16 leaves - // 1 branch of: - // - 1 branch of 16 leaves - // - 2 leaves - // If this was perfect, the heaviest branch would have 1 branch of 3 leaves and 15 leaves - while payments.len() > max { - // The resulting TX will have the remaining payments and a new branch payment - let to_remove = (payments.len() + 1) - N::MAX_OUTPUTS; - // Don't remove more than possible - let to_remove = to_remove.min(N::MAX_OUTPUTS); - - // Create the plan - let removed = payments.drain((payments.len() - to_remove) ..).collect::>(); - assert_eq!(removed.len(), to_remove); - let amount = add_plan(removed); - - // Create the payment for the plan - // Push it to the front so it's not moved into a branch until all lower-depth items are - payments.insert( - 0, - Payment { - address: branch_address.clone(), - data: None, - balance: Balance { coin: self.coin, amount: Amount(amount) }, - }, - ); - } - - Plan { - key: self.key, - inputs, - payments, - change: Some(N::change_address(key_for_any_change).unwrap()).filter(|_| change), - scheduler_addendum: (), - } - } - - fn add_outputs( - &mut self, - mut utxos: Vec, - key_for_any_change: ::G, - ) -> Vec> { - log::info!("adding {} outputs", utxos.len()); - - let mut txs = vec![]; - - for utxo in utxos.drain(..) { - if utxo.kind() == OutputType::Branch { - let amount = utxo.balance().amount.0; - if let Some(plans) = self.plans.get_mut(&amount) { - // Execute the first set of payments possible with an output of this amount - let payments = plans.pop_front().unwrap(); - // They won't be equal if we dropped payments due to being dust - assert!(amount >= payments.iter().map(|payment| payment.balance.amount.0).sum::()); - - // If we've grabbed the last plan for this output amount, remove it from the map - if plans.is_empty() { - self.plans.remove(&amount); - } - - // Create a TX for these payments - txs.push(self.execute(vec![utxo], payments, key_for_any_change)); - continue; - } - } - - self.utxos.push(utxo); - } - - log::info!("{} planned TXs have had their required inputs confirmed", txs.len()); - txs - } - - // Schedule a series of outputs/payments. - pub fn schedule( - &mut self, - txn: &mut D::Transaction<'_>, - utxos: Vec, - mut payments: Vec>, - key_for_any_change: ::G, - force_spend: bool, - ) -> Vec> { - for utxo in &utxos { - assert_eq!(utxo.balance().coin, self.coin); - } - for payment in &payments { - assert_eq!(payment.balance.coin, self.coin); - } - - // Drop payments to our own branch address - /* - created_output will be called any time we send to a branch address. If it's called, and it - wasn't expecting to be called, that's almost certainly an error. The only way to guarantee - this however is to only have us send to a branch address when creating a branch, hence the - dropping of pointless payments. - - This is not comprehensive as a payment may still be made to another active multisig's branch - address, depending on timing. This is safe as the issue only occurs when a multisig sends to - its *own* branch address, since created_output is called on the signer's Scheduler. 
- */ - { - let branch_address = N::branch_address(self.key).unwrap(); - payments = - payments.drain(..).filter(|payment| payment.address != branch_address).collect::>(); - } - - let mut plans = self.add_outputs(utxos, key_for_any_change); - - log::info!("scheduling {} new payments", payments.len()); - - // Add all new payments to the list of pending payments - self.payments.extend(payments); - let payments_at_start = self.payments.len(); - log::info!("{} payments are now scheduled", payments_at_start); - - // If we don't have UTXOs available, don't try to continue - if self.utxos.is_empty() { - log::info!("no utxos currently available"); - return plans; - } - - // Sort UTXOs so the highest valued ones are first - self.utxos.sort_by(|a, b| a.balance().amount.0.cmp(&b.balance().amount.0).reverse()); - - // We always want to aggregate our UTXOs into a single UTXO in the name of simplicity - // We may have more UTXOs than will fit into a TX though - // We use the most valuable UTXOs to handle our current payments, and we return aggregation TXs - // for the rest of the inputs - // Since we do multiple aggregation TXs at once, this will execute in logarithmic time - let utxos = self.utxos.drain(..).collect::>(); - let mut utxo_chunks = - utxos.chunks(N::MAX_INPUTS).map(<[::Output]>::to_vec).collect::>(); - - // Use the first chunk for any scheduled payments, since it has the most value - let utxos = utxo_chunks.remove(0); - - // If the last chunk exists and only has one output, don't try aggregating it - // Set it to be restored to UTXO set - let mut to_restore = None; - if let Some(mut chunk) = utxo_chunks.pop() { - if chunk.len() == 1 { - to_restore = Some(chunk.pop().unwrap()); - } else { - utxo_chunks.push(chunk); - } - } - - for chunk in utxo_chunks.drain(..) { - log::debug!("aggregating a chunk of {} inputs", chunk.len()); - plans.push(Plan { - key: self.key, - inputs: chunk, - payments: vec![], - change: Some(N::change_address(key_for_any_change).unwrap()), - scheduler_addendum: (), - }) - } - - // We want to use all possible UTXOs for all possible payments - let mut balance = utxos.iter().map(|output| output.balance().amount.0).sum::(); - - // If we can't fulfill the next payment, we have encountered an instance of the UTXO - // availability problem - // This shows up in networks like Monero, where because we spent outputs, our change has yet to - // re-appear. 
Since it has yet to re-appear, we only operate with a balance which is a subset - // of our total balance - // Despite this, we may be ordered to fulfill a payment which is our total balance - // The solution is to wait for the temporarily unavailable change outputs to re-appear, - // granting us access to our full balance - let mut executing = vec![]; - while !self.payments.is_empty() { - let amount = self.payments[0].balance.amount.0; - if balance.checked_sub(amount).is_some() { - balance -= amount; - executing.push(self.payments.pop_front().unwrap()); - } else { - // Doesn't check if other payments would fit into the current batch as doing so may never - // let enough inputs become simultaneously availabile to enable handling of payments[0] - break; - } - } - - // Now that we have the list of payments we can successfully handle right now, create the TX - // for them - if !executing.is_empty() { - plans.push(self.execute(utxos, executing, key_for_any_change)); - } else { - // If we don't have any payments to execute, save these UTXOs for later - self.utxos.extend(utxos); - } - - // If we're instructed to force a spend, do so - // This is used when an old multisig is retiring and we want to always transfer outputs to the - // new one, regardless if we currently have payments - if force_spend && (!self.utxos.is_empty()) { - assert!(self.utxos.len() <= N::MAX_INPUTS); - plans.push(Plan { - key: self.key, - inputs: self.utxos.drain(..).collect::>(), - payments: vec![], - change: Some(N::change_address(key_for_any_change).unwrap()), - scheduler_addendum: (), - }); - } - - // If there's a UTXO to restore, restore it - // This is done now as if there is a to_restore output, and it was inserted into self.utxos - // earlier, self.utxos.len() may become `N::MAX_INPUTS + 1` - // The prior block requires the len to be `<= N::MAX_INPUTS` - if let Some(to_restore) = to_restore { - self.utxos.push(to_restore); - } - - txn.put(scheduler_key::(&self.key), self.serialize()); - - log::info!( - "created {} plans containing {} payments to sign, with {} payments pending scheduling", - plans.len(), - payments_at_start - self.payments.len(), - self.payments.len(), - ); - plans - } - - pub fn consume_payments(&mut self, txn: &mut D::Transaction<'_>) -> Vec> { - let res: Vec<_> = self.payments.drain(..).collect(); - if !res.is_empty() { - txn.put(scheduler_key::(&self.key), self.serialize()); - } - res - } - - // Note a branch output as having been created, with the amount it was actually created with, - // or not having been created due to being too small - pub fn created_output( - &mut self, - txn: &mut D::Transaction<'_>, - expected: u64, - actual: Option, - ) { - log::debug!("output expected to have {} had {:?} after fees", expected, actual); - - // Get the payments this output is expected to handle - let queued = self.queued_plans.get_mut(&expected).unwrap(); - let mut payments = queued.pop_front().unwrap(); - assert_eq!(expected, payments.iter().map(|payment| payment.balance.amount.0).sum::()); - // If this was the last set of payments at this amount, remove it - if queued.is_empty() { - self.queued_plans.remove(&expected); - } - - // If we didn't actually create this output, return, dropping the child payments - let Some(actual) = actual else { return }; - - // Amortize the fee amongst all payments underneath this branch - { - let mut to_amortize = actual - expected; - // If the payments are worth less than this fee we need to amortize, return, dropping them - if payments.iter().map(|payment| 
payment.balance.amount.0).sum::() < to_amortize { - return; - } - while to_amortize != 0 { - let payments_len = u64::try_from(payments.len()).unwrap(); - let per_payment = to_amortize / payments_len; - let mut overage = to_amortize % payments_len; - - for payment in &mut payments { - let to_subtract = per_payment + overage; - // Only subtract the overage once - overage = 0; - - let subtractable = payment.balance.amount.0.min(to_subtract); - to_amortize -= subtractable; - payment.balance.amount.0 -= subtractable; - } - } - } - - // Drop payments now below the dust threshold - let payments = payments - .into_iter() - .filter(|payment| payment.balance.amount.0 >= N::DUST) - .collect::>(); - // Sanity check this was done properly - assert!(actual >= payments.iter().map(|payment| payment.balance.amount.0).sum::()); - - // If there's no payments left, return - if payments.is_empty() { - return; - } - - self.plans.entry(actual).or_insert(VecDeque::new()).push_back(payments); - - // TODO2: This shows how ridiculous the serialize function is - txn.put(scheduler_key::(&self.key), self.serialize()); - } -} - -impl> SchedulerTrait for Scheduler { - type Addendum = (); - - /// Check if this Scheduler is empty. - fn empty(&self) -> bool { - Scheduler::empty(self) - } - - /// Create a new Scheduler. - fn new( - txn: &mut D::Transaction<'_>, - key: ::G, - network: NetworkId, - ) -> Self { - Scheduler::new::(txn, key, network) - } - - /// Load a Scheduler from the DB. - fn from_db( - db: &D, - key: ::G, - network: NetworkId, - ) -> io::Result { - Scheduler::from_db::(db, key, network) - } - - /// Check if a branch is usable. - fn can_use_branch(&self, balance: Balance) -> bool { - Scheduler::can_use_branch(self, balance) - } - - /// Schedule a series of outputs/payments. - fn schedule( - &mut self, - txn: &mut D::Transaction<'_>, - utxos: Vec, - payments: Vec>, - key_for_any_change: ::G, - force_spend: bool, - ) -> Vec> { - Scheduler::schedule::(self, txn, utxos, payments, key_for_any_change, force_spend) - } - - /// Consume all payments still pending within this Scheduler, without scheduling them. - fn consume_payments(&mut self, txn: &mut D::Transaction<'_>) -> Vec> { - Scheduler::consume_payments::(self, txn) - } - - /// Note a branch output as having been created, with the amount it was actually created with, - /// or not having been created due to being too small. - // TODO: Move this to Balance. 
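// Worked example of the amortization loop above (illustrative numbers): amortizing a fee of 30
// across 4 payments gives per_payment = 30 / 4 = 7 with an overage of 30 % 4 = 2. The first
// payment is reduced by 7 + 2 = 9 and the remaining three by 7 each, consuming the full 30 in a
// single pass; further passes only occur if some payment was too small to cover its share.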
- fn created_output( - &mut self, - txn: &mut D::Transaction<'_>, - expected: u64, - actual: Option, - ) { - Scheduler::created_output::(self, txn, expected, actual) - } - - fn refund_plan( - &mut self, - _: &mut D::Transaction<'_>, - output: N::Output, - refund_to: N::Address, - ) -> Plan { - let output_id = output.id().as_ref().to_vec(); - let res = Plan { - key: output.key(), - // Uses a payment as this will still be successfully sent due to fee amortization, - // and because change is currently always a Serai key - payments: vec![Payment { address: refund_to, data: None, balance: output.balance() }], - inputs: vec![output], - change: None, - scheduler_addendum: (), - }; - log::info!("refund plan for {} has ID {}", hex::encode(output_id), hex::encode(res.id())); - res - } - - fn shim_forward_plan(output: N::Output, to: ::G) -> Option> { - Some(Plan { - key: output.key(), - payments: vec![Payment { - address: N::forward_address(to).unwrap(), - data: None, - balance: output.balance(), - }], - inputs: vec![output], - change: None, - scheduler_addendum: (), - }) - } - - fn forward_plan( - &mut self, - _: &mut D::Transaction<'_>, - output: N::Output, - to: ::G, - ) -> Option> { - assert_eq!(self.key, output.key()); - // Call shim as shim returns the actual - Self::shim_forward_plan(output, to) - } -} diff --git a/processor/src/networks/bitcoin.rs b/processor/src/networks/bitcoin.rs deleted file mode 100644 index 43cad1c78..000000000 --- a/processor/src/networks/bitcoin.rs +++ /dev/null @@ -1,942 +0,0 @@ -use std::{sync::OnceLock, time::Duration, io, collections::HashMap}; - -use async_trait::async_trait; - -use scale::{Encode, Decode}; - -use ciphersuite::group::ff::PrimeField; -use k256::{ProjectivePoint, Scalar}; -use frost::{ - curve::{Curve, Secp256k1}, - ThresholdKeys, -}; - -use tokio::time::sleep; - -use bitcoin_serai::{ - bitcoin::{ - hashes::Hash as HashTrait, - key::{Parity, XOnlyPublicKey}, - consensus::{Encodable, Decodable}, - script::Instruction, - Transaction, Block, ScriptBuf, - opcodes::all::{OP_SHA256, OP_EQUALVERIFY}, - }, - wallet::{ - tweak_keys, p2tr_script_buf, ReceivedOutput, Scanner, TransactionError, - SignableTransaction as BSignableTransaction, TransactionMachine, - }, - rpc::{RpcError, Rpc}, -}; - -#[cfg(test)] -use bitcoin_serai::bitcoin::{ - secp256k1::{SECP256K1, SecretKey, Message}, - PrivateKey, PublicKey, - sighash::{EcdsaSighashType, SighashCache}, - script::PushBytesBuf, - absolute::LockTime, - Amount as BAmount, Sequence, Script, Witness, OutPoint, - transaction::Version, - blockdata::transaction::{TxIn, TxOut}, -}; - -use serai_client::{ - primitives::{MAX_DATA_LEN, Coin, NetworkId, Amount, Balance}, - networks::bitcoin::Address, -}; - -use crate::{ - networks::{ - NetworkError, Block as BlockTrait, OutputType, Output as OutputTrait, - Transaction as TransactionTrait, SignableTransaction as SignableTransactionTrait, - Eventuality as EventualityTrait, EventualitiesTracker, Network, UtxoNetwork, - }, - Payment, - multisigs::scheduler::utxo::Scheduler, -}; - -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct OutputId(pub [u8; 36]); -impl Default for OutputId { - fn default() -> Self { - Self([0; 36]) - } -} -impl AsRef<[u8]> for OutputId { - fn as_ref(&self) -> &[u8] { - self.0.as_ref() - } -} -impl AsMut<[u8]> for OutputId { - fn as_mut(&mut self) -> &mut [u8] { - self.0.as_mut() - } -} - -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Output { - kind: OutputType, - presumed_origin: Option
, - output: ReceivedOutput, - data: Vec, -} - -impl OutputTrait for Output { - type Id = OutputId; - - fn kind(&self) -> OutputType { - self.kind - } - - fn id(&self) -> Self::Id { - let mut res = OutputId::default(); - self.output.outpoint().consensus_encode(&mut res.as_mut()).unwrap(); - debug_assert_eq!( - { - let mut outpoint = vec![]; - self.output.outpoint().consensus_encode(&mut outpoint).unwrap(); - outpoint - }, - res.as_ref().to_vec() - ); - res - } - - fn tx_id(&self) -> [u8; 32] { - let mut hash = *self.output.outpoint().txid.as_raw_hash().as_byte_array(); - hash.reverse(); - hash - } - - fn key(&self) -> ProjectivePoint { - let script = &self.output.output().script_pubkey; - assert!(script.is_p2tr()); - let Instruction::PushBytes(key) = script.instructions_minimal().last().unwrap().unwrap() else { - panic!("last item in v1 Taproot script wasn't bytes") - }; - let key = XOnlyPublicKey::from_slice(key.as_ref()) - .expect("last item in v1 Taproot script wasn't x-only public key"); - Secp256k1::read_G(&mut key.public_key(Parity::Even).serialize().as_slice()).unwrap() - - (ProjectivePoint::GENERATOR * self.output.offset()) - } - - fn presumed_origin(&self) -> Option
{ - self.presumed_origin.clone() - } - - fn balance(&self) -> Balance { - Balance { coin: Coin::Bitcoin, amount: Amount(self.output.value()) } - } - - fn data(&self) -> &[u8] { - &self.data - } - - fn write(&self, writer: &mut W) -> io::Result<()> { - self.kind.write(writer)?; - let presumed_origin: Option> = self.presumed_origin.clone().map(Into::into); - writer.write_all(&presumed_origin.encode())?; - self.output.write(writer)?; - writer.write_all(&u16::try_from(self.data.len()).unwrap().to_le_bytes())?; - writer.write_all(&self.data) - } - - fn read(mut reader: &mut R) -> io::Result { - Ok(Output { - kind: OutputType::read(reader)?, - presumed_origin: { - let mut io_reader = scale::IoReader(reader); - let res = Option::>::decode(&mut io_reader) - .unwrap() - .map(|address| Address::try_from(address).unwrap()); - reader = io_reader.0; - res - }, - output: ReceivedOutput::read(reader)?, - data: { - let mut data_len = [0; 2]; - reader.read_exact(&mut data_len)?; - - let mut data = vec![0; usize::from(u16::from_le_bytes(data_len))]; - reader.read_exact(&mut data)?; - data - }, - }) - } -} - -#[derive(Clone, Copy, PartialEq, Eq, Debug)] -pub struct Fee(u64); - -#[async_trait] -impl TransactionTrait for Transaction { - type Id = [u8; 32]; - fn id(&self) -> Self::Id { - let mut hash = *self.compute_txid().as_raw_hash().as_byte_array(); - hash.reverse(); - hash - } - - #[cfg(test)] - async fn fee(&self, network: &Bitcoin) -> u64 { - let mut value = 0; - for input in &self.input { - let output = input.previous_output; - let mut hash = *output.txid.as_raw_hash().as_byte_array(); - hash.reverse(); - value += network.rpc.get_transaction(&hash).await.unwrap().output - [usize::try_from(output.vout).unwrap()] - .value - .to_sat(); - } - for output in &self.output { - value -= output.value.to_sat(); - } - value - } -} - -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Eventuality([u8; 32]); - -#[derive(Clone, PartialEq, Eq, Default, Debug)] -pub struct EmptyClaim; -impl AsRef<[u8]> for EmptyClaim { - fn as_ref(&self) -> &[u8] { - &[] - } -} -impl AsMut<[u8]> for EmptyClaim { - fn as_mut(&mut self) -> &mut [u8] { - &mut [] - } -} - -impl EventualityTrait for Eventuality { - type Claim = EmptyClaim; - type Completion = Transaction; - - fn lookup(&self) -> Vec { - self.0.to_vec() - } - - fn read(reader: &mut R) -> io::Result { - let mut id = [0; 32]; - reader - .read_exact(&mut id) - .map_err(|_| io::Error::other("couldn't decode ID in eventuality"))?; - Ok(Eventuality(id)) - } - fn serialize(&self) -> Vec { - self.0.to_vec() - } - - fn claim(_: &Transaction) -> EmptyClaim { - EmptyClaim - } - fn serialize_completion(completion: &Transaction) -> Vec { - let mut buf = vec![]; - completion.consensus_encode(&mut buf).unwrap(); - buf - } - fn read_completion(reader: &mut R) -> io::Result { - Transaction::consensus_decode(&mut io::BufReader::with_capacity(0, reader)) - .map_err(|e| io::Error::other(format!("{e}"))) - } -} - -#[derive(Clone, Debug)] -pub struct SignableTransaction { - actual: BSignableTransaction, -} -impl PartialEq for SignableTransaction { - fn eq(&self, other: &SignableTransaction) -> bool { - self.actual == other.actual - } -} -impl Eq for SignableTransaction {} -impl SignableTransactionTrait for SignableTransaction { - fn fee(&self) -> u64 { - self.actual.fee() - } -} - -#[async_trait] -impl BlockTrait for Block { - type Id = [u8; 32]; - fn id(&self) -> Self::Id { - let mut hash = *self.block_hash().as_raw_hash().as_byte_array(); - hash.reverse(); - hash - } - - fn parent(&self) -> 
Self::Id { - let mut hash = *self.header.prev_blockhash.as_raw_hash().as_byte_array(); - hash.reverse(); - hash - } - - async fn time(&self, rpc: &Bitcoin) -> u64 { - // Use the network median time defined in BIP-0113 since the in-block time isn't guaranteed to - // be monotonic - let mut timestamps = vec![u64::from(self.header.time)]; - let mut parent = self.parent(); - // BIP-0113 uses a median of the prior 11 blocks - while timestamps.len() < 11 { - let mut parent_block; - while { - parent_block = rpc.rpc.get_block(&parent).await; - parent_block.is_err() - } { - log::error!("couldn't get parent block when trying to get block time: {parent_block:?}"); - sleep(Duration::from_secs(5)).await; - } - let parent_block = parent_block.unwrap(); - timestamps.push(u64::from(parent_block.header.time)); - parent = parent_block.parent(); - - if parent == [0; 32] { - break; - } - } - timestamps.sort(); - timestamps[timestamps.len() / 2] - } -} - -const KEY_DST: &[u8] = b"Serai Bitcoin Output Offset"; -static BRANCH_OFFSET: OnceLock = OnceLock::new(); -static CHANGE_OFFSET: OnceLock = OnceLock::new(); -static FORWARD_OFFSET: OnceLock = OnceLock::new(); - -// Always construct the full scanner in order to ensure there's no collisions -fn scanner( - key: ProjectivePoint, -) -> (Scanner, HashMap, HashMap, OutputType>) { - let mut scanner = Scanner::new(key).unwrap(); - let mut offsets = HashMap::from([(OutputType::External, Scalar::ZERO)]); - - let zero = Scalar::ZERO.to_repr(); - let zero_ref: &[u8] = zero.as_ref(); - let mut kinds = HashMap::from([(zero_ref.to_vec(), OutputType::External)]); - - let mut register = |kind, offset| { - let offset = scanner.register_offset(offset).expect("offset collision"); - offsets.insert(kind, offset); - - let offset = offset.to_repr(); - let offset_ref: &[u8] = offset.as_ref(); - kinds.insert(offset_ref.to_vec(), kind); - }; - - register( - OutputType::Branch, - *BRANCH_OFFSET.get_or_init(|| Secp256k1::hash_to_F(KEY_DST, b"branch")), - ); - register( - OutputType::Change, - *CHANGE_OFFSET.get_or_init(|| Secp256k1::hash_to_F(KEY_DST, b"change")), - ); - register( - OutputType::Forwarded, - *FORWARD_OFFSET.get_or_init(|| Secp256k1::hash_to_F(KEY_DST, b"forward")), - ); - - (scanner, offsets, kinds) -} - -#[derive(Clone, Debug)] -pub struct Bitcoin { - pub(crate) rpc: Rpc, -} -// Shim required for testing/debugging purposes due to generic arguments also necessitating trait -// bounds -impl PartialEq for Bitcoin { - fn eq(&self, _: &Self) -> bool { - true - } -} -impl Eq for Bitcoin {} - -impl Bitcoin { - pub async fn new(url: String) -> Bitcoin { - let mut res = Rpc::new(url.clone()).await; - while let Err(e) = res { - log::error!("couldn't connect to Bitcoin node: {e:?}"); - sleep(Duration::from_secs(5)).await; - res = Rpc::new(url.clone()).await; - } - Bitcoin { rpc: res.unwrap() } - } - - #[cfg(test)] - pub async fn fresh_chain(&self) { - if self.rpc.get_latest_block_number().await.unwrap() > 0 { - self - .rpc - .rpc_call( - "invalidateblock", - serde_json::json!([hex::encode(self.rpc.get_block_hash(1).await.unwrap())]), - ) - .await - .unwrap() - } - } - - // This function panics on a node which doesn't follow the Bitcoin protocol, which is deemed fine - async fn median_fee(&self, block: &Block) -> Result { - let mut fees = vec![]; - if block.txdata.len() > 1 { - for tx in &block.txdata[1 ..] 
{ - let mut in_value = 0; - for input in &tx.input { - let mut input_tx = input.previous_output.txid.to_raw_hash().to_byte_array(); - input_tx.reverse(); - in_value += self - .rpc - .get_transaction(&input_tx) - .await - .map_err(|_| NetworkError::ConnectionError)? - .output[usize::try_from(input.previous_output.vout).unwrap()] - .value - .to_sat(); - } - let out = tx.output.iter().map(|output| output.value.to_sat()).sum::(); - fees.push((in_value - out) / u64::try_from(tx.vsize()).unwrap()); - } - } - fees.sort(); - let fee = fees.get(fees.len() / 2).copied().unwrap_or(0); - - // The DUST constant documentation notes a relay rule practically enforcing a - // 1000 sat/kilo-vbyte minimum fee. - Ok(Fee(fee.max(1))) - } - - async fn make_signable_transaction( - &self, - block_number: usize, - inputs: &[Output], - payments: &[Payment], - change: &Option
, - calculating_fee: bool, - ) -> Result, NetworkError> { - for payment in payments { - assert_eq!(payment.balance.coin, Coin::Bitcoin); - } - - // TODO2: Use an fee representative of several blocks, cached inside Self - let block_for_fee = self.get_block(block_number).await?; - let fee = self.median_fee(&block_for_fee).await?; - - let payments = payments - .iter() - .map(|payment| { - ( - payment.address.clone().into(), - // If we're solely estimating the fee, don't specify the actual amount - // This won't affect the fee calculation yet will ensure we don't hit a not enough funds - // error - if calculating_fee { Self::DUST } else { payment.balance.amount.0 }, - ) - }) - .collect::>(); - - match BSignableTransaction::new( - inputs.iter().map(|input| input.output.clone()).collect(), - &payments, - change.clone().map(Into::into), - None, - fee.0, - ) { - Ok(signable) => Ok(Some(signable)), - Err(TransactionError::NoInputs) => { - panic!("trying to create a bitcoin transaction without inputs") - } - // No outputs left and the change isn't worth enough/not even enough funds to pay the fee - Err(TransactionError::NoOutputs | TransactionError::NotEnoughFunds) => Ok(None), - // amortize_fee removes payments which fall below the dust threshold - Err(TransactionError::DustPayment) => panic!("dust payment despite removing dust"), - Err(TransactionError::TooMuchData) => { - panic!("too much data despite not specifying data") - } - Err(TransactionError::TooLowFee) => { - panic!("created a transaction whose fee is below the minimum") - } - Err(TransactionError::TooLargeTransaction) => { - panic!("created a too large transaction despite limiting inputs/outputs") - } - } - } - - // Expected script has to start with SHA256 PUSH MSG_HASH OP_EQUALVERIFY .. - fn segwit_data_pattern(script: &ScriptBuf) -> Option { - let mut ins = script.instructions(); - - // first item should be SHA256 code - if ins.next()?.ok()?.opcode()? != OP_SHA256 { - return Some(false); - } - - // next should be a data push - ins.next()?.ok()?.push_bytes()?; - - // next should be a equality check - if ins.next()?.ok()?.opcode()? != OP_EQUALVERIFY { - return Some(false); - } - - Some(true) - } - - fn extract_serai_data(tx: &Transaction) -> Vec { - // check outputs - let mut data = (|| { - for output in &tx.output { - if output.script_pubkey.is_op_return() { - match output.script_pubkey.instructions_minimal().last() { - Some(Ok(Instruction::PushBytes(data))) => return data.as_bytes().to_vec(), - _ => continue, - } - } - } - vec![] - })(); - - // check inputs - if data.is_empty() { - for input in &tx.input { - let witness = input.witness.to_vec(); - // expected witness at least has to have 2 items, msg and the redeem script. 
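// For the expected pattern, the witness stack ends with [.., InInstruction data, redeem script]:
// the redeem script is always the last witness item, so once it matches the
// SHA256 <hash> EQUALVERIFY prefix checked by segwit_data_pattern, the embedded data is the
// second-to-last item, i.e. witness[witness.len() - 2] as read below.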
- if witness.len() >= 2 { - let redeem_script = ScriptBuf::from_bytes(witness.last().unwrap().clone()); - if Self::segwit_data_pattern(&redeem_script) == Some(true) { - data.clone_from(&witness[witness.len() - 2]); // len() - 1 is the redeem_script - break; - } - } - } - } - - data.truncate(MAX_DATA_LEN.try_into().unwrap()); - data - } - - #[cfg(test)] - pub fn sign_btc_input_for_p2pkh( - tx: &Transaction, - input_index: usize, - private_key: &PrivateKey, - ) -> ScriptBuf { - use bitcoin_serai::bitcoin::{Network as BNetwork, Address as BAddress}; - - let public_key = PublicKey::from_private_key(SECP256K1, private_key); - let main_addr = BAddress::p2pkh(public_key, BNetwork::Regtest); - - let mut der = SECP256K1 - .sign_ecdsa_low_r( - &Message::from_digest_slice( - SighashCache::new(tx) - .legacy_signature_hash( - input_index, - &main_addr.script_pubkey(), - EcdsaSighashType::All.to_u32(), - ) - .unwrap() - .to_raw_hash() - .as_ref(), - ) - .unwrap(), - &private_key.inner, - ) - .serialize_der() - .to_vec(); - der.push(1); - - ScriptBuf::builder() - .push_slice(PushBytesBuf::try_from(der).unwrap()) - .push_key(&public_key) - .into_script() - } -} - -// Bitcoin has a max weight of 400,000 (MAX_STANDARD_TX_WEIGHT) -// A non-SegWit TX will have 4 weight units per byte, leaving a max size of 100,000 bytes -// While our inputs are entirely SegWit, such fine tuning is not necessary and could create -// issues in the future (if the size decreases or we misevaluate it) -// It also offers a minimal amount of benefit when we are able to logarithmically accumulate -// inputs -// For 128-byte inputs (36-byte output specification, 64-byte signature, whatever overhead) and -// 64-byte outputs (40-byte script, 8-byte amount, whatever overhead), they together take up 192 -// bytes -// 100,000 / 192 = 520 -// 520 * 192 leaves 160 bytes of overhead for the transaction structure itself -const MAX_INPUTS: usize = 520; -const MAX_OUTPUTS: usize = 520; - -fn address_from_key(key: ProjectivePoint) -> Address { - Address::new( - p2tr_script_buf(key).expect("creating address from key which isn't properly tweaked"), - ) - .expect("couldn't create Serai-representable address for P2TR script") -} - -#[async_trait] -impl Network for Bitcoin { - type Curve = Secp256k1; - - type Transaction = Transaction; - type Block = Block; - - type Output = Output; - type SignableTransaction = SignableTransaction; - type Eventuality = Eventuality; - type TransactionMachine = TransactionMachine; - - type Scheduler = Scheduler; - - type Address = Address; - - const NETWORK: NetworkId = NetworkId::Bitcoin; - const ID: &'static str = "Bitcoin"; - const ESTIMATED_BLOCK_TIME_IN_SECONDS: usize = 600; - const CONFIRMATIONS: usize = 6; - - /* - A Taproot input is: - - 36 bytes for the OutPoint - - 0 bytes for the script (+1 byte for the length) - - 4 bytes for the sequence - Per https://developer.bitcoin.org/reference/transactions.html#raw-transaction-format - - There's also: - - 1 byte for the witness length - - 1 byte for the signature length - - 64 bytes for the signature - which have the SegWit discount. - - (4 * (36 + 1 + 4)) + (1 + 1 + 64) = 164 + 66 = 230 weight units - 230 ceil div 4 = 57 vbytes - - Bitcoin defines multiple minimum feerate constants *per kilo-vbyte*. Currently, these are: - - 1000 sat/kilo-vbyte for a transaction to be relayed - - Each output's value must exceed the fee of the TX spending it at 3000 sat/kilo-vbyte - The DUST constant needs to be determined by the latter. 
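(At 3000 sat/kilo-vbyte, a 57 vbyte input implies roughly 171 sats; the 5000 sat/kilo-vbyte figure below is a deliberate margin above that.)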
- Since these are solely relay rules, and may be raised, we require all outputs be spendable - under a 5000 sat/kilo-vbyte fee rate. - - 5000 sat/kilo-vbyte = 5 sat/vbyte - 5 * 57 = 285 sats/spent-output - - Even if an output took 100 bytes (it should be just ~29-43), taking 400 weight units, adding - 100 vbytes, tripling the transaction size, then the sats/tx would be < 1000. - - Increase by an order of magnitude, in order to ensure this is actually worth our time, and we - get 10,000 satoshis. - */ - const DUST: u64 = 10_000; - - // 2 inputs should be 2 * 230 = 460 weight units - // The output should be ~36 bytes, or 144 weight units - // The overhead should be ~20 bytes at most, or 80 weight units - // 684 weight units, 171 vbytes, round up to 200 - // 200 vbytes at 1 sat/weight (our current minimum fee, 4 sat/vbyte) = 800 sat fee for the - // aggregation TX - const COST_TO_AGGREGATE: u64 = 800; - - const MAX_OUTPUTS: usize = MAX_OUTPUTS; - - fn tweak_keys(keys: &mut ThresholdKeys) { - *keys = tweak_keys(keys); - // Also create a scanner to assert these keys, and all expected paths, are usable - scanner(keys.group_key()); - } - - #[cfg(test)] - async fn external_address(&self, key: ProjectivePoint) -> Address { - address_from_key(key) - } - - fn branch_address(key: ProjectivePoint) -> Option
{
-    let (_, offsets, _) = scanner(key);
-    Some(address_from_key(key + (ProjectivePoint::GENERATOR * offsets[&OutputType::Branch])))
-  }
-
-  fn change_address(key: ProjectivePoint) -> Option<Address> {
-    let (_, offsets, _) = scanner(key);
-    Some(address_from_key(key + (ProjectivePoint::GENERATOR * offsets[&OutputType::Change])))
-  }
-
-  fn forward_address(key: ProjectivePoint) -> Option<Address>
{ - let (_, offsets, _) = scanner(key); - Some(address_from_key(key + (ProjectivePoint::GENERATOR * offsets[&OutputType::Forwarded]))) - } - - async fn get_latest_block_number(&self) -> Result { - self.rpc.get_latest_block_number().await.map_err(|_| NetworkError::ConnectionError) - } - - async fn get_block(&self, number: usize) -> Result { - let block_hash = - self.rpc.get_block_hash(number).await.map_err(|_| NetworkError::ConnectionError)?; - self.rpc.get_block(&block_hash).await.map_err(|_| NetworkError::ConnectionError) - } - - async fn get_outputs(&self, block: &Self::Block, key: ProjectivePoint) -> Vec { - let (scanner, _, kinds) = scanner(key); - - let mut outputs = vec![]; - // Skip the coinbase transaction which is burdened by maturity - for tx in &block.txdata[1 ..] { - for output in scanner.scan_transaction(tx) { - let offset_repr = output.offset().to_repr(); - let offset_repr_ref: &[u8] = offset_repr.as_ref(); - let kind = kinds[offset_repr_ref]; - - let output = Output { kind, presumed_origin: None, output, data: vec![] }; - assert_eq!(output.tx_id(), tx.id()); - outputs.push(output); - } - - if outputs.is_empty() { - continue; - } - - // populate the outputs with the origin and data - let presumed_origin = { - // This may identify the P2WSH output *embedding the InInstruction* as the origin, which - // would be a bit trickier to spend that a traditional output... - // There's no risk of the InInstruction going missing as it'd already be on-chain though - // We *could* parse out the script *without the InInstruction prefix* and declare that the - // origin - // TODO - let spent_output = { - let input = &tx.input[0]; - let mut spent_tx = input.previous_output.txid.as_raw_hash().to_byte_array(); - spent_tx.reverse(); - let mut tx; - while { - tx = self.rpc.get_transaction(&spent_tx).await; - tx.is_err() - } { - log::error!("couldn't get transaction from bitcoin node: {tx:?}"); - sleep(Duration::from_secs(5)).await; - } - tx.unwrap().output.swap_remove(usize::try_from(input.previous_output.vout).unwrap()) - }; - Address::new(spent_output.script_pubkey) - }; - let data = Self::extract_serai_data(tx); - for output in &mut outputs { - if output.kind == OutputType::External { - output.data.clone_from(&data); - } - output.presumed_origin.clone_from(&presumed_origin); - } - } - - outputs - } - - async fn get_eventuality_completions( - &self, - eventualities: &mut EventualitiesTracker, - block: &Self::Block, - ) -> HashMap<[u8; 32], (usize, [u8; 32], Transaction)> { - let mut res = HashMap::new(); - if eventualities.map.is_empty() { - return res; - } - - fn check_block( - eventualities: &mut EventualitiesTracker, - block: &Block, - res: &mut HashMap<[u8; 32], (usize, [u8; 32], Transaction)>, - ) { - for tx in &block.txdata[1 ..] { - if let Some((plan, _)) = eventualities.map.remove(tx.id().as_slice()) { - res.insert(plan, (eventualities.block_number, tx.id(), tx.clone())); - } - } - - eventualities.block_number += 1; - } - - let this_block_hash = block.id(); - let this_block_num = (async { - loop { - match self.rpc.get_block_number(&this_block_hash).await { - Ok(number) => return number, - Err(e) => { - log::error!("couldn't get the block number for {}: {}", hex::encode(this_block_hash), e) - } - } - sleep(Duration::from_secs(60)).await; - } - }) - .await; - - for block_num in (eventualities.block_number + 1) .. 
this_block_num { - let block = { - let mut block; - while { - block = self.get_block(block_num).await; - block.is_err() - } { - log::error!("couldn't get block {}: {}", block_num, block.err().unwrap()); - sleep(Duration::from_secs(60)).await; - } - block.unwrap() - }; - - check_block(eventualities, &block, &mut res); - } - - // Also check the current block - check_block(eventualities, block, &mut res); - assert_eq!(eventualities.block_number, this_block_num); - - res - } - - async fn needed_fee( - &self, - block_number: usize, - inputs: &[Output], - payments: &[Payment], - change: &Option
, - ) -> Result, NetworkError> { - Ok( - self - .make_signable_transaction(block_number, inputs, payments, change, true) - .await? - .map(|signable| signable.needed_fee()), - ) - } - - async fn signable_transaction( - &self, - block_number: usize, - _plan_id: &[u8; 32], - _key: ProjectivePoint, - inputs: &[Output], - payments: &[Payment], - change: &Option
, - (): &(), - ) -> Result, NetworkError> { - Ok(self.make_signable_transaction(block_number, inputs, payments, change, false).await?.map( - |signable| { - let eventuality = Eventuality(signable.txid()); - (SignableTransaction { actual: signable }, eventuality) - }, - )) - } - - async fn attempt_sign( - &self, - keys: ThresholdKeys, - transaction: Self::SignableTransaction, - ) -> Result { - Ok(transaction.actual.clone().multisig(&keys).expect("used the wrong keys")) - } - - async fn publish_completion(&self, tx: &Transaction) -> Result<(), NetworkError> { - match self.rpc.send_raw_transaction(tx).await { - Ok(_) => (), - Err(RpcError::ConnectionError) => Err(NetworkError::ConnectionError)?, - // TODO: Distinguish already in pool vs double spend (other signing attempt succeeded) vs - // invalid transaction - Err(e) => panic!("failed to publish TX {}: {e}", tx.compute_txid()), - } - Ok(()) - } - - async fn confirm_completion( - &self, - eventuality: &Self::Eventuality, - _: &EmptyClaim, - ) -> Result, NetworkError> { - Ok(Some( - self.rpc.get_transaction(&eventuality.0).await.map_err(|_| NetworkError::ConnectionError)?, - )) - } - - #[cfg(test)] - async fn get_block_number(&self, id: &[u8; 32]) -> usize { - self.rpc.get_block_number(id).await.unwrap() - } - - #[cfg(test)] - async fn check_eventuality_by_claim( - &self, - eventuality: &Self::Eventuality, - _: &EmptyClaim, - ) -> bool { - self.rpc.get_transaction(&eventuality.0).await.is_ok() - } - - #[cfg(test)] - async fn get_transaction_by_eventuality(&self, _: usize, id: &Eventuality) -> Transaction { - self.rpc.get_transaction(&id.0).await.unwrap() - } - - #[cfg(test)] - async fn mine_block(&self) { - use bitcoin_serai::bitcoin::{Network as BNetwork, Address as BAddress}; - - self - .rpc - .rpc_call::>( - "generatetoaddress", - serde_json::json!([1, BAddress::p2sh(Script::new(), BNetwork::Regtest).unwrap()]), - ) - .await - .unwrap(); - } - - #[cfg(test)] - async fn test_send(&self, address: Address) -> Block { - use bitcoin_serai::bitcoin::{Network as BNetwork, Address as BAddress}; - - let secret_key = SecretKey::new(&mut rand_core::OsRng); - let private_key = PrivateKey::new(secret_key, BNetwork::Regtest); - let public_key = PublicKey::from_private_key(SECP256K1, &private_key); - let main_addr = BAddress::p2pkh(public_key, BNetwork::Regtest); - - let new_block = self.get_latest_block_number().await.unwrap() + 1; - self - .rpc - .rpc_call::>("generatetoaddress", serde_json::json!([100, main_addr])) - .await - .unwrap(); - - let tx = self.get_block(new_block).await.unwrap().txdata.swap_remove(0); - let mut tx = Transaction { - version: Version(2), - lock_time: LockTime::ZERO, - input: vec![TxIn { - previous_output: OutPoint { txid: tx.compute_txid(), vout: 0 }, - script_sig: Script::new().into(), - sequence: Sequence(u32::MAX), - witness: Witness::default(), - }], - output: vec![TxOut { - value: tx.output[0].value - BAmount::from_sat(10000), - script_pubkey: address.clone().into(), - }], - }; - tx.input[0].script_sig = Self::sign_btc_input_for_p2pkh(&tx, 0, &private_key); - - let block = self.get_latest_block_number().await.unwrap() + 1; - self.rpc.send_raw_transaction(&tx).await.unwrap(); - for _ in 0 .. 
Self::CONFIRMATIONS { - self.mine_block().await; - } - self.get_block(block).await.unwrap() - } -} - -impl UtxoNetwork for Bitcoin { - const MAX_INPUTS: usize = MAX_INPUTS; -} diff --git a/processor/src/networks/ethereum.rs b/processor/src/networks/ethereum.rs deleted file mode 100644 index 3545f34ac..000000000 --- a/processor/src/networks/ethereum.rs +++ /dev/null @@ -1,936 +0,0 @@ -use core::{fmt, time::Duration}; -use std::{ - sync::Arc, - collections::{HashSet, HashMap}, - io, -}; - -use async_trait::async_trait; - -use ciphersuite::{group::GroupEncoding, Ciphersuite, Secp256k1}; -use frost::ThresholdKeys; - -use ethereum_serai::{ - alloy::{ - primitives::U256, - rpc_types::{BlockTransactionsKind, BlockNumberOrTag, Transaction}, - simple_request_transport::SimpleRequest, - rpc_client::ClientBuilder, - provider::{Provider, RootProvider}, - }, - crypto::{PublicKey, Signature}, - erc20::Erc20, - deployer::Deployer, - router::{Router, Coin as EthereumCoin, InInstruction as EthereumInInstruction}, - machine::*, -}; -#[cfg(test)] -use ethereum_serai::alloy::primitives::B256; - -use tokio::{ - time::sleep, - sync::{RwLock, RwLockReadGuard}, -}; -#[cfg(not(test))] -use tokio::{ - io::{AsyncReadExt, AsyncWriteExt}, - net::TcpStream, -}; - -use serai_client::{ - primitives::{Coin, Amount, Balance, NetworkId}, - validator_sets::primitives::Session, -}; - -use crate::{ - Db, Payment, - networks::{ - OutputType, Output, Transaction as TransactionTrait, SignableTransaction, Block, - Eventuality as EventualityTrait, EventualitiesTracker, NetworkError, Network, - }, - key_gen::NetworkKeyDb, - multisigs::scheduler::{ - Scheduler as SchedulerTrait, - smart_contract::{Addendum, Scheduler}, - }, -}; - -#[cfg(not(test))] -const DAI: [u8; 20] = - match const_hex::const_decode_to_array(b"0x6B175474E89094C44Da98b954EedeAC495271d0F") { - Ok(res) => res, - Err(_) => panic!("invalid non-test DAI hex address"), - }; -#[cfg(test)] // TODO -const DAI: [u8; 20] = - match const_hex::const_decode_to_array(b"0000000000000000000000000000000000000000") { - Ok(res) => res, - Err(_) => panic!("invalid test DAI hex address"), - }; - -fn coin_to_serai_coin(coin: &EthereumCoin) -> Option { - match coin { - EthereumCoin::Ether => Some(Coin::Ether), - EthereumCoin::Erc20(token) => { - if *token == DAI { - return Some(Coin::Dai); - } - None - } - } -} - -fn amount_to_serai_amount(coin: Coin, amount: U256) -> Amount { - assert_eq!(coin.network(), NetworkId::Ethereum); - assert_eq!(coin.decimals(), 8); - // Remove 10 decimals so we go from 18 decimals to 8 decimals - let divisor = U256::from(10_000_000_000u64); - // This is valid up to 184b, which is assumed for the coins allowed - Amount(u64::try_from(amount / divisor).unwrap()) -} - -fn balance_to_ethereum_amount(balance: Balance) -> U256 { - assert_eq!(balance.coin.network(), NetworkId::Ethereum); - assert_eq!(balance.coin.decimals(), 8); - // Restore 10 decimals so we go from 8 decimals to 18 decimals - let factor = U256::from(10_000_000_000u64); - U256::from(balance.amount.0) * factor -} - -#[derive(Clone, Copy, PartialEq, Eq, Debug)] -pub struct Address(pub [u8; 20]); -impl TryFrom> for Address { - type Error = (); - fn try_from(bytes: Vec) -> Result { - if bytes.len() != 20 { - Err(())?; - } - let mut res = [0; 20]; - res.copy_from_slice(&bytes); - Ok(Address(res)) - } -} -impl TryInto> for Address { - type Error = (); - fn try_into(self) -> Result, ()> { - Ok(self.0.to_vec()) - } -} - -impl fmt::Display for Address { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> 
fmt::Result { - ethereum_serai::alloy::primitives::Address::from(self.0).fmt(f) - } -} - -impl SignableTransaction for RouterCommand { - fn fee(&self) -> u64 { - // Return a fee of 0 as we'll handle amortization on our end - 0 - } -} - -#[async_trait] -impl TransactionTrait> for Transaction { - type Id = [u8; 32]; - fn id(&self) -> Self::Id { - self.hash.0 - } - - #[cfg(test)] - async fn fee(&self, _network: &Ethereum) -> u64 { - // Return a fee of 0 as we'll handle amortization on our end - 0 - } -} - -// We use 32-block Epochs to represent blocks. -#[derive(Clone, Copy, PartialEq, Eq, Debug)] -pub struct Epoch { - // The hash of the block which ended the prior Epoch. - prior_end_hash: [u8; 32], - // The first block number within this Epoch. - start: u64, - // The hash of the last block within this Epoch. - end_hash: [u8; 32], - // The monotonic time for this Epoch. - time: u64, -} - -impl Epoch { - fn end(&self) -> u64 { - self.start + 31 - } -} - -#[async_trait] -impl Block> for Epoch { - type Id = [u8; 32]; - fn id(&self) -> [u8; 32] { - self.end_hash - } - fn parent(&self) -> [u8; 32] { - self.prior_end_hash - } - async fn time(&self, _: &Ethereum) -> u64 { - self.time - } -} - -impl Output> for EthereumInInstruction { - type Id = [u8; 32]; - - fn kind(&self) -> OutputType { - OutputType::External - } - - fn id(&self) -> Self::Id { - let mut id = [0; 40]; - id[.. 32].copy_from_slice(&self.id.0); - id[32 ..].copy_from_slice(&self.id.1.to_le_bytes()); - *ethereum_serai::alloy::primitives::keccak256(id) - } - fn tx_id(&self) -> [u8; 32] { - self.id.0 - } - fn key(&self) -> ::G { - self.key_at_end_of_block - } - - fn presumed_origin(&self) -> Option
{ - Some(Address(self.from)) - } - - fn balance(&self) -> Balance { - let coin = coin_to_serai_coin(&self.coin).unwrap_or_else(|| { - panic!( - "requesting coin for an EthereumInInstruction with a coin {}", - "we don't handle. this never should have been yielded" - ) - }); - Balance { coin, amount: amount_to_serai_amount(coin, self.amount) } - } - fn data(&self) -> &[u8] { - &self.data - } - - fn write(&self, writer: &mut W) -> io::Result<()> { - EthereumInInstruction::write(self, writer) - } - fn read(reader: &mut R) -> io::Result { - EthereumInInstruction::read(reader) - } -} - -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Claim { - signature: [u8; 64], -} -impl AsRef<[u8]> for Claim { - fn as_ref(&self) -> &[u8] { - &self.signature - } -} -impl AsMut<[u8]> for Claim { - fn as_mut(&mut self) -> &mut [u8] { - &mut self.signature - } -} -impl Default for Claim { - fn default() -> Self { - Self { signature: [0; 64] } - } -} -impl From<&Signature> for Claim { - fn from(sig: &Signature) -> Self { - Self { signature: sig.to_bytes() } - } -} - -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Eventuality(PublicKey, RouterCommand); -impl EventualityTrait for Eventuality { - type Claim = Claim; - type Completion = SignedRouterCommand; - - fn lookup(&self) -> Vec { - match self.1 { - RouterCommand::UpdateSeraiKey { nonce, .. } | RouterCommand::Execute { nonce, .. } => { - nonce.as_le_bytes().to_vec() - } - } - } - - fn read(reader: &mut R) -> io::Result { - let point = Secp256k1::read_G(reader)?; - let command = RouterCommand::read(reader)?; - Ok(Eventuality( - PublicKey::new(point).ok_or(io::Error::other("unusable key within Eventuality"))?, - command, - )) - } - fn serialize(&self) -> Vec { - let mut res = vec![]; - res.extend(self.0.point().to_bytes().as_slice()); - self.1.write(&mut res).unwrap(); - res - } - - fn claim(completion: &Self::Completion) -> Self::Claim { - Claim::from(completion.signature()) - } - fn serialize_completion(completion: &Self::Completion) -> Vec { - let mut res = vec![]; - completion.write(&mut res).unwrap(); - res - } - fn read_completion(reader: &mut R) -> io::Result { - SignedRouterCommand::read(reader) - } -} - -#[derive(Clone)] -pub struct Ethereum { - // This DB is solely used to access the first key generated, as needed to determine the Router's - // address. Accordingly, all methods present are consistent to a Serai chain with a finalized - // first key (regardless of local state), and this is safe. 
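The Eventuality above keys its lookup() on the router command's nonce encoded as little-endian bytes; that is what later allows an Executed event observed on-chain to be matched back to the plan which produced it. A minimal, std-only sketch of that matching, with illustrative types rather than the processor's actual ones:

use std::collections::HashMap;

// Hypothetical stand-ins: a plan ID plus whatever is needed to verify the completion
type PlanId = [u8; 32];
type PendingEventuality = ();

// Same convention as Eventuality::lookup above: the command nonce, little-endian
fn lookup_key(nonce: u64) -> Vec<u8> {
  nonce.to_le_bytes().to_vec()
}

fn main() {
  let mut pending: HashMap<Vec<u8>, (PlanId, PendingEventuality)> = HashMap::new();
  pending.insert(lookup_key(5), ([1; 32], ()));

  // When a command with nonce 5 is observed as executed, it resolves the registered plan
  if let Some((plan_id, _eventuality)) = pending.remove(&lookup_key(5)) {
    assert_eq!(plan_id, [1; 32]);
  }
}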
- db: D, - #[cfg_attr(test, allow(unused))] - relayer_url: String, - provider: Arc>, - deployer: Deployer, - router: Arc>>, -} -impl PartialEq for Ethereum { - fn eq(&self, _other: &Ethereum) -> bool { - true - } -} -impl fmt::Debug for Ethereum { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt - .debug_struct("Ethereum") - .field("deployer", &self.deployer) - .field("router", &self.router) - .finish_non_exhaustive() - } -} -impl Ethereum { - pub async fn new(db: D, daemon_url: String, relayer_url: String) -> Self { - let provider = Arc::new(RootProvider::new( - ClientBuilder::default().transport(SimpleRequest::new(daemon_url), true), - )); - - let mut deployer = Deployer::new(provider.clone()).await; - while !matches!(deployer, Ok(Some(_))) { - log::error!("Deployer wasn't deployed yet or networking error"); - sleep(Duration::from_secs(5)).await; - deployer = Deployer::new(provider.clone()).await; - } - let deployer = deployer.unwrap().unwrap(); - - dbg!(&relayer_url); - dbg!(relayer_url.len()); - Ethereum { db, relayer_url, provider, deployer, router: Arc::new(RwLock::new(None)) } - } - - // Obtain a reference to the Router, sleeping until it's deployed if it hasn't already been. - // This is guaranteed to return Some. - pub async fn router(&self) -> RwLockReadGuard<'_, Option> { - // If we've already instantiated the Router, return a read reference - { - let router = self.router.read().await; - if router.is_some() { - return router; - } - } - - // Instantiate it - let mut router = self.router.write().await; - // If another attempt beat us to it, return - if router.is_some() { - drop(router); - return self.router.read().await; - } - - // Get the first key from the DB - let first_key = - NetworkKeyDb::get(&self.db, Session(0)).expect("getting outputs before confirming a key"); - let key = Secp256k1::read_G(&mut first_key.as_slice()).unwrap(); - let public_key = PublicKey::new(key).unwrap(); - - // Find the router - let mut found = self.deployer.find_router(self.provider.clone(), &public_key).await; - while !matches!(found, Ok(Some(_))) { - log::error!("Router wasn't deployed yet or networking error"); - sleep(Duration::from_secs(5)).await; - found = self.deployer.find_router(self.provider.clone(), &public_key).await; - } - - // Set it - *router = Some(found.unwrap().unwrap()); - - // Downgrade to a read lock - // Explicitly doesn't use `downgrade` so that another pending write txn can realize it's no - // longer necessary - drop(router); - self.router.read().await - } -} - -#[async_trait] -impl Network for Ethereum { - type Curve = Secp256k1; - - type Transaction = Transaction; - type Block = Epoch; - - type Output = EthereumInInstruction; - type SignableTransaction = RouterCommand; - type Eventuality = Eventuality; - type TransactionMachine = RouterCommandMachine; - - type Scheduler = Scheduler; - - type Address = Address; - - const NETWORK: NetworkId = NetworkId::Ethereum; - const ID: &'static str = "Ethereum"; - const ESTIMATED_BLOCK_TIME_IN_SECONDS: usize = 32 * 12; - const CONFIRMATIONS: usize = 1; - - const DUST: u64 = 0; // TODO - - const COST_TO_AGGREGATE: u64 = 0; - - // TODO: usize::max, with a merkle tree in the router - const MAX_OUTPUTS: usize = 256; - - fn tweak_keys(keys: &mut ThresholdKeys) { - while PublicKey::new(keys.group_key()).is_none() { - *keys = keys.offset(::F::ONE); - } - } - - #[cfg(test)] - async fn external_address(&self, _key: ::G) -> Address { - Address(self.router().await.as_ref().unwrap().address()) - } - - fn branch_address(_key: 
<Secp256k1 as Ciphersuite>::G) -> Option<Address>
{ - None - } - - fn change_address(_key: <Secp256k1 as Ciphersuite>::G) -> Option<Address>
{ - None - } - - fn forward_address(_key: <Secp256k1 as Ciphersuite>::G) -> Option<Address>
{ - None - } - - async fn get_latest_block_number(&self) -> Result { - let actual_number = self - .provider - .get_block(BlockNumberOrTag::Finalized.into(), BlockTransactionsKind::Hashes) - .await - .map_err(|_| NetworkError::ConnectionError)? - .ok_or(NetworkError::ConnectionError)? - .header - .number; - // Error if there hasn't been a full epoch yet - if actual_number < 32 { - Err(NetworkError::ConnectionError)? - } - // If this is 33, the division will return 1, yet 1 is the epoch in progress - let latest_full_epoch = (actual_number / 32).saturating_sub(1); - Ok(latest_full_epoch.try_into().unwrap()) - } - - async fn get_block(&self, number: usize) -> Result { - let latest_finalized = self.get_latest_block_number().await?; - if number > latest_finalized { - Err(NetworkError::ConnectionError)? - } - - let start = number * 32; - let prior_end_hash = if start == 0 { - [0; 32] - } else { - self - .provider - .get_block(u64::try_from(start - 1).unwrap().into(), BlockTransactionsKind::Hashes) - .await - .ok() - .flatten() - .ok_or(NetworkError::ConnectionError)? - .header - .hash - .into() - }; - - let end_header = self - .provider - .get_block(u64::try_from(start + 31).unwrap().into(), BlockTransactionsKind::Hashes) - .await - .ok() - .flatten() - .ok_or(NetworkError::ConnectionError)? - .header; - - let end_hash = end_header.hash.into(); - let time = end_header.timestamp; - - Ok(Epoch { prior_end_hash, start: start.try_into().unwrap(), end_hash, time }) - } - - async fn get_outputs( - &self, - block: &Self::Block, - _: ::G, - ) -> Vec { - let router = self.router().await; - let router = router.as_ref().unwrap(); - // Grab the key at the end of the epoch - let key_at_end_of_block = loop { - match router.key_at_end_of_block(block.start + 31).await { - Ok(Some(key)) => break key, - Ok(None) => return vec![], - Err(e) => { - log::error!("couldn't connect to router for the key at the end of the block: {e:?}"); - sleep(Duration::from_secs(5)).await; - continue; - } - } - }; - - let mut all_events = vec![]; - let mut top_level_txids = HashSet::new(); - for erc20_addr in [DAI] { - let erc20 = Erc20::new(self.provider.clone(), erc20_addr); - - for block in block.start .. (block.start + 32) { - let transfers = loop { - match erc20.top_level_transfers(block, router.address()).await { - Ok(transfers) => break transfers, - Err(e) => { - log::error!("couldn't connect to Ethereum node for the top-level transfers: {e:?}"); - sleep(Duration::from_secs(5)).await; - continue; - } - } - }; - - for transfer in transfers { - top_level_txids.insert(transfer.id); - all_events.push(EthereumInInstruction { - id: (transfer.id, 0), - from: transfer.from, - coin: EthereumCoin::Erc20(erc20_addr), - amount: transfer.amount, - data: transfer.data, - key_at_end_of_block, - }); - } - } - } - - for block in block.start .. 
(block.start + 32) { - let mut events = router.in_instructions(block, &HashSet::from([DAI])).await; - while let Err(e) = events { - log::error!("couldn't connect to Ethereum node for the Router's events: {e:?}"); - sleep(Duration::from_secs(5)).await; - events = router.in_instructions(block, &HashSet::from([DAI])).await; - } - let mut events = events.unwrap(); - for event in &mut events { - // A transaction should either be a top-level transfer or a Router InInstruction - if top_level_txids.contains(&event.id.0) { - panic!("top-level transfer had {} and router had {:?}", hex::encode(event.id.0), event); - } - // Overwrite the key at end of block to key at end of epoch - event.key_at_end_of_block = key_at_end_of_block; - } - all_events.extend(events); - } - - for event in &all_events { - assert!( - coin_to_serai_coin(&event.coin).is_some(), - "router yielded events for unrecognized coins" - ); - } - all_events - } - - async fn get_eventuality_completions( - &self, - eventualities: &mut EventualitiesTracker, - block: &Self::Block, - ) -> HashMap< - [u8; 32], - ( - usize, - >::Id, - ::Completion, - ), - > { - let mut res = HashMap::new(); - if eventualities.map.is_empty() { - return res; - } - - let router = self.router().await; - let router = router.as_ref().unwrap(); - - let past_scanned_epoch = loop { - match self.get_block(eventualities.block_number).await { - Ok(block) => break block, - Err(e) => log::error!("couldn't get the last scanned block in the tracker: {}", e), - } - sleep(Duration::from_secs(10)).await; - }; - assert_eq!( - past_scanned_epoch.start / 32, - u64::try_from(eventualities.block_number).unwrap(), - "assumption of tracker block number's relation to epoch start is incorrect" - ); - - // Iterate from after the epoch number in the tracker to the end of this epoch - for block_num in (past_scanned_epoch.end() + 1) ..= block.end() { - let executed = loop { - match router.executed_commands(block_num).await { - Ok(executed) => break executed, - Err(e) => log::error!("couldn't get the executed commands in block {block_num}: {e}"), - } - sleep(Duration::from_secs(10)).await; - }; - - for executed in executed { - let lookup = executed.nonce.to_le_bytes().to_vec(); - if let Some((plan_id, eventuality)) = eventualities.map.get(&lookup) { - if let Some(command) = - SignedRouterCommand::new(&eventuality.0, eventuality.1.clone(), &executed.signature) - { - res.insert(*plan_id, (block_num.try_into().unwrap(), executed.tx_id, command)); - eventualities.map.remove(&lookup); - } - } - } - } - eventualities.block_number = (block.start / 32).try_into().unwrap(); - - res - } - - async fn needed_fee( - &self, - _block_number: usize, - inputs: &[Self::Output], - _payments: &[Payment], - _change: &Option, - ) -> Result, NetworkError> { - assert_eq!(inputs.len(), 0); - // Claim no fee is needed so we can perform amortization ourselves - Ok(Some(0)) - } - - async fn signable_transaction( - &self, - _block_number: usize, - _plan_id: &[u8; 32], - key: ::G, - inputs: &[Self::Output], - payments: &[Payment], - change: &Option, - scheduler_addendum: &>::Addendum, - ) -> Result, NetworkError> { - assert_eq!(inputs.len(), 0); - assert!(change.is_none()); - let chain_id = self.provider.get_chain_id().await.map_err(|_| NetworkError::ConnectionError)?; - - // TODO: Perform fee amortization (in scheduler? - // TODO: Make this function internal and have needed_fee properly return None as expected? 
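The 32-block epoch bookkeeping used by get_latest_block_number, get_block, and get_eventuality_completions above boils down to a little integer arithmetic. A self-contained sketch of it (the constant and function names are illustrative; the values mirror the code above):

const EPOCH_LENGTH: u64 = 32;

// A finalized chain tip maps to the latest *complete* epoch
fn latest_full_epoch(finalized_block: u64) -> Option<u64> {
  // No full epoch exists until 32 blocks have been finalized
  if finalized_block < EPOCH_LENGTH {
    return None;
  }
  // At block 33 the division yields 1, yet epoch 1 is still in progress, so subtract 1
  Some((finalized_block / EPOCH_LENGTH).saturating_sub(1))
}

// Epoch n covers blocks [n * 32, (n * 32) + 31]
fn epoch_bounds(epoch: u64) -> (u64, u64) {
  let start = epoch * EPOCH_LENGTH;
  (start, start + (EPOCH_LENGTH - 1))
}

#[test]
fn epoch_math() {
  assert_eq!(latest_full_epoch(31), None);
  assert_eq!(latest_full_epoch(32), Some(0));
  assert_eq!(latest_full_epoch(64), Some(1));
  assert_eq!(epoch_bounds(1), (32, 63));
}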
- // TODO: signable_transaction is written as cannot return None if needed_fee returns Some - // TODO: Why can this return None at all if it isn't allowed to return None? - - let command = match scheduler_addendum { - Addendum::Nonce(nonce) => RouterCommand::Execute { - chain_id: U256::try_from(chain_id).unwrap(), - nonce: U256::try_from(*nonce).unwrap(), - outs: payments - .iter() - .filter_map(|payment| { - Some(OutInstruction { - target: if let Some(data) = payment.data.as_ref() { - // This introspects the Call serialization format, expecting the first 20 bytes to - // be the address - // This avoids wasting the 20-bytes allocated within address - let full_data = [payment.address.0.as_slice(), data].concat(); - let mut reader = full_data.as_slice(); - - let mut calls = vec![]; - while !reader.is_empty() { - calls.push(Call::read(&mut reader).ok()?) - } - // The above must have executed at least once since reader contains the address - assert_eq!(calls[0].to, payment.address.0); - - OutInstructionTarget::Calls(calls) - } else { - OutInstructionTarget::Direct(payment.address.0) - }, - value: { - assert_eq!(payment.balance.coin, Coin::Ether); // TODO - balance_to_ethereum_amount(payment.balance) - }, - }) - }) - .collect(), - }, - Addendum::RotateTo { nonce, new_key } => { - assert!(payments.is_empty()); - RouterCommand::UpdateSeraiKey { - chain_id: U256::try_from(chain_id).unwrap(), - nonce: U256::try_from(*nonce).unwrap(), - key: PublicKey::new(*new_key).expect("new key wasn't a valid ETH public key"), - } - } - }; - Ok(Some(( - command.clone(), - Eventuality(PublicKey::new(key).expect("key wasn't a valid ETH public key"), command), - ))) - } - - async fn attempt_sign( - &self, - keys: ThresholdKeys, - transaction: Self::SignableTransaction, - ) -> Result { - Ok( - RouterCommandMachine::new(keys, transaction) - .expect("keys weren't usable to sign router commands"), - ) - } - - async fn publish_completion( - &self, - completion: &::Completion, - ) -> Result<(), NetworkError> { - // Publish this to the dedicated TX server for a solver to actually publish - #[cfg(not(test))] - { - let mut msg = vec![]; - match completion.command() { - RouterCommand::UpdateSeraiKey { nonce, .. } | RouterCommand::Execute { nonce, .. } => { - msg.extend(&u32::try_from(nonce).unwrap().to_le_bytes()); - } - } - completion.write(&mut msg).unwrap(); - - let Ok(mut socket) = TcpStream::connect(&self.relayer_url).await else { - log::warn!("couldn't connect to the relayer server"); - Err(NetworkError::ConnectionError)? - }; - let Ok(()) = socket.write_all(&u32::try_from(msg.len()).unwrap().to_le_bytes()).await else { - log::warn!("couldn't send the message's len to the relayer server"); - Err(NetworkError::ConnectionError)? - }; - let Ok(()) = socket.write_all(&msg).await else { - log::warn!("couldn't write the message to the relayer server"); - Err(NetworkError::ConnectionError)? - }; - if socket.read_u8().await.ok() != Some(1) { - log::warn!("didn't get the ack from the relayer server"); - Err(NetworkError::ConnectionError)?; - } - - Ok(()) - } - - // Publish this using a dummy account we fund with magic RPC commands - #[cfg(test)] - { - let router = self.router().await; - let router = router.as_ref().unwrap(); - - let mut tx = match completion.command() { - RouterCommand::UpdateSeraiKey { key, .. } => { - router.update_serai_key(key, completion.signature()) - } - RouterCommand::Execute { outs, .. 
} => router.execute( - &outs.iter().cloned().map(Into::into).collect::>(), - completion.signature(), - ), - }; - tx.gas_limit = 1_000_000u64.into(); - tx.gas_price = 1_000_000_000u64.into(); - let tx = ethereum_serai::crypto::deterministically_sign(&tx); - - if self.provider.get_transaction_by_hash(*tx.hash()).await.unwrap().is_none() { - self - .provider - .raw_request::<_, ()>( - "anvil_setBalance".into(), - [ - tx.recover_signer().unwrap().to_string(), - (U256::from(tx.tx().gas_limit) * U256::from(tx.tx().gas_price)).to_string(), - ], - ) - .await - .unwrap(); - - let (tx, sig, _) = tx.into_parts(); - let mut bytes = vec![]; - tx.encode_with_signature_fields(&sig, &mut bytes); - let pending_tx = self.provider.send_raw_transaction(&bytes).await.unwrap(); - self.mine_block().await; - assert!(pending_tx.get_receipt().await.unwrap().status()); - } - - Ok(()) - } - } - - async fn confirm_completion( - &self, - eventuality: &Self::Eventuality, - claim: &::Claim, - ) -> Result::Completion>, NetworkError> { - Ok(SignedRouterCommand::new(&eventuality.0, eventuality.1.clone(), &claim.signature)) - } - - #[cfg(test)] - async fn get_block_number(&self, id: &>::Id) -> usize { - self - .provider - .get_block(B256::from(*id).into(), BlockTransactionsKind::Hashes) - .await - .unwrap() - .unwrap() - .header - .number - .try_into() - .unwrap() - } - - #[cfg(test)] - async fn check_eventuality_by_claim( - &self, - eventuality: &Self::Eventuality, - claim: &::Claim, - ) -> bool { - SignedRouterCommand::new(&eventuality.0, eventuality.1.clone(), &claim.signature).is_some() - } - - #[cfg(test)] - async fn get_transaction_by_eventuality( - &self, - block: usize, - eventuality: &Self::Eventuality, - ) -> Self::Transaction { - // We mine 96 blocks to ensure the 32 blocks relevant are finalized - // Back-check the prior two epochs in response to this - // TODO: Review why this is sub(3) and not sub(2) - for block in block.saturating_sub(3) ..= block { - match eventuality.1 { - RouterCommand::UpdateSeraiKey { nonce, .. } | RouterCommand::Execute { nonce, .. 
} => { - let router = self.router().await; - let router = router.as_ref().unwrap(); - - let block = u64::try_from(block).unwrap(); - let filter = router - .key_updated_filter() - .from_block(block * 32) - .to_block(((block + 1) * 32) - 1) - .topic1(nonce); - let logs = self.provider.get_logs(&filter).await.unwrap(); - if let Some(log) = logs.first() { - return self - .provider - .get_transaction_by_hash(log.clone().transaction_hash.unwrap()) - .await - .unwrap() - .unwrap(); - }; - - let filter = router - .executed_filter() - .from_block(block * 32) - .to_block(((block + 1) * 32) - 1) - .topic1(nonce); - let logs = self.provider.get_logs(&filter).await.unwrap(); - if logs.is_empty() { - continue; - } - return self - .provider - .get_transaction_by_hash(logs[0].transaction_hash.unwrap()) - .await - .unwrap() - .unwrap(); - } - } - } - panic!("couldn't find completion in any three of checked blocks"); - } - - #[cfg(test)] - async fn mine_block(&self) { - self.provider.raw_request::<_, ()>("anvil_mine".into(), [96]).await.unwrap(); - } - - #[cfg(test)] - async fn test_send(&self, send_to: Self::Address) -> Self::Block { - use rand_core::OsRng; - use ciphersuite::group::ff::Field; - use ethereum_serai::alloy::sol_types::SolCall; - - let key = ::F::random(&mut OsRng); - let address = ethereum_serai::crypto::address(&(Secp256k1::generator() * key)); - - // Set a 1.1 ETH balance - self - .provider - .raw_request::<_, ()>( - "anvil_setBalance".into(), - [Address(address).to_string(), "1100000000000000000".into()], - ) - .await - .unwrap(); - - let value = U256::from_str_radix("1000000000000000000", 10).unwrap(); - let tx = ethereum_serai::alloy::consensus::TxLegacy { - chain_id: None, - nonce: 0, - gas_price: 1_000_000_000u128, - gas_limit: 200_000u128, - to: ethereum_serai::alloy::primitives::TxKind::Call(send_to.0.into()), - // 1 ETH - value, - input: ethereum_serai::router::abi::inInstructionCall::new(( - [0; 20].into(), - value, - vec![].into(), - )) - .abi_encode() - .into(), - }; - - use ethereum_serai::alloy::{primitives::Signature, consensus::SignableTransaction}; - let sig = k256::ecdsa::SigningKey::from(k256::elliptic_curve::NonZeroScalar::new(key).unwrap()) - .sign_prehash_recoverable(tx.signature_hash().as_ref()) - .unwrap(); - - let mut bytes = vec![]; - tx.encode_with_signature_fields(&Signature::from(sig), &mut bytes); - let pending_tx = self.provider.send_raw_transaction(&bytes).await.ok().unwrap(); - - // Mine an epoch containing this TX - self.mine_block().await; - assert!(pending_tx.get_receipt().await.unwrap().status()); - // Yield the freshly mined block - self.get_block(self.get_latest_block_number().await.unwrap()).await.unwrap() - } -} diff --git a/processor/src/networks/mod.rs b/processor/src/networks/mod.rs deleted file mode 100644 index 81838ae12..000000000 --- a/processor/src/networks/mod.rs +++ /dev/null @@ -1,658 +0,0 @@ -use core::{fmt::Debug, time::Duration}; -use std::{io, collections::HashMap}; - -use async_trait::async_trait; -use thiserror::Error; - -use frost::{ - dkg::evrf::EvrfCurve, - curve::{Ciphersuite, Curve}, - ThresholdKeys, - sign::PreprocessMachine, -}; - -use serai_client::primitives::{NetworkId, Balance}; - -use log::error; - -use tokio::time::sleep; - -#[cfg(feature = "bitcoin")] -pub mod bitcoin; -#[cfg(feature = "bitcoin")] -pub use self::bitcoin::Bitcoin; - -#[cfg(feature = "ethereum")] -pub mod ethereum; -#[cfg(feature = "ethereum")] -pub use ethereum::Ethereum; - -#[cfg(feature = "monero")] -pub mod monero; -#[cfg(feature = "monero")] -pub use 
monero::Monero; - -use crate::{Payment, Plan, multisigs::scheduler::Scheduler}; - -#[derive(Clone, Copy, Error, Debug)] -pub enum NetworkError { - #[error("failed to connect to network daemon")] - ConnectionError, -} - -pub trait Id: - Send + Sync + Clone + Default + PartialEq + AsRef<[u8]> + AsMut<[u8]> + Debug -{ -} -impl + AsMut<[u8]> + Debug> Id for I {} - -#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] -pub enum OutputType { - // Needs to be processed/sent up to Substrate - External, - - // Given a known output set, and a known series of outbound transactions, we should be able to - // form a completely deterministic schedule S. The issue is when S has TXs which spend prior TXs - // in S (which is needed for our logarithmic scheduling). In order to have the descendant TX, say - // S[1], build off S[0], we need to observe when S[0] is included on-chain. - // - // We cannot. - // - // Monero (and other privacy coins) do not expose their UTXO graphs. Even if we know how to - // create S[0], and the actual payment info behind it, we cannot observe it on the blockchain - // unless we participated in creating it. Locking the entire schedule, when we cannot sign for - // the entire schedule at once, to a single signing set isn't feasible. - // - // While any member of the active signing set can provide data enabling other signers to - // participate, it's several KB of data which we then have to code communication for. - // The other option is to simply not observe S[0]. Instead, observe a TX with an identical output - // to the one in S[0] we intended to use for S[1]. It's either from S[0], or Eve, a malicious - // actor, has sent us a forged TX which is... equally as usable? so who cares? - // - // The only issue is if we have multiple outputs on-chain with identical amounts and purposes. - // Accordingly, when the scheduler makes a plan for when a specific output is available, it - // shouldn't write that plan. It should *push* that plan to a queue of plans to perform when - // instances of that output occur. 
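A hypothetical, std-only sketch of the "push the plan to a queue keyed on the expected output" idea described above (the types and method names here are illustrative; the actual scheduler is considerably more involved):

use std::collections::HashMap;

// In practice this would also bind the key/address the branch output pays to
#[derive(Clone, PartialEq, Eq, Hash)]
struct ExpectedOutput {
  amount: u64,
}

struct PlanQueue<Plan> {
  pending: HashMap<ExpectedOutput, Vec<Plan>>,
}

impl<Plan> PlanQueue<Plan> {
  fn new() -> Self {
    Self { pending: HashMap::new() }
  }

  // Instead of writing S[1] immediately, hold it until an output matching S[0]'s
  // branch output is actually observed on-chain
  fn push(&mut self, expected: ExpectedOutput, plan: Plan) {
    self.pending.entry(expected).or_default().push(plan);
  }

  // When the scanner sees a Branch output, any plan queued for an identical output
  // becomes executable, regardless of whether the output literally came from S[0]
  fn on_output(&mut self, observed: &ExpectedOutput) -> Vec<Plan> {
    self.pending.remove(observed).unwrap_or_default()
  }
}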
- Branch, - - // Should be added to the available UTXO pool with no further action - Change, - - // Forwarded output from the prior multisig - Forwarded, -} - -impl OutputType { - fn write(&self, writer: &mut W) -> io::Result<()> { - writer.write_all(&[match self { - OutputType::External => 0, - OutputType::Branch => 1, - OutputType::Change => 2, - OutputType::Forwarded => 3, - }]) - } - - fn read(reader: &mut R) -> io::Result { - let mut byte = [0; 1]; - reader.read_exact(&mut byte)?; - Ok(match byte[0] { - 0 => OutputType::External, - 1 => OutputType::Branch, - 2 => OutputType::Change, - 3 => OutputType::Forwarded, - _ => Err(io::Error::other("invalid OutputType"))?, - }) - } -} - -pub trait Output: Send + Sync + Sized + Clone + PartialEq + Eq + Debug { - type Id: 'static + Id; - - fn kind(&self) -> OutputType; - - fn id(&self) -> Self::Id; - fn tx_id(&self) -> >::Id; // TODO: Review use of - fn key(&self) -> ::G; - - fn presumed_origin(&self) -> Option; - - fn balance(&self) -> Balance; - fn data(&self) -> &[u8]; - - fn write(&self, writer: &mut W) -> io::Result<()>; - fn read(reader: &mut R) -> io::Result; -} - -#[async_trait] -pub trait Transaction: Send + Sync + Sized + Clone + PartialEq + Debug { - type Id: 'static + Id; - fn id(&self) -> Self::Id; - // TODO: Move to Balance - #[cfg(test)] - async fn fee(&self, network: &N) -> u64; -} - -pub trait SignableTransaction: Send + Sync + Clone + Debug { - // TODO: Move to Balance - fn fee(&self) -> u64; -} - -pub trait Eventuality: Send + Sync + Clone + PartialEq + Debug { - type Claim: Send + Sync + Clone + PartialEq + Default + AsRef<[u8]> + AsMut<[u8]> + Debug; - type Completion: Send + Sync + Clone + PartialEq + Debug; - - fn lookup(&self) -> Vec; - - fn read(reader: &mut R) -> io::Result; - fn serialize(&self) -> Vec; - - fn claim(completion: &Self::Completion) -> Self::Claim; - - // TODO: Make a dedicated Completion trait - fn serialize_completion(completion: &Self::Completion) -> Vec; - fn read_completion(reader: &mut R) -> io::Result; -} - -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct EventualitiesTracker { - // Lookup property (input, nonce, TX extra...) -> (plan ID, eventuality) - map: HashMap, ([u8; 32], E)>, - // Block number we've scanned these eventualities too - block_number: usize, -} - -impl EventualitiesTracker { - pub fn new() -> Self { - EventualitiesTracker { map: HashMap::new(), block_number: usize::MAX } - } - - pub fn register(&mut self, block_number: usize, id: [u8; 32], eventuality: E) { - log::info!("registering eventuality for {}", hex::encode(id)); - - let lookup = eventuality.lookup(); - if self.map.contains_key(&lookup) { - panic!("registering an eventuality multiple times or lookup collision"); - } - self.map.insert(lookup, (id, eventuality)); - // If our self tracker already went past this block number, set it back - self.block_number = self.block_number.min(block_number); - } - - pub fn drop(&mut self, id: [u8; 32]) { - // O(n) due to the lack of a reverse lookup - let mut found_key = None; - for (key, value) in &self.map { - if value.0 == id { - found_key = Some(key.clone()); - break; - } - } - - if let Some(key) = found_key { - self.map.remove(&key); - } - } -} - -impl Default for EventualitiesTracker { - fn default() -> Self { - Self::new() - } -} - -#[async_trait] -pub trait Block: Send + Sync + Sized + Clone + Debug { - // This is currently bounded to being 32 bytes. 
- type Id: 'static + Id; - fn id(&self) -> Self::Id; - fn parent(&self) -> Self::Id; - /// The monotonic network time at this block. - /// - /// This call is presumed to be expensive and should only be called sparingly. - async fn time(&self, rpc: &N) -> u64; -} - -// The post-fee value of an expected branch. -pub struct PostFeeBranch { - pub expected: u64, - pub actual: Option, -} - -// Return the PostFeeBranches needed when dropping a transaction -fn drop_branches( - key: ::G, - payments: &[Payment], -) -> Vec { - let mut branch_outputs = vec![]; - for payment in payments { - if Some(&payment.address) == N::branch_address(key).as_ref() { - branch_outputs.push(PostFeeBranch { expected: payment.balance.amount.0, actual: None }); - } - } - branch_outputs -} - -pub struct PreparedSend { - /// None for the transaction if the SignableTransaction was dropped due to lack of value. - pub tx: Option<(N::SignableTransaction, N::Eventuality)>, - pub post_fee_branches: Vec, - /// The updated operating costs after preparing this transaction. - pub operating_costs: u64, -} - -#[async_trait] -#[rustfmt::skip] -pub trait Network: 'static + Send + Sync + Clone + PartialEq + Debug { - /// The elliptic curve used for this network. - type Curve: Curve - + EvrfCurve::F>>>; - - /// The type representing the transaction for this network. - type Transaction: Transaction; // TODO: Review use of - /// The type representing the block for this network. - type Block: Block; - - /// The type containing all information on a scanned output. - // This is almost certainly distinct from the network's native output type. - type Output: Output; - /// The type containing all information on a planned transaction, waiting to be signed. - type SignableTransaction: SignableTransaction; - /// The type containing all information to check if a plan was completed. - /// - /// This must be binding to both the outputs expected and the plan ID. - type Eventuality: Eventuality; - /// The FROST machine to sign a transaction. - type TransactionMachine: PreprocessMachine< - Signature = ::Completion, - >; - - /// The scheduler for this network. - type Scheduler: Scheduler; - - /// The type representing an address. - // This should NOT be a String, yet a tailored type representing an efficient binary encoding, - // as detailed in the integration documentation. - type Address: Send - + Sync - + Clone - + PartialEq - + Eq - + Debug - + ToString - + TryInto> - + TryFrom>; - - /// Network ID for this network. - const NETWORK: NetworkId; - /// String ID for this network. - const ID: &'static str; - /// The estimated amount of time a block will take. - const ESTIMATED_BLOCK_TIME_IN_SECONDS: usize; - /// The amount of confirmations required to consider a block 'final'. - const CONFIRMATIONS: usize; - /// The maximum amount of outputs which will fit in a TX. - /// This should be equal to MAX_INPUTS unless one is specifically limited. - /// A TX with MAX_INPUTS and MAX_OUTPUTS must not exceed the max size. - const MAX_OUTPUTS: usize; - - /// Minimum output value which will be handled. - /// - /// For any received output, there's the cost to spend the output. This value MUST exceed the - /// cost to spend said output, and should by a notable margin (not just 2x, yet an order of - /// magnitude). - // TODO: Dust needs to be diversified per Coin - const DUST: u64; - - /// The cost to perform input aggregation with a 2-input 1-output TX. - const COST_TO_AGGREGATE: u64; - - /// Tweak keys for this network. 
- fn tweak_keys(key: &mut ThresholdKeys); - - /// Address for the given group key to receive external coins to. - #[cfg(test)] - async fn external_address(&self, key: ::G) -> Self::Address; - /// Address for the given group key to use for scheduled branches. - fn branch_address(key: ::G) -> Option; - /// Address for the given group key to use for change. - fn change_address(key: ::G) -> Option; - /// Address for forwarded outputs from prior multisigs. - /// - /// forward_address must only return None if explicit forwarding isn't necessary. - fn forward_address(key: ::G) -> Option; - - /// Get the latest block's number. - async fn get_latest_block_number(&self) -> Result; - /// Get a block by its number. - async fn get_block(&self, number: usize) -> Result; - - /// Get the latest block's number, retrying until success. - async fn get_latest_block_number_with_retries(&self) -> usize { - loop { - match self.get_latest_block_number().await { - Ok(number) => { - return number; - } - Err(e) => { - error!( - "couldn't get the latest block number in the with retry get_latest_block_number: {e:?}", - ); - sleep(Duration::from_secs(10)).await; - } - } - } - } - - /// Get a block, retrying until success. - async fn get_block_with_retries(&self, block_number: usize) -> Self::Block { - loop { - match self.get_block(block_number).await { - Ok(block) => { - return block; - } - Err(e) => { - error!("couldn't get block {block_number} in the with retry get_block: {:?}", e); - sleep(Duration::from_secs(10)).await; - } - } - } - } - - /// Get the outputs within a block for a specific key. - async fn get_outputs( - &self, - block: &Self::Block, - key: ::G, - ) -> Vec; - - /// Get the registered eventualities completed within this block, and any prior blocks which - /// registered eventualities may have been completed in. - /// - /// This may panic if not fed a block greater than the tracker's block number. - /// - /// Plan ID -> (block number, TX ID, completion) - // TODO: get_eventuality_completions_internal + provided get_eventuality_completions for common - // code - // TODO: Consider having this return the Transaction + the Completion? - // Or Transaction with extract_completion? - async fn get_eventuality_completions( - &self, - eventualities: &mut EventualitiesTracker, - block: &Self::Block, - ) -> HashMap< - [u8; 32], - ( - usize, - >::Id, - ::Completion, - ), - >; - - /// Returns the needed fee to fulfill this Plan at this fee rate. - /// - /// Returns None if this Plan isn't fulfillable (such as when the fee exceeds the input value). - async fn needed_fee( - &self, - block_number: usize, - inputs: &[Self::Output], - payments: &[Payment], - change: &Option, - ) -> Result, NetworkError>; - - /// Create a SignableTransaction for the given Plan. - /// - /// The expected flow is: - /// 1) Call needed_fee - /// 2) If the Plan is fulfillable, amortize the fee - /// 3) Call signable_transaction *which MUST NOT return None if the above was done properly* - /// - /// This takes a destructured Plan as some of these arguments are malleated from the original - /// Plan. - // TODO: Explicit AmortizedPlan? - #[allow(clippy::too_many_arguments)] - async fn signable_transaction( - &self, - block_number: usize, - plan_id: &[u8; 32], - key: ::G, - inputs: &[Self::Output], - payments: &[Payment], - change: &Option, - scheduler_addendum: &>::Addendum, - ) -> Result, NetworkError>; - - /// Prepare a SignableTransaction for a transaction. 
- /// - /// This must not persist anything as we will prepare Plans we never intend to execute. - async fn prepare_send( - &self, - block_number: usize, - plan: Plan, - operating_costs: u64, - ) -> Result, NetworkError> { - // Sanity check this has at least one output planned - assert!((!plan.payments.is_empty()) || plan.change.is_some()); - - let plan_id = plan.id(); - let Plan { key, inputs, mut payments, change, scheduler_addendum } = plan; - let theoretical_change_amount = if change.is_some() { - inputs.iter().map(|input| input.balance().amount.0).sum::() - - payments.iter().map(|payment| payment.balance.amount.0).sum::() - } else { - 0 - }; - - let Some(tx_fee) = self.needed_fee(block_number, &inputs, &payments, &change).await? else { - // This Plan is not fulfillable - // TODO: Have Plan explicitly distinguish payments and branches in two separate Vecs? - return Ok(PreparedSend { - tx: None, - // Have all of its branches dropped - post_fee_branches: drop_branches(key, &payments), - // This plan expects a change output valued at sum(inputs) - sum(outputs) - // Since we can no longer create this change output, it becomes an operating cost - // TODO: Look at input restoration to reduce this operating cost - operating_costs: operating_costs + - if change.is_some() { theoretical_change_amount } else { 0 }, - }); - }; - - // Amortize the fee over the plan's payments - let (post_fee_branches, mut operating_costs) = (|| { - // If we're creating a change output, letting us recoup coins, amortize the operating costs - // as well - let total_fee = tx_fee + if change.is_some() { operating_costs } else { 0 }; - - let original_outputs = payments.iter().map(|payment| payment.balance.amount.0).sum::(); - // If this isn't enough for the total fee, drop and move on - if original_outputs < total_fee { - let mut remaining_operating_costs = operating_costs; - if change.is_some() { - // Operating costs increase by the TX fee - remaining_operating_costs += tx_fee; - // Yet decrease by the payments we managed to drop - remaining_operating_costs = remaining_operating_costs.saturating_sub(original_outputs); - } - return (drop_branches(key, &payments), remaining_operating_costs); - } - - let initial_payment_amounts = - payments.iter().map(|payment| payment.balance.amount.0).collect::>(); - - // Amortize the transaction fee across outputs - let mut remaining_fee = total_fee; - // Run as many times as needed until we can successfully subtract this fee - while remaining_fee != 0 { - // This shouldn't be a / by 0 as these payments have enough value to cover the fee - let this_iter_fee = remaining_fee / u64::try_from(payments.len()).unwrap(); - let mut overage = remaining_fee % u64::try_from(payments.len()).unwrap(); - for payment in &mut payments { - let this_payment_fee = this_iter_fee + overage; - // Only subtract the overage once - overage = 0; - - let subtractable = payment.balance.amount.0.min(this_payment_fee); - remaining_fee -= subtractable; - payment.balance.amount.0 -= subtractable; - } - } - - // If any payment is now below the dust threshold, set its value to 0 so it'll be dropped - for payment in &mut payments { - if payment.balance.amount.0 < Self::DUST { - payment.balance.amount.0 = 0; - } - } - - // Note the branch outputs' new values - let mut branch_outputs = vec![]; - for (initial_amount, payment) in initial_payment_amounts.into_iter().zip(&payments) { - if Some(&payment.address) == Self::branch_address(key).as_ref() { - branch_outputs.push(PostFeeBranch { - expected: initial_amount, - actual: 
if payment.balance.amount.0 == 0 { - None - } else { - Some(payment.balance.amount.0) - }, - }); - } - } - - // Drop payments now worth 0 - payments = payments - .drain(..) - .filter(|payment| { - if payment.balance.amount.0 != 0 { - true - } else { - log::debug!("dropping dust payment from plan {}", hex::encode(plan_id)); - false - } - }) - .collect(); - - // Sanity check the fee was successfully amortized - let new_outputs = payments.iter().map(|payment| payment.balance.amount.0).sum::(); - assert!((new_outputs + total_fee) <= original_outputs); - - ( - branch_outputs, - if change.is_none() { - // If the change is None, this had no effect on the operating costs - operating_costs - } else { - // Since the change is some, and we successfully amortized, the operating costs were - // recouped - 0 - }, - ) - })(); - - let Some(tx) = self - .signable_transaction( - block_number, - &plan_id, - key, - &inputs, - &payments, - &change, - &scheduler_addendum, - ) - .await? - else { - panic!( - "{}. {}: {}, {}: {:?}, {}: {:?}, {}: {:?}, {}: {}, {}: {:?}", - "signable_transaction returned None for a TX we prior successfully calculated the fee for", - "id", - hex::encode(plan_id), - "inputs", - inputs, - "post-amortization payments", - payments, - "change", - change, - "successfully amoritized fee", - tx_fee, - "scheduler's addendum", - scheduler_addendum, - ) - }; - - if change.is_some() { - let on_chain_expected_change = - inputs.iter().map(|input| input.balance().amount.0).sum::() - - payments.iter().map(|payment| payment.balance.amount.0).sum::() - - tx_fee; - // If the change value is less than the dust threshold, it becomes an operating cost - // This may be slightly inaccurate as dropping payments may reduce the fee, raising the - // change above dust - // That's fine since it'd have to be in a very precarious state AND then it's over-eager in - // tabulating costs - if on_chain_expected_change < Self::DUST { - operating_costs += theoretical_change_amount; - } - } - - Ok(PreparedSend { tx: Some(tx), post_fee_branches, operating_costs }) - } - - /// Attempt to sign a SignableTransaction. - async fn attempt_sign( - &self, - keys: ThresholdKeys, - transaction: Self::SignableTransaction, - ) -> Result; - - /// Publish a completion. - async fn publish_completion( - &self, - completion: &::Completion, - ) -> Result<(), NetworkError>; - - /// Confirm a plan was completed by the specified transaction, per our bounds. - /// - /// Returns Err if there was an error with the confirmation methodology. - /// Returns Ok(None) if this is not a valid completion. - /// Returns Ok(Some(_)) with the completion if it's valid. - async fn confirm_completion( - &self, - eventuality: &Self::Eventuality, - claim: &::Claim, - ) -> Result::Completion>, NetworkError>; - - /// Get a block's number by its ID. - #[cfg(test)] - async fn get_block_number(&self, id: &>::Id) -> usize; - - /// Check an Eventuality is fulfilled by a claim. - #[cfg(test)] - async fn check_eventuality_by_claim( - &self, - eventuality: &Self::Eventuality, - claim: &::Claim, - ) -> bool; - - /// Get a transaction by the Eventuality it completes. - #[cfg(test)] - async fn get_transaction_by_eventuality( - &self, - block: usize, - eventuality: &Self::Eventuality, - ) -> Self::Transaction; - - #[cfg(test)] - async fn mine_block(&self); - - /// Sends to the specified address. - /// Additionally mines enough blocks so that the TX is past the confirmation depth. 
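The fee-amortization step inside prepare_send above is easy to lose within the surrounding bookkeeping. A self-contained sketch of just that loop (the function name and test values are illustrative; as in the original, the payments are assumed to collectively cover the fee):

// Split total_fee across the payments, taking any remainder from the first payment of
// each pass, then zero anything that falls under the dust threshold so it gets dropped
fn amortize_fee(payment_amounts: &mut [u64], total_fee: u64, dust: u64) {
  let n = u64::try_from(payment_amounts.len()).unwrap();
  let mut remaining_fee = total_fee;
  while remaining_fee != 0 {
    let this_iter_fee = remaining_fee / n;
    let mut overage = remaining_fee % n;
    for amount in payment_amounts.iter_mut() {
      let this_payment_fee = this_iter_fee + overage;
      // Only apply the overage once per pass
      overage = 0;
      let subtractable = (*amount).min(this_payment_fee);
      remaining_fee -= subtractable;
      *amount -= subtractable;
    }
  }
  for amount in payment_amounts.iter_mut() {
    if *amount < dust {
      *amount = 0;
    }
  }
}

#[test]
fn amortize() {
  let mut amounts = [40, 30, 5];
  amortize_fee(&mut amounts, 9, 10);
  // 3 is taken from each payment; the third then falls under the dust threshold of 10
  assert_eq!(amounts, [37, 27, 0]);
}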
- #[cfg(test)] - async fn test_send(&self, key: Self::Address) -> Self::Block; -} - -pub trait UtxoNetwork: Network { - /// The maximum amount of inputs which will fit in a TX. - /// This should be equal to MAX_OUTPUTS unless one is specifically limited. - /// A TX with MAX_INPUTS and MAX_OUTPUTS must not exceed the max size. - const MAX_INPUTS: usize; -} diff --git a/processor/src/networks/monero.rs b/processor/src/networks/monero.rs deleted file mode 100644 index 6ffa29df2..000000000 --- a/processor/src/networks/monero.rs +++ /dev/null @@ -1,807 +0,0 @@ -use std::{time::Duration, collections::HashMap, io}; - -use async_trait::async_trait; - -use zeroize::Zeroizing; - -use rand_core::SeedableRng; -use rand_chacha::ChaCha20Rng; - -use transcript::{Transcript, RecommendedTranscript}; - -use ciphersuite::group::{ff::Field, Group}; -use dalek_ff_group::{Scalar, EdwardsPoint}; -use frost::{curve::Ed25519, ThresholdKeys}; - -use monero_simple_request_rpc::SimpleRequestRpc; -use monero_wallet::{ - ringct::RctType, - transaction::Transaction, - block::Block, - rpc::{FeeRate, RpcError, Rpc}, - address::{Network as MoneroNetwork, SubaddressIndex}, - ViewPair, GuaranteedViewPair, WalletOutput, OutputWithDecoys, GuaranteedScanner, - send::{ - SendError, Change, SignableTransaction as MSignableTransaction, Eventuality, TransactionMachine, - }, -}; -#[cfg(test)] -use monero_wallet::Scanner; - -use tokio::time::sleep; - -pub use serai_client::{ - primitives::{MAX_DATA_LEN, Coin, NetworkId, Amount, Balance}, - networks::monero::Address, -}; - -use crate::{ - Payment, additional_key, - networks::{ - NetworkError, Block as BlockTrait, OutputType, Output as OutputTrait, - Transaction as TransactionTrait, SignableTransaction as SignableTransactionTrait, - Eventuality as EventualityTrait, EventualitiesTracker, Network, UtxoNetwork, - }, - multisigs::scheduler::utxo::Scheduler, -}; - -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Output(WalletOutput); - -const EXTERNAL_SUBADDRESS: Option = SubaddressIndex::new(0, 0); -const BRANCH_SUBADDRESS: Option = SubaddressIndex::new(1, 0); -const CHANGE_SUBADDRESS: Option = SubaddressIndex::new(2, 0); -const FORWARD_SUBADDRESS: Option = SubaddressIndex::new(3, 0); - -impl OutputTrait for Output { - // While we could use (tx, o), using the key ensures we won't be susceptible to the burning bug. - // While we already are immune, thanks to using featured address, this doesn't hurt and is - // technically more efficient. - type Id = [u8; 32]; - - fn kind(&self) -> OutputType { - match self.0.subaddress() { - EXTERNAL_SUBADDRESS => OutputType::External, - BRANCH_SUBADDRESS => OutputType::Branch, - CHANGE_SUBADDRESS => OutputType::Change, - FORWARD_SUBADDRESS => OutputType::Forwarded, - _ => panic!("unrecognized address was scanned for"), - } - } - - fn id(&self) -> Self::Id { - self.0.key().compress().to_bytes() - } - - fn tx_id(&self) -> [u8; 32] { - self.0.transaction() - } - - fn key(&self) -> EdwardsPoint { - EdwardsPoint(self.0.key() - (EdwardsPoint::generator().0 * self.0.key_offset())) - } - - fn presumed_origin(&self) -> Option
{ - None - } - - fn balance(&self) -> Balance { - Balance { coin: Coin::Monero, amount: Amount(self.0.commitment().amount) } - } - - fn data(&self) -> &[u8] { - let Some(data) = self.0.arbitrary_data().first() else { return &[] }; - // If the data is too large, prune it - // This should cause decoding the instruction to fail, and trigger a refund as appropriate - if data.len() > usize::try_from(MAX_DATA_LEN).unwrap() { - return &[]; - } - data - } - - fn write(&self, writer: &mut W) -> io::Result<()> { - self.0.write(writer)?; - Ok(()) - } - - fn read(reader: &mut R) -> io::Result { - Ok(Output(WalletOutput::read(reader)?)) - } -} - -// TODO: Consider ([u8; 32], TransactionPruned) -#[async_trait] -impl TransactionTrait for Transaction { - type Id = [u8; 32]; - fn id(&self) -> Self::Id { - self.hash() - } - - #[cfg(test)] - async fn fee(&self, _: &Monero) -> u64 { - match self { - Transaction::V1 { .. } => panic!("v1 TX in test-only function"), - Transaction::V2 { ref proofs, .. } => proofs.as_ref().unwrap().base.fee, - } - } -} - -impl EventualityTrait for Eventuality { - type Claim = [u8; 32]; - type Completion = Transaction; - - // Use the TX extra to look up potential matches - // While anyone can forge this, a transaction with distinct outputs won't actually match - // Extra includess the one time keys which are derived from the plan ID, so a collision here is a - // hash collision - fn lookup(&self) -> Vec { - self.extra() - } - - fn read(reader: &mut R) -> io::Result { - Eventuality::read(reader) - } - fn serialize(&self) -> Vec { - self.serialize() - } - - fn claim(tx: &Transaction) -> [u8; 32] { - tx.id() - } - fn serialize_completion(completion: &Transaction) -> Vec { - completion.serialize() - } - fn read_completion(reader: &mut R) -> io::Result { - Transaction::read(reader) - } -} - -#[derive(Clone, Debug)] -pub struct SignableTransaction(MSignableTransaction); -impl SignableTransactionTrait for SignableTransaction { - fn fee(&self) -> u64 { - self.0.necessary_fee() - } -} - -#[async_trait] -impl BlockTrait for Block { - type Id = [u8; 32]; - fn id(&self) -> Self::Id { - self.hash() - } - - fn parent(&self) -> Self::Id { - self.header.previous - } - - async fn time(&self, rpc: &Monero) -> u64 { - // Constant from Monero - const BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW: usize = 60; - - // If Monero doesn't have enough blocks to build a window, it doesn't define a network time - if (self.number().unwrap() + 1) < BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW { - // Use the block number as the time - return u64::try_from(self.number().unwrap()).unwrap(); - } - - let mut timestamps = vec![self.header.timestamp]; - let mut parent = self.parent(); - while timestamps.len() < BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW { - let mut parent_block; - while { - parent_block = rpc.rpc.get_block(parent).await; - parent_block.is_err() - } { - log::error!("couldn't get parent block when trying to get block time: {parent_block:?}"); - sleep(Duration::from_secs(5)).await; - } - let parent_block = parent_block.unwrap(); - timestamps.push(parent_block.header.timestamp); - parent = parent_block.parent(); - - if parent_block.number().unwrap() == 0 { - break; - } - } - timestamps.sort(); - - // Because 60 has two medians, Monero's epee picks the in-between value, calculated by the - // following formula (from the "get_mid" function) - let n = timestamps.len() / 2; - let a = timestamps[n - 1]; - let b = timestamps[n]; - #[rustfmt::skip] // Enables Ctrl+F'ing for everything after the `= ` - let res = (a/2) + (b/2) + ((a - 2*(a/2)) + 
(b - 2*(b/2)))/2; - // Technically, res may be 1 if all prior blocks had a timestamp by 0, which would break - // monotonicity with our above definition of height as time - // Monero also solely requires the block's time not be less than the median, it doesn't ensure - // it advances the median forward - // Ensure monotonicity despite both these issues by adding the block number to the median time - res + u64::try_from(self.number().unwrap()).unwrap() - } -} - -#[derive(Clone, Debug)] -pub struct Monero { - rpc: SimpleRequestRpc, -} -// Shim required for testing/debugging purposes due to generic arguments also necessitating trait -// bounds -impl PartialEq for Monero { - fn eq(&self, _: &Self) -> bool { - true - } -} -impl Eq for Monero {} - -#[allow(clippy::needless_pass_by_value)] // Needed to satisfy API expectations -fn map_rpc_err(err: RpcError) -> NetworkError { - if let RpcError::InvalidNode(reason) = &err { - log::error!("Monero RpcError::InvalidNode({reason})"); - } else { - log::debug!("Monero RpcError {err:?}"); - } - NetworkError::ConnectionError -} - -enum MakeSignableTransactionResult { - Fee(u64), - SignableTransaction(MSignableTransaction), -} - -impl Monero { - pub async fn new(url: String) -> Monero { - let mut res = SimpleRequestRpc::new(url.clone()).await; - while let Err(e) = res { - log::error!("couldn't connect to Monero node: {e:?}"); - tokio::time::sleep(Duration::from_secs(5)).await; - res = SimpleRequestRpc::new(url.clone()).await; - } - Monero { rpc: res.unwrap() } - } - - fn view_pair(spend: EdwardsPoint) -> GuaranteedViewPair { - GuaranteedViewPair::new(spend.0, Zeroizing::new(additional_key::(0).0)).unwrap() - } - - fn address_internal(spend: EdwardsPoint, subaddress: Option) -> Address { - Address::new(Self::view_pair(spend).address(MoneroNetwork::Mainnet, subaddress, None)).unwrap() - } - - fn scanner(spend: EdwardsPoint) -> GuaranteedScanner { - let mut scanner = GuaranteedScanner::new(Self::view_pair(spend)); - debug_assert!(EXTERNAL_SUBADDRESS.is_none()); - scanner.register_subaddress(BRANCH_SUBADDRESS.unwrap()); - scanner.register_subaddress(CHANGE_SUBADDRESS.unwrap()); - scanner.register_subaddress(FORWARD_SUBADDRESS.unwrap()); - scanner - } - - async fn median_fee(&self, block: &Block) -> Result { - let mut fees = vec![]; - for tx_hash in &block.transactions { - let tx = - self.rpc.get_transaction(*tx_hash).await.map_err(|_| NetworkError::ConnectionError)?; - // Only consider fees from RCT transactions, else the fee property read wouldn't be accurate - let fee = match &tx { - Transaction::V2 { proofs: Some(proofs), .. } => proofs.base.fee, - _ => continue, - }; - fees.push(fee / u64::try_from(tx.weight()).unwrap()); - } - fees.sort(); - let fee = fees.get(fees.len() / 2).copied().unwrap_or(0); - - // TODO: Set a sane minimum fee - const MINIMUM_FEE: u64 = 1_500_000; - Ok(FeeRate::new(fee.max(MINIMUM_FEE), 10000).unwrap()) - } - - async fn make_signable_transaction( - &self, - block_number: usize, - plan_id: &[u8; 32], - inputs: &[Output], - payments: &[Payment], - change: &Option
, - calculating_fee: bool, - ) -> Result, NetworkError> { - for payment in payments { - assert_eq!(payment.balance.coin, Coin::Monero); - } - - // TODO2: Use an fee representative of several blocks, cached inside Self - let block_for_fee = self.get_block(block_number).await?; - let fee_rate = self.median_fee(&block_for_fee).await?; - - // Determine the RCT proofs to make based off the hard fork - // TODO: Make a fn for this block which is duplicated with tests - let rct_type = match block_for_fee.header.hardfork_version { - 14 => RctType::ClsagBulletproof, - 15 | 16 => RctType::ClsagBulletproofPlus, - _ => panic!("Monero hard forked and the processor wasn't updated for it"), - }; - - let mut transcript = - RecommendedTranscript::new(b"Serai Processor Monero Transaction Transcript"); - transcript.append_message(b"plan", plan_id); - - // All signers need to select the same decoys - // All signers use the same height and a seeded RNG to make sure they do so. - let mut inputs_actual = Vec::with_capacity(inputs.len()); - for input in inputs { - inputs_actual.push( - OutputWithDecoys::fingerprintable_deterministic_new( - &mut ChaCha20Rng::from_seed(transcript.rng_seed(b"decoys")), - &self.rpc, - // TODO: Have Decoys take RctType - match rct_type { - RctType::ClsagBulletproof => 11, - RctType::ClsagBulletproofPlus => 16, - _ => panic!("selecting decoys for an unsupported RctType"), - }, - block_number + 1, - input.0.clone(), - ) - .await - .map_err(map_rpc_err)?, - ); - } - - // Monero requires at least two outputs - // If we only have one output planned, add a dummy payment - let mut payments = payments.to_vec(); - let outputs = payments.len() + usize::from(u8::from(change.is_some())); - if outputs == 0 { - return Ok(None); - } else if outputs == 1 { - payments.push(Payment { - address: Address::new( - ViewPair::new(EdwardsPoint::generator().0, Zeroizing::new(Scalar::ONE.0)) - .unwrap() - .legacy_address(MoneroNetwork::Mainnet), - ) - .unwrap(), - balance: Balance { coin: Coin::Monero, amount: Amount(0) }, - data: None, - }); - } - - let payments = payments - .into_iter() - .map(|payment| (payment.address.into(), payment.balance.amount.0)) - .collect::>(); - - match MSignableTransaction::new( - rct_type, - // Use the plan ID as the outgoing view key - Zeroizing::new(*plan_id), - inputs_actual, - payments, - Change::fingerprintable(change.as_ref().map(|change| change.clone().into())), - vec![], - fee_rate, - ) { - Ok(signable) => Ok(Some({ - if calculating_fee { - MakeSignableTransactionResult::Fee(signable.necessary_fee()) - } else { - MakeSignableTransactionResult::SignableTransaction(signable) - } - })), - Err(e) => match e { - SendError::UnsupportedRctType => { - panic!("trying to use an RctType unsupported by monero-wallet") - } - SendError::NoInputs | - SendError::InvalidDecoyQuantity | - SendError::NoOutputs | - SendError::TooManyOutputs | - SendError::NoChange | - SendError::TooMuchArbitraryData | - SendError::TooLargeTransaction | - SendError::WrongPrivateKey => { - panic!("created an invalid Monero transaction: {e}"); - } - SendError::MultiplePaymentIds => { - panic!("multiple payment IDs despite not supporting integrated addresses"); - } - SendError::NotEnoughFunds { inputs, outputs, necessary_fee } => { - log::debug!( - "Monero NotEnoughFunds. 
inputs: {:?}, outputs: {:?}, necessary_fee: {necessary_fee:?}", - inputs, - outputs - ); - match necessary_fee { - Some(necessary_fee) => { - // If we're solely calculating the fee, return the fee this TX will cost - if calculating_fee { - Ok(Some(MakeSignableTransactionResult::Fee(necessary_fee))) - } else { - // If we're actually trying to make the TX, return None - Ok(None) - } - } - // We didn't have enough funds to even cover the outputs - None => { - // Ensure we're not misinterpreting this - assert!(outputs > inputs); - Ok(None) - } - } - } - SendError::MaliciousSerialization | SendError::ClsagError(_) | SendError::FrostError(_) => { - panic!("supposedly unreachable (at this time) Monero error: {e}"); - } - }, - } - } - - #[cfg(test)] - fn test_view_pair() -> ViewPair { - ViewPair::new(*EdwardsPoint::generator(), Zeroizing::new(Scalar::ONE.0)).unwrap() - } - - #[cfg(test)] - fn test_scanner() -> Scanner { - Scanner::new(Self::test_view_pair()) - } - - #[cfg(test)] - fn test_address() -> Address { - Address::new(Self::test_view_pair().legacy_address(MoneroNetwork::Mainnet)).unwrap() - } -} - -#[async_trait] -impl Network for Monero { - type Curve = Ed25519; - - type Transaction = Transaction; - type Block = Block; - - type Output = Output; - type SignableTransaction = SignableTransaction; - type Eventuality = Eventuality; - type TransactionMachine = TransactionMachine; - - type Scheduler = Scheduler; - - type Address = Address; - - const NETWORK: NetworkId = NetworkId::Monero; - const ID: &'static str = "Monero"; - const ESTIMATED_BLOCK_TIME_IN_SECONDS: usize = 120; - const CONFIRMATIONS: usize = 10; - - const MAX_OUTPUTS: usize = 16; - - // 0.01 XMR - const DUST: u64 = 10000000000; - - // TODO - const COST_TO_AGGREGATE: u64 = 0; - - // Monero doesn't require/benefit from tweaking - fn tweak_keys(_: &mut ThresholdKeys) {} - - #[cfg(test)] - async fn external_address(&self, key: EdwardsPoint) -> Address { - Self::address_internal(key, EXTERNAL_SUBADDRESS) - } - - fn branch_address(key: EdwardsPoint) -> Option
{ - Some(Self::address_internal(key, BRANCH_SUBADDRESS)) - } - - fn change_address(key: EdwardsPoint) -> Option<Address>
{ - Some(Self::address_internal(key, CHANGE_SUBADDRESS)) - } - - fn forward_address(key: EdwardsPoint) -> Option<Address>
{ - Some(Self::address_internal(key, FORWARD_SUBADDRESS)) - } - - async fn get_latest_block_number(&self) -> Result { - // Monero defines height as chain length, so subtract 1 for block number - Ok(self.rpc.get_height().await.map_err(map_rpc_err)? - 1) - } - - async fn get_block(&self, number: usize) -> Result { - Ok( - self - .rpc - .get_block(self.rpc.get_block_hash(number).await.map_err(map_rpc_err)?) - .await - .map_err(map_rpc_err)?, - ) - } - - async fn get_outputs(&self, block: &Block, key: EdwardsPoint) -> Vec { - let outputs = loop { - match self - .rpc - .get_scannable_block(block.clone()) - .await - .map_err(|e| format!("{e:?}")) - .and_then(|block| Self::scanner(key).scan(block).map_err(|e| format!("{e:?}"))) - { - Ok(outputs) => break outputs, - Err(e) => { - log::error!("couldn't scan block {}: {e:?}", hex::encode(block.id())); - sleep(Duration::from_secs(60)).await; - continue; - } - } - }; - - // Miner transactions are required to explicitly state their timelock, so this does exclude - // those (which have an extended timelock we don't want to deal with) - let raw_outputs = outputs.not_additionally_locked(); - let mut outputs = Vec::with_capacity(raw_outputs.len()); - for output in raw_outputs { - // This should be pointless as we shouldn't be able to scan for any other subaddress - // This just helps ensures nothing invalid makes it through - assert!([EXTERNAL_SUBADDRESS, BRANCH_SUBADDRESS, CHANGE_SUBADDRESS, FORWARD_SUBADDRESS] - .contains(&output.subaddress())); - - outputs.push(Output(output)); - } - - outputs - } - - async fn get_eventuality_completions( - &self, - eventualities: &mut EventualitiesTracker, - block: &Block, - ) -> HashMap<[u8; 32], (usize, [u8; 32], Transaction)> { - let mut res = HashMap::new(); - if eventualities.map.is_empty() { - return res; - } - - async fn check_block( - network: &Monero, - eventualities: &mut EventualitiesTracker, - block: &Block, - res: &mut HashMap<[u8; 32], (usize, [u8; 32], Transaction)>, - ) { - for hash in &block.transactions { - let tx = { - let mut tx; - while { - tx = network.rpc.get_transaction(*hash).await; - tx.is_err() - } { - log::error!("couldn't get transaction {}: {}", hex::encode(hash), tx.err().unwrap()); - sleep(Duration::from_secs(60)).await; - } - tx.unwrap() - }; - - if let Some((_, eventuality)) = eventualities.map.get(&tx.prefix().extra) { - if eventuality.matches(&tx.clone().into()) { - res.insert( - eventualities.map.remove(&tx.prefix().extra).unwrap().0, - (block.number().unwrap(), tx.id(), tx), - ); - } - } - } - - eventualities.block_number += 1; - assert_eq!(eventualities.block_number, block.number().unwrap()); - } - - for block_num in (eventualities.block_number + 1) .. block.number().unwrap() { - let block = { - let mut block; - while { - block = self.get_block(block_num).await; - block.is_err() - } { - log::error!("couldn't get block {}: {}", block_num, block.err().unwrap()); - sleep(Duration::from_secs(60)).await; - } - block.unwrap() - }; - - check_block(self, eventualities, &block, &mut res).await; - } - - // Also check the current block - check_block(self, eventualities, block, &mut res).await; - assert_eq!(eventualities.block_number, block.number().unwrap()); - - res - } - - async fn needed_fee( - &self, - block_number: usize, - inputs: &[Output], - payments: &[Payment], - change: &Option
, - ) -> Result, NetworkError> { - let res = self - .make_signable_transaction(block_number, &[0; 32], inputs, payments, change, true) - .await?; - let Some(res) = res else { return Ok(None) }; - let MakeSignableTransactionResult::Fee(fee) = res else { - panic!("told make_signable_transaction calculating_fee and got transaction") - }; - Ok(Some(fee)) - } - - async fn signable_transaction( - &self, - block_number: usize, - plan_id: &[u8; 32], - _key: EdwardsPoint, - inputs: &[Output], - payments: &[Payment], - change: &Option
, - (): &(), - ) -> Result, NetworkError> { - let res = self - .make_signable_transaction(block_number, plan_id, inputs, payments, change, false) - .await?; - let Some(res) = res else { return Ok(None) }; - let MakeSignableTransactionResult::SignableTransaction(signable) = res else { - panic!("told make_signable_transaction not calculating_fee and got fee") - }; - - let signable = SignableTransaction(signable); - let eventuality = signable.0.clone().into(); - Ok(Some((signable, eventuality))) - } - - async fn attempt_sign( - &self, - keys: ThresholdKeys, - transaction: SignableTransaction, - ) -> Result { - match transaction.0.clone().multisig(keys) { - Ok(machine) => Ok(machine), - Err(e) => panic!("failed to create a multisig machine for TX: {e}"), - } - } - - async fn publish_completion(&self, tx: &Transaction) -> Result<(), NetworkError> { - match self.rpc.publish_transaction(tx).await { - Ok(()) => Ok(()), - Err(RpcError::ConnectionError(e)) => { - log::debug!("Monero ConnectionError: {e}"); - Err(NetworkError::ConnectionError)? - } - // TODO: Distinguish already in pool vs double spend (other signing attempt succeeded) vs - // invalid transaction - Err(e) => panic!("failed to publish TX {}: {e}", hex::encode(tx.hash())), - } - } - - async fn confirm_completion( - &self, - eventuality: &Eventuality, - id: &[u8; 32], - ) -> Result, NetworkError> { - let tx = self.rpc.get_transaction(*id).await.map_err(map_rpc_err)?; - if eventuality.matches(&tx.clone().into()) { - Ok(Some(tx)) - } else { - Ok(None) - } - } - - #[cfg(test)] - async fn get_block_number(&self, id: &[u8; 32]) -> usize { - self.rpc.get_block(*id).await.unwrap().number().unwrap() - } - - #[cfg(test)] - async fn check_eventuality_by_claim( - &self, - eventuality: &Self::Eventuality, - claim: &[u8; 32], - ) -> bool { - return eventuality.matches(&self.rpc.get_pruned_transaction(*claim).await.unwrap()); - } - - #[cfg(test)] - async fn get_transaction_by_eventuality( - &self, - block: usize, - eventuality: &Eventuality, - ) -> Transaction { - let block = self.rpc.get_block_by_number(block).await.unwrap(); - for tx in &block.transactions { - let tx = self.rpc.get_transaction(*tx).await.unwrap(); - if eventuality.matches(&tx.clone().into()) { - return tx; - } - } - panic!("block didn't have a transaction for this eventuality") - } - - #[cfg(test)] - async fn mine_block(&self) { - // https://github.com/serai-dex/serai/issues/198 - sleep(std::time::Duration::from_millis(100)).await; - self.rpc.generate_blocks(&Self::test_address().into(), 1).await.unwrap(); - } - - #[cfg(test)] - async fn test_send(&self, address: Address) -> Block { - use zeroize::Zeroizing; - use rand_core::{RngCore, OsRng}; - use monero_wallet::rpc::FeePriority; - - let new_block = self.get_latest_block_number().await.unwrap() + 1; - for _ in 0 .. 
80 { - self.mine_block().await; - } - - let new_block = self.rpc.get_block_by_number(new_block).await.unwrap(); - let mut outputs = Self::test_scanner() - .scan(self.rpc.get_scannable_block(new_block.clone()).await.unwrap()) - .unwrap() - .ignore_additional_timelock(); - let output = outputs.swap_remove(0); - - let amount = output.commitment().amount; - // The dust should always be sufficient for the fee - let fee = Monero::DUST; - - let rct_type = match new_block.header.hardfork_version { - 14 => RctType::ClsagBulletproof, - 15 | 16 => RctType::ClsagBulletproofPlus, - _ => panic!("Monero hard forked and the processor wasn't updated for it"), - }; - - let output = OutputWithDecoys::fingerprintable_deterministic_new( - &mut OsRng, - &self.rpc, - match rct_type { - RctType::ClsagBulletproof => 11, - RctType::ClsagBulletproofPlus => 16, - _ => panic!("selecting decoys for an unsupported RctType"), - }, - self.rpc.get_height().await.unwrap(), - output, - ) - .await - .unwrap(); - - let mut outgoing_view_key = Zeroizing::new([0; 32]); - OsRng.fill_bytes(outgoing_view_key.as_mut()); - let tx = MSignableTransaction::new( - rct_type, - outgoing_view_key, - vec![output], - vec![(address.into(), amount - fee)], - Change::fingerprintable(Some(Self::test_address().into())), - vec![], - self.rpc.get_fee_rate(FeePriority::Unimportant).await.unwrap(), - ) - .unwrap() - .sign(&mut OsRng, &Zeroizing::new(Scalar::ONE.0)) - .unwrap(); - - let block = self.get_latest_block_number().await.unwrap() + 1; - self.rpc.publish_transaction(&tx).await.unwrap(); - for _ in 0 .. 10 { - self.mine_block().await; - } - self.get_block(block).await.unwrap() - } -} - -impl UtxoNetwork for Monero { - // wallet2 will not create a transaction larger than 100kb, and Monero won't relay a transaction - // larger than 150kb. 
This fits within the 100kb mark - // Technically, it can be ~124, yet a small bit of buffer is appreciated - // TODO: Test creating a TX this big - const MAX_INPUTS: usize = 120; -} diff --git a/processor/src/plan.rs b/processor/src/plan.rs deleted file mode 100644 index 58a8a5e11..000000000 --- a/processor/src/plan.rs +++ /dev/null @@ -1,212 +0,0 @@ -use std::io; - -use scale::{Encode, Decode}; - -use transcript::{Transcript, RecommendedTranscript}; -use ciphersuite::group::GroupEncoding; -use frost::curve::Ciphersuite; - -use serai_client::primitives::Balance; - -use crate::{ - networks::{Output, Network}, - multisigs::scheduler::{SchedulerAddendum, Scheduler}, -}; - -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Payment { - pub address: N::Address, - pub data: Option>, - pub balance: Balance, -} - -impl Payment { - pub fn transcript(&self, transcript: &mut T) { - transcript.domain_separate(b"payment"); - transcript.append_message(b"address", self.address.to_string().as_bytes()); - if let Some(data) = self.data.as_ref() { - transcript.append_message(b"data", data); - } - transcript.append_message(b"coin", self.balance.coin.encode()); - transcript.append_message(b"amount", self.balance.amount.0.to_le_bytes()); - } - - pub fn write(&self, writer: &mut W) -> io::Result<()> { - // TODO: Don't allow creating Payments with an Address which can't be serialized - let address: Vec = self - .address - .clone() - .try_into() - .map_err(|_| io::Error::other("address couldn't be serialized"))?; - writer.write_all(&u32::try_from(address.len()).unwrap().to_le_bytes())?; - writer.write_all(&address)?; - - writer.write_all(&[u8::from(self.data.is_some())])?; - if let Some(data) = &self.data { - writer.write_all(&u32::try_from(data.len()).unwrap().to_le_bytes())?; - writer.write_all(data)?; - } - - writer.write_all(&self.balance.encode()) - } - - pub fn read(reader: &mut R) -> io::Result { - let mut buf = [0; 4]; - reader.read_exact(&mut buf)?; - let mut address = vec![0; usize::try_from(u32::from_le_bytes(buf)).unwrap()]; - reader.read_exact(&mut address)?; - let address = N::Address::try_from(address).map_err(|_| io::Error::other("invalid address"))?; - - let mut buf = [0; 1]; - reader.read_exact(&mut buf)?; - let data = if buf[0] == 1 { - let mut buf = [0; 4]; - reader.read_exact(&mut buf)?; - let mut data = vec![0; usize::try_from(u32::from_le_bytes(buf)).unwrap()]; - reader.read_exact(&mut data)?; - Some(data) - } else { - None - }; - - let balance = Balance::decode(&mut scale::IoReader(reader)) - .map_err(|_| io::Error::other("invalid balance"))?; - - Ok(Payment { address, data, balance }) - } -} - -#[derive(Clone, PartialEq)] -pub struct Plan { - pub key: ::G, - pub inputs: Vec, - /// The payments this Plan is intended to create. - /// - /// This should only contain payments leaving Serai. While it is acceptable for users to enter - /// Serai's address(es) as the payment address, as that'll be handled by anything which expects - /// certain properties, Serai as a system MUST NOT use payments for internal transfers. Doing - /// so will cause a reduction in their value by the TX fee/operating costs, creating an - /// incomplete transfer. - pub payments: Vec>, - /// The change this Plan should use. - /// - /// This MUST contain a Serai address. Operating costs may be deducted from the payments in this - /// Plan on the premise that the change address is Serai's, and accordingly, Serai will recoup - /// the operating costs. - // - // TODO: Consider moving to ::G? 
- pub change: Option, - /// The scheduler's additional data. - pub scheduler_addendum: >::Addendum, -} -impl core::fmt::Debug for Plan { - fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { - fmt - .debug_struct("Plan") - .field("key", &hex::encode(self.key.to_bytes())) - .field("inputs", &self.inputs) - .field("payments", &self.payments) - .field("change", &self.change.as_ref().map(ToString::to_string)) - .field("scheduler_addendum", &self.scheduler_addendum) - .finish() - } -} - -impl Plan { - pub fn transcript(&self) -> RecommendedTranscript { - let mut transcript = RecommendedTranscript::new(b"Serai Processor Plan ID"); - transcript.domain_separate(b"meta"); - transcript.append_message(b"network", N::ID); - transcript.append_message(b"key", self.key.to_bytes()); - - transcript.domain_separate(b"inputs"); - for input in &self.inputs { - transcript.append_message(b"input", input.id()); - } - - transcript.domain_separate(b"payments"); - for payment in &self.payments { - payment.transcript(&mut transcript); - } - - if let Some(change) = &self.change { - transcript.append_message(b"change", change.to_string()); - } - - let mut addendum_bytes = vec![]; - self.scheduler_addendum.write(&mut addendum_bytes).unwrap(); - transcript.append_message(b"scheduler_addendum", addendum_bytes); - - transcript - } - - pub fn id(&self) -> [u8; 32] { - let challenge = self.transcript().challenge(b"id"); - let mut res = [0; 32]; - res.copy_from_slice(&challenge[.. 32]); - res - } - - pub fn write(&self, writer: &mut W) -> io::Result<()> { - writer.write_all(self.key.to_bytes().as_ref())?; - - writer.write_all(&u32::try_from(self.inputs.len()).unwrap().to_le_bytes())?; - for input in &self.inputs { - input.write(writer)?; - } - - writer.write_all(&u32::try_from(self.payments.len()).unwrap().to_le_bytes())?; - for payment in &self.payments { - payment.write(writer)?; - } - - // TODO: Have Plan construction fail if change cannot be serialized - let change = if let Some(change) = &self.change { - change.clone().try_into().map_err(|_| { - io::Error::other(format!( - "an address we said to use as change couldn't be converted to a Vec: {}", - change.to_string(), - )) - })? - } else { - vec![] - }; - assert!(serai_client::primitives::MAX_ADDRESS_LEN <= u8::MAX.into()); - writer.write_all(&[u8::try_from(change.len()).unwrap()])?; - writer.write_all(&change)?; - self.scheduler_addendum.write(writer) - } - - pub fn read(reader: &mut R) -> io::Result { - let key = N::Curve::read_G(reader)?; - - let mut inputs = vec![]; - let mut buf = [0; 4]; - reader.read_exact(&mut buf)?; - for _ in 0 .. u32::from_le_bytes(buf) { - inputs.push(N::Output::read(reader)?); - } - - let mut payments = vec![]; - reader.read_exact(&mut buf)?; - for _ in 0 .. u32::from_le_bytes(buf) { - payments.push(Payment::::read(reader)?); - } - - let mut len = [0; 1]; - reader.read_exact(&mut len)?; - let mut change = vec![0; usize::from(len[0])]; - reader.read_exact(&mut change)?; - let change = - if change.is_empty() { - None - } else { - Some(N::Address::try_from(change).map_err(|_| { - io::Error::other("couldn't deserialize an Address serialized into a Plan") - })?) 
- }; - - let scheduler_addendum = >::Addendum::read(reader)?; - Ok(Plan { key, inputs, payments, change, scheduler_addendum }) - } -} diff --git a/processor/src/signer.rs b/processor/src/signer.rs deleted file mode 100644 index cab0bceb1..000000000 --- a/processor/src/signer.rs +++ /dev/null @@ -1,654 +0,0 @@ -use core::{marker::PhantomData, fmt}; -use std::collections::HashMap; - -use rand_core::OsRng; -use frost::{ - ThresholdKeys, FrostError, - sign::{Writable, PreprocessMachine, SignMachine, SignatureMachine}, -}; - -use log::{info, debug, warn, error}; - -use serai_client::validator_sets::primitives::Session; -use messages::sign::*; - -pub use serai_db::*; - -use crate::{ - Get, DbTxn, Db, - networks::{Eventuality, Network}, -}; - -create_db!( - SignerDb { - CompletionsDb: (id: [u8; 32]) -> Vec, - EventualityDb: (id: [u8; 32]) -> Vec, - AttemptDb: (id: &SignId) -> (), - CompletionDb: (claim: &[u8]) -> Vec, - ActiveSignsDb: () -> Vec<[u8; 32]>, - CompletedOnChainDb: (id: &[u8; 32]) -> (), - } -); - -impl ActiveSignsDb { - fn add_active_sign(txn: &mut impl DbTxn, id: &[u8; 32]) { - if CompletedOnChainDb::get(txn, id).is_some() { - return; - } - let mut active = ActiveSignsDb::get(txn).unwrap_or_default(); - active.push(*id); - ActiveSignsDb::set(txn, &active); - } -} - -impl CompletedOnChainDb { - fn complete_on_chain(txn: &mut impl DbTxn, id: &[u8; 32]) { - CompletedOnChainDb::set(txn, id, &()); - ActiveSignsDb::set( - txn, - &ActiveSignsDb::get(txn) - .unwrap_or_default() - .into_iter() - .filter(|active| active != id) - .collect::>(), - ); - } -} -impl CompletionsDb { - fn completions( - getter: &impl Get, - id: [u8; 32], - ) -> Vec<::Claim> { - let Some(completions) = Self::get(getter, id) else { return vec![] }; - - // If this was set yet is empty, it's because it's the encoding of a claim with a length of 0 - if completions.is_empty() { - let default = ::Claim::default(); - assert_eq!(default.as_ref().len(), 0); - return vec![default]; - } - - let mut completions_ref = completions.as_slice(); - let mut res = vec![]; - while !completions_ref.is_empty() { - let mut id = ::Claim::default(); - let id_len = id.as_ref().len(); - id.as_mut().copy_from_slice(&completions_ref[.. id_len]); - completions_ref = &completions_ref[id_len ..]; - res.push(id); - } - res - } - - fn complete( - txn: &mut impl DbTxn, - id: [u8; 32], - completion: &::Completion, - ) { - // Completions can be completed by multiple signatures - // Save every solution in order to be robust - CompletionDb::save_completion::(txn, completion); - - let claim = N::Eventuality::claim(completion); - let claim: &[u8] = claim.as_ref(); - - // If claim has a 0-byte encoding, the set key, even if empty, is the claim - if claim.is_empty() { - Self::set(txn, id, &vec![]); - return; - } - - let mut existing = Self::get(txn, id).unwrap_or_default(); - assert_eq!(existing.len() % claim.len(), 0); - - // Don't add this completion if it's already present - let mut i = 0; - while i < existing.len() { - if &existing[i .. 
(i + claim.len())] == claim { - return; - } - i += claim.len(); - } - - existing.extend(claim); - Self::set(txn, id, &existing); - } -} - -impl EventualityDb { - fn save_eventuality( - txn: &mut impl DbTxn, - id: [u8; 32], - eventuality: &N::Eventuality, - ) { - txn.put(Self::key(id), eventuality.serialize()); - } - - fn eventuality(getter: &impl Get, id: [u8; 32]) -> Option { - Some(N::Eventuality::read(&mut getter.get(Self::key(id))?.as_slice()).unwrap()) - } -} - -impl CompletionDb { - fn save_completion( - txn: &mut impl DbTxn, - completion: &::Completion, - ) { - let claim = N::Eventuality::claim(completion); - let claim: &[u8] = claim.as_ref(); - Self::set(txn, claim, &N::Eventuality::serialize_completion(completion)); - } - - fn completion( - getter: &impl Get, - claim: &::Claim, - ) -> Option<::Completion> { - Self::get(getter, claim.as_ref()) - .map(|completion| N::Eventuality::read_completion::<&[u8]>(&mut completion.as_ref()).unwrap()) - } -} - -type PreprocessFor = <::TransactionMachine as PreprocessMachine>::Preprocess; -type SignMachineFor = <::TransactionMachine as PreprocessMachine>::SignMachine; -type SignatureShareFor = as SignMachine< - <::Eventuality as Eventuality>::Completion, ->>::SignatureShare; -type SignatureMachineFor = as SignMachine< - <::Eventuality as Eventuality>::Completion, ->>::SignatureMachine; - -pub struct Signer { - db: PhantomData, - - network: N, - - session: Session, - keys: Vec>, - - signable: HashMap<[u8; 32], N::SignableTransaction>, - attempt: HashMap<[u8; 32], u32>, - #[allow(clippy::type_complexity)] - preprocessing: HashMap<[u8; 32], (Vec>, Vec>)>, - #[allow(clippy::type_complexity)] - signing: HashMap<[u8; 32], (SignatureMachineFor, Vec>)>, -} - -impl fmt::Debug for Signer { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt - .debug_struct("Signer") - .field("network", &self.network) - .field("signable", &self.signable) - .field("attempt", &self.attempt) - .finish_non_exhaustive() - } -} - -impl Signer { - /// Rebroadcast already signed TXs which haven't had their completions mined into a sufficiently - /// confirmed block. - pub async fn rebroadcast_task(db: D, network: N) { - log::info!("rebroadcasting transactions for plans whose completions yet to be confirmed..."); - loop { - for active in ActiveSignsDb::get(&db).unwrap_or_default() { - for claim in CompletionsDb::completions::(&db, active) { - log::info!("rebroadcasting completion with claim {}", hex::encode(claim.as_ref())); - // TODO: Don't drop the error entirely. Check for invariants - let _ = - network.publish_completion(&CompletionDb::completion::(&db, &claim).unwrap()).await; - } - } - // Only run every five minutes so we aren't frequently loading tens to hundreds of KB from - // the DB - tokio::time::sleep(core::time::Duration::from_secs(5 * 60)).await; - } - } - pub fn new(network: N, session: Session, keys: Vec>) -> Signer { - assert!(!keys.is_empty()); - Signer { - db: PhantomData, - - network, - - session, - keys, - - signable: HashMap::new(), - attempt: HashMap::new(), - preprocessing: HashMap::new(), - signing: HashMap::new(), - } - } - - fn verify_id(&self, id: &SignId) -> Result<(), ()> { - // Check the attempt lines up - match self.attempt.get(&id.id) { - // If we don't have an attempt logged, it's because the coordinator is faulty OR because we - // rebooted OR we detected the signed transaction on chain, so there's notable network - // latency/a malicious validator - None => { - warn!( - "not attempting {} #{}. 
this is an error if we didn't reboot", - hex::encode(id.id), - id.attempt - ); - Err(())?; - } - Some(attempt) => { - if attempt != &id.attempt { - warn!( - "sent signing data for {} #{} yet we have attempt #{}", - hex::encode(id.id), - id.attempt, - attempt - ); - Err(())?; - } - } - } - - Ok(()) - } - - #[must_use] - fn already_completed(txn: &mut D::Transaction<'_>, id: [u8; 32]) -> bool { - if !CompletionsDb::completions::(txn, id).is_empty() { - debug!( - "SignTransaction/Reattempt order for {}, which we've already completed signing", - hex::encode(id) - ); - - true - } else { - false - } - } - - #[must_use] - fn complete( - &mut self, - id: [u8; 32], - claim: &::Claim, - ) -> ProcessorMessage { - // Assert we're actively signing for this TX - assert!(self.signable.remove(&id).is_some(), "completed a TX we weren't signing for"); - assert!(self.attempt.remove(&id).is_some(), "attempt had an ID signable didn't have"); - // If we weren't selected to participate, we'll have a preprocess - self.preprocessing.remove(&id); - // If we were selected, the signature will only go through if we contributed a share - // Despite this, we then need to get everyone's shares, and we may get a completion before - // we get everyone's shares - // This would be if the coordinator fails and we find the eventuality completion on-chain - self.signing.remove(&id); - - // Emit the event for it - ProcessorMessage::Completed { session: self.session, id, tx: claim.as_ref().to_vec() } - } - - #[must_use] - pub fn completed( - &mut self, - txn: &mut D::Transaction<'_>, - id: [u8; 32], - completion: &::Completion, - ) -> Option { - let first_completion = !Self::already_completed(txn, id); - - // Save this completion to the DB - CompletedOnChainDb::complete_on_chain(txn, &id); - CompletionsDb::complete::(txn, id, completion); - - if first_completion { - Some(self.complete(id, &N::Eventuality::claim(completion))) - } else { - None - } - } - - /// Returns Some if the first completion. 
- // Doesn't use any loops/retries since we'll eventually get this from the Scanner anyways - #[must_use] - async fn claimed_eventuality_completion( - &mut self, - txn: &mut D::Transaction<'_>, - id: [u8; 32], - claim: &::Claim, - ) -> Option { - if let Some(eventuality) = EventualityDb::eventuality::(txn, id) { - match self.network.confirm_completion(&eventuality, claim).await { - Ok(Some(completion)) => { - info!( - "signer eventuality for {} resolved in {}", - hex::encode(id), - hex::encode(claim.as_ref()) - ); - - let first_completion = !Self::already_completed(txn, id); - - // Save this completion to the DB - CompletionsDb::complete::(txn, id, &completion); - - if first_completion { - return Some(self.complete(id, claim)); - } - } - Ok(None) => { - warn!( - "a validator claimed {} completed {} when it did not", - hex::encode(claim.as_ref()), - hex::encode(id), - ); - } - Err(_) => { - // Transaction hasn't hit our mempool/was dropped for a different signature - // The latter can happen given certain latency conditions/a single malicious signer - // In the case of a single malicious signer, they can drag multiple honest validators down - // with them, so we unfortunately can't slash on this case - warn!( - "a validator claimed {} completed {} yet we couldn't check that claim", - hex::encode(claim.as_ref()), - hex::encode(id), - ); - } - } - } else { - warn!( - "informed of completion {} for eventuality {}, when we didn't have that eventuality", - hex::encode(claim.as_ref()), - hex::encode(id), - ); - } - None - } - - #[must_use] - async fn attempt( - &mut self, - txn: &mut D::Transaction<'_>, - id: [u8; 32], - attempt: u32, - ) -> Option { - if Self::already_completed(txn, id) { - return None; - } - - // Check if we're already working on this attempt - if let Some(curr_attempt) = self.attempt.get(&id) { - if curr_attempt >= &attempt { - warn!( - "told to attempt {} #{} yet we're already working on {}", - hex::encode(id), - attempt, - curr_attempt - ); - return None; - } - } - - // Start this attempt - // Clone the TX so we don't have an immutable borrow preventing the below mutable actions - // (also because we do need an owned tx anyways) - let Some(tx) = self.signable.get(&id).cloned() else { - warn!("told to attempt a TX we aren't currently signing for"); - return None; - }; - - // Delete any existing machines - self.preprocessing.remove(&id); - self.signing.remove(&id); - - // Update the attempt number - self.attempt.insert(id, attempt); - - let id = SignId { session: self.session, id, attempt }; - - info!("signing for {} #{}", hex::encode(id.id), id.attempt); - - // If we reboot mid-sign, the current design has us abort all signs and wait for latter - // attempts/new signing protocols - // This is distinct from the DKG which will continue DKG sessions, even on reboot - // This is because signing is tolerant of failures of up to 1/3rd of the group - // The DKG requires 100% participation - // While we could apply similar tricks as the DKG (a seeded RNG) to achieve support for - // reboots, it's not worth the complexity when messing up here leaks our secret share - // - // Despite this, on reboot, we'll get told of active signing items, and may be in this - // branch again for something we've already attempted - // - // Only run if this hasn't already been attempted - // TODO: This isn't complete as this txn may not be committed with the expected timing - if AttemptDb::get(txn, &id).is_some() { - warn!( - "already attempted {} #{}. 
this is an error if we didn't reboot", - hex::encode(id.id), - id.attempt - ); - return None; - } - AttemptDb::set(txn, &id, &()); - - // Attempt to create the TX - let mut machines = vec![]; - let mut preprocesses = vec![]; - let mut serialized_preprocesses = vec![]; - for keys in &self.keys { - let machine = match self.network.attempt_sign(keys.clone(), tx.clone()).await { - Err(e) => { - error!("failed to attempt {}, #{}: {:?}", hex::encode(id.id), id.attempt, e); - return None; - } - Ok(machine) => machine, - }; - - let (machine, preprocess) = machine.preprocess(&mut OsRng); - machines.push(machine); - serialized_preprocesses.push(preprocess.serialize()); - preprocesses.push(preprocess); - } - - self.preprocessing.insert(id.id, (machines, preprocesses)); - - // Broadcast our preprocess - Some(ProcessorMessage::Preprocess { id, preprocesses: serialized_preprocesses }) - } - - #[must_use] - pub async fn sign_transaction( - &mut self, - txn: &mut D::Transaction<'_>, - id: [u8; 32], - tx: N::SignableTransaction, - eventuality: &N::Eventuality, - ) -> Option { - // The caller is expected to re-issue sign orders on reboot - // This is solely used by the rebroadcast task - ActiveSignsDb::add_active_sign(txn, &id); - - if Self::already_completed(txn, id) { - return None; - } - - EventualityDb::save_eventuality::(txn, id, eventuality); - - self.signable.insert(id, tx); - self.attempt(txn, id, 0).await - } - - #[must_use] - pub async fn handle( - &mut self, - txn: &mut D::Transaction<'_>, - msg: CoordinatorMessage, - ) -> Option { - match msg { - CoordinatorMessage::Preprocesses { id, preprocesses } => { - if self.verify_id(&id).is_err() { - return None; - } - - let (machines, our_preprocesses) = match self.preprocessing.remove(&id.id) { - // Either rebooted or RPC error, or some invariant - None => { - warn!( - "not preprocessing for {}. 
this is an error if we didn't reboot", - hex::encode(id.id) - ); - return None; - } - Some(machine) => machine, - }; - - let mut parsed = HashMap::new(); - for l in { - let mut keys = preprocesses.keys().copied().collect::>(); - keys.sort(); - keys - } { - let mut preprocess_ref = preprocesses.get(&l).unwrap().as_slice(); - let Ok(res) = machines[0].read_preprocess(&mut preprocess_ref) else { - return Some(ProcessorMessage::InvalidParticipant { id, participant: l }); - }; - if !preprocess_ref.is_empty() { - return Some(ProcessorMessage::InvalidParticipant { id, participant: l }); - } - parsed.insert(l, res); - } - let preprocesses = parsed; - - // Only keep a single machine as we only need one to get the signature - let mut signature_machine = None; - let mut shares = vec![]; - let mut serialized_shares = vec![]; - for (m, machine) in machines.into_iter().enumerate() { - let mut preprocesses = preprocesses.clone(); - for (i, our_preprocess) in our_preprocesses.clone().into_iter().enumerate() { - if i != m { - assert!(preprocesses.insert(self.keys[i].params().i(), our_preprocess).is_none()); - } - } - - // Use an empty message, as expected of TransactionMachines - let (machine, share) = match machine.sign(preprocesses, &[]) { - Ok(res) => res, - Err(e) => match e { - FrostError::InternalError(_) | - FrostError::InvalidParticipant(_, _) | - FrostError::InvalidSigningSet(_) | - FrostError::InvalidParticipantQuantity(_, _) | - FrostError::DuplicatedParticipant(_) | - FrostError::MissingParticipant(_) => unreachable!(), - - FrostError::InvalidPreprocess(l) | FrostError::InvalidShare(l) => { - return Some(ProcessorMessage::InvalidParticipant { id, participant: l }) - } - }, - }; - if m == 0 { - signature_machine = Some(machine); - } - serialized_shares.push(share.serialize()); - shares.push(share); - } - self.signing.insert(id.id, (signature_machine.unwrap(), shares)); - - // Broadcast our shares - Some(ProcessorMessage::Share { id, shares: serialized_shares }) - } - - CoordinatorMessage::Shares { id, shares } => { - if self.verify_id(&id).is_err() { - return None; - } - - let (machine, our_shares) = match self.signing.remove(&id.id) { - // Rebooted, RPC error, or some invariant - None => { - // If preprocessing has this ID, it means we were never sent the preprocess by the - // coordinator - if self.preprocessing.contains_key(&id.id) { - panic!("never preprocessed yet signing?"); - } - - warn!( - "not preprocessing for {}. 
this is an error if we didn't reboot", - hex::encode(id.id) - ); - return None; - } - Some(machine) => machine, - }; - - let mut parsed = HashMap::new(); - for l in { - let mut keys = shares.keys().copied().collect::>(); - keys.sort(); - keys - } { - let mut share_ref = shares.get(&l).unwrap().as_slice(); - let Ok(res) = machine.read_share(&mut share_ref) else { - return Some(ProcessorMessage::InvalidParticipant { id, participant: l }); - }; - if !share_ref.is_empty() { - return Some(ProcessorMessage::InvalidParticipant { id, participant: l }); - } - parsed.insert(l, res); - } - let mut shares = parsed; - - for (i, our_share) in our_shares.into_iter().enumerate().skip(1) { - assert!(shares.insert(self.keys[i].params().i(), our_share).is_none()); - } - - let completion = match machine.complete(shares) { - Ok(res) => res, - Err(e) => match e { - FrostError::InternalError(_) | - FrostError::InvalidParticipant(_, _) | - FrostError::InvalidSigningSet(_) | - FrostError::InvalidParticipantQuantity(_, _) | - FrostError::DuplicatedParticipant(_) | - FrostError::MissingParticipant(_) => unreachable!(), - - FrostError::InvalidPreprocess(l) | FrostError::InvalidShare(l) => { - return Some(ProcessorMessage::InvalidParticipant { id, participant: l }) - } - }, - }; - - // Save the completion in case it's needed for recovery - CompletionsDb::complete::(txn, id.id, &completion); - - // Publish it - if let Err(e) = self.network.publish_completion(&completion).await { - error!("couldn't publish completion for plan {}: {:?}", hex::encode(id.id), e); - } else { - info!("published completion for plan {}", hex::encode(id.id)); - } - - // Stop trying to sign for this TX - Some(self.complete(id.id, &N::Eventuality::claim(&completion))) - } - - CoordinatorMessage::Reattempt { id } => self.attempt(txn, id.id, id.attempt).await, - - CoordinatorMessage::Completed { session: _, id, tx: mut claim_vec } => { - let mut claim = ::Claim::default(); - if claim.as_ref().len() != claim_vec.len() { - let true_len = claim_vec.len(); - claim_vec.truncate(2 * claim.as_ref().len()); - warn!( - "a validator claimed {}... (actual length {}) completed {} yet {}", - hex::encode(&claim_vec), - true_len, - hex::encode(id), - "that's not a valid Claim", - ); - return None; - } - claim.as_mut().copy_from_slice(&claim_vec); - - self.claimed_eventuality_completion(txn, id, &claim).await - } - } - } -} diff --git a/processor/src/slash_report_signer.rs b/processor/src/slash_report_signer.rs deleted file mode 100644 index b7b2d55ce..000000000 --- a/processor/src/slash_report_signer.rs +++ /dev/null @@ -1,293 +0,0 @@ -use core::fmt; -use std::collections::HashMap; - -use rand_core::OsRng; - -use frost::{ - curve::Ristretto, - ThresholdKeys, FrostError, - algorithm::Algorithm, - sign::{ - Writable, PreprocessMachine, SignMachine, SignatureMachine, AlgorithmMachine, - AlgorithmSignMachine, AlgorithmSignatureMachine, - }, -}; -use frost_schnorrkel::Schnorrkel; - -use log::{info, warn}; - -use serai_client::{ - Public, - primitives::NetworkId, - validator_sets::primitives::{Session, ValidatorSet, report_slashes_message}, -}; - -use messages::coordinator::*; -use crate::{Get, DbTxn, create_db}; - -create_db! 
{ - SlashReportSignerDb { - Completed: (session: Session) -> (), - Attempt: (session: Session, attempt: u32) -> (), - } -} - -type Preprocess = as PreprocessMachine>::Preprocess; -type SignatureShare = as SignMachine< - >::Signature, ->>::SignatureShare; - -pub struct SlashReportSigner { - network: NetworkId, - session: Session, - keys: Vec>, - report: Vec<([u8; 32], u32)>, - - attempt: u32, - #[allow(clippy::type_complexity)] - preprocessing: Option<(Vec>, Vec)>, - #[allow(clippy::type_complexity)] - signing: Option<(AlgorithmSignatureMachine, Vec)>, -} - -impl fmt::Debug for SlashReportSigner { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt - .debug_struct("SlashReportSigner") - .field("session", &self.session) - .field("report", &self.report) - .field("attempt", &self.attempt) - .field("preprocessing", &self.preprocessing.is_some()) - .field("signing", &self.signing.is_some()) - .finish_non_exhaustive() - } -} - -impl SlashReportSigner { - pub fn new( - txn: &mut impl DbTxn, - network: NetworkId, - session: Session, - keys: Vec>, - report: Vec<([u8; 32], u32)>, - attempt: u32, - ) -> Option<(SlashReportSigner, ProcessorMessage)> { - assert!(!keys.is_empty()); - - if Completed::get(txn, session).is_some() { - return None; - } - - if Attempt::get(txn, session, attempt).is_some() { - warn!( - "already attempted signing slash report for session {:?}, attempt #{}. {}", - session, attempt, "this is an error if we didn't reboot", - ); - return None; - } - Attempt::set(txn, session, attempt, &()); - - info!("signing slash report for session {:?} with attempt #{}", session, attempt); - - let mut machines = vec![]; - let mut preprocesses = vec![]; - let mut serialized_preprocesses = vec![]; - for keys in &keys { - // b"substrate" is a literal from sp-core - let machine = AlgorithmMachine::new(Schnorrkel::new(b"substrate"), keys.clone()); - - let (machine, preprocess) = machine.preprocess(&mut OsRng); - machines.push(machine); - serialized_preprocesses.push(preprocess.serialize().try_into().unwrap()); - preprocesses.push(preprocess); - } - let preprocessing = Some((machines, preprocesses)); - - let substrate_sign_id = - SubstrateSignId { session, id: SubstrateSignableId::SlashReport, attempt }; - - Some(( - SlashReportSigner { network, session, keys, report, attempt, preprocessing, signing: None }, - ProcessorMessage::SlashReportPreprocess { - id: substrate_sign_id, - preprocesses: serialized_preprocesses, - }, - )) - } - - #[must_use] - pub fn handle( - &mut self, - txn: &mut impl DbTxn, - msg: CoordinatorMessage, - ) -> Option { - match msg { - CoordinatorMessage::CosignSubstrateBlock { .. } => { - panic!("SlashReportSigner passed CosignSubstrateBlock") - } - - CoordinatorMessage::SignSlashReport { .. } => { - panic!("SlashReportSigner passed SignSlashReport") - } - - CoordinatorMessage::SubstratePreprocesses { id, preprocesses } => { - assert_eq!(id.session, self.session); - assert_eq!(id.id, SubstrateSignableId::SlashReport); - if id.attempt != self.attempt { - panic!("given preprocesses for a distinct attempt than SlashReportSigner is signing") - } - - let (machines, our_preprocesses) = match self.preprocessing.take() { - // Either rebooted or RPC error, or some invariant - None => { - warn!("not preprocessing. 
this is an error if we didn't reboot"); - return None; - } - Some(preprocess) => preprocess, - }; - - let mut parsed = HashMap::new(); - for l in { - let mut keys = preprocesses.keys().copied().collect::>(); - keys.sort(); - keys - } { - let mut preprocess_ref = preprocesses.get(&l).unwrap().as_slice(); - let Ok(res) = machines[0].read_preprocess(&mut preprocess_ref) else { - return Some(ProcessorMessage::InvalidParticipant { id, participant: l }); - }; - if !preprocess_ref.is_empty() { - return Some(ProcessorMessage::InvalidParticipant { id, participant: l }); - } - parsed.insert(l, res); - } - let preprocesses = parsed; - - // Only keep a single machine as we only need one to get the signature - let mut signature_machine = None; - let mut shares = vec![]; - let mut serialized_shares = vec![]; - for (m, machine) in machines.into_iter().enumerate() { - let mut preprocesses = preprocesses.clone(); - for (i, our_preprocess) in our_preprocesses.clone().into_iter().enumerate() { - if i != m { - assert!(preprocesses.insert(self.keys[i].params().i(), our_preprocess).is_none()); - } - } - - let (machine, share) = match machine.sign( - preprocesses, - &report_slashes_message( - &ValidatorSet { network: self.network, session: self.session }, - &self - .report - .clone() - .into_iter() - .map(|(validator, points)| (Public(validator), points)) - .collect::>(), - ), - ) { - Ok(res) => res, - Err(e) => match e { - FrostError::InternalError(_) | - FrostError::InvalidParticipant(_, _) | - FrostError::InvalidSigningSet(_) | - FrostError::InvalidParticipantQuantity(_, _) | - FrostError::DuplicatedParticipant(_) | - FrostError::MissingParticipant(_) => unreachable!(), - - FrostError::InvalidPreprocess(l) | FrostError::InvalidShare(l) => { - return Some(ProcessorMessage::InvalidParticipant { id, participant: l }) - } - }, - }; - if m == 0 { - signature_machine = Some(machine); - } - - let mut share_bytes = [0; 32]; - share_bytes.copy_from_slice(&share.serialize()); - serialized_shares.push(share_bytes); - - shares.push(share); - } - self.signing = Some((signature_machine.unwrap(), shares)); - - // Broadcast our shares - Some(ProcessorMessage::SubstrateShare { id, shares: serialized_shares }) - } - - CoordinatorMessage::SubstrateShares { id, shares } => { - assert_eq!(id.session, self.session); - assert_eq!(id.id, SubstrateSignableId::SlashReport); - if id.attempt != self.attempt { - panic!("given preprocesses for a distinct attempt than SlashReportSigner is signing") - } - - let (machine, our_shares) = match self.signing.take() { - // Rebooted, RPC error, or some invariant - None => { - // If preprocessing has this ID, it means we were never sent the preprocess by the - // coordinator - if self.preprocessing.is_some() { - panic!("never preprocessed yet signing?"); - } - - warn!("not preprocessing. 
this is an error if we didn't reboot"); - return None; - } - Some(signing) => signing, - }; - - let mut parsed = HashMap::new(); - for l in { - let mut keys = shares.keys().copied().collect::>(); - keys.sort(); - keys - } { - let mut share_ref = shares.get(&l).unwrap().as_slice(); - let Ok(res) = machine.read_share(&mut share_ref) else { - return Some(ProcessorMessage::InvalidParticipant { id, participant: l }); - }; - if !share_ref.is_empty() { - return Some(ProcessorMessage::InvalidParticipant { id, participant: l }); - } - parsed.insert(l, res); - } - let mut shares = parsed; - - for (i, our_share) in our_shares.into_iter().enumerate().skip(1) { - assert!(shares.insert(self.keys[i].params().i(), our_share).is_none()); - } - - let sig = match machine.complete(shares) { - Ok(res) => res, - Err(e) => match e { - FrostError::InternalError(_) | - FrostError::InvalidParticipant(_, _) | - FrostError::InvalidSigningSet(_) | - FrostError::InvalidParticipantQuantity(_, _) | - FrostError::DuplicatedParticipant(_) | - FrostError::MissingParticipant(_) => unreachable!(), - - FrostError::InvalidPreprocess(l) | FrostError::InvalidShare(l) => { - return Some(ProcessorMessage::InvalidParticipant { id, participant: l }) - } - }, - }; - - info!("signed slash report for session {:?} with attempt #{}", self.session, id.attempt); - - Completed::set(txn, self.session, &()); - - Some(ProcessorMessage::SignedSlashReport { - session: self.session, - signature: sig.to_bytes().to_vec(), - }) - } - CoordinatorMessage::BatchReattempt { .. } => { - panic!("BatchReattempt passed to SlashReportSigner") - } - } - } -} diff --git a/processor/view-keys/Cargo.toml b/processor/view-keys/Cargo.toml new file mode 100644 index 000000000..6fdd91346 --- /dev/null +++ b/processor/view-keys/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "serai-processor-view-keys" +version = "0.1.0" +description = "View keys for the Serai processor" +license = "MIT" +repository = "https://github.com/serai-dex/serai/tree/develop/processor/view-keys" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +ciphersuite = { version = "0.4", path = "../../crypto/ciphersuite", default-features = false, features = ["std"] } diff --git a/processor/view-keys/LICENSE b/processor/view-keys/LICENSE new file mode 100644 index 000000000..91d893c11 --- /dev/null +++ b/processor/view-keys/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022-2024 Luke Parker + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/processor/view-keys/README.md b/processor/view-keys/README.md new file mode 100644 index 000000000..4354eed6b --- /dev/null +++ b/processor/view-keys/README.md @@ -0,0 +1,6 @@ +# Serai Processor View Keys + +View keys for the Serai processor. + +This is a MIT-licensed library made available for anyone to generate Serai's +view keys, as necessary for auditing reasons and for sending coins to Serai. diff --git a/processor/view-keys/src/lib.rs b/processor/view-keys/src/lib.rs new file mode 100644 index 000000000..c0d4c68e0 --- /dev/null +++ b/processor/view-keys/src/lib.rs @@ -0,0 +1,13 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] +#![deny(missing_docs)] + +use ciphersuite::Ciphersuite; + +/// Generate a view key for usage within Serai. +/// +/// `k` is the index of the key to generate (enabling generating multiple view keys within a +/// single context). +pub fn view_key(k: u64) -> C::F { + C::hash_to_F(b"Serai DEX View Key", &k.to_le_bytes()) +} diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 73cb338ca..d99e65884 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,5 +1,5 @@ [toolchain] -channel = "1.80" +channel = "1.81" targets = ["wasm32-unknown-unknown"] profile = "minimal" components = ["rust-src", "rustfmt", "clippy"] diff --git a/spec/processor/Multisig Rotation.md b/spec/processor/Multisig Rotation.md index ff5c3d286..867080259 100644 --- a/spec/processor/Multisig Rotation.md +++ b/spec/processor/Multisig Rotation.md @@ -12,11 +12,11 @@ The following timeline is established: 1) The new multisig is created, and has its keys set on Serai. Once the next `Batch` with a new external network block is published, its block becomes the "queue block". The new multisig is set to activate at the "queue block", plus - `CONFIRMATIONS` blocks (the "activation block"). + `WINDOW_LENGTH` blocks (the "activation block"). We don't use the last `Batch`'s external network block, as that `Batch` may - be older than `CONFIRMATIONS` blocks. Any yet-to-be-included-and-finalized - `Batch` will be within `CONFIRMATIONS` blocks of what any processor has + be older than `WINDOW_LENGTH` blocks. Any yet-to-be-included-and-finalized + `Batch` will be within `WINDOW_LENGTH` blocks of what any processor has scanned however, as it'll wait for inclusion and finalization before continuing scanning. @@ -102,7 +102,8 @@ The following timeline is established: 5) For the next 6 hours, all non-`Branch` outputs received are immediately forwarded to the new multisig. Only external transactions to the new multisig - are included in `Batch`s. + are included in `Batch`s. Any outputs not yet transferred as change are + explicitly transferred. The new multisig infers the `InInstruction`, and refund address, for forwarded `External` outputs via reading what they were for the original @@ -121,7 +122,7 @@ The following timeline is established: Once all the 6 hour period has expired, no `Eventuality`s remain, and all outputs are forwarded, the multisig publishes a final `Batch` of the first - block, plus `CONFIRMATIONS`, which met these conditions, regardless of if it + block, plus `WINDOW_LENGTH`, which met these conditions, regardless of if it would've otherwise had a `Batch`. 
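   (Illustratively: if those conditions are first satisfied at external network block `N`, this
   final `Batch` is the one for block `N + WINDOW_LENGTH`.)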
No further actions by it, nor its validators, are expected (unless, of course, those validators remain present in the new multisig). diff --git a/substrate/client/Cargo.toml b/substrate/client/Cargo.toml index e653c9af6..f59c70feb 100644 --- a/substrate/client/Cargo.toml +++ b/substrate/client/Cargo.toml @@ -24,6 +24,7 @@ bitvec = { version = "1", default-features = false, features = ["alloc", "serde" hex = "0.4" scale = { package = "parity-scale-codec", version = "3" } +borsh = { version = "1", features = ["derive"] } serde = { version = "1", features = ["derive"], optional = true } serde_json = { version = "1", optional = true } @@ -41,7 +42,7 @@ simple-request = { path = "../../common/request", version = "0.1", optional = tr bitcoin = { version = "0.32", optional = true } ciphersuite = { path = "../../crypto/ciphersuite", version = "0.4", optional = true } -monero-wallet = { path = "../../networks/monero/wallet", version = "0.1.0", default-features = false, features = ["std"], optional = true } +monero-address = { path = "../../networks/monero/wallet/address", version = "0.1.0", default-features = false, features = ["std"], optional = true } [dev-dependencies] rand_core = "0.6" @@ -64,7 +65,8 @@ borsh = ["serai-abi/borsh"] networks = [] bitcoin = ["networks", "dep:bitcoin"] -monero = ["networks", "ciphersuite/ed25519", "monero-wallet"] +ethereum = ["networks"] +monero = ["networks", "ciphersuite/ed25519", "monero-address"] # Assumes the default usage is to use Serai as a DEX, which doesn't actually # require connecting to a Serai node diff --git a/substrate/client/src/networks/bitcoin.rs b/substrate/client/src/networks/bitcoin.rs index 502bfb440..28f660536 100644 --- a/substrate/client/src/networks/bitcoin.rs +++ b/substrate/client/src/networks/bitcoin.rs @@ -1,6 +1,7 @@ use core::{str::FromStr, fmt}; use scale::{Encode, Decode}; +use borsh::{BorshSerialize, BorshDeserialize}; use bitcoin::{ hashes::{Hash as HashTrait, hash160::Hash}, @@ -10,47 +11,10 @@ use bitcoin::{ address::{AddressType, NetworkChecked, Address as BAddress}, }; -#[derive(Clone, Eq, Debug)] -pub struct Address(ScriptBuf); - -impl PartialEq for Address { - fn eq(&self, other: &Self) -> bool { - // Since Serai defines the Bitcoin-address specification as a variant of the script alone, - // define equivalency as the script alone - self.0 == other.0 - } -} - -impl From
for ScriptBuf { - fn from(addr: Address) -> ScriptBuf { - addr.0 - } -} - -impl FromStr for Address { - type Err = (); - fn from_str(str: &str) -> Result { - Address::new( - BAddress::from_str(str) - .map_err(|_| ())? - .require_network(Network::Bitcoin) - .map_err(|_| ())? - .script_pubkey(), - ) - .ok_or(()) - } -} - -impl fmt::Display for Address { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - BAddress::::from_script(&self.0, Network::Bitcoin) - .map_err(|_| fmt::Error)? - .fmt(f) - } -} +use crate::primitives::ExternalAddress; -// SCALE-encoded variant of Monero addresses. -#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] +// SCALE-encodable representation of Bitcoin addresses, used internally. +#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, BorshSerialize, BorshDeserialize)] enum EncodedAddress { P2PKH([u8; 20]), P2SH([u8; 20]), @@ -59,34 +23,13 @@ enum EncodedAddress { P2TR([u8; 32]), } -impl TryFrom> for Address { +impl TryFrom<&ScriptBuf> for EncodedAddress { type Error = (); - fn try_from(data: Vec) -> Result { - Ok(Address(match EncodedAddress::decode(&mut data.as_ref()).map_err(|_| ())? { - EncodedAddress::P2PKH(hash) => { - ScriptBuf::new_p2pkh(&PubkeyHash::from_raw_hash(Hash::from_byte_array(hash))) - } - EncodedAddress::P2SH(hash) => { - ScriptBuf::new_p2sh(&ScriptHash::from_raw_hash(Hash::from_byte_array(hash))) - } - EncodedAddress::P2WPKH(hash) => { - ScriptBuf::new_witness_program(&WitnessProgram::new(WitnessVersion::V0, &hash).unwrap()) - } - EncodedAddress::P2WSH(hash) => { - ScriptBuf::new_witness_program(&WitnessProgram::new(WitnessVersion::V0, &hash).unwrap()) - } - EncodedAddress::P2TR(key) => { - ScriptBuf::new_witness_program(&WitnessProgram::new(WitnessVersion::V1, &key).unwrap()) - } - })) - } -} - -fn try_to_vec(addr: &Address) -> Result, ()> { - let parsed_addr = - BAddress::::from_script(&addr.0, Network::Bitcoin).map_err(|_| ())?; - Ok( - (match parsed_addr.address_type() { + fn try_from(script_buf: &ScriptBuf) -> Result { + // This uses mainnet as our encodings don't specify a network. + let parsed_addr = + BAddress::::from_script(script_buf, Network::Bitcoin).map_err(|_| ())?; + Ok(match parsed_addr.address_type() { Some(AddressType::P2pkh) => { EncodedAddress::P2PKH(*parsed_addr.pubkey_hash().unwrap().as_raw_hash().as_byte_array()) } @@ -110,23 +53,119 @@ fn try_to_vec(addr: &Address) -> Result, ()> { } _ => Err(())?, }) - .encode(), - ) + } } -impl From
for Vec { - fn from(addr: Address) -> Vec { +impl From for ScriptBuf { + fn from(encoded: EncodedAddress) -> Self { + match encoded { + EncodedAddress::P2PKH(hash) => { + ScriptBuf::new_p2pkh(&PubkeyHash::from_raw_hash(Hash::from_byte_array(hash))) + } + EncodedAddress::P2SH(hash) => { + ScriptBuf::new_p2sh(&ScriptHash::from_raw_hash(Hash::from_byte_array(hash))) + } + EncodedAddress::P2WPKH(hash) => { + ScriptBuf::new_witness_program(&WitnessProgram::new(WitnessVersion::V0, &hash).unwrap()) + } + EncodedAddress::P2WSH(hash) => { + ScriptBuf::new_witness_program(&WitnessProgram::new(WitnessVersion::V0, &hash).unwrap()) + } + EncodedAddress::P2TR(key) => { + ScriptBuf::new_witness_program(&WitnessProgram::new(WitnessVersion::V1, &key).unwrap()) + } + } + } +} + +/// A Bitcoin address usable with Serai. +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct Address(ScriptBuf); + +// Support consuming into the underlying ScriptBuf. +impl From
for ScriptBuf { + fn from(addr: Address) -> ScriptBuf { + addr.0 + } +} + +impl From<&Address> for BAddress { + fn from(addr: &Address) -> BAddress { + // This fails if the script doesn't have an address representation, yet all our representable + // addresses' scripts do + BAddress::::from_script(&addr.0, Network::Bitcoin).unwrap() + } +} + +// Support converting a string into an address. +impl FromStr for Address { + type Err = (); + fn from_str(str: &str) -> Result { + Address::new( + BAddress::from_str(str) + .map_err(|_| ())? + .require_network(Network::Bitcoin) + .map_err(|_| ())? + .script_pubkey(), + ) + .ok_or(()) + } +} + +// Support converting an address into a string. +impl fmt::Display for Address { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + BAddress::from(self).fmt(f) + } +} + +impl TryFrom for Address { + type Error = (); + fn try_from(data: ExternalAddress) -> Result { + // Decode as an EncodedAddress, then map to a ScriptBuf + let mut data = data.as_ref(); + let encoded = EncodedAddress::decode(&mut data).map_err(|_| ())?; + if !data.is_empty() { + Err(())? + } + Ok(Address(ScriptBuf::from(encoded))) + } +} + +impl From
<Address> for EncodedAddress {
+  fn from(addr: Address) -> EncodedAddress {
     // Safe since only encodable addresses can be created
-    try_to_vec(&addr).unwrap()
+    EncodedAddress::try_from(&addr.0).unwrap()
+  }
+}
+
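As review context, a minimal sketch (not part of the patch) of the round-trip these conversions are intended to support. The `serai_client::networks::bitcoin::Address` and `serai_client::primitives::ExternalAddress` paths, plus the `bitcoin` feature, are assumptions from the surrounding file layout; the address string is the standard BIP-173 mainnet P2WPKH test vector:

```rust
use core::str::FromStr;

use serai_client::{primitives::ExternalAddress, networks::bitcoin::Address};

fn main() {
  // Only script kinds representable as EncodedAddress construct successfully.
  let addr = Address::from_str("bc1qw508d6qejxtdg4y5r3zarvary0c5xw7kv8f3t4").unwrap();

  // Round-trip through the bounded, SCALE-encoded on-chain representation.
  let external = ExternalAddress::from(addr.clone());
  assert_eq!(Address::try_from(external).unwrap(), addr);
}
```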
+impl From<Address> for ExternalAddress {
+  fn from(addr: Address) -> ExternalAddress {
+    // Safe since all variants are fixed-length and fit into MAX_ADDRESS_LEN
+    ExternalAddress::new(EncodedAddress::from(addr).encode()).unwrap()
+  }
+}
+
+impl BorshSerialize for Address {
+  fn serialize<W: borsh::io::Write>(&self, writer: &mut W) -> borsh::io::Result<()> {
+    EncodedAddress::from(self.clone()).serialize(writer)
+  }
+}
+
+impl BorshDeserialize for Address {
+  fn deserialize_reader<R: borsh::io::Read>(reader: &mut R) -> borsh::io::Result<Self> {
+    Ok(Self(ScriptBuf::from(EncodedAddress::deserialize_reader(reader)?)))
+  }
+}
 
 impl Address {
-  pub fn new(address: ScriptBuf) -> Option<Self> {
-    let res = Self(address);
-    if try_to_vec(&res).is_ok() {
-      return Some(res);
+  /// Create a new Address from a ScriptBuf.
+  pub fn new(script_buf: ScriptBuf) -> Option<Self> {
+    // If we can represent this Script, it's an acceptable address
+    if EncodedAddress::try_from(&script_buf).is_ok() {
+      return Some(Self(script_buf));
     }
+    // Else, it isn't acceptable
     None
   }
 }
diff --git a/substrate/client/src/networks/ethereum.rs b/substrate/client/src/networks/ethereum.rs
new file mode 100644
index 000000000..47b58af56
--- /dev/null
+++ b/substrate/client/src/networks/ethereum.rs
@@ -0,0 +1,129 @@
+use core::str::FromStr;
+use std::io::Read;
+
+use borsh::{BorshSerialize, BorshDeserialize};
+
+use crate::primitives::{MAX_ADDRESS_LEN, ExternalAddress};
+
+/// The maximum amount of gas an address is allowed to specify as its gas limit.
+///
+/// Payments to an address with a gas limit which exceeds this value will be dropped entirely.
+pub const ADDRESS_GAS_LIMIT: u32 = 950_000;
+
+/// A contract to deploy, enabling executing arbitrary code.
+#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
+pub struct ContractDeployment {
+  /// The gas limit to use for this contract's execution.
+  ///
+  /// This MUST be less than the Serai gas limit. The cost of it will be deducted from the amount
+  /// transferred.
+  gas_limit: u32,
+  /// The initialization code of the contract to deploy.
+  ///
+  /// This contract will be deployed (executing the initialization code). No further calls will
+  /// be made.
+  code: Vec<u8>,
+}
+
+impl ContractDeployment {
+  pub fn new(gas_limit: u32, code: Vec<u8>) -> Option<Self> {
+    // Check the gas limit doesn't exceed the address gas limit
+    if gas_limit > ADDRESS_GAS_LIMIT {
+      None?;
+    }
+
+    // The max address length, minus the type byte, minus the size of the gas
+    const MAX_CODE_LEN: usize = (MAX_ADDRESS_LEN as usize) - (1 + core::mem::size_of::<u32>());
+    if code.len() > MAX_CODE_LEN {
+      None?;
+    }
+
+    Some(Self { gas_limit, code })
+  }
+
+  pub fn gas_limit(&self) -> u32 {
+    self.gas_limit
+  }
+  pub fn code(&self) -> &[u8] {
+    &self.code
+  }
+}
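A minimal sketch (again, not part of the patch) of the bounds `ContractDeployment::new` enforces, assuming the module is exposed as `serai_client::networks::ethereum` with the `ethereum` feature enabled:

```rust
use serai_client::networks::ethereum::{ADDRESS_GAS_LIMIT, ContractDeployment};

fn main() {
  // A small gas limit with a little init code is within both bounds.
  let deployment = ContractDeployment::new(100_000, vec![0x60, 0x00]).unwrap();
  assert_eq!(deployment.gas_limit(), 100_000);
  assert_eq!(deployment.code(), [0x60_u8, 0x00].as_slice());

  // Exceeding ADDRESS_GAS_LIMIT is rejected outright, so such a payment would be dropped.
  assert!(ContractDeployment::new(ADDRESS_GAS_LIMIT + 1, vec![]).is_none());
}
```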
+
+/// A representation of an Ethereum address.
+#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
+pub enum Address {
+  /// A traditional address.
+  Address([u8; 20]),
+  /// A contract to deploy, enabling executing arbitrary code.
+  Contract(ContractDeployment),
+}
+
+impl From<[u8; 20]> for Address {
+  fn from(address: [u8; 20]) -> Self {
+    Address::Address(address)
+  }
+}
+
+impl TryFrom<ExternalAddress> for Address {
+  type Error = ();
+  fn try_from(data: ExternalAddress) -> Result<Self, Self::Error> {
+    let mut kind = [0xff];
+    let mut reader: &[u8] = data.as_ref();
+    reader.read_exact(&mut kind).map_err(|_| ())?;
+    Ok(match kind[0] {
+      0 => {
+        let mut address = [0xff; 20];
+        reader.read_exact(&mut address).map_err(|_| ())?;
+        Address::Address(address)
+      }
+      1 => {
+        let mut gas_limit = [0xff; 4];
+        reader.read_exact(&mut gas_limit).map_err(|_| ())?;
+        Address::Contract(ContractDeployment {
+          gas_limit: {
+            let gas_limit = u32::from_le_bytes(gas_limit);
+            if gas_limit > ADDRESS_GAS_LIMIT {
+              Err(())?;
+            }
+            gas_limit
+          },
+          // The code is whatever's left since the ExternalAddress is a delimited container of
+          // appropriately bounded length
+          code: reader.to_vec(),
+        })
+      }
+      _ => Err(())?,
+    })
+  }
+}
+impl From<Address>
for ExternalAddress { + fn from(address: Address) -> ExternalAddress { + let mut res = Vec::with_capacity(1 + 20); + match address { + Address::Address(address) => { + res.push(0); + res.extend(&address); + } + Address::Contract(ContractDeployment { gas_limit, code }) => { + res.push(1); + res.extend(&gas_limit.to_le_bytes()); + res.extend(&code); + } + } + // We only construct addresses whose code is small enough this can safely be constructed + ExternalAddress::new(res).unwrap() + } +} + +impl FromStr for Address { + type Err = (); + fn from_str(str: &str) -> Result { + let Some(address) = str.strip_prefix("0x") else { Err(())? }; + if address.len() != 40 { + Err(())? + }; + Ok(Address::Address( + hex::decode(address.to_lowercase()).map_err(|_| ())?.try_into().map_err(|_| ())?, + )) + } +} diff --git a/substrate/client/src/networks/mod.rs b/substrate/client/src/networks/mod.rs index 63ebf481a..7a99631a4 100644 --- a/substrate/client/src/networks/mod.rs +++ b/substrate/client/src/networks/mod.rs @@ -1,5 +1,8 @@ #[cfg(feature = "bitcoin")] pub mod bitcoin; +#[cfg(feature = "ethereum")] +pub mod ethereum; + #[cfg(feature = "monero")] pub mod monero; diff --git a/substrate/client/src/networks/monero.rs b/substrate/client/src/networks/monero.rs index bd5e0a15c..c99a0abdd 100644 --- a/substrate/client/src/networks/monero.rs +++ b/substrate/client/src/networks/monero.rs @@ -1,102 +1,141 @@ use core::{str::FromStr, fmt}; -use scale::{Encode, Decode}; - use ciphersuite::{Ciphersuite, Ed25519}; -use monero_wallet::address::{AddressError, Network, AddressType, MoneroAddress}; +use monero_address::{Network, AddressType as MoneroAddressType, MoneroAddress}; + +use crate::primitives::ExternalAddress; + +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +enum AddressType { + Legacy, + Subaddress, + Featured(u8), +} + +/// A representation of a Monero address. +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub struct Address { + kind: AddressType, + spend: ::G, + view: ::G, +} -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct Address(MoneroAddress); -impl Address { - pub fn new(address: MoneroAddress) -> Option
{ - if address.payment_id().is_some() { - return None; +fn byte_for_kind(kind: AddressType) -> u8 { + // We use the second and third highest bits for the type + // This leaves the top bit open for interpretation as a VarInt later + match kind { + AddressType::Legacy => 0, + AddressType::Subaddress => 1 << 5, + AddressType::Featured(flags) => { + // The flags only take up the low three bits + debug_assert!(flags <= 0b111); + (2 << 5) | flags } - Some(Address(address)) } } -impl FromStr for Address { - type Err = AddressError; - fn from_str(str: &str) -> Result { - MoneroAddress::from_str(Network::Mainnet, str).map(Address) +impl borsh::BorshSerialize for Address { + fn serialize(&self, writer: &mut W) -> borsh::io::Result<()> { + writer.write_all(&[byte_for_kind(self.kind)])?; + writer.write_all(&self.spend.compress().to_bytes())?; + writer.write_all(&self.view.compress().to_bytes()) } } - -impl fmt::Display for Address { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.0.fmt(f) +impl borsh::BorshDeserialize for Address { + fn deserialize_reader(reader: &mut R) -> borsh::io::Result { + let mut kind_byte = [0xff]; + reader.read_exact(&mut kind_byte)?; + let kind_byte = kind_byte[0]; + let kind = match kind_byte >> 5 { + 0 => AddressType::Legacy, + 1 => AddressType::Subaddress, + 2 => AddressType::Featured(kind_byte & 0b111), + _ => Err(borsh::io::Error::other("unrecognized type"))?, + }; + // Check this wasn't malleated + if byte_for_kind(kind) != kind_byte { + Err(borsh::io::Error::other("malleated type byte"))?; + } + let spend = Ed25519::read_G(reader)?; + let view = Ed25519::read_G(reader)?; + Ok(Self { kind, spend, view }) } } -// SCALE-encoded variant of Monero addresses. -#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] -enum EncodedAddressType { - Legacy, - Subaddress, - Featured(u8), +impl TryFrom for Address { + type Error = (); + fn try_from(address: MoneroAddress) -> Result { + let spend = address.spend().compress().to_bytes(); + let view = address.view().compress().to_bytes(); + let kind = match address.kind() { + MoneroAddressType::Legacy => AddressType::Legacy, + MoneroAddressType::LegacyIntegrated(_) => Err(())?, + MoneroAddressType::Subaddress => AddressType::Subaddress, + MoneroAddressType::Featured { subaddress, payment_id, guaranteed } => { + if payment_id.is_some() { + Err(())? + } + // This maintains the same bit layout as featured addresses use + AddressType::Featured(u8::from(*subaddress) + (u8::from(*guaranteed) << 2)) + } + }; + Ok(Address { + kind, + spend: Ed25519::read_G(&mut spend.as_slice()).map_err(|_| ())?, + view: Ed25519::read_G(&mut view.as_slice()).map_err(|_| ())?, + }) + } } -#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] -struct EncodedAddress { - kind: EncodedAddressType, - spend: [u8; 32], - view: [u8; 32], +impl From
for MoneroAddress { + fn from(address: Address) -> MoneroAddress { + let kind = match address.kind { + AddressType::Legacy => MoneroAddressType::Legacy, + AddressType::Subaddress => MoneroAddressType::Subaddress, + AddressType::Featured(features) => { + debug_assert!(features <= 0b111); + let subaddress = (features & 1) != 0; + let integrated = (features & (1 << 1)) != 0; + debug_assert!(!integrated); + let guaranteed = (features & (1 << 2)) != 0; + MoneroAddressType::Featured { subaddress, payment_id: None, guaranteed } + } + }; + MoneroAddress::new(Network::Mainnet, kind, address.spend.0, address.view.0) + } } -impl TryFrom> for Address { +impl TryFrom for Address { type Error = (); - fn try_from(data: Vec) -> Result { - // Decode as SCALE - let addr = EncodedAddress::decode(&mut data.as_ref()).map_err(|_| ())?; - // Convert over - Ok(Address(MoneroAddress::new( - Network::Mainnet, - match addr.kind { - EncodedAddressType::Legacy => AddressType::Legacy, - EncodedAddressType::Subaddress => AddressType::Subaddress, - EncodedAddressType::Featured(flags) => { - let subaddress = (flags & 1) != 0; - let integrated = (flags & (1 << 1)) != 0; - let guaranteed = (flags & (1 << 2)) != 0; - if integrated { - Err(())?; - } - AddressType::Featured { subaddress, payment_id: None, guaranteed } - } - }, - Ed25519::read_G::<&[u8]>(&mut addr.spend.as_ref()).map_err(|_| ())?.0, - Ed25519::read_G::<&[u8]>(&mut addr.view.as_ref()).map_err(|_| ())?.0, - ))) + fn try_from(data: ExternalAddress) -> Result { + // Decode as an Address + let mut data = data.as_ref(); + let address = +
      <Address as borsh::BorshDeserialize>::deserialize_reader(&mut data).map_err(|_| ())?;
+    if !data.is_empty() {
+      Err(())?
+    }
+    Ok(address)
+  }
+}
+impl From<Address>
for ExternalAddress { + fn from(address: Address) -> ExternalAddress { + // This is 65 bytes which is less than MAX_ADDRESS_LEN + ExternalAddress::new(borsh::to_vec(&address).unwrap()).unwrap() } } -#[allow(clippy::from_over_into)] -impl Into for Address { - fn into(self) -> MoneroAddress { - self.0 +impl FromStr for Address { + type Err = (); + fn from_str(str: &str) -> Result { + let Ok(address) = MoneroAddress::from_str(Network::Mainnet, str) else { Err(())? }; + Address::try_from(address) } } -#[allow(clippy::from_over_into)] -impl Into> for Address { - fn into(self) -> Vec { - EncodedAddress { - kind: match self.0.kind() { - AddressType::Legacy => EncodedAddressType::Legacy, - AddressType::LegacyIntegrated(_) => { - panic!("integrated address became Serai Monero address") - } - AddressType::Subaddress => EncodedAddressType::Subaddress, - AddressType::Featured { subaddress, payment_id, guaranteed } => { - debug_assert!(payment_id.is_none()); - EncodedAddressType::Featured(u8::from(*subaddress) + (u8::from(*guaranteed) << 2)) - } - }, - spend: self.0.spend().compress().0, - view: self.0.view().compress().0, - } - .encode() +impl fmt::Display for Address { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + MoneroAddress::from(*self).fmt(f) } } diff --git a/substrate/client/tests/burn.rs b/substrate/client/tests/burn.rs index a30dabec1..b8b849d3c 100644 --- a/substrate/client/tests/burn.rs +++ b/substrate/client/tests/burn.rs @@ -12,7 +12,7 @@ use sp_core::Pair; use serai_client::{ primitives::{ - Amount, NetworkId, Coin, Balance, BlockHash, SeraiAddress, Data, ExternalAddress, + Amount, NetworkId, Coin, Balance, BlockHash, SeraiAddress, ExternalAddress, insecure_pair_from_name, }, in_instructions::{ @@ -55,39 +55,35 @@ serai_test!( let block = provide_batch(&serai, batch.clone()).await; let instruction = { - let serai = serai.as_of(block); - let batches = serai.in_instructions().batch_events().await.unwrap(); - assert_eq!( - batches, - vec![InInstructionsEvent::Batch { - network, - id, - block: block_hash, - instructions_hash: Blake2b::::digest(batch.instructions.encode()).into(), - }] - ); - - assert_eq!( - serai.coins().mint_events().await.unwrap(), - vec![CoinsEvent::Mint { to: address, balance }] - ); - assert_eq!(serai.coins().coin_supply(coin).await.unwrap(), amount); - assert_eq!(serai.coins().coin_balance(coin, address).await.unwrap(), amount); - - // Now burn it - let mut rand_bytes = vec![0; 32]; - OsRng.fill_bytes(&mut rand_bytes); - let external_address = ExternalAddress::new(rand_bytes).unwrap(); - - let mut rand_bytes = vec![0; 32]; - OsRng.fill_bytes(&mut rand_bytes); - let data = Data::new(rand_bytes).unwrap(); - - OutInstructionWithBalance { - balance, - instruction: OutInstruction { address: external_address, data: Some(data) }, - } -}; + let serai = serai.as_of(block); + let batches = serai.in_instructions().batch_events().await.unwrap(); + assert_eq!( + batches, + vec![InInstructionsEvent::Batch { + network, + id, + block: block_hash, + instructions_hash: Blake2b::::digest(batch.instructions.encode()).into(), + }] + ); + + assert_eq!( + serai.coins().mint_events().await.unwrap(), + vec![CoinsEvent::Mint { to: address, balance }] + ); + assert_eq!(serai.coins().coin_supply(coin).await.unwrap(), amount); + assert_eq!(serai.coins().coin_balance(coin, address).await.unwrap(), amount); + + // Now burn it + let mut rand_bytes = vec![0; 32]; + OsRng.fill_bytes(&mut rand_bytes); + let external_address = ExternalAddress::new(rand_bytes).unwrap(); + + 
OutInstructionWithBalance { + balance, + instruction: OutInstruction { address: external_address }, + } + }; let block = publish_tx( &serai, diff --git a/substrate/coins/primitives/src/lib.rs b/substrate/coins/primitives/src/lib.rs index a7b45cf0c..53db73820 100644 --- a/substrate/coins/primitives/src/lib.rs +++ b/substrate/coins/primitives/src/lib.rs @@ -13,17 +13,17 @@ use serde::{Serialize, Deserialize}; use scale::{Encode, Decode, MaxEncodedLen}; use scale_info::TypeInfo; -use serai_primitives::{Balance, SeraiAddress, ExternalAddress, Data, system_address}; +use serai_primitives::{Balance, SeraiAddress, ExternalAddress, system_address}; pub const FEE_ACCOUNT: SeraiAddress = system_address(b"Coins-fees"); +// TODO: Replace entirely with just Address #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, MaxEncodedLen, TypeInfo)] #[cfg_attr(feature = "std", derive(Zeroize))] #[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] pub struct OutInstruction { pub address: ExternalAddress, - pub data: Option, } #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, MaxEncodedLen, TypeInfo)] diff --git a/substrate/in-instructions/pallet/src/lib.rs b/substrate/in-instructions/pallet/src/lib.rs index f90ae4122..1cb05c409 100644 --- a/substrate/in-instructions/pallet/src/lib.rs +++ b/substrate/in-instructions/pallet/src/lib.rs @@ -205,11 +205,7 @@ pub mod pallet { let coin_balance = Coins::::balance(IN_INSTRUCTION_EXECUTOR.into(), out_balance.coin); let instruction = OutInstructionWithBalance { - instruction: OutInstruction { - address: out_address.as_external().unwrap(), - // TODO: Properly pass data. Replace address with an OutInstruction entirely? - data: None, - }, + instruction: OutInstruction { address: out_address.as_external().unwrap() }, balance: Balance { coin: out_balance.coin, amount: coin_balance }, }; Coins::::burn_with_instruction(origin.into(), instruction)?; diff --git a/substrate/primitives/src/lib.rs b/substrate/primitives/src/lib.rs index d2c52219e..b2515a7e3 100644 --- a/substrate/primitives/src/lib.rs +++ b/substrate/primitives/src/lib.rs @@ -59,10 +59,7 @@ pub fn borsh_deserialize_bounded_vec &[u8] { - self.0.as_ref() - } - #[cfg(feature = "std")] pub fn consume(self) -> Vec { self.0.into_inner() @@ -106,51 +99,6 @@ impl AsRef<[u8]> for ExternalAddress { } } -// Should be enough for a Uniswap v3 call -pub const MAX_DATA_LEN: u32 = 512; -#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, MaxEncodedLen, TypeInfo)] -#[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub struct Data( - #[cfg_attr( - feature = "borsh", - borsh( - serialize_with = "borsh_serialize_bounded_vec", - deserialize_with = "borsh_deserialize_bounded_vec" - ) - )] - BoundedVec>, -); - -#[cfg(feature = "std")] -impl Zeroize for Data { - fn zeroize(&mut self) { - self.0.as_mut().zeroize() - } -} - -impl Data { - #[cfg(feature = "std")] - pub fn new(data: Vec) -> Result { - Ok(Data(data.try_into().map_err(|_| "data length exceeds {MAX_DATA_LEN}")?)) - } - - pub fn data(&self) -> &[u8] { - self.0.as_ref() - } - - #[cfg(feature = "std")] - pub fn consume(self) -> Vec { - self.0.into_inner() - } -} - -impl AsRef<[u8]> for Data { - fn as_ref(&self) -> &[u8] { - self.0.as_ref() - } -} - /// Lexicographically reverses a given byte array. 
pub fn reverse_lexicographic_order(bytes: [u8; N]) -> [u8; N] { let mut res = [0u8; N]; diff --git a/substrate/validator-sets/primitives/src/lib.rs b/substrate/validator-sets/primitives/src/lib.rs index 90d58c37c..341d211ff 100644 --- a/substrate/validator-sets/primitives/src/lib.rs +++ b/substrate/validator-sets/primitives/src/lib.rs @@ -103,7 +103,25 @@ pub fn set_keys_message(set: &ValidatorSet, key_pair: &KeyPair) -> Vec { (b"ValidatorSets-set_keys", set, key_pair).encode() } -pub fn report_slashes_message(set: &ValidatorSet, slashes: &[(Public, u32)]) -> Vec { +#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, Decode, TypeInfo, MaxEncodedLen)] +#[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct Slash { + #[cfg_attr( + feature = "borsh", + borsh( + serialize_with = "serai_primitives::borsh_serialize_public", + deserialize_with = "serai_primitives::borsh_deserialize_public" + ) + )] + key: Public, + points: u32, +} +#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, TypeInfo, MaxEncodedLen)] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct SlashReport(pub BoundedVec>); + +pub fn report_slashes_message(set: &ValidatorSet, slashes: &SlashReport) -> Vec { (b"ValidatorSets-report_slashes", set, slashes).encode() } diff --git a/tests/coordinator/src/tests/sign.rs b/tests/coordinator/src/tests/sign.rs index db8a72034..6e9142fec 100644 --- a/tests/coordinator/src/tests/sign.rs +++ b/tests/coordinator/src/tests/sign.rs @@ -247,7 +247,6 @@ async fn sign_test() { balance, instruction: OutInstruction { address: ExternalAddress::new(b"external".to_vec()).unwrap(), - data: None, }, }; serai diff --git a/tests/full-stack/Cargo.toml b/tests/full-stack/Cargo.toml index 12af01bdf..a9dbdc63a 100644 --- a/tests/full-stack/Cargo.toml +++ b/tests/full-stack/Cargo.toml @@ -34,7 +34,7 @@ scale = { package = "parity-scale-codec", version = "3" } serde = "1" serde_json = "1" -processor = { package = "serai-processor", path = "../../processor", features = ["bitcoin", "monero"] } +# processor = { package = "serai-processor", path = "../../processor", features = ["bitcoin", "monero"] } serai-client = { path = "../../substrate/client", features = ["serai"] } diff --git a/tests/full-stack/src/tests/mint_and_burn.rs b/tests/full-stack/src/tests/mint_and_burn.rs index ce19808fd..8987facc7 100644 --- a/tests/full-stack/src/tests/mint_and_burn.rs +++ b/tests/full-stack/src/tests/mint_and_burn.rs @@ -493,7 +493,7 @@ async fn mint_and_burn_test() { move |nonce, coin, amount, address| async move { let out_instruction = OutInstructionWithBalance { balance: Balance { coin, amount: Amount(amount) }, - instruction: OutInstruction { address, data: None }, + instruction: OutInstruction { address }, }; serai diff --git a/tests/processor/Cargo.toml b/tests/processor/Cargo.toml index f06e47419..c7267b55e 100644 --- a/tests/processor/Cargo.toml +++ b/tests/processor/Cargo.toml @@ -29,7 +29,6 @@ dkg = { path = "../../crypto/dkg", default-features = false, features = ["std"] bitcoin-serai = { path = "../../networks/bitcoin" } k256 = "0.13" -ethereum-serai = { path = "../../networks/ethereum" } monero-simple-request-rpc = { path = "../../networks/monero/rpc/simple-request" } monero-wallet = { path = "../../networks/monero/wallet" } @@ -46,7 +45,7 @@ serde_json = { version = "1", default-features = false } tokio = { version = "1", features = ["time"] } -processor = { package = "serai-processor", path = 
"../../processor", features = ["bitcoin", "ethereum", "monero"] } +# processor = { package = "serai-processor", path = "../../processor", features = ["bitcoin", "ethereum", "monero"] } dockertest = "0.5" serai-docker-tests = { path = "../docker" } diff --git a/tests/processor/src/tests/send.rs b/tests/processor/src/tests/send.rs index 8dfb53535..4c811e2b5 100644 --- a/tests/processor/src/tests/send.rs +++ b/tests/processor/src/tests/send.rs @@ -246,7 +246,7 @@ fn send_test() { }, block: substrate_block_num, burns: vec![OutInstructionWithBalance { - instruction: OutInstruction { address: wallet.address(), data: None }, + instruction: OutInstruction { address: wallet.address() }, balance: Balance { coin: balance_sent.coin, amount: amount_minted }, }], batches: vec![batch.batch.id],