diff --git a/.github/nightly-version b/.github/nightly-version
index 84376b179..6cb7e59b1 100644
--- a/.github/nightly-version
+++ b/.github/nightly-version
@@ -1 +1 @@
-nightly-2023-12-01
+nightly-2023-12-04
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index 420f3725c..167075552 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -75,3 +75,12 @@ jobs:
       - name: Verify Dockerfiles are up to date
         # Runs the file which generates them and checks the diff has no lines
         run: cd orchestration && ./dockerfiles.sh && git diff | wc -l | grep -x "0"
+
+  machete:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+      - name: Verify all dependencies are in use
+        run: |
+          cargo install cargo-machete
+          cargo machete
diff --git a/Cargo.lock b/Cargo.lock
index 9b349d366..144cf0418 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -73,9 +73,9 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.6" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91429305e9f0a25f6205c5b8e0d2db09e0708a7a6df0f42212bb56c32c8ac97a" +checksum = "77c3a9648d43b9cd48db467b3f87fdd6e146bcc88ab0180006cef2179fe11d01" dependencies = [ "cfg-if", "getrandom", @@ -125,9 +125,9 @@ dependencies = [ [[package]] name = "anstream" -version = "0.6.4" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ab91ebe16eb252986481c5b62f6098f3b698a45e34b5b98200cf20dd2484a44" +checksum = "d664a92ecae85fd0a7392615844904654d1d5f5514837f471ddef4a057aba1b6" dependencies = [ "anstyle", "anstyle-parse", @@ -154,9 +154,9 @@ dependencies = [ [[package]] name = "anstyle-query" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3a318f1f38d2418400f8209655bfd825785afd25aa30bb7ba6cc792e4596748" +checksum = "e28923312444cdd728e4738b3f9c9cac739500909bb3d3c94b43551b16517648" dependencies = [ "windows-sys 0.52.0", ] @@ -173,9 +173,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.75" +version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4668cab20f66d8d020e1fbc0ebe47217433c1b6c8f2040faf858554e394ace6" +checksum = "080e9890a082662b09c1ad45f567faeeb47f22b5fb23895fbe1e651e718e25ca" [[package]] name = "approx" @@ -262,9 +262,9 @@ dependencies = [ [[package]] name = "async-io" -version = "2.2.1" +version = "2.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6d3b15875ba253d1110c740755e246537483f152fa334f91abd7fe84c88b3ff" +checksum = "6afaa937395a620e33dc6a742c593c01aced20aa376ffb0f628121198578ccc7" dependencies = [ "async-lock", "cfg-if", @@ -285,20 +285,31 @@ version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7125e42787d53db9dd54261812ef17e937c95a51e4d291373b670342fa44310c" dependencies = [ - "event-listener 4.0.0", + "event-listener 4.0.1", "event-listener-strategy", "pin-project-lite 0.2.13", ] +[[package]] +name = "async-recursion" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5fd55a5ba1179988837d24ab4c7cc8ed6efdeff578ede0416b4225a5fca35bd0" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.47", +] + [[package]] name = "async-trait" -version = "0.1.74" +version = "0.1.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9" +checksum = 
"c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.47", ] [[package]] @@ -365,7 +376,7 @@ dependencies = [ "cfg-if", "libc", "miniz_oxide", - "object 0.32.1", + "object 0.32.2", "rustc-demangle", ] @@ -445,13 +456,13 @@ dependencies = [ "lazy_static", "lazycell", "peeking_take_while", - "prettyplease 0.2.15", + "prettyplease 0.2.16", "proc-macro2", "quote", "regex", "rustc-hash", "shlex", - "syn 2.0.39", + "syn 2.0.47", ] [[package]] @@ -661,9 +672,9 @@ dependencies = [ [[package]] name = "borsh" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf617fabf5cdbdc92f774bfe5062d870f228b80056d41180797abf48bed4056e" +checksum = "26d4d6dafc1a3bb54687538972158f07b2c948bc57d5890df22c0739098b3028" dependencies = [ "borsh-derive", "cfg_aliases", @@ -671,15 +682,15 @@ dependencies = [ [[package]] name = "borsh-derive" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f404657a7ea7b5249e36808dff544bc88a28f26e0ac40009f674b7a009d14be3" +checksum = "bf4918709cc4dd777ad2b6303ed03cb37f3ca0ccede8c1b0d28ac6db8f4710e0" dependencies = [ "once_cell", - "proc-macro-crate 2.0.0", + "proc-macro-crate 2.0.1", "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.47", "syn_derive", ] @@ -706,9 +717,9 @@ dependencies = [ [[package]] name = "bstr" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "542f33a8835a0884b006a0c3df3dadd99c0c3f296ed26c2fdc8028e01ad6230c" +checksum = "c48f0051a4b4c5e0b6d365cd04af53aeaa209e3cc15ec2cdb69e73cc87fbd0dc" dependencies = [ "memchr", "serde", @@ -778,9 +789,9 @@ dependencies = [ [[package]] name = "cargo-platform" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e34637b3140142bdf929fb439e8aa4ebad7651ebf7b1080b3930aa16ac1459ff" +checksum = "ceed8ef69d8518a5dda55c07425450b58a4e1946f4951eab6d7191ee86c2443d" dependencies = [ "serde", ] @@ -793,7 +804,7 @@ checksum = "e7daec1a2a2129eeba1644b220b4647ec537b0b5d4bfd6876fcc5a540056b592" dependencies = [ "camino", "cargo-platform", - "semver 1.0.20", + "semver 1.0.21", "serde", "serde_json", "thiserror", @@ -820,9 +831,9 @@ dependencies = [ [[package]] name = "cfg-expr" -version = "0.15.5" +version = "0.15.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03915af431787e6ffdcc74c645077518c6b6e01f80b761e0fbbfa288536311b3" +checksum = "6100bc57b6209840798d95cb2775684849d332f7bd788db2a8c8caf7ef82a41a" dependencies = [ "smallvec", ] @@ -927,9 +938,9 @@ dependencies = [ [[package]] name = "clang-sys" -version = "1.6.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c688fc74432808e3eb684cae8830a86be1d66a2bd58e1f248ed0960a590baf6f" +checksum = "67523a3b4be3ce1989d607a828d036249522dd9c1c8de7f4dd2dae43a37369d1" dependencies = [ "glob", "libc", @@ -938,9 +949,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.4.11" +version = "4.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfaff671f6b22ca62406885ece523383b9b64022e341e53e009a62ebc47a45f2" +checksum = "dcfab8ba68f3668e89f6ff60f5b205cea56aa7b769451a59f34b8682f51c056d" dependencies = [ "clap_builder", "clap_derive", @@ -948,9 +959,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.4.11" +version = "4.4.12" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a216b506622bb1d316cd51328dce24e07bdff4a6128a47c7e7fad11878d5adbb" +checksum = "fb7fb5e4e979aec3be7791562fcba452f94ad85e954da024396433e0e25a79e9" dependencies = [ "anstream", "anstyle", @@ -967,7 +978,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.47", ] [[package]] @@ -1015,9 +1026,9 @@ dependencies = [ [[package]] name = "const-oid" -version = "0.9.5" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28c122c3980598d243d63d9a704629a2d748d101f278052ff068be5a4423ab6f" +checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" [[package]] name = "const-random" @@ -1216,9 +1227,9 @@ dependencies = [ [[package]] name = "crossbeam-deque" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" +checksum = "fca89a0e215bab21874660c67903c5f143333cab1da83d041c7ded6053774751" dependencies = [ "cfg-if", "crossbeam-epoch", @@ -1227,22 +1238,20 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.15" +version = "0.9.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7" +checksum = "0e3681d554572a651dda4186cd47240627c3d0114d45a95f6ad27f2f22e7548d" dependencies = [ "autocfg", "cfg-if", "crossbeam-utils", - "memoffset", - "scopeguard", ] [[package]] name = "crossbeam-utils" -version = "0.8.16" +version = "0.8.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" +checksum = "c3a430a770ebd84726f584a90ee7f020d28db52c6d02138900f22341f866d39c" dependencies = [ "cfg-if", ] @@ -1312,14 +1321,14 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.47", ] [[package]] name = "cxx" -version = "1.0.110" +version = "1.0.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7129e341034ecb940c9072817cd9007974ea696844fc4dd582dc1653a7fbe2e8" +checksum = "2ed3a27153f220bb42b96005947ca3b87266cfdae5b4b4d703642c3a565e9708" dependencies = [ "cc", "cxxbridge-flags", @@ -1329,9 +1338,9 @@ dependencies = [ [[package]] name = "cxx-build" -version = "1.0.110" +version = "1.0.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2a24f3f5f8eed71936f21e570436f024f5c2e25628f7496aa7ccd03b90109d5" +checksum = "005721caedeb9869792e656d567695281c7e2bf2ac022d4ed95e5240b215f44d" dependencies = [ "cc", "codespan-reporting", @@ -1339,24 +1348,24 @@ dependencies = [ "proc-macro2", "quote", "scratch", - "syn 2.0.39", + "syn 2.0.47", ] [[package]] name = "cxxbridge-flags" -version = "1.0.110" +version = "1.0.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06fdd177fc61050d63f67f5bd6351fac6ab5526694ea8e359cd9cd3b75857f44" +checksum = "b6981d27196cca89f82c8a89fd495cca25066d2933c974e907f7c3699801e112" [[package]] name = "cxxbridge-macro" -version = "1.0.110" +version = "1.0.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "587663dd5fb3d10932c8aecfe7c844db1bcf0aee93eeab08fac13dc1212c2e7f" +checksum = "ca7e7d41b675af76ee4844e8e4c1cec70b65555dbc4852eae7b11c9cd5525d60" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.47", ] 
[[package]] @@ -1436,9 +1445,9 @@ dependencies = [ [[package]] name = "deranged" -version = "0.3.10" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8eb30d70a07a3b04884d2677f06bec33509dc67ca60d92949e5535352d3191dc" +checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" dependencies = [ "powerfmt", "serde", @@ -1518,11 +1527,8 @@ dependencies = [ [[package]] name = "directories-next" version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "339ee130d97a610ea5a5872d2bbb130fdf68884ff09d3028b81bec8a1ac23bbc" dependencies = [ - "cfg-if", - "dirs-sys-next", + "directories", ] [[package]] @@ -1537,17 +1543,6 @@ dependencies = [ "windows-sys 0.48.0", ] -[[package]] -name = "dirs-sys-next" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d" -dependencies = [ - "libc", - "redox_users", - "winapi", -] - [[package]] name = "displaydoc" version = "0.2.4" @@ -1556,7 +1551,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.47", ] [[package]] @@ -1571,7 +1566,6 @@ dependencies = [ "multiexp", "rand_core", "schnorr-signatures", - "serde", "std-shims", "thiserror", "zeroize", @@ -1599,8 +1593,7 @@ dependencies = [ [[package]] name = "dockertest" version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88933ed892cc8f5be247da11a1cd86a5c64802ac0172982e8aeb8315cb6dacfa" +source = "git+https://github.com/kayabaNerve/dockertest-rs?branch=arc#c0ea77997048f9edc9987984bbe20e43fac74e06" dependencies = [ "anyhow", "async-trait", @@ -1791,7 +1784,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.47", ] [[package]] @@ -1928,13 +1921,13 @@ dependencies = [ "dunce", "ethers-core", "eyre", - "prettyplease 0.2.15", + "prettyplease 0.2.16", "proc-macro2", "quote", "regex", "serde", "serde_json", - "syn 2.0.39", + "syn 2.0.47", "toml 0.7.8", "walkdir", ] @@ -1952,7 +1945,7 @@ dependencies = [ "proc-macro2", "quote", "serde_json", - "syn 2.0.39", + "syn 2.0.47", ] [[package]] @@ -1978,7 +1971,7 @@ dependencies = [ "serde", "serde_json", "strum 0.25.0", - "syn 2.0.39", + "syn 2.0.47", "tempfile", "thiserror", "tiny-keccak", @@ -2029,9 +2022,9 @@ checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" [[package]] name = "event-listener" -version = "4.0.0" +version = "4.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "770d968249b5d99410d61f5bf89057f3199a077a04d087092f58e7d10692baae" +checksum = "84f2cdcf274580f2d63697192d744727b3198894b1bf02923643bf59e2c26712" dependencies = [ "concurrent-queue", "parking", @@ -2044,7 +2037,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "958e4d70b6d5e81971bebec42271ec641e7ff4e170a6fa605f2b8a8b65cb97d3" dependencies = [ - "event-listener 4.0.0", + "event-listener 4.0.1", "pin-project-lite 0.2.13", ] @@ -2067,14 +2060,14 @@ dependencies = [ "fs-err", "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.47", ] [[package]] name = "eyre" -version = "0.6.9" +version = "0.6.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80f656be11ddf91bd709454d15d5bd896fbaf4cc3314e69349e4d1569f5b46cd" +checksum = "b6267a1fa6f59179ea4afc8e50fd8612a3cc60bc858f786ff877a4a8cb042799" dependencies = 
[ "indenter", "once_cell", @@ -2219,7 +2212,7 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "fork-tree" version = "3.0.0" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "parity-scale-codec", ] @@ -2242,7 +2235,7 @@ checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" [[package]] name = "frame-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "frame-support", "frame-support-procedural", @@ -2267,7 +2260,7 @@ dependencies = [ [[package]] name = "frame-executive" version = "4.0.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "frame-support", "frame-system", @@ -2296,7 +2289,7 @@ dependencies = [ [[package]] name = "frame-support" version = "4.0.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "bitflags 1.3.2", "environmental", @@ -2329,7 +2322,7 @@ dependencies = [ [[package]] name = "frame-support-procedural" version = "4.0.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "Inflector", "cfg-expr", @@ -2341,35 +2334,35 @@ dependencies = [ "proc-macro-warning", "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.47", ] [[package]] name = "frame-support-procedural-tools" version = "4.0.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "frame-support-procedural-tools-derive", "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.47", ] [[package]] name = "frame-support-procedural-tools-derive" version = "3.0.0" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.47", ] [[package]] name = "frame-system" version = "4.0.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "cfg-if", "frame-support", @@ -2388,7 +2381,7 @@ dependencies = [ [[package]] name = "frame-system-rpc-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "parity-scale-codec", "sp-api", @@ -2397,7 +2390,7 @@ dependencies = [ [[package]] name = "frame-try-runtime" version = "0.10.0-dev" -source = 
"git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "frame-support", "parity-scale-codec", @@ -2447,9 +2440,9 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "futures" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da0290714b38af9b4a7b094b8a37086d1b4e61f2df9122c3cad2577669145335" +checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" dependencies = [ "futures-channel", "futures-core", @@ -2472,9 +2465,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff4dd66668b557604244583e3e1e1eada8c5c2e96a6d0d6653ede395b78bbacb" +checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" dependencies = [ "futures-core", "futures-sink", @@ -2482,15 +2475,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb1d22c66e66d9d72e1758f0bd7d4fd0bee04cad842ee34587d68c07e45d088c" +checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" [[package]] name = "futures-executor" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f4fb8693db0cf099eadcca0efe2a5a22e4550f98ed16aba6c48700da29597bc" +checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" dependencies = [ "futures-core", "futures-task", @@ -2500,9 +2493,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bf34a163b5c4c52d0478a4d757da8fb65cabef42ba90515efee0f6f9fa45aaa" +checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" [[package]] name = "futures-lite" @@ -2516,13 +2509,13 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53b153fd91e4b0147f4aced87be237c98248656bb01050b96bf3ee89220a8ddb" +checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.47", ] [[package]] @@ -2537,15 +2530,15 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e36d3378ee38c2a36ad710c5d30c2911d752cb941c00c72dbabfb786a7970817" +checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" [[package]] name = "futures-task" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efd193069b0ddadc69c46389b740bbccdd97203899b48d09c5f7969591d6bae2" +checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" [[package]] name = "futures-ticker" @@ -2570,9 +2563,9 @@ dependencies = [ [[package]] name = "futures-util" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a19526d624e703a3179b3d322efec918b6246ea0fa51d41124525f00f1cc8104" +checksum = 
"3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" dependencies = [ "futures-channel", "futures-core", @@ -2836,9 +2829,9 @@ checksum = "3011d1213f159867b13cfd6ac92d2cd5f1345762c63be3554e84092d85a50bbd" [[package]] name = "hkdf" -version = "0.12.3" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "791a029f6b9fc27657f6f188ec6e5e43f6911f6f878e0dc5501396e09809d437" +checksum = "7b5f8eb2ad728638ea2c7d47a21db23b7b58a72ed6a38256b8a1849f15fbbdf7" dependencies = [ "hmac", ] @@ -2854,11 +2847,11 @@ dependencies = [ [[package]] name = "home" -version = "0.5.5" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5444c27eef6923071f7ebcc33e3444508466a76f7a2b93da00ed6e19f30c1ddb" +checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -2885,9 +2878,9 @@ dependencies = [ [[package]] name = "http-body" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" +checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" dependencies = [ "bytes", "http", @@ -2920,9 +2913,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.27" +version = "0.14.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468" +checksum = "bf96e135eb83a2a8ddf766e426a841d8ddd7449d5f00d34ea02b41d2f19eef80" dependencies = [ "bytes", "futures-channel", @@ -2935,7 +2928,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite 0.2.13", - "socket2 0.4.10", + "socket2 0.5.5", "tokio", "tower-service", "tracing", @@ -2973,9 +2966,9 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.58" +version = "0.1.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8326b86b6cff230b97d0d312a6c40a60726df3332e721f72a1b035f451663b20" +checksum = "b6a67363e2aa4443928ce15e57ebae94fd8949958fd1223c4cfc0cd473ad7539" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -3183,14 +3176,7 @@ checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" [[package]] name = "is-terminal" -version = "0.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" -dependencies = [ - "hermit-abi", - "rustix", - "windows-sys 0.48.0", -] +version = "0.4.10" [[package]] name = "itertools" @@ -3203,9 +3189,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.9" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" +checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" [[package]] name = "jobserver" @@ -3405,18 +3391,18 @@ checksum = "884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67" [[package]] name = "libc" -version = "0.2.150" +version = "0.2.151" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89d92a4743f9a61002fae18374ed11e7973f530cb3a3255fb354818118b2203c" +checksum = "302d7ab3130588088d277783b1e2d2e10c9e9e4a16dd9050e6ec93fb3e7048f4" [[package]] name = "libloading" -version = "0.7.4" +version = 
"0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f" +checksum = "c571b676ddfc9a8c12f1f3d3085a7b163966a8fd8098a90640953ce5f6170161" dependencies = [ "cfg-if", - "winapi", + "windows-sys 0.48.0", ] [[package]] @@ -3790,7 +3776,7 @@ dependencies = [ "proc-macro-warning", "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.47", ] [[package]] @@ -4028,8 +4014,15 @@ dependencies = [ [[package]] name = "mach" version = "0.3.2" +dependencies = [ + "mach2", +] + +[[package]] +name = "mach2" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b823e83b2affd8f40a9ee8c29dbc56404c1e34cd2710921f2801e2cf29527afa" +checksum = "19b955cdeb2a02b9117f121ce63aa52d08ade45de53e48fe6a38b39c10f6f709" dependencies = [ "libc", ] @@ -4043,7 +4036,7 @@ dependencies = [ "macro_magic_core", "macro_magic_macros", "quote", - "syn 2.0.39", + "syn 2.0.47", ] [[package]] @@ -4057,7 +4050,7 @@ dependencies = [ "macro_magic_core_macros", "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.47", ] [[package]] @@ -4068,7 +4061,7 @@ checksum = "d710e1214dffbab3b5dacb21475dde7d6ed84c69ff722b3a47a782668d44fbac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.47", ] [[package]] @@ -4079,7 +4072,7 @@ checksum = "b8fb85ec1620619edf2984a7693497d4ec88a9665d8b87e942856884c92dbf2a" dependencies = [ "macro_magic_core", "quote", - "syn 2.0.39", + "syn 2.0.47", ] [[package]] @@ -4109,8 +4102,6 @@ dependencies = [ [[package]] name = "matches" version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2532096657941c2fea9c289d370a250971c689d4f143798ff67113ec042024a5" [[package]] name = "matrixmultiply" @@ -4134,9 +4125,9 @@ dependencies = [ [[package]] name = "memchr" -version = "2.6.4" +version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" +checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149" [[package]] name = "memfd" @@ -4232,9 +4223,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.9" +version = "0.8.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dce281c5e46beae905d4de1870d8b1509a9142b62eedf18b443b011ca8343d0" +checksum = "8f3d0b296e374a4e6f3c7b0a1f5a51d748a0d34c85e7dc48fc3fa9a87657fe09" dependencies = [ "libc", "wasi", @@ -4306,6 +4297,7 @@ dependencies = [ name = "monero-serai" version = "0.1.4-alpha" dependencies = [ + "async-lock", "async-trait", "base58-monero", "curve25519-dalek", @@ -4313,7 +4305,6 @@ dependencies = [ "digest_auth", "dleq", "flexible-transcript", - "futures", "group", "hex", "hex-literal", @@ -4717,10 +4708,10 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c11e44798ad209ccdd91fc192f0526a369a01234f7373e1b141c96d7cee4f0e" dependencies = [ - "proc-macro-crate 2.0.0", + "proc-macro-crate 2.0.1", "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.47", ] [[package]] @@ -4737,9 +4728,9 @@ dependencies = [ [[package]] name = "object" -version = "0.32.1" +version = "0.32.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf5f9dd3933bd50a9e1f149ec995f39ae2c496d31fd772c1fd45ebc27e902b0" +checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" dependencies = [ "memchr", ] @@ -4755,9 +4746,9 @@ dependencies = [ [[package]] name = "once_cell" -version = 
"1.18.0" +version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" +checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" [[package]] name = "opaque-debug" @@ -4799,8 +4790,6 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "option-ext" version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" [[package]] name = "overload" @@ -4831,7 +4820,7 @@ dependencies = [ [[package]] name = "pallet-authorship" version = "4.0.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "frame-support", "frame-system", @@ -4845,7 +4834,7 @@ dependencies = [ [[package]] name = "pallet-babe" version = "4.0.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "frame-benchmarking", "frame-support", @@ -4869,7 +4858,7 @@ dependencies = [ [[package]] name = "pallet-grandpa" version = "4.0.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "frame-benchmarking", "frame-support", @@ -4892,7 +4881,7 @@ dependencies = [ [[package]] name = "pallet-session" version = "4.0.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "frame-support", "frame-system", @@ -4913,7 +4902,7 @@ dependencies = [ [[package]] name = "pallet-timestamp" version = "4.0.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "frame-benchmarking", "frame-support", @@ -4931,7 +4920,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment" version = "4.0.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "frame-support", "frame-system", @@ -4947,7 +4936,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc" version = "4.0.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "jsonrpsee", "pallet-transaction-payment-rpc-runtime-api", @@ -4963,7 +4952,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "pallet-transaction-payment", "parity-scale-codec", @@ -5013,7 +5002,7 @@ version = "3.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "be30eaf4b0a9fba5336683b38de57bb86d179a35862ba6bfcf57625d006bde5b" dependencies = [ - "proc-macro-crate 2.0.0", + "proc-macro-crate 2.0.1", "proc-macro2", "quote", "syn 1.0.109", @@ -5196,7 +5185,7 @@ checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.47", ] [[package]] @@ -5229,15 +5218,15 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.27" +version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" +checksum = "69d3587f8a9e599cc7ec2c00e331f71c4e69a5f9a4b8a6efd5b07466b9736f9a" [[package]] name = "platforms" -version = "3.2.0" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14e6ab3f592e6fb464fc9712d8d6e6912de6473954635fd76a589d832cffcbb0" +checksum = "626dec3cac7cc0e1577a2ec3fc496277ec2baa084bebad95bb6fdbfae235f84c" [[package]] name = "polling" @@ -5330,12 +5319,12 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae005bd773ab59b4725093fd7df83fd7892f7d8eafb48dbd7de6e024e4215f9d" +checksum = "a41cf62165e97c7f814d2221421dbb9afcbcdb0a88068e5ea206e19951c2cbb5" dependencies = [ "proc-macro2", - "syn 2.0.39", + "syn 2.0.47", ] [[package]] @@ -5373,11 +5362,18 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "2.0.0" +version = "2.0.1" +dependencies = [ + "proc-macro-crate 3.0.0", +] + +[[package]] +name = "proc-macro-crate" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e8366a6159044a37876a2b9817124296703c586a5c92e2c53751fa06d8d43e8" +checksum = "6b2685dd208a3771337d8d386a89840f0f43cd68be8dae90a5f8c2384effc9cd" dependencies = [ - "toml_edit 0.20.7", + "toml_edit 0.21.0", ] [[package]] @@ -5412,14 +5408,14 @@ checksum = "3d1eaa7fa0aa1929ffdf7eeb6eac234dde6268914a14ad44d23521ab6a9b258e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.47", ] [[package]] name = "proc-macro2" -version = "1.0.70" +version = "1.0.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39278fbbf5fb4f646ce651690877f89d1c5811a3d4acb27700c1cb3cdb78fd3b" +checksum = "907a61bd0f64c2f29cd1cf1dc34d05176426a3f504a78010f08416ddb7b13708" dependencies = [ "unicode-ident", ] @@ -5458,7 +5454,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.47", ] [[package]] @@ -5613,9 +5609,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.33" +version = "1.0.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" +checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" dependencies = [ "proc-macro2", ] @@ -5744,22 +5740,22 @@ dependencies = [ [[package]] name = "ref-cast" -version = "1.0.20" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acde58d073e9c79da00f2b5b84eed919c8326832648a5b109b3fce1bb1175280" +checksum = "c4846d4c50d1721b1a3bef8af76924eef20d5e723647333798c1b519b3a9473f" dependencies = [ "ref-cast-impl", ] [[package]] name = "ref-cast-impl" -version = "1.0.20" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "7f7473c2cfcf90008193dd0e3e16599455cb601a9fce322b5bb55de799664925" +checksum = "5fddb4f8d99b0a2ebafc65a87a69a7b9875e4b1ae1f00db265d300ef7f28bccc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.47", ] [[package]] @@ -5821,9 +5817,9 @@ checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" [[package]] name = "reqwest" -version = "0.11.22" +version = "0.11.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "046cd98826c46c2ac8ddecae268eb5c2e58628688a5fc7a2643704a73faba95b" +checksum = "37b1ae8d9ac08420c66222fb9096fc5de435c3c48542bc5336c51892cffafb41" dependencies = [ "base64 0.21.5", "bytes", @@ -6004,7 +6000,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.20", + "semver 1.0.21", ] [[package]] @@ -6018,9 +6014,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.26" +version = "0.38.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9470c4bf8246c8daf25f9598dca807fb6510347b1e1cfa55749113850c79d88a" +checksum = "72e572a5e8ca657d7366229cdde4bd14c4eb5499a9573d4d366fe1b599daa316" dependencies = [ "bitflags 2.4.1", "errno", @@ -6031,9 +6027,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.9" +version = "0.21.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "629648aced5775d558af50b2b4c7b02983a04b312126d45eeead26e7caa498b9" +checksum = "f9d5a6813c0759e4609cd494e8e725babae6a2ca7b62a5536a13daaec6fcb7ba" dependencies = [ "log", "ring 0.17.7", @@ -6091,9 +6087,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.15" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" +checksum = "f98d2aa92eebf49b69786be48e4477826b256916e84a57ff2a4f21923b48eb4c" [[package]] name = "safe_arch" @@ -6116,7 +6112,7 @@ dependencies = [ [[package]] name = "sc-allocator" version = "4.1.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "log", "sp-core", @@ -6127,7 +6123,7 @@ dependencies = [ [[package]] name = "sc-authority-discovery" version = "0.10.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "async-trait", "futures", @@ -6155,7 +6151,7 @@ dependencies = [ [[package]] name = "sc-basic-authorship" version = "0.10.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "futures", "futures-timer", @@ -6178,7 +6174,7 @@ dependencies = [ [[package]] name = "sc-block-builder" version = "0.10.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "parity-scale-codec", "sc-client-api", @@ -6193,7 +6189,7 @@ dependencies = [ [[package]] name = "sc-chain-spec" version = "4.0.0-dev" -source = 
"git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "memmap2", "sc-chain-spec-derive", @@ -6212,18 +6208,18 @@ dependencies = [ [[package]] name = "sc-chain-spec-derive" version = "4.0.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.47", ] [[package]] name = "sc-cli" version = "0.10.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "array-bytes", "chrono", @@ -6262,7 +6258,7 @@ dependencies = [ [[package]] name = "sc-client-api" version = "4.0.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "fnv", "futures", @@ -6287,7 +6283,7 @@ dependencies = [ [[package]] name = "sc-client-db" version = "0.10.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "hash-db", "kvdb", @@ -6313,7 +6309,7 @@ dependencies = [ [[package]] name = "sc-consensus" version = "0.10.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "async-trait", "futures", @@ -6338,7 +6334,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe" version = "0.10.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "async-trait", "fork-tree", @@ -6374,7 +6370,7 @@ dependencies = [ [[package]] name = "sc-consensus-epochs" version = "0.10.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "fork-tree", "parity-scale-codec", @@ -6387,7 +6383,7 @@ dependencies = [ [[package]] name = "sc-consensus-grandpa" version = "0.10.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "ahash", "array-bytes", @@ -6428,7 +6424,7 @@ dependencies = [ [[package]] name = "sc-consensus-slots" version = "0.10.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "async-trait", "futures", @@ -6451,7 +6447,7 @@ dependencies = [ [[package]] name = "sc-executor" version = "0.10.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies 
= [ "parity-scale-codec", "parking_lot 0.12.1", @@ -6473,7 +6469,7 @@ dependencies = [ [[package]] name = "sc-executor-common" version = "0.10.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "sc-allocator", "sp-maybe-compressed-blob", @@ -6485,7 +6481,7 @@ dependencies = [ [[package]] name = "sc-executor-wasmtime" version = "0.10.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "anyhow", "cfg-if", @@ -6502,7 +6498,7 @@ dependencies = [ [[package]] name = "sc-informant" version = "0.10.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "anstyle", "futures", @@ -6518,7 +6514,7 @@ dependencies = [ [[package]] name = "sc-keystore" version = "4.0.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "array-bytes", "parking_lot 0.12.1", @@ -6532,7 +6528,7 @@ dependencies = [ [[package]] name = "sc-network" version = "0.10.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "array-bytes", "async-channel", @@ -6574,7 +6570,7 @@ dependencies = [ [[package]] name = "sc-network-bitswap" version = "0.10.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "async-channel", "cid", @@ -6594,7 +6590,7 @@ dependencies = [ [[package]] name = "sc-network-common" version = "0.10.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "async-trait", "bitflags 1.3.2", @@ -6611,7 +6607,7 @@ dependencies = [ [[package]] name = "sc-network-gossip" version = "0.10.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "ahash", "futures", @@ -6630,7 +6626,7 @@ dependencies = [ [[package]] name = "sc-network-light" version = "0.10.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "array-bytes", "async-channel", @@ -6651,7 +6647,7 @@ dependencies = [ [[package]] name = "sc-network-sync" version = "0.10.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "array-bytes", "async-channel", @@ -6685,7 +6681,7 @@ dependencies = [ [[package]] name = "sc-network-transactions" version = "0.10.0-dev" -source = 
"git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "array-bytes", "futures", @@ -6703,7 +6699,7 @@ dependencies = [ [[package]] name = "sc-offchain" version = "4.0.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "bytes", "fnv", @@ -6735,7 +6731,7 @@ dependencies = [ [[package]] name = "sc-proposer-metrics" version = "0.10.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "log", "substrate-prometheus-endpoint", @@ -6744,7 +6740,7 @@ dependencies = [ [[package]] name = "sc-rpc" version = "4.0.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "futures", "jsonrpsee", @@ -6774,7 +6770,7 @@ dependencies = [ [[package]] name = "sc-rpc-api" version = "0.10.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "jsonrpsee", "parity-scale-codec", @@ -6793,7 +6789,7 @@ dependencies = [ [[package]] name = "sc-rpc-server" version = "4.0.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "http", "jsonrpsee", @@ -6808,7 +6804,7 @@ dependencies = [ [[package]] name = "sc-rpc-spec-v2" version = "0.10.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "array-bytes", "futures", @@ -6834,7 +6830,7 @@ dependencies = [ [[package]] name = "sc-service" version = "0.10.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "async-trait", "directories", @@ -6897,7 +6893,7 @@ dependencies = [ [[package]] name = "sc-state-db" version = "0.10.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "log", "parity-scale-codec", @@ -6908,7 +6904,7 @@ dependencies = [ [[package]] name = "sc-sysinfo" version = "6.0.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "futures", "libc", @@ -6927,7 +6923,7 @@ dependencies = [ [[package]] name = "sc-telemetry" version = "4.0.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "chrono", "futures", @@ -6946,7 +6942,7 @@ 
dependencies = [ [[package]] name = "sc-tracing" version = "4.0.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "anstyle", "chrono", @@ -6974,18 +6970,18 @@ dependencies = [ [[package]] name = "sc-tracing-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.47", ] [[package]] name = "sc-transaction-pool" version = "4.0.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "async-trait", "futures", @@ -7011,7 +7007,7 @@ dependencies = [ [[package]] name = "sc-transaction-pool-api" version = "4.0.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "async-trait", "futures", @@ -7027,7 +7023,7 @@ dependencies = [ [[package]] name = "sc-utils" version = "4.0.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "async-channel", "futures", @@ -7067,11 +7063,11 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.22" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" +checksum = "fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -7162,9 +7158,9 @@ dependencies = [ [[package]] name = "secp256k1" -version = "0.28.0" +version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2acea373acb8c21ecb5a23741452acd2593ed44ee3d343e72baaa143bc89d0d5" +checksum = "3f622567e3b4b38154fb8190bcf6b160d7a4301d70595a49195b48c116007a27" dependencies = [ "bitcoin_hashes", "rand", @@ -7174,9 +7170,9 @@ dependencies = [ [[package]] name = "secp256k1-sys" -version = "0.9.0" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09e67c467c38fd24bd5499dc9a18183b31575c12ee549197e3e20d57aa4fe3b7" +checksum = "e5d1746aae42c19d583c3c1a8c646bfad910498e2051c551a7f2e3c0c9fbb7eb" dependencies = [ "cc", ] @@ -7224,9 +7220,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.20" +version = "1.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "836fa6a3e1e547f9a2c4040802ec865b5d85f4014efe00555d7090a3dcaa1090" +checksum = "b97ed7a9823b74f99c7742f5336af7be5ecd3eeafcb1507d1fa93347b1d589b0" dependencies = [ "serde", ] @@ -7283,6 +7279,7 @@ dependencies = [ "hex", "modular-frost", "monero-serai", + "multiaddr", "parity-scale-codec", "rand_core", "serai-abi", @@ -7337,13 +7334,12 @@ dependencies = [ "env_logger", "flexible-transcript", "frost-schnorrkel", - "futures", + "futures-util", "hex", "libp2p", "log", "modular-frost", "parity-scale-codec", - "rand_chacha", "rand_core", 
"schnorr-signatures", "serai-client", @@ -7355,6 +7351,7 @@ dependencies = [ "sp-runtime", "tokio", "tributary-chain", + "zalloc", "zeroize", ] @@ -7362,6 +7359,8 @@ dependencies = [ name = "serai-coordinator-tests" version = "0.1.0" dependencies = [ + "async-recursion", + "async-trait", "blake2", "borsh", "ciphersuite", @@ -7421,6 +7420,8 @@ version = "0.1.0" name = "serai-full-stack-tests" version = "0.1.0" dependencies = [ + "async-recursion", + "async-trait", "bitcoin-serai", "curve25519-dalek", "dockertest", @@ -7430,6 +7431,7 @@ dependencies = [ "rand_core", "serai-client", "serai-coordinator-tests", + "serai-docker-tests", "serai-message-queue-tests", "serai-processor", "serai-processor-tests", @@ -7492,6 +7494,7 @@ dependencies = [ "serai-env", "serai-primitives", "tokio", + "zalloc", "zeroize", ] @@ -7533,7 +7536,7 @@ version = "0.1.0" dependencies = [ "clap", "frame-benchmarking", - "futures", + "futures-util", "jsonrpsee", "pallet-transaction-payment-rpc", "sc-authority-discovery", @@ -7562,6 +7565,7 @@ dependencies = [ "sp-timestamp", "substrate-build-script-utils", "substrate-frame-rpc-system", + "tokio", ] [[package]] @@ -7612,6 +7616,7 @@ dependencies = [ "sp-application-crypto", "thiserror", "tokio", + "zalloc", "zeroize", ] @@ -7674,6 +7679,8 @@ dependencies = [ "frame-support", "frame-system", "frame-system-rpc-runtime-api", + "hashbrown 0.14.3", + "pallet-authorship", "pallet-babe", "pallet-grandpa", "pallet-timestamp", @@ -7737,6 +7744,7 @@ version = "0.1.0" dependencies = [ "frame-support", "frame-system", + "hashbrown 0.14.3", "pallet-babe", "pallet-grandpa", "parity-scale-codec", @@ -7750,6 +7758,7 @@ dependencies = [ "sp-io", "sp-runtime", "sp-session", + "sp-staking", "sp-std", ] @@ -7771,38 +7780,38 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.193" +version = "1.0.194" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25dd9975e68d0cb5aa1120c288333fc98731bd1dd12f561e468ea4728c042b89" +checksum = "0b114498256798c94a0689e1a15fec6005dee8ac1f41de56404b67afc2a4b773" dependencies = [ "serde_derive", ] [[package]] name = "serde_bytes" -version = "0.11.12" +version = "0.11.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab33ec92f677585af6d88c65593ae2375adde54efdbf16d597f2cbc7a6d368ff" +checksum = "8b8497c313fd43ab992087548117643f6fcd935cbf36f176ffda0aacf9591734" dependencies = [ "serde", ] [[package]] name = "serde_derive" -version = "1.0.193" +version = "1.0.194" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43576ca501357b9b071ac53cdc7da8ef0cbd9493d8df094cd821777ea6e894d3" +checksum = "a3385e45322e8f9931410f01b3031ec534c3947d0e94c18049af4d9f9907d4e0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.47", ] [[package]] name = "serde_json" -version = "1.0.108" +version = "1.0.110" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d1c7e3eac408d115102c4c24ad393e0821bb3a5df4d506a80f85f7a742a526b" +checksum = "6fbd975230bada99c8bb618e0c365c2eefa219158d5c6c29610fd09ff1833257" dependencies = [ "itoa", "ryu", @@ -7811,20 +7820,20 @@ dependencies = [ [[package]] name = "serde_repr" -version = "0.1.17" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3081f5ffbb02284dda55132aa26daecedd7372a42417bbbab6f14ab7d6bb9145" +checksum = "0b2e6b945e9d3df726b65d6ee24060aff8e3533d431f677a9695db04eff9dfdb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.47", ] [[package]] 
name = "serde_spanned" -version = "0.6.4" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12022b835073e5b11e90a14f86838ceb1c8fb0325b72416845c487ac0fa95e80" +checksum = "eb3622f419d1296904700073ea6cc23ad690adbd66f13ea683df73298736f0c1" dependencies = [ "serde", ] @@ -8061,7 +8070,7 @@ dependencies = [ [[package]] name = "sp-api" version = "4.0.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "hash-db", "log", @@ -8082,7 +8091,7 @@ dependencies = [ [[package]] name = "sp-api-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "Inflector", "blake2", @@ -8090,13 +8099,13 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.47", ] [[package]] name = "sp-application-crypto" version = "23.0.0" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "parity-scale-codec", "scale-info", @@ -8109,7 +8118,7 @@ dependencies = [ [[package]] name = "sp-arithmetic" version = "16.0.0" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "integer-sqrt", "num-traits", @@ -8123,7 +8132,7 @@ dependencies = [ [[package]] name = "sp-authority-discovery" version = "4.0.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "parity-scale-codec", "scale-info", @@ -8135,7 +8144,7 @@ dependencies = [ [[package]] name = "sp-block-builder" version = "4.0.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "sp-api", "sp-inherents", @@ -8146,7 +8155,7 @@ dependencies = [ [[package]] name = "sp-blockchain" version = "4.0.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "futures", "log", @@ -8164,7 +8173,7 @@ dependencies = [ [[package]] name = "sp-consensus" version = "0.10.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "async-trait", "futures", @@ -8178,7 +8187,7 @@ dependencies = [ [[package]] name = "sp-consensus-babe" version = "0.10.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "async-trait", "parity-scale-codec", @@ -8197,7 +8206,7 @@ dependencies = [ [[package]] name = "sp-consensus-grandpa" version = "4.0.0-dev" -source = 
"git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "finality-grandpa", "log", @@ -8215,7 +8224,7 @@ dependencies = [ [[package]] name = "sp-consensus-slots" version = "0.10.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "parity-scale-codec", "scale-info", @@ -8227,7 +8236,7 @@ dependencies = [ [[package]] name = "sp-core" version = "21.0.0" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "array-bytes", "bitflags 1.3.2", @@ -8270,7 +8279,7 @@ dependencies = [ [[package]] name = "sp-core-hashing" version = "9.0.0" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "blake2b_simd", "byteorder", @@ -8282,17 +8291,17 @@ dependencies = [ [[package]] name = "sp-core-hashing-proc-macro" version = "9.0.0" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "quote", "sp-core-hashing", - "syn 2.0.39", + "syn 2.0.47", ] [[package]] name = "sp-database" version = "4.0.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "kvdb", "parking_lot 0.12.1", @@ -8301,17 +8310,17 @@ dependencies = [ [[package]] name = "sp-debug-derive" version = "8.0.0" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.47", ] [[package]] name = "sp-externalities" version = "0.19.0" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "environmental", "parity-scale-codec", @@ -8322,7 +8331,7 @@ dependencies = [ [[package]] name = "sp-inherents" version = "4.0.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "async-trait", "impl-trait-for-tuples", @@ -8336,7 +8345,7 @@ dependencies = [ [[package]] name = "sp-io" version = "23.0.0" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "bytes", "ed25519", @@ -8358,7 +8367,7 @@ dependencies = [ [[package]] name = "sp-keyring" version = "24.0.0" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "lazy_static", 
"sp-core", @@ -8369,7 +8378,7 @@ dependencies = [ [[package]] name = "sp-keystore" version = "0.27.0" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "parity-scale-codec", "parking_lot 0.12.1", @@ -8381,7 +8390,7 @@ dependencies = [ [[package]] name = "sp-maybe-compressed-blob" version = "4.1.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "thiserror", "zstd 0.12.4", @@ -8390,7 +8399,7 @@ dependencies = [ [[package]] name = "sp-metadata-ir" version = "0.1.0" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "frame-metadata", "parity-scale-codec", @@ -8401,7 +8410,7 @@ dependencies = [ [[package]] name = "sp-offchain" version = "4.0.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "sp-api", "sp-core", @@ -8411,7 +8420,7 @@ dependencies = [ [[package]] name = "sp-panic-handler" version = "8.0.0" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "backtrace", "lazy_static", @@ -8421,7 +8430,7 @@ dependencies = [ [[package]] name = "sp-rpc" version = "6.0.0" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "rustc-hash", "serde", @@ -8431,7 +8440,7 @@ dependencies = [ [[package]] name = "sp-runtime" version = "24.0.0" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "either", "hash256-std-hasher", @@ -8453,7 +8462,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface" version = "17.0.0" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "bytes", "impl-trait-for-tuples", @@ -8471,19 +8480,19 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" version = "11.0.0" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "Inflector", "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.47", ] [[package]] name = "sp-session" version = "4.0.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "parity-scale-codec", "scale-info", @@ -8498,7 +8507,7 @@ dependencies = [ [[package]] name = "sp-staking" version = "4.0.0-dev" -source = 
"git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "impl-trait-for-tuples", "parity-scale-codec", @@ -8512,7 +8521,7 @@ dependencies = [ [[package]] name = "sp-state-machine" version = "0.28.0" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "hash-db", "log", @@ -8533,12 +8542,12 @@ dependencies = [ [[package]] name = "sp-std" version = "8.0.0" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" [[package]] name = "sp-storage" version = "13.0.0" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "impl-serde", "parity-scale-codec", @@ -8551,7 +8560,7 @@ dependencies = [ [[package]] name = "sp-timestamp" version = "4.0.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "async-trait", "parity-scale-codec", @@ -8564,7 +8573,7 @@ dependencies = [ [[package]] name = "sp-tracing" version = "10.0.0" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "parity-scale-codec", "sp-std", @@ -8576,7 +8585,7 @@ dependencies = [ [[package]] name = "sp-transaction-pool" version = "4.0.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "sp-api", "sp-runtime", @@ -8585,7 +8594,7 @@ dependencies = [ [[package]] name = "sp-trie" version = "22.0.0" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "ahash", "hash-db", @@ -8608,7 +8617,7 @@ dependencies = [ [[package]] name = "sp-version" version = "22.0.0" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "impl-serde", "parity-scale-codec", @@ -8625,18 +8634,18 @@ dependencies = [ [[package]] name = "sp-version-proc-macro" version = "8.0.0" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "parity-scale-codec", "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.47", ] [[package]] name = "sp-wasm-interface" version = "14.0.0" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "anyhow", "impl-trait-for-tuples", @@ -8649,7 +8658,7 @@ dependencies = [ [[package]] name = 
"sp-weights" version = "20.0.0" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "parity-scale-codec", "scale-info", @@ -8809,7 +8818,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.39", + "syn 2.0.47", ] [[package]] @@ -8827,12 +8836,12 @@ dependencies = [ [[package]] name = "substrate-build-script-utils" version = "3.0.0" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" [[package]] name = "substrate-frame-rpc-system" version = "4.0.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "frame-system-rpc-runtime-api", "futures", @@ -8851,7 +8860,7 @@ dependencies = [ [[package]] name = "substrate-prometheus-endpoint" version = "0.10.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "hyper", "log", @@ -8863,7 +8872,7 @@ dependencies = [ [[package]] name = "substrate-wasm-builder" version = "5.0.0-dev" -source = "git+https://github.com/serai-dex/substrate#34c8c3595e7a3230058ee5e181c9d7e4be329ed2" +source = "git+https://github.com/serai-dex/substrate#400d5c9d4da49ae96035964da14c7654478b11e5" dependencies = [ "anstyle", "build-helper", @@ -8897,9 +8906,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.39" +version = "2.0.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23e78b90f2fcf45d3e842032ce32e3f2d1545ba6636271dcbf24fa306d87be7a" +checksum = "1726efe18f42ae774cc644f330953a5e7b3c3003d3edcecf18850fe9d4dd9afb" dependencies = [ "proc-macro2", "quote", @@ -8915,7 +8924,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.47", ] [[package]] @@ -8959,21 +8968,21 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "target-lexicon" -version = "0.12.12" +version = "0.12.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14c39fd04924ca3a864207c66fc2cd7d22d7c016007f9ce846cbb9326331930a" +checksum = "69758bda2e78f098e4ccb393021a0963bb3442eac05f135c30f61b7370bbafae" [[package]] name = "tempfile" -version = "3.8.1" +version = "3.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef1adac450ad7f4b3c28589471ade84f25f731a7a0fe30d71dfa9f60fd808e5" +checksum = "01ce4141aa927a6d1bd34a041795abd0db1cccba5d5f24b009f694bdf3a1f3fa" dependencies = [ "cfg-if", "fastrand", "redox_syscall 0.4.1", "rustix", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -8981,7 +8990,8 @@ name = "tendermint-machine" version = "0.2.0" dependencies = [ "async-trait", - "futures", + "futures-channel", + "futures-util", "hex", "log", "parity-scale-codec", @@ -9006,22 +9016,22 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "thiserror" -version = "1.0.50" +version = "1.0.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9a7210f5c9a7156bb50aa36aed4c95afb51df0df00713949448cf9e97d382d2" +checksum = 
"d54378c645627613241d077a3a79db965db602882668f9136ac42af9ecb730ad" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.50" +version = "1.0.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8" +checksum = "fa0faa943b50f3db30a20aa7e265dbc66076993efed8463e8de414e5d06d3471" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.47", ] [[package]] @@ -9055,9 +9065,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4a34ab300f2dee6e562c10a046fc05e358b29f9bf92277f30c3c8d82275f6f5" +checksum = "f657ba42c3f86e7680e53c8cd3af8abbe56b5491790b46e22e19c0d57463583e" dependencies = [ "deranged", "itoa", @@ -9075,9 +9085,9 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ad70d68dba9e1f8aceda7aa6711965dfec1cac869f311a51bd08b3a2ccbce20" +checksum = "26197e33420244aeb70c3e8c78376ca46571bc4e701e4791c2cd9f57dcb3a43f" dependencies = [ "time-core", ] @@ -9127,9 +9137,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.34.0" +version = "1.35.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0c014766411e834f7af5b8f4cf46257aab4036ca95e9d2c144a10f59ad6f5b9" +checksum = "c89b4efa943be685f629b149f53829423f8f5531ea21249408e8e2f8671ec104" dependencies = [ "backtrace", "bytes", @@ -9152,7 +9162,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.47", ] [[package]] @@ -9237,9 +9247,9 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.20.7" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70f427fce4d84c72b5b732388bf4a9f4531b53f74e2887e3ecb2481f68f66d81" +checksum = "d34d383cd00a163b4a5b85053df514d45bc330f6de7737edfe0a93311d1eaa03" dependencies = [ "indexmap 2.1.0", "toml_datetime", @@ -9307,7 +9317,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.47", ] [[package]] @@ -9400,7 +9410,8 @@ dependencies = [ "blake2", "ciphersuite", "flexible-transcript", - "futures", + "futures-channel", + "futures-util", "hex", "log", "parity-scale-codec", @@ -9511,9 +9522,9 @@ dependencies = [ [[package]] name = "try-lock" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "tt-call" @@ -9553,9 +9564,9 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.13" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" +checksum = "6f2528f27a9eb2b21e69c95319b30bd0efd85d09c379741b0f78ea1d86be2416" [[package]] name = "unicode-ident" @@ -9711,7 +9722,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.47", "wasm-bindgen-shared", ] @@ -9745,7 +9756,7 @@ checksum = 
"f0eb82fcb7930ae6219a7ecfd55b217f5f0893484b7a13022ebb2b2bf20b5283" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.47", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -9836,7 +9847,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1dfcdb72d96f01e6c85b6bf20102e7423bdbaad5c337301bab2bbf253d26413c" dependencies = [ "indexmap 2.1.0", - "semver 1.0.20", + "semver 1.0.21", ] [[package]] @@ -10054,7 +10065,7 @@ checksum = "ca7af9bb3ee875c4907835e607a275d10b04d15623d3aebe01afe8fbd3f85050" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.47", ] [[package]] @@ -10294,9 +10305,9 @@ checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" [[package]] name = "winnow" -version = "0.5.25" +version = "0.5.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7e87b8dfbe3baffbe687eef2e164e32286eff31a5ee16463ce03d991643ec94" +checksum = "8434aeec7b290e8da5c3f0d628cb0eac6cabcb31d14bb74f779a08109a5914d6" dependencies = [ "memchr", ] @@ -10416,22 +10427,22 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.7.29" +version = "0.7.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d075cf85bbb114e933343e087b92f2146bac0d55b534cbb8188becf0039948e" +checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.29" +version = "0.7.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86cd5ca076997b97ef09d3ad65efe811fa68c9e874cb636ccb211223a813b0c2" +checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.47", ] [[package]] @@ -10451,7 +10462,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.39", + "syn 2.0.47", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index a63b74612..8bd7bec54 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -67,6 +67,13 @@ members = [ "tests/coordinator", "tests/full-stack", "tests/reproducible-runtime", + + "patches/is-terminal", + "patches/matches", + "patches/option-ext", + "patches/directories-next", + "patches/mach", + "patches/proc-macro-crate", ] # Always compile Monero (and a variety of dependencies) with optimizations due @@ -93,6 +100,75 @@ panic = "unwind" # https://github.com/rust-lang-nursery/lazy-static.rs/issues/201 lazy_static = { git = "https://github.com/rust-lang-nursery/lazy-static.rs", rev = "5735630d46572f1e5377c8f2ba0f79d18f53b10c" } -# subxt *can* pull these off crates.io yet there's no benefit to this -sp-core-hashing = { git = "https://github.com/serai-dex/substrate" } -sp-std = { git = "https://github.com/serai-dex/substrate" } +# Needed due to dockertest's usage of `Rc`s when we need `Arc`s +dockertest = { git = "https://github.com/kayabaNerve/dockertest-rs", branch = "arc" } + +# is-terminal now has an std-based solution with an equivalent API +is-terminal = { path = "patches/is-terminal" } +# So does matches +matches = { path = "patches/matches" } + +# directories-next was created because directories was unmaintained +# directories-next is now unmaintained while directories is maintained +# The directories author pulls in ridiculously pointless crates and prefers +# copyleft licenses +# The following two patches resolve everything +option-ext = { path = "patches/option-ext" } +directories-next 
= { path = "patches/directories-next" } + +# mach is unmaintained, so this wraps mach2 as mach +mach = { path = "patches/mach" } + +# proc-macro-crate 2 binds to an old version of toml for msrv so we patch to 3 +proc-macro-crate = { path = "patches/proc-macro-crate" } + +[workspace.lints.clippy] +unwrap_or_default = "allow" +borrow_as_ptr = "deny" +cast_lossless = "deny" +cast_possible_truncation = "deny" +cast_possible_wrap = "deny" +cast_precision_loss = "deny" +cast_ptr_alignment = "deny" +cast_sign_loss = "deny" +checked_conversions = "deny" +cloned_instead_of_copied = "deny" +enum_glob_use = "deny" +expl_impl_clone_on_copy = "deny" +explicit_into_iter_loop = "deny" +explicit_iter_loop = "deny" +flat_map_option = "deny" +float_cmp = "deny" +fn_params_excessive_bools = "deny" +ignored_unit_patterns = "deny" +implicit_clone = "deny" +inefficient_to_string = "deny" +invalid_upcast_comparisons = "deny" +large_stack_arrays = "deny" +linkedlist = "deny" +macro_use_imports = "deny" +manual_instant_elapsed = "deny" +manual_let_else = "deny" +manual_ok_or = "deny" +manual_string_new = "deny" +map_unwrap_or = "deny" +match_bool = "deny" +match_same_arms = "deny" +missing_fields_in_debug = "deny" +needless_continue = "deny" +needless_pass_by_value = "deny" +ptr_cast_constness = "deny" +range_minus_one = "deny" +range_plus_one = "deny" +redundant_closure_for_method_calls = "deny" +redundant_else = "deny" +string_add_assign = "deny" +unchecked_duration_subtraction = "deny" +uninlined_format_args = "deny" +unnecessary_box_returns = "deny" +unnecessary_join = "deny" +unnecessary_wraps = "deny" +unnested_or_patterns = "deny" +unused_async = "deny" +unused_self = "deny" +zero_sized_map_values = "deny" diff --git a/coins/bitcoin/Cargo.toml b/coins/bitcoin/Cargo.toml index d61e2fd61..4ff0f79a4 100644 --- a/coins/bitcoin/Cargo.toml +++ b/coins/bitcoin/Cargo.toml @@ -8,6 +8,13 @@ authors = ["Luke Parker ", "Vrx "] edition = "2021" rust-version = "1.74" +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + [dependencies] std-shims = { version = "0.1.1", path = "../../common/std-shims", default-features = false } diff --git a/coins/bitcoin/src/rpc.rs b/coins/bitcoin/src/rpc.rs index 687b6eeaa..6778636bd 100644 --- a/coins/bitcoin/src/rpc.rs +++ b/coins/bitcoin/src/rpc.rs @@ -84,7 +84,7 @@ impl Rpc { for line in res.split('\n') { // This doesn't check if the arguments are as expected // This is due to Bitcoin supporting a large amount of optional arguments, which - // occassionally change, with their own mechanism of text documentation, making matching off + // occasionally change, with their own mechanism of text documentation, making matching off // it a quite involved task // Instead, once we've confirmed the methods are present, we assume our arguments are aligned // Else we'll error at time of call @@ -187,7 +187,20 @@ impl Rpc { /// Publish a transaction. pub async fn send_raw_transaction(&self, tx: &Transaction) -> Result { - let txid = self.rpc_call("sendrawtransaction", json!([encode::serialize_hex(tx)])).await?; + let txid = match self.rpc_call("sendrawtransaction", json!([encode::serialize_hex(tx)])).await { + Ok(txid) => txid, + Err(e) => { + // A const from Bitcoin's bitcoin/src/rpc/protocol.h + const RPC_VERIFY_ALREADY_IN_CHAIN: isize = -27; + // If this was already successfully published, consider this having succeeded + if let RpcError::RequestError(Error { code, .. 
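The workspace now denies a curated set of clippy lints, with each member crate opting in through `[lints] workspace = true`. A minimal sketch of two of the denied lints, using the same rewrites this diff applies throughout the tree (the function and its arguments are illustrative only, not part of the tree):

fn examples(data: Option<Vec<u8>>, vals: &mut Vec<u64>) -> usize {
  // clippy::map_unwrap_or: write `map_or(default, f)`, not `map(f).unwrap_or(default)`
  let len = data.as_ref().map_or(0, Vec::len);

  // clippy::explicit_iter_loop: loop over `&mut *vals` rather than `vals.iter_mut()`
  for v in &mut *vals {
    *v += 1;
  }

  len
}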
}) = e { + if code == RPC_VERIFY_ALREADY_IN_CHAIN { + return Ok(tx.txid()); + } + } + Err(e)? + } + }; if txid != tx.txid() { Err(RpcError::InvalidResponse("returned TX ID inequals calculated TX ID"))?; } diff --git a/coins/bitcoin/src/tests/crypto.rs b/coins/bitcoin/src/tests/crypto.rs index 7380e296e..2170219c0 100644 --- a/coins/bitcoin/src/tests/crypto.rs +++ b/coins/bitcoin/src/tests/crypto.rs @@ -20,7 +20,7 @@ fn test_algorithm() { let mut keys = key_gen::<_, Secp256k1>(&mut OsRng); const MESSAGE: &[u8] = b"Hello, World!"; - for (_, keys) in keys.iter_mut() { + for keys in keys.values_mut() { let (_, offset) = make_even(keys.group_key()); *keys = keys.offset(Scalar::from(offset)); } @@ -29,9 +29,9 @@ fn test_algorithm() { Schnorr::::new(RecommendedTranscript::new(b"bitcoin-serai sign test")); let sig = sign( &mut OsRng, - algo.clone(), + &algo, keys.clone(), - algorithm_machines(&mut OsRng, algo, &keys), + algorithm_machines(&mut OsRng, &algo, &keys), Hash::hash(MESSAGE).as_ref(), ); diff --git a/coins/bitcoin/src/wallet/send.rs b/coins/bitcoin/src/wallet/send.rs index a33fe4f19..f4cfa3b5d 100644 --- a/coins/bitcoin/src/wallet/send.rs +++ b/coins/bitcoin/src/wallet/send.rs @@ -122,7 +122,7 @@ impl SignableTransaction { pub fn new( mut inputs: Vec, payments: &[(Address, u64)], - change: Option
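The send_raw_transaction change above makes broadcasting idempotent: Bitcoin Core returns error code -27 (RPC_VERIFY_ALREADY_IN_CHAIN, per bitcoin/src/rpc/protocol.h) when the transaction already has a chain inclusion, which a wallet retrying a publish can treat as success. A standalone sketch of the pattern, with simplified stand-ins for the crate's RPC types:

const RPC_VERIFY_ALREADY_IN_CHAIN: isize = -27;

#[derive(Debug)]
enum RpcError {
  RequestError { code: isize },
  InvalidResponse(&'static str),
}

fn publish(
  rpc_call: impl FnOnce() -> Result<String, RpcError>,
  calculated_txid: String,
) -> Result<String, RpcError> {
  match rpc_call() {
    Ok(txid) => {
      // The node's claimed TXID must match the locally calculated one
      if txid != calculated_txid {
        return Err(RpcError::InvalidResponse("returned TX ID differs from calculated TX ID"));
      }
      Ok(txid)
    }
    // Already mined: re-broadcasting an included transaction isn't a failure
    Err(RpcError::RequestError { code }) if code == RPC_VERIFY_ALREADY_IN_CHAIN => {
      Ok(calculated_txid)
    }
    Err(e) => Err(e),
  }
}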
, + change: Option<&Address>, data: Option>, fee_per_weight: u64, ) -> Result { @@ -140,7 +140,7 @@ impl SignableTransaction { } } - if data.as_ref().map(|data| data.len()).unwrap_or(0) > 80 { + if data.as_ref().map_or(0, Vec::len) > 80 { Err(TransactionError::TooMuchData)?; } @@ -186,7 +186,7 @@ impl SignableTransaction { // src/policy/policy.cpp#L295-L298 // implements this as expected - // Technically, it takes whatever's greater, the weight or the amount of signature operatons + // Technically, it takes whatever's greater, the weight or the amount of signature operations // multiplied by DEFAULT_BYTES_PER_SIGOP (20) // We only use 1 signature per input, and our inputs have a weight exceeding 20 // Accordingly, our inputs' weight will always be greater than the cost of the signature ops @@ -212,7 +212,7 @@ impl SignableTransaction { } // If there's a change address, check if there's change to give it - if let Some(change) = change.as_ref() { + if let Some(change) = change { let weight_with_change = Self::calculate_weight(tx_ins.len(), payments, Some(change)); let fee_with_change = fee_per_weight * weight_with_change; if let Some(value) = input_sat.checked_sub(payment_sat + fee_with_change) { @@ -263,7 +263,7 @@ impl SignableTransaction { /// Returns None if the wrong keys are used. pub fn multisig( self, - keys: ThresholdKeys, + keys: &ThresholdKeys, mut transcript: RecommendedTranscript, ) -> Option { transcript.domain_separate(b"bitcoin_transaction"); @@ -355,10 +355,10 @@ impl SignMachine for TransactionSignMachine { } fn from_cache( - _: (), + (): (), _: ThresholdKeys, _: CachedPreprocess, - ) -> Result { + ) -> (Self, Self::Preprocess) { unimplemented!( "Bitcoin transactions don't support caching their preprocesses due to {}", "being already bound to a specific transaction" diff --git a/coins/bitcoin/tests/wallet.rs b/coins/bitcoin/tests/wallet.rs index 01a035c96..9eca20c78 100644 --- a/coins/bitcoin/tests/wallet.rs +++ b/coins/bitcoin/tests/wallet.rs @@ -82,7 +82,7 @@ async fn send_and_get_output(rpc: &Rpc, scanner: &Scanner, key: ProjectivePoint) fn keys() -> (HashMap>, ProjectivePoint) { let mut keys = key_gen(&mut OsRng); - for (_, keys) in keys.iter_mut() { + for keys in keys.values_mut() { *keys = tweak_keys(keys); } let key = keys.values().next().unwrap().group_key(); @@ -91,14 +91,14 @@ fn keys() -> (HashMap>, ProjectivePoint) { fn sign( keys: &HashMap>, - tx: SignableTransaction, + tx: &SignableTransaction, ) -> Transaction { let mut machines = HashMap::new(); for i in (1 ..= THRESHOLD).map(|i| Participant::new(i).unwrap()) { machines.insert( i, tx.clone() - .multisig(keys[&i].clone(), RecommendedTranscript::new(b"bitcoin-serai Test Transaction")) + .multisig(&keys[&i].clone(), RecommendedTranscript::new(b"bitcoin-serai Test Transaction")) .unwrap(), ); } @@ -206,7 +206,7 @@ async_sequential! { // No change assert!(SignableTransaction::new(inputs.clone(), &[(addr(), 1000)], None, None, FEE).is_ok()); // Consolidation TX - assert!(SignableTransaction::new(inputs.clone(), &[], Some(addr()), None, FEE).is_ok()); + assert!(SignableTransaction::new(inputs.clone(), &[], Some(&addr()), None, FEE).is_ok()); // Data assert!(SignableTransaction::new(inputs.clone(), &[], None, Some(vec![]), FEE).is_ok()); // No outputs @@ -229,7 +229,7 @@ async_sequential! 
{ ); assert_eq!( - SignableTransaction::new(inputs.clone(), &[], Some(addr()), None, 0), + SignableTransaction::new(inputs.clone(), &[], Some(&addr()), None, 0), Err(TransactionError::TooLowFee), ); @@ -274,13 +274,13 @@ async_sequential! { let tx = SignableTransaction::new( vec![output.clone(), offset_output.clone()], &payments, - Some(change_addr.clone()), + Some(&change_addr), None, FEE ).unwrap(); let needed_fee = tx.needed_fee(); let expected_id = tx.txid(); - let tx = sign(&keys, tx); + let tx = sign(&keys, &tx); assert_eq!(tx.output.len(), 3); @@ -341,10 +341,10 @@ async_sequential! { let tx = sign( &keys, - SignableTransaction::new( + &SignableTransaction::new( vec![output], &[], - Some(Address::::new(Network::Regtest, address_payload(key).unwrap())), + Some(&Address::::new(Network::Regtest, address_payload(key).unwrap())), Some(data.clone()), FEE ).unwrap() diff --git a/coins/ethereum/Cargo.toml b/coins/ethereum/Cargo.toml index 1d2a3d395..1d1c6dbbd 100644 --- a/coins/ethereum/Cargo.toml +++ b/coins/ethereum/Cargo.toml @@ -13,6 +13,9 @@ rust-version = "1.74" all-features = true rustdoc-args = ["--cfg", "docsrs"] +[lints] +workspace = true + [dependencies] thiserror = { version = "1", default-features = false } eyre = { version = "0.6", default-features = false } diff --git a/coins/ethereum/tests/contract.rs b/coins/ethereum/tests/contract.rs index 5577744ab..378758190 100644 --- a/coins/ethereum/tests/contract.rs +++ b/coins/ethereum/tests/contract.rs @@ -112,9 +112,9 @@ async fn test_ecrecover_hack() { let algo = IetfSchnorr::::ietf(); let sig = sign( &mut OsRng, - algo.clone(), + &algo, keys.clone(), - algorithm_machines(&mut OsRng, algo, &keys), + algorithm_machines(&mut OsRng, &algo, &keys), full_message, ); let mut processed_sig = diff --git a/coins/ethereum/tests/crypto.rs b/coins/ethereum/tests/crypto.rs index e531e4b56..f1ab08b03 100644 --- a/coins/ethereum/tests/crypto.rs +++ b/coins/ethereum/tests/crypto.rs @@ -47,13 +47,8 @@ fn test_signing() { const MESSAGE: &[u8] = b"Hello, World!"; let algo = IetfSchnorr::::ietf(); - let _sig = sign( - &mut OsRng, - algo, - keys.clone(), - algorithm_machines(&mut OsRng, IetfSchnorr::::ietf(), &keys), - MESSAGE, - ); + let _sig = + sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE); } #[test] @@ -79,9 +74,9 @@ fn test_ecrecover_hack() { let algo = IetfSchnorr::::ietf(); let sig = sign( &mut OsRng, - algo.clone(), + &algo, keys.clone(), - algorithm_machines(&mut OsRng, algo, &keys), + algorithm_machines(&mut OsRng, &algo, &keys), full_message, ); diff --git a/coins/monero/Cargo.toml b/coins/monero/Cargo.toml index bb7602056..9c78e431c 100644 --- a/coins/monero/Cargo.toml +++ b/coins/monero/Cargo.toml @@ -12,6 +12,9 @@ rust-version = "1.74" all-features = true rustdoc-args = ["--cfg", "docsrs"] +[lints] +workspace = true + [dependencies] std-shims = { path = "../../common/std-shims", version = "^0.1.1", default-features = false } @@ -45,7 +48,7 @@ frost = { package = "modular-frost", path = "../../crypto/frost", version = "0.8 monero-generators = { path = "generators", version = "0.4", default-features = false } -futures = { version = "0.3", default-features = false, features = ["alloc"], optional = true } +async-lock = { version = "3", default-features = false, optional = true } hex-literal = "0.4" hex = { version = "0.4", default-features = false, features = ["alloc"] } @@ -92,7 +95,7 @@ std = [ "monero-generators/std", - "futures?/std", + "async-lock?/std", "hex/std", "serde/std", @@ -101,7 
+104,7 @@ std = [ "base58-monero/std", ] -cache-distribution = ["futures"] +cache-distribution = ["async-lock"] http-rpc = ["digest_auth", "simple-request", "tokio"] multisig = ["transcript", "frost", "dleq", "std"] binaries = ["tokio/rt-multi-thread", "tokio/macros", "http-rpc"] diff --git a/coins/monero/build.rs b/coins/monero/build.rs index db15c1cfd..b10e956a5 100644 --- a/coins/monero/build.rs +++ b/coins/monero/build.rs @@ -28,10 +28,10 @@ fn serialize(generators_string: &mut String, points: &[EdwardsPoint]) { fn generators(prefix: &'static str, path: &str) { let generators = bulletproofs_generators(prefix.as_bytes()); #[allow(non_snake_case)] - let mut G_str = "".to_string(); + let mut G_str = String::new(); serialize(&mut G_str, &generators.G); #[allow(non_snake_case)] - let mut H_str = "".to_string(); + let mut H_str = String::new(); serialize(&mut H_str, &generators.H); let path = Path::new(&env::var("OUT_DIR").unwrap()).join(path); diff --git a/coins/monero/generators/Cargo.toml b/coins/monero/generators/Cargo.toml index 95cfa25ab..5e7b745a9 100644 --- a/coins/monero/generators/Cargo.toml +++ b/coins/monero/generators/Cargo.toml @@ -11,6 +11,9 @@ edition = "2021" all-features = true rustdoc-args = ["--cfg", "docsrs"] +[lints] +workspace = true + [dependencies] std-shims = { path = "../../../common/std-shims", version = "^0.1.1", default-features = false } diff --git a/coins/monero/src/bin/reserialize_chain.rs b/coins/monero/src/bin/reserialize_chain.rs index 9f55073a9..01f94a482 100644 --- a/coins/monero/src/bin/reserialize_chain.rs +++ b/coins/monero/src/bin/reserialize_chain.rs @@ -239,7 +239,7 @@ mod binaries { assert!(batch.verify_vartime()); } - println!("Deserialized, hashed, and reserialized {block_i} with {} TXs", txs_len); + println!("Deserialized, hashed, and reserialized {block_i} with {txs_len} TXs"); } } diff --git a/coins/monero/src/block.rs b/coins/monero/src/block.rs index 2bced983b..b4e97169d 100644 --- a/coins/monero/src/block.rs +++ b/coins/monero/src/block.rs @@ -58,10 +58,10 @@ pub struct Block { } impl Block { - pub fn number(&self) -> usize { + pub fn number(&self) -> Option { match self.miner_tx.prefix.inputs.first() { - Some(Input::Gen(number)) => (*number).try_into().unwrap(), - _ => panic!("invalid block, miner TX didn't have a Input::Gen"), + Some(Input::Gen(number)) => Some(*number), + _ => None, } } @@ -114,9 +114,16 @@ impl Block { } pub fn read(r: &mut R) -> io::Result { + let header = BlockHeader::read(r)?; + + let miner_tx = Transaction::read(r)?; + if !matches!(miner_tx.prefix.inputs.as_slice(), &[Input::Gen(_)]) { + Err(io::Error::other("Miner transaction has incorrect input type."))?; + } + Ok(Block { - header: BlockHeader::read(r)?, - miner_tx: Transaction::read(r)?, + header, + miner_tx, txs: (0_usize .. 
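The Block changes above move validation to parse time: read now rejects any miner transaction whose inputs aren't exactly one Input::Gen, letting number() become a total, panic-free accessor. A standalone sketch of the pattern, with simplified types standing in for the crate's own:

use std::io;

enum Input {
  Gen(u64),
  ToKey,
}

struct Block {
  miner_inputs: Vec<Input>,
}

impl Block {
  fn read(miner_inputs: Vec<Input>) -> io::Result<Block> {
    // Reject malformed blocks at the deserialization boundary
    if !matches!(miner_inputs.as_slice(), &[Input::Gen(_)]) {
      return Err(io::Error::other("Miner transaction has incorrect input type."));
    }
    Ok(Block { miner_inputs })
  }

  // Returns None instead of panicking if the invariant somehow doesn't hold
  fn number(&self) -> Option<u64> {
    match self.miner_inputs.first() {
      Some(Input::Gen(number)) => Some(*number),
      _ => None,
    }
  }
}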
read_varint(r)?).map(|_| read_bytes(r)).collect::>()?, }) } diff --git a/coins/monero/src/ringct/bulletproofs/mod.rs b/coins/monero/src/ringct/bulletproofs/mod.rs index 6b25b1a05..df0c6ff8a 100644 --- a/coins/monero/src/ringct/bulletproofs/mod.rs +++ b/coins/monero/src/ringct/bulletproofs/mod.rs @@ -7,7 +7,7 @@ use std_shims::{ use rand_core::{RngCore, CryptoRng}; -use zeroize::Zeroize; +use zeroize::{Zeroize, Zeroizing}; use curve25519_dalek::edwards::EdwardsPoint; use multiexp::BatchVerifier; @@ -91,7 +91,7 @@ impl Bulletproofs { Bulletproofs::Plus( AggregateRangeStatement::new(outputs.iter().map(|com| DfgPoint(com.calculate())).collect()) .unwrap() - .prove(rng, AggregateRangeWitness::new(outputs).unwrap()) + .prove(rng, &Zeroizing::new(AggregateRangeWitness::new(outputs).unwrap())) .unwrap(), ) }) @@ -172,8 +172,8 @@ impl Bulletproofs { write_scalar(&bp.wip.r_answer.0, w)?; write_scalar(&bp.wip.s_answer.0, w)?; write_scalar(&bp.wip.delta_answer.0, w)?; - specific_write_vec(&bp.wip.L.iter().cloned().map(|L| L.0).collect::>(), w)?; - specific_write_vec(&bp.wip.R.iter().cloned().map(|R| R.0).collect::>(), w) + specific_write_vec(&bp.wip.L.iter().copied().map(|L| L.0).collect::>(), w)?; + specific_write_vec(&bp.wip.R.iter().copied().map(|R| R.0).collect::>(), w) } } } diff --git a/coins/monero/src/ringct/bulletproofs/original.rs b/coins/monero/src/ringct/bulletproofs/original.rs index 7c1439d31..5e50c02ea 100644 --- a/coins/monero/src/ringct/bulletproofs/original.rs +++ b/coins/monero/src/ringct/bulletproofs/original.rs @@ -223,7 +223,7 @@ impl OriginalStruct { let A = normalize(&self.A); let S = normalize(&self.S); - let commitments = commitments.iter().map(|c| c.mul_by_cofactor()).collect::>(); + let commitments = commitments.iter().map(EdwardsPoint::mul_by_cofactor).collect::>(); // Verify it let mut proof = Vec::with_capacity(4 + commitments.len()); diff --git a/coins/monero/src/ringct/bulletproofs/plus/aggregate_range_proof.rs b/coins/monero/src/ringct/bulletproofs/plus/aggregate_range_proof.rs index b99e5f52e..859cb1e44 100644 --- a/coins/monero/src/ringct/bulletproofs/plus/aggregate_range_proof.rs +++ b/coins/monero/src/ringct/bulletproofs/plus/aggregate_range_proof.rs @@ -2,7 +2,7 @@ use std_shims::vec::Vec; use rand_core::{RngCore, CryptoRng}; -use zeroize::{Zeroize, ZeroizeOnDrop}; +use zeroize::{Zeroize, ZeroizeOnDrop, Zeroizing}; use multiexp::{multiexp, multiexp_vartime, BatchVerifier}; use group::{ @@ -142,7 +142,7 @@ impl AggregateRangeStatement { A_terms.push((y_mn_plus_one, commitment_accum)); A_terms.push(( ((y_pows * z) - (d.sum() * y_mn_plus_one * z) - (y_pows * z.square())), - generators.g(), + Generators::g(), )); (y, d_descending_y, y_mn_plus_one, z, ScalarVector(z_pow), A + multiexp_vartime(&A_terms)) @@ -151,7 +151,7 @@ impl AggregateRangeStatement { pub(crate) fn prove( self, rng: &mut R, - witness: AggregateRangeWitness, + witness: &AggregateRangeWitness, ) -> Option { // Check for consistency with the witness if self.V.len() != witness.values.len() { @@ -202,7 +202,7 @@ impl AggregateRangeStatement { for (i, a_r) in a_r.0.iter().enumerate() { A_terms.push((*a_r, generators.generator(GeneratorsList::HBold1, i))); } - A_terms.push((alpha, generators.h())); + A_terms.push((alpha, Generators::h())); let mut A = multiexp(&A_terms); A_terms.zeroize(); @@ -222,7 +222,7 @@ impl AggregateRangeStatement { Some(AggregateRangeProof { A, wip: WipStatement::new(generators, A_hat, y) - .prove(rng, transcript, WipWitness::new(a_l, a_r, alpha).unwrap()) + .prove(rng, transcript, 
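Witnesses are now borrowed as &Zeroizing<_> rather than consumed by value, which satisfies the workspace's new needless_pass_by_value deny while keeping the guarantee that secret material is wiped on drop. A minimal sketch of the idiom, assuming the zeroize crate with its derive feature enabled (the Witness type here is a stand-in for WipWitness/AggregateRangeWitness):

use zeroize::{Zeroize, Zeroizing};

#[derive(Zeroize)]
struct Witness {
  values: Vec<u64>,
}

// Borrowing lets the caller retain ownership while the wrapper still
// guarantees zeroization when it's eventually dropped
fn prove(witness: &Zeroizing<Witness>) -> u64 {
  witness.values.iter().sum()
}

fn main() {
  let witness = Zeroizing::new(Witness { values: vec![1, 2, 3] });
  let _sum = prove(&witness);
} // `witness` is zeroed here on drop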
&Zeroizing::new(WipWitness::new(a_l, a_r, alpha).unwrap())) .unwrap(), }) } diff --git a/coins/monero/src/ringct/bulletproofs/plus/mod.rs b/coins/monero/src/ringct/bulletproofs/plus/mod.rs index f52677eec..6a2d7b9c4 100644 --- a/coins/monero/src/ringct/bulletproofs/plus/mod.rs +++ b/coins/monero/src/ringct/bulletproofs/plus/mod.rs @@ -31,8 +31,6 @@ pub(crate) enum GeneratorsList { // TODO: Table these #[derive(Clone, Debug)] pub(crate) struct Generators { - g: EdwardsPoint, - g_bold1: &'static [EdwardsPoint], h_bold1: &'static [EdwardsPoint], } @@ -47,18 +45,18 @@ impl Generators { #[allow(clippy::new_without_default)] pub(crate) fn new() -> Self { let gens = generators::GENERATORS(); - Generators { g: dalek_ff_group::EdwardsPoint(crate::H()), g_bold1: &gens.G, h_bold1: &gens.H } + Generators { g_bold1: &gens.G, h_bold1: &gens.H } } pub(crate) fn len(&self) -> usize { self.g_bold1.len() } - pub(crate) fn g(&self) -> EdwardsPoint { - self.g + pub(crate) fn g() -> EdwardsPoint { + dalek_ff_group::EdwardsPoint(crate::H()) } - pub(crate) fn h(&self) -> EdwardsPoint { + pub(crate) fn h() -> EdwardsPoint { EdwardsPoint::generator() } @@ -74,11 +72,7 @@ impl Generators { let generators = padded_pow_of_2(generators); assert!(generators <= self.g_bold1.len()); - Generators { - g: self.g, - g_bold1: &self.g_bold1[.. generators], - h_bold1: &self.h_bold1[.. generators], - } + Generators { g_bold1: &self.g_bold1[.. generators], h_bold1: &self.h_bold1[.. generators] } } } diff --git a/coins/monero/src/ringct/bulletproofs/plus/scalar_vector.rs b/coins/monero/src/ringct/bulletproofs/plus/scalar_vector.rs index a8b0866ef..7bc0c3f47 100644 --- a/coins/monero/src/ringct/bulletproofs/plus/scalar_vector.rs +++ b/coins/monero/src/ringct/bulletproofs/plus/scalar_vector.rs @@ -32,7 +32,7 @@ impl ScalarVector { pub(crate) fn add(&self, scalar: impl Borrow) -> Self { let mut res = self.clone(); - for val in res.0.iter_mut() { + for val in &mut res.0 { *val += scalar.borrow(); } res @@ -40,7 +40,7 @@ impl ScalarVector { pub(crate) fn sub(&self, scalar: impl Borrow) -> Self { let mut res = self.clone(); - for val in res.0.iter_mut() { + for val in &mut res.0 { *val -= scalar.borrow(); } res @@ -48,7 +48,7 @@ impl ScalarVector { pub(crate) fn mul(&self, scalar: impl Borrow) -> Self { let mut res = self.clone(); - for val in res.0.iter_mut() { + for val in &mut res.0 { *val *= scalar.borrow(); } res diff --git a/coins/monero/src/ringct/bulletproofs/plus/weighted_inner_product.rs b/coins/monero/src/ringct/bulletproofs/plus/weighted_inner_product.rs index 9aea21488..1bc1e85da 100644 --- a/coins/monero/src/ringct/bulletproofs/plus/weighted_inner_product.rs +++ b/coins/monero/src/ringct/bulletproofs/plus/weighted_inner_product.rs @@ -189,7 +189,7 @@ impl WipStatement { self, rng: &mut R, mut transcript: Scalar, - witness: WipWitness, + witness: &WipWitness, ) -> Option { let WipStatement { generators, P, mut y } = self; #[cfg(not(debug_assertions))] @@ -198,7 +198,7 @@ impl WipStatement { if generators.len() != witness.a.len() { return None; } - let (g, h) = (generators.g(), generators.h()); + let (g, h) = (Generators::g(), Generators::h()); let mut g_bold = vec![]; let mut h_bold = vec![]; for i in 0 .. 
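Since the Bulletproofs+ g and h generators are fixed protocol constants (Monero's H point and the Ed25519 basepoint), the Generators struct above stops carrying them per instance and exposes them as associated functions. The shape of the refactor, with a stand-in point type:

#[derive(Clone, Copy)]
struct Point(u64);

struct Generators {
  g_bold1: &'static [Point],
  h_bold1: &'static [Point],
}

impl Generators {
  // Formerly `self.g` / `self.h` fields; now derivable with no instance
  fn g() -> Point {
    Point(1) // stands in for dalek_ff_group::EdwardsPoint(crate::H())
  }
  fn h() -> Point {
    Point(2) // stands in for the curve's generator
  }

  fn len(&self) -> usize {
    self.g_bold1.len()
  }
}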
generators.len() { @@ -345,7 +345,7 @@ impl WipStatement { ) -> bool { let WipStatement { generators, P, y } = self; - let (g, h) = (generators.g(), generators.h()); + let (g, h) = (Generators::g(), Generators::h()); // Verify the L/R lengths { @@ -414,7 +414,7 @@ impl WipStatement { let mut multiexp = P_terms; multiexp.reserve(4 + (2 * generators.len())); - for (scalar, _) in multiexp.iter_mut() { + for (scalar, _) in &mut multiexp { *scalar *= neg_e_square; } diff --git a/coins/monero/src/ringct/clsag/mod.rs b/coins/monero/src/ringct/clsag/mod.rs index 0a6141b2d..1290e3e38 100644 --- a/coins/monero/src/ringct/clsag/mod.rs +++ b/coins/monero/src/ringct/clsag/mod.rs @@ -96,7 +96,7 @@ fn core( msg: &[u8; 32], D: &EdwardsPoint, s: &[Scalar], - A_c1: Mode, + A_c1: &Mode, ) -> ((EdwardsPoint, Scalar, Scalar), Scalar) { let n = ring.len(); @@ -164,7 +164,7 @@ fn core( Mode::Verify(c1) => { start = 0; end = n; - c = c1; + c = *c1; } } @@ -226,7 +226,7 @@ impl Clsag { s.push(random_scalar(rng)); } let ((D, p, c), c1) = - core(&input.decoys.ring, I, &pseudo_out, msg, &D, &s, Mode::Sign(r, A, AH)); + core(&input.decoys.ring, I, &pseudo_out, msg, &D, &s, &Mode::Sign(r, A, AH)); (Clsag { D, s, c1 }, pseudo_out, p, c * z) } @@ -301,7 +301,7 @@ impl Clsag { Err(ClsagError::InvalidD)?; } - let (_, c1) = core(ring, I, pseudo_out, msg, &D, &self.s, Mode::Verify(self.c1)); + let (_, c1) = core(ring, I, pseudo_out, msg, &D, &self.s, &Mode::Verify(self.c1)); if c1 != self.c1 { Err(ClsagError::InvalidC1)?; } diff --git a/coins/monero/src/ringct/mlsag.rs b/coins/monero/src/ringct/mlsag.rs index d3b4080e7..e5f00bf7a 100644 --- a/coins/monero/src/ringct/mlsag.rs +++ b/coins/monero/src/ringct/mlsag.rs @@ -33,7 +33,10 @@ pub struct RingMatrix { impl RingMatrix { pub fn new(matrix: Vec>) -> Result { - if matrix.is_empty() { + // Monero requires that there is more than one ring member for MLSAG signatures: + // https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/ + // src/ringct/rctSigs.cpp#L462 + if matrix.len() < 2 { Err(MlsagError::InvalidRing)?; } for member in &matrix { diff --git a/coins/monero/src/ringct/mod.rs b/coins/monero/src/ringct/mod.rs index c86f96765..bcd7f0c86 100644 --- a/coins/monero/src/ringct/mod.rs +++ b/coins/monero/src/ringct/mod.rs @@ -104,13 +104,11 @@ impl RctType { pub fn compact_encrypted_amounts(&self) -> bool { match self { - RctType::Null => false, - RctType::MlsagAggregate => false, - RctType::MlsagIndividual => false, + RctType::Null | + RctType::MlsagAggregate | + RctType::MlsagIndividual | RctType::Bulletproofs => false, - RctType::BulletproofsCompactAmount => true, - RctType::Clsag => true, - RctType::BulletproofsPlus => true, + RctType::BulletproofsCompactAmount | RctType::Clsag | RctType::BulletproofsPlus => true, } } } @@ -151,9 +149,7 @@ impl RctBase { RctType::from_byte(read_byte(r)?).ok_or_else(|| io::Error::other("invalid RCT type"))?; match rct_type { - RctType::Null => {} - RctType::MlsagAggregate => {} - RctType::MlsagIndividual => {} + RctType::Null | RctType::MlsagAggregate | RctType::MlsagIndividual => {} RctType::Bulletproofs | RctType::BulletproofsCompactAmount | RctType::Clsag | @@ -261,7 +257,8 @@ impl RctPrunable { pub fn read( rct_type: RctType, - decoys: &[usize], + ring_length: usize, + inputs: usize, outputs: usize, r: &mut R, ) -> io::Result { @@ -272,7 +269,7 @@ impl RctPrunable { // src/ringct/rctSigs.cpp#L609 // And then for RctNull, that's only allowed for miner TXs which require one input of // Input::Gen - if 
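As the new comment in mlsag.rs notes, Monero's consensus code (rctSigs.cpp) rejects MLSAG rings with fewer than two members, so RingMatrix::new now enforces a minimum of two instead of mere non-emptiness. A reduced sketch with byte arrays standing in for curve points, omitting the per-row consistency checks:

#[derive(Debug)]
enum MlsagError {
  InvalidRing,
}

struct RingMatrix {
  matrix: Vec<Vec<[u8; 32]>>, // each row is one ring member's keys
}

impl RingMatrix {
  fn new(matrix: Vec<Vec<[u8; 32]>>) -> Result<RingMatrix, MlsagError> {
    // A one-member "ring" offers no anonymity set and is consensus-invalid
    if matrix.len() < 2 {
      return Err(MlsagError::InvalidRing);
    }
    Ok(RingMatrix { matrix })
  }
}

fn main() {
  assert!(RingMatrix::new(vec![vec![[0; 32]]]).is_err());
  assert!(RingMatrix::new(vec![vec![[0; 32]], vec![[1; 32]]]).is_ok());
}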
decoys.is_empty() { + if inputs == 0 { Err(io::Error::other("transaction had no inputs"))?; } @@ -280,11 +277,11 @@ impl RctPrunable { RctType::Null => RctPrunable::Null, RctType::MlsagAggregate => RctPrunable::AggregateMlsagBorromean { borromean: read_raw_vec(BorromeanRange::read, outputs, r)?, - mlsag: Mlsag::read(decoys[0], decoys.len() + 1, r)?, + mlsag: Mlsag::read(ring_length, inputs + 1, r)?, }, RctType::MlsagIndividual => RctPrunable::MlsagBorromean { borromean: read_raw_vec(BorromeanRange::read, outputs, r)?, - mlsags: decoys.iter().map(|d| Mlsag::read(*d, 2, r)).collect::>()?, + mlsags: (0 .. inputs).map(|_| Mlsag::read(ring_length, 2, r)).collect::>()?, }, RctType::Bulletproofs | RctType::BulletproofsCompactAmount => { RctPrunable::MlsagBulletproofs { @@ -299,8 +296,10 @@ impl RctPrunable { } Bulletproofs::read(r)? }, - mlsags: decoys.iter().map(|d| Mlsag::read(*d, 2, r)).collect::>()?, - pseudo_outs: read_raw_vec(read_point, decoys.len(), r)?, + mlsags: (0 .. inputs) + .map(|_| Mlsag::read(ring_length, 2, r)) + .collect::>()?, + pseudo_outs: read_raw_vec(read_point, inputs, r)?, } } RctType::Clsag | RctType::BulletproofsPlus => RctPrunable::Clsag { @@ -312,8 +311,8 @@ impl RctPrunable { r, )? }, - clsags: (0 .. decoys.len()).map(|o| Clsag::read(decoys[o], r)).collect::>()?, - pseudo_outs: read_raw_vec(read_point, decoys.len(), r)?, + clsags: (0 .. inputs).map(|_| Clsag::read(ring_length, r)).collect::>()?, + pseudo_outs: read_raw_vec(read_point, inputs, r)?, }, }) } @@ -325,7 +324,7 @@ impl RctPrunable { RctPrunable::MlsagBorromean { borromean, .. } => { borromean.iter().try_for_each(|rs| rs.write(w)) } - RctPrunable::MlsagBulletproofs { bulletproofs, .. } => bulletproofs.signature_write(w), + RctPrunable::MlsagBulletproofs { bulletproofs, .. } | RctPrunable::Clsag { bulletproofs, .. } => bulletproofs.signature_write(w), } } @@ -386,8 +385,16 @@ impl RctSignatures { serialized } - pub fn read(decoys: Vec, outputs: usize, r: &mut R) -> io::Result { - let base = RctBase::read(decoys.len(), outputs, r)?; - Ok(RctSignatures { base: base.0, prunable: RctPrunable::read(base.1, &decoys, outputs, r)? 
}) + pub fn read( + ring_length: usize, + inputs: usize, + outputs: usize, + r: &mut R, + ) -> io::Result { + let base = RctBase::read(inputs, outputs, r)?; + Ok(RctSignatures { + base: base.0, + prunable: RctPrunable::read(base.1, ring_length, inputs, outputs, r)?, + }) } } diff --git a/coins/monero/src/rpc/http.rs b/coins/monero/src/rpc/http.rs index c0ec9ff37..4ed349a5c 100644 --- a/coins/monero/src/rpc/http.rs +++ b/coins/monero/src/rpc/http.rs @@ -97,7 +97,7 @@ impl HttpRpc { Err(RpcError::ConnectionError("invalid amount of passwords".to_string()))?; } - let client = Client::without_connection_pool(url.clone()) + let client = Client::without_connection_pool(&url) .map_err(|_| RpcError::ConnectionError("invalid URL".to_string()))?; // Obtain the initial challenge, which also somewhat validates this connection let challenge = Self::digest_auth_challenge( @@ -112,7 +112,7 @@ impl HttpRpc { )?; Authentication::Authenticated { username: split_userpass[0].to_string(), - password: split_userpass.get(1).unwrap_or(&"").to_string(), + password: (*split_userpass.get(1).unwrap_or(&"")).to_string(), connection: Arc::new(Mutex::new((challenge, client))), } } else { diff --git a/coins/monero/src/tests/bulletproofs/plus/aggregate_range_proof.rs b/coins/monero/src/tests/bulletproofs/plus/aggregate_range_proof.rs index 34aa84784..a50b9d407 100644 --- a/coins/monero/src/tests/bulletproofs/plus/aggregate_range_proof.rs +++ b/coins/monero/src/tests/bulletproofs/plus/aggregate_range_proof.rs @@ -23,7 +23,7 @@ fn test_aggregate_range_proof() { let statement = AggregateRangeStatement::new(commitment_points).unwrap(); let witness = AggregateRangeWitness::new(&commitments).unwrap(); - let proof = statement.clone().prove(&mut OsRng, witness).unwrap(); + let proof = statement.clone().prove(&mut OsRng, &witness).unwrap(); statement.verify(&mut OsRng, &mut verifier, (), proof); } assert!(verifier.verify_vartime()); diff --git a/coins/monero/src/tests/bulletproofs/plus/weighted_inner_product.rs b/coins/monero/src/tests/bulletproofs/plus/weighted_inner_product.rs index 3da9c6ade..7db2ecc8c 100644 --- a/coins/monero/src/tests/bulletproofs/plus/weighted_inner_product.rs +++ b/coins/monero/src/tests/bulletproofs/plus/weighted_inner_product.rs @@ -23,7 +23,7 @@ fn test_zero_weighted_inner_product() { let witness = WipWitness::new(ScalarVector::new(1), ScalarVector::new(1), Scalar::ZERO).unwrap(); let transcript = Scalar::random(&mut OsRng); - let proof = statement.clone().prove(&mut OsRng, transcript, witness).unwrap(); + let proof = statement.clone().prove(&mut OsRng, transcript, &witness).unwrap(); let mut verifier = BatchVerifier::new(1); statement.verify(&mut OsRng, &mut verifier, (), transcript, proof); @@ -37,8 +37,8 @@ fn test_weighted_inner_product() { let generators = Generators::new(); for i in [1, 2, 4, 8, 16, 32] { let generators = generators.reduce(i); - let g = generators.g(); - let h = generators.h(); + let g = Generators::g(); + let h = Generators::h(); assert_eq!(generators.len(), i); let mut g_bold = vec![]; let mut h_bold = vec![]; @@ -75,7 +75,7 @@ fn test_weighted_inner_product() { let witness = WipWitness::new(a, b, alpha).unwrap(); let transcript = Scalar::random(&mut OsRng); - let proof = statement.clone().prove(&mut OsRng, transcript, witness).unwrap(); + let proof = statement.clone().prove(&mut OsRng, transcript, &witness).unwrap(); statement.verify(&mut OsRng, &mut verifier, (), transcript, proof); } assert!(verifier.verify_vartime()); diff --git a/coins/monero/src/tests/clsag.rs 
b/coins/monero/src/tests/clsag.rs index f816170c2..59e41ebf5 100644 --- a/coins/monero/src/tests/clsag.rs +++ b/coins/monero/src/tests/clsag.rs @@ -119,9 +119,9 @@ fn clsag_multisig() { sign( &mut OsRng, - algorithm.clone(), + &algorithm, keys.clone(), - algorithm_machines(&mut OsRng, algorithm, &keys), + algorithm_machines(&mut OsRng, &algorithm, &keys), &[1; 32], ); } diff --git a/coins/monero/src/tests/seed.rs b/coins/monero/src/tests/seed.rs index 04851d7a2..878293a0c 100644 --- a/coins/monero/src/tests/seed.rs +++ b/coins/monero/src/tests/seed.rs @@ -137,6 +137,53 @@ fn test_classic_seed() { spend: "647f4765b66b636ff07170ab6280a9a6804dfbaf19db2ad37d23be024a18730b".into(), view: "045da65316a906a8c30046053119c18020b07a7a3a6ef5c01ab2a8755416bd02".into(), }, + // The following seeds require the language specification in order to calculate + // a single valid checksum + Vector { + language: classic::Language::Spanish, + seed: "pluma laico atraer pintor peor cerca balde buscar \ + lancha batir nulo reloj resto gemelo nevera poder columna gol \ + oveja latir amplio bolero feliz fuerza nevera" + .into(), + spend: "30303983fc8d215dd020cc6b8223793318d55c466a86e4390954f373fdc7200a".into(), + view: "97c649143f3c147ba59aa5506cc09c7992c5c219bb26964442142bf97980800e".into(), + }, + Vector { + language: classic::Language::Spanish, + seed: "pluma pluma pluma pluma pluma pluma pluma pluma \ + pluma pluma pluma pluma pluma pluma pluma pluma \ + pluma pluma pluma pluma pluma pluma pluma pluma pluma" + .into(), + spend: "b4050000b4050000b4050000b4050000b4050000b4050000b4050000b4050000".into(), + view: "d73534f7912b395eb70ef911791a2814eb6df7ce56528eaaa83ff2b72d9f5e0f".into(), + }, + Vector { + language: classic::Language::English, + seed: "plus plus plus plus plus plus plus plus \ + plus plus plus plus plus plus plus plus \ + plus plus plus plus plus plus plus plus plus" + .into(), + spend: "3b0400003b0400003b0400003b0400003b0400003b0400003b0400003b040000".into(), + view: "43a8a7715eed11eff145a2024ddcc39740255156da7bbd736ee66a0838053a02".into(), + }, + Vector { + language: classic::Language::Spanish, + seed: "audio audio audio audio audio audio audio audio \ + audio audio audio audio audio audio audio audio \ + audio audio audio audio audio audio audio audio audio" + .into(), + spend: "ba000000ba000000ba000000ba000000ba000000ba000000ba000000ba000000".into(), + view: "1437256da2c85d029b293d8c6b1d625d9374969301869b12f37186e3f906c708".into(), + }, + Vector { + language: classic::Language::English, + seed: "audio audio audio audio audio audio audio audio \ + audio audio audio audio audio audio audio audio \ + audio audio audio audio audio audio audio audio audio" + .into(), + spend: "7900000079000000790000007900000079000000790000007900000079000000".into(), + view: "20bec797ab96780ae6a045dd816676ca7ed1d7c6773f7022d03ad234b581d600".into(), + }, ]; for vector in vectors { @@ -150,8 +197,15 @@ fn test_classic_seed() { // Test against Monero { - let seed = Seed::from_string(Zeroizing::new(vector.seed.clone())).unwrap(); - assert_eq!(seed, Seed::from_string(Zeroizing::new(trim_seed(&vector.seed))).unwrap()); + println!("{}. 
language: {:?}, seed: {}", line!(), vector.language, vector.seed.clone()); + let seed = + Seed::from_string(SeedType::Classic(vector.language), Zeroizing::new(vector.seed.clone())) + .unwrap(); + let trim = trim_seed(&vector.seed); + assert_eq!( + seed, + Seed::from_string(SeedType::Classic(vector.language), Zeroizing::new(trim)).unwrap() + ); let spend: [u8; 32] = hex::decode(vector.spend).unwrap().try_into().unwrap(); // For classical seeds, Monero directly uses the entropy as a spend key @@ -177,12 +231,20 @@ fn test_classic_seed() { // Test against ourselves { let seed = Seed::new(&mut OsRng, SeedType::Classic(vector.language)); - assert_eq!(seed, Seed::from_string(Zeroizing::new(trim_seed(&seed.to_string()))).unwrap()); + println!("{}. seed: {}", line!(), *seed.to_string()); + let trim = trim_seed(&seed.to_string()); + assert_eq!( + seed, + Seed::from_string(SeedType::Classic(vector.language), Zeroizing::new(trim)).unwrap() + ); assert_eq!( seed, Seed::from_entropy(SeedType::Classic(vector.language), seed.entropy(), None).unwrap() ); - assert_eq!(seed, Seed::from_string(seed.to_string()).unwrap()); + assert_eq!( + seed, + Seed::from_string(SeedType::Classic(vector.language), seed.to_string()).unwrap() + ); } } } @@ -295,6 +357,18 @@ fn test_polyseed() { has_prefix: false, has_accent: false, }, + // The following seed requires the language specification in order to calculate + // a single valid checksum + Vector { + language: polyseed::Language::Spanish, + seed: "impo sort usua cabi venu nobl oliv clim \ + cont barr marc auto prod vaca torn fati" + .into(), + entropy: "dbfce25fe09b68a340e01c62417eeef43ad51800000000000000000000000000".into(), + birthday: 1701511650, + has_prefix: true, + has_accent: true, + }, ]; for vector in vectors { @@ -306,7 +380,7 @@ fn test_polyseed() { let seed_without_accents = |seed: &str| { seed .split_whitespace() - .map(|w| w.chars().filter(|c| c.is_ascii()).collect::()) + .map(|w| w.chars().filter(char::is_ascii).collect::()) .collect::>() .join(" ") }; @@ -336,21 +410,32 @@ fn test_polyseed() { }; // String -> Seed - let seed = Seed::from_string(Zeroizing::new(vector.seed.clone())).unwrap(); + println!("{}. 
language: {:?}, seed: {}", line!(), vector.language, vector.seed.clone()); + let seed = + Seed::from_string(SeedType::Polyseed(vector.language), Zeroizing::new(vector.seed.clone())) + .unwrap(); + let trim = trim_seed(&vector.seed); + let add_whitespace = add_whitespace(vector.seed.clone()); + let seed_without_accents = seed_without_accents(&vector.seed); // Make sure a version with added whitespace still works let whitespaced_seed = - Seed::from_string(Zeroizing::new(add_whitespace(vector.seed.clone()))).unwrap(); + Seed::from_string(SeedType::Polyseed(vector.language), Zeroizing::new(add_whitespace)) + .unwrap(); assert_eq!(seed, whitespaced_seed); // Check trimmed versions works if vector.has_prefix { - let trimmed_seed = Seed::from_string(Zeroizing::new(trim_seed(&vector.seed))).unwrap(); + let trimmed_seed = + Seed::from_string(SeedType::Polyseed(vector.language), Zeroizing::new(trim)).unwrap(); assert_eq!(seed, trimmed_seed); } // Check versions without accents work if vector.has_accent { - let seed_without_accents = - Seed::from_string(Zeroizing::new(seed_without_accents(&vector.seed))).unwrap(); + let seed_without_accents = Seed::from_string( + SeedType::Polyseed(vector.language), + Zeroizing::new(seed_without_accents), + ) + .unwrap(); assert_eq!(seed, seed_without_accents); } @@ -367,7 +452,11 @@ fn test_polyseed() { // Check against ourselves { let seed = Seed::new(&mut OsRng, SeedType::Polyseed(vector.language)); - assert_eq!(seed, Seed::from_string(seed.to_string()).unwrap()); + println!("{}. seed: {}", line!(), *seed.to_string()); + assert_eq!( + seed, + Seed::from_string(SeedType::Polyseed(vector.language), seed.to_string()).unwrap() + ); assert_eq!( seed, Seed::from_entropy( diff --git a/coins/monero/src/transaction.rs b/coins/monero/src/transaction.rs index dceccfb27..20ad40092 100644 --- a/coins/monero/src/transaction.rs +++ b/coins/monero/src/transaction.rs @@ -331,14 +331,11 @@ impl Transaction { } } else if prefix.version == 2 { rct_signatures = RctSignatures::read( - prefix - .inputs - .iter() - .map(|input| match input { - Input::Gen(_) => 0, - Input::ToKey { key_offsets, .. } => key_offsets.len(), - }) - .collect(), + prefix.inputs.first().map_or(0, |input| match input { + Input::Gen(_) => 0, + Input::ToKey { key_offsets, .. } => key_offsets.len(), + }), + prefix.inputs.len(), prefix.outputs.len(), r, )?; diff --git a/coins/monero/src/unreduced_scalar.rs b/coins/monero/src/unreduced_scalar.rs index 500c23f11..d0baa681e 100644 --- a/coins/monero/src/unreduced_scalar.rs +++ b/coins/monero/src/unreduced_scalar.rs @@ -16,7 +16,7 @@ pub(crate) fn PRECOMPUTED_SCALARS() -> [Scalar; 8] { *PRECOMPUTED_SCALARS_CELL.get_or_init(|| { let mut precomputed_scalars = [Scalar::ONE; 8]; for (i, scalar) in precomputed_scalars.iter_mut().enumerate().skip(1) { - *scalar = Scalar::from(((i * 2) + 1) as u8); + *scalar = Scalar::from(u8::try_from((i * 2) + 1).unwrap()); } precomputed_scalars }) @@ -57,7 +57,7 @@ impl UnreducedScalar { let bits = self.as_bits(); let mut naf = [0i8; 256]; for (b, bit) in bits.into_iter().enumerate() { - naf[b] = bit as i8; + naf[b] = i8::try_from(bit).unwrap(); } for i in 0 .. 
256 { @@ -127,8 +127,8 @@ impl UnreducedScalar { for &numb in self.non_adjacent_form().iter().rev() { recovered += recovered; match numb.cmp(&0) { - Ordering::Greater => recovered += precomputed_scalars[(numb as usize) / 2], - Ordering::Less => recovered -= precomputed_scalars[((-numb) as usize) / 2], + Ordering::Greater => recovered += precomputed_scalars[usize::try_from(numb).unwrap() / 2], + Ordering::Less => recovered -= precomputed_scalars[usize::try_from(-numb).unwrap() / 2], Ordering::Equal => (), } } diff --git a/coins/monero/src/wallet/decoys.rs b/coins/monero/src/wallet/decoys.rs index 7f1e64af2..e3b9776f3 100644 --- a/coins/monero/src/wallet/decoys.rs +++ b/coins/monero/src/wallet/decoys.rs @@ -6,7 +6,7 @@ use std_shims::sync::OnceLock; #[cfg(all(feature = "cache-distribution", not(feature = "std")))] use std_shims::sync::Mutex; #[cfg(all(feature = "cache-distribution", feature = "std"))] -use futures::lock::Mutex; +use async_lock::Mutex; use zeroize::{Zeroize, ZeroizeOnDrop}; @@ -28,6 +28,7 @@ const MATURITY: u64 = 60; const RECENT_WINDOW: usize = 15; const BLOCK_TIME: usize = 120; const BLOCKS_PER_YEAR: usize = 365 * 24 * 60 * 60 / BLOCK_TIME; +#[allow(clippy::cast_precision_loss)] const TIP_APPLICATION: f64 = (LOCK_WINDOW * BLOCK_TIME) as f64; // TODO: Resolve safety of this in case a reorg occurs/the network changes @@ -76,6 +77,7 @@ async fn select_n<'a, R: RngCore + CryptoRng, RPC: RpcConnection>( // Use a gamma distribution let mut age = Gamma::::new(19.28, 1.0 / 1.61).unwrap().sample(rng).exp(); + #[allow(clippy::cast_precision_loss)] if age > TIP_APPLICATION { age -= TIP_APPLICATION; } else { @@ -83,6 +85,7 @@ async fn select_n<'a, R: RngCore + CryptoRng, RPC: RpcConnection>( age = (rng.next_u64() % u64::try_from(RECENT_WINDOW * BLOCK_TIME).unwrap()) as f64; } + #[allow(clippy::cast_sign_loss, clippy::cast_possible_truncation)] let o = (age * per_second) as u64; if o < high { let i = distribution.partition_point(|s| *s < (high - 1 - o)); @@ -193,6 +196,7 @@ impl Decoys { distribution.truncate(height + 1); // height is inclusive, and 0 is a valid height let high = distribution[distribution.len() - 1]; + #[allow(clippy::cast_precision_loss)] let per_second = { let blocks = distribution.len().min(BLOCKS_PER_YEAR); let outputs = high - distribution[distribution.len().saturating_sub(blocks + 1)]; diff --git a/coins/monero/src/wallet/mod.rs b/coins/monero/src/wallet/mod.rs index b405c3eea..3b08fd975 100644 --- a/coins/monero/src/wallet/mod.rs +++ b/coins/monero/src/wallet/mod.rs @@ -80,7 +80,7 @@ pub(crate) fn shared_key( // uniqueness || let shared_key = if let Some(uniqueness) = uniqueness { - [uniqueness.as_ref(), &output_derivation].concat().to_vec() + [uniqueness.as_ref(), &output_derivation].concat() } else { output_derivation }; diff --git a/coins/monero/src/wallet/scan.rs b/coins/monero/src/wallet/scan.rs index c819eb135..df73cb8a2 100644 --- a/coins/monero/src/wallet/scan.rs +++ b/coins/monero/src/wallet/scan.rs @@ -356,17 +356,18 @@ impl Scanner { let output_key = output_key.unwrap(); for key in [Some(Some(&tx_key)), additional.as_ref().map(|additional| additional.get(o))] { - let key = if let Some(Some(key)) = key { - key - } else if let Some(None) = key { - // This is non-standard. There were additional keys, yet not one for this output - // https://github.com/monero-project/monero/ - // blob/04a1e2875d6e35e27bb21497988a6c822d319c28/ - // src/cryptonote_basic/cryptonote_format_utils.cpp#L1062 - // TODO: Should this return? 
Where does Monero set the trap handler for this exception? - continue; - } else { - break; + let key = match key { + Some(Some(key)) => key, + Some(None) => { + // This is non-standard. There were additional keys, yet not one for this output + // https://github.com/monero-project/monero/ + // blob/04a1e2875d6e35e27bb21497988a6c822d319c28/ + // src/cryptonote_basic/cryptonote_format_utils.cpp#L1062 + continue; + } + None => { + break; + } }; let (view_tag, shared_key, payment_id_xor) = shared_key( if self.burning_bug.is_none() { Some(uniqueness(&tx.prefix.inputs)) } else { None }, @@ -395,7 +396,7 @@ impl Scanner { } let subaddress = *subaddress.unwrap(); - // If it has torsion, it'll substract the non-torsioned shared key to a torsioned key + // If it has torsion, it'll subtract the non-torsioned shared key to a torsioned key // We will not have a torsioned key in our HashMap of keys, so we wouldn't identify it as // ours // If we did though, it'd enable bypassing the included burning bug protection diff --git a/coins/monero/src/wallet/seed/classic.rs b/coins/monero/src/wallet/seed/classic.rs index 78c4f2099..0605e4bce 100644 --- a/coins/monero/src/wallet/seed/classic.rs +++ b/coins/monero/src/wallet/seed/classic.rs @@ -16,7 +16,7 @@ use crate::{random_scalar, wallet::seed::SeedError}; pub(crate) const CLASSIC_SEED_LENGTH: usize = 24; pub(crate) const CLASSIC_SEED_LENGTH_WITH_CHECKSUM: usize = 25; -#[derive(Clone, Copy, PartialEq, Eq, Debug, Hash)] +#[derive(Clone, Copy, PartialEq, Eq, Debug, Hash, Zeroize)] pub enum Language { Chinese, English, @@ -134,6 +134,7 @@ fn checksum_index(words: &[Zeroizing], lang: &WordList) -> usize { } // Convert a private key to a seed +#[allow(clippy::needless_pass_by_value)] fn key_to_seed(lang: Language, key: Zeroizing) -> ClassicSeed { let bytes = Zeroizing::new(key.to_bytes()); @@ -183,63 +184,58 @@ fn key_to_seed(lang: Language, key: Zeroizing) -> ClassicSeed { } *res += word; } - ClassicSeed(res) + ClassicSeed(lang, res) } // Convert a seed to bytes -pub(crate) fn seed_to_bytes(words: &str) -> Result<(Language, Zeroizing<[u8; 32]>), SeedError> { +pub(crate) fn seed_to_bytes(lang: Language, words: &str) -> Result, SeedError> { // get seed words let words = words.split_whitespace().map(|w| Zeroizing::new(w.to_string())).collect::>(); if (words.len() != CLASSIC_SEED_LENGTH) && (words.len() != CLASSIC_SEED_LENGTH_WITH_CHECKSUM) { panic!("invalid seed passed to seed_to_bytes"); } - // find the language - let (matched_indices, lang_name, lang) = (|| { + let has_checksum = words.len() == CLASSIC_SEED_LENGTH_WITH_CHECKSUM; + if has_checksum && lang == Language::EnglishOld { + Err(SeedError::EnglishOldWithChecksum)?; + } + + // Validate words are in the language word list + let lang_word_list: &WordList = &LANGUAGES()[&lang]; + let matched_indices = (|| { let has_checksum = words.len() == CLASSIC_SEED_LENGTH_WITH_CHECKSUM; let mut matched_indices = Zeroizing::new(vec![]); - // Iterate through all the languages - 'language: for (lang_name, lang) in LANGUAGES().iter() { - matched_indices.zeroize(); - matched_indices.clear(); - - // Iterate through all the words and see if they're all present - for word in &words { - let trimmed = trim(word, lang.unique_prefix_length); - let word = if has_checksum { &trimmed } else { word }; - - if let Some(index) = if has_checksum { - lang.trimmed_word_map.get(word.deref()) - } else { - lang.word_map.get(&word.as_str()) - } { - matched_indices.push(*index); - } else { - continue 'language; - } + // Iterate through all the words 
and see if they're all present + for word in &words { + let trimmed = trim(word, lang_word_list.unique_prefix_length); + let word = if has_checksum { &trimmed } else { word }; + + if let Some(index) = if has_checksum { + lang_word_list.trimmed_word_map.get(word.deref()) + } else { + lang_word_list.word_map.get(&word.as_str()) + } { + matched_indices.push(*index); + } else { + Err(SeedError::InvalidSeed)?; } + } - if has_checksum { - if lang_name == &Language::EnglishOld { - Err(SeedError::EnglishOldWithChecksum)?; - } - - // exclude the last word when calculating a checksum. - let last_word = words.last().unwrap().clone(); - let checksum = words[checksum_index(&words[.. words.len() - 1], lang)].clone(); + if has_checksum { + // exclude the last word when calculating a checksum. + let last_word = words.last().unwrap().clone(); + let checksum = words[checksum_index(&words[.. words.len() - 1], lang_word_list)].clone(); - // check the trimmed checksum and trimmed last word line up - if trim(&checksum, lang.unique_prefix_length) != trim(&last_word, lang.unique_prefix_length) - { - Err(SeedError::InvalidChecksum)?; - } + // check the trimmed checksum and trimmed last word line up + if trim(&checksum, lang_word_list.unique_prefix_length) != + trim(&last_word, lang_word_list.unique_prefix_length) + { + Err(SeedError::InvalidChecksum)?; } - - return Ok((matched_indices, lang_name, lang)); } - Err(SeedError::UnknownLanguage)? + Ok(matched_indices) })()?; // convert to bytes @@ -253,16 +249,17 @@ pub(crate) fn seed_to_bytes(words: &str) -> Result<(Language, Zeroizing<[u8; 32] indices[3] = matched_indices[i3 + 2]; let inner = |i| { - let mut base = (lang.word_list.len() - indices[i] + indices[i + 1]) % lang.word_list.len(); + let mut base = (lang_word_list.word_list.len() - indices[i] + indices[i + 1]) % + lang_word_list.word_list.len(); // Shift the index over for _ in 0 .. 
i { - base *= lang.word_list.len(); + base *= lang_word_list.word_list.len(); } base }; // set the last index indices[0] = indices[1] + inner(1) + inner(2); - if (indices[0] % lang.word_list.len()) != indices[1] { + if (indices[0] % lang_word_list.word_list.len()) != indices[1] { Err(SeedError::InvalidSeed)?; } @@ -272,18 +269,19 @@ pub(crate) fn seed_to_bytes(words: &str) -> Result<(Language, Zeroizing<[u8; 32] bytes.zeroize(); } - Ok((*lang_name, res)) + Ok(res) } #[derive(Clone, PartialEq, Eq, Zeroize)] -pub struct ClassicSeed(Zeroizing<String>); +pub struct ClassicSeed(Language, Zeroizing<String>); impl ClassicSeed { pub(crate) fn new<R: RngCore + CryptoRng>(rng: &mut R, lang: Language) -> ClassicSeed { key_to_seed(lang, Zeroizing::new(random_scalar(rng))) } - pub fn from_string(words: Zeroizing<String>) -> Result<ClassicSeed, SeedError> { - let (lang, entropy) = seed_to_bytes(&words)?; + #[allow(clippy::needless_pass_by_value)] + pub fn from_string(lang: Language, words: Zeroizing<String>) -> Result<ClassicSeed, SeedError> { + let entropy = seed_to_bytes(lang, &words)?; // Make sure this is a valid scalar let scalar = Scalar::from_canonical_bytes(*entropy); @@ -297,16 +295,17 @@ impl ClassicSeed { Ok(Self::from_entropy(lang, entropy).unwrap()) } + #[allow(clippy::needless_pass_by_value)] pub fn from_entropy(lang: Language, entropy: Zeroizing<[u8; 32]>) -> Option<ClassicSeed> { Option::from(Scalar::from_canonical_bytes(*entropy)) .map(|scalar| key_to_seed(lang, Zeroizing::new(scalar))) } pub(crate) fn to_string(&self) -> Zeroizing<String> { - self.0.clone() + self.1.clone() } pub(crate) fn entropy(&self) -> Zeroizing<[u8; 32]> { - seed_to_bytes(&self.0).unwrap().1 + seed_to_bytes(self.0, &self.1).unwrap() } } diff --git a/coins/monero/src/wallet/seed/mod.rs b/coins/monero/src/wallet/seed/mod.rs index 22ba37b07..3cb2911e2 100644 --- a/coins/monero/src/wallet/seed/mod.rs +++ b/coins/monero/src/wallet/seed/mod.rs @@ -61,13 +61,23 @@ impl Seed { } /// Parse a seed from a `String`. - pub fn from_string(words: Zeroizing<String>) -> Result<Seed, SeedError> { - match words.split_whitespace().count() { - CLASSIC_SEED_LENGTH | CLASSIC_SEED_LENGTH_WITH_CHECKSUM => { - ClassicSeed::from_string(words).map(Seed::Classic) + pub fn from_string(seed_type: SeedType, words: Zeroizing<String>) -> Result<Seed, SeedError> { + let word_count = words.split_whitespace().count(); + match seed_type { + SeedType::Classic(lang) => { + if word_count != CLASSIC_SEED_LENGTH && word_count != CLASSIC_SEED_LENGTH_WITH_CHECKSUM { + Err(SeedError::InvalidSeedLength)? + } else { + ClassicSeed::from_string(lang, words).map(Seed::Classic) + } + } + SeedType::Polyseed(lang) => { + if word_count != POLYSEED_LENGTH { + Err(SeedError::InvalidSeedLength)? + } else { + Polyseed::from_string(lang, words).map(Seed::Polyseed) + } } - POLYSEED_LENGTH => Polyseed::from_string(words).map(Seed::Polyseed), - _ => Err(SeedError::InvalidSeedLength)?, } } diff --git a/coins/monero/src/wallet/seed/polyseed.rs b/coins/monero/src/wallet/seed/polyseed.rs index b9833297c..519ba7d4a 100644 --- a/coins/monero/src/wallet/seed/polyseed.rs +++ b/coins/monero/src/wallet/seed/polyseed.rs @@ -55,6 +55,7 @@ const CLEAR_BITS: usize = (SECRET_SIZE * BITS_PER_BYTE) - SECRET_BITS; // 2 // Polyseed calls this CLEAR_MASK and has a very complicated formula for this fundamental // equivalency +#[allow(clippy::cast_possible_truncation)] const LAST_BYTE_SECRET_BITS_MASK: u8 = ((1 << (BITS_PER_BYTE - CLEAR_BITS)) - 1) as u8; const SECRET_BITS_PER_WORD: usize = 10; @@ -261,67 +262,63 @@ impl Polyseed { } /// Create a new `Polyseed` from a String. 
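With this rework, seed parsing no longer guesses the language; the caller names it up front, which is how the ambiguous vectors earlier in these tests (e.g. the all-`audio` seed, valid in both English and Spanish with different checksums) resolve to exactly one interpretation. A minimal call-site sketch for the reworked `Seed::from_string`, assuming the module paths shown in these diffs; the helper name is invented for illustration:

```rust
use zeroize::Zeroizing;
use monero_serai::wallet::seed::{classic, Seed, SeedError, SeedType};

// Hypothetical helper: parse a 24/25-word classic seed whose language the
// caller already knows, instead of relying on the removed auto-detection.
fn parse_classic_seed(lang: classic::Language, words: &str) -> Result<Seed, SeedError> {
  Seed::from_string(SeedType::Classic(lang), Zeroizing::new(words.to_string()))
}
```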
- pub fn from_string(seed: Zeroizing) -> Result { + #[allow(clippy::needless_pass_by_value)] + pub fn from_string(lang: Language, seed: Zeroizing) -> Result { // Decode the seed into its polynomial coefficients let mut poly = [0; POLYSEED_LENGTH]; - let lang = (|| { - 'language: for (name, lang) in LANGUAGES().iter() { - for (i, word) in seed.split_whitespace().enumerate() { - // Find the word's index - fn check_if_matches, I: Iterator>( - has_prefix: bool, - mut lang_words: I, - word: &str, - ) -> Option { - if has_prefix { - // Get the position of the word within the iterator - // Doesn't use starts_with and some words are substrs of others, leading to false - // positives - let mut get_position = || { - lang_words.position(|lang_word| { - let mut lang_word = lang_word.as_ref().chars(); - let mut word = word.chars(); - - let mut res = true; - for _ in 0 .. PREFIX_LEN { - res &= lang_word.next() == word.next(); - } - res - }) - }; - let res = get_position(); - // If another word has this prefix, don't call it a match - if get_position().is_some() { - return None; + + // Validate words are in the lang word list + let lang_word_list: &WordList = &LANGUAGES()[&lang]; + for (i, word) in seed.split_whitespace().enumerate() { + // Find the word's index + fn check_if_matches, I: Iterator>( + has_prefix: bool, + mut lang_words: I, + word: &str, + ) -> Option { + if has_prefix { + // Get the position of the word within the iterator + // Doesn't use starts_with and some words are substrs of others, leading to false + // positives + let mut get_position = || { + lang_words.position(|lang_word| { + let mut lang_word = lang_word.as_ref().chars(); + let mut word = word.chars(); + + let mut res = true; + for _ in 0 .. PREFIX_LEN { + res &= lang_word.next() == word.next(); } res - } else { - lang_words.position(|lang_word| lang_word.as_ref() == word) - } - } - - let Some(coeff) = (if lang.has_accent { - let ascii = |word: &str| word.chars().filter(|c| c.is_ascii()).collect::(); - check_if_matches( - lang.has_prefix, - lang.words.iter().map(|lang_word| ascii(lang_word)), - &ascii(word), - ) - } else { - check_if_matches(lang.has_prefix, lang.words.iter(), word) - }) else { - continue 'language; + }) }; - - // WordList asserts the word list length is less than u16::MAX - poly[i] = u16::try_from(coeff).expect("coeff exceeded u16"); + let res = get_position(); + // If another word has this prefix, don't call it a match + if get_position().is_some() { + return None; + } + res + } else { + lang_words.position(|lang_word| lang_word.as_ref() == word) } - - return Ok(*name); } - Err(SeedError::UnknownLanguage) - })()?; + let Some(coeff) = (if lang_word_list.has_accent { + let ascii = |word: &str| word.chars().filter(char::is_ascii).collect::(); + check_if_matches( + lang_word_list.has_prefix, + lang_word_list.words.iter().map(|lang_word| ascii(lang_word)), + &ascii(word), + ) + } else { + check_if_matches(lang_word_list.has_prefix, lang_word_list.words.iter(), word) + }) else { + Err(SeedError::InvalidSeed)? 
+ }; + + // WordList asserts the word list length is less than u16::MAX + poly[i] = u16::try_from(coeff).expect("coeff exceeded u16"); + } // xor out the coin poly[POLY_NUM_CHECK_DIGITS] ^= COIN; diff --git a/coins/monero/src/wallet/send/builder.rs b/coins/monero/src/wallet/send/builder.rs index eaa199c41..55d0fc29c 100644 --- a/coins/monero/src/wallet/send/builder.rs +++ b/coins/monero/src/wallet/send/builder.rs @@ -136,7 +136,7 @@ impl SignableTransactionBuilder { read.r_seed.clone(), read.inputs.clone(), read.payments.clone(), - read.change_address.clone(), + &read.change_address, read.data.clone(), read.fee_rate, ) diff --git a/coins/monero/src/wallet/send/mod.rs b/coins/monero/src/wallet/send/mod.rs index 616158666..9553d1877 100644 --- a/coins/monero/src/wallet/send/mod.rs +++ b/coins/monero/src/wallet/send/mod.rs @@ -415,7 +415,7 @@ impl SignableTransaction { r_seed: Option<Zeroizing<[u8; 32]>>, inputs: Vec<(SpendableOutput, Decoys)>, payments: Vec<(MoneroAddress, u64)>, - change: Change, + change: &Change, data: Vec<Vec<u8>>, fee_rate: Fee, ) -> Result<SignableTransaction, TransactionError> { diff --git a/coins/monero/src/wallet/send/multisig.rs b/coins/monero/src/wallet/send/multisig.rs index eecfd3fe7..f3c437e56 100644 --- a/coins/monero/src/wallet/send/multisig.rs +++ b/coins/monero/src/wallet/send/multisig.rs @@ -70,7 +70,7 @@ impl SignableTransaction { /// The height is the Monero blockchain height to synchronize around. pub fn multisig( self, - keys: ThresholdKeys<Ed25519>, + keys: &ThresholdKeys<Ed25519>, mut transcript: RecommendedTranscript, ) -> Result<TransactionMachine, TransactionError> { let mut inputs = vec![]; @@ -226,7 +226,11 @@ impl SignMachine<Transaction> for TransactionSignMachine { ); } - fn from_cache(_: (), _: ThresholdKeys<Ed25519>, _: CachedPreprocess) -> Result<Self, FrostError> { + fn from_cache( + (): (), + _: ThresholdKeys<Ed25519>, + _: CachedPreprocess, + ) -> (Self, Self::Preprocess) { unimplemented!( "Monero transactions don't support caching their preprocesses due to {}", "being already bound to a specific transaction" diff --git a/coins/monero/tests/runner.rs b/coins/monero/tests/runner.rs index cb0a38088..fa0b48df4 100644 --- a/coins/monero/tests/runner.rs +++ b/coins/monero/tests/runner.rs @@ -241,7 +241,7 @@ macro_rules! 
test { tx .clone() .multisig( - keys[&i].clone(), + &keys[&i], RecommendedTranscript::new(b"Monero Serai Test Transaction"), ) .unwrap(), diff --git a/coins/monero/tests/send.rs b/coins/monero/tests/send.rs index cd1b919d5..ca4ea5ad8 100644 --- a/coins/monero/tests/send.rs +++ b/coins/monero/tests/send.rs @@ -46,7 +46,7 @@ test!( builder.add_payment(addr, 5); (builder.build().unwrap(), ()) }, - |_, tx: Transaction, mut scanner: Scanner, _| async move { + |_, tx: Transaction, mut scanner: Scanner, ()| async move { let output = scanner.scan_transaction(&tx).not_locked().swap_remove(0); assert_eq!(output.commitment().amount, 5); }, @@ -61,7 +61,7 @@ test!( builder.add_payment(addr, 2000000000000); (builder.build().unwrap(), ()) }, - |_, tx: Transaction, mut scanner: Scanner, _| async move { + |_, tx: Transaction, mut scanner: Scanner, ()| async move { let mut outputs = scanner.scan_transaction(&tx).not_locked(); outputs.sort_by(|x, y| x.commitment().amount.cmp(&y.commitment().amount)); assert_eq!(outputs[0].commitment().amount, 1000000000000); @@ -75,7 +75,7 @@ test!( builder.add_payment(addr, 6); (builder.build().unwrap(), ()) }, - |_, tx: Transaction, mut scanner: Scanner, _| async move { + |_, tx: Transaction, mut scanner: Scanner, ()| async move { let output = scanner.scan_transaction(&tx).not_locked().swap_remove(0); assert_eq!(output.commitment().amount, 6); }, @@ -92,7 +92,7 @@ test!( builder.add_payment(addr, 1000000000000); (builder.build().unwrap(), ()) }, - |_, tx: Transaction, mut scanner: Scanner, _| async move { + |_, tx: Transaction, mut scanner: Scanner, ()| async move { let mut outputs = scanner.scan_transaction(&tx).not_locked(); outputs.sort_by(|x, y| x.commitment().amount.cmp(&y.commitment().amount)); assert_eq!(outputs[0].commitment().amount, 1000000000000); @@ -157,7 +157,7 @@ test!( builder.add_payment(addr, 2000000000000); (builder.build().unwrap(), ()) }, - |_, tx: Transaction, mut scanner: Scanner, _| async move { + |_, tx: Transaction, mut scanner: Scanner, ()| async move { let mut outputs = scanner.scan_transaction(&tx).not_locked(); outputs.sort_by(|x, y| x.commitment().amount.cmp(&y.commitment().amount)); assert_eq!(outputs[0].commitment().amount, 2000000000000); @@ -170,7 +170,7 @@ test!( builder.add_payment(addr, 2); (builder.build().unwrap(), ()) }, - |_, tx: Transaction, mut scanner: Scanner, _| async move { + |_, tx: Transaction, mut scanner: Scanner, ()| async move { let output = scanner.scan_transaction(&tx).not_locked().swap_remove(0); assert_eq!(output.commitment().amount, 2); }, @@ -184,7 +184,7 @@ test!( builder.add_payment(addr, 1000000000000); (builder.build().unwrap(), ()) }, - |_, tx: Transaction, mut scanner: Scanner, _| async move { + |_, tx: Transaction, mut scanner: Scanner, ()| async move { let mut outputs = scanner.scan_transaction(&tx).not_locked(); outputs.sort_by(|x, y| x.commitment().amount.cmp(&y.commitment().amount)); assert_eq!(outputs[0].commitment().amount, 1000000000000); @@ -200,12 +200,12 @@ test!( } (builder.build().unwrap(), ()) }, - |_, tx: Transaction, mut scanner: Scanner, _| async move { + |_, tx: Transaction, mut scanner: Scanner, ()| async move { let mut scanned_tx = scanner.scan_transaction(&tx).not_locked(); let mut output_amounts = HashSet::new(); for i in 0 .. 15 { - output_amounts.insert((i + 1) as u64); + output_amounts.insert(i + 1); } for _ in 0 .. 
15 { let output = scanned_tx.swap_remove(0); @@ -224,7 +224,7 @@ test!( builder.add_payment(addr, 1000000000000); (builder.build().unwrap(), ()) }, - |_, tx: Transaction, mut scanner: Scanner, _| async move { + |_, tx: Transaction, mut scanner: Scanner, ()| async move { let mut outputs = scanner.scan_transaction(&tx).not_locked(); outputs.sort_by(|x, y| x.commitment().amount.cmp(&y.commitment().amount)); assert_eq!(outputs[0].commitment().amount, 1000000000000); @@ -245,7 +245,7 @@ test!( builder.add_payment( view.address(Network::Mainnet, AddressSpec::Subaddress(subaddress)), - (i + 1) as u64, + u64::from(i + 1), ); subaddresses.push(subaddress); } @@ -259,7 +259,7 @@ test!( let mut output_amounts_by_subaddress = HashMap::new(); for i in 0 .. 15 { - output_amounts_by_subaddress.insert((i + 1) as u64, state.1[i]); + output_amounts_by_subaddress.insert(u64::try_from(i + 1).unwrap(), state.1[i]); } for _ in 0 .. 15 { let output = scanned_tx.swap_remove(0); @@ -281,7 +281,7 @@ test!( builder.add_payment(addr, 1000000000000); (builder.build().unwrap(), ()) }, - |_, tx: Transaction, mut scanner: Scanner, _| async move { + |_, tx: Transaction, mut scanner: Scanner, ()| async move { let mut outputs = scanner.scan_transaction(&tx).not_locked(); outputs.sort_by(|x, y| x.commitment().amount.cmp(&y.commitment().amount)); assert_eq!(outputs[0].commitment().amount, 1000000000000); @@ -303,7 +303,7 @@ test!( (builder.build().unwrap(), ()) }, - |_, tx: Transaction, mut scanner: Scanner, _| async move { + |_, tx: Transaction, mut scanner: Scanner, ()| async move { let mut outputs = scanner.scan_transaction(&tx).not_locked(); outputs.sort_by(|x, y| x.commitment().amount.cmp(&y.commitment().amount)); assert_eq!(outputs[0].commitment().amount, 10000); diff --git a/common/db/Cargo.toml b/common/db/Cargo.toml index 9833f755c..78d486a1b 100644 --- a/common/db/Cargo.toml +++ b/common/db/Cargo.toml @@ -13,6 +13,9 @@ rust-version = "1.65" all-features = true rustdoc-args = ["--cfg", "docsrs"] +[lints] +workspace = true + [dependencies] parity-db = { version = "0.4", default-features = false, optional = true } rocksdb = { version = "0.21", default-features = false, features = ["lz4"], optional = true } diff --git a/common/db/src/create_db.rs b/common/db/src/create_db.rs index 1c5bfad1e..abd86e464 100644 --- a/common/db/src/create_db.rs +++ b/common/db/src/create_db.rs @@ -22,7 +22,7 @@ pub fn serai_db_key( /// /// * `db_name` - A database name /// * `field_name` - An item name -/// * `args` - Comma seperated list of key arguments +/// * `args` - Comma separated list of key arguments /// * `field_type` - The return type /// /// # Example @@ -42,26 +42,27 @@ macro_rules! 
create_db { }) => { $( #[derive(Clone, Debug)] - pub struct $field_name; + pub(crate) struct $field_name; impl $field_name { - pub fn key($($arg: $arg_type),*) -> Vec { + pub(crate) fn key($($arg: $arg_type),*) -> Vec { + use scale::Encode; $crate::serai_db_key( stringify!($db_name).as_bytes(), stringify!($field_name).as_bytes(), ($($arg),*).encode() ) } - pub fn set(txn: &mut impl DbTxn $(, $arg: $arg_type)*, data: &$field_type) { + pub(crate) fn set(txn: &mut impl DbTxn $(, $arg: $arg_type)*, data: &$field_type) { let key = $field_name::key($($arg),*); txn.put(&key, borsh::to_vec(data).unwrap()); } - pub fn get(getter: &impl Get, $($arg: $arg_type),*) -> Option<$field_type> { + pub(crate) fn get(getter: &impl Get, $($arg: $arg_type),*) -> Option<$field_type> { getter.get($field_name::key($($arg),*)).map(|data| { borsh::from_slice(data.as_ref()).unwrap() }) } #[allow(dead_code)] - pub fn del(txn: &mut impl DbTxn $(, $arg: $arg_type)*) { + pub(crate) fn del(txn: &mut impl DbTxn $(, $arg: $arg_type)*) { txn.del(&$field_name::key($($arg),*)) } } @@ -82,7 +83,7 @@ macro_rules! db_channel { } impl $field_name { - pub fn send(txn: &mut impl DbTxn $(, $arg: $arg_type)*, value: &$field_type) { + pub(crate) fn send(txn: &mut impl DbTxn $(, $arg: $arg_type)*, value: &$field_type) { // Use index 0 to store the amount of messages let messages_sent_key = $field_name::key($($arg),*, 0); let messages_sent = txn.get(&messages_sent_key).map(|counter| { @@ -97,7 +98,7 @@ macro_rules! db_channel { $field_name::set(txn, $($arg),*, index_to_use, value); } - pub fn try_recv(txn: &mut impl DbTxn $(, $arg: $arg_type)*) -> Option<$field_type> { + pub(crate) fn try_recv(txn: &mut impl DbTxn $(, $arg: $arg_type)*) -> Option<$field_type> { let messages_recvd_key = $field_name::key($($arg),*, 1); let messages_recvd = txn.get(&messages_recvd_key).map(|counter| { u32::from_le_bytes(counter.try_into().unwrap()) diff --git a/common/db/src/mem.rs b/common/db/src/mem.rs index f52408693..ecac300ec 100644 --- a/common/db/src/mem.rs +++ b/common/db/src/mem.rs @@ -6,7 +6,7 @@ use std::{ use crate::*; -/// An atomic operation for the in-memory databae. +/// An atomic operation for the in-memory database. 
#[must_use] #[derive(PartialEq, Eq, Debug)] pub struct MemDbTxn<'a>(&'a MemDb, HashMap, Vec>, HashSet>); diff --git a/common/env/Cargo.toml b/common/env/Cargo.toml index 010b933b0..8e296a66b 100644 --- a/common/env/Cargo.toml +++ b/common/env/Cargo.toml @@ -12,3 +12,6 @@ rust-version = "1.60" [package.metadata.docs.rs] all-features = true rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true diff --git a/common/request/Cargo.toml b/common/request/Cargo.toml index 7fd694253..054e7f581 100644 --- a/common/request/Cargo.toml +++ b/common/request/Cargo.toml @@ -13,6 +13,9 @@ rust-version = "1.64" all-features = true rustdoc-args = ["--cfg", "docsrs"] +[lints] +workspace = true + [dependencies] # Deprecated here means to enable deprecated warnings, not to restore deprecated APIs hyper = { version = "0.14", default-features = false, features = ["http1", "tcp", "client", "runtime", "backports", "deprecated"] } diff --git a/common/request/src/lib.rs b/common/request/src/lib.rs index 4c738e2ef..2c56db945 100644 --- a/common/request/src/lib.rs +++ b/common/request/src/lib.rs @@ -49,11 +49,14 @@ pub struct Client { impl Client { fn connector() -> Connector { + let mut res = HttpConnector::new(); + res.set_keepalive(Some(core::time::Duration::from_secs(60))); #[cfg(feature = "tls")] - let res = - HttpsConnectorBuilder::new().with_native_roots().https_or_http().enable_http1().build(); - #[cfg(not(feature = "tls"))] - let res = HttpConnector::new(); + let res = HttpsConnectorBuilder::new() + .with_native_roots() + .https_or_http() + .enable_http1() + .wrap_connector(res); res } @@ -63,7 +66,7 @@ impl Client { } } - pub fn without_connection_pool(host: String) -> Result { + pub fn without_connection_pool(host: &str) -> Result { Ok(Client { connection: Connection::Connection { connector: Self::connector(), diff --git a/common/request/src/request.rs b/common/request/src/request.rs index 1117e9fd6..f6ca6f447 100644 --- a/common/request/src/request.rs +++ b/common/request/src/request.rs @@ -18,7 +18,7 @@ impl Request { let mut userpass_iter = userpass.split(':'); let username = userpass_iter.next().unwrap().to_string(); - let password = userpass_iter.next().map(str::to_string).unwrap_or_else(String::new); + let password = userpass_iter.next().map_or_else(String::new, str::to_string); zeroize::Zeroize::zeroize(&mut userpass); return Ok((username, password)); diff --git a/common/request/src/response.rs b/common/request/src/response.rs index 04c8472b8..46a4239f1 100644 --- a/common/request/src/response.rs +++ b/common/request/src/response.rs @@ -1,7 +1,7 @@ use hyper::{ StatusCode, header::{HeaderValue, HeaderMap}, - body::{Buf, Body}, + body::{HttpBody, Buf, Body}, }; use crate::{Client, Error}; @@ -17,6 +17,6 @@ impl<'a> Response<'a> { self.0.headers() } pub async fn body(self) -> Result { - hyper::body::aggregate(self.0.into_body()).await.map(Buf::reader).map_err(Error::Hyper) + Ok(self.0.into_body().collect().await.map_err(Error::Hyper)?.aggregate().reader()) } } diff --git a/common/std-shims/Cargo.toml b/common/std-shims/Cargo.toml index 3f248dc6d..4861e00ae 100644 --- a/common/std-shims/Cargo.toml +++ b/common/std-shims/Cargo.toml @@ -13,6 +13,9 @@ rust-version = "1.70" all-features = true rustdoc-args = ["--cfg", "docsrs"] +[lints] +workspace = true + [dependencies] spin = { version = "0.9", default-features = false, features = ["use_ticket_mutex", "once"] } hashbrown = { version = "0.14", default-features = false, features = ["ahash", "inline-more"] } diff --git a/common/zalloc/Cargo.toml 
b/common/zalloc/Cargo.toml index 169f08a46..1a4a6b45f 100644 --- a/common/zalloc/Cargo.toml +++ b/common/zalloc/Cargo.toml @@ -13,6 +13,9 @@ rust-version = "1.60" all-features = true rustdoc-args = ["--cfg", "docsrs"] +[lints] +workspace = true + [dependencies] zeroize = { version = "^1.5", default-features = false } diff --git a/coordinator/Cargo.toml b/coordinator/Cargo.toml index 525ee1ba6..12f8e763f 100644 --- a/coordinator/Cargo.toml +++ b/coordinator/Cargo.toml @@ -13,12 +13,14 @@ publish = false all-features = true rustdoc-args = ["--cfg", "docsrs"] +[lints] +workspace = true + [dependencies] async-trait = { version = "0.1", default-features = false } zeroize = { version = "^1.5", default-features = false, features = ["std"] } rand_core = { version = "0.6", default-features = false, features = ["std"] } -rand_chacha = { version = "0.3", default-features = false, features = ["std"] } blake2 = { version = "0.10", default-features = false, features = ["std"] } @@ -30,6 +32,7 @@ frost-schnorrkel = { path = "../crypto/schnorrkel" } scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive"] } +zalloc = { path = "../common/zalloc" } serai-db = { path = "../common/db" } serai-env = { path = "../common/env" } @@ -38,7 +41,7 @@ message-queue = { package = "serai-message-queue", path = "../message-queue" } tributary = { package = "tributary-chain", path = "./tributary" } sp-application-crypto = { git = "https://github.com/serai-dex/substrate", default-features = false, features = ["std"] } -serai-client = { path = "../substrate/client", default-features = false, features = ["serai"] } +serai-client = { path = "../substrate/client", default-features = false, features = ["serai", "borsh"] } hex = { version = "0.4", default-features = false, features = ["std"] } borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } @@ -46,16 +49,16 @@ borsh = { version = "1", default-features = false, features = ["std", "derive", log = { version = "0.4", default-features = false, features = ["std"] } env_logger = { version = "0.10", default-features = false, features = ["humantime"] } -futures = { version = "0.3", default-features = false, features = ["std"] } +futures-util = { version = "0.3", default-features = false, features = ["std"] } tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] } -libp2p = { version = "0.52", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "gossipsub", "mdns", "macros"] } +libp2p = { version = "0.52", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "gossipsub", "macros"] } [dev-dependencies] -futures = { version = "0.3", default-features = false, features = ["std"] } tributary = { package = "tributary-chain", path = "./tributary", features = ["tests"] } sp-application-crypto = { git = "https://github.com/serai-dex/substrate", default-features = false, features = ["std"] } sp-runtime = { git = "https://github.com/serai-dex/substrate", default-features = false, features = ["std"] } [features] +longer-reattempts = [] parity-db = ["serai-db/parity-db"] rocksdb = ["serai-db/rocksdb"] diff --git a/coordinator/src/cosign_evaluator.rs b/coordinator/src/cosign_evaluator.rs index a38377b41..4ce7faf77 100644 --- a/coordinator/src/cosign_evaluator.rs +++ b/coordinator/src/cosign_evaluator.rs @@ -9,7 +9,7 @@ use tokio::{ time::sleep, }; -use scale::Encode; +use borsh::BorshSerialize; use 
sp_application_crypto::RuntimePublic; use serai_client::{ primitives::{NETWORKS, NetworkId, Signature}, @@ -28,7 +28,8 @@ use crate::{ create_db! { CosignDb { - ReceivedCosign: (set: ValidatorSet, block: [u8; 32]) -> Vec, + ReceivedCosign: (set: ValidatorSet, block: [u8; 32]) -> CosignedBlock, + LatestCosign: (network: NetworkId) -> CosignedBlock, DistinctChain: (set: ValidatorSet) -> (), } } @@ -37,7 +38,7 @@ pub struct CosignEvaluator { db: Mutex, serai: Arc, stakes: RwLock>>, - latest_cosigns: RwLock>, + latest_cosigns: RwLock>, } impl CosignEvaluator { @@ -46,14 +47,14 @@ impl CosignEvaluator { // If we haven't gotten the stake data yet, return let Some(stakes) = stakes_lock.as_ref() else { return }; - let total_stake = stakes.values().cloned().sum::(); + let total_stake = stakes.values().copied().sum::(); let latest_cosigns = self.latest_cosigns.read().await; let mut highest_block = 0; - for (block_num, _) in latest_cosigns.values() { + for cosign in latest_cosigns.values() { let mut networks = HashSet::new(); - for (network, (sub_block_num, _)) in &*latest_cosigns { - if sub_block_num >= block_num { + for (network, sub_cosign) in &*latest_cosigns { + if sub_cosign.block_number >= cosign.block_number { networks.insert(network); } } @@ -61,7 +62,7 @@ impl CosignEvaluator { networks.into_iter().map(|network| stakes.get(network).unwrap_or(&0)).sum::(); let needed_stake = ((total_stake * 2) / 3) + 1; if (total_stake == 0) || (sum_stake > needed_stake) { - highest_block = highest_block.max(*block_num); + highest_block = highest_block.max(cosign.block_number); } } @@ -106,7 +107,7 @@ impl CosignEvaluator { async fn handle_new_cosign(&self, cosign: CosignedBlock) -> Result<(), SeraiError> { // If we already have this cosign or a newer cosign, return if let Some(latest) = self.latest_cosigns.read().await.get(&cosign.network) { - if latest.0 >= cosign.block_number { + if latest.block_number >= cosign.block_number { return Ok(()); } } @@ -180,7 +181,8 @@ impl CosignEvaluator { { let mut db = self.db.lock().await; let mut txn = db.txn(); - ReceivedCosign::set(&mut txn, set_with_keys, cosign.block, &cosign.encode()); + ReceivedCosign::set(&mut txn, set_with_keys, cosign.block, &cosign); + LatestCosign::set(&mut txn, set_with_keys.network, &(cosign)); txn.commit(); } @@ -258,7 +260,7 @@ impl CosignEvaluator { } else { { let mut latest_cosigns = self.latest_cosigns.write().await; - latest_cosigns.insert(cosign.network, (block.number(), cosign)); + latest_cosigns.insert(cosign.network, cosign); } self.update_latest_cosign().await; } @@ -268,11 +270,18 @@ impl CosignEvaluator { #[allow(clippy::new_ret_no_self)] pub fn new(db: D, p2p: P, serai: Arc) -> mpsc::UnboundedSender { + let mut latest_cosigns = HashMap::new(); + for network in NETWORKS { + if let Some(cosign) = LatestCosign::get(&db, network) { + latest_cosigns.insert(network, cosign); + } + } + let evaluator = Arc::new(Self { db: Mutex::new(db), serai, stakes: RwLock::new(None), - latest_cosigns: RwLock::new(HashMap::new()), + latest_cosigns: RwLock::new(latest_cosigns), }); // Spawn a task to update stakes regularly @@ -310,15 +319,11 @@ impl CosignEvaluator { tokio::spawn({ async move { loop { - let cosigns = evaluator - .latest_cosigns - .read() - .await - .values() - .map(|cosign| cosign.1) - .collect::>(); + let cosigns = evaluator.latest_cosigns.read().await.values().copied().collect::>(); for cosign in cosigns { - P2p::broadcast(&p2p, P2pMessageKind::CosignedBlock, cosign.encode()).await; + let mut buf = vec![]; + cosign.serialize(&mut 
buf).unwrap(); + P2p::broadcast(&p2p, P2pMessageKind::CosignedBlock, buf).await; } sleep(Duration::from_secs(60)).await; } diff --git a/coordinator/src/db.rs b/coordinator/src/db.rs index 560946bc9..09eab1732 100644 --- a/coordinator/src/db.rs +++ b/coordinator/src/db.rs @@ -4,6 +4,7 @@ use blake2::{ }; use scale::Encode; +use borsh::{BorshSerialize, BorshDeserialize}; use serai_client::{ primitives::NetworkId, validator_sets::primitives::{Session, ValidatorSet}, @@ -20,7 +21,6 @@ create_db!( HandledMessageDb: (network: NetworkId) -> u64, ActiveTributaryDb: () -> Vec, RetiredTributaryDb: (set: ValidatorSet) -> (), - SignedTransactionDb: (order: &[u8], nonce: u32) -> Vec, FirstPreprocessDb: ( network: NetworkId, id_type: RecognizedIdType, @@ -43,7 +43,7 @@ impl ActiveTributaryDb { let mut tributaries = vec![]; while !bytes_ref.is_empty() { - tributaries.push(TributarySpec::read(&mut bytes_ref).unwrap()); + tributaries.push(TributarySpec::deserialize_reader(&mut bytes_ref).unwrap()); } (bytes, tributaries) @@ -57,7 +57,7 @@ impl ActiveTributaryDb { } } - spec.write(&mut existing_bytes).unwrap(); + spec.serialize(&mut existing_bytes).unwrap(); ActiveTributaryDb::set(txn, &existing_bytes); } @@ -72,41 +72,26 @@ impl ActiveTributaryDb { let mut bytes = vec![]; for active in active { - active.write(&mut bytes).unwrap(); + active.serialize(&mut bytes).unwrap(); } Self::set(txn, &bytes); RetiredTributaryDb::set(txn, set, &()); } } -impl SignedTransactionDb { - pub fn take_signed_transaction( - txn: &mut impl DbTxn, - order: &[u8], - nonce: u32, - ) -> Option { - let res = SignedTransactionDb::get(txn, order, nonce) - .map(|bytes| Transaction::read(&mut bytes.as_slice()).unwrap()); - if res.is_some() { - Self::del(txn, order, nonce); - } - res - } -} - impl FirstPreprocessDb { pub fn save_first_preprocess( txn: &mut impl DbTxn, network: NetworkId, id_type: RecognizedIdType, id: &[u8], - preprocess: Vec>, + preprocess: &Vec>, ) { if let Some(existing) = FirstPreprocessDb::get(txn, network, id_type, id) { - assert_eq!(existing, preprocess, "saved a distinct first preprocess"); + assert_eq!(&existing, preprocess, "saved a distinct first preprocess"); return; } - FirstPreprocessDb::set(txn, network, id_type, id, &preprocess); + FirstPreprocessDb::set(txn, network, id_type, id, preprocess); } } @@ -129,7 +114,7 @@ impl HandoverBatchDb { } } impl QueuedBatchesDb { - pub fn queue(txn: &mut impl DbTxn, set: ValidatorSet, batch: Transaction) { + pub fn queue(txn: &mut impl DbTxn, set: ValidatorSet, batch: &Transaction) { let mut batches = Self::get(txn, set).unwrap_or_default(); batch.write(&mut batches).unwrap(); Self::set(txn, set, &batches); diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index e67684502..d0f387411 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -18,6 +18,7 @@ use frost::Participant; use serai_db::{DbTxn, Db}; use scale::Encode; +use borsh::BorshSerialize; use serai_client::{ primitives::NetworkId, validator_sets::primitives::{Session, ValidatorSet, KeyPair}, @@ -31,12 +32,12 @@ use tokio::{ time::sleep, }; -use ::tributary::{ - ProvidedError, TransactionKind, TransactionError, TransactionTrait, Block, Tributary, -}; +use ::tributary::{ProvidedError, TransactionKind, TransactionTrait, Block, Tributary}; mod tributary; -use crate::tributary::{TributarySpec, SignData, Transaction, scanner::RecognizedIdType, PlanIds}; +use crate::tributary::{ + TributarySpec, Label, SignData, Transaction, scanner::RecognizedIdType, PlanIds, +}; mod db; use db::*; @@ 
-62,6 +63,10 @@ use cosign_evaluator::CosignEvaluator; #[cfg(test)] pub mod tests; +#[global_allocator] +static ALLOCATOR: zalloc::ZeroizingAlloc = + zalloc::ZeroizingAlloc(std::alloc::System); + #[derive(Clone)] pub struct ActiveTributary { pub spec: TributarySpec, @@ -107,14 +112,14 @@ async fn add_tributary( // This is safe due to the message-queue deduplicating based off the intent system let set = spec.set(); let our_i = spec - .i(Ristretto::generator() * key.deref()) + .i(&[], Ristretto::generator() * key.deref()) .expect("adding a tributary for a set we aren't in set for"); processors .send( set.network, processor_messages::key_gen::CoordinatorMessage::GenerateKey { id: processor_messages::key_gen::KeyGenId { session: set.session, attempt: 0 }, - params: frost::ThresholdParams::new(spec.t(), spec.n(), our_i.start).unwrap(), + params: frost::ThresholdParams::new(spec.t(), spec.n(&[]), our_i.start).unwrap(), shares: u16::from(our_i.end) - u16::from(our_i.start), }, ) @@ -126,48 +131,6 @@ async fn add_tributary( .unwrap(); } -async fn publish_signed_transaction( - txn: &mut D::Transaction<'_>, - tributary: &Tributary, - tx: Transaction, -) { - log::debug!("publishing transaction {}", hex::encode(tx.hash())); - - let (order, signer) = if let TransactionKind::Signed(order, signed) = tx.kind() { - let signer = signed.signer; - - // Safe as we should deterministically create transactions, meaning if this is already on-disk, - // it's what we're saving now - SignedTransactionDb::set(txn, &order, signed.nonce, &tx.serialize()); - - (order, signer) - } else { - panic!("non-signed transaction passed to publish_signed_transaction"); - }; - - // If we're trying to publish 5, when the last transaction published was 3, this will delay - // publication until the point in time we publish 4 - while let Some(tx) = SignedTransactionDb::take_signed_transaction( - txn, - &order, - tributary - .next_nonce(&signer, &order) - .await - .expect("we don't have a nonce, meaning we aren't a participant on this tributary"), - ) { - // We need to return a proper error here to enable that, due to a race condition around - // multiple publications - match tributary.add_transaction(tx.clone()).await { - Ok(_) => {} - // Some asynchonicity if InvalidNonce, assumed safe to deterministic nonces - Err(TransactionError::InvalidNonce) => { - log::warn!("publishing TX {tx:?} returned InvalidNonce. was it already added?") - } - Err(e) => panic!("created an invalid transaction: {e:?}"), - } - } -} - // TODO: Find a better pattern for this static HANDOVER_VERIFY_QUEUE_LOCK: OnceLock> = OnceLock::new(); @@ -200,17 +163,17 @@ async fn handle_processor_message( // We'll only receive these if we fired GenerateKey, which we'll only do if if we're // in-set, making the Tributary relevant ProcessorMessage::KeyGen(inner_msg) => match inner_msg { - key_gen::ProcessorMessage::Commitments { id, .. } => Some(id.session), - key_gen::ProcessorMessage::InvalidCommitments { id, .. } => Some(id.session), - key_gen::ProcessorMessage::Shares { id, .. } => Some(id.session), - key_gen::ProcessorMessage::InvalidShare { id, .. } => Some(id.session), - key_gen::ProcessorMessage::GeneratedKeyPair { id, .. } => Some(id.session), + key_gen::ProcessorMessage::Commitments { id, .. } | + key_gen::ProcessorMessage::InvalidCommitments { id, .. } | + key_gen::ProcessorMessage::Shares { id, .. } | + key_gen::ProcessorMessage::InvalidShare { id, .. } | + key_gen::ProcessorMessage::GeneratedKeyPair { id, .. } | key_gen::ProcessorMessage::Blame { id, .. 
} => Some(id.session), }, ProcessorMessage::Sign(inner_msg) => match inner_msg { // We'll only receive InvalidParticipant/Preprocess/Share if we're actively signing - sign::ProcessorMessage::InvalidParticipant { id, .. } => Some(id.session), - sign::ProcessorMessage::Preprocess { id, .. } => Some(id.session), + sign::ProcessorMessage::InvalidParticipant { id, .. } | + sign::ProcessorMessage::Preprocess { id, .. } | sign::ProcessorMessage::Share { id, .. } => Some(id.session), // While the Processor's Scanner will always emit Completed, that's routed through the // Signer and only becomes a ProcessorMessage::Completed if the Signer is present and @@ -274,9 +237,9 @@ async fn handle_processor_message( None } // We'll only fire these if we are the Substrate signer, making the Tributary relevant - coordinator::ProcessorMessage::InvalidParticipant { id, .. } => Some(id.session), - coordinator::ProcessorMessage::CosignPreprocess { id, .. } => Some(id.session), - coordinator::ProcessorMessage::BatchPreprocess { id, .. } => Some(id.session), + coordinator::ProcessorMessage::InvalidParticipant { id, .. } | + coordinator::ProcessorMessage::CosignPreprocess { id, .. } | + coordinator::ProcessorMessage::BatchPreprocess { id, .. } | coordinator::ProcessorMessage::SubstrateShare { id, .. } => Some(id.session), coordinator::ProcessorMessage::CosignedBlock { block_number, block, signature } => { let cosigned_block = CosignedBlock { @@ -290,7 +253,9 @@ async fn handle_processor_message( }, }; cosign_channel.send(cosigned_block).unwrap(); - P2p::broadcast(p2p, P2pMessageKind::CosignedBlock, cosigned_block.encode()).await; + let mut buf = vec![]; + cosigned_block.serialize(&mut buf).unwrap(); + P2p::broadcast(p2p, P2pMessageKind::CosignedBlock, buf).await; None } }, @@ -317,7 +282,9 @@ async fn handle_processor_message( BatchDb::set(&mut txn, batch.batch.network, batch.batch.id, &batch.clone()); // Get the next-to-execute batch ID - let mut next = substrate::get_expected_next_batch(serai, network).await; + let Ok(mut next) = substrate::expected_next_batch(serai, network).await else { + return false; + }; // Since we have a new batch, publish all batches yet to be published to Serai // This handles the edge-case where batch n+1 is signed before batch n is @@ -329,7 +296,10 @@ async fn handle_processor_message( while let Some(batch) = batches.pop_front() { // If this Batch should no longer be published, continue - if substrate::get_expected_next_batch(serai, network).await > batch.batch.id { + let Ok(expected_next_batch) = substrate::expected_next_batch(serai, network).await else { + return false; + }; + if expected_next_batch > batch.batch.id { continue; } @@ -398,31 +368,46 @@ async fn handle_processor_message( let txs = match msg.msg.clone() { ProcessorMessage::KeyGen(inner_msg) => match inner_msg { key_gen::ProcessorMessage::Commitments { id, commitments } => { - vec![Transaction::DkgCommitments(id.attempt, commitments, Transaction::empty_signed())] + vec![Transaction::DkgCommitments { + attempt: id.attempt, + commitments, + signed: Transaction::empty_signed(), + }] } - key_gen::ProcessorMessage::InvalidCommitments { id: _, faulty } => { - // This doesn't need the ID since it's a Provided transaction which everyone will provide - // With this provision comes explicit ordering (with regards to other RemoveParticipant - // transactions) and group consensus - // Accordingly, this can't be replayed - // It could be included on-chain early/late with regards to the chain's active attempt, - // which attempt 
scheduling is written to avoid - vec![Transaction::RemoveParticipant(faulty)] + key_gen::ProcessorMessage::InvalidCommitments { id, faulty } => { + // This doesn't have guaranteed timing + // + // While the party *should* be fatally slashed and not included in future attempts, + // they'll actually be fatally slashed (assuming liveness before the Tributary retires) + // and not included in future attempts *which begin after the latency window completes* + let participant = spec + .reverse_lookup_i( + &crate::tributary::removed_as_of_dkg_attempt(&txn, spec.genesis(), id.attempt) + .expect("participating in DKG attempt yet we didn't save who was removed"), + faulty, + ) + .unwrap(); + vec![Transaction::RemoveParticipantDueToDkg { + participant, + signed: Transaction::empty_signed(), + }] } key_gen::ProcessorMessage::Shares { id, mut shares } => { // Create a MuSig-based machine to inform Substrate of this key generation - let nonces = crate::tributary::dkg_confirmation_nonces(key, spec, id.attempt); + let nonces = crate::tributary::dkg_confirmation_nonces(key, spec, &mut txn, id.attempt); + let removed = crate::tributary::removed_as_of_dkg_attempt(&txn, genesis, id.attempt) + .expect("participating in a DKG attempt yet we didn't track who was removed yet?"); let our_i = spec - .i(pub_key) - .expect("processor message to DKG for a session we aren't a validator in"); + .i(&removed, pub_key) + .expect("processor message to DKG for an attempt we aren't a validator in"); // `tx_shares` needs to be done here as while it can be serialized from the HashMap // without further context, it can't be deserialized without context let mut tx_shares = Vec::with_capacity(shares.len()); for shares in &mut shares { tx_shares.push(vec![]); - for i in 1 ..= spec.n() { + for i in 1 ..= spec.n(&removed) { let i = Participant::new(i).unwrap(); if our_i.contains(&i) { if shares.contains_key(&i) { @@ -444,27 +429,13 @@ async fn handle_processor_message( }] } key_gen::ProcessorMessage::InvalidShare { id, accuser, faulty, blame } => { - // Check if the MuSig signature had any errors as if so, we need to provide - // RemoveParticipant - // As for the safety of calling error_generating_key_pair, the processor is presumed - // to only send InvalidShare or GeneratedKeyPair for a given attempt - let mut txs = if let Some(faulty) = - crate::tributary::error_generating_key_pair::<_>(&txn, key, spec, id.attempt) - { - vec![Transaction::RemoveParticipant(faulty)] - } else { - vec![] - }; - - txs.push(Transaction::InvalidDkgShare { + vec![Transaction::InvalidDkgShare { attempt: id.attempt, accuser, faulty, blame, signed: Transaction::empty_signed(), - }); - - txs + }] } key_gen::ProcessorMessage::GeneratedKeyPair { id, substrate_key, network_key } => { // TODO2: Check the KeyGenId fields @@ -478,19 +449,42 @@ async fn handle_processor_message( id.attempt, ); + // TODO: Move this into generated_key_pair? 
match share { Ok(share) => { - vec![Transaction::DkgConfirmed(id.attempt, share, Transaction::empty_signed())] + vec![Transaction::DkgConfirmed { + attempt: id.attempt, + confirmation_share: share, + signed: Transaction::empty_signed(), + }] } Err(p) => { - vec![Transaction::RemoveParticipant(p)] + let participant = spec + .reverse_lookup_i( + &crate::tributary::removed_as_of_dkg_attempt(&txn, spec.genesis(), id.attempt) + .expect("participating in DKG attempt yet we didn't save who was removed"), + p, + ) + .unwrap(); + vec![Transaction::RemoveParticipantDueToDkg { + participant, + signed: Transaction::empty_signed(), + }] } } } - // This is a response to the ordered VerifyBlame, which is why this satisfies the provided - // transaction's needs to be perfectly ordered - key_gen::ProcessorMessage::Blame { id: _, participant } => { - vec![Transaction::RemoveParticipant(participant)] + key_gen::ProcessorMessage::Blame { id, participant } => { + let participant = spec + .reverse_lookup_i( + &crate::tributary::removed_as_of_dkg_attempt(&txn, spec.genesis(), id.attempt) + .expect("participating in DKG attempt yet we didn't save who was removed"), + participant, + ) + .unwrap(); + vec![Transaction::RemoveParticipantDueToDkg { + participant, + signed: Transaction::empty_signed(), + }] } }, ProcessorMessage::Sign(msg) => match msg { @@ -506,23 +500,25 @@ async fn handle_processor_message( network, RecognizedIdType::Plan, &id.id, - preprocesses, + &preprocesses, ); vec![] } else { - vec![Transaction::SignPreprocess(SignData { + vec![Transaction::Sign(SignData { plan: id.id, attempt: id.attempt, + label: Label::Preprocess, data: preprocesses, signed: Transaction::empty_signed(), })] } } sign::ProcessorMessage::Share { id, shares } => { - vec![Transaction::SignShare(SignData { + vec![Transaction::Sign(SignData { plan: id.id, attempt: id.attempt, + label: Label::Share, data: shares, signed: Transaction::empty_signed(), })] @@ -555,9 +551,10 @@ async fn handle_processor_message( vec![] } coordinator::ProcessorMessage::CosignPreprocess { id, preprocesses } => { - vec![Transaction::SubstratePreprocess(SignData { + vec![Transaction::SubstrateSign(SignData { plan: id.id, attempt: id.attempt, + label: Label::Preprocess, data: preprocesses.into_iter().map(Into::into).collect(), signed: Transaction::empty_signed(), })] @@ -581,18 +578,18 @@ async fn handle_processor_message( let SubstrateSignableId::Batch(id) = id.id else { panic!("BatchPreprocess SubstrateSignableId wasn't Batch") }; - id.encode() + id.to_le_bytes() }, - preprocesses.into_iter().map(Into::into).collect(), + &preprocesses.into_iter().map(Into::into).collect::>(), ); - let intended = Transaction::Batch( - block.0, - match id.id { + let intended = Transaction::Batch { + block: block.0, + batch: match id.id { SubstrateSignableId::Batch(id) => id, _ => panic!("BatchPreprocess did not contain Batch ID"), }, - ); + }; // If this is the new key's first Batch, only create this TX once we verify all // all prior published `Batch`s @@ -628,8 +625,7 @@ async fn handle_processor_message( // the prior Batch hasn't been verified yet... 
if (last_received != 0) && LastVerifiedBatchDb::get(&txn, msg.network) - .map(|last_verified| last_verified < (last_received - 1)) - .unwrap_or(true) + .map_or(true, |last_verified| last_verified < (last_received - 1)) { // Withhold this TX until we verify all prior `Batch`s queue = true; @@ -637,7 +633,7 @@ async fn handle_processor_message( } if queue { - QueuedBatchesDb::queue(&mut txn, spec.set(), intended); + QueuedBatchesDb::queue(&mut txn, spec.set(), &intended); vec![] } else { // Because this is post-verification of the handover batch, take all queued `Batch`s @@ -649,26 +645,29 @@ async fn handle_processor_message( res } } else { - vec![Transaction::SubstratePreprocess(SignData { + vec![Transaction::SubstrateSign(SignData { plan: id.id, attempt: id.attempt, + label: Label::Preprocess, data: preprocesses.into_iter().map(Into::into).collect(), signed: Transaction::empty_signed(), })] } } coordinator::ProcessorMessage::SubstrateShare { id, shares } => { - vec![Transaction::SubstrateShare(SignData { + vec![Transaction::SubstrateSign(SignData { plan: id.id, attempt: id.attempt, + label: Label::Share, data: shares.into_iter().map(|share| share.to_vec()).collect(), signed: Transaction::empty_signed(), })] } + #[allow(clippy::match_same_arms)] // Allowed to preserve layout coordinator::ProcessorMessage::CosignedBlock { .. } => unreachable!(), }, ProcessorMessage::Substrate(inner_msg) => match inner_msg { - processor_messages::substrate::ProcessorMessage::Batch { .. } => unreachable!(), + processor_messages::substrate::ProcessorMessage::Batch { .. } | processor_messages::substrate::ProcessorMessage::SignedBatch { .. } => unreachable!(), }, }; @@ -706,7 +705,7 @@ async fn handle_processor_message( } TransactionKind::Signed(_, _) => { tx.sign(&mut OsRng, genesis, key); - publish_signed_transaction(&mut txn, tributary, tx).await; + tributary::publish_signed_transaction(&mut txn, tributary, tx).await; } } } @@ -838,9 +837,8 @@ async fn handle_cosigns_and_batch_publication( let _hvq_lock = HANDOVER_VERIFY_QUEUE_LOCK.get_or_init(|| Mutex::new(())).lock().await; let mut txn = db.txn(); let mut to_publish = vec![]; - let start_id = LastVerifiedBatchDb::get(&txn, network) - .map(|already_verified| already_verified + 1) - .unwrap_or(0); + let start_id = + LastVerifiedBatchDb::get(&txn, network).map_or(0, |already_verified| already_verified + 1); if let Some(last_id) = substrate::verify_published_batches::(&mut txn, network, u32::MAX).await { @@ -862,7 +860,7 @@ async fn handle_cosigns_and_batch_publication( to_publish.push((set.session, queued.remove(0))); // Re-queue the remaining batches for remaining in queued { - QueuedBatchesDb::queue(&mut txn, set, remaining); + QueuedBatchesDb::queue(&mut txn, set, &remaining); } } @@ -957,10 +955,8 @@ pub async fn run( key: Zeroizing<::F>, p2p: P, processors: Pro, - serai: Serai, + serai: Arc, ) { - let serai = Arc::new(serai); - let (new_tributary_spec_send, mut new_tributary_spec_recv) = mpsc::unbounded_channel(); // Reload active tributaries from the database for spec in ActiveTributaryDb::active_tributaries(&raw_db).1 { @@ -1079,16 +1075,18 @@ pub async fn run( }; let mut tx = match id_type { - RecognizedIdType::Batch => Transaction::SubstratePreprocess(SignData { + RecognizedIdType::Batch => Transaction::SubstrateSign(SignData { data: get_preprocess(&raw_db, id_type, &id).await, - plan: SubstrateSignableId::Batch(id.as_slice().try_into().unwrap()), + plan: SubstrateSignableId::Batch(u32::from_le_bytes(id.try_into().unwrap())), + label: 
Label::Preprocess, attempt: 0, signed: Transaction::empty_signed(), }), - RecognizedIdType::Plan => Transaction::SignPreprocess(SignData { + RecognizedIdType::Plan => Transaction::Sign(SignData { data: get_preprocess(&raw_db, id_type, &id).await, plan: id.try_into().unwrap(), + label: Label::Preprocess, attempt: 0, signed: Transaction::empty_signed(), }), @@ -1119,7 +1117,7 @@ pub async fn run( // TODO: Should this not take a txn accordingly? It's best practice to take a txn, yet // taking a txn fails to declare its achieved independence let mut txn = raw_db.txn(); - publish_signed_transaction(&mut txn, tributary, tx).await; + tributary::publish_signed_transaction(&mut txn, tributary, tx).await; txn.commit(); break; } @@ -1216,11 +1214,10 @@ async fn main() { key_bytes.zeroize(); key }; - let p2p = LibP2p::new(); let processors = Arc::new(MessageQueue::from_env(Service::Coordinator)); - let serai = || async { + let serai = (async { loop { let Ok(serai) = Serai::new(format!( "http://{}:9944", @@ -1233,8 +1230,10 @@ async fn main() { continue; }; log::info!("made initial connection to Serai node"); - return serai; + return Arc::new(serai); } - }; - run(db, key, p2p, processors, serai().await).await + }) + .await; + let p2p = LibP2p::new(serai.clone()); + run(db, key, p2p, processors, serai).await } diff --git a/coordinator/src/p2p.rs b/coordinator/src/p2p.rs index f3dda43b6..ce6be6880 100644 --- a/coordinator/src/p2p.rs +++ b/coordinator/src/p2p.rs @@ -7,19 +7,22 @@ use std::{ }; use async_trait::async_trait; +use rand_core::{RngCore, OsRng}; -use scale::{Encode, Decode}; -use serai_client::primitives::NetworkId; +use scale::Encode; +use borsh::{BorshSerialize, BorshDeserialize}; +use serai_client::{primitives::NetworkId, validator_sets::primitives::ValidatorSet, Serai}; use serai_db::Db; +use futures_util::StreamExt; use tokio::{ sync::{Mutex, RwLock, mpsc, broadcast}, time::sleep, }; use libp2p::{ - futures::StreamExt, + core::multiaddr::{Protocol, Multiaddr}, identity::Keypair, PeerId, tcp::Config as TcpConfig, @@ -39,7 +42,7 @@ use crate::{Transaction, Block, Tributary, ActiveTributary, TributaryEvent}; const LIBP2P_TOPIC: &str = "serai-coordinator"; -#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode)] +#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, BorshSerialize, BorshDeserialize)] pub struct CosignedBlock { pub network: NetworkId, pub block_number: u64, @@ -59,11 +62,10 @@ pub enum P2pMessageKind { impl P2pMessageKind { fn genesis(&self) -> Option<[u8; 32]> { match self { - P2pMessageKind::KeepAlive => None, - P2pMessageKind::Tributary(genesis) => Some(*genesis), - P2pMessageKind::Heartbeat(genesis) => Some(*genesis), + P2pMessageKind::KeepAlive | P2pMessageKind::CosignedBlock => None, + P2pMessageKind::Tributary(genesis) | + P2pMessageKind::Heartbeat(genesis) | P2pMessageKind::Block(genesis) => Some(*genesis), - P2pMessageKind::CosignedBlock => None, } } @@ -128,8 +130,8 @@ pub struct Message { pub trait P2p: Send + Sync + Clone + fmt::Debug + TributaryP2p { type Id: Send + Sync + Clone + Copy + fmt::Debug; - async fn subscribe(&self, genesis: [u8; 32]); - async fn unsubscribe(&self, genesis: [u8; 32]); + async fn subscribe(&self, set: ValidatorSet, genesis: [u8; 32]); + async fn unsubscribe(&self, set: ValidatorSet, genesis: [u8; 32]); async fn send_raw(&self, to: Self::Id, genesis: Option<[u8; 32]>, msg: Vec); async fn broadcast_raw(&self, genesis: Option<[u8; 32]>, msg: Vec); @@ -191,14 +193,12 @@ pub trait P2p: Send + Sync + Clone + fmt::Debug + TributaryP2p { 
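`CosignedBlock` switches from SCALE `Encode`/`Decode` to borsh, matching the `deserialize_reader` call in the message handler further below. A self-contained round-trip sketch (fields simplified; the real struct also carries `network: NetworkId` and signature bytes):

```rust
use borsh::{BorshSerialize, BorshDeserialize};

// Stand-in with primitive fields so this sketch compiles on its own
#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
struct CosignedBlock {
  block_number: u64,
  block: [u8; 32],
}

fn main() {
  let cosign = CosignedBlock { block_number: 5, block: [0xff; 32] };
  let bytes = borsh::to_vec(&cosign).unwrap();
  // deserialize_reader is the same entry point the new P2P handler uses
  let read = CosignedBlock::deserialize_reader(&mut bytes.as_slice()).unwrap();
  assert_eq!(cosign, read);
}
```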
 #[derive(NetworkBehaviour)]
 struct Behavior {
   gossipsub: GsBehavior,
-  #[cfg(debug_assertions)]
-  mdns: libp2p::mdns::tokio::Behaviour,
 }
 
 #[allow(clippy::type_complexity)]
 #[derive(Clone)]
 pub struct LibP2p {
-  subscribe: Arc<Mutex<mpsc::UnboundedSender<(bool, [u8; 32])>>>,
+  subscribe: Arc<Mutex<mpsc::UnboundedSender<(bool, ValidatorSet, [u8; 32])>>>,
   broadcast: Arc<Mutex<mpsc::UnboundedSender<(Option<[u8; 32]>, Vec<u8>)>>>,
   receive: Arc<Mutex<mpsc::UnboundedReceiver<(PeerId, Vec<u8>)>>>,
 }
@@ -210,14 +210,13 @@ impl fmt::Debug for LibP2p {
 impl LibP2p {
   #[allow(clippy::new_without_default)]
-  pub fn new() -> Self {
+  pub fn new(serai: Arc<Serai>) -> Self {
     // Block size limit + 1 KB of space for signatures/metadata
     const MAX_LIBP2P_MESSAGE_SIZE: usize = tributary::BLOCK_SIZE_LIMIT + 1024;
 
     log::info!("creating a libp2p instance");
 
     let throwaway_key_pair = Keypair::generate_ed25519();
-    let throwaway_peer_id = PeerId::from(throwaway_key_pair.public());
 
     let behavior = Behavior {
       gossipsub: {
@@ -259,14 +258,6 @@ impl LibP2p {
         gossipsub
       },
-
-      // Only use MDNS in debug environments, as it should have no value in a release build
-      #[cfg(debug_assertions)]
-      mdns: {
-        log::info!("creating mdns service");
-        libp2p::mdns::tokio::Behaviour::new(libp2p::mdns::Config::default(), throwaway_peer_id)
-          .unwrap()
-      },
     };
 
     // Uses noise for authentication, yamux for multiplexing
@@ -295,28 +286,25 @@ impl LibP2p {
     let (receive_send, receive_recv) = mpsc::unbounded_channel();
     let (subscribe_send, mut subscribe_recv) = mpsc::unbounded_channel();
 
-    fn topic_for_genesis(genesis: [u8; 32]) -> IdentTopic {
-      IdentTopic::new(format!("{LIBP2P_TOPIC}-{}", hex::encode(genesis)))
+    fn topic_for_set(set: ValidatorSet) -> IdentTopic {
+      IdentTopic::new(format!("{LIBP2P_TOPIC}-{}", hex::encode(set.encode())))
     }
 
     tokio::spawn({
       let mut time_of_last_p2p_message = Instant::now();
 
       #[allow(clippy::needless_pass_by_ref_mut)] // False positive
-      async fn broadcast_raw(
+      fn broadcast_raw(
         p2p: &mut Swarm<Behavior>,
         time_of_last_p2p_message: &mut Instant,
-        genesis: Option<[u8; 32]>,
+        set: Option<ValidatorSet>,
         msg: Vec<u8>,
       ) {
         // Update the time of last message
         *time_of_last_p2p_message = Instant::now();
 
-        let topic = if let Some(genesis) = genesis {
-          topic_for_genesis(genesis)
-        } else {
-          IdentTopic::new(LIBP2P_TOPIC)
-        };
+        let topic =
+          if let Some(set) = set { topic_for_set(set) } else { IdentTopic::new(LIBP2P_TOPIC) };
 
         match p2p.behaviour_mut().gossipsub.publish(topic, msg.clone()) {
           Err(PublishError::SigningError(e)) => panic!("signing error when broadcasting: {e}"),
@@ -332,66 +320,115 @@ impl LibP2p {
       }
 
       async move {
+        let mut set_for_genesis = HashMap::new();
+        let mut pending_p2p_connections = vec![];
         // Run this task ad-infinitum
         loop {
+          // Handle pending P2P connections
+          // TODO: Break this out onto its own task with better peer management logic?
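The block that follows drains `pending_p2p_connections`, asks the Serai node for each network's validators via `p2p_validators`, and dials up to five of them chosen at random. A self-contained sketch of that selection-without-replacement pattern:

```rust
use rand_core::{RngCore, OsRng};

// swap_remove is O(1) and shrinks the candidate pool, so no peer is picked twice
fn pick_up_to_five(mut nodes: Vec<String>) -> Vec<String> {
  let mut chosen = vec![];
  for _ in 0 .. 5 {
    if nodes.is_empty() {
      break;
    }
    let i = usize::try_from(OsRng.next_u64() % u64::try_from(nodes.len()).unwrap()).unwrap();
    chosen.push(nodes.swap_remove(i));
  }
  chosen
}
```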
+ { + let mut connect = |addr: Multiaddr| { + log::info!("found peer from substrate: {addr}"); + + let protocols = addr.iter().filter_map(|piece| match piece { + // Drop PeerIds from the Substrate P2p network + Protocol::P2p(_) => None, + // Use our own TCP port + Protocol::Tcp(_) => Some(Protocol::Tcp(PORT)), + other => Some(other), + }); + + let mut new_addr = Multiaddr::empty(); + for protocol in protocols { + new_addr.push(protocol); + } + let addr = new_addr; + log::debug!("transformed found peer: {addr}"); + + if let Err(e) = swarm.dial(addr) { + log::warn!("dialing peer failed: {e:?}"); + } + }; + + while let Some(network) = pending_p2p_connections.pop() { + if let Ok(mut nodes) = serai.p2p_validators(network).await { + // If there's an insufficient amount of nodes known, connect to all yet add it back + // and break + if nodes.len() < 3 { + log::warn!( + "insufficient amount of P2P nodes known for {:?}: {}", + network, + nodes.len() + ); + pending_p2p_connections.push(network); + for node in nodes { + connect(node); + } + break; + } + + // Randomly select up to 5 + for _ in 0 .. 5 { + if !nodes.is_empty() { + let to_connect = nodes.swap_remove( + usize::try_from(OsRng.next_u64() % u64::try_from(nodes.len()).unwrap()) + .unwrap(), + ); + connect(to_connect); + } + } + } + } + } + let time_since_last = Instant::now().duration_since(time_of_last_p2p_message); tokio::select! { biased; // Subscribe to any new topics - topic = subscribe_recv.recv() => { - let (subscribe, topic) = topic.expect("subscribe_recv closed. are we shutting down?"); + set = subscribe_recv.recv() => { + let (subscribe, set, genesis): (_, ValidatorSet, [u8; 32]) = + set.expect("subscribe_recv closed. are we shutting down?"); + let topic = topic_for_set(set); if subscribe { - swarm - .behaviour_mut() - .gossipsub - .subscribe(&topic_for_genesis(topic)) - .unwrap(); + log::info!("subscribing to p2p messages for {set:?}"); + pending_p2p_connections.push(set.network); + set_for_genesis.insert(genesis, set); + swarm.behaviour_mut().gossipsub.subscribe(&topic).unwrap(); } else { - swarm - .behaviour_mut() - .gossipsub - .unsubscribe(&topic_for_genesis(topic)) - .unwrap(); + log::info!("unsubscribing to p2p messages for {set:?}"); + set_for_genesis.remove(&genesis); + swarm.behaviour_mut().gossipsub.unsubscribe(&topic).unwrap(); } } // Handle any queued outbound messages msg = broadcast_recv.recv() => { - let (genesis, msg) = msg.expect("broadcast_recv closed. are we shutting down?"); + let (genesis, msg): (Option<[u8; 32]>, Vec) = + msg.expect("broadcast_recv closed. are we shutting down?"); + let set = genesis.and_then(|genesis| set_for_genesis.get(&genesis).copied()); broadcast_raw( &mut swarm, &mut time_of_last_p2p_message, - genesis, + set, msg, - ).await; + ); } // Handle new incoming messages event = swarm.next() => { match event { - #[cfg(debug_assertions)] - Some(SwarmEvent::Behaviour(BehaviorEvent::Mdns( - libp2p::mdns::Event::Discovered(list), - ))) => { - for (peer, mut addr) in list { - // Check the port is as expected to prevent trying to peer with Substrate nodes - if addr.pop() == Some(libp2p::multiaddr::Protocol::Tcp(PORT)) { - log::info!("found peer via mdns"); - swarm.behaviour_mut().gossipsub.add_explicit_peer(&peer); - } - } + Some(SwarmEvent::Dialing { connection_id, .. 
}) => { + log::debug!("dialing to peer in connection ID {}", &connection_id); } - #[cfg(debug_assertions)] - Some(SwarmEvent::Behaviour(BehaviorEvent::Mdns( - libp2p::mdns::Event::Expired(list), - ))) => { - for (peer, _) in list { - log::info!("disconnecting peer due to mdns"); - swarm.behaviour_mut().gossipsub.remove_explicit_peer(&peer); + Some(SwarmEvent::ConnectionEstablished { peer_id, connection_id, .. }) => { + log::debug!( + "connection established to peer {} in connection ID {}", + &peer_id, + &connection_id, + ); + swarm.behaviour_mut().gossipsub.add_explicit_peer(&peer_id) } - } - Some(SwarmEvent::Behaviour(BehaviorEvent::Gossipsub( GsEvent::Message { propagation_source, message, .. }, ))) => { @@ -410,13 +447,13 @@ impl LibP2p { // If we are sending heartbeats, we should've sent one after 60s of no finalized blocks // (where a finalized block only occurs due to network activity), meaning this won't be // run - _ = tokio::time::sleep(Duration::from_secs(80).saturating_sub(time_since_last)) => { + () = tokio::time::sleep(Duration::from_secs(80).saturating_sub(time_since_last)) => { broadcast_raw( &mut swarm, &mut time_of_last_p2p_message, None, P2pMessageKind::KeepAlive.serialize() - ).await; + ); } } } @@ -435,21 +472,21 @@ impl LibP2p { impl P2p for LibP2p { type Id = PeerId; - async fn subscribe(&self, genesis: [u8; 32]) { + async fn subscribe(&self, set: ValidatorSet, genesis: [u8; 32]) { self .subscribe .lock() .await - .send((true, genesis)) + .send((true, set, genesis)) .expect("subscribe_send closed. are we shutting down?"); } - async fn unsubscribe(&self, genesis: [u8; 32]) { + async fn unsubscribe(&self, set: ValidatorSet, genesis: [u8; 32]) { self .subscribe .lock() .await - .send((false, genesis)) + .send((false, set, genesis)) .expect("subscribe_send closed. 
are we shutting down?"); } @@ -553,7 +590,7 @@ pub async fn handle_p2p_task( channels.write().await.insert(genesis, send); // Subscribe to the topic for this tributary - p2p.subscribe(genesis).await; + p2p.subscribe(tributary.spec.set(), genesis).await; // Per-Tributary P2P message handler tokio::spawn({ @@ -676,8 +713,8 @@ pub async fn handle_p2p_task( } TributaryEvent::TributaryRetired(set) => { if let Some(genesis) = set_to_genesis.remove(&set) { + p2p.unsubscribe(set, genesis).await; channels.write().await.remove(&genesis); - p2p.unsubscribe(genesis).await; } } } @@ -689,24 +726,15 @@ pub async fn handle_p2p_task( let msg = p2p.receive().await; match msg.kind { P2pMessageKind::KeepAlive => {} - P2pMessageKind::Tributary(genesis) => { - if let Some(channel) = channels.read().await.get(&genesis) { - channel.send(msg).unwrap(); - } - } - P2pMessageKind::Heartbeat(genesis) => { - if let Some(channel) = channels.read().await.get(&genesis) { - channel.send(msg).unwrap(); - } - } + P2pMessageKind::Tributary(genesis) | + P2pMessageKind::Heartbeat(genesis) | P2pMessageKind::Block(genesis) => { if let Some(channel) = channels.read().await.get(&genesis) { channel.send(msg).unwrap(); } } P2pMessageKind::CosignedBlock => { - let mut msg_ref: &[u8] = msg.msg.as_ref(); - let Ok(msg) = CosignedBlock::decode(&mut scale::IoReader(&mut msg_ref)) else { + let Ok(msg) = CosignedBlock::deserialize_reader(&mut msg.msg.as_slice()) else { log::error!("received CosignedBlock message with invalidly serialized contents"); continue; }; diff --git a/coordinator/src/substrate/cosign.rs b/coordinator/src/substrate/cosign.rs index ffb5d202b..2443c8116 100644 --- a/coordinator/src/substrate/cosign.rs +++ b/coordinator/src/substrate/cosign.rs @@ -12,57 +12,48 @@ ensure any block needing cosigned is consigned within a reasonable amount of time. */ -use core::{ops::Deref, time::Duration}; -use std::{ - sync::Arc, - collections::{HashSet, HashMap}, -}; - use zeroize::Zeroizing; -use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto}; +use ciphersuite::{Ciphersuite, Ristretto}; + +use borsh::{BorshSerialize, BorshDeserialize}; -use scale::{Encode, Decode}; use serai_client::{ - SeraiError, Block, Serai, TemporalSerai, - primitives::{BlockHash, NetworkId}, - validator_sets::{ - primitives::{Session, ValidatorSet, KeyPair, amortize_excess_key_shares}, - ValidatorSetsEvent, - }, - in_instructions::InInstructionsEvent, - coins::CoinsEvent, + SeraiError, Serai, + primitives::NetworkId, + validator_sets::primitives::{Session, ValidatorSet}, }; use serai_db::*; -use processor_messages::SubstrateContext; - -use tokio::{sync::mpsc, time::sleep}; - -use crate::{ - Db, - processors::Processors, - tributary::{TributarySpec, SeraiBlockNumber}, -}; +use crate::{Db, substrate::in_set, tributary::SeraiBlockNumber}; // 5 minutes, expressed in blocks // TODO: Pull a constant for block time const COSIGN_DISTANCE: u64 = 5 * 60 / 6; +#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] +enum HasEvents { + KeyGen, + Yes, + No, +} + create_db!( SubstrateCosignDb { - CosignTriggered: () -> (), IntendedCosign: () -> (u64, Option), - BlockHasEvents: (block: u64) -> u8, + BlockHasEvents: (block: u64) -> HasEvents, LatestCosignedBlock: () -> u64, } ); impl IntendedCosign { + // Sets the intended to cosign block, clearing the prior value entirely. 
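`BlockHasEvents` now stores the `HasEvents` enum directly rather than a hand-packed SCALE byte; the new borsh derives suggest `create_db!` (de)serializes values via borsh. A round-trip sketch under that assumption:

```rust
use borsh::{BorshSerialize, BorshDeserialize};

#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
enum HasEvents {
  KeyGen,
  Yes,
  No,
}

fn main() {
  // Unit variants encode as a single-byte discriminant, so this stays as
  // compact as the manual one-byte encoding it replaces
  let bytes = borsh::to_vec(&HasEvents::KeyGen).unwrap();
  assert_eq!(bytes.len(), 1);
  assert_eq!(HasEvents::deserialize_reader(&mut bytes.as_slice()).unwrap(), HasEvents::KeyGen);
}
```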
pub fn set_intended_cosign(txn: &mut impl DbTxn, intended: u64) { Self::set(txn, &(intended, None::)); } + + // Sets the cosign skipped since the last intended to cosign block. pub fn set_skipped_cosign(txn: &mut impl DbTxn, skipped: u64) { let (intended, prior_skipped) = Self::get(txn).unwrap(); assert!(prior_skipped.is_none()); @@ -89,12 +80,6 @@ impl CosignTransactions { } } -#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, Decode)] -enum HasEvents { - KeyGen, - Yes, - No, -} async fn block_has_events( txn: &mut impl DbTxn, serai: &Serai, @@ -122,143 +107,193 @@ async fn block_has_events( let has_events = if has_no_events { HasEvents::No } else { HasEvents::Yes }; - let has_events = has_events.encode(); - assert_eq!(has_events.len(), 1); - BlockHasEvents::set(txn, block, &has_events[0]); + BlockHasEvents::set(txn, block, &has_events); Ok(HasEvents::Yes) } - Some(code) => Ok(HasEvents::decode(&mut [code].as_slice()).unwrap()), + Some(code) => Ok(code), } } -/* - Advances the cosign protocol as should be done per the latest block. - - A block is considered cosigned if: - A) It was cosigned - B) It's the parent of a cosigned block - C) It immediately follows a cosigned block and has no events requiring cosigning (TODO) -*/ -async fn advance_cosign_protocol(db: &mut impl Db, serai: &Serai, latest_number: u64) -> Result<(), ()> { - let Some((last_intended_to_cosign_block, mut skipped_block)) = IntendedCosign::get(&txn) else { - let mut txn = db.txn(); - IntendedCosign::set_intended_cosign(&mut txn, 1); - txn.commit(); - return Ok(()); - }; -} - -// If we haven't flagged skipped, and a block within the distance had events, flag the first -// such block as skipped -let mut distance_end_exclusive = last_intended_to_cosign_block + COSIGN_DISTANCE; -// If we've never triggered a cosign, don't skip any cosigns -if CosignTriggered::get(&txn).is_none() { - distance_end_exclusive = 0; -} -if skipped_block.is_none() { - for b in (last_intended_to_cosign_block + 1) .. distance_end_exclusive { - if b > latest_number { - break; - } - - if block_has_events(&mut txn, serai, b).await? == HasEvents::Yes { - skipped_block = Some(b); - log::debug!("skipping cosigning {b} due to proximity to prior cosign"); - IntendedCosign::set_skipped_cosign(&mut txn, b); - break; - } +async fn potentially_cosign_block( + txn: &mut impl DbTxn, + serai: &Serai, + block: u64, + skipped_block: Option, + window_end_exclusive: u64, +) -> Result { + // The following code regarding marking cosigned if prior block is cosigned expects this block to + // not be zero + // While we could perform this check there, there's no reason not to optimize the entire function + // as such + if block == 0 { + return Ok(false); } -} -let mut has_no_cosigners = None; -let mut cosign = vec![]; + let block_has_events = block_has_events(txn, serai, block).await?; -// Block we should cosign no matter what if no prior blocks qualified for cosigning -let maximally_latent_cosign_block = - skipped_block.map(|skipped_block| skipped_block + COSIGN_DISTANCE); -for block in (last_intended_to_cosign_block + 1) ..= latest_number { - let actual_block = serai - .finalized_block_by_number(block) - .await? 
- .expect("couldn't get block which should've been finalized"); - SeraiBlockNumber::set(&mut txn, actual_block.hash(), &block); + // If this block had no events and immediately follows a cosigned block, mark it as cosigned + if (block_has_events == HasEvents::No) && + (LatestCosignedBlock::latest_cosigned_block(txn) == (block - 1)) + { + LatestCosignedBlock::set(txn, &block); + } - let mut set = false; + // If we skipped a block, we're supposed to sign it plus the COSIGN_DISTANCE if no other blocks + // trigger a cosigning protocol covering it + // This means there will be the maximum delay allowed from a block needing cosigning occurring + // and a cosign for it triggering + let maximally_latent_cosign_block = + skipped_block.map(|skipped_block| skipped_block + COSIGN_DISTANCE); - let block_has_events = block_has_events(&mut txn, serai, block).await?; - // If this block is within the distance, - if block < distance_end_exclusive { + // If this block is within the window, + if block < window_end_exclusive { // and set a key, cosign it if block_has_events == HasEvents::KeyGen { - IntendedCosign::set_intended_cosign(&mut txn, block); - set = true; + IntendedCosign::set_intended_cosign(txn, block); // Carry skipped if it isn't included by cosigning this block if let Some(skipped) = skipped_block { if skipped > block { - IntendedCosign::set_skipped_cosign(&mut txn, block); + IntendedCosign::set_skipped_cosign(txn, block); } } + return Ok(true); } - } else if (Some(block) == maximally_latent_cosign_block) || - (block_has_events != HasEvents::No) - { - // Since this block was outside the distance and had events/was maximally latent, cosign it - IntendedCosign::set_intended_cosign(&mut txn, block); - set = true; + } else if (Some(block) == maximally_latent_cosign_block) || (block_has_events != HasEvents::No) { + // Since this block was outside the window and had events/was maximally latent, cosign it + IntendedCosign::set_intended_cosign(txn, block); + return Ok(true); } + Ok(false) +} - if set { - // Get the keys as of the prior block - // That means if this block is setting new keys (which won't lock in until we process this - // block), we won't freeze up waiting for the yet-to-be-processed keys to sign this block - let serai = serai.as_of(actual_block.header.parent_hash.into()); - - has_no_cosigners = Some(actual_block.clone()); - - for network in serai_client::primitives::NETWORKS { - // Get the latest session to have set keys - let Some(latest_session) = serai.validator_sets().session(network).await? else { - continue; - }; - let prior_session = Session(latest_session.0.saturating_sub(1)); - let set_with_keys = if serai - .validator_sets() - .keys(ValidatorSet { network, session: prior_session }) - .await? - .is_some() - { - ValidatorSet { network, session: prior_session } - } else { - let set = ValidatorSet { network, session: latest_session }; - if serai.validator_sets().keys(set).await?.is_none() { - continue; - } - set - }; +/* + Advances the cosign protocol as should be done per the latest block. + + A block is considered cosigned if: + A) It was cosigned + B) It's the parent of a cosigned block + C) It immediately follows a cosigned block and has no events requiring cosigning - // Since this is a valid cosigner, don't flag this block as having no cosigners - has_no_cosigners = None; - log::debug!("{:?} will be cosigning {block}", set_with_keys.network); + This only actually performs advancement within a limited bound (generally until it finds a block + which should be cosigned). 
Accordingly, it is necessary to call multiple times even if + `latest_number` doesn't change. +*/ +pub async fn advance_cosign_protocol( + db: &mut impl Db, + key: &Zeroizing<::F>, + serai: &Serai, + latest_number: u64, +) -> Result<(), SeraiError> { + let mut txn = db.txn(); + + const INITIAL_INTENDED_COSIGN: u64 = 1; + let (last_intended_to_cosign_block, mut skipped_block) = { + let intended_cosign = IntendedCosign::get(&txn); + // If we haven't prior intended to cosign a block, set the intended cosign to 1 + if let Some(intended_cosign) = intended_cosign { + intended_cosign + } else { + IntendedCosign::set_intended_cosign(&mut txn, INITIAL_INTENDED_COSIGN); + IntendedCosign::get(&txn).unwrap() + } + }; - if in_set(key, &serai, set_with_keys).await?.unwrap() { - cosign.push((set_with_keys, block, actual_block.hash())); + // "windows" refers to the window of blocks where even if there's a block which should be + // cosigned, it won't be due to proximity due to the prior cosign + let mut window_end_exclusive = last_intended_to_cosign_block + COSIGN_DISTANCE; + // If we've never triggered a cosign, don't skip any cosigns based on proximity + if last_intended_to_cosign_block == INITIAL_INTENDED_COSIGN { + window_end_exclusive = 0; + } + + // Check all blocks within the window to see if they should be cosigned + // If so, we're skipping them and need to flag them as skipped so that once the window closes, we + // do cosign them + // We only perform this check if we haven't already marked a block as skipped since the cosign + // the skipped block will cause will cosign all other blocks within this window + if skipped_block.is_none() { + for b in (last_intended_to_cosign_block + 1) .. window_end_exclusive.min(latest_number) { + if block_has_events(&mut txn, serai, b).await? == HasEvents::Yes { + skipped_block = Some(b); + log::debug!("skipping cosigning {b} due to proximity to prior cosign"); + IntendedCosign::set_skipped_cosign(&mut txn, b); + break; } } + } + + // A block which should be cosigned + let mut to_cosign = None; + // A list of sets which are cosigning, along with a boolean of if we're in the set + let mut cosigning = vec![]; + + for block in (last_intended_to_cosign_block + 1) ..= latest_number { + let actual_block = serai + .finalized_block_by_number(block) + .await? + .expect("couldn't get block which should've been finalized"); + + // Save the block number for this block, as needed by the cosigner to perform cosigning + SeraiBlockNumber::set(&mut txn, actual_block.hash(), &block); + + if potentially_cosign_block(&mut txn, serai, block, skipped_block, window_end_exclusive).await? + { + to_cosign = Some((block, actual_block.hash())); + + // Get the keys as of the prior block + // If this key sets new keys, the coordinator won't acknowledge so until we process this + // block + // We won't process this block until its co-signed + // Using the keys of the prior block ensures this deadlock isn't reached + let serai = serai.as_of(actual_block.header.parent_hash.into()); + + for network in serai_client::primitives::NETWORKS { + // Get the latest session to have set keys + let set_with_keys = { + let Some(latest_session) = serai.validator_sets().session(network).await? else { + continue; + }; + let prior_session = Session(latest_session.0.saturating_sub(1)); + if serai + .validator_sets() + .keys(ValidatorSet { network, session: prior_session }) + .await? 
+ .is_some() + { + ValidatorSet { network, session: prior_session } + } else { + let set = ValidatorSet { network, session: latest_session }; + if serai.validator_sets().keys(set).await?.is_none() { + continue; + } + set + } + }; + + log::debug!("{:?} will be cosigning {block}", set_with_keys.network); + cosigning.push((set_with_keys, in_set(key, &serai, set_with_keys).await?.unwrap())); + } - break; + break; + } } -} -// If this block doesn't have cosigners, yet does have events, automatically mark it as -// cosigned -if let Some(has_no_cosigners) = has_no_cosigners { - log::debug!("{} had no cosigners available, marking as cosigned", has_no_cosigners.number()); - LatestCosignedBlock::set(&mut txn, &has_no_cosigners.number()); -} else { - CosignTriggered::set(&mut txn, &()); - for (set, block, hash) in cosign { - log::debug!("cosigning {block} with {:?} {:?}", set.network, set.session); - CosignTransactions::append_cosign(&mut txn, set, block, hash); + if let Some((number, hash)) = to_cosign { + // If this block doesn't have cosigners, yet does have events, automatically mark it as + // cosigned + if cosigning.is_empty() { + log::debug!("{} had no cosigners available, marking as cosigned", number); + LatestCosignedBlock::set(&mut txn, &number); + } else { + for (set, in_set) in cosigning { + if in_set { + log::debug!("cosigning {number} with {:?} {:?}", set.network, set.session); + CosignTransactions::append_cosign(&mut txn, set, number, hash); + } + } + } } + txn.commit(); + + Ok(()) } -txn.commit(); diff --git a/coordinator/src/substrate/db.rs b/coordinator/src/substrate/db.rs index e2e33c51e..2621e5ef7 100644 --- a/coordinator/src/substrate/db.rs +++ b/coordinator/src/substrate/db.rs @@ -1,61 +1,32 @@ -use scale::Encode; - -use serai_client::{ - primitives::NetworkId, - validator_sets::primitives::{Session, ValidatorSet}, -}; +use serai_client::primitives::NetworkId; pub use serai_db::*; -create_db!( - SubstrateDb { - CosignTriggered: () -> (), - IntendedCosign: () -> (u64, Option), - BlockHasEvents: (block: u64) -> u8, - LatestCosignedBlock: () -> u64, - NextBlock: () -> u64, - EventDb: (id: &[u8], index: u32) -> (), - BatchInstructionsHashDb: (network: NetworkId, id: u32) -> [u8; 32] - } -); +mod inner_db { + use super::*; -impl IntendedCosign { - pub fn set_intended_cosign(txn: &mut impl DbTxn, intended: u64) { - Self::set(txn, &(intended, None::)); - } - pub fn set_skipped_cosign(txn: &mut impl DbTxn, skipped: u64) { - let (intended, prior_skipped) = Self::get(txn).unwrap(); - assert!(prior_skipped.is_none()); - Self::set(txn, &(intended, Some(skipped))); - } + create_db!( + SubstrateDb { + NextBlock: () -> u64, + HandledEvent: (block: [u8; 32]) -> u32, + BatchInstructionsHashDb: (network: NetworkId, id: u32) -> [u8; 32] + } + ); } +pub(crate) use inner_db::{NextBlock, BatchInstructionsHashDb}; -impl LatestCosignedBlock { - pub fn latest_cosigned_block(getter: &impl Get) -> u64 { - Self::get(getter).unwrap_or_default().max(1) +pub struct HandledEvent; +impl HandledEvent { + fn next_to_handle_event(getter: &impl Get, block: [u8; 32]) -> u32 { + inner_db::HandledEvent::get(getter, block).map_or(0, |last| last + 1) } -} - -impl EventDb { - pub fn is_unhandled(getter: &impl Get, id: &[u8], index: u32) -> bool { - Self::get(getter, id, index).is_none() + pub fn is_unhandled(getter: &impl Get, block: [u8; 32], event_id: u32) -> bool { + let next = Self::next_to_handle_event(getter, block); + assert!(next >= event_id); + next == event_id } - - pub fn handle_event(txn: &mut impl DbTxn, id: 
&[u8], index: u32) { - assert!(Self::is_unhandled(txn, id, index)); - Self::set(txn, id, index, &()); - } -} - -db_channel! { - SubstrateDbChannels { - CosignTransactions: (network: NetworkId) -> (Session, u64, [u8; 32]), - } -} - -impl CosignTransactions { - // Append a cosign transaction. - pub fn append_cosign(txn: &mut impl DbTxn, set: ValidatorSet, number: u64, hash: [u8; 32]) { - CosignTransactions::send(txn, set.network, &(set.session, number, hash)) + pub fn handle_event(txn: &mut impl DbTxn, block: [u8; 32], index: u32) { + assert!(Self::next_to_handle_event(txn, block) == index); + inner_db::HandledEvent::set(txn, block, &index); } } diff --git a/coordinator/src/substrate/mod.rs b/coordinator/src/substrate/mod.rs index b5c58f2b9..446780f2c 100644 --- a/coordinator/src/substrate/mod.rs +++ b/coordinator/src/substrate/mod.rs @@ -8,12 +8,11 @@ use zeroize::Zeroizing; use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto}; -use scale::{Encode, Decode}; use serai_client::{ SeraiError, Block, Serai, TemporalSerai, primitives::{BlockHash, NetworkId}, validator_sets::{ - primitives::{Session, ValidatorSet, KeyPair, amortize_excess_key_shares}, + primitives::{ValidatorSet, amortize_excess_key_shares}, ValidatorSetsEvent, }, in_instructions::InInstructionsEvent, @@ -29,12 +28,15 @@ use tokio::{sync::mpsc, time::sleep}; use crate::{ Db, processors::Processors, - tributary::{TributarySpec, SeraiBlockNumber}, + tributary::{TributarySpec, SeraiDkgCompleted}, }; mod db; pub use db::*; +mod cosign; +pub use cosign::*; + async fn in_set( key: &Zeroizing<::F>, serai: &TemporalSerai<'_>, @@ -110,45 +112,14 @@ async fn handle_new_set( new_tributary_spec.send(spec).unwrap(); } else { - log::info!("not present in set {:?}", set); + log::info!("not present in new set {:?}", set); } Ok(()) } -async fn handle_key_gen( - processors: &Pro, - serai: &Serai, - block: &Block, - set: ValidatorSet, - key_pair: KeyPair, -) -> Result<(), SeraiError> { - processors - .send( - set.network, - processor_messages::substrate::CoordinatorMessage::ConfirmKeyPair { - context: SubstrateContext { - serai_time: block.time().unwrap() / 1000, - network_latest_finalized_block: serai - .as_of(block.hash()) - .in_instructions() - .latest_block_for_network(set.network) - .await? - // The processor treats this as a magic value which will cause it to find a network - // block which has a time greater than or equal to the Serai time - .unwrap_or(BlockHash([0; 32])), - }, - session: set.session, - key_pair, - }, - ) - .await; - - Ok(()) -} - -async fn handle_batch_and_burns( - db: &mut D, +async fn handle_batch_and_burns( + txn: &mut impl DbTxn, processors: &Pro, serai: &Serai, block: &Block, @@ -178,9 +149,7 @@ async fn handle_batch_and_burns( { network_had_event(&mut burns, &mut batches, network); - let mut txn = db.txn(); - BatchInstructionsHashDb::set(&mut txn, network, id, &instructions_hash); - txn.commit(); + BatchInstructionsHashDb::set(txn, network, id, &instructions_hash); // Make sure this is the only Batch event for this network in this Block assert!(batch_block.insert(network, network_block).is_none()); @@ -257,8 +226,8 @@ async fn handle_block( for new_set in serai.as_of(hash).validator_sets().new_set_events().await? 
{ // Individually mark each event as handled so on reboot, we minimize duplicates // Additionally, if the Serai connection also fails 1/100 times, this means a block with 1000 - // events will successfully be incrementally handled (though the Serai connection should be - // stable) + // events will successfully be incrementally handled + // (though the Serai connection should be stable, making this unnecessary) let ValidatorSetsEvent::NewSet { set } = new_set else { panic!("NewSet event wasn't NewSet: {new_set:?}"); }; @@ -269,11 +238,11 @@ async fn handle_block( continue; } - if EventDb::is_unhandled(db, &hash, event_id) { + if HandledEvent::is_unhandled(db, hash, event_id) { log::info!("found fresh new set event {:?}", new_set); let mut txn = db.txn(); handle_new_set::(&mut txn, key, new_tributary_spec, serai, &block, set).await?; - EventDb::handle_event(&mut txn, &hash, event_id); + HandledEvent::handle_event(&mut txn, hash, event_id); txn.commit(); } event_id += 1; @@ -281,15 +250,38 @@ async fn handle_block( // If a key pair was confirmed, inform the processor for key_gen in serai.as_of(hash).validator_sets().key_gen_events().await? { - if EventDb::is_unhandled(db, &hash, event_id) { + if HandledEvent::is_unhandled(db, hash, event_id) { log::info!("found fresh key gen event {:?}", key_gen); - if let ValidatorSetsEvent::KeyGen { set, key_pair } = key_gen { - handle_key_gen(processors, serai, &block, set, key_pair).await?; - } else { + let ValidatorSetsEvent::KeyGen { set, key_pair } = key_gen else { panic!("KeyGen event wasn't KeyGen: {key_gen:?}"); - } + }; + let substrate_key = key_pair.0 .0; + processors + .send( + set.network, + processor_messages::substrate::CoordinatorMessage::ConfirmKeyPair { + context: SubstrateContext { + serai_time: block.time().unwrap() / 1000, + network_latest_finalized_block: serai + .as_of(block.hash()) + .in_instructions() + .latest_block_for_network(set.network) + .await? + // The processor treats this as a magic value which will cause it to find a network + // block which has a time greater than or equal to the Serai time + .unwrap_or(BlockHash([0; 32])), + }, + session: set.session, + key_pair, + }, + ) + .await; + + // TODO: If we were in the set, yet were removed, drop the tributary + let mut txn = db.txn(); - EventDb::handle_event(&mut txn, &hash, event_id); + SeraiDkgCompleted::set(&mut txn, set, &substrate_key); + HandledEvent::handle_event(&mut txn, hash, event_id); txn.commit(); } event_id += 1; @@ -304,28 +296,26 @@ async fn handle_block( continue; } - if EventDb::is_unhandled(db, &hash, event_id) { + if HandledEvent::is_unhandled(db, hash, event_id) { log::info!("found fresh set retired event {:?}", retired_set); let mut txn = db.txn(); crate::ActiveTributaryDb::retire_tributary(&mut txn, set); tributary_retired.send(set).unwrap(); - EventDb::handle_event(&mut txn, &hash, event_id); + HandledEvent::handle_event(&mut txn, hash, event_id); txn.commit(); } event_id += 1; } // Finally, tell the processor of acknowledged blocks/burns - // This uses a single event as. 
unlike prior events which individually executed code, all + // This uses a single event as unlike prior events which individually executed code, all // following events share data collection - // This does break the uniqueness of (hash, event_id) -> one event, yet - // (network, (hash, event_id)) remains valid as a unique ID for an event - if EventDb::is_unhandled(db, &hash, event_id) { - handle_batch_and_burns(db, processors, serai, &block).await?; + if HandledEvent::is_unhandled(db, hash, event_id) { + let mut txn = db.txn(); + handle_batch_and_burns(&mut txn, processors, serai, &block).await?; + HandledEvent::handle_event(&mut txn, hash, event_id); + txn.commit(); } - let mut txn = db.txn(); - EventDb::handle_event(&mut txn, &hash, event_id); - txn.commit(); Ok(()) } @@ -342,181 +332,8 @@ async fn handle_new_blocks( // Check if there's been a new Substrate block let latest_number = serai.latest_finalized_block().await?.number(); - // TODO: If this block directly builds off a cosigned block *and* doesn't contain events, mark - // cosigned, - { - // If: - // A) This block has events and it's been at least X blocks since the last cosign or - // B) This block doesn't have events but it's been X blocks since a skipped block which did - // have events or - // C) This block key gens (which changes who the cosigners are) - // cosign this block. - const COSIGN_DISTANCE: u64 = 5 * 60 / 6; // 5 minutes, expressed in blocks - - #[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, Decode)] - enum HasEvents { - KeyGen, - Yes, - No, - } - async fn block_has_events( - txn: &mut impl DbTxn, - serai: &Serai, - block: u64, - ) -> Result { - let cached = BlockHasEvents::get(txn, block); - match cached { - None => { - let serai = serai.as_of( - serai - .finalized_block_by_number(block) - .await? - .expect("couldn't get block which should've been finalized") - .hash(), - ); - - if !serai.validator_sets().key_gen_events().await?.is_empty() { - return Ok(HasEvents::KeyGen); - } - - let has_no_events = serai.coins().burn_with_instruction_events().await?.is_empty() && - serai.in_instructions().batch_events().await?.is_empty() && - serai.validator_sets().new_set_events().await?.is_empty() && - serai.validator_sets().set_retired_events().await?.is_empty(); - - let has_events = if has_no_events { HasEvents::No } else { HasEvents::Yes }; - - let has_events = has_events.encode(); - assert_eq!(has_events.len(), 1); - BlockHasEvents::set(txn, block, &has_events[0]); - Ok(HasEvents::Yes) - } - Some(code) => Ok(HasEvents::decode(&mut [code].as_slice()).unwrap()), - } - } - - let mut txn = db.txn(); - let Some((last_intended_to_cosign_block, mut skipped_block)) = IntendedCosign::get(&txn) else { - IntendedCosign::set_intended_cosign(&mut txn, 1); - txn.commit(); - return Ok(()); - }; - - // If we haven't flagged skipped, and a block within the distance had events, flag the first - // such block as skipped - let mut distance_end_exclusive = last_intended_to_cosign_block + COSIGN_DISTANCE; - // If we've never triggered a cosign, don't skip any cosigns - if CosignTriggered::get(&txn).is_none() { - distance_end_exclusive = 0; - } - if skipped_block.is_none() { - for b in (last_intended_to_cosign_block + 1) .. distance_end_exclusive { - if b > latest_number { - break; - } - - if block_has_events(&mut txn, serai, b).await? 
== HasEvents::Yes { - skipped_block = Some(b); - log::debug!("skipping cosigning {b} due to proximity to prior cosign"); - IntendedCosign::set_skipped_cosign(&mut txn, b); - break; - } - } - } - - let mut has_no_cosigners = None; - let mut cosign = vec![]; - - // Block we should cosign no matter what if no prior blocks qualified for cosigning - let maximally_latent_cosign_block = - skipped_block.map(|skipped_block| skipped_block + COSIGN_DISTANCE); - for block in (last_intended_to_cosign_block + 1) ..= latest_number { - let actual_block = serai - .finalized_block_by_number(block) - .await? - .expect("couldn't get block which should've been finalized"); - SeraiBlockNumber::set(&mut txn, actual_block.hash(), &block); - - let mut set = false; - - let block_has_events = block_has_events(&mut txn, serai, block).await?; - // If this block is within the distance, - if block < distance_end_exclusive { - // and set a key, cosign it - if block_has_events == HasEvents::KeyGen { - IntendedCosign::set_intended_cosign(&mut txn, block); - set = true; - // Carry skipped if it isn't included by cosigning this block - if let Some(skipped) = skipped_block { - if skipped > block { - IntendedCosign::set_skipped_cosign(&mut txn, block); - } - } - } - } else if (Some(block) == maximally_latent_cosign_block) || - (block_has_events != HasEvents::No) - { - // Since this block was outside the distance and had events/was maximally latent, cosign it - IntendedCosign::set_intended_cosign(&mut txn, block); - set = true; - } - - if set { - // Get the keys as of the prior block - // That means if this block is setting new keys (which won't lock in until we process this - // block), we won't freeze up waiting for the yet-to-be-processed keys to sign this block - let serai = serai.as_of(actual_block.header.parent_hash.into()); - - has_no_cosigners = Some(actual_block.clone()); - - for network in serai_client::primitives::NETWORKS { - // Get the latest session to have set keys - let Some(latest_session) = serai.validator_sets().session(network).await? else { - continue; - }; - let prior_session = Session(latest_session.0.saturating_sub(1)); - let set_with_keys = if serai - .validator_sets() - .keys(ValidatorSet { network, session: prior_session }) - .await? 
- .is_some() - { - ValidatorSet { network, session: prior_session } - } else { - let set = ValidatorSet { network, session: latest_session }; - if serai.validator_sets().keys(set).await?.is_none() { - continue; - } - set - }; - - // Since this is a valid cosigner, don't flag this block as having no cosigners - has_no_cosigners = None; - log::debug!("{:?} will be cosigning {block}", set_with_keys.network); - - if in_set(key, &serai, set_with_keys).await?.unwrap() { - cosign.push((set_with_keys, block, actual_block.hash())); - } - } - - break; - } - } - - // If this block doesn't have cosigners, yet does have events, automatically mark it as - // cosigned - if let Some(has_no_cosigners) = has_no_cosigners { - log::debug!("{} had no cosigners available, marking as cosigned", has_no_cosigners.number()); - LatestCosignedBlock::set(&mut txn, &has_no_cosigners.number()); - } else { - CosignTriggered::set(&mut txn, &()); - for (set, block, hash) in cosign { - log::debug!("cosigning {block} with {:?} {:?}", set.network, set.session); - CosignTransactions::append_cosign(&mut txn, set, block, hash); - } - } - txn.commit(); - } + // Advance the cosigning protocol + advance_cosign_protocol(db, key, serai, latest_number).await?; // Reduce to the latest cosigned block let latest_number = latest_number.min(LatestCosignedBlock::latest_cosigned_block(db)); @@ -526,24 +343,19 @@ async fn handle_new_blocks( } for b in *next_block ..= latest_number { - log::info!("found substrate block {b}"); - handle_block( - db, - key, - new_tributary_spec, - tributary_retired, - processors, - serai, - serai - .finalized_block_by_number(b) - .await? - .expect("couldn't get block before the latest finalized block"), - ) - .await?; + let block = serai + .finalized_block_by_number(b) + .await? + .expect("couldn't get block before the latest finalized block"); + + log::info!("handling substrate block {b}"); + handle_block(db, key, new_tributary_spec, tributary_retired, processors, serai, block).await?; *next_block += 1; + let mut txn = db.txn(); NextBlock::set(&mut txn, next_block); txn.commit(); + log::info!("handled substrate block {b}"); } @@ -578,6 +390,7 @@ pub async fn scan_task( }; */ // TODO: Restore the above subscription-based system + // That would require moving serai-client from HTTP to websockets let new_substrate_block_notifier = { let serai = &serai; move |next_substrate_block| async move { @@ -586,9 +399,8 @@ pub async fn scan_task( Ok(latest) => { if latest.header.number >= next_substrate_block { return latest; - } else { - sleep(Duration::from_secs(3)).await; } + sleep(Duration::from_secs(3)).await; } Err(e) => { log::error!("couldn't communicate with serai node: {e}"); @@ -648,22 +460,25 @@ pub async fn scan_task( } /// Gets the expected ID for the next Batch. -pub(crate) async fn get_expected_next_batch(serai: &Serai, network: NetworkId) -> u32 { - let mut first = true; - loop { - if !first { - log::error!("{} {network:?}", "couldn't connect to Serai node to get the next batch ID for",); - sleep(Duration::from_secs(5)).await; +/// +/// Will log an error and apply a slight sleep on error, letting the caller simply immediately +/// retry. 
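The rewrite below (`expected_next_batch`) returns errors to the caller instead of looping internally, per the new doc comment. The ID derivation itself is simply:

```rust
// Batch IDs are sequential from 0, so the expected next ID is last + 1,
// or 0 when no batch has been published for the network yet
fn next_batch_id(last: Option<u32>) -> u32 {
  last.map_or(0, |last| last + 1)
}

fn main() {
  assert_eq!(next_batch_id(None), 0);
  assert_eq!(next_batch_id(Some(4)), 5);
}
```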
+pub(crate) async fn expected_next_batch(
+  serai: &Serai,
+  network: NetworkId,
+) -> Result<u32, SeraiError> {
+  async fn expected_next_batch_inner(serai: &Serai, network: NetworkId) -> Result<u32, SeraiError> {
+    let serai = serai.as_of_latest_finalized_block().await?;
+    let last = serai.in_instructions().last_batch_for_network(network).await?;
+    Ok(if let Some(last) = last { last + 1 } else { 0 })
+  }
+  match expected_next_batch_inner(serai, network).await {
+    Ok(next) => Ok(next),
+    Err(e) => {
+      log::error!("couldn't get the expected next batch from substrate: {e:?}");
+      sleep(Duration::from_millis(100)).await;
+      Err(e)
+    }
-    first = false;
-
-    let Ok(serai) = serai.as_of_latest_finalized_block().await else {
-      continue;
-    };
-    let Ok(last) = serai.in_instructions().last_batch_for_network(network).await else {
-      continue;
-    };
-    break if let Some(last) = last { last + 1 } else { 0 };
   }
 }
 
@@ -680,7 +495,7 @@ pub(crate) async fn verify_published_batches<D: Db>(
 ) -> Option<u32> {
   // TODO: Localize from MainDb to SubstrateDb
   let last = crate::LastVerifiedBatchDb::get(txn, network);
-  for id in last.map(|last| last + 1).unwrap_or(0) ..= optimistic_up_to {
+  for id in last.map_or(0, |last| last + 1) ..= optimistic_up_to {
     let Some(on_chain) = BatchInstructionsHashDb::get(txn, network, id) else {
       break;
     };
diff --git a/coordinator/src/tests/mod.rs b/coordinator/src/tests/mod.rs
index 3763cf0da..45a62297c 100644
--- a/coordinator/src/tests/mod.rs
+++ b/coordinator/src/tests/mod.rs
@@ -4,7 +4,7 @@ use std::{
   collections::{VecDeque, HashSet, HashMap},
 };
 
-use serai_client::primitives::NetworkId;
+use serai_client::{primitives::NetworkId, validator_sets::primitives::ValidatorSet};
 
 use processor_messages::CoordinatorMessage;
 
@@ -62,8 +62,8 @@ impl LocalP2p {
 impl P2p for LocalP2p {
   type Id = usize;
 
-  async fn subscribe(&self, _genesis: [u8; 32]) {}
-  async fn unsubscribe(&self, _genesis: [u8; 32]) {}
+  async fn subscribe(&self, _set: ValidatorSet, _genesis: [u8; 32]) {}
+  async fn unsubscribe(&self, _set: ValidatorSet, _genesis: [u8; 32]) {}
 
   async fn send_raw(&self, to: Self::Id, _genesis: Option<[u8; 32]>, msg: Vec<u8>) {
     self.1.write().await.1[to].push_back((self.0, msg));
diff --git a/coordinator/src/tests/tributary/chain.rs b/coordinator/src/tests/tributary/chain.rs
index 36bdef416..360af7ecf 100644
--- a/coordinator/src/tests/tributary/chain.rs
+++ b/coordinator/src/tests/tributary/chain.rs
@@ -5,7 +5,7 @@ use std::{
 use zeroize::Zeroizing;
 use rand_core::{RngCore, CryptoRng, OsRng};
 
-use futures::{task::Poll, poll};
+use futures_util::{task::Poll, poll};
 
 use ciphersuite::{
   group::{ff::Field, GroupEncoding},
@@ -13,7 +13,7 @@ use ciphersuite::{
 };
 use sp_application_crypto::sr25519;
-
+use borsh::BorshDeserialize;
 use serai_client::{
   primitives::NetworkId,
   validator_sets::primitives::{Session, ValidatorSet},
@@ -58,21 +58,26 @@ pub fn new_spec(
     .collect::<Vec<_>>();
 
   let res = TributarySpec::new(serai_block, start_time, set, set_participants);
-  assert_eq!(TributarySpec::read::<&[u8]>(&mut res.serialize().as_ref()).unwrap(), res);
+  assert_eq!(
+    TributarySpec::deserialize_reader(&mut borsh::to_vec(&res).unwrap().as_slice()).unwrap(),
+    res,
+  );
   res
 }
 
 pub async fn new_tributaries(
   keys: &[Zeroizing<<Ristretto as Ciphersuite>::F>],
   spec: &TributarySpec,
-) -> Vec<(LocalP2p, Tributary<MemDb, Transaction, LocalP2p>)> {
+) -> Vec<(MemDb, LocalP2p, Tributary<MemDb, Transaction, LocalP2p>)> {
   let p2p = LocalP2p::new(keys.len());
   let mut res = vec![];
   for (i, key) in keys.iter().enumerate() {
+    let db = MemDb::new();
     res.push((
+      db.clone(),
       p2p[i].clone(),
       Tributary::<_, Transaction, _>::new(
-        MemDb::new(),
+        db,
         spec.genesis(),
spec.start_time(), key.clone(), @@ -90,7 +95,7 @@ pub async fn run_tributaries( mut tributaries: Vec<(LocalP2p, Tributary)>, ) { loop { - for (p2p, tributary) in tributaries.iter_mut() { + for (p2p, tributary) in &mut tributaries { while let Poll::Ready(msg) = poll!(p2p.receive()) { match msg.kind { P2pMessageKind::Tributary(genesis) => { @@ -152,7 +157,11 @@ async fn tributary_test() { let keys = new_keys(&mut OsRng); let spec = new_spec(&mut OsRng, &keys); - let mut tributaries = new_tributaries(&keys, &spec).await; + let mut tributaries = new_tributaries(&keys, &spec) + .await + .into_iter() + .map(|(_, p2p, tributary)| (p2p, tributary)) + .collect::>(); let mut blocks = 0; let mut last_block = spec.genesis(); @@ -161,7 +170,7 @@ async fn tributary_test() { // run_tributaries will run them ad infinitum let timeout = SystemTime::now() + Duration::from_secs(65); while (blocks < 10) && (SystemTime::now().duration_since(timeout).is_err()) { - for (p2p, tributary) in tributaries.iter_mut() { + for (p2p, tributary) in &mut tributaries { while let Poll::Ready(msg) = poll!(p2p.receive()) { match msg.kind { P2pMessageKind::Tributary(genesis) => { @@ -187,7 +196,7 @@ async fn tributary_test() { } // Handle all existing messages - for (p2p, tributary) in tributaries.iter_mut() { + for (p2p, tributary) in &mut tributaries { while let Poll::Ready(msg) = poll!(p2p.receive()) { match msg.kind { P2pMessageKind::Tributary(genesis) => { @@ -211,7 +220,7 @@ async fn tributary_test() { } assert!(tips.len() <= 2); if tips.len() == 2 { - for tip in tips.iter() { + for tip in &tips { // Find a Tributary where this isn't the tip for (_, tributary) in &tributaries { let Some(after) = tributary.reader().block_after(tip) else { continue }; diff --git a/coordinator/src/tests/tributary/dkg.rs b/coordinator/src/tests/tributary/dkg.rs index 347e94da7..04a528f90 100644 --- a/coordinator/src/tests/tributary/dkg.rs +++ b/coordinator/src/tests/tributary/dkg.rs @@ -8,11 +8,14 @@ use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto}; use frost::Participant; use sp_runtime::traits::Verify; -use serai_client::validator_sets::primitives::KeyPair; +use serai_client::{ + primitives::{SeraiAddress, Signature}, + validator_sets::primitives::{ValidatorSet, KeyPair}, +}; use tokio::time::sleep; -use serai_db::{Db, MemDb, DbTxn}; +use serai_db::{Get, DbTxn, Db, MemDb}; use processor_messages::{ key_gen::{self, KeyGenId}, @@ -24,7 +27,7 @@ use tributary::{TransactionTrait, Tributary}; use crate::{ tributary::{ Transaction, TributarySpec, - scanner::{PstTxType, handle_new_blocks}, + scanner::{PublishSeraiTransaction, handle_new_blocks}, }, tests::{ MemProcessors, LocalP2p, @@ -34,10 +37,18 @@ use crate::{ #[tokio::test] async fn dkg_test() { + env_logger::init(); + let keys = new_keys(&mut OsRng); let spec = new_spec(&mut OsRng, &keys); - let tributaries = new_tributaries(&keys, &spec).await; + let full_tributaries = new_tributaries(&keys, &spec).await; + let mut dbs = vec![]; + let mut tributaries = vec![]; + for (db, p2p, tributary) in full_tributaries { + dbs.push(db); + tributaries.push((p2p, tributary)); + } // Run the tributaries in the background tokio::spawn(run_tributaries(tributaries.clone())); @@ -49,8 +60,11 @@ async fn dkg_test() { let mut commitments = vec![0; 256]; OsRng.fill_bytes(&mut commitments); - let mut tx = - Transaction::DkgCommitments(attempt, vec![commitments], Transaction::empty_signed()); + let mut tx = Transaction::DkgCommitments { + attempt, + commitments: vec![commitments], + signed: 
Transaction::empty_signed(), + }; tx.sign(&mut OsRng, spec.genesis(), key); txs.push(tx); } @@ -71,7 +85,7 @@ async fn dkg_test() { .iter() .enumerate() .map(|(i, tx)| { - if let Transaction::DkgCommitments(_, commitments, _) = tx { + if let Transaction::DkgCommitments { commitments, .. } = tx { (Participant::new((i + 1).try_into().unwrap()).unwrap(), commitments[0].clone()) } else { panic!("txs had non-commitments"); @@ -80,20 +94,20 @@ async fn dkg_test() { .collect(); async fn new_processors( + db: &mut MemDb, key: &Zeroizing<::F>, spec: &TributarySpec, tributary: &Tributary, - ) -> (MemDb, MemProcessors) { - let mut scanner_db = MemDb::new(); + ) -> MemProcessors { let processors = MemProcessors::new(); - handle_new_blocks::<_, _, _, _, _, _, _, _, LocalP2p>( - &mut scanner_db, + handle_new_blocks::<_, _, _, _, _, LocalP2p>( + db, key, - |_, _, _, _| async { + &|_, _, _, _| async { panic!("provided TX caused recognized_id to be called in new_processors") }, &processors, - |_, _, _| async { panic!("test tried to publish a new Serai TX in new_processors") }, + &(), &|_| async { panic!( "test tried to publish a new Tributary TX from handle_application_tx in new_processors" @@ -103,11 +117,11 @@ async fn dkg_test() { &tributary.reader(), ) .await; - (scanner_db, processors) + processors } // Instantiate a scanner and verify it has nothing to report - let (mut scanner_db, processors) = new_processors(&keys[0], &spec, &tributaries[0].1).await; + let processors = new_processors(&mut dbs[0], &keys[0], &spec, &tributaries[0].1).await; assert!(processors.0.read().await.is_empty()); // Publish the last commitment @@ -117,14 +131,14 @@ async fn dkg_test() { sleep(Duration::from_secs(Tributary::::block_time().into())).await; // Verify the scanner emits a KeyGen::Commitments message - handle_new_blocks::<_, _, _, _, _, _, _, _, LocalP2p>( - &mut scanner_db, + handle_new_blocks::<_, _, _, _, _, LocalP2p>( + &mut dbs[0], &keys[0], - |_, _, _, _| async { + &|_, _, _, _| async { panic!("provided TX caused recognized_id to be called after Commitments") }, &processors, - |_, _, _| async { panic!("test tried to publish a new Serai TX after Commitments") }, + &(), &|_| async { panic!( "test tried to publish a new Tributary TX from handle_application_tx after Commitments" @@ -151,8 +165,8 @@ async fn dkg_test() { } // Verify all keys exhibit this scanner behavior - for (i, key) in keys.iter().enumerate() { - let (_, processors) = new_processors(key, &spec, &tributaries[i].1).await; + for (i, key) in keys.iter().enumerate().skip(1) { + let processors = new_processors(&mut dbs[i], key, &spec, &tributaries[i].1).await; let mut msgs = processors.0.write().await; assert_eq!(msgs.len(), 1); let msgs = msgs.get_mut(&spec.set().network).unwrap(); @@ -182,12 +196,14 @@ async fn dkg_test() { } } + let mut txn = dbs[k].txn(); let mut tx = Transaction::DkgShares { attempt, shares, - confirmation_nonces: crate::tributary::dkg_confirmation_nonces(key, &spec, 0), + confirmation_nonces: crate::tributary::dkg_confirmation_nonces(key, &spec, &mut txn, 0), signed: Transaction::empty_signed(), }; + txn.commit(); tx.sign(&mut OsRng, spec.genesis(), key); txs.push(tx); } @@ -201,14 +217,14 @@ async fn dkg_test() { } // With just 4 sets of shares, nothing should happen yet - handle_new_blocks::<_, _, _, _, _, _, _, _, LocalP2p>( - &mut scanner_db, + handle_new_blocks::<_, _, _, _, _, LocalP2p>( + &mut dbs[0], &keys[0], - |_, _, _, _| async { + &|_, _, _, _| async { panic!("provided TX caused recognized_id to be called after some 
shares") }, &processors, - |_, _, _| async { panic!("test tried to publish a new Serai TX after some shares") }, + &(), &|_| async { panic!( "test tried to publish a new Tributary TX from handle_application_tx after some shares" @@ -254,28 +270,30 @@ async fn dkg_test() { }; // Any scanner which has handled the prior blocks should only emit the new event - handle_new_blocks::<_, _, _, _, _, _, _, _, LocalP2p>( - &mut scanner_db, - &keys[0], - |_, _, _, _| async { panic!("provided TX caused recognized_id to be called after shares") }, - &processors, - |_, _, _| async { panic!("test tried to publish a new Serai TX") }, - &|_| async { panic!("test tried to publish a new Tributary TX from handle_application_tx") }, - &spec, - &tributaries[0].1.reader(), - ) - .await; - { - let mut msgs = processors.0.write().await; - assert_eq!(msgs.len(), 1); - let msgs = msgs.get_mut(&spec.set().network).unwrap(); - assert_eq!(msgs.pop_front().unwrap(), shares_for(0)); - assert!(msgs.is_empty()); + for (i, key) in keys.iter().enumerate() { + handle_new_blocks::<_, _, _, _, _, LocalP2p>( + &mut dbs[i], + key, + &|_, _, _, _| async { panic!("provided TX caused recognized_id to be called after shares") }, + &processors, + &(), + &|_| async { panic!("test tried to publish a new Tributary TX from handle_application_tx") }, + &spec, + &tributaries[i].1.reader(), + ) + .await; + { + let mut msgs = processors.0.write().await; + assert_eq!(msgs.len(), 1); + let msgs = msgs.get_mut(&spec.set().network).unwrap(); + assert_eq!(msgs.pop_front().unwrap(), shares_for(i)); + assert!(msgs.is_empty()); + } } // Yet new scanners should emit all events for (i, key) in keys.iter().enumerate() { - let (_, processors) = new_processors(key, &spec, &tributaries[i].1).await; + let processors = new_processors(&mut MemDb::new(), key, &spec, &tributaries[i].1).await; let mut msgs = processors.0.write().await; assert_eq!(msgs.len(), 1); let msgs = msgs.get_mut(&spec.set().network).unwrap(); @@ -302,17 +320,16 @@ async fn dkg_test() { let mut txs = vec![]; for (i, key) in keys.iter().enumerate() { let attempt = 0; - let mut scanner_db = &mut scanner_db; - let (mut local_scanner_db, _) = new_processors(key, &spec, &tributaries[0].1).await; - if i != 0 { - scanner_db = &mut local_scanner_db; - } - let mut txn = scanner_db.txn(); + let mut txn = dbs[i].txn(); let share = crate::tributary::generated_key_pair::(&mut txn, key, &spec, &key_pair, 0).unwrap(); txn.commit(); - let mut tx = Transaction::DkgConfirmed(attempt, share, Transaction::empty_signed()); + let mut tx = Transaction::DkgConfirmed { + attempt, + confirmation_share: share, + signed: Transaction::empty_signed(), + }; tx.sign(&mut OsRng, spec.genesis(), key); txs.push(tx); } @@ -320,56 +337,50 @@ async fn dkg_test() { for (i, tx) in txs.iter().enumerate() { assert_eq!(tributaries[i].1.add_transaction(tx.clone()).await, Ok(true)); } - for tx in txs.iter() { + for tx in &txs { wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await; } + struct CheckPublishSetKeys { + spec: TributarySpec, + key_pair: KeyPair, + } + #[async_trait::async_trait] + impl PublishSeraiTransaction for CheckPublishSetKeys { + async fn publish_set_keys( + &self, + _db: &(impl Sync + Get), + set: ValidatorSet, + removed: Vec, + key_pair: KeyPair, + signature: Signature, + ) { + assert_eq!(set, self.spec.set()); + assert!(removed.is_empty()); + assert_eq!(self.key_pair, key_pair); + assert!(signature.verify( + &*serai_client::validator_sets::primitives::set_keys_message(&set, &[], &key_pair), + 
&serai_client::Public( + frost::dkg::musig::musig_key::( + &serai_client::validator_sets::primitives::musig_context(set), + &self.spec.validators().into_iter().map(|(validator, _)| validator).collect::>() + ) + .unwrap() + .to_bytes() + ), + )); + } + } + // The scanner should successfully try to publish a transaction with a validly signed signature - handle_new_blocks::<_, _, _, _, _, _, _, _, LocalP2p>( - &mut scanner_db, + handle_new_blocks::<_, _, _, _, _, LocalP2p>( + &mut dbs[0], &keys[0], - |_, _, _, _| async { + &|_, _, _, _| async { panic!("provided TX caused recognized_id to be called after DKG confirmation") }, &processors, - |set, tx_type, tx| { - assert_eq!(tx_type, PstTxType::SetKeys); - - let spec = spec.clone(); - let key_pair = key_pair.clone(); - async move { - assert_eq!(tx.signature, None); - match tx.call { - serai_client::abi::Call::ValidatorSets( - serai_client::abi::validator_sets::Call::set_keys { - network, - key_pair: set_key_pair, - signature, - }, - ) => { - assert_eq!(set, spec.set()); - assert_eq!(set.network, network); - assert_eq!(key_pair, set_key_pair); - assert!(signature.verify( - &*serai_client::validator_sets::primitives::set_keys_message(&set, &key_pair), - &serai_client::Public( - frost::dkg::musig::musig_key::( - &serai_client::validator_sets::primitives::musig_context(set), - &spec - .validators() - .into_iter() - .map(|(validator, _)| validator) - .collect::>() - ) - .unwrap() - .to_bytes() - ), - )); - } - _ => panic!("Serai TX wasn't to set_keys"), - } - } - }, + &CheckPublishSetKeys { spec: spec.clone(), key_pair: key_pair.clone() }, &|_| async { panic!("test tried to publish a new Tributary TX from handle_application_tx") }, &spec, &tributaries[0].1.reader(), diff --git a/coordinator/src/tests/tributary/handle_p2p.rs b/coordinator/src/tests/tributary/handle_p2p.rs index 00ef6d345..756f45610 100644 --- a/coordinator/src/tests/tributary/handle_p2p.rs +++ b/coordinator/src/tests/tributary/handle_p2p.rs @@ -27,7 +27,11 @@ async fn handle_p2p_test() { let keys = new_keys(&mut OsRng); let spec = new_spec(&mut OsRng, &keys); - let mut tributaries = new_tributaries(&keys, &spec).await; + let mut tributaries = new_tributaries(&keys, &spec) + .await + .into_iter() + .map(|(_, p2p, tributary)| (p2p, tributary)) + .collect::>(); let mut tributary_senders = vec![]; let mut tributary_arcs = vec![]; diff --git a/coordinator/src/tests/tributary/mod.rs b/coordinator/src/tests/tributary/mod.rs index 515202be4..cc6567d68 100644 --- a/coordinator/src/tests/tributary/mod.rs +++ b/coordinator/src/tests/tributary/mod.rs @@ -2,12 +2,18 @@ use core::fmt::Debug; use rand_core::{RngCore, OsRng}; +use ciphersuite::{group::Group, Ciphersuite, Ristretto}; + use scale::{Encode, Decode}; +use serai_client::{ + primitives::{SeraiAddress, Signature}, + validator_sets::primitives::{ValidatorSet, KeyPair}, +}; use processor_messages::coordinator::SubstrateSignableId; use tributary::{ReadWrite, tests::random_signed_with_nonce}; -use crate::tributary::{SignData, Transaction}; +use crate::tributary::{Label, SignData, Transaction, scanner::PublishSeraiTransaction}; mod chain; pub use chain::*; @@ -20,6 +26,20 @@ mod dkg; mod handle_p2p; mod sync; +#[async_trait::async_trait] +impl PublishSeraiTransaction for () { + async fn publish_set_keys( + &self, + _db: &(impl Sync + serai_db::Get), + _set: ValidatorSet, + _removed: Vec, + _key_pair: KeyPair, + _signature: Signature, + ) { + panic!("publish_set_keys was called in test") + } +} + fn random_u32(rng: &mut R) -> u32 { 
u32::try_from(rng.next_u64() >> 32).unwrap() } @@ -34,26 +54,27 @@ fn random_vec(rng: &mut R, limit: usize) -> Vec { fn random_sign_data( rng: &mut R, plan: Id, - nonce: u32, + label: Label, ) -> SignData { SignData { plan, attempt: random_u32(&mut OsRng), + label, data: { let mut res = vec![]; - for _ in 0 .. ((rng.next_u64() % 255) + 1) { + for _ in 0 .. (rng.next_u64() % 256) { res.push(random_vec(&mut OsRng, 512)); } res }, - signed: random_signed_with_nonce(&mut OsRng, nonce), + signed: random_signed_with_nonce(&mut OsRng, label.nonce()), } } -fn test_read_write(value: RW) { - assert_eq!(value, RW::read::<&[u8]>(&mut value.serialize().as_ref()).unwrap()); +fn test_read_write(value: &RW) { + assert_eq!(value, &RW::read::<&[u8]>(&mut value.serialize().as_ref()).unwrap()); } #[test] @@ -84,48 +105,48 @@ fn tx_size_limit() { #[test] fn serialize_sign_data() { - fn test_read_write(value: SignData) { + fn test_read_write(value: &SignData) { let mut buf = vec![]; value.write(&mut buf).unwrap(); - assert_eq!(value, SignData::read(&mut buf.as_slice(), value.signed.nonce).unwrap()) + assert_eq!(value, &SignData::read(&mut buf.as_slice()).unwrap()) } let mut plan = [0; 3]; OsRng.fill_bytes(&mut plan); - test_read_write(random_sign_data::<_, _>( + test_read_write(&random_sign_data::<_, _>( &mut OsRng, plan, - u32::try_from(OsRng.next_u64() >> 32).unwrap(), + if (OsRng.next_u64() % 2) == 0 { Label::Preprocess } else { Label::Share }, )); let mut plan = [0; 5]; OsRng.fill_bytes(&mut plan); - test_read_write(random_sign_data::<_, _>( + test_read_write(&random_sign_data::<_, _>( &mut OsRng, plan, - u32::try_from(OsRng.next_u64() >> 32).unwrap(), + if (OsRng.next_u64() % 2) == 0 { Label::Preprocess } else { Label::Share }, )); let mut plan = [0; 8]; OsRng.fill_bytes(&mut plan); - test_read_write(random_sign_data::<_, _>( + test_read_write(&random_sign_data::<_, _>( &mut OsRng, plan, - u32::try_from(OsRng.next_u64() >> 32).unwrap(), + if (OsRng.next_u64() % 2) == 0 { Label::Preprocess } else { Label::Share }, )); let mut plan = [0; 24]; OsRng.fill_bytes(&mut plan); - test_read_write(random_sign_data::<_, _>( + test_read_write(&random_sign_data::<_, _>( &mut OsRng, plan, - u32::try_from(OsRng.next_u64() >> 32).unwrap(), + if (OsRng.next_u64() % 2) == 0 { Label::Preprocess } else { Label::Share }, )); } #[test] fn serialize_transaction() { - test_read_write(Transaction::RemoveParticipant( - frost::Participant::new(u16::try_from(OsRng.next_u64() >> 48).unwrap().saturating_add(1)) - .unwrap(), - )); + test_read_write(&Transaction::RemoveParticipantDueToDkg { + participant: ::G::random(&mut OsRng), + signed: random_signed_with_nonce(&mut OsRng, 0), + }); { let mut commitments = vec![random_vec(&mut OsRng, 512)]; @@ -134,11 +155,11 @@ fn serialize_transaction() { OsRng.fill_bytes(&mut temp); commitments.push(temp); } - test_read_write(Transaction::DkgCommitments( - random_u32(&mut OsRng), + test_read_write(&Transaction::DkgCommitments { + attempt: random_u32(&mut OsRng), commitments, - random_signed_with_nonce(&mut OsRng, 0), - )); + signed: random_signed_with_nonce(&mut OsRng, 0), + }); } { @@ -149,7 +170,7 @@ fn serialize_transaction() { // Create a valid vec of shares let mut shares = vec![]; // Create up to 150 participants - for _ in 0 .. ((OsRng.next_u64() % 150) + 1) { + for _ in 0 ..= (OsRng.next_u64() % 150) { // Give each sender multiple shares let mut sender_shares = vec![]; for _ in 0 .. 
amount_of_shares { @@ -160,7 +181,7 @@ fn serialize_transaction() { shares.push(sender_shares); } - test_read_write(Transaction::DkgShares { + test_read_write(&Transaction::DkgShares { attempt: random_u32(&mut OsRng), shares, confirmation_nonces: { @@ -173,7 +194,7 @@ fn serialize_transaction() { } for i in 0 .. 2 { - test_read_write(Transaction::InvalidDkgShare { + test_read_write(&Transaction::InvalidDkgShare { attempt: random_u32(&mut OsRng), accuser: frost::Participant::new( u16::try_from(OsRng.next_u64() >> 48).unwrap().saturating_add(1), @@ -192,70 +213,56 @@ fn serialize_transaction() { }); } - test_read_write(Transaction::DkgConfirmed( - random_u32(&mut OsRng), - { + test_read_write(&Transaction::DkgConfirmed { + attempt: random_u32(&mut OsRng), + confirmation_share: { let mut share = [0; 32]; OsRng.fill_bytes(&mut share); share }, - random_signed_with_nonce(&mut OsRng, 2), - )); - - { - let mut key = [0; 32]; - OsRng.fill_bytes(&mut key); - test_read_write(Transaction::DkgRemovalPreprocess(random_sign_data(&mut OsRng, key, 0))); - } - { - let mut key = [0; 32]; - OsRng.fill_bytes(&mut key); - test_read_write(Transaction::DkgRemovalShare(random_sign_data(&mut OsRng, key, 1))); - } + signed: random_signed_with_nonce(&mut OsRng, 2), + }); { let mut block = [0; 32]; OsRng.fill_bytes(&mut block); - test_read_write(Transaction::CosignSubstrateBlock(block)); + test_read_write(&Transaction::CosignSubstrateBlock(block)); } { let mut block = [0; 32]; OsRng.fill_bytes(&mut block); - let mut batch = [0; 5]; - OsRng.fill_bytes(&mut batch); - test_read_write(Transaction::Batch(block, batch)); + let batch = u32::try_from(OsRng.next_u64() >> 32).unwrap(); + test_read_write(&Transaction::Batch { block, batch }); } - test_read_write(Transaction::SubstrateBlock(OsRng.next_u64())); + test_read_write(&Transaction::SubstrateBlock(OsRng.next_u64())); { - let mut plan = [0; 5]; - OsRng.fill_bytes(&mut plan); - test_read_write(Transaction::SubstratePreprocess(random_sign_data( + let batch = u32::try_from(OsRng.next_u64() >> 32).unwrap(); + test_read_write(&Transaction::SubstrateSign(random_sign_data( &mut OsRng, - SubstrateSignableId::Batch(plan), - 0, + SubstrateSignableId::Batch(batch), + Label::Preprocess, ))); } { - let mut plan = [0; 5]; - OsRng.fill_bytes(&mut plan); - test_read_write(Transaction::SubstrateShare(random_sign_data( + let batch = u32::try_from(OsRng.next_u64() >> 32).unwrap(); + test_read_write(&Transaction::SubstrateSign(random_sign_data( &mut OsRng, - SubstrateSignableId::Batch(plan), - 1, + SubstrateSignableId::Batch(batch), + Label::Share, ))); } { let mut plan = [0; 32]; OsRng.fill_bytes(&mut plan); - test_read_write(Transaction::SignPreprocess(random_sign_data(&mut OsRng, plan, 0))); + test_read_write(&Transaction::Sign(random_sign_data(&mut OsRng, plan, Label::Preprocess))); } { let mut plan = [0; 32]; OsRng.fill_bytes(&mut plan); - test_read_write(Transaction::SignShare(random_sign_data(&mut OsRng, plan, 1))); + test_read_write(&Transaction::Sign(random_sign_data(&mut OsRng, plan, Label::Share))); } { @@ -263,7 +270,7 @@ fn serialize_transaction() { OsRng.fill_bytes(&mut plan); let mut tx_hash = vec![0; (OsRng.next_u64() % 64).try_into().unwrap()]; OsRng.fill_bytes(&mut tx_hash); - test_read_write(Transaction::SignCompleted { + test_read_write(&Transaction::SignCompleted { plan, tx_hash, first_signer: random_signed_with_nonce(&mut OsRng, 2).signer, diff --git a/coordinator/src/tests/tributary/sync.rs b/coordinator/src/tests/tributary/sync.rs index 1267368fa..0a468c63e 100644 
--- a/coordinator/src/tests/tributary/sync.rs +++ b/coordinator/src/tests/tributary/sync.rs @@ -29,9 +29,13 @@ async fn sync_test() { let mut keys = new_keys(&mut OsRng); let spec = new_spec(&mut OsRng, &keys); // Ensure this can have a node fail - assert!(spec.n() > spec.t()); + assert!(spec.n(&[]) > spec.t()); - let mut tributaries = new_tributaries(&keys, &spec).await; + let mut tributaries = new_tributaries(&keys, &spec) + .await + .into_iter() + .map(|(_, p2p, tributary)| (p2p, tributary)) + .collect::>(); // Keep a Tributary back, effectively having it offline let syncer_key = keys.pop().unwrap(); @@ -138,7 +142,7 @@ async fn sync_test() { // Because only `t` validators are used in a commit, take n - t nodes offline // leaving only `t` nodes. Which should force it to participate in the consensus // of next blocks. - let spares = usize::from(spec.n() - spec.t()); + let spares = usize::from(spec.n(&[]) - spec.t()); for thread in p2p_threads.iter().take(spares) { thread.abort(); } diff --git a/coordinator/src/tests/tributary/tx.rs b/coordinator/src/tests/tributary/tx.rs index cfe1bab81..da9433b67 100644 --- a/coordinator/src/tests/tributary/tx.rs +++ b/coordinator/src/tests/tributary/tx.rs @@ -23,7 +23,11 @@ async fn tx_test() { let keys = new_keys(&mut OsRng); let spec = new_spec(&mut OsRng, &keys); - let tributaries = new_tributaries(&keys, &spec).await; + let tributaries = new_tributaries(&keys, &spec) + .await + .into_iter() + .map(|(_, p2p, tributary)| (p2p, tributary)) + .collect::>(); // Run the tributaries in the background tokio::spawn(run_tributaries(tributaries.clone())); @@ -39,8 +43,11 @@ async fn tx_test() { // Create the TX with a null signature so we can get its sig hash let block_before_tx = tributaries[sender].1.tip().await; - let mut tx = - Transaction::DkgCommitments(attempt, vec![commitments.clone()], Transaction::empty_signed()); + let mut tx = Transaction::DkgCommitments { + attempt, + commitments: vec![commitments.clone()], + signed: Transaction::empty_signed(), + }; tx.sign(&mut OsRng, spec.genesis(), &key); assert_eq!(tributaries[sender].1.add_transaction(tx.clone()).await, Ok(true)); diff --git a/coordinator/src/tributary/db.rs b/coordinator/src/tributary/db.rs index 2d485af00..27fef1f0e 100644 --- a/coordinator/src/tributary/db.rs +++ b/coordinator/src/tributary/db.rs @@ -1,24 +1,25 @@ -use core::ops::Deref; use std::collections::HashMap; -use zeroize::Zeroizing; -use ciphersuite::{Ciphersuite, Ristretto, group::GroupEncoding}; +use scale::Encode; +use borsh::{BorshSerialize, BorshDeserialize}; + +use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto}; use frost::Participant; -use serai_client::validator_sets::primitives::KeyPair; +use serai_client::validator_sets::primitives::{KeyPair, ValidatorSet}; use processor_messages::coordinator::SubstrateSignableId; -use scale::{Encode, Decode}; - pub use serai_db::*; -use crate::tributary::TributarySpec; +use tributary::ReadWrite; -#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, Decode)] +use crate::tributary::{Label, Transaction}; + +#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, BorshSerialize, BorshDeserialize)] pub enum Topic { Dkg, - DkgRemoval([u8; 32]), + DkgConfirmation, SubstrateSign(SubstrateSignableId), Sign([u8; 32]), } @@ -27,7 +28,7 @@ pub enum Topic { #[derive(Clone, Copy, PartialEq, Eq, Debug, Encode)] pub struct DataSpecification { pub topic: Topic, - pub label: &'static str, + pub label: Label, pub attempt: u32, } @@ -41,32 +42,56 @@ pub enum Accumulation { NotReady, } +// TODO: 
Move from genesis to set for indexing create_db!( - NewTributary { + Tributary { SeraiBlockNumber: (hash: [u8; 32]) -> u64, - LastBlock: (genesis: [u8; 32]) -> [u8; 32], + SeraiDkgCompleted: (spec: ValidatorSet) -> [u8; 32], + + TributaryBlockNumber: (block: [u8; 32]) -> u32, + LastHandledBlock: (genesis: [u8; 32]) -> [u8; 32], + FatalSlashes: (genesis: [u8; 32]) -> Vec<[u8; 32]>, + RemovedAsOfDkgAttempt: (genesis: [u8; 32], attempt: u32) -> Vec<[u8; 32]>, + OfflineDuringDkg: (genesis: [u8; 32]) -> Vec<[u8; 32]>, FatallySlashed: (genesis: [u8; 32], account: [u8; 32]) -> (), - DkgShare: (genesis: [u8; 32], from: u16, to: u16) -> Vec, - PlanIds: (genesis: &[u8], block: u64) -> Vec<[u8; 32]>, - ConfirmationNonces: (genesis: [u8; 32], attempt: u32) -> HashMap>, - RemovalNonces: - (genesis: [u8; 32], removing: [u8; 32], attempt: u32) -> HashMap>, - CurrentlyCompletingKeyPair: (genesis: [u8; 32]) -> KeyPair, - DkgCompleted: (genesis: [u8; 32]) -> (), + + VotedToRemove: (genesis: [u8; 32], voter: [u8; 32], to_remove: [u8; 32]) -> (), + VotesToRemove: (genesis: [u8; 32], to_remove: [u8; 32]) -> u16, + AttemptDb: (genesis: [u8; 32], topic: &Topic) -> u32, + ReattemptDb: (genesis: [u8; 32], block: u32) -> Vec, DataReceived: (genesis: [u8; 32], data_spec: &DataSpecification) -> u16, DataDb: (genesis: [u8; 32], data_spec: &DataSpecification, signer_bytes: &[u8; 32]) -> Vec, - EventDb: (id: [u8; 32], index: u32) -> (), + + DkgShare: (genesis: [u8; 32], from: u16, to: u16) -> Vec, + ConfirmationNonces: (genesis: [u8; 32], attempt: u32) -> HashMap>, + DkgKeyPair: (genesis: [u8; 32], attempt: u32) -> KeyPair, + KeyToDkgAttempt: (key: [u8; 32]) -> u32, + DkgLocallyCompleted: (genesis: [u8; 32]) -> (), + + PlanIds: (genesis: &[u8], block: u64) -> Vec<[u8; 32]>, + + SignedTransactionDb: (order: &[u8], nonce: u32) -> Vec, } ); +impl FatalSlashes { + pub fn get_as_keys(getter: &impl Get, genesis: [u8; 32]) -> Vec<::G> { + FatalSlashes::get(getter, genesis) + .unwrap_or(vec![]) + .iter() + .map(|key| ::G::from_bytes(key).unwrap()) + .collect::>() + } +} + impl FatallySlashed { pub fn set_fatally_slashed(txn: &mut impl DbTxn, genesis: [u8; 32], account: [u8; 32]) { Self::set(txn, genesis, account, &()); let mut existing = FatalSlashes::get(txn, genesis).unwrap_or_default(); - // Don't append if we already have it + // Don't append if we already have it, which can occur upon multiple faults if existing.iter().any(|existing| existing == &account) { return; } @@ -81,81 +106,78 @@ impl AttemptDb { Self::set(txn, genesis, &topic, &0u32); } + pub fn start_next_attempt(txn: &mut impl DbTxn, genesis: [u8; 32], topic: Topic) -> u32 { + let next = + Self::attempt(txn, genesis, topic).expect("starting next attempt for unknown topic") + 1; + Self::set(txn, genesis, &topic, &next); + next + } + pub fn attempt(getter: &impl Get, genesis: [u8; 32], topic: Topic) -> Option { let attempt = Self::get(getter, genesis, &topic); // Don't require explicit recognition of the Dkg topic as it starts when the chain does - if attempt.is_none() && (topic == Topic::Dkg) { + if attempt.is_none() && ((topic == Topic::Dkg) || (topic == Topic::DkgConfirmation)) { return Some(0); } attempt } } -impl DataDb { - pub fn accumulate( +impl ReattemptDb { + pub fn schedule_reattempt( txn: &mut impl DbTxn, - our_key: &Zeroizing<::F>, - spec: &TributarySpec, - data_spec: &DataSpecification, - signer: ::G, - data: &Vec, - ) -> Accumulation { - let genesis = spec.genesis(); - if Self::get(txn, genesis, data_spec, &signer.to_bytes()).is_some() { - 
panic!("accumulating data for a participant multiple times"); + genesis: [u8; 32], + current_block_number: u32, + topic: Topic, + ) { + // 5 minutes + #[cfg(not(feature = "longer-reattempts"))] + const BASE_REATTEMPT_DELAY: u32 = (5 * 60 * 1000) / tributary::tendermint::TARGET_BLOCK_TIME; + + // 10 minutes, intended for latent environments like the GitHub CI + #[cfg(feature = "longer-reattempts")] + const BASE_REATTEMPT_DELAY: u32 = (10 * 60 * 1000) / tributary::tendermint::TARGET_BLOCK_TIME; + + // 5 minutes for attempts 0 ..= 2, 10 minutes for attempts 3 ..= 5, 15 minutes for attempts > 5 + // Assumes no event will take longer than 15 minutes, yet grows the time in case there are + // network bandwidth issues + let mut reattempt_delay = BASE_REATTEMPT_DELAY * + ((AttemptDb::attempt(txn, genesis, topic) + .expect("scheduling re-attempt for unknown topic") / + 3) + + 1) + .min(3); + // Allow more time for DKGs since they have an extra round and much more data + if matches!(topic, Topic::Dkg) { + reattempt_delay *= 4; } - let signer_shares = { - let signer_i = - spec.i(signer).expect("transaction signed by a non-validator for this tributary"); - u16::from(signer_i.end) - u16::from(signer_i.start) - }; - - let prior_received = DataReceived::get(txn, genesis, data_spec).unwrap_or_default(); - let now_received = prior_received + signer_shares; - DataReceived::set(txn, genesis, data_spec, &now_received); - DataDb::set(txn, genesis, data_spec, &signer.to_bytes(), data); - - // If we have all the needed commitments/preprocesses/shares, tell the processor - let needed = if data_spec.topic == Topic::Dkg { spec.n() } else { spec.t() }; - if (prior_received < needed) && (now_received >= needed) { - return Accumulation::Ready({ - let mut data = HashMap::new(); - for validator in spec.validators().iter().map(|validator| validator.0) { - data.insert( - spec.i(validator).unwrap().start, - if let Some(data) = Self::get(txn, genesis, data_spec, &validator.to_bytes()) { - data - } else { - continue; - }, - ); - } - - assert_eq!(data.len(), usize::from(needed)); - - // Remove our own piece of data, if we were involved - if data - .remove( - &spec - .i(Ristretto::generator() * our_key.deref()) - .expect("handling a message for a Tributary we aren't part of") - .start, - ) - .is_some() - { - DataSet::Participating(data) - } else { - DataSet::NotParticipating - } - }); + let upon_block = current_block_number + reattempt_delay; + + let mut reattempts = Self::get(txn, genesis, upon_block).unwrap_or(vec![]); + reattempts.push(topic); + Self::set(txn, genesis, upon_block, &reattempts); + } + + pub fn take(txn: &mut impl DbTxn, genesis: [u8; 32], block_number: u32) -> Vec { + let res = Self::get(txn, genesis, block_number).unwrap_or(vec![]); + if !res.is_empty() { + Self::del(txn, genesis, block_number); } - Accumulation::NotReady + res } } -impl EventDb { - pub fn handle_event(txn: &mut impl DbTxn, id: [u8; 32], index: u32) { - assert!(Self::get(txn, id, index).is_none()); - Self::set(txn, id, index, &()); +impl SignedTransactionDb { + pub fn take_signed_transaction( + txn: &mut impl DbTxn, + order: &[u8], + nonce: u32, + ) -> Option { + let res = SignedTransactionDb::get(txn, order, nonce) + .map(|bytes| Transaction::read(&mut bytes.as_slice()).unwrap()); + if res.is_some() { + Self::del(txn, order, nonce); + } + res } } diff --git a/coordinator/src/tributary/dkg_confirmer.rs b/coordinator/src/tributary/dkg_confirmer.rs deleted file mode 100644 index 5fca0b2f0..000000000 --- 
a/coordinator/src/tributary/dkg_confirmer.rs +++ /dev/null @@ -1,207 +0,0 @@ -use std::collections::HashMap; - -use zeroize::Zeroizing; - -use rand_core::SeedableRng; -use rand_chacha::ChaCha20Rng; - -use transcript::{Transcript, RecommendedTranscript}; -use ciphersuite::{Ciphersuite, Ristretto}; -use frost::{ - FrostError, - dkg::{Participant, musig::musig}, - sign::*, -}; -use frost_schnorrkel::Schnorrkel; - -use serai_client::validator_sets::primitives::{KeyPair, musig_context, set_keys_message}; - -use crate::tributary::TributarySpec; - -/* - The following confirms the results of the DKG performed by the Processors onto Substrate. - - This is done by a signature over the generated key pair by the validators' MuSig-aggregated - public key. The MuSig-aggregation achieves on-chain efficiency and prevents on-chain censorship - of individual validators' DKG results by the Serai validator set. - - Since we're using the validators' public keys, as needed for their being the root of trust, the - coordinator must perform the signing. This is distinct from all other group-signing operations - which are generally done by the processor. - - Instead of maintaining state, the following rebuilds the full state on every call. This is deemed - acceptable re: performance as: - - 1) The DKG confirmation is only done upon the start of the Tributary. - 2) This is an O(n) algorithm. - 3) The size of the validator set is bounded by MAX_KEY_SHARES_PER_SET. - - Accordingly, this should be infrequently run and of tolerable algorithmic complexity. - - As for safety, it is explicitly unsafe to reuse nonces across signing sessions. This is in - contradiction with our rebuilding which is dependent on deterministic nonces. Safety is derived - from the deterministic nonces being context-bound under a BFT protocol. The flow is as follows: - - 1) Derive a deterministic nonce by hashing the private key, Tributary parameters, and attempt. - 2) Publish the nonces' commitments, receiving everyone else's *and the DKG shares determining the - message to be signed*. - 3) Sign and publish the signature share. - - In order for nonce re-use to occur, the received nonce commitments, or the received DKG shares, - would have to be distinct and sign would have to be called again. - - Before we act on any received messages, they're ordered and finalized by a BFT algorithm. The - only way to operate on distinct received messages would be if: - - 1) A logical flaw exists, letting new messages overwrite prior messages - 2) A reorganization occurred from chain A to chain B, and with it, different messages - - Reorganizations are not supported, as BFT is assumed by the presence of a BFT algorithm. While - a significant number of processes may be byzantine, leading to BFT being broken, that still will - not trigger a reorganization. The only way to move to a distinct chain, with distinct messages, - would be by rebuilding the local process entirely (this time following chain B). - - Accordingly, safety follows if: - - 1) The local view of received messages is static - 2) The local process doesn't rebuild after a byzantine fault produces multiple blockchains - - We assume the former. We can prevent the latter (TODO) by: - - 1) Defining a per-build entropy, used so long as a DB is used. - 2) Checking the initially used commitments for the DKG align with the per-build entropy. - - If a rebuild occurs, which is the only way we could follow a distinct blockchain, our entropy - will change (preventing nonce reuse).
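For illustration, a minimal sketch of the context-bound nonce derivation described above, using the `transcript`, `rand_chacha`, and `zeroize` crates this file already imports (the helper name and byte-slice parameters are assumptions for the sketch, not part of this codebase):

use rand_core::SeedableRng;
use rand_chacha::ChaCha20Rng;
use transcript::{Transcript, RecommendedTranscript};
use zeroize::Zeroizing;

// Seed a ChaCha20 RNG from a transcript binding the session parameters, the
// signer's key, and the attempt. Identical inputs reproduce identical nonces
// (safe under the BFT assumptions above); changing any input, such as the
// attempt, yields an unrelated seed.
fn deterministic_nonce_rng(spec_bytes: &[u8], key_bytes: [u8; 32], attempt: u32) -> ChaCha20Rng {
  let mut transcript = RecommendedTranscript::new(b"Deterministic Nonce Sketch");
  transcript.append_message(b"spec", spec_bytes);
  transcript.append_message(b"key", Zeroizing::new(key_bytes));
  transcript.append_message(b"attempt", attempt.to_le_bytes());
  ChaCha20Rng::from_seed(Zeroizing::new(transcript).rng_seed(b"preprocess"))
}

Because the seed commits to the attempt number, a re-attempt (which increments `attempt`) can never replay the nonces of a prior session.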
- - This will allow a validator to still participate in DKGs within a single build, even if they have - spontaneous reboots, and on collapse triggering a rebuild, they don't lose safety. - - TODO: We also need to review how we're handling Processor preprocesses and likely implement the - same on-chain-preprocess-matches-presumed-preprocess check before publishing shares. -*/ -pub(crate) struct DkgConfirmer; -impl DkgConfirmer { - // Convert the passed in HashMap, which uses the validators' start index for their `s` threshold - // shares, to the indexes needed for MuSig - fn from_threshold_i_to_musig_i( - spec: &TributarySpec, - mut old_map: HashMap>, - ) -> HashMap> { - let mut new_map = HashMap::new(); - for (new_i, validator) in spec.validators().into_iter().enumerate() { - let threshold_i = spec.i(validator.0).unwrap(); - if let Some(value) = old_map.remove(&threshold_i.start) { - new_map.insert(Participant::new(u16::try_from(new_i + 1).unwrap()).unwrap(), value); - } - } - new_map - } - - fn preprocess_internal( - spec: &TributarySpec, - key: &Zeroizing<::F>, - attempt: u32, - ) -> (AlgorithmSignMachine, [u8; 64]) { - let validators = spec.validators().iter().map(|val| val.0).collect::>(); - - let context = musig_context(spec.set()); - let mut chacha = ChaCha20Rng::from_seed({ - let mut entropy_transcript = RecommendedTranscript::new(b"DkgConfirmer Entropy"); - entropy_transcript.append_message(b"spec", spec.serialize()); - entropy_transcript.append_message(b"key", Zeroizing::new(key.to_bytes())); - entropy_transcript.append_message(b"attempt", attempt.to_le_bytes()); - Zeroizing::new(entropy_transcript).rng_seed(b"preprocess") - }); - let (machine, preprocess) = AlgorithmMachine::new( - Schnorrkel::new(b"substrate"), - musig(&context, key, &validators) - .expect("confirming the DKG for a set we aren't in/validator present multiple times") - .into(), - ) - .preprocess(&mut chacha); - - (machine, preprocess.serialize().try_into().unwrap()) - } - // Get the preprocess for this confirmation. - pub(crate) fn preprocess( - spec: &TributarySpec, - key: &Zeroizing<::F>, - attempt: u32, - ) -> [u8; 64] { - Self::preprocess_internal(spec, key, attempt).1 - } - - fn share_internal( - spec: &TributarySpec, - key: &Zeroizing<::F>, - attempt: u32, - preprocesses: HashMap>, - key_pair: &KeyPair, - ) -> Result<(AlgorithmSignatureMachine, [u8; 32]), Participant> { - let machine = Self::preprocess_internal(spec, key, attempt).0; - let preprocesses = Self::from_threshold_i_to_musig_i(spec, preprocesses) - .into_iter() - .map(|(p, preprocess)| { - machine - .read_preprocess(&mut preprocess.as_slice()) - .map(|preprocess| (p, preprocess)) - .map_err(|_| p) - }) - .collect::, _>>()?; - let (machine, share) = machine - .sign(preprocesses, &set_keys_message(&spec.set(), key_pair)) - .map_err(|e| match e { - FrostError::InternalError(e) => unreachable!("FrostError::InternalError {e}"), - FrostError::InvalidParticipant(_, _) | - FrostError::InvalidSigningSet(_) | - FrostError::InvalidParticipantQuantity(_, _) | - FrostError::DuplicatedParticipant(_) | - FrostError::MissingParticipant(_) => unreachable!("{e:?}"), - FrostError::InvalidPreprocess(p) | FrostError::InvalidShare(p) => p, - })?; - - Ok((machine, share.serialize().try_into().unwrap())) - } - // Get the share for this confirmation, if the preprocesses are valid. 
- pub(crate) fn share( - spec: &TributarySpec, - key: &Zeroizing<::F>, - attempt: u32, - preprocesses: HashMap>, - key_pair: &KeyPair, - ) -> Result<[u8; 32], Participant> { - Self::share_internal(spec, key, attempt, preprocesses, key_pair).map(|(_, share)| share) - } - - pub(crate) fn complete( - spec: &TributarySpec, - key: &Zeroizing<::F>, - attempt: u32, - preprocesses: HashMap>, - key_pair: &KeyPair, - shares: HashMap>, - ) -> Result<[u8; 64], Participant> { - let machine = Self::share_internal(spec, key, attempt, preprocesses, key_pair) - .expect("trying to complete a machine which failed to preprocess") - .0; - - let shares = Self::from_threshold_i_to_musig_i(spec, shares) - .into_iter() - .map(|(p, share)| { - machine.read_share(&mut share.as_slice()).map(|share| (p, share)).map_err(|_| p) - }) - .collect::, _>>()?; - let signature = machine.complete(shares).map_err(|e| match e { - FrostError::InternalError(e) => unreachable!("FrostError::InternalError {e}"), - FrostError::InvalidParticipant(_, _) | - FrostError::InvalidSigningSet(_) | - FrostError::InvalidParticipantQuantity(_, _) | - FrostError::DuplicatedParticipant(_) | - FrostError::MissingParticipant(_) => unreachable!("{e:?}"), - FrostError::InvalidPreprocess(p) | FrostError::InvalidShare(p) => p, - })?; - - Ok(signature.to_bytes()) - } -} diff --git a/coordinator/src/tributary/dkg_removal.rs b/coordinator/src/tributary/dkg_removal.rs deleted file mode 100644 index 0120ef061..000000000 --- a/coordinator/src/tributary/dkg_removal.rs +++ /dev/null @@ -1,241 +0,0 @@ -use core::ops::Deref; -use std::collections::HashMap; - -use zeroize::Zeroizing; - -use rand_core::SeedableRng; -use rand_chacha::ChaCha20Rng; - -use transcript::{Transcript, RecommendedTranscript}; -use ciphersuite::{ - group::{Group, GroupEncoding}, - Ciphersuite, Ristretto, -}; -use frost::{ - FrostError, - dkg::{Participant, musig::musig}, - sign::*, -}; -use frost_schnorrkel::Schnorrkel; - -use serai_client::{ - Public, SeraiAddress, - validator_sets::primitives::{musig_context, remove_participant_message}, -}; - -use crate::tributary::TributarySpec; - -/* - The following is a clone of DkgConfirmer modified for DKG removals. - - The notable difference is this uses a MuSig key of the first `t` participants to respond with - preprocesses, not all `n` participants. - - TODO: Exact same commentary on seeded RNGs. 
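For illustration, a minimal sketch of how the responding subset is mapped to MuSig indices (mirroring the `from_threshold_i_to_musig_i` conversion below; the helper name is an assumption): every honest signer sorts the same responding keys, identified by their 32-byte encodings, and so derives the same contiguous 1-indexed positions.

use std::collections::HashMap;
use frost::Participant;

// Assign each responding key a 1-indexed MuSig position by its rank in the
// sorted key set; sorting makes the assignment deterministic across signers.
fn musig_positions(mut keys: Vec<[u8; 32]>) -> HashMap<[u8; 32], Participant> {
  keys.sort();
  keys
    .into_iter()
    .enumerate()
    .map(|(i, key)| (key, Participant::new(u16::try_from(i + 1).unwrap()).unwrap()))
    .collect()
}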
The following can drop its seeded RNG if cached - preprocesses are used to carry the preprocess between machines -*/ -pub(crate) struct DkgRemoval; -impl DkgRemoval { - // Convert the passed in HashMap, which uses the validators' start index for their `s` threshold - // shares, to the indexes needed for MuSig - fn from_threshold_i_to_musig_i( - mut old_map: HashMap<[u8; 32], Vec>, - ) -> HashMap> { - let mut new_map = HashMap::new(); - let mut participating = old_map.keys().cloned().collect::>(); - participating.sort(); - for (i, participating) in participating.into_iter().enumerate() { - new_map.insert( - Participant::new(u16::try_from(i + 1).unwrap()).unwrap(), - old_map.remove(&participating).unwrap(), - ); - } - new_map - } - - fn preprocess_rng( - spec: &TributarySpec, - key: &Zeroizing<::F>, - attempt: u32, - ) -> ChaCha20Rng { - ChaCha20Rng::from_seed({ - let mut entropy_transcript = RecommendedTranscript::new(b"DkgRemoval Entropy"); - entropy_transcript.append_message(b"spec", spec.serialize()); - entropy_transcript.append_message(b"key", Zeroizing::new(key.to_bytes())); - entropy_transcript.append_message(b"attempt", attempt.to_le_bytes()); - Zeroizing::new(entropy_transcript).rng_seed(b"preprocess") - }) - } - - fn preprocess_internal( - spec: &TributarySpec, - key: &Zeroizing<::F>, - attempt: u32, - participants: Option<&[::G]>, - ) -> (Option>, [u8; 64]) { - // TODO: Diversify this among DkgConfirmer/DkgRemoval? - let context = musig_context(spec.set()); - - let (_, preprocess) = AlgorithmMachine::new( - Schnorrkel::new(b"substrate"), - // Preprocess with our key alone as we don't know the signing set yet - musig(&context, key, &[::G::generator() * key.deref()]) - .expect("couldn't get the MuSig key of our key alone") - .into(), - ) - .preprocess(&mut Self::preprocess_rng(spec, key, attempt)); - - let machine = if let Some(participants) = participants { - let (machine, actual_preprocess) = AlgorithmMachine::new( - Schnorrkel::new(b"substrate"), - // Preprocess with our key alone as we don't know the signing set yet - musig(&context, key, participants) - .expect( - "couldn't create a MuSig key for the DKG removal we're supposedly participating in", - ) - .into(), - ) - .preprocess(&mut Self::preprocess_rng(spec, key, attempt)); - // Doesn't use assert_eq due to lack of Debug - assert!(preprocess == actual_preprocess); - Some(machine) - } else { - None - }; - - (machine, preprocess.serialize().try_into().unwrap()) - } - // Get the preprocess for this confirmation. 
- pub(crate) fn preprocess( - spec: &TributarySpec, - key: &Zeroizing<::F>, - attempt: u32, - ) -> [u8; 64] { - Self::preprocess_internal(spec, key, attempt, None).1 - } - - fn share_internal( - spec: &TributarySpec, - key: &Zeroizing<::F>, - attempt: u32, - mut preprocesses: HashMap>, - removed: [u8; 32], - ) -> Result<(AlgorithmSignatureMachine, [u8; 32]), Participant> { - // TODO: Remove this ugly blob - let preprocesses = { - let mut preprocesses_participants = preprocesses.keys().cloned().collect::>(); - preprocesses_participants.sort(); - let mut actual_keys = vec![]; - let spec_validators = spec.validators(); - for participant in &preprocesses_participants { - for (validator, _) in &spec_validators { - if participant == &spec.i(*validator).unwrap().start { - actual_keys.push(*validator); - } - } - } - - let mut new_preprocesses = HashMap::new(); - for (participant, actual_key) in - preprocesses_participants.into_iter().zip(actual_keys.into_iter()) - { - new_preprocesses.insert(actual_key, preprocesses.remove(&participant).unwrap()); - } - new_preprocesses - }; - - let participants = preprocesses.keys().cloned().collect::>(); - let preprocesses = Self::from_threshold_i_to_musig_i( - preprocesses.into_iter().map(|(key, preprocess)| (key.to_bytes(), preprocess)).collect(), - ); - let machine = Self::preprocess_internal(spec, key, attempt, Some(&participants)).0.unwrap(); - let preprocesses = preprocesses - .into_iter() - .map(|(p, preprocess)| { - machine - .read_preprocess(&mut preprocess.as_slice()) - .map(|preprocess| (p, preprocess)) - .map_err(|_| p) - }) - .collect::, _>>()?; - let (machine, share) = machine - .sign(preprocesses, &remove_participant_message(&spec.set(), Public(removed))) - .map_err(|e| match e { - FrostError::InternalError(e) => unreachable!("FrostError::InternalError {e}"), - FrostError::InvalidParticipant(_, _) | - FrostError::InvalidSigningSet(_) | - FrostError::InvalidParticipantQuantity(_, _) | - FrostError::DuplicatedParticipant(_) | - FrostError::MissingParticipant(_) => unreachable!("{e:?}"), - FrostError::InvalidPreprocess(p) | FrostError::InvalidShare(p) => p, - })?; - - Ok((machine, share.serialize().try_into().unwrap())) - } - // Get the share for this confirmation, if the preprocesses are valid. 
- pub(crate) fn share( - spec: &TributarySpec, - key: &Zeroizing<::F>, - attempt: u32, - preprocesses: HashMap>, - removed: [u8; 32], - ) -> Result<[u8; 32], Participant> { - Self::share_internal(spec, key, attempt, preprocesses, removed).map(|(_, share)| share) - } - - pub(crate) fn complete( - spec: &TributarySpec, - key: &Zeroizing<::F>, - attempt: u32, - preprocesses: HashMap>, - removed: [u8; 32], - mut shares: HashMap>, - ) -> Result<(Vec, [u8; 64]), Participant> { - // TODO: Remove this ugly blob - let shares = { - let mut shares_participants = shares.keys().cloned().collect::>(); - shares_participants.sort(); - let mut actual_keys = vec![]; - let spec_validators = spec.validators(); - for participant in &shares_participants { - for (validator, _) in &spec_validators { - if participant == &spec.i(*validator).unwrap().start { - actual_keys.push(*validator); - } - } - } - - let mut new_shares = HashMap::new(); - for (participant, actual_key) in shares_participants.into_iter().zip(actual_keys.into_iter()) - { - new_shares.insert(actual_key.to_bytes(), shares.remove(&participant).unwrap()); - } - new_shares - }; - - let mut signers = shares.keys().cloned().map(SeraiAddress).collect::>(); - signers.sort(); - - let machine = Self::share_internal(spec, key, attempt, preprocesses, removed) - .expect("trying to complete a machine which failed to preprocess") - .0; - - let shares = Self::from_threshold_i_to_musig_i(shares) - .into_iter() - .map(|(p, share)| { - machine.read_share(&mut share.as_slice()).map(|share| (p, share)).map_err(|_| p) - }) - .collect::, _>>()?; - let signature = machine.complete(shares).map_err(|e| match e { - FrostError::InternalError(e) => unreachable!("FrostError::InternalError {e}"), - FrostError::InvalidParticipant(_, _) | - FrostError::InvalidSigningSet(_) | - FrostError::InvalidParticipantQuantity(_, _) | - FrostError::DuplicatedParticipant(_) | - FrostError::MissingParticipant(_) => unreachable!("{e:?}"), - FrostError::InvalidPreprocess(p) | FrostError::InvalidShare(p) => p, - })?; - - Ok((signers, signature.to_bytes())) - } -} diff --git a/coordinator/src/tributary/handle.rs b/coordinator/src/tributary/handle.rs index 499170c50..f2b62d5f2 100644 --- a/coordinator/src/tributary/handle.rs +++ b/coordinator/src/tributary/handle.rs @@ -1,19 +1,14 @@ -use core::{ops::Deref, future::Future}; +use core::ops::Deref; use std::collections::HashMap; +use zeroize::Zeroizing; use rand_core::OsRng; -use zeroize::{Zeroize, Zeroizing}; - use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto}; use frost::dkg::Participant; use scale::{Encode, Decode}; -use serai_client::{ - Public, SeraiAddress, Signature, - validator_sets::primitives::{ValidatorSet, KeyPair}, - SeraiValidatorSets, -}; +use serai_client::{Signature, validator_sets::primitives::KeyPair}; use tributary::{Signed, TransactionKind, TransactionTrait}; @@ -23,71 +18,29 @@ use processor_messages::{ sign::{self, SignId}, }; -use serai_db::{Get, Db}; +use serai_db::*; use crate::{ processors::Processors, tributary::{ - SignData, Transaction, TributarySpec, SeraiBlockNumber, Topic, DataSpecification, DataSet, - Accumulation, - dkg_confirmer::DkgConfirmer, - dkg_removal::DkgRemoval, - scanner::{RecognizedIdType, RIDTrait, PstTxType}, - FatallySlashed, DkgShare, DkgCompleted, PlanIds, ConfirmationNonces, RemovalNonces, AttemptDb, - DataDb, + *, + signing_protocol::DkgConfirmer, + scanner::{ + RecognizedIdType, RIDTrait, PublishSeraiTransaction, PTTTrait, TributaryBlockHandler, + }, }, + P2p, }; -use 
super::CurrentlyCompletingKeyPair; - -const DKG_COMMITMENTS: &str = "commitments"; -const DKG_SHARES: &str = "shares"; -const DKG_CONFIRMATION_NONCES: &str = "confirmation_nonces"; -const DKG_CONFIRMATION_SHARES: &str = "confirmation_shares"; - -// These d/s/b prefixes between DKG Removal, Batch, and Sign should be unnecessary, as Batch/Share -// entries themselves should already be domain separated -const DKG_REMOVAL_PREPROCESS: &str = "d_preprocess"; -const DKG_REMOVAL_SHARE: &str = "d_share"; - -const BATCH_PREPROCESS: &str = "b_preprocess"; -const BATCH_SHARE: &str = "b_share"; - -const SIGN_PREPROCESS: &str = "s_preprocess"; -const SIGN_SHARE: &str = "s_share"; - pub fn dkg_confirmation_nonces( key: &Zeroizing<<Ristretto as Ciphersuite>::F>, spec: &TributarySpec, + txn: &mut impl DbTxn, attempt: u32, ) -> [u8; 64] { - DkgConfirmer::preprocess(spec, key, attempt) -} - -// If there's an error generating a key pair, return any errors which would've occurred when -// executing the DkgConfirmer in order to stay in sync with those who did. -// -// The caller must ensure only error_generating_key_pair or generated_key_pair is called for a -// given attempt. -pub fn error_generating_key_pair<G: Get>( - getter: &G, - key: &Zeroizing<<Ristretto as Ciphersuite>::F>, - spec: &TributarySpec, - attempt: u32, -) -> Option<Participant> { - let preprocesses = ConfirmationNonces::get(getter, spec.genesis(), attempt).unwrap(); - - // Sign a key pair which can't be valid - // (0xff used as 0 would be the Ristretto identity point, 0-length for the network key) - let key_pair = KeyPair(Public([0xff; 32]), vec![0xffu8; 0].try_into().unwrap()); - match DkgConfirmer::share(spec, key, attempt, preprocesses, &key_pair) { - Ok(mut share) => { - // Zeroize the share to ensure it's not accessed - share.zeroize(); - None - } - Err(p) => Some(p), - } + DkgConfirmer::new(key, spec, txn, attempt) + .expect("getting DKG confirmation nonces for unknown attempt") + .preprocess() } pub fn generated_key_pair( @@ -97,166 +50,160 @@ pub fn generated_key_pair( key_pair: &KeyPair, attempt: u32, ) -> Result<[u8; 32], Participant> { - CurrentlyCompletingKeyPair::set(txn, spec.genesis(), key_pair); + DkgKeyPair::set(txn, spec.genesis(), attempt, key_pair); + KeyToDkgAttempt::set(txn, key_pair.0 .0, &attempt); let preprocesses = ConfirmationNonces::get(txn, spec.genesis(), attempt).unwrap(); - DkgConfirmer::share(spec, key, attempt, preprocesses, key_pair) + DkgConfirmer::new(key, spec, txn, attempt) + .expect("claiming to have generated a key pair for an unrecognized attempt") + .share(preprocesses, key_pair) } -pub(super) async fn fatal_slash< - D: Db, - FPtt: Future<Output = ()>, - PTT: Clone + Fn(Transaction) -> FPtt, ->( - txn: &mut D::Transaction<'_>, +fn unflatten( spec: &TributarySpec, - publish_tributary_tx: &PTT, - our_key: &Zeroizing<<Ristretto as Ciphersuite>::F>, - slashing: [u8; 32], - reason: &str, + removed: &[<Ristretto as Ciphersuite>::G], + data: &mut HashMap<Participant, Vec<u8>>, ) { - let genesis = spec.genesis(); - - log::warn!("fatally slashing {}.
reason: {}", hex::encode(slashing), reason); - FatallySlashed::set_fatally_slashed(txn, genesis, slashing); - // TODO: disconnect the node from network/ban from further participation in all Tributaries - - // TODO: If during DKG, trigger a re-attempt - // Despite triggering a re-attempt, this DKG may still complete and may become in-use - - // If during a DKG, remove the participant - if DkgCompleted::get(txn, genesis).is_none() { - let preprocess = DkgRemoval::preprocess(spec, our_key, 0); - let mut tx = Transaction::DkgRemovalPreprocess(SignData { - plan: slashing, - attempt: 0, - data: vec![preprocess.to_vec()], - signed: Transaction::empty_signed(), - }); - tx.sign(&mut OsRng, genesis, our_key); - publish_tributary_tx(tx).await; + for (validator, _) in spec.validators() { + let Some(range) = spec.i(removed, validator) else { continue }; + let Some(all_segments) = data.remove(&range.start) else { + continue; + }; + let mut data_vec = Vec::<_>::decode(&mut all_segments.as_slice()).unwrap(); + for i in u16::from(range.start) .. u16::from(range.end) { + let i = Participant::new(i).unwrap(); + data.insert(i, data_vec.remove(0)); + } } } -// TODO: Once Substrate confirms a key, we need to rotate our validator set OR form a second -// Tributary post-DKG -// https://github.com/serai-dex/serai/issues/426 +impl< + D: Db, + T: DbTxn, + Pro: Processors, + PST: PublishSeraiTransaction, + PTT: PTTTrait, + RID: RIDTrait, + P: P2p, + > TributaryBlockHandler<'_, D, T, Pro, PST, PTT, RID, P> +{ + fn accumulate( + &mut self, + removed: &[::G], + data_spec: &DataSpecification, + signer: ::G, + data: &Vec, + ) -> Accumulation { + log::debug!("accumulating entry for {:?} attempt #{}", &data_spec.topic, &data_spec.attempt); + let genesis = self.spec.genesis(); + if DataDb::get(self.txn, genesis, data_spec, &signer.to_bytes()).is_some() { + panic!("accumulating data for a participant multiple times"); + } + let signer_shares = { + let Some(signer_i) = self.spec.i(removed, signer) else { + log::warn!("accumulating data from {} who was removed", hex::encode(signer.to_bytes())); + return Accumulation::NotReady; + }; + u16::from(signer_i.end) - u16::from(signer_i.start) + }; -async fn fatal_slash_with_participant_index< - D: Db, - FPtt: Future, - PTT: Clone + Fn(Transaction) -> FPtt, ->( - txn: &mut ::Transaction<'_>, - spec: &TributarySpec, - publish_tributary_tx: &PTT, - our_key: &Zeroizing<::F>, - i: Participant, - reason: &str, -) { - // Resolve from Participant to ::G - let i = u16::from(i); - let mut validator = None; - for (potential, _) in spec.validators() { - let v_i = spec.i(potential).unwrap(); - if (u16::from(v_i.start) <= i) && (i < u16::from(v_i.end)) { - validator = Some(potential); - break; + let prior_received = DataReceived::get(self.txn, genesis, data_spec).unwrap_or_default(); + let now_received = prior_received + signer_shares; + DataReceived::set(self.txn, genesis, data_spec, &now_received); + DataDb::set(self.txn, genesis, data_spec, &signer.to_bytes(), data); + + let received_range = (prior_received + 1) ..= now_received; + + // If 2/3rds of the network participated in this preprocess, queue it for an automatic + // re-attempt + // DkgConfirmation doesn't have a re-attempt as it's just an extension for Dkg + if (data_spec.label == Label::Preprocess) && + received_range.contains(&self.spec.t()) && + (data_spec.topic != Topic::DkgConfirmation) + { + // Double check the attempt on this entry, as we don't want to schedule a re-attempt if this + // is an old entry + // This is an assert, not 
part of the if check, as old data shouldn't be here in the first + // place + assert_eq!(AttemptDb::attempt(self.txn, genesis, data_spec.topic), Some(data_spec.attempt)); + ReattemptDb::schedule_reattempt(self.txn, genesis, self.block_number, data_spec.topic); } - } - let validator = validator.unwrap(); - fatal_slash::(txn, spec, publish_tributary_tx, our_key, validator.to_bytes(), reason) - .await; -} + // If we have all the needed commitments/preprocesses/shares, tell the processor + let needs_everyone = + (data_spec.topic == Topic::Dkg) || (data_spec.topic == Topic::DkgConfirmation); + let needed = if needs_everyone { self.spec.n(removed) } else { self.spec.t() }; + if received_range.contains(&needed) { + log::debug!( + "accumulation for entry {:?} attempt #{} is ready", + &data_spec.topic, + &data_spec.attempt + ); -#[allow(clippy::too_many_arguments)] -pub(crate) async fn handle_application_tx< - D: Db, - Pro: Processors, - FPst: Future, - PST: Clone + Fn(ValidatorSet, PstTxType, serai_client::Transaction) -> FPst, - FPtt: Future, - PTT: Clone + Fn(Transaction) -> FPtt, - FRid: Future, - RID: RIDTrait, ->( - tx: Transaction, - spec: &TributarySpec, - processors: &Pro, - publish_serai_tx: PST, - publish_tributary_tx: &PTT, - key: &Zeroizing<::F>, - recognized_id: RID, - txn: &mut ::Transaction<'_>, -) { - let genesis = spec.genesis(); - - // Don't handle transactions from fatally slashed participants - // TODO: Because fatally slashed participants can still publish onto the blockchain, they have - // a notable DoS ability - if let TransactionKind::Signed(_, signed) = tx.kind() { - if FatallySlashed::get(txn, genesis, signed.signer.to_bytes()).is_some() { - return; + let mut data = HashMap::new(); + for validator in self.spec.validators().iter().map(|validator| validator.0) { + let Some(i) = self.spec.i(removed, validator) else { continue }; + data.insert( + i.start, + if let Some(data) = DataDb::get(self.txn, genesis, data_spec, &validator.to_bytes()) { + data + } else { + continue; + }, + ); + } + + assert_eq!(data.len(), usize::from(needed)); + + // Remove our own piece of data, if we were involved + if let Some(i) = self.spec.i(removed, Ristretto::generator() * self.our_key.deref()) { + if data.remove(&i.start).is_some() { + return Accumulation::Ready(DataSet::Participating(data)); + } + } + return Accumulation::Ready(DataSet::NotParticipating); } + Accumulation::NotReady } - async fn handle, PTT: Clone + Fn(Transaction) -> FPtt>( - txn: &mut ::Transaction<'_>, - spec: &TributarySpec, - publish_tributary_tx: &PTT, - key: &Zeroizing<::F>, + fn handle_data( + &mut self, + removed: &[::G], data_spec: &DataSpecification, - bytes: Vec, + bytes: &Vec, signed: &Signed, ) -> Accumulation { - let genesis = spec.genesis(); + let genesis = self.spec.genesis(); - let Some(curr_attempt) = AttemptDb::attempt(txn, genesis, data_spec.topic) else { + let Some(curr_attempt) = AttemptDb::attempt(self.txn, genesis, data_spec.topic) else { // Premature publication of a valid ID/publication of an invalid ID - fatal_slash::( - txn, - spec, - publish_tributary_tx, - key, - signed.signer.to_bytes(), - "published data for ID without an attempt", - ) - .await; + self.fatal_slash(signed.signer.to_bytes(), "published data for ID without an attempt"); return Accumulation::NotReady; }; // If they've already published a TX for this attempt, slash // This shouldn't be reachable since nonces were made inserted by the coordinator, yet it's a // cheap check to leave in for safety - if DataDb::get(txn, genesis, 
data_spec, &signed.signer.to_bytes()).is_some() { - fatal_slash::( - txn, - spec, - publish_tributary_tx, - key, - signed.signer.to_bytes(), - "published data multiple times", - ) - .await; + if DataDb::get(self.txn, genesis, data_spec, &signed.signer.to_bytes()).is_some() { + self.fatal_slash(signed.signer.to_bytes(), "published data multiple times"); return Accumulation::NotReady; } - // If the attempt is lesser than the blockchain's, slash + // If the attempt is lesser than the blockchain's, return if data_spec.attempt < curr_attempt { - // TODO: Slash for being late + log::debug!( + "dated attempt published onto tributary for topic {:?} (used attempt {}, current {})", + data_spec.topic, + data_spec.attempt, + curr_attempt + ); return Accumulation::NotReady; } // If the attempt is greater, this is a premature publication, full slash if data_spec.attempt > curr_attempt { - fatal_slash::( - txn, - spec, - publish_tributary_tx, - key, + self.fatal_slash( signed.signer.to_bytes(), "published data with an attempt which hasn't started", - ) - .await; + ); return Accumulation::NotReady; } @@ -266,769 +213,531 @@ pub(crate) async fn handle_application_tx< // TODO: If this is shares, we need to check they are part of the selected signing set // Accumulate this data - DataDb::accumulate(txn, key, spec, data_spec, signed.signer, &bytes) + self.accumulate(removed, data_spec, signed.signer, bytes) } - async fn check_sign_data_len< - D: Db, - FPtt: Future, - PTT: Clone + Fn(Transaction) -> FPtt, - >( - txn: &mut D::Transaction<'_>, - spec: &TributarySpec, - publish_tributary_tx: &PTT, - our_key: &Zeroizing<::F>, + fn check_sign_data_len( + &mut self, + removed: &[::G], signer: ::G, len: usize, ) -> Result<(), ()> { - let signer_i = spec.i(signer).unwrap(); + let Some(signer_i) = self.spec.i(removed, signer) else { + // TODO: Ensure processor doesn't so participate/check how it handles removals for being + // offline + self.fatal_slash(signer.to_bytes(), "signer participated despite being removed"); + Err(())? + }; if len != usize::from(u16::from(signer_i.end) - u16::from(signer_i.start)) { - fatal_slash::( - txn, - spec, - publish_tributary_tx, - our_key, + self.fatal_slash( signer.to_bytes(), "signer published a distinct amount of sign data than they had shares", - ) - .await; + ); Err(())?; } Ok(()) } - fn unflatten(spec: &TributarySpec, data: &mut HashMap>) { - for (validator, _) in spec.validators() { - let range = spec.i(validator).unwrap(); - let Some(all_segments) = data.remove(&range.start) else { - continue; - }; - let mut data_vec = Vec::<_>::decode(&mut all_segments.as_slice()).unwrap(); - for i in u16::from(range.start) .. 
u16::from(range.end) { - let i = Participant::new(i).unwrap(); - data.insert(i, data_vec.remove(0)); + // TODO: Don't call fatal_slash in here, return the party to fatal_slash to ensure no further + // execution occurs + pub(crate) async fn handle_application_tx(&mut self, tx: Transaction) { + let genesis = self.spec.genesis(); + + // Don't handle transactions from fatally slashed participants + // This prevents removed participants from sabotaging the removal signing sessions and so on + // TODO: Because fatally slashed participants can still publish onto the blockchain, they have + // a notable DoS ability + if let TransactionKind::Signed(_, signed) = tx.kind() { + if FatallySlashed::get(self.txn, genesis, signed.signer.to_bytes()).is_some() { + return; } } - } - match tx { - Transaction::RemoveParticipant(i) => { - fatal_slash_with_participant_index::( - txn, - spec, - publish_tributary_tx, - key, - i, - "RemoveParticipant Provided TX", - ) - .await - } - Transaction::DkgCommitments(attempt, commitments, signed) => { - let Ok(_) = check_sign_data_len::( - txn, - spec, - publish_tributary_tx, - key, - signed.signer, - commitments.len(), - ) - .await - else { - return; - }; - match handle::( - txn, - spec, - publish_tributary_tx, - key, - &DataSpecification { topic: Topic::Dkg, label: DKG_COMMITMENTS, attempt }, - commitments.encode(), - &signed, - ) - .await - { - Accumulation::Ready(DataSet::Participating(mut commitments)) => { - log::info!("got all DkgCommitments for {}", hex::encode(genesis)); - unflatten(spec, &mut commitments); - processors - .send( - spec.set().network, - key_gen::CoordinatorMessage::Commitments { - id: KeyGenId { session: spec.set().session, attempt }, - commitments, - }, - ) - .await; + match tx { + Transaction::RemoveParticipantDueToDkg { participant, signed } => { + if self.spec.i(&[], participant).is_none() { + self.fatal_slash( + participant.to_bytes(), + "RemoveParticipantDueToDkg vote for non-validator", + ); + return; } - Accumulation::Ready(DataSet::NotParticipating) => { - panic!("wasn't a participant in DKG commitments") + + let participant = participant.to_bytes(); + let signer = signed.signer.to_bytes(); + + assert!( + VotedToRemove::get(self.txn, genesis, signer, participant).is_none(), + "VotedToRemove multiple times despite a single nonce being allocated", + ); + VotedToRemove::set(self.txn, genesis, signer, participant, &()); + + let prior_votes = VotesToRemove::get(self.txn, genesis, participant).unwrap_or(0); + let signer_votes = + self.spec.i(&[], signed.signer).expect("signer wasn't a validator for this network?"); + let new_votes = prior_votes + u16::from(signer_votes.end) - u16::from(signer_votes.start); + VotesToRemove::set(self.txn, genesis, participant, &new_votes); + if ((prior_votes + 1) ..= new_votes).contains(&self.spec.t()) { + self.fatal_slash(participant, "RemoveParticipantDueToDkg vote") } - Accumulation::NotReady => {} } - } - Transaction::DkgShares { attempt, mut shares, confirmation_nonces, signed } => { - let sender_i = spec - .i(signed.signer) - .expect("transaction added to tributary by signer who isn't a participant"); - let sender_is_len = u16::from(sender_i.end) - u16::from(sender_i.start); - - if shares.len() != usize::from(sender_is_len) { - fatal_slash::( - txn, - spec, - publish_tributary_tx, - key, - signed.signer.to_bytes(), - "invalid amount of DKG shares by key shares", - ) - .await; - return; + Transaction::DkgCommitments { attempt, commitments, signed } => { + let Some(removed) = 
removed_as_of_dkg_attempt(self.txn, genesis, attempt) else { + self.fatal_slash(signed.signer.to_bytes(), "DkgCommitments with an unrecognized attempt"); + return; + }; + let Ok(()) = self.check_sign_data_len(&removed, signed.signer, commitments.len()) else { + return; + }; + let data_spec = DataSpecification { topic: Topic::Dkg, label: Label::Preprocess, attempt }; + match self.handle_data(&removed, &data_spec, &commitments.encode(), &signed) { + Accumulation::Ready(DataSet::Participating(mut commitments)) => { + log::info!("got all DkgCommitments for {}", hex::encode(genesis)); + unflatten(self.spec, &removed, &mut commitments); + self + .processors + .send( + self.spec.set().network, + key_gen::CoordinatorMessage::Commitments { + id: KeyGenId { session: self.spec.set().session, attempt }, + commitments, + }, + ) + .await; + } + Accumulation::Ready(DataSet::NotParticipating) => { + assert!( + removed.contains(&(Ristretto::generator() * self.our_key.deref())), + "NotParticipating in a DkgCommitments we weren't removed for" + ); + } + Accumulation::NotReady => {} + } } - for shares in &shares { - if shares.len() != (usize::from(spec.n() - sender_is_len)) { - fatal_slash::( - txn, - spec, - publish_tributary_tx, - key, + + Transaction::DkgShares { attempt, mut shares, confirmation_nonces, signed } => { + let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else { + self.fatal_slash(signed.signer.to_bytes(), "DkgShares with an unrecognized attempt"); + return; + }; + let not_participating = removed.contains(&(Ristretto::generator() * self.our_key.deref())); + + let Ok(()) = self.check_sign_data_len(&removed, signed.signer, shares.len()) else { + return; + }; + + let Some(sender_i) = self.spec.i(&removed, signed.signer) else { + self.fatal_slash( signed.signer.to_bytes(), - "invalid amount of DKG shares", - ) - .await; + "DkgShares for a DKG they aren't participating in", + ); return; + }; + let sender_is_len = u16::from(sender_i.end) - u16::from(sender_i.start); + for shares in &shares { + if shares.len() != (usize::from(self.spec.n(&removed) - sender_is_len)) { + self.fatal_slash(signed.signer.to_bytes(), "invalid amount of DKG shares"); + return; + } } - } - // Save each share as needed for blame - { - let from_range = spec.i(signed.signer).unwrap(); + // Save each share as needed for blame for (from_offset, shares) in shares.iter().enumerate() { let from = - Participant::new(u16::from(from_range.start) + u16::try_from(from_offset).unwrap()) + Participant::new(u16::from(sender_i.start) + u16::try_from(from_offset).unwrap()) .unwrap(); for (to_offset, share) in shares.iter().enumerate() { // 0-indexed (the enumeration) to 1-indexed (Participant) let mut to = u16::try_from(to_offset).unwrap() + 1; // Adjust for the omission of the sender's own shares - if to >= u16::from(from_range.start) { - to += u16::from(from_range.end) - u16::from(from_range.start); + if to >= u16::from(sender_i.start) { + to += u16::from(sender_i.end) - u16::from(sender_i.start); } let to = Participant::new(to).unwrap(); - DkgShare::set(txn, genesis, from.into(), to.into(), share); + DkgShare::set(self.txn, genesis, from.into(), to.into(), share); } } - } - // Filter down to only our share's bytes for handle - let our_i = spec - .i(Ristretto::generator() * key.deref()) - .expect("in a tributary we're not a validator for"); - - let our_shares = if sender_i == our_i { - vec![] - } else { - // 1-indexed to 0-indexed - let mut our_i_pos = u16::from(our_i.start) - 1; - // Handle the omission of the 
sender's own data - if u16::from(our_i.start) > u16::from(sender_i.start) { - our_i_pos -= sender_is_len; - } - let our_i_pos = usize::from(our_i_pos); - shares - .iter_mut() - .map(|shares| { + // Filter down to only our share's bytes for handle + let our_shares = if let Some(our_i) = + self.spec.i(&removed, Ristretto::generator() * self.our_key.deref()) + { + if sender_i == our_i { + vec![] + } else { + // 1-indexed to 0-indexed + let mut our_i_pos = u16::from(our_i.start) - 1; + // Handle the omission of the sender's own data + if u16::from(our_i.start) > u16::from(sender_i.start) { + our_i_pos -= sender_is_len; + } + let our_i_pos = usize::from(our_i_pos); shares - .drain( - our_i_pos .. - (our_i_pos + usize::from(u16::from(our_i.end) - u16::from(our_i.start))), - ) - .collect::>() - }) - .collect() - }; - // Drop shares as it's been mutated into invalidity - drop(shares); - - let confirmation_nonces = handle::( - txn, - spec, - publish_tributary_tx, - key, - &DataSpecification { topic: Topic::Dkg, label: DKG_CONFIRMATION_NONCES, attempt }, - confirmation_nonces.to_vec(), - &signed, - ) - .await; - match handle::( - txn, - spec, - publish_tributary_tx, - key, - &DataSpecification { topic: Topic::Dkg, label: DKG_SHARES, attempt }, - our_shares.encode(), - &signed, - ) - .await - { - Accumulation::Ready(DataSet::Participating(shares)) => { - log::info!("got all DkgShares for {}", hex::encode(genesis)); - - let Accumulation::Ready(DataSet::Participating(confirmation_nonces)) = - confirmation_nonces - else { - panic!("got all DKG shares yet confirmation nonces aren't Ready(Participating(_))"); - }; - ConfirmationNonces::set(txn, genesis, attempt, &confirmation_nonces); - - // shares is a HashMap>>>, with the values representing: - // - Each of the sender's shares - // - Each of the our shares - // - Each share - // We need a Vec>>, with the outer being each of ours - let mut expanded_shares = vec![]; - for (sender_start_i, shares) in shares { - let shares: Vec>> = Vec::<_>::decode(&mut shares.as_slice()).unwrap(); - for (sender_i_offset, our_shares) in shares.into_iter().enumerate() { - for (our_share_i, our_share) in our_shares.into_iter().enumerate() { - if expanded_shares.len() <= our_share_i { - expanded_shares.push(HashMap::new()); - } - expanded_shares[our_share_i].insert( - Participant::new( - u16::from(sender_start_i) + u16::try_from(sender_i_offset).unwrap(), + .iter_mut() + .map(|shares| { + shares + .drain( + our_i_pos .. 
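The `DkgShares` dimension checks above enforce a rectangular share matrix: one row per key share the sender controls, each row holding one share for every key share the sender doesn't control (`n` minus their own). A minimal sketch of that invariant (names and sizes are illustrative):

```rust
// Sketch: validate a DKG share matrix's dimensions.
// `n` is the total key-share count, `sender` the sender's key-share count.
fn valid_dimensions(shares: &[Vec<Vec<u8>>], n: u16, sender: u16) -> bool {
  // One row per key share the sender controls...
  (shares.len() == usize::from(sender)) &&
    // ...each row holding a share for every key share they don't control
    shares.iter().all(|row| row.len() == usize::from(n - sender))
}

fn main() {
  // n = 5 total shares, sender controls 2: expect a 2 x 3 matrix
  let shares = vec![vec![vec![0u8; 32]; 3]; 2];
  assert!(valid_dimensions(&shares, 5, 2));
  assert!(!valid_dimensions(&shares, 6, 2));
}
```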
+ (our_i_pos + usize::from(u16::from(our_i.end) - u16::from(our_i.start))), ) - .unwrap(), - our_share, - ); + .collect::<Vec<_>>() + }) + .collect() + } + } else { + assert!( + not_participating, + "we didn't have an i while handling DkgShares we weren't removed for" + ); + // Since we're not participating, simply save vec![] for our shares + vec![] + }; + // Drop shares as it's presumably been mutated into invalidity + drop(shares); + + let data_spec = DataSpecification { topic: Topic::Dkg, label: Label::Share, attempt }; + let encoded_data = (confirmation_nonces.to_vec(), our_shares.encode()).encode(); + match self.handle_data(&removed, &data_spec, &encoded_data, &signed) { + Accumulation::Ready(DataSet::Participating(confirmation_nonces_and_shares)) => { + log::info!("got all DkgShares for {}", hex::encode(genesis)); + + let mut confirmation_nonces = HashMap::new(); + let mut shares = HashMap::new(); + for (participant, confirmation_nonces_and_shares) in confirmation_nonces_and_shares { + let (these_confirmation_nonces, these_shares) = + <(Vec<u8>, Vec<u8>)>::decode(&mut confirmation_nonces_and_shares.as_slice()) + .unwrap(); + confirmation_nonces.insert(participant, these_confirmation_nonces); + shares.insert(participant, these_shares); + } + ConfirmationNonces::set(self.txn, genesis, attempt, &confirmation_nonces); + + // shares is a HashMap<Participant, Vec<Vec<Vec<u8>>>>, with the values representing: + // - Each of the sender's shares + // - Each of our shares + // - Each share + // We need a Vec<HashMap<Participant, Vec<u8>>>, with the outer being each of ours + let mut expanded_shares = vec![]; + for (sender_start_i, shares) in shares { + let shares: Vec<Vec<Vec<u8>>> = Vec::<_>::decode(&mut shares.as_slice()).unwrap(); + for (sender_i_offset, our_shares) in shares.into_iter().enumerate() { + for (our_share_i, our_share) in our_shares.into_iter().enumerate() { + if expanded_shares.len() <= our_share_i { + expanded_shares.push(HashMap::new()); + } + expanded_shares[our_share_i].insert( + Participant::new( + u16::from(sender_start_i) + u16::try_from(sender_i_offset).unwrap(), + ) + .unwrap(), + our_share, + ); + } } } - } - processors - .send( - spec.set().network, - key_gen::CoordinatorMessage::Shares { - id: KeyGenId { session: spec.set().session, attempt }, - shares: expanded_shares, - }, - ) - .await; - } - Accumulation::Ready(DataSet::NotParticipating) => { - panic!("wasn't a participant in DKG shares") + self + .processors + .send( + self.spec.set().network, + key_gen::CoordinatorMessage::Shares { + id: KeyGenId { session: self.spec.set().session, attempt }, + shares: expanded_shares, + }, + ) + .await; + } + Accumulation::Ready(DataSet::NotParticipating) => { + assert!(not_participating, "NotParticipating in a DkgShares we weren't removed for"); + } + Accumulation::NotReady => {} } - Accumulation::NotReady => assert!(matches!(confirmation_nonces, Accumulation::NotReady)), } - } - // TODO: Ban self-accusals - Transaction::InvalidDkgShare { attempt, accuser, faulty, blame, signed } => { - let range = spec.i(signed.signer).unwrap(); - if (u16::from(accuser) < u16::from(range.start)) || - (u16::from(range.end) <= u16::from(accuser)) - { - fatal_slash::( - txn, - spec, - publish_tributary_tx, - key, - signed.signer.to_bytes(), - "accused with a Participant index which wasn't theirs", - ) - .await; - return; - } + Transaction::InvalidDkgShare { attempt, accuser, faulty, blame, signed } => { + let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else { + self + .fatal_slash(signed.signer.to_bytes(), "InvalidDkgShare with an unrecognized 
attempt"); + return; + }; + let Some(range) = self.spec.i(&removed, signed.signer) else { + self.fatal_slash( + signed.signer.to_bytes(), + "InvalidDkgShare for a DKG they aren't participating in", + ); + return; + }; + if !range.contains(&accuser) { + self.fatal_slash( + signed.signer.to_bytes(), + "accused with a Participant index which wasn't theirs", + ); + return; + } + if range.contains(&faulty) { + self.fatal_slash(signed.signer.to_bytes(), "accused self of having an InvalidDkgShare"); + return; + } - if !((u16::from(range.start) <= u16::from(faulty)) && - (u16::from(faulty) < u16::from(range.end))) - { - fatal_slash::( - txn, - spec, - publish_tributary_tx, - key, - signed.signer.to_bytes(), - "accused self of having an InvalidDkgShare", - ) - .await; - return; + let Some(share) = DkgShare::get(self.txn, genesis, accuser.into(), faulty.into()) else { + self.fatal_slash( + signed.signer.to_bytes(), + "InvalidDkgShare had a non-existent faulty participant", + ); + return; + }; + self + .processors + .send( + self.spec.set().network, + key_gen::CoordinatorMessage::VerifyBlame { + id: KeyGenId { session: self.spec.set().session, attempt }, + accuser, + accused: faulty, + share, + blame, + }, + ) + .await; } - let share = DkgShare::get(txn, genesis, accuser.into(), faulty.into()).unwrap(); - processors - .send( - spec.set().network, - key_gen::CoordinatorMessage::VerifyBlame { - id: KeyGenId { session: spec.set().session, attempt }, - accuser, - accused: faulty, - share, - blame, - }, - ) - .await; - } + Transaction::DkgConfirmed { attempt, confirmation_share, signed } => { + let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else { + self.fatal_slash(signed.signer.to_bytes(), "DkgConfirmed with an unrecognized attempt"); + return; + }; + + let data_spec = + DataSpecification { topic: Topic::DkgConfirmation, label: Label::Share, attempt }; + match self.handle_data(&removed, &data_spec, &confirmation_share.to_vec(), &signed) { + Accumulation::Ready(DataSet::Participating(shares)) => { + log::info!("got all DkgConfirmed for {}", hex::encode(genesis)); + + let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else { + panic!( + "DkgConfirmed for everyone yet didn't have the removed parties for this attempt", + ); + }; - Transaction::DkgConfirmed(attempt, shares, signed) => { - match handle::( - txn, - spec, - publish_tributary_tx, - key, - &DataSpecification { topic: Topic::Dkg, label: DKG_CONFIRMATION_SHARES, attempt }, - shares.to_vec(), - &signed, - ) - .await - { - Accumulation::Ready(DataSet::Participating(shares)) => { - log::info!("got all DkgConfirmed for {}", hex::encode(genesis)); - - let preprocesses = ConfirmationNonces::get(txn, genesis, attempt).unwrap(); - // TODO: This can technically happen under very very very specific timing as the txn put - // happens before DkgConfirmed, yet the txn commit isn't guaranteed to - let key_pair = CurrentlyCompletingKeyPair::get(txn, genesis).expect( - "in DkgConfirmed handling, which happens after everyone \ + let preprocesses = ConfirmationNonces::get(self.txn, genesis, attempt).unwrap(); + // TODO: This can technically happen under very very very specific timing as the txn + // put happens before DkgConfirmed, yet the txn commit isn't guaranteed to + let key_pair = DkgKeyPair::get(self.txn, genesis, attempt).expect( + "in DkgConfirmed handling, which happens after everyone \ (including us) fires DkgConfirmed, yet no confirming key pair", - ); - let sig = - match DkgConfirmer::complete(spec, key, 
attempt, preprocesses, &key_pair, shares) { + ); + let mut confirmer = DkgConfirmer::new(self.our_key, self.spec, self.txn, attempt) + .expect("confirming DKG for unrecognized attempt"); + let sig = match confirmer.complete(preprocesses, &key_pair, shares) { Ok(sig) => sig, Err(p) => { - fatal_slash_with_participant_index::( - txn, - spec, - publish_tributary_tx, - key, - p, - "invalid DkgConfirmer share", - ) - .await; + let mut tx = Transaction::RemoveParticipantDueToDkg { + participant: self.spec.reverse_lookup_i(&removed, p).unwrap(), + signed: Transaction::empty_signed(), + }; + tx.sign(&mut OsRng, genesis, self.our_key); + self.publish_tributary_tx.publish_tributary_tx(tx).await; return; } }; - DkgCompleted::set(txn, genesis, &()); + DkgLocallyCompleted::set(self.txn, genesis, &()); - publish_serai_tx( - spec.set(), - PstTxType::SetKeys, - SeraiValidatorSets::set_keys(spec.set().network, key_pair, Signature(sig)), - ) - .await; - } - Accumulation::Ready(DataSet::NotParticipating) => { - panic!("wasn't a participant in DKG confirmination shares") - } - Accumulation::NotReady => {} - } - } - - Transaction::DkgRemovalPreprocess(data) => { - let signer = data.signed.signer; - // TODO: Only handle this if we're not actively removing this validator - if (data.data.len() != 1) || (data.data[0].len() != 64) { - fatal_slash::( - txn, - spec, - publish_tributary_tx, - key, - signer.to_bytes(), - "non-64-byte DKG removal preprocess", - ) - .await; - return; - } - match handle::( - txn, - spec, - publish_tributary_tx, - key, - &DataSpecification { - topic: Topic::DkgRemoval(data.plan), - label: DKG_REMOVAL_PREPROCESS, - attempt: data.attempt, - }, - data.data.encode(), - &data.signed, - ) - .await - { - Accumulation::Ready(DataSet::Participating(preprocesses)) => { - RemovalNonces::set(txn, genesis, data.plan, data.attempt, &preprocesses); - - let Ok(share) = DkgRemoval::share(spec, key, data.attempt, preprocesses, data.plan) - else { - // TODO: Locally increase slash points to maximum (distinct from an explicitly fatal - // slash) and censor transactions (yet don't explicitly ban) - return; - }; - - let mut tx = Transaction::DkgRemovalPreprocess(SignData { - plan: data.plan, - attempt: data.attempt, - data: vec![share.to_vec()], - signed: Transaction::empty_signed(), - }); - tx.sign(&mut OsRng, genesis, key); - publish_tributary_tx(tx).await; - } - Accumulation::Ready(DataSet::NotParticipating) => {} - Accumulation::NotReady => {} - } - } - Transaction::DkgRemovalShare(data) => { - let signer = data.signed.signer; - if (data.data.len() != 1) || (data.data[0].len() != 32) { - fatal_slash::( - txn, - spec, - publish_tributary_tx, - key, - signer.to_bytes(), - "non-32-byte DKG removal share", - ) - .await; - return; - } - match handle::( - txn, - spec, - publish_tributary_tx, - key, - &DataSpecification { - topic: Topic::DkgRemoval(data.plan), - label: DKG_REMOVAL_SHARE, - attempt: data.attempt, - }, - data.data.encode(), - &data.signed, - ) - .await - { - Accumulation::Ready(DataSet::Participating(shares)) => { - let preprocesses = RemovalNonces::get(txn, genesis, data.plan, data.attempt).unwrap(); - - let Ok((signers, signature)) = - DkgRemoval::complete(spec, key, data.attempt, preprocesses, data.plan, shares) - else { - // TODO: Locally increase slash points to maximum (distinct from an explicitly fatal - // slash) and censor transactions (yet don't explicitly ban) - return; - }; - - // TODO: Only handle this if we're not actively removing any of the signers - // The created Substrate call 
will fail if a removed validator was one of the signers - // Since: - // 1) publish_serai_tx will block this task until the TX is published - // 2) We won't scan any more TXs/blocks until we handle this TX - // The TX *must* be successfully published *before* we start removing any more signers - // Accordingly, if the signers aren't currently being removed, they won't be removed - // by the time this transaction is successfully published *unless* a malicious 34% - // participates with the non-participating 33% to continue operation and produce a - // distinct removal (since the non-participating won't block in this block) - // This breaks BFT and is accordingly within bounds - - let tx = serai_client::SeraiValidatorSets::remove_participant( - spec.set().network, - SeraiAddress(data.plan), - signers, - Signature(signature), - ); - publish_serai_tx(spec.set(), PstTxType::RemoveParticipant(data.plan), tx).await; + self + .publish_serai_tx + .publish_set_keys( + self.db, + self.spec.set(), + removed.into_iter().map(|key| key.to_bytes().into()).collect(), + key_pair, + Signature(sig), + ) + .await; + } + Accumulation::Ready(DataSet::NotParticipating) => { + panic!("wasn't a participant in DKG confirmation shares") + } + Accumulation::NotReady => {} } - Accumulation::Ready(DataSet::NotParticipating) => {} - Accumulation::NotReady => {} } - } - - Transaction::CosignSubstrateBlock(hash) => { - AttemptDb::recognize_topic( - txn, - genesis, - Topic::SubstrateSign(SubstrateSignableId::CosigningSubstrateBlock(hash)), - ); - let block_number = SeraiBlockNumber::get(txn, hash) - .expect("CosignSubstrateBlock yet didn't save Serai block number"); - processors - .send( - spec.set().network, - coordinator::CoordinatorMessage::CosignSubstrateBlock { - id: SubstrateSignId { - session: spec.set().session, - id: SubstrateSignableId::CosigningSubstrateBlock(hash), - attempt: 0, - }, - block_number, + Transaction::CosignSubstrateBlock(hash) => { + AttemptDb::recognize_topic( + self.txn, + genesis, + Topic::SubstrateSign(SubstrateSignableId::CosigningSubstrateBlock(hash)), + ); + + let block_number = SeraiBlockNumber::get(self.txn, hash) + .expect("CosignSubstrateBlock yet didn't save Serai block number"); + let msg = coordinator::CoordinatorMessage::CosignSubstrateBlock { + id: SubstrateSignId { + session: self.spec.set().session, + id: SubstrateSignableId::CosigningSubstrateBlock(hash), + attempt: 0, }, - ) - .await; - } - - Transaction::Batch(_, batch) => { - // Because this Batch has achieved synchrony, its batch ID should be authorized - AttemptDb::recognize_topic( - txn, - genesis, - Topic::SubstrateSign(SubstrateSignableId::Batch(batch)), - ); - recognized_id(spec.set(), genesis, RecognizedIdType::Batch, batch.to_vec()).await; - } - - Transaction::SubstrateBlock(block) => { - let plan_ids = PlanIds::get(txn, &genesis, block).expect( - "synced a tributary block finalizing a substrate block in a provided transaction \ - despite us not providing that transaction", - ); - - for id in plan_ids.into_iter() { - AttemptDb::recognize_topic(txn, genesis, Topic::Sign(id)); - recognized_id(spec.set(), genesis, RecognizedIdType::Plan, id.to_vec()).await; + block_number, + }; + self.processors.send(self.spec.set().network, msg).await; } - } - Transaction::SubstratePreprocess(data) => { - let signer = data.signed.signer; - let Ok(_) = check_sign_data_len::( - txn, - spec, - publish_tributary_tx, - key, - signer, - data.data.len(), - ) - .await - else { - return; - }; - for data in &data.data { - if data.len() != 64 { - 
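`publish_set_keys`, called above and implemented later in this diff via the `common_pst!` macro, has to be idempotent: any honest participant may win the race to publish, so failures are retried only until the effect is visible on-chain. A sketch of that publish-until-observed loop (the closures are stand-ins for the actual Serai client calls):

```rust
use std::{thread, time::Duration};

// Sketch: retry publication until either we succeed or we observe that
// another coordinator already produced the effect on-chain.
fn publish_until_observed(
  publish: impl Fn() -> Result<(), String>,
  effect_exists: impl Fn() -> bool,
) -> bool {
  loop {
    match publish() {
      // We published the transaction ourselves
      Ok(()) => return true,
      Err(e) => {
        // Someone else may have published the same effect first
        if effect_exists() {
          return false;
        }
        eprintln!("couldn't publish: {e}, retrying");
        thread::sleep(Duration::from_secs(5));
      }
    }
  }
}

fn main() {
  // The effect is already present, so we defer to the other publisher
  assert!(!publish_until_observed(|| Err("connection refused".into()), || true));
}
```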
fatal_slash::( - txn, - spec, - publish_tributary_tx, - key, - signer.to_bytes(), - "non-64-byte Substrate preprocess", + Transaction::Batch { block: _, batch } => { + // Because this Batch has achieved synchrony, its batch ID should be authorized + AttemptDb::recognize_topic( + self.txn, + genesis, + Topic::SubstrateSign(SubstrateSignableId::Batch(batch)), + ); + self + .recognized_id + .recognized_id( + self.spec.set(), + genesis, + RecognizedIdType::Batch, + batch.to_le_bytes().to_vec(), ) .await; - return; - } } - match handle::( - txn, - spec, - publish_tributary_tx, - key, - &DataSpecification { - topic: Topic::SubstrateSign(data.plan), - label: BATCH_PREPROCESS, - attempt: data.attempt, - }, - data.data.encode(), - &data.signed, - ) - .await - { - Accumulation::Ready(DataSet::Participating(mut preprocesses)) => { - unflatten(spec, &mut preprocesses); - processors - .send( - spec.set().network, - coordinator::CoordinatorMessage::SubstratePreprocesses { - id: SubstrateSignId { - session: spec.set().session, - id: data.plan, - attempt: data.attempt, - }, - preprocesses: preprocesses - .into_iter() - .map(|(k, v)| (k, v.try_into().unwrap())) - .collect(), - }, - ) + + Transaction::SubstrateBlock(block) => { + let plan_ids = PlanIds::get(self.txn, &genesis, block).expect( + "synced a tributary block finalizing a substrate block in a provided transaction \ + despite us not providing that transaction", + ); + + for id in plan_ids { + AttemptDb::recognize_topic(self.txn, genesis, Topic::Sign(id)); + self + .recognized_id + .recognized_id(self.spec.set(), genesis, RecognizedIdType::Plan, id.to_vec()) .await; } - Accumulation::Ready(DataSet::NotParticipating) => {} - Accumulation::NotReady => {} } - } - Transaction::SubstrateShare(data) => { - let Ok(_) = check_sign_data_len::( - txn, - spec, - publish_tributary_tx, - key, - data.signed.signer, - data.data.len(), - ) - .await - else { - return; - }; - match handle::( - txn, - spec, - publish_tributary_tx, - key, - &DataSpecification { + + Transaction::SubstrateSign(data) => { + // Provided transactions ensure synchrony on any signing protocol, and we won't start + // signing with threshold keys before we've confirmed them on-chain + let Some(removed) = + crate::tributary::removed_as_of_set_keys(self.txn, self.spec.set(), genesis) + else { + self.fatal_slash( + data.signed.signer.to_bytes(), + "signing despite not having set keys on substrate", + ); + return; + }; + let signer = data.signed.signer; + let Ok(()) = self.check_sign_data_len(&removed, signer, data.data.len()) else { + return; + }; + let expected_len = match data.label { + Label::Preprocess => 64, + Label::Share => 32, + }; + for data in &data.data { + if data.len() != expected_len { + self.fatal_slash( + signer.to_bytes(), + "unexpected length data for substrate signing protocol", + ); + return; + } + } + + let data_spec = DataSpecification { topic: Topic::SubstrateSign(data.plan), - label: BATCH_SHARE, + label: data.label, attempt: data.attempt, - }, - data.data.encode(), - &data.signed, - ) - .await - { - Accumulation::Ready(DataSet::Participating(mut shares)) => { - unflatten(spec, &mut shares); - processors - .send( - spec.set().network, - coordinator::CoordinatorMessage::SubstrateShares { - id: SubstrateSignId { - session: spec.set().session, - id: data.plan, - attempt: data.attempt, - }, - shares: shares - .into_iter() - .map(|(validator, share)| (validator, share.try_into().unwrap())) - .collect(), - }, - ) - .await; - } - Accumulation::Ready(DataSet::NotParticipating) => 
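Collapsing the separate preprocess/share transactions into one `SubstrateSign(SignData)` variant, as the arm above does, works because the two phases differ only in their `Label`: the label determines both the expected blob length (64-byte preprocesses, 32-byte shares) and which message the accumulated results feed. A compact sketch of that label-driven handling (message types stubbed for illustration):

```rust
// Mirrors the Label introduced by this diff; the rest is illustrative
#[derive(Clone, Copy)]
enum Label {
  Preprocess,
  Share,
}

enum Message {
  Preprocesses(Vec<Vec<u8>>),
  Shares(Vec<Vec<u8>>),
}

// The label fixes the only two things which differ between the phases
fn handle(label: Label, data: Vec<Vec<u8>>) -> Result<Message, &'static str> {
  let expected_len = match label {
    Label::Preprocess => 64,
    Label::Share => 32,
  };
  if data.iter().any(|blob| blob.len() != expected_len) {
    return Err("unexpected length data");
  }
  Ok(match label {
    Label::Preprocess => Message::Preprocesses(data),
    Label::Share => Message::Shares(data),
  })
}

fn main() {
  assert!(handle(Label::Preprocess, vec![vec![0; 64]]).is_ok());
  assert!(handle(Label::Share, vec![vec![0; 64]]).is_err());
}
```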
{} - Accumulation::NotReady => {} - } - } + }; + let Accumulation::Ready(DataSet::Participating(mut results)) = + self.handle_data(&removed, &data_spec, &data.data.encode(), &data.signed) + else { + return; + }; + unflatten(self.spec, &removed, &mut results); - Transaction::SignPreprocess(data) => { - let Ok(_) = check_sign_data_len::( - txn, - spec, - publish_tributary_tx, - key, - data.signed.signer, - data.data.len(), - ) - .await - else { - return; - }; - match handle::( - txn, - spec, - publish_tributary_tx, - key, - &DataSpecification { - topic: Topic::Sign(data.plan), - label: SIGN_PREPROCESS, + let id = SubstrateSignId { + session: self.spec.set().session, + id: data.plan, attempt: data.attempt, - }, - data.data.encode(), - &data.signed, - ) - .await - { - Accumulation::Ready(DataSet::Participating(mut preprocesses)) => { - unflatten(spec, &mut preprocesses); - processors - .send( - spec.set().network, - sign::CoordinatorMessage::Preprocesses { - id: SignId { session: spec.set().session, id: data.plan, attempt: data.attempt }, - preprocesses, - }, - ) - .await; - } - Accumulation::Ready(DataSet::NotParticipating) => {} - Accumulation::NotReady => {} + }; + let msg = match data.label { + Label::Preprocess => coordinator::CoordinatorMessage::SubstratePreprocesses { + id, + preprocesses: results.into_iter().map(|(v, p)| (v, p.try_into().unwrap())).collect(), + }, + Label::Share => coordinator::CoordinatorMessage::SubstrateShares { + id, + shares: results.into_iter().map(|(v, p)| (v, p.try_into().unwrap())).collect(), + }, + }; + self.processors.send(self.spec.set().network, msg).await; } - } - Transaction::SignShare(data) => { - let Ok(_) = check_sign_data_len::( - txn, - spec, - publish_tributary_tx, - key, - data.signed.signer, - data.data.len(), - ) - .await - else { - return; - }; - match handle::( - txn, - spec, - publish_tributary_tx, - key, - &DataSpecification { + + Transaction::Sign(data) => { + let Some(removed) = + crate::tributary::removed_as_of_set_keys(self.txn, self.spec.set(), genesis) + else { + self.fatal_slash( + data.signed.signer.to_bytes(), + "signing despite not having set keys on substrate", + ); + return; + }; + let Ok(()) = self.check_sign_data_len(&removed, data.signed.signer, data.data.len()) else { + return; + }; + + let data_spec = DataSpecification { topic: Topic::Sign(data.plan), - label: SIGN_SHARE, + label: data.label, attempt: data.attempt, - }, - data.data.encode(), - &data.signed, - ) - .await - { - Accumulation::Ready(DataSet::Participating(mut shares)) => { - unflatten(spec, &mut shares); - processors + }; + if let Accumulation::Ready(DataSet::Participating(mut results)) = + self.handle_data(&removed, &data_spec, &data.data.encode(), &data.signed) + { + unflatten(self.spec, &removed, &mut results); + let id = + SignId { session: self.spec.set().session, id: data.plan, attempt: data.attempt }; + self + .processors .send( - spec.set().network, - sign::CoordinatorMessage::Shares { - id: SignId { session: spec.set().session, id: data.plan, attempt: data.attempt }, - shares, + self.spec.set().network, + match data.label { + Label::Preprocess => { + sign::CoordinatorMessage::Preprocesses { id, preprocesses: results } + } + Label::Share => sign::CoordinatorMessage::Shares { id, shares: results }, }, ) .await; } - Accumulation::Ready(DataSet::NotParticipating) => {} - Accumulation::NotReady => {} } - } - Transaction::SignCompleted { plan, tx_hash, first_signer, signature: _ } => { - log::info!( - "on-chain SignCompleted claims {} completes {}", - 
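`unflatten`, applied to the accumulated results above, expands one entry per validator into one entry per key share: a validator submits a single blob holding one segment per share they control, keyed by their first `Participant` index. A conceptual, self-contained sketch of that expansion (the real helper works off the spec and the removed set):

```rust
use std::{collections::HashMap, ops::Range};

// Sketch: expand per-validator entries (keyed by first share index, holding
// one segment per controlled share) into one entry per share index.
fn unflatten(
  ranges: &[Range<u16>],
  mut data: HashMap<u16, Vec<Vec<u8>>>,
) -> HashMap<u16, Vec<u8>> {
  let mut expanded = HashMap::new();
  for range in ranges {
    let Some(mut segments) = data.remove(&range.start) else { continue };
    for i in range.clone() {
      // Each key share the validator controls receives its own segment
      expanded.insert(i, segments.remove(0));
    }
  }
  expanded
}

fn main() {
  // Validator A controls shares 1 and 2, validator B controls share 3
  let ranges = [1u16 .. 3, 3 .. 4];
  let data = HashMap::from([(1, vec![vec![0xaa], vec![0xbb]]), (3, vec![vec![0xcc]])]);
  let expanded = unflatten(&ranges, data);
  assert_eq!(expanded[&2], vec![0xbb]);
}
```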
hex::encode(&tx_hash), - hex::encode(plan) - ); - if AttemptDb::attempt(txn, genesis, Topic::Sign(plan)).is_none() { - fatal_slash::( - txn, - spec, - publish_tributary_tx, - key, - first_signer.to_bytes(), - "claimed an unrecognized plan was completed", - ) - .await; - return; - }; + Transaction::SignCompleted { plan, tx_hash, first_signer, signature: _ } => { + log::info!( + "on-chain SignCompleted claims {} completes {}", + hex::encode(&tx_hash), + hex::encode(plan) + ); + + if AttemptDb::attempt(self.txn, genesis, Topic::Sign(plan)).is_none() { + self.fatal_slash(first_signer.to_bytes(), "claimed an unrecognized plan was completed"); + return; + }; - // TODO: Confirm this signer hasn't prior published a completion + // TODO: Confirm this signer hasn't prior published a completion - processors - .send( - spec.set().network, - sign::CoordinatorMessage::Completed { - session: spec.set().session, - id: plan, - tx: tx_hash, - }, - ) - .await; + let msg = sign::CoordinatorMessage::Completed { + session: self.spec.set().session, + id: plan, + tx: tx_hash, + }; + self.processors.send(self.spec.set().network, msg).await; + } } } } diff --git a/coordinator/src/tributary/mod.rs b/coordinator/src/tributary/mod.rs index d9f1aa184..cc9bdb1ea 100644 --- a/coordinator/src/tributary/mod.rs +++ b/coordinator/src/tributary/mod.rs @@ -1,852 +1,100 @@ -use core::{ - ops::{Deref, Range}, - fmt::Debug, -}; -use std::io::{self, Read, Write}; - -use zeroize::Zeroizing; -use rand_core::{RngCore, CryptoRng}; +use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto}; -use blake2::{Digest, Blake2s256}; -use transcript::{Transcript, RecommendedTranscript}; - -use ciphersuite::{ - group::{ff::Field, GroupEncoding}, - Ciphersuite, Ristretto, -}; -use schnorr::SchnorrSignature; -use frost::Participant; - -use scale::{Encode, Decode}; -use processor_messages::coordinator::SubstrateSignableId; - -use serai_client::{ - primitives::{NetworkId, PublicKey}, - validator_sets::primitives::{Session, ValidatorSet}, -}; +use serai_client::validator_sets::primitives::ValidatorSet; -#[rustfmt::skip] use tributary::{ ReadWrite, - transaction::{Signed, TransactionError, TransactionKind, Transaction as TransactionTrait}, - TRANSACTION_SIZE_LIMIT, + transaction::{TransactionError, TransactionKind, Transaction as TransactionTrait}, + Tributary, }; mod db; pub use db::*; -mod dkg_confirmer; -mod dkg_removal; +mod spec; +pub use spec::TributarySpec; + +mod transaction; +pub use transaction::{Label, SignData, Transaction}; + +mod signing_protocol; mod handle; pub use handle::*; pub mod scanner; -#[derive(Clone, PartialEq, Eq, Debug)] -pub struct TributarySpec { - serai_block: [u8; 32], - start_time: u64, - set: ValidatorSet, - validators: Vec<(::G, u16)>, -} - -impl TributarySpec { - pub fn new( - serai_block: [u8; 32], - start_time: u64, - set: ValidatorSet, - set_participants: Vec<(PublicKey, u16)>, - ) -> TributarySpec { - let mut validators = vec![]; - for (participant, shares) in set_participants { - let participant = ::read_G::<&[u8]>(&mut participant.0.as_ref()) - .expect("invalid key registered as participant"); - validators.push((participant, shares)); - } - - Self { serai_block, start_time, set, validators } - } - - pub fn set(&self) -> ValidatorSet { - self.set - } - - pub fn genesis(&self) -> [u8; 32] { - // Calculate the genesis for this Tributary - let mut genesis = RecommendedTranscript::new(b"Serai Tributary Genesis"); - // This locks it to a specific Serai chain - genesis.append_message(b"serai_block", 
self.serai_block); - genesis.append_message(b"session", self.set.session.0.to_le_bytes()); - genesis.append_message(b"network", self.set.network.encode()); - let genesis = genesis.challenge(b"genesis"); - let genesis_ref: &[u8] = genesis.as_ref(); - genesis_ref[.. 32].try_into().unwrap() - } - - pub fn start_time(&self) -> u64 { - self.start_time - } - - pub fn n(&self) -> u16 { - self.validators.iter().map(|(_, weight)| weight).sum() - } - - pub fn t(&self) -> u16 { - ((2 * self.n()) / 3) + 1 - } - - pub fn i(&self, key: ::G) -> Option> { - let mut i = 1; - for (validator, weight) in &self.validators { - if validator == &key { - return Some(Range { - start: Participant::new(i).unwrap(), - end: Participant::new(i + weight).unwrap(), - }); - } - i += weight; - } - None - } - - pub fn validators(&self) -> Vec<(::G, u64)> { - self.validators.iter().map(|(validator, weight)| (*validator, u64::from(*weight))).collect() - } - - pub fn write(&self, writer: &mut W) -> io::Result<()> { - writer.write_all(&self.serai_block)?; - writer.write_all(&self.start_time.to_le_bytes())?; - writer.write_all(&self.set.session.0.to_le_bytes())?; - let network_encoded = self.set.network.encode(); - assert_eq!(network_encoded.len(), 1); - writer.write_all(&network_encoded)?; - writer.write_all(&u32::try_from(self.validators.len()).unwrap().to_le_bytes())?; - for validator in &self.validators { - writer.write_all(&validator.0.to_bytes())?; - writer.write_all(&validator.1.to_le_bytes())?; - } - Ok(()) - } - - pub fn serialize(&self) -> Vec { - let mut res = vec![]; - self.write(&mut res).unwrap(); - res - } - - pub fn read(reader: &mut R) -> io::Result { - let mut serai_block = [0; 32]; - reader.read_exact(&mut serai_block)?; - - let mut start_time = [0; 8]; - reader.read_exact(&mut start_time)?; - let start_time = u64::from_le_bytes(start_time); - - let mut session = [0; 4]; - reader.read_exact(&mut session)?; - let session = Session(u32::from_le_bytes(session)); - - let mut network = [0; 1]; - reader.read_exact(&mut network)?; - let network = - NetworkId::decode(&mut &network[..]).map_err(|_| io::Error::other("invalid network"))?; - - let mut validators_len = [0; 4]; - reader.read_exact(&mut validators_len)?; - let validators_len = usize::try_from(u32::from_le_bytes(validators_len)).unwrap(); - - let mut validators = Vec::with_capacity(validators_len); - for _ in 0 .. 
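The relocated `genesis()` above derives the Tributary's 32-byte genesis from a domain-separated transcript over the Serai block hash, session, and network, so distinct validator sets can never share a chain. A sketch of the same derivation shape (inputs are examples; it assumes the `transcript` crate this codebase already imports):

```rust
use transcript::{Transcript, RecommendedTranscript};

// Sketch: bind a chain's genesis to the exact context which spawned it
fn genesis(serai_block: [u8; 32], session: u32, network: u8) -> [u8; 32] {
  let mut t = RecommendedTranscript::new(b"Serai Tributary Genesis");
  // Locks the genesis to one specific Serai chain and validator set
  t.append_message(b"serai_block", serai_block);
  t.append_message(b"session", session.to_le_bytes());
  t.append_message(b"network", [network]);
  let challenge = t.challenge(b"genesis");
  // Truncate the challenge to the 32-byte genesis
  challenge.as_ref()[.. 32].try_into().unwrap()
}

fn main() {
  // Different sessions yield unrelated geneses
  assert_ne!(genesis([0; 32], 0, 1), genesis([0; 32], 1, 1));
}
```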
validators_len { - let key = Ristretto::read_G(reader)?; - let mut weight = [0; 2]; - reader.read_exact(&mut weight)?; - validators.push((key, u16::from_le_bytes(weight))); - } - - Ok(Self { serai_block, start_time, set: ValidatorSet { session, network }, validators }) - } -} - -#[derive(Clone, PartialEq, Eq)] -pub struct SignData { - pub plan: Id, - pub attempt: u32, - - pub data: Vec>, - - pub signed: Signed, -} - -impl Debug for SignData { - fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { - fmt - .debug_struct("SignData") - .field("id", &hex::encode(self.plan.encode())) - .field("attempt", &self.attempt) - .field("signer", &hex::encode(self.signed.signer.to_bytes())) - .finish_non_exhaustive() - } -} - -impl SignData { - pub(crate) fn read(reader: &mut R, nonce: u32) -> io::Result { - let plan = Id::decode(&mut scale::IoReader(&mut *reader)) - .map_err(|_| io::Error::other("invalid plan in SignData"))?; - - let mut attempt = [0; 4]; - reader.read_exact(&mut attempt)?; - let attempt = u32::from_le_bytes(attempt); - - let data = { - let mut data_pieces = [0]; - reader.read_exact(&mut data_pieces)?; - if data_pieces[0] == 0 { - Err(io::Error::other("zero pieces of data in SignData"))?; - } - let mut all_data = vec![]; - for _ in 0 .. data_pieces[0] { - let mut data_len = [0; 2]; - reader.read_exact(&mut data_len)?; - let mut data = vec![0; usize::from(u16::from_le_bytes(data_len))]; - reader.read_exact(&mut data)?; - all_data.push(data); - } - all_data - }; - - let signed = Signed::read_without_nonce(reader, nonce)?; - - Ok(SignData { plan, attempt, data, signed }) - } - - pub(crate) fn write(&self, writer: &mut W) -> io::Result<()> { - writer.write_all(&self.plan.encode())?; - writer.write_all(&self.attempt.to_le_bytes())?; - - writer.write_all(&[u8::try_from(self.data.len()).unwrap()])?; - for data in &self.data { - if data.len() > u16::MAX.into() { - // Currently, the largest individual preprocess is a Monero transaction - // It provides 4 commitments per input (128 bytes), a 64-byte proof for them, along with a - // key image and proof (96 bytes) - // Even with all of that, we could support 227 inputs in a single TX - // Monero is limited to ~120 inputs per TX - // - // Bitcoin has a much higher input count of 520, yet it only uses 64 bytes per preprocess - Err(io::Error::other("signing data exceeded 65535 bytes"))?; - } - writer.write_all(&u16::try_from(data.len()).unwrap().to_le_bytes())?; - writer.write_all(data)?; - } - - self.signed.write_without_nonce(writer) - } -} - -#[derive(Clone, PartialEq, Eq)] -pub enum Transaction { - RemoveParticipant(Participant), - - // Once this completes successfully, no more instances should be created. - DkgCommitments(u32, Vec>, Signed), - DkgShares { - attempt: u32, - // Sending Participant, Receiving Participant, Share - shares: Vec>>, - confirmation_nonces: [u8; 64], - signed: Signed, - }, - InvalidDkgShare { - attempt: u32, - accuser: Participant, - faulty: Participant, - blame: Option>, - signed: Signed, - }, - DkgConfirmed(u32, [u8; 32], Signed), - - DkgRemovalPreprocess(SignData<[u8; 32]>), - DkgRemovalShare(SignData<[u8; 32]>), - - // Co-sign a Substrate block. - CosignSubstrateBlock([u8; 32]), - - // When we have synchrony on a batch, we can allow signing it - // TODO (never?): This is less efficient compared to an ExternalBlock provided transaction, - // which would be binding over the block hash and automatically achieve synchrony on all - // relevant batches. 
ExternalBlock was removed for this due to complexity around the pipeline - // with the current processor, yet it would still be an improvement. - Batch([u8; 32], [u8; 5]), - // When a Serai block is finalized, with the contained batches, we can allow the associated plan - // IDs - SubstrateBlock(u64), - - SubstratePreprocess(SignData), - SubstrateShare(SignData), - - SignPreprocess(SignData<[u8; 32]>), - SignShare(SignData<[u8; 32]>), - // This is defined as an Unsigned transaction in order to de-duplicate SignCompleted amongst - // reporters (who should all report the same thing) - // We do still track the signer in order to prevent a single signer from publishing arbitrarily - // many TXs without penalty - // Here, they're denoted as the first_signer, as only the signer of the first TX to be included - // with this pairing will be remembered on-chain - SignCompleted { - plan: [u8; 32], - tx_hash: Vec, - first_signer: ::G, - signature: SchnorrSignature, - }, -} - -impl Debug for Transaction { - fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { - match self { - Transaction::RemoveParticipant(participant) => fmt - .debug_struct("Transaction::RemoveParticipant") - .field("participant", participant) - .finish(), - Transaction::DkgCommitments(attempt, _, signed) => fmt - .debug_struct("Transaction::DkgCommitments") - .field("attempt", attempt) - .field("signer", &hex::encode(signed.signer.to_bytes())) - .finish_non_exhaustive(), - Transaction::DkgShares { attempt, signed, .. } => fmt - .debug_struct("Transaction::DkgShares") - .field("attempt", attempt) - .field("signer", &hex::encode(signed.signer.to_bytes())) - .finish_non_exhaustive(), - Transaction::InvalidDkgShare { attempt, accuser, faulty, .. } => fmt - .debug_struct("Transaction::InvalidDkgShare") - .field("attempt", attempt) - .field("accuser", accuser) - .field("faulty", faulty) - .finish_non_exhaustive(), - Transaction::DkgConfirmed(attempt, _, signed) => fmt - .debug_struct("Transaction::DkgConfirmed") - .field("attempt", attempt) - .field("signer", &hex::encode(signed.signer.to_bytes())) - .finish_non_exhaustive(), - Transaction::DkgRemovalPreprocess(sign_data) => { - fmt.debug_struct("Transaction::DkgRemovalPreprocess").field("sign_data", sign_data).finish() - } - Transaction::DkgRemovalShare(sign_data) => { - fmt.debug_struct("Transaction::DkgRemovalShare").field("sign_data", sign_data).finish() - } - Transaction::CosignSubstrateBlock(block) => fmt - .debug_struct("Transaction::CosignSubstrateBlock") - .field("block", &hex::encode(block)) - .finish(), - Transaction::Batch(block, batch) => fmt - .debug_struct("Transaction::Batch") - .field("block", &hex::encode(block)) - .field("batch", &hex::encode(batch)) - .finish(), - Transaction::SubstrateBlock(block) => { - fmt.debug_struct("Transaction::SubstrateBlock").field("block", block).finish() - } - Transaction::SubstratePreprocess(sign_data) => { - fmt.debug_struct("Transaction::SubstratePreprocess").field("sign_data", sign_data).finish() - } - Transaction::SubstrateShare(sign_data) => { - fmt.debug_struct("Transaction::SubstrateShare").field("sign_data", sign_data).finish() - } - Transaction::SignPreprocess(sign_data) => { - fmt.debug_struct("Transaction::SignPreprocess").field("sign_data", sign_data).finish() - } - Transaction::SignShare(sign_data) => { - fmt.debug_struct("Transaction::SignShare").field("sign_data", sign_data).finish() - } - Transaction::SignCompleted { plan, tx_hash, .. 
} => fmt - .debug_struct("Transaction::SignCompleted") - .field("plan", &hex::encode(plan)) - .field("tx_hash", &hex::encode(tx_hash)) - .finish_non_exhaustive(), - } - } -} - -impl ReadWrite for Transaction { - fn read(reader: &mut R) -> io::Result { - let mut kind = [0]; - reader.read_exact(&mut kind)?; - - match kind[0] { - 0 => Ok(Transaction::RemoveParticipant({ - let mut participant = [0; 2]; - reader.read_exact(&mut participant)?; - Participant::new(u16::from_le_bytes(participant)) - .ok_or_else(|| io::Error::other("invalid participant in RemoveParticipant"))? - })), - - 1 => { - let mut attempt = [0; 4]; - reader.read_exact(&mut attempt)?; - let attempt = u32::from_le_bytes(attempt); - - let commitments = { - let mut commitments_len = [0; 1]; - reader.read_exact(&mut commitments_len)?; - let commitments_len = usize::from(commitments_len[0]); - if commitments_len == 0 { - Err(io::Error::other("zero commitments in DkgCommitments"))?; - } - - let mut each_commitments_len = [0; 2]; - reader.read_exact(&mut each_commitments_len)?; - let each_commitments_len = usize::from(u16::from_le_bytes(each_commitments_len)); - if (commitments_len * each_commitments_len) > TRANSACTION_SIZE_LIMIT { - Err(io::Error::other( - "commitments present in transaction exceeded transaction size limit", - ))?; - } - let mut commitments = vec![vec![]; commitments_len]; - for commitments in &mut commitments { - *commitments = vec![0; each_commitments_len]; - reader.read_exact(commitments)?; - } - commitments - }; - - let signed = Signed::read_without_nonce(reader, 0)?; - - Ok(Transaction::DkgCommitments(attempt, commitments, signed)) - } - - 2 => { - let mut attempt = [0; 4]; - reader.read_exact(&mut attempt)?; - let attempt = u32::from_le_bytes(attempt); - - let shares = { - let mut share_quantity = [0; 1]; - reader.read_exact(&mut share_quantity)?; - - let mut key_share_quantity = [0; 1]; - reader.read_exact(&mut key_share_quantity)?; - - let mut share_len = [0; 2]; - reader.read_exact(&mut share_len)?; - let share_len = usize::from(u16::from_le_bytes(share_len)); - - let mut all_shares = vec![]; - for _ in 0 .. share_quantity[0] { - let mut shares = vec![]; - for _ in 0 .. 
key_share_quantity[0] { - let mut share = vec![0; share_len]; - reader.read_exact(&mut share)?; - shares.push(share); - } - all_shares.push(shares); - } - all_shares - }; - - let mut confirmation_nonces = [0; 64]; - reader.read_exact(&mut confirmation_nonces)?; - - let signed = Signed::read_without_nonce(reader, 1)?; - - Ok(Transaction::DkgShares { attempt, shares, confirmation_nonces, signed }) - } - - 3 => { - let mut attempt = [0; 4]; - reader.read_exact(&mut attempt)?; - let attempt = u32::from_le_bytes(attempt); - - let mut accuser = [0; 2]; - reader.read_exact(&mut accuser)?; - let accuser = Participant::new(u16::from_le_bytes(accuser)) - .ok_or_else(|| io::Error::other("invalid participant in InvalidDkgShare"))?; - - let mut faulty = [0; 2]; - reader.read_exact(&mut faulty)?; - let faulty = Participant::new(u16::from_le_bytes(faulty)) - .ok_or_else(|| io::Error::other("invalid participant in InvalidDkgShare"))?; - - let mut blame_len = [0; 2]; - reader.read_exact(&mut blame_len)?; - let mut blame = vec![0; u16::from_le_bytes(blame_len).into()]; - reader.read_exact(&mut blame)?; - - // This shares a nonce with DkgConfirmed as only one is expected - let signed = Signed::read_without_nonce(reader, 2)?; - - Ok(Transaction::InvalidDkgShare { - attempt, - accuser, - faulty, - blame: Some(blame).filter(|blame| !blame.is_empty()), - signed, - }) - } - - 4 => { - let mut attempt = [0; 4]; - reader.read_exact(&mut attempt)?; - let attempt = u32::from_le_bytes(attempt); - - let mut confirmation_share = [0; 32]; - reader.read_exact(&mut confirmation_share)?; - - let signed = Signed::read_without_nonce(reader, 2)?; - - Ok(Transaction::DkgConfirmed(attempt, confirmation_share, signed)) - } - - 5 => SignData::read(reader, 0).map(Transaction::DkgRemovalPreprocess), - 6 => SignData::read(reader, 1).map(Transaction::DkgRemovalShare), - - 7 => { - let mut block = [0; 32]; - reader.read_exact(&mut block)?; - Ok(Transaction::CosignSubstrateBlock(block)) - } - - 8 => { - let mut block = [0; 32]; - reader.read_exact(&mut block)?; - let mut batch = [0; 5]; - reader.read_exact(&mut batch)?; - Ok(Transaction::Batch(block, batch)) - } - - 9 => { - let mut block = [0; 8]; - reader.read_exact(&mut block)?; - Ok(Transaction::SubstrateBlock(u64::from_le_bytes(block))) - } - - 10 => SignData::read(reader, 0).map(Transaction::SubstratePreprocess), - 11 => SignData::read(reader, 1).map(Transaction::SubstrateShare), - - 12 => SignData::read(reader, 0).map(Transaction::SignPreprocess), - 13 => SignData::read(reader, 1).map(Transaction::SignShare), - - 14 => { - let mut plan = [0; 32]; - reader.read_exact(&mut plan)?; - - let mut tx_hash_len = [0]; - reader.read_exact(&mut tx_hash_len)?; - let mut tx_hash = vec![0; usize::from(tx_hash_len[0])]; - reader.read_exact(&mut tx_hash)?; - - let first_signer = Ristretto::read_G(reader)?; - let signature = SchnorrSignature::::read(reader)?; - - Ok(Transaction::SignCompleted { plan, tx_hash, first_signer, signature }) - } - - _ => Err(io::Error::other("invalid transaction type")), - } - } - - fn write(&self, writer: &mut W) -> io::Result<()> { - match self { - Transaction::RemoveParticipant(i) => { - writer.write_all(&[0])?; - writer.write_all(&u16::from(*i).to_le_bytes()) - } - - Transaction::DkgCommitments(attempt, commitments, signed) => { - writer.write_all(&[1])?; - writer.write_all(&attempt.to_le_bytes())?; - if commitments.is_empty() { - Err(io::Error::other("zero commitments in DkgCommitments"))? 
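The removed `ReadWrite` code around here frames `Vec<Vec<u8>>` fields as a `u8` element count plus a single shared little-endian `u16` element length, which is why the write side asserts all elements are equally sized. A round-trip sketch of that framing (helper names are illustrative):

```rust
use std::io::{self, Read, Write};

// Sketch of the removed framing for Vec<Vec<u8>> fields:
// a u8 element count, one shared u16 LE element length, then raw bytes.
fn write_vecs<W: Write>(w: &mut W, vecs: &[Vec<u8>]) -> io::Result<()> {
  w.write_all(&[u8::try_from(vecs.len()).unwrap()])?;
  // All elements share one length prefix, so they must be the same size
  w.write_all(&u16::try_from(vecs[0].len()).unwrap().to_le_bytes())?;
  for vec in vecs {
    assert_eq!(vec.len(), vecs[0].len());
    w.write_all(vec)?;
  }
  Ok(())
}

fn read_vecs<R: Read>(r: &mut R) -> io::Result<Vec<Vec<u8>>> {
  let mut count = [0];
  r.read_exact(&mut count)?;
  let mut len = [0; 2];
  r.read_exact(&mut len)?;
  let len = usize::from(u16::from_le_bytes(len));
  let mut vecs = vec![vec![0; len]; usize::from(count[0])];
  for vec in &mut vecs {
    r.read_exact(vec)?;
  }
  Ok(vecs)
}

fn main() -> io::Result<()> {
  let mut buf = vec![];
  write_vecs(&mut buf, &[vec![1; 4], vec![2; 4]])?;
  assert_eq!(read_vecs(&mut buf.as_slice())?, vec![vec![1; 4], vec![2; 4]]);
  Ok(())
}
```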
- } - writer.write_all(&[u8::try_from(commitments.len()).unwrap()])?; - for commitments_i in commitments { - if commitments_i.len() != commitments[0].len() { - Err(io::Error::other("commitments of differing sizes in DkgCommitments"))? - } - } - writer.write_all(&u16::try_from(commitments[0].len()).unwrap().to_le_bytes())?; - for commitments in commitments { - writer.write_all(commitments)?; - } - signed.write_without_nonce(writer) - } - - Transaction::DkgShares { attempt, shares, confirmation_nonces, signed } => { - writer.write_all(&[2])?; - writer.write_all(&attempt.to_le_bytes())?; - - // `shares` is a Vec which is supposed to map to a HashMap>. Since we - // bound participants to 150, this conversion is safe if a valid in-memory transaction. - writer.write_all(&[u8::try_from(shares.len()).unwrap()])?; - // This assumes at least one share is being sent to another party - writer.write_all(&[u8::try_from(shares[0].len()).unwrap()])?; - let share_len = shares[0][0].len(); - // For BLS12-381 G2, this would be: - // - A 32-byte share - // - A 96-byte ephemeral key - // - A 128-byte signature - // Hence why this has to be u16 - writer.write_all(&u16::try_from(share_len).unwrap().to_le_bytes())?; - - for these_shares in shares { - assert_eq!(these_shares.len(), shares[0].len(), "amount of sent shares was variable"); - for share in these_shares { - assert_eq!(share.len(), share_len, "sent shares were of variable length"); - writer.write_all(share)?; - } - } - - writer.write_all(confirmation_nonces)?; - signed.write_without_nonce(writer) - } - - Transaction::InvalidDkgShare { attempt, accuser, faulty, blame, signed } => { - writer.write_all(&[3])?; - writer.write_all(&attempt.to_le_bytes())?; - writer.write_all(&u16::from(*accuser).to_le_bytes())?; - writer.write_all(&u16::from(*faulty).to_le_bytes())?; - - // Flattens Some(vec![]) to None on the expectation no actual blame will be 0-length - assert!(blame.as_ref().map(|blame| blame.len()).unwrap_or(1) != 0); - let blame_len = - u16::try_from(blame.as_ref().unwrap_or(&vec![]).len()).expect("blame exceeded 64 KB"); - writer.write_all(&blame_len.to_le_bytes())?; - writer.write_all(blame.as_ref().unwrap_or(&vec![]))?; - - signed.write_without_nonce(writer) - } - - Transaction::DkgConfirmed(attempt, share, signed) => { - writer.write_all(&[4])?; - writer.write_all(&attempt.to_le_bytes())?; - writer.write_all(share)?; - signed.write_without_nonce(writer) - } - - Transaction::DkgRemovalPreprocess(data) => { - writer.write_all(&[5])?; - data.write(writer) - } - Transaction::DkgRemovalShare(data) => { - writer.write_all(&[6])?; - data.write(writer) - } - - Transaction::CosignSubstrateBlock(block) => { - writer.write_all(&[7])?; - writer.write_all(block) - } - - Transaction::Batch(block, batch) => { - writer.write_all(&[8])?; - writer.write_all(block)?; - writer.write_all(batch) - } - - Transaction::SubstrateBlock(block) => { - writer.write_all(&[9])?; - writer.write_all(&block.to_le_bytes()) - } - - Transaction::SubstratePreprocess(data) => { - writer.write_all(&[10])?; - data.write(writer) - } - Transaction::SubstrateShare(data) => { - writer.write_all(&[11])?; - data.write(writer) - } - - Transaction::SignPreprocess(data) => { - writer.write_all(&[12])?; - data.write(writer) - } - Transaction::SignShare(data) => { - writer.write_all(&[13])?; - data.write(writer) - } - Transaction::SignCompleted { plan, tx_hash, first_signer, signature } => { - writer.write_all(&[14])?; - writer.write_all(plan)?; - writer - 
.write_all(&[u8::try_from(tx_hash.len()).expect("tx hash length exceed 255 bytes")])?; - writer.write_all(tx_hash)?; - writer.write_all(&first_signer.to_bytes())?; - signature.write(writer) - } - } +pub fn removed_as_of_dkg_attempt( + getter: &impl Get, + genesis: [u8; 32], + attempt: u32, +) -> Option<Vec<<Ristretto as Ciphersuite>::G>> { + if attempt == 0 { + Some(vec![]) + } else { + RemovedAsOfDkgAttempt::get(getter, genesis, attempt).map(|keys| { + keys.iter().map(|key| <Ristretto as Ciphersuite>::G::from_bytes(key).unwrap()).collect() + }) } } -impl TransactionTrait for Transaction { - fn kind(&self) -> TransactionKind<'_> { - match self { - Transaction::RemoveParticipant(_) => TransactionKind::Provided("remove"), - - Transaction::DkgCommitments(attempt, _, signed) => { - TransactionKind::Signed((b"dkg", attempt).encode(), signed) - } - Transaction::DkgShares { attempt, signed, .. } => { - TransactionKind::Signed((b"dkg", attempt).encode(), signed) - } - Transaction::InvalidDkgShare { attempt, signed, .. } => { - TransactionKind::Signed((b"dkg", attempt).encode(), signed) - } - Transaction::DkgConfirmed(attempt, _, signed) => { - TransactionKind::Signed((b"dkg", attempt).encode(), signed) - } - - Transaction::DkgRemovalPreprocess(data) => { - TransactionKind::Signed((b"dkg_removal", data.plan, data.attempt).encode(), &data.signed) - } - Transaction::DkgRemovalShare(data) => { - TransactionKind::Signed((b"dkg_removal", data.plan, data.attempt).encode(), &data.signed) - } - - Transaction::CosignSubstrateBlock(_) => TransactionKind::Provided("cosign"), - - Transaction::Batch(_, _) => TransactionKind::Provided("batch"), - Transaction::SubstrateBlock(_) => TransactionKind::Provided("serai"), - - Transaction::SubstratePreprocess(data) => { - TransactionKind::Signed((b"substrate", data.plan, data.attempt).encode(), &data.signed) - } - Transaction::SubstrateShare(data) => { - TransactionKind::Signed((b"substrate", data.plan, data.attempt).encode(), &data.signed) - } - - Transaction::SignPreprocess(data) => { - TransactionKind::Signed((b"sign", data.plan, data.attempt).encode(), &data.signed) - } - Transaction::SignShare(data) => { - TransactionKind::Signed((b"sign", data.plan, data.attempt).encode(), &data.signed) - } - Transaction::SignCompleted { .. } => TransactionKind::Unsigned, - } - } - - fn hash(&self) -> [u8; 32] { - let mut tx = self.serialize(); - if let TransactionKind::Signed(_, signed) = self.kind() { - // Make sure the part we're cutting off is the signature - assert_eq!(tx.drain((tx.len() - 64) ..).collect::>(), signed.signature.serialize()); - } - Blake2s256::digest([b"Coordinator Tributary Transaction".as_slice(), &tx].concat()).into() - } - - fn verify(&self) -> Result<(), TransactionError> { - if let Transaction::SubstrateShare(data) = self { - for data in &data.data { - if data.len() != 32 { - Err(TransactionError::InvalidContent)?; - } - } - } - - if let Transaction::SignCompleted { first_signer, signature, .. } = self { - if !signature.verify(*first_signer, self.sign_completed_challenge()) { - Err(TransactionError::InvalidContent)?; - } - } - - Ok(()) - } +pub fn removed_as_of_set_keys( + getter: &impl Get, + set: ValidatorSet, + genesis: [u8; 32], +) -> Option<Vec<<Ristretto as Ciphersuite>::G>> { + // SeraiDkgCompleted has the key placed on-chain. + // This key can be uniquely mapped to an attempt so long as one participant was honest, which we + // assume, as we ourselves are a presumably honest participant. + // Resolve from the generated key to its attempt, then to the parties fatally slashed as of it.
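`removed_as_of_set_keys` resolves in two hops: the key set on-chain identifies the DKG attempt which produced it, and that attempt identifies who had been fatally slashed when it began (attempt 0 always has an empty removed set). A sketch of that chain with the DB getters stubbed as closures (the real code reads these from the coordinator's database):

```rust
// Sketch: resolve the removed set from the key set on-chain, with lookups
// stubbed as closures for illustration.
fn removed_as_of_set_keys(
  key_on_chain: Option<[u8; 32]>,
  attempt_for_key: impl Fn([u8; 32]) -> u32,
  removed_for_attempt: impl Fn(u32) -> Option<Vec<[u8; 32]>>,
) -> Option<Vec<[u8; 32]>> {
  // No key on Substrate yet means no removal set can be known
  let key = key_on_chain?;
  let attempt = attempt_for_key(key);
  // Attempt 0 predates any removals by definition
  if attempt == 0 {
    Some(vec![])
  } else {
    removed_for_attempt(attempt)
  }
}

fn main() {
  assert_eq!(removed_as_of_set_keys(None, |_| 0, |_| None), None);
  assert_eq!(removed_as_of_set_keys(Some([0; 32]), |_| 0, |_| None), Some(vec![]));
}
```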
+ + // This expect will trigger if this is prematurely called and Substrate has tracked the keys yet + // we haven't locally synced and handled the Tributary + // All callers of this, at the time of writing, ensure the Tributary has sufficiently synced + // making the panic with context more desirable than the None + let attempt = KeyToDkgAttempt::get(getter, SeraiDkgCompleted::get(getter, set)?) + .expect("key completed on-chain didn't have an attempt related"); + removed_as_of_dkg_attempt(getter, genesis, attempt) } -impl Transaction { - // Used to initially construct transactions so we can then get sig hashes and perform signing - pub fn empty_signed() -> Signed { - Signed { - signer: Ristretto::generator(), - nonce: 0, - signature: SchnorrSignature:: { - R: Ristretto::generator(), - s: ::F::ZERO, - }, - } - } - - // Sign a transaction - pub fn sign( - &mut self, - rng: &mut R, - genesis: [u8; 32], - key: &Zeroizing<::F>, +pub async fn publish_signed_transaction( + txn: &mut D::Transaction<'_>, + tributary: &Tributary, + tx: Transaction, +) { + log::debug!("publishing transaction {}", hex::encode(tx.hash())); + + let (order, signer) = if let TransactionKind::Signed(order, signed) = tx.kind() { + let signer = signed.signer; + + // Safe as we should deterministically create transactions, meaning if this is already on-disk, + // it's what we're saving now + SignedTransactionDb::set(txn, &order, signed.nonce, &tx.serialize()); + + (order, signer) + } else { + panic!("non-signed transaction passed to publish_signed_transaction"); + }; + + // If we're trying to publish 5, when the last transaction published was 3, this will delay + // publication until the point in time we publish 4 + while let Some(tx) = SignedTransactionDb::take_signed_transaction( + txn, + &order, + tributary + .next_nonce(&signer, &order) + .await + .expect("we don't have a nonce, meaning we aren't a participant on this tributary"), ) { - fn signed(tx: &mut Transaction) -> (u32, &mut Signed) { - let nonce = match tx { - Transaction::RemoveParticipant(_) => panic!("signing RemoveParticipant"), - - Transaction::DkgCommitments(_, _, _) => 0, - Transaction::DkgShares { .. } => 1, - Transaction::InvalidDkgShare { .. } => 2, - Transaction::DkgConfirmed(_, _, _) => 2, - - Transaction::DkgRemovalPreprocess(_) => 0, - Transaction::DkgRemovalShare(_) => 1, - - Transaction::CosignSubstrateBlock(_) => panic!("signing CosignSubstrateBlock"), - - Transaction::Batch(_, _) => panic!("signing Batch"), - Transaction::SubstrateBlock(_) => panic!("signing SubstrateBlock"), - - Transaction::SubstratePreprocess(_) => 0, - Transaction::SubstrateShare(_) => 1, - - Transaction::SignPreprocess(_) => 0, - Transaction::SignShare(_) => 1, - Transaction::SignCompleted { .. } => panic!("signing SignCompleted"), - }; - - ( - nonce, - match tx { - Transaction::RemoveParticipant(_) => panic!("signing RemoveParticipant"), - - Transaction::DkgCommitments(_, _, ref mut signed) => signed, - Transaction::DkgShares { ref mut signed, .. } => signed, - Transaction::InvalidDkgShare { ref mut signed, .. 
} => signed, - Transaction::DkgConfirmed(_, _, ref mut signed) => signed, - - Transaction::DkgRemovalPreprocess(ref mut data) => &mut data.signed, - Transaction::DkgRemovalShare(ref mut data) => &mut data.signed, - - Transaction::CosignSubstrateBlock(_) => panic!("signing CosignSubstrateBlock"), - - Transaction::Batch(_, _) => panic!("signing Batch"), - Transaction::SubstrateBlock(_) => panic!("signing SubstrateBlock"), - - Transaction::SubstratePreprocess(ref mut data) => &mut data.signed, - Transaction::SubstrateShare(ref mut data) => &mut data.signed, - - Transaction::SignPreprocess(ref mut data) => &mut data.signed, - Transaction::SignShare(ref mut data) => &mut data.signed, - Transaction::SignCompleted { .. } => panic!("signing SignCompleted"), - }, - ) - } - - let (nonce, signed_ref) = signed(self); - signed_ref.signer = Ristretto::generator() * key.deref(); - signed_ref.nonce = nonce; - - let sig_nonce = Zeroizing::new(::F::random(rng)); - signed(self).1.signature.R = ::generator() * sig_nonce.deref(); - let sig_hash = self.sig_hash(genesis); - signed(self).1.signature = SchnorrSignature::::sign(key, sig_nonce, sig_hash); - } - - pub fn sign_completed_challenge(&self) -> ::F { - if let Transaction::SignCompleted { plan, tx_hash, first_signer, signature } = self { - let mut transcript = - RecommendedTranscript::new(b"Coordinator Tributary Transaction SignCompleted"); - transcript.append_message(b"plan", plan); - transcript.append_message(b"tx_hash", tx_hash); - transcript.append_message(b"signer", first_signer.to_bytes()); - transcript.append_message(b"nonce", signature.R.to_bytes()); - Ristretto::hash_to_F(b"SignCompleted signature", &transcript.challenge(b"challenge")) - } else { - panic!("sign_completed_challenge called on transaction which wasn't SignCompleted") + // We need to return a proper error here to enable that, due to a race condition around + // multiple publications + match tributary.add_transaction(tx.clone()).await { + Ok(_) => {} + // Some asynchronicity if InvalidNonce, assumed safe due to deterministic nonces + Err(TransactionError::InvalidNonce) => { + log::warn!("publishing TX {tx:?} returned InvalidNonce. 
was it already added?") + } + Err(e) => panic!("created an invalid transaction: {e:?}"), } } } diff --git a/coordinator/src/tributary/scanner.rs b/coordinator/src/tributary/scanner.rs index c127bdfa6..9680328e5 100644 --- a/coordinator/src/tributary/scanner.rs +++ b/coordinator/src/tributary/scanner.rs @@ -1,17 +1,23 @@ -use core::{future::Future, time::Duration}; -use std::sync::Arc; +use core::{marker::PhantomData, ops::Deref, future::Future, time::Duration}; +use std::{sync::Arc, collections::HashSet}; use zeroize::Zeroizing; -use ciphersuite::{Ciphersuite, Ristretto}; +use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto}; use tokio::sync::broadcast; use scale::{Encode, Decode}; -use serai_client::{validator_sets::primitives::ValidatorSet, Serai}; +use serai_client::{ + primitives::{SeraiAddress, Signature}, + validator_sets::primitives::{KeyPair, ValidatorSet}, + Serai, +}; use serai_db::DbTxn; +use processor_messages::coordinator::SubstrateSignableId; + use tributary::{ TransactionKind, Transaction as TributaryTransaction, TransactionError, Block, TributaryReader, tendermint::{ @@ -20,13 +26,7 @@ use tributary::{ }, }; -use crate::{ - Db, - tributary::handle::{fatal_slash, handle_application_tx}, - processors::Processors, - tributary::{TributarySpec, Transaction, LastBlock, EventDb}, - P2p, -}; +use crate::{Db, processors::Processors, substrate::BatchInstructionsHashDb, tributary::*, P2p}; #[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, Decode)] pub enum RecognizedIdType { @@ -34,140 +34,541 @@ pub enum RecognizedIdType { Plan, } -pub(crate) trait RIDTrait: - Clone + Fn(ValidatorSet, [u8; 32], RecognizedIdType, Vec) -> FRid -{ +#[async_trait::async_trait] +pub trait RIDTrait { + async fn recognized_id( + &self, + set: ValidatorSet, + genesis: [u8; 32], + kind: RecognizedIdType, + id: Vec, + ); } -impl) -> FRid> RIDTrait - for F +#[async_trait::async_trait] +impl< + FRid: Send + Future, + F: Sync + Fn(ValidatorSet, [u8; 32], RecognizedIdType, Vec) -> FRid, + > RIDTrait for F { + async fn recognized_id( + &self, + set: ValidatorSet, + genesis: [u8; 32], + kind: RecognizedIdType, + id: Vec, + ) { + (self)(set, genesis, kind, id).await + } } -#[derive(Clone, Copy, PartialEq, Eq, Debug)] -pub enum PstTxType { - SetKeys, - RemoveParticipant([u8; 32]), +#[async_trait::async_trait] +pub trait PublishSeraiTransaction { + async fn publish_set_keys( + &self, + db: &(impl Sync + Get), + set: ValidatorSet, + removed: Vec, + key_pair: KeyPair, + signature: Signature, + ); } -// Handle a specific Tributary block -#[allow(clippy::too_many_arguments)] -async fn handle_block< +mod impl_pst_for_serai { + use super::*; + + use serai_client::SeraiValidatorSets; + + // Uses a macro because Rust can't resolve the lifetimes/generics around the check function + // check is expected to return true if the effect has already occurred + // The generated publish function will return true if *we* published the transaction + macro_rules! 
common_pst { + ($Meta: ty, $check: ident) => { + async fn publish( + serai: &Serai, + db: &impl Get, + set: ValidatorSet, + tx: serai_client::Transaction, + meta: $Meta, + ) -> bool { + loop { + match serai.publish(&tx).await { + Ok(_) => return true, + // This is assumed to be some ephemeral error due to the assumed fault-free + // creation + // TODO2: Differentiate connection errors from invariants + Err(e) => { + // The following block is irrelevant, and can/likely will fail, if we're publishing + // a TX for an old session + // If we're on a newer session, move on + if crate::RetiredTributaryDb::get(db, set).is_some() { + log::warn!("trying to publish a TX relevant to set {set:?} which isn't the latest"); + return false; + } + + if let Ok(serai) = serai.as_of_latest_finalized_block().await { + let serai = serai.validator_sets(); + + // Check if someone else published the TX in question + if $check(serai, set, meta).await { + return false; + } + } + + log::error!("couldn't connect to Serai node to publish TX: {e:?}"); + tokio::time::sleep(core::time::Duration::from_secs(5)).await; + } + } + } + } + }; + } + + #[async_trait::async_trait] + impl PublishSeraiTransaction for Serai { + async fn publish_set_keys( + &self, + db: &(impl Sync + Get), + set: ValidatorSet, + removed: Vec, + key_pair: KeyPair, + signature: Signature, + ) { + let tx = SeraiValidatorSets::set_keys(set.network, removed, key_pair, signature); + async fn check(serai: SeraiValidatorSets<'_>, set: ValidatorSet, (): ()) -> bool { + if matches!(serai.keys(set).await, Ok(Some(_))) { + log::info!("another coordinator set key pair for {:?}", set); + return true; + } + false + } + common_pst!((), check); + if publish(self, db, set, tx, ()).await { + log::info!("published set keys for {set:?}"); + } + } + } +} + +#[async_trait::async_trait] +pub trait PTTTrait { + async fn publish_tributary_tx(&self, tx: Transaction); +} +#[async_trait::async_trait] +impl, F: Sync + Fn(Transaction) -> FPtt> PTTTrait for F { + async fn publish_tributary_tx(&self, tx: Transaction) { + (self)(tx).await + } +} + +pub struct TributaryBlockHandler< + 'a, D: Db, + T: DbTxn, Pro: Processors, - FPst: Future, - PST: Clone + Fn(ValidatorSet, PstTxType, serai_client::Transaction) -> FPst, - FPtt: Future, - PTT: Clone + Fn(Transaction) -> FPtt, - FRid: Future, - RID: RIDTrait, + PST: PublishSeraiTransaction, + PTT: PTTTrait, + RID: RIDTrait, P: P2p, ->( - db: &mut D, - key: &Zeroizing<::F>, - recognized_id: RID, - processors: &Pro, - publish_serai_tx: PST, - publish_tributary_tx: &PTT, - spec: &TributarySpec, +> { + pub db: &'a D, + pub txn: &'a mut T, + pub our_key: &'a Zeroizing<::F>, + pub recognized_id: &'a RID, + pub processors: &'a Pro, + pub publish_serai_tx: &'a PST, + pub publish_tributary_tx: &'a PTT, + pub spec: &'a TributarySpec, block: Block, -) { - log::info!("found block for Tributary {:?}", spec.set()); + pub block_number: u32, + _p2p: PhantomData
<P>
, +} + +impl< + D: Db, + T: DbTxn, + Pro: Processors, + PST: PublishSeraiTransaction, + PTT: PTTTrait, + RID: RIDTrait, + P: P2p, + > TributaryBlockHandler<'_, D, T, Pro, PST, PTT, RID, P> +{ + pub fn fatal_slash(&mut self, slashing: [u8; 32], reason: &str) { + let genesis = self.spec.genesis(); + + log::warn!("fatally slashing {}. reason: {}", hex::encode(slashing), reason); + FatallySlashed::set_fatally_slashed(self.txn, genesis, slashing); + + // TODO: disconnect the node from network/ban from further participation in all Tributaries + } + + // TODO: Once Substrate confirms a key, we need to rotate our validator set OR form a second + // Tributary post-DKG + // https://github.com/serai-dex/serai/issues/426 - let hash = block.hash(); + async fn handle(mut self) { + log::info!("found block for Tributary {:?}", self.spec.set()); - let mut event_id = 0; - #[allow(clippy::explicit_counter_loop)] // event_id isn't TX index. It just currently lines up - for tx in block.transactions { - if EventDb::get(db, hash, event_id).is_some() { - event_id += 1; - continue; + let transactions = self.block.transactions.clone(); + for tx in transactions { + match tx { + TributaryTransaction::Tendermint(TendermintTx::SlashEvidence(ev)) => { + // Since the evidence is on the chain, it should already have been validated + // We can just punish the signer + let data = match ev { + Evidence::ConflictingMessages(first, second) | + Evidence::ConflictingPrecommit(first, second) => (first, Some(second)), + Evidence::InvalidPrecommit(first) | Evidence::InvalidValidRound(first) => (first, None), + }; + let msgs = ( + decode_signed_message::>(&data.0).unwrap(), + if data.1.is_some() { + Some( + decode_signed_message::>(&data.1.unwrap()) + .unwrap(), + ) + } else { + None + }, + ); + + // Since anything with evidence is fundamentally faulty behavior, not just temporal + // errors, mark the node as fatally slashed + self.fatal_slash(msgs.0.msg.sender, &format!("invalid tendermint messages: {msgs:?}")); + } + TributaryTransaction::Application(tx) => { + self.handle_application_tx(tx).await; + } + } } - let mut txn = db.txn(); - - match tx { - TributaryTransaction::Tendermint(TendermintTx::SlashEvidence(ev)) => { - // Since the evidence is on the chain, it should already have been validated - // We can just punish the signer - let data = match ev { - Evidence::ConflictingMessages(first, second) => (first, Some(second)), - Evidence::ConflictingPrecommit(first, second) => (first, Some(second)), - Evidence::InvalidPrecommit(first) => (first, None), - Evidence::InvalidValidRound(first) => (first, None), - }; - let msgs = ( - decode_signed_message::>(&data.0).unwrap(), - if data.1.is_some() { - Some( - decode_signed_message::>(&data.1.unwrap()) - .unwrap(), - ) - } else { - None - }, - ); - - // Since anything with evidence is fundamentally faulty behavior, not just temporal errors, - // mark the node as fatally slashed - fatal_slash::( - &mut txn, - spec, - publish_tributary_tx, - key, - msgs.0.msg.sender, - &format!("invalid tendermint messages: {:?}", msgs), - ) - .await; + let genesis = self.spec.genesis(); + + let current_fatal_slashes = FatalSlashes::get_as_keys(self.txn, genesis); + + // Calculate the shares still present, spinning if not enough are + // still_present_shares is used by a below branch, yet it's a natural byproduct of checking if + // we should spin, hence storing it in a variable here + let still_present_shares = { + // Start with the original n value + let mut present_shares = self.spec.n(&[]); + // 
Remove everyone fatally slashed + for removed in &current_fatal_slashes { + let original_i_for_removed = + self.spec.i(&[], *removed).expect("removed party was never present"); + let removed_shares = + u16::from(original_i_for_removed.end) - u16::from(original_i_for_removed.start); + present_shares -= removed_shares; } - TributaryTransaction::Application(tx) => { - handle_application_tx::( - tx, - spec, - processors, - publish_serai_tx.clone(), - publish_tributary_tx, - key, - recognized_id.clone(), - &mut txn, - ) - .await; + + // Spin if the present shares don't satisfy the required threshold + if present_shares < self.spec.t() { + loop { + log::error!( + "fatally slashed so many participants for {:?} we no longer meet the threshold", + self.spec.set() + ); + tokio::time::sleep(core::time::Duration::from_secs(60)).await; + } } - } - EventDb::handle_event(&mut txn, hash, event_id); - txn.commit(); + present_shares + }; - event_id += 1; - } + for topic in ReattemptDb::take(self.txn, genesis, self.block_number) { + let attempt = AttemptDb::start_next_attempt(self.txn, genesis, topic); + log::info!("re-attempting {topic:?} with attempt {attempt}"); - // TODO: Trigger any necessary re-attempts + // Slash people who failed to participate as expected in the prior attempt + { + let prior_attempt = attempt - 1; + let (removed, expected_participants) = match topic { + Topic::Dkg => { + // Every validator who wasn't removed is expected to have participated + let removed = + crate::tributary::removed_as_of_dkg_attempt(self.txn, genesis, prior_attempt) + .expect("prior attempt didn't have its removed saved to disk"); + let removed_set = removed.iter().copied().collect::<HashSet<_>>(); + ( + removed, + self + .spec + .validators() + .into_iter() + .filter_map(|(validator, _)| { + Some(validator).filter(|validator| !removed_set.contains(validator)) + }) + .collect(), + ) + } + Topic::DkgConfirmation => { + panic!("TODO: re-attempting DkgConfirmation when we should be re-attempting the Dkg") + } + Topic::SubstrateSign(_) | Topic::Sign(_) => { + let removed = + crate::tributary::removed_as_of_set_keys(self.txn, self.spec.set(), genesis) + .expect("SubstrateSign/Sign have yet to set keys"); + // TODO: If 67% sent preprocesses, this should be them. 
Else, this should be vec![] + let expected_participants = vec![]; + (removed, expected_participants) + } + }; + + let (expected_topic, expected_label) = match topic { + Topic::Dkg => { + let n = self.spec.n(&removed); + // If we got all the DKG shares, we should be on DKG confirmation + let share_spec = + DataSpecification { topic: Topic::Dkg, label: Label::Share, attempt: prior_attempt }; + if DataReceived::get(self.txn, genesis, &share_spec).unwrap_or(0) == n { + // Label::Share since there is no Label::Preprocess for DkgConfirmation since the + // preprocess is part of Topic::Dkg Label::Share + (Topic::DkgConfirmation, Label::Share) + } else { + let preprocess_spec = DataSpecification { + topic: Topic::Dkg, + label: Label::Preprocess, + attempt: prior_attempt, + }; + // If we got all the DKG preprocesses, DKG shares + if DataReceived::get(self.txn, genesis, &preprocess_spec).unwrap_or(0) == n { + // Label::Share since there is no Label::Preprocess for DkgConfirmation since the + // preprocess is part of Topic::Dkg Label::Share + (Topic::Dkg, Label::Share) + } else { + (Topic::Dkg, Label::Preprocess) + } + } + } + Topic::DkgConfirmation => unreachable!(), + // If we got enough participants to move forward, then we expect shares from them all + Topic::SubstrateSign(_) | Topic::Sign(_) => (topic, Label::Share), + }; + + let mut did_not_participate = vec![]; + for expected_participant in expected_participants { + if DataDb::get( + self.txn, + genesis, + &DataSpecification { + topic: expected_topic, + label: expected_label, + attempt: prior_attempt, + }, + &expected_participant.to_bytes(), + ) + .is_none() + { + did_not_participate.push(expected_participant); + } + } + + // If a supermajority didn't participate as expected, the protocol was likely aborted due + // to detection of a completion or some larger networking error + // Accordingly, clear did_not_participate + // TODO + + // If during the DKG, explicitly mark these people as having been offline + // TODO: If they were offline sufficiently long ago, don't strike them off + if topic == Topic::Dkg { + let mut existing = OfflineDuringDkg::get(self.txn, genesis).unwrap_or(vec![]); + for did_not_participate in did_not_participate { + existing.push(did_not_participate.to_bytes()); + } + OfflineDuringDkg::set(self.txn, genesis, &existing); + } + + // Slash everyone who didn't participate as expected + // This may be overzealous as if a minority detects a completion, they'll abort yet the + // supermajority will cause the above allowance to not trigger, causing an honest minority + // to be slashed + // At the end of the protocol, the accumulated slashes are reduced by the amount obtained + // by the worst-performing member of the supermajority, and this is expected to + // sufficiently compensate for slashes which occur under normal operation + // TODO + } + + /* + All of these have the same common flow: + + 1) Check if this re-attempt is actually needed + 2) If so, dispatch whatever events as needed + + This is because we *always* re-attempt any protocol which had participation. That doesn't + mean we *should* re-attempt this protocol. + + The alternatives were: + 1) Note on-chain we completed a protocol, halting re-attempts upon 34%. + 2) Vote on-chain to re-attempt a protocol. + + This schema doesn't have any additional messages upon the success case (whereas + alternative #1 does) and doesn't have overhead (as alternative #2 does, sending votes and + then preprocesses. This only sends preprocesses). 
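
   As a rough sketch of that common flow (the handler names here are illustrative, not the
   actual functions), every branch of the match below reduces to:

     // 1) Check if this re-attempt is actually needed
     if !effect_already_observed(txn, topic) {
       // 2) If so, dispatch whatever events are needed for the next attempt
       dispatch_reattempt(topic, attempt).await;
     }
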
+ */ + match topic { + Topic::Dkg => { + let mut removed = current_fatal_slashes.clone(); + + let t = self.spec.t(); + { + let mut present_shares = still_present_shares; + + // Load the parties marked as offline across the various attempts + let mut offline = OfflineDuringDkg::get(self.txn, genesis) + .unwrap_or(vec![]) + .iter() + .map(|key| ::G::from_bytes(key).unwrap()) + .collect::>(); + // Pop from the list to prioritize the removal of those recently offline + while let Some(offline) = offline.pop() { + // Make sure they weren't removed already (such as due to being fatally slashed) + // This also may trigger if they were offline across multiple attempts + if removed.contains(&offline) { + continue; + } + + // If we can remove them and still meet the threshold, do so + let original_i_for_offline = + self.spec.i(&[], offline).expect("offline was never present?"); + let offline_shares = + u16::from(original_i_for_offline.end) - u16::from(original_i_for_offline.start); + if (present_shares - offline_shares) >= t { + present_shares -= offline_shares; + removed.push(offline); + } + + // If we've removed as many people as we can, break + if present_shares == t { + break; + } + } + } + + RemovedAsOfDkgAttempt::set( + self.txn, + genesis, + attempt, + &removed.iter().map(::G::to_bytes).collect(), + ); + + if DkgLocallyCompleted::get(self.txn, genesis).is_none() { + let Some(our_i) = self.spec.i(&removed, Ristretto::generator() * self.our_key.deref()) + else { + continue; + }; + + // Since it wasn't completed, instruct the processor to start the next attempt + let id = + processor_messages::key_gen::KeyGenId { session: self.spec.set().session, attempt }; + + let params = + frost::ThresholdParams::new(t, self.spec.n(&removed), our_i.start).unwrap(); + let shares = u16::from(our_i.end) - u16::from(our_i.start); + + self + .processors + .send( + self.spec.set().network, + processor_messages::key_gen::CoordinatorMessage::GenerateKey { id, params, shares }, + ) + .await; + } + } + Topic::DkgConfirmation => unreachable!(), + Topic::SubstrateSign(inner_id) => { + let id = processor_messages::coordinator::SubstrateSignId { + session: self.spec.set().session, + id: inner_id, + attempt, + }; + match inner_id { + SubstrateSignableId::CosigningSubstrateBlock(block) => { + let block_number = SeraiBlockNumber::get(self.txn, block) + .expect("couldn't get the block number for prior attempted cosign"); + + // Check if the cosigner has a signature from our set for this block/a newer one + let latest_cosign = + crate::cosign_evaluator::LatestCosign::get(self.txn, self.spec.set().network) + .map_or(0, |cosign| cosign.block_number); + if latest_cosign < block_number { + // Instruct the processor to start the next attempt + self + .processors + .send( + self.spec.set().network, + processor_messages::coordinator::CoordinatorMessage::CosignSubstrateBlock { + id, + block_number, + }, + ) + .await; + } + } + SubstrateSignableId::Batch(batch) => { + // If the Batch hasn't appeared on-chain... 
+ if BatchInstructionsHashDb::get(self.txn, self.spec.set().network, batch).is_none() { + // Instruct the processor to start the next attempt + // The processor won't continue if it's already signed a Batch + // Prior checking if the Batch is on-chain just may reduce the non-participating + // 33% from publishing their re-attempt messages + self + .processors + .send( + self.spec.set().network, + processor_messages::coordinator::CoordinatorMessage::BatchReattempt { id }, + ) + .await; + } + } + } + } + Topic::Sign(id) => { + // Instruct the processor to start the next attempt + // If it has already noted a completion, it won't send a preprocess and will simply drop + // the re-attempt message + self + .processors + .send( + self.spec.set().network, + processor_messages::sign::CoordinatorMessage::Reattempt { + id: processor_messages::sign::SignId { + session: self.spec.set().session, + id, + attempt, + }, + }, + ) + .await; + } + } + } + } } #[allow(clippy::too_many_arguments)] pub(crate) async fn handle_new_blocks< D: Db, Pro: Processors, - FPst: Future, - PST: Clone + Fn(ValidatorSet, PstTxType, serai_client::Transaction) -> FPst, - FPtt: Future, - PTT: Clone + Fn(Transaction) -> FPtt, - FRid: Future, - RID: RIDTrait, + PST: PublishSeraiTransaction, + PTT: PTTTrait, + RID: RIDTrait, P: P2p, >( db: &mut D, key: &Zeroizing<::F>, - recognized_id: RID, + recognized_id: &RID, processors: &Pro, - publish_serai_tx: PST, + publish_serai_tx: &PST, publish_tributary_tx: &PTT, spec: &TributarySpec, tributary: &TributaryReader, ) { let genesis = tributary.genesis(); - let mut last_block = LastBlock::get(db, genesis).unwrap_or(genesis); + let mut last_block = LastHandledBlock::get(db, genesis).unwrap_or(genesis); + let mut block_number = TributaryBlockNumber::get(db, last_block).unwrap_or(0); while let Some(next) = tributary.block_after(&last_block) { let block = tributary.block(&next).unwrap(); + block_number += 1; // Make sure we have all of the provided transactions for this block for tx in &block.transactions { @@ -182,20 +583,26 @@ pub(crate) async fn handle_new_blocks< } } - handle_block::<_, _, _, _, _, _, _, _, P>( + let mut db_clone = db.clone(); + let mut txn = db_clone.txn(); + TributaryBlockNumber::set(&mut txn, next, &block_number); + (TributaryBlockHandler { db, - key, - recognized_id.clone(), + txn: &mut txn, + spec, + our_key: key, + recognized_id, processors, - publish_serai_tx.clone(), + publish_serai_tx, publish_tributary_tx, - spec, block, - ) + block_number, + _p2p: PhantomData::
<P>
, + }) + .handle() .await; last_block = next; - let mut txn = db.txn(); - LastBlock::set(&mut txn, genesis, &next); + LastHandledBlock::set(&mut txn, genesis, &next); txn.commit(); } } @@ -204,8 +611,7 @@ pub(crate) async fn scan_tributaries_task< D: Db, Pro: Processors, P: P2p, - FRid: Send + Future, - RID: 'static + Send + Sync + RIDTrait, + RID: 'static + Send + Sync + Clone + RIDTrait, >( raw_db: D, key: Zeroizing<::F>, @@ -240,81 +646,13 @@ pub(crate) async fn scan_tributaries_task< // the next block occurs let next_block_notification = tributary.next_block_notification().await; - handle_new_blocks::<_, _, _, _, _, _, _, _, P>( + handle_new_blocks::<_, _, _, _, _, P>( &mut tributary_db, &key, - recognized_id.clone(), + &recognized_id, &processors, - |set, tx_type, tx| { - let serai = serai.clone(); - async move { - loop { - match serai.publish(&tx).await { - Ok(_) => { - log::info!("set key pair for {set:?}"); - break; - } - // This is assumed to be some ephemeral error due to the assumed fault-free - // creation - // TODO2: Differentiate connection errors from invariants - Err(e) => { - if let Ok(serai) = serai.as_of_latest_finalized_block().await { - let serai = serai.validator_sets(); - - // The following block is irrelevant, and can/likely will fail, if - // we're publishing a TX for an old session - // If we're on a newer session, move on - if let Ok(Some(current_session)) = - serai.session(spec.set().network).await - { - if current_session.0 > spec.set().session.0 { - log::warn!( - "trying to publish a TX relevant to a set {} {:?}", - "which isn't the latest", - set - ); - break; - } - } - - // Check if someone else published the TX in question - match tx_type { - PstTxType::SetKeys => { - if matches!(serai.keys(spec.set()).await, Ok(Some(_))) { - log::info!("another coordinator set key pair for {:?}", set); - break; - } - } - PstTxType::RemoveParticipant(removed) => { - if let Ok(Some(participants)) = - serai.participants(spec.set().network).await - { - if !participants - .iter() - .any(|(participant, _)| participant.0 == removed) - { - log::info!( - "another coordinator published removal for {:?}", - hex::encode(removed) - ); - break; - } - } - } - } - } - - log::error!( - "couldn't connect to Serai node to publish {tx_type:?} TX: {:?}", - e - ); - tokio::time::sleep(core::time::Duration::from_secs(10)).await; - } - } - } - } - }, - &|tx| { + &*serai, + &|tx: Transaction| { let tributary = tributary.clone(); async move { match tributary.add_transaction(tx.clone()).await { diff --git a/coordinator/src/tributary/signing_protocol.rs b/coordinator/src/tributary/signing_protocol.rs new file mode 100644 index 000000000..a90ed4799 --- /dev/null +++ b/coordinator/src/tributary/signing_protocol.rs @@ -0,0 +1,331 @@ +/* + A MuSig-based signing protocol executed with the validators' keys. + + This is used for confirming the results of a DKG on-chain, an operation requiring all validators + which aren't specified as removed while still satisfying a supermajority. + + Since we're using the validator's keys, as needed for their being the root of trust, the + coordinator must perform the signing. This is distinct from all other group-signing operations, + as they're all done by the processor. + + The MuSig-aggregation achieves on-chain efficiency and enables a more secure design pattern. + While we could individually tack votes, that'd require logic to prevent voting multiple times and + tracking the accumulated votes. 
MuSig-aggregation simply requires checking the list is sorted and + the list's weight exceeds the threshold. + + Instead of maintaining state in memory, a combination of the DB and re-execution are used. This + is deemed acceptable re: performance as: + + 1) This is only done prior to a DKG being confirmed on Substrate and is assumed infrequent. + 2) This is an O(n) algorithm. + 3) The size of the validator set is bounded by MAX_KEY_SHARES_PER_SET. + + Accordingly, this should be tolerable. + + As for safety, it is explicitly unsafe to reuse nonces across signing sessions. This raises + concerns regarding our re-execution which is dependent on fixed nonces. Safety is derived from + the nonces being context-bound under a BFT protocol. The flow is as follows: + + 1) Decide the nonce. + 2) Publish the nonces' commitments, receiving everyone elses *and potentially the message to be + signed*. + 3) Sign and publish the signature share. + + In order for nonce re-use to occur, the received nonce commitments (or the message to be signed) + would have to be distinct and sign would have to be called again. + + Before we act on any received messages, they're ordered and finalized by a BFT algorithm. The + only way to operate on distinct received messages would be if: + + 1) A logical flaw exists, letting new messages over write prior messages + 2) A reorganization occurred from chain A to chain B, and with it, different messages + + Reorganizations are not supported, as BFT is assumed by the presence of a BFT algorithm. While + a significant amount of processes may be byzantine, leading to BFT being broken, that still will + not trigger a reorganization. The only way to move to a distinct chain, with distinct messages, + would be by rebuilding the local process (this time following chain B). Upon any complete + rebuild, we'd re-decide nonces, achieving safety. This does set a bound preventing partial + rebuilds which is accepted. + + Additionally, to ensure a rebuilt service isn't flagged as malicious, we have to check the + commitments generated from the decided nonces are in fact its commitments on-chain (TODO). + + TODO: We also need to review how we're handling Processor preprocesses and likely implement the + same on-chain-preprocess-matches-presumed-preprocess check before publishing shares. 
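
  As a condensed sketch of the cached-preprocess encryption this safety argument relies on
  (mirroring the code below; `context`, `key_repr`, and `cache` stand in for the actual
  fields), the cache is XOR-encrypted under a key bound to both the signing context and our
  private key:

    let mut preimage = b"Cached Preprocess Encryption Key".to_vec();
    preimage.extend(context.encode());
    preimage.extend(key_repr);
    let encryption_key = Blake2s256::digest(&preimage);
    for b in 0 .. 32 {
      cache[b] ^= encryption_key[b];
    }
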
+*/ + +use core::ops::Deref; +use std::collections::HashMap; + +use zeroize::{Zeroize, Zeroizing}; + +use rand_core::OsRng; + +use blake2::{Digest, Blake2s256}; + +use ciphersuite::{ + group::{ff::PrimeField, GroupEncoding}, + Ciphersuite, Ristretto, +}; +use frost::{ + FrostError, + dkg::{Participant, musig::musig}, + ThresholdKeys, + sign::*, +}; +use frost_schnorrkel::Schnorrkel; + +use scale::Encode; + +use serai_client::{ + Public, + validator_sets::primitives::{KeyPair, musig_context, set_keys_message}, +}; + +use serai_db::*; + +use crate::tributary::TributarySpec; + +create_db!( + SigningProtocolDb { + CachedPreprocesses: (context: &impl Encode) -> [u8; 32] + } +); + +struct SigningProtocol<'a, T: DbTxn, C: Encode> { + pub(crate) key: &'a Zeroizing<::F>, + pub(crate) spec: &'a TributarySpec, + pub(crate) txn: &'a mut T, + pub(crate) context: C, +} + +impl SigningProtocol<'_, T, C> { + fn preprocess_internal( + &mut self, + participants: &[::G], + ) -> (AlgorithmSignMachine, [u8; 64]) { + // Encrypt the cached preprocess as recovery of it will enable recovering the private key + // While the DB isn't expected to be arbitrarily readable, it isn't a proper secret store and + // shouldn't be trusted as one + let mut encryption_key = { + let mut encryption_key_preimage = + Zeroizing::new(b"Cached Preprocess Encryption Key".to_vec()); + encryption_key_preimage.extend(self.context.encode()); + let repr = Zeroizing::new(self.key.to_repr()); + encryption_key_preimage.extend(repr.deref()); + Blake2s256::digest(&encryption_key_preimage) + }; + let encryption_key_slice: &mut [u8] = encryption_key.as_mut(); + + let algorithm = Schnorrkel::new(b"substrate"); + let keys: ThresholdKeys = + musig(&musig_context(self.spec.set()), self.key, participants) + .expect("signing for a set we aren't in/validator present multiple times") + .into(); + + if CachedPreprocesses::get(self.txn, &self.context).is_none() { + let (machine, _) = + AlgorithmMachine::new(algorithm.clone(), keys.clone()).preprocess(&mut OsRng); + + let mut cache = machine.cache(); + assert_eq!(cache.0.len(), 32); + #[allow(clippy::needless_range_loop)] + for b in 0 .. 32 { + cache.0[b] ^= encryption_key_slice[b]; + } + + CachedPreprocesses::set(self.txn, &self.context, &cache.0); + } + + let cached = CachedPreprocesses::get(self.txn, &self.context).unwrap(); + let mut cached: Zeroizing<[u8; 32]> = Zeroizing::new(cached); + #[allow(clippy::needless_range_loop)] + for b in 0 .. 
32 { + cached[b] ^= encryption_key_slice[b]; + } + encryption_key_slice.zeroize(); + let (machine, preprocess) = + AlgorithmSignMachine::from_cache(algorithm, keys, CachedPreprocess(cached)); + + (machine, preprocess.serialize().try_into().unwrap()) + } + + fn share_internal( + &mut self, + participants: &[::G], + mut serialized_preprocesses: HashMap>, + msg: &[u8], + ) -> Result<(AlgorithmSignatureMachine, [u8; 32]), Participant> { + let machine = self.preprocess_internal(participants).0; + + let mut participants = serialized_preprocesses.keys().copied().collect::>(); + participants.sort(); + let mut preprocesses = HashMap::new(); + for participant in participants { + preprocesses.insert( + participant, + machine + .read_preprocess(&mut serialized_preprocesses.remove(&participant).unwrap().as_slice()) + .map_err(|_| participant)?, + ); + } + + let (machine, share) = machine.sign(preprocesses, msg).map_err(|e| match e { + FrostError::InternalError(e) => unreachable!("FrostError::InternalError {e}"), + FrostError::InvalidParticipant(_, _) | + FrostError::InvalidSigningSet(_) | + FrostError::InvalidParticipantQuantity(_, _) | + FrostError::DuplicatedParticipant(_) | + FrostError::MissingParticipant(_) => unreachable!("{e:?}"), + FrostError::InvalidPreprocess(p) | FrostError::InvalidShare(p) => p, + })?; + + Ok((machine, share.serialize().try_into().unwrap())) + } + + fn complete_internal( + machine: AlgorithmSignatureMachine, + shares: HashMap>, + ) -> Result<[u8; 64], Participant> { + let shares = shares + .into_iter() + .map(|(p, share)| { + machine.read_share(&mut share.as_slice()).map(|share| (p, share)).map_err(|_| p) + }) + .collect::, _>>()?; + let signature = machine.complete(shares).map_err(|e| match e { + FrostError::InternalError(e) => unreachable!("FrostError::InternalError {e}"), + FrostError::InvalidParticipant(_, _) | + FrostError::InvalidSigningSet(_) | + FrostError::InvalidParticipantQuantity(_, _) | + FrostError::DuplicatedParticipant(_) | + FrostError::MissingParticipant(_) => unreachable!("{e:?}"), + FrostError::InvalidPreprocess(p) | FrostError::InvalidShare(p) => p, + })?; + Ok(signature.to_bytes()) + } +} + +// Get the keys of the participants, noted by their threshold is, and return a new map indexed by +// the MuSig is. 
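// As a hypothetical example of the mapping: if validator A holds two key shares and validator
// B holds one, A's threshold i's are 1 and 2 while B's is 3. Sorting the present signers by
// threshold i and re-indexing from 1 yields MuSig i's of A -> 1, B -> 2, independent of how
// many key shares each signer holds.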
+fn threshold_i_map_to_keys_and_musig_i_map( + spec: &TributarySpec, + removed: &[::G], + our_key: &Zeroizing<::F>, + mut map: HashMap>, +) -> (Vec<::G>, HashMap>) { + // Insert our own index so calculations aren't offset + let our_threshold_i = spec + .i(removed, ::generator() * our_key.deref()) + .expect("MuSig t-of-n signing a for a protocol we were removed from") + .start; + assert!(map.insert(our_threshold_i, vec![]).is_none()); + + let spec_validators = spec.validators(); + let key_from_threshold_i = |threshold_i| { + for (key, _) in &spec_validators { + if threshold_i == spec.i(removed, *key).expect("MuSig t-of-n participant was removed").start { + return *key; + } + } + panic!("requested info for threshold i which doesn't exist") + }; + + let mut sorted = vec![]; + let mut threshold_is = map.keys().copied().collect::>(); + threshold_is.sort(); + for threshold_i in threshold_is { + sorted.push((key_from_threshold_i(threshold_i), map.remove(&threshold_i).unwrap())); + } + + // Now that signers are sorted, with their shares, create a map with the is needed for MuSig + let mut participants = vec![]; + let mut map = HashMap::new(); + for (raw_i, (key, share)) in sorted.into_iter().enumerate() { + let musig_i = u16::try_from(raw_i).unwrap() + 1; + participants.push(key); + map.insert(Participant::new(musig_i).unwrap(), share); + } + + map.remove(&our_threshold_i).unwrap(); + + (participants, map) +} + +type DkgConfirmerSigningProtocol<'a, T> = SigningProtocol<'a, T, (&'static [u8; 12], u32)>; + +pub(crate) struct DkgConfirmer<'a, T: DbTxn> { + key: &'a Zeroizing<::F>, + spec: &'a TributarySpec, + removed: Vec<::G>, + txn: &'a mut T, + attempt: u32, +} + +impl DkgConfirmer<'_, T> { + pub(crate) fn new<'a>( + key: &'a Zeroizing<::F>, + spec: &'a TributarySpec, + txn: &'a mut T, + attempt: u32, + ) -> Option> { + // This relies on how confirmations are inlined into the DKG protocol and they accordingly + // share attempts + let removed = crate::tributary::removed_as_of_dkg_attempt(txn, spec.genesis(), attempt)?; + Some(DkgConfirmer { key, spec, removed, txn, attempt }) + } + fn signing_protocol(&mut self) -> DkgConfirmerSigningProtocol<'_, T> { + let context = (b"DkgConfirmer", self.attempt); + SigningProtocol { key: self.key, spec: self.spec, txn: self.txn, context } + } + + fn preprocess_internal(&mut self) -> (AlgorithmSignMachine, [u8; 64]) { + let participants = self.spec.validators().iter().map(|val| val.0).collect::>(); + self.signing_protocol().preprocess_internal(&participants) + } + // Get the preprocess for this confirmation. + pub(crate) fn preprocess(&mut self) -> [u8; 64] { + self.preprocess_internal().1 + } + + fn share_internal( + &mut self, + preprocesses: HashMap>, + key_pair: &KeyPair, + ) -> Result<(AlgorithmSignatureMachine, [u8; 32]), Participant> { + let participants = self.spec.validators().iter().map(|val| val.0).collect::>(); + let preprocesses = + threshold_i_map_to_keys_and_musig_i_map(self.spec, &self.removed, self.key, preprocesses).1; + let msg = set_keys_message( + &self.spec.set(), + &self.removed.iter().map(|key| Public(key.to_bytes())).collect::>(), + key_pair, + ); + self.signing_protocol().share_internal(&participants, preprocesses, &msg) + } + // Get the share for this confirmation, if the preprocesses are valid. 
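  // A hedged sketch of the confirmer's overall flow (gathering everyone's on-chain
  // preprocesses and shares is assumed to happen elsewhere; error handling is elided):
  //
  //   let mut confirmer = DkgConfirmer::new(key, spec, txn, attempt).unwrap();
  //   let preprocess = confirmer.preprocess();                   // published via DkgShares
  //   let share = confirmer.share(preprocesses, &key_pair)?;     // published via DkgConfirmed
  //   let sig = confirmer.complete(preprocesses, &key_pair, shares)?; // 64-byte signature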
+ pub(crate) fn share( + &mut self, + preprocesses: HashMap>, + key_pair: &KeyPair, + ) -> Result<[u8; 32], Participant> { + self.share_internal(preprocesses, key_pair).map(|(_, share)| share) + } + + pub(crate) fn complete( + &mut self, + preprocesses: HashMap>, + key_pair: &KeyPair, + shares: HashMap>, + ) -> Result<[u8; 64], Participant> { + let shares = + threshold_i_map_to_keys_and_musig_i_map(self.spec, &self.removed, self.key, shares).1; + + let machine = self + .share_internal(preprocesses, key_pair) + .expect("trying to complete a machine which failed to preprocess") + .0; + + DkgConfirmerSigningProtocol::<'_, T>::complete_internal(machine, shares) + } +} diff --git a/coordinator/src/tributary/spec.rs b/coordinator/src/tributary/spec.rs new file mode 100644 index 000000000..92905490f --- /dev/null +++ b/coordinator/src/tributary/spec.rs @@ -0,0 +1,156 @@ +use core::{ops::Range, fmt::Debug}; +use std::{io, collections::HashMap}; + +use transcript::{Transcript, RecommendedTranscript}; + +use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto}; +use frost::Participant; + +use scale::Encode; +use borsh::{BorshSerialize, BorshDeserialize}; + +use serai_client::{primitives::PublicKey, validator_sets::primitives::ValidatorSet}; + +fn borsh_serialize_validators( + validators: &Vec<(::G, u16)>, + writer: &mut W, +) -> Result<(), io::Error> { + let len = u16::try_from(validators.len()).unwrap(); + BorshSerialize::serialize(&len, writer)?; + for validator in validators { + BorshSerialize::serialize(&validator.0.to_bytes(), writer)?; + BorshSerialize::serialize(&validator.1, writer)?; + } + Ok(()) +} + +fn borsh_deserialize_validators( + reader: &mut R, +) -> Result::G, u16)>, io::Error> { + let len: u16 = BorshDeserialize::deserialize_reader(reader)?; + let mut res = vec![]; + for _ in 0 .. len { + let compressed: [u8; 32] = BorshDeserialize::deserialize_reader(reader)?; + let point = Option::from(::G::from_bytes(&compressed)) + .ok_or_else(|| io::Error::other("invalid point for validator"))?; + let weight: u16 = BorshDeserialize::deserialize_reader(reader)?; + res.push((point, weight)); + } + Ok(res) +} + +#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] +pub struct TributarySpec { + serai_block: [u8; 32], + start_time: u64, + set: ValidatorSet, + #[borsh( + serialize_with = "borsh_serialize_validators", + deserialize_with = "borsh_deserialize_validators" + )] + validators: Vec<(::G, u16)>, +} + +impl TributarySpec { + pub fn new( + serai_block: [u8; 32], + start_time: u64, + set: ValidatorSet, + set_participants: Vec<(PublicKey, u16)>, + ) -> TributarySpec { + let mut validators = vec![]; + for (participant, shares) in set_participants { + let participant = ::read_G::<&[u8]>(&mut participant.0.as_ref()) + .expect("invalid key registered as participant"); + validators.push((participant, shares)); + } + + Self { serai_block, start_time, set, validators } + } + + pub fn set(&self) -> ValidatorSet { + self.set + } + + pub fn genesis(&self) -> [u8; 32] { + // Calculate the genesis for this Tributary + let mut genesis = RecommendedTranscript::new(b"Serai Tributary Genesis"); + // This locks it to a specific Serai chain + genesis.append_message(b"serai_block", self.serai_block); + genesis.append_message(b"session", self.set.session.0.to_le_bytes()); + genesis.append_message(b"network", self.set.network.encode()); + let genesis = genesis.challenge(b"genesis"); + let genesis_ref: &[u8] = genesis.as_ref(); + genesis_ref[.. 
32].try_into().unwrap() + } + + pub fn start_time(&self) -> u64 { + self.start_time + } + + pub fn n(&self, removed_validators: &[::G]) -> u16 { + self + .validators + .iter() + .map(|(validator, weight)| if removed_validators.contains(validator) { 0 } else { *weight }) + .sum() + } + + pub fn t(&self) -> u16 { + // t doesn't change with regards to the amount of removed validators + ((2 * self.n(&[])) / 3) + 1 + } + + pub fn i( + &self, + removed_validators: &[::G], + key: ::G, + ) -> Option> { + let mut all_is = HashMap::new(); + let mut i = 1; + for (validator, weight) in &self.validators { + all_is.insert( + *validator, + Range { start: Participant::new(i).unwrap(), end: Participant::new(i + weight).unwrap() }, + ); + i += weight; + } + + let original_i = all_is.get(&key)?.clone(); + let mut result_i = original_i.clone(); + for removed_validator in removed_validators { + let removed_i = all_is + .get(removed_validator) + .expect("removed validator wasn't present in set to begin with"); + // If the queried key was removed, return None + if &original_i == removed_i { + return None; + } + + // If the removed was before the queried, shift the queried down accordingly + if removed_i.start < original_i.start { + let removed_shares = u16::from(removed_i.end) - u16::from(removed_i.start); + result_i.start = Participant::new(u16::from(original_i.start) - removed_shares).unwrap(); + result_i.end = Participant::new(u16::from(original_i.end) - removed_shares).unwrap(); + } + } + Some(result_i) + } + + pub fn reverse_lookup_i( + &self, + removed_validators: &[::G], + i: Participant, + ) -> Option<::G> { + for (validator, _) in &self.validators { + if self.i(removed_validators, *validator).map_or(false, |range| range.contains(&i)) { + return Some(*validator); + } + } + None + } + + pub fn validators(&self) -> Vec<(::G, u64)> { + self.validators.iter().map(|(validator, weight)| (*validator, u64::from(*weight))).collect() + } +} diff --git a/coordinator/src/tributary/transaction.rs b/coordinator/src/tributary/transaction.rs new file mode 100644 index 000000000..22650154a --- /dev/null +++ b/coordinator/src/tributary/transaction.rs @@ -0,0 +1,672 @@ +use core::{ops::Deref, fmt::Debug}; +use std::io; + +use zeroize::Zeroizing; +use rand_core::{RngCore, CryptoRng}; + +use blake2::{Digest, Blake2s256}; +use transcript::{Transcript, RecommendedTranscript}; + +use ciphersuite::{ + group::{ff::Field, GroupEncoding}, + Ciphersuite, Ristretto, +}; +use schnorr::SchnorrSignature; +use frost::Participant; + +use scale::{Encode, Decode}; +use processor_messages::coordinator::SubstrateSignableId; + +use tributary::{ + TRANSACTION_SIZE_LIMIT, ReadWrite, + transaction::{Signed, TransactionError, TransactionKind, Transaction as TransactionTrait}, +}; + +#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode)] +pub enum Label { + Preprocess, + Share, +} + +impl Label { + // TODO: Should nonces be u8 thanks to our use of topics? 
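  // As presently allocated (see Transaction::sign further below), a label's nonce is simply
  // its position within a topic's signing flow: Preprocess consumes nonce 0, Share nonce 1.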
+ pub fn nonce(&self) -> u32 { + match self { + Label::Preprocess => 0, + Label::Share => 1, + } + } +} + +#[derive(Clone, PartialEq, Eq)] +pub struct SignData { + pub plan: Id, + pub attempt: u32, + pub label: Label, + + pub data: Vec>, + + pub signed: Signed, +} + +impl Debug for SignData { + fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { + fmt + .debug_struct("SignData") + .field("id", &hex::encode(self.plan.encode())) + .field("attempt", &self.attempt) + .field("label", &self.label) + .field("signer", &hex::encode(self.signed.signer.to_bytes())) + .finish_non_exhaustive() + } +} + +impl SignData { + pub(crate) fn read(reader: &mut R) -> io::Result { + let plan = Id::decode(&mut scale::IoReader(&mut *reader)) + .map_err(|_| io::Error::other("invalid plan in SignData"))?; + + let mut attempt = [0; 4]; + reader.read_exact(&mut attempt)?; + let attempt = u32::from_le_bytes(attempt); + + let mut label = [0; 1]; + reader.read_exact(&mut label)?; + let label = match label[0] { + 0 => Label::Preprocess, + 1 => Label::Share, + _ => Err(io::Error::other("invalid label in SignData"))?, + }; + + let data = { + let mut data_pieces = [0]; + reader.read_exact(&mut data_pieces)?; + if data_pieces[0] == 0 { + Err(io::Error::other("zero pieces of data in SignData"))?; + } + let mut all_data = vec![]; + for _ in 0 .. data_pieces[0] { + let mut data_len = [0; 2]; + reader.read_exact(&mut data_len)?; + let mut data = vec![0; usize::from(u16::from_le_bytes(data_len))]; + reader.read_exact(&mut data)?; + all_data.push(data); + } + all_data + }; + + let signed = Signed::read_without_nonce(reader, label.nonce())?; + + Ok(SignData { plan, attempt, label, data, signed }) + } + + pub(crate) fn write(&self, writer: &mut W) -> io::Result<()> { + writer.write_all(&self.plan.encode())?; + writer.write_all(&self.attempt.to_le_bytes())?; + writer.write_all(&[match self.label { + Label::Preprocess => 0, + Label::Share => 1, + }])?; + + writer.write_all(&[u8::try_from(self.data.len()).unwrap()])?; + for data in &self.data { + if data.len() > u16::MAX.into() { + // Currently, the largest individual preprocess is a Monero transaction + // It provides 4 commitments per input (128 bytes), a 64-byte proof for them, along with a + // key image and proof (96 bytes) + // Even with all of that, we could support 227 inputs in a single TX + // Monero is limited to ~120 inputs per TX + // + // Bitcoin has a much higher input count of 520, yet it only uses 64 bytes per preprocess + Err(io::Error::other("signing data exceeded 65535 bytes"))?; + } + writer.write_all(&u16::try_from(data.len()).unwrap().to_le_bytes())?; + writer.write_all(data)?; + } + + self.signed.write_without_nonce(writer) + } +} + +#[derive(Clone, PartialEq, Eq)] +pub enum Transaction { + RemoveParticipantDueToDkg { + participant: ::G, + signed: Signed, + }, + + DkgCommitments { + attempt: u32, + commitments: Vec>, + signed: Signed, + }, + DkgShares { + attempt: u32, + // Sending Participant, Receiving Participant, Share + shares: Vec>>, + confirmation_nonces: [u8; 64], + signed: Signed, + }, + InvalidDkgShare { + attempt: u32, + accuser: Participant, + faulty: Participant, + blame: Option>, + signed: Signed, + }, + DkgConfirmed { + attempt: u32, + confirmation_share: [u8; 32], + signed: Signed, + }, + + // Co-sign a Substrate block. 
+ CosignSubstrateBlock([u8; 32]), + + // When we have synchrony on a batch, we can allow signing it + // TODO (never?): This is less efficient compared to an ExternalBlock provided transaction, + // which would be binding over the block hash and automatically achieve synchrony on all + // relevant batches. ExternalBlock was removed for this due to complexity around the pipeline + // with the current processor, yet it would still be an improvement. + Batch { + block: [u8; 32], + batch: u32, + }, + // When a Serai block is finalized, with the contained batches, we can allow the associated plan + // IDs + SubstrateBlock(u64), + + SubstrateSign(SignData), + Sign(SignData<[u8; 32]>), + // This is defined as an Unsigned transaction in order to de-duplicate SignCompleted amongst + // reporters (who should all report the same thing) + // We do still track the signer in order to prevent a single signer from publishing arbitrarily + // many TXs without penalty + // Here, they're denoted as the first_signer, as only the signer of the first TX to be included + // with this pairing will be remembered on-chain + SignCompleted { + plan: [u8; 32], + tx_hash: Vec, + first_signer: ::G, + signature: SchnorrSignature, + }, +} + +impl Debug for Transaction { + fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { + match self { + Transaction::RemoveParticipantDueToDkg { participant, signed } => fmt + .debug_struct("Transaction::RemoveParticipantDueToDkg") + .field("participant", &hex::encode(participant.to_bytes())) + .field("signer", &hex::encode(signed.signer.to_bytes())) + .finish_non_exhaustive(), + Transaction::DkgCommitments { attempt, commitments: _, signed } => fmt + .debug_struct("Transaction::DkgCommitments") + .field("attempt", attempt) + .field("signer", &hex::encode(signed.signer.to_bytes())) + .finish_non_exhaustive(), + Transaction::DkgShares { attempt, signed, .. } => fmt + .debug_struct("Transaction::DkgShares") + .field("attempt", attempt) + .field("signer", &hex::encode(signed.signer.to_bytes())) + .finish_non_exhaustive(), + Transaction::InvalidDkgShare { attempt, accuser, faulty, .. } => fmt + .debug_struct("Transaction::InvalidDkgShare") + .field("attempt", attempt) + .field("accuser", accuser) + .field("faulty", faulty) + .finish_non_exhaustive(), + Transaction::DkgConfirmed { attempt, confirmation_share: _, signed } => fmt + .debug_struct("Transaction::DkgConfirmed") + .field("attempt", attempt) + .field("signer", &hex::encode(signed.signer.to_bytes())) + .finish_non_exhaustive(), + Transaction::CosignSubstrateBlock(block) => fmt + .debug_struct("Transaction::CosignSubstrateBlock") + .field("block", &hex::encode(block)) + .finish(), + Transaction::Batch { block, batch } => fmt + .debug_struct("Transaction::Batch") + .field("block", &hex::encode(block)) + .field("batch", &batch) + .finish(), + Transaction::SubstrateBlock(block) => { + fmt.debug_struct("Transaction::SubstrateBlock").field("block", block).finish() + } + Transaction::SubstrateSign(sign_data) => { + fmt.debug_struct("Transaction::SubstrateSign").field("sign_data", sign_data).finish() + } + Transaction::Sign(sign_data) => { + fmt.debug_struct("Transaction::Sign").field("sign_data", sign_data).finish() + } + Transaction::SignCompleted { plan, tx_hash, .. 
} => fmt + .debug_struct("Transaction::SignCompleted") + .field("plan", &hex::encode(plan)) + .field("tx_hash", &hex::encode(tx_hash)) + .finish_non_exhaustive(), + } + } +} + +impl ReadWrite for Transaction { + fn read(reader: &mut R) -> io::Result { + let mut kind = [0]; + reader.read_exact(&mut kind)?; + + match kind[0] { + 0 => Ok(Transaction::RemoveParticipantDueToDkg { + participant: Ristretto::read_G(reader)?, + signed: Signed::read_without_nonce(reader, 0)?, + }), + + 1 => { + let mut attempt = [0; 4]; + reader.read_exact(&mut attempt)?; + let attempt = u32::from_le_bytes(attempt); + + let commitments = { + let mut commitments_len = [0; 1]; + reader.read_exact(&mut commitments_len)?; + let commitments_len = usize::from(commitments_len[0]); + if commitments_len == 0 { + Err(io::Error::other("zero commitments in DkgCommitments"))?; + } + + let mut each_commitments_len = [0; 2]; + reader.read_exact(&mut each_commitments_len)?; + let each_commitments_len = usize::from(u16::from_le_bytes(each_commitments_len)); + if (commitments_len * each_commitments_len) > TRANSACTION_SIZE_LIMIT { + Err(io::Error::other( + "commitments present in transaction exceeded transaction size limit", + ))?; + } + let mut commitments = vec![vec![]; commitments_len]; + for commitments in &mut commitments { + *commitments = vec![0; each_commitments_len]; + reader.read_exact(commitments)?; + } + commitments + }; + + let signed = Signed::read_without_nonce(reader, 0)?; + + Ok(Transaction::DkgCommitments { attempt, commitments, signed }) + } + + 2 => { + let mut attempt = [0; 4]; + reader.read_exact(&mut attempt)?; + let attempt = u32::from_le_bytes(attempt); + + let shares = { + let mut share_quantity = [0; 1]; + reader.read_exact(&mut share_quantity)?; + + let mut key_share_quantity = [0; 1]; + reader.read_exact(&mut key_share_quantity)?; + + let mut share_len = [0; 2]; + reader.read_exact(&mut share_len)?; + let share_len = usize::from(u16::from_le_bytes(share_len)); + + let mut all_shares = vec![]; + for _ in 0 .. share_quantity[0] { + let mut shares = vec![]; + for _ in 0 .. 
key_share_quantity[0] { + let mut share = vec![0; share_len]; + reader.read_exact(&mut share)?; + shares.push(share); + } + all_shares.push(shares); + } + all_shares + }; + + let mut confirmation_nonces = [0; 64]; + reader.read_exact(&mut confirmation_nonces)?; + + let signed = Signed::read_without_nonce(reader, 1)?; + + Ok(Transaction::DkgShares { attempt, shares, confirmation_nonces, signed }) + } + + 3 => { + let mut attempt = [0; 4]; + reader.read_exact(&mut attempt)?; + let attempt = u32::from_le_bytes(attempt); + + let mut accuser = [0; 2]; + reader.read_exact(&mut accuser)?; + let accuser = Participant::new(u16::from_le_bytes(accuser)) + .ok_or_else(|| io::Error::other("invalid participant in InvalidDkgShare"))?; + + let mut faulty = [0; 2]; + reader.read_exact(&mut faulty)?; + let faulty = Participant::new(u16::from_le_bytes(faulty)) + .ok_or_else(|| io::Error::other("invalid participant in InvalidDkgShare"))?; + + let mut blame_len = [0; 2]; + reader.read_exact(&mut blame_len)?; + let mut blame = vec![0; u16::from_le_bytes(blame_len).into()]; + reader.read_exact(&mut blame)?; + + // This shares a nonce with DkgConfirmed as only one is expected + let signed = Signed::read_without_nonce(reader, 2)?; + + Ok(Transaction::InvalidDkgShare { + attempt, + accuser, + faulty, + blame: Some(blame).filter(|blame| !blame.is_empty()), + signed, + }) + } + + 4 => { + let mut attempt = [0; 4]; + reader.read_exact(&mut attempt)?; + let attempt = u32::from_le_bytes(attempt); + + let mut confirmation_share = [0; 32]; + reader.read_exact(&mut confirmation_share)?; + + let signed = Signed::read_without_nonce(reader, 2)?; + + Ok(Transaction::DkgConfirmed { attempt, confirmation_share, signed }) + } + + 5 => { + let mut block = [0; 32]; + reader.read_exact(&mut block)?; + Ok(Transaction::CosignSubstrateBlock(block)) + } + + 6 => { + let mut block = [0; 32]; + reader.read_exact(&mut block)?; + let mut batch = [0; 4]; + reader.read_exact(&mut batch)?; + Ok(Transaction::Batch { block, batch: u32::from_le_bytes(batch) }) + } + + 7 => { + let mut block = [0; 8]; + reader.read_exact(&mut block)?; + Ok(Transaction::SubstrateBlock(u64::from_le_bytes(block))) + } + + 8 => SignData::read(reader).map(Transaction::SubstrateSign), + 9 => SignData::read(reader).map(Transaction::Sign), + + 10 => { + let mut plan = [0; 32]; + reader.read_exact(&mut plan)?; + + let mut tx_hash_len = [0]; + reader.read_exact(&mut tx_hash_len)?; + let mut tx_hash = vec![0; usize::from(tx_hash_len[0])]; + reader.read_exact(&mut tx_hash)?; + + let first_signer = Ristretto::read_G(reader)?; + let signature = SchnorrSignature::::read(reader)?; + + Ok(Transaction::SignCompleted { plan, tx_hash, first_signer, signature }) + } + + _ => Err(io::Error::other("invalid transaction type")), + } + } + + fn write(&self, writer: &mut W) -> io::Result<()> { + match self { + Transaction::RemoveParticipantDueToDkg { participant, signed } => { + writer.write_all(&[0])?; + writer.write_all(&participant.to_bytes())?; + signed.write_without_nonce(writer) + } + + Transaction::DkgCommitments { attempt, commitments, signed } => { + writer.write_all(&[1])?; + writer.write_all(&attempt.to_le_bytes())?; + if commitments.is_empty() { + Err(io::Error::other("zero commitments in DkgCommitments"))? + } + writer.write_all(&[u8::try_from(commitments.len()).unwrap()])?; + for commitments_i in commitments { + if commitments_i.len() != commitments[0].len() { + Err(io::Error::other("commitments of differing sizes in DkgCommitments"))? 
+ } + } + writer.write_all(&u16::try_from(commitments[0].len()).unwrap().to_le_bytes())?; + for commitments in commitments { + writer.write_all(commitments)?; + } + signed.write_without_nonce(writer) + } + + Transaction::DkgShares { attempt, shares, confirmation_nonces, signed } => { + writer.write_all(&[2])?; + writer.write_all(&attempt.to_le_bytes())?; + + // `shares` is a Vec which is supposed to map to a HashMap>. Since we + // bound participants to 150, this conversion is safe if a valid in-memory transaction. + writer.write_all(&[u8::try_from(shares.len()).unwrap()])?; + // This assumes at least one share is being sent to another party + writer.write_all(&[u8::try_from(shares[0].len()).unwrap()])?; + let share_len = shares[0][0].len(); + // For BLS12-381 G2, this would be: + // - A 32-byte share + // - A 96-byte ephemeral key + // - A 128-byte signature + // Hence why this has to be u16 + writer.write_all(&u16::try_from(share_len).unwrap().to_le_bytes())?; + + for these_shares in shares { + assert_eq!(these_shares.len(), shares[0].len(), "amount of sent shares was variable"); + for share in these_shares { + assert_eq!(share.len(), share_len, "sent shares were of variable length"); + writer.write_all(share)?; + } + } + + writer.write_all(confirmation_nonces)?; + signed.write_without_nonce(writer) + } + + Transaction::InvalidDkgShare { attempt, accuser, faulty, blame, signed } => { + writer.write_all(&[3])?; + writer.write_all(&attempt.to_le_bytes())?; + writer.write_all(&u16::from(*accuser).to_le_bytes())?; + writer.write_all(&u16::from(*faulty).to_le_bytes())?; + + // Flattens Some(vec![]) to None on the expectation no actual blame will be 0-length + assert!(blame.as_ref().map_or(1, Vec::len) != 0); + let blame_len = + u16::try_from(blame.as_ref().unwrap_or(&vec![]).len()).expect("blame exceeded 64 KB"); + writer.write_all(&blame_len.to_le_bytes())?; + writer.write_all(blame.as_ref().unwrap_or(&vec![]))?; + + signed.write_without_nonce(writer) + } + + Transaction::DkgConfirmed { attempt, confirmation_share, signed } => { + writer.write_all(&[4])?; + writer.write_all(&attempt.to_le_bytes())?; + writer.write_all(confirmation_share)?; + signed.write_without_nonce(writer) + } + + Transaction::CosignSubstrateBlock(block) => { + writer.write_all(&[5])?; + writer.write_all(block) + } + + Transaction::Batch { block, batch } => { + writer.write_all(&[6])?; + writer.write_all(block)?; + writer.write_all(&batch.to_le_bytes()) + } + + Transaction::SubstrateBlock(block) => { + writer.write_all(&[7])?; + writer.write_all(&block.to_le_bytes()) + } + + Transaction::SubstrateSign(data) => { + writer.write_all(&[8])?; + data.write(writer) + } + Transaction::Sign(data) => { + writer.write_all(&[9])?; + data.write(writer) + } + Transaction::SignCompleted { plan, tx_hash, first_signer, signature } => { + writer.write_all(&[10])?; + writer.write_all(plan)?; + writer + .write_all(&[u8::try_from(tx_hash.len()).expect("tx hash length exceed 255 bytes")])?; + writer.write_all(tx_hash)?; + writer.write_all(&first_signer.to_bytes())?; + signature.write(writer) + } + } + } +} + +impl TransactionTrait for Transaction { + fn kind(&self) -> TransactionKind<'_> { + match self { + Transaction::RemoveParticipantDueToDkg { participant, signed } => { + TransactionKind::Signed((b"remove", participant.to_bytes()).encode(), signed) + } + + Transaction::DkgCommitments { attempt, commitments: _, signed } | + Transaction::DkgShares { attempt, signed, .. } | + Transaction::InvalidDkgShare { attempt, signed, .. 
} | + Transaction::DkgConfirmed { attempt, signed, .. } => { + TransactionKind::Signed((b"dkg", attempt).encode(), signed) + } + + Transaction::CosignSubstrateBlock(_) => TransactionKind::Provided("cosign"), + + Transaction::Batch { .. } => TransactionKind::Provided("batch"), + Transaction::SubstrateBlock(_) => TransactionKind::Provided("serai"), + + Transaction::SubstrateSign(data) => { + TransactionKind::Signed((b"substrate", data.plan, data.attempt).encode(), &data.signed) + } + Transaction::Sign(data) => { + TransactionKind::Signed((b"sign", data.plan, data.attempt).encode(), &data.signed) + } + Transaction::SignCompleted { .. } => TransactionKind::Unsigned, + } + } + + fn hash(&self) -> [u8; 32] { + let mut tx = self.serialize(); + if let TransactionKind::Signed(_, signed) = self.kind() { + // Make sure the part we're cutting off is the signature + assert_eq!(tx.drain((tx.len() - 64) ..).collect::>(), signed.signature.serialize()); + } + Blake2s256::digest([b"Coordinator Tributary Transaction".as_slice(), &tx].concat()).into() + } + + fn verify(&self) -> Result<(), TransactionError> { + // TODO: Check SubstrateSign's lengths here + + if let Transaction::SignCompleted { first_signer, signature, .. } = self { + if !signature.verify(*first_signer, self.sign_completed_challenge()) { + Err(TransactionError::InvalidContent)?; + } + } + + Ok(()) + } +} + +impl Transaction { + // Used to initially construct transactions so we can then get sig hashes and perform signing + pub fn empty_signed() -> Signed { + Signed { + signer: Ristretto::generator(), + nonce: 0, + signature: SchnorrSignature:: { + R: Ristretto::generator(), + s: ::F::ZERO, + }, + } + } + + // Sign a transaction + pub fn sign( + &mut self, + rng: &mut R, + genesis: [u8; 32], + key: &Zeroizing<::F>, + ) { + fn signed(tx: &mut Transaction) -> (u32, &mut Signed) { + #[allow(clippy::match_same_arms)] // Doesn't make semantic sense here + let nonce = match tx { + Transaction::RemoveParticipantDueToDkg { .. } => 0, + + Transaction::DkgCommitments { .. } => 0, + Transaction::DkgShares { .. } => 1, + Transaction::InvalidDkgShare { .. } | Transaction::DkgConfirmed { .. } => 2, + + Transaction::CosignSubstrateBlock(_) => panic!("signing CosignSubstrateBlock"), + + Transaction::Batch { .. } => panic!("signing Batch"), + Transaction::SubstrateBlock(_) => panic!("signing SubstrateBlock"), + + Transaction::SubstrateSign(data) => data.label.nonce(), + Transaction::Sign(data) => data.label.nonce(), + + Transaction::SignCompleted { .. } => panic!("signing SignCompleted"), + }; + + ( + nonce, + match tx { + Transaction::RemoveParticipantDueToDkg { ref mut signed, .. } | + Transaction::DkgCommitments { ref mut signed, .. } | + Transaction::DkgShares { ref mut signed, .. } | + Transaction::InvalidDkgShare { ref mut signed, .. } | + Transaction::DkgConfirmed { ref mut signed, .. } => signed, + + Transaction::CosignSubstrateBlock(_) => panic!("signing CosignSubstrateBlock"), + + Transaction::Batch { .. } => panic!("signing Batch"), + Transaction::SubstrateBlock(_) => panic!("signing SubstrateBlock"), + + Transaction::SubstrateSign(ref mut data) => &mut data.signed, + Transaction::Sign(ref mut data) => &mut data.signed, + + Transaction::SignCompleted { .. 
+        },
+      )
+    }
+
+    let (nonce, signed_ref) = signed(self);
+    signed_ref.signer = Ristretto::generator() * key.deref();
+    signed_ref.nonce = nonce;
+
+    let sig_nonce = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(rng));
+    signed(self).1.signature.R = <Ristretto as Ciphersuite>::generator() * sig_nonce.deref();
+    let sig_hash = self.sig_hash(genesis);
+    signed(self).1.signature = SchnorrSignature::<Ristretto>::sign(key, sig_nonce, sig_hash);
+  }
+
+  pub fn sign_completed_challenge(&self) -> <Ristretto as Ciphersuite>::F {
+    if let Transaction::SignCompleted { plan, tx_hash, first_signer, signature } = self {
+      let mut transcript =
+        RecommendedTranscript::new(b"Coordinator Tributary Transaction SignCompleted");
+      transcript.append_message(b"plan", plan);
+      transcript.append_message(b"tx_hash", tx_hash);
+      transcript.append_message(b"signer", first_signer.to_bytes());
+      transcript.append_message(b"nonce", signature.R.to_bytes());
+      Ristretto::hash_to_F(b"SignCompleted signature", &transcript.challenge(b"challenge"))
+    } else {
+      panic!("sign_completed_challenge called on transaction which wasn't SignCompleted")
+    }
+  }
+}
diff --git a/coordinator/tributary/Cargo.toml b/coordinator/tributary/Cargo.toml
index ff6dc1e31..b6a5a2518 100644
--- a/coordinator/tributary/Cargo.toml
+++ b/coordinator/tributary/Cargo.toml
@@ -7,6 +7,13 @@ repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/tribut
 authors = ["Luke Parker <lukeparker5132@gmail.com>"]
 edition = "2021"
 
+[package.metadata.docs.rs]
+all-features = true
+rustdoc-args = ["--cfg", "docsrs"]
+
+[lints]
+workspace = true
+
 [dependencies]
 async-trait = { version = "0.1", default-features = false }
 thiserror = { version = "1", default-features = false }
@@ -29,7 +36,8 @@ log = { version = "0.4", default-features = false, features = ["std"] }
 serai-db = { path = "../../common/db" }
 scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive"] }
 
-futures = { version = "0.3", default-features = false, features = ["std"] }
+futures-util = { version = "0.3", default-features = false, features = ["std", "sink", "channel"] }
+futures-channel = { version = "0.3", default-features = false, features = ["std", "sink"] }
 tendermint = { package = "tendermint-machine", path = "./tendermint" }
 
 tokio = { version = "1", default-features = false, features = ["sync", "time", "rt"] }
diff --git a/coordinator/tributary/src/block.rs b/coordinator/tributary/src/block.rs
index f931b5b76..6b9a0543f 100644
--- a/coordinator/tributary/src/block.rs
+++ b/coordinator/tributary/src/block.rs
@@ -174,7 +174,7 @@ impl<T: TransactionTrait> Block<T> {
     last_block: [u8; 32],
     mut locally_provided: HashMap<&'static str, VecDeque<T>>,
     get_and_increment_nonce: &mut G,
-    schema: N::SignatureScheme,
+    schema: &N::SignatureScheme,
     commit: impl Fn(u32) -> Option<Vec<u8>>,
     unsigned_in_chain: impl Fn([u8; 32]) -> bool,
     provided_in_chain: impl Fn([u8; 32]) -> bool, // TODO: merge this with unsigned_on_chain?
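Stepping back to the `Transaction::write` arms above: every variant is framed as a one-byte variant tag, fixed-width little-endian length prefixes (`u8` for participant-bounded counts, `u16` for share lengths), and then the raw bytes. Below is a minimal sketch of that framing in isolation, assuming a non-empty, rectangular `shares` matrix as the real code asserts (the helper name is illustrative, not from the codebase):

```rust
use std::io::{self, Write};

// Frame a matrix of equal-length byte strings: u8 outer count, u8 inner
// count, u16 LE shared element length, then the raw bytes in order.
fn write_shares<W: Write>(writer: &mut W, shares: &[Vec<Vec<u8>>]) -> io::Result<()> {
  writer.write_all(&[u8::try_from(shares.len()).unwrap()])?;
  writer.write_all(&[u8::try_from(shares[0].len()).unwrap()])?;
  let share_len = u16::try_from(shares[0][0].len()).unwrap();
  writer.write_all(&share_len.to_le_bytes())?;
  for these_shares in shares {
    for share in these_shares {
      // The u16 prefix is shared, so every element must have the same length
      assert_eq!(share.len(), usize::from(share_len));
      writer.write_all(share)?;
    }
  }
  Ok(())
}
```

The `u16` prefix caps a single element at 64 KB, which is why the BLS12-381 G2 example above (a 32-byte share, a 96-byte ephemeral key, and a 128-byte signature) still fits with room to spare.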
@@ -207,7 +207,7 @@ impl Block { let mut last_tx_order = Order::Provided; let mut included_in_block = HashSet::new(); let mut txs = Vec::with_capacity(self.transactions.len()); - for tx in self.transactions.iter() { + for tx in &self.transactions { let tx_hash = tx.hash(); txs.push(tx_hash); @@ -217,7 +217,7 @@ impl Block { Err(BlockError::ProvidedAlreadyIncluded)?; } - if let Some(local) = locally_provided.get_mut(order).and_then(|deque| deque.pop_front()) { + if let Some(local) = locally_provided.get_mut(order).and_then(VecDeque::pop_front) { // Since this was a provided TX, it must be an application TX let Transaction::Application(tx) = tx else { Err(BlockError::NonLocalProvided(txs.pop().unwrap()))? @@ -249,15 +249,11 @@ impl Block { } last_tx_order = current_tx_order; - // TODO: should we modify the verify_transaction to take `Transaction` or - // use this pattern of verifying tendermint Txs and app txs differently? match tx { - Transaction::Tendermint(tx) => { - match verify_tendermint_tx::(tx, schema.clone(), &commit) { - Ok(()) => {} - Err(e) => Err(BlockError::TransactionError(e))?, - } - } + Transaction::Tendermint(tx) => match verify_tendermint_tx::(tx, schema, &commit) { + Ok(()) => {} + Err(e) => Err(BlockError::TransactionError(e))?, + }, Transaction::Application(tx) => { match verify_transaction(tx, genesis, get_and_increment_nonce) { Ok(()) => {} diff --git a/coordinator/tributary/src/blockchain.rs b/coordinator/tributary/src/blockchain.rs index 71767f307..7063cea9b 100644 --- a/coordinator/tributary/src/blockchain.rs +++ b/coordinator/tributary/src/blockchain.rs @@ -73,7 +73,7 @@ impl Blockchain { let mut res = Self { db: Some(db.clone()), genesis, - participants: participants.iter().cloned().collect(), + participants: participants.iter().copied().collect(), block_number: 0, tip: genesis, @@ -139,25 +139,23 @@ impl Blockchain { order: &str, ) -> bool { let local_key = ProvidedTransactions::::locally_provided_quantity_key(genesis, order); - let local = - db.get(local_key).map(|bytes| u32::from_le_bytes(bytes.try_into().unwrap())).unwrap_or(0); + let local = db.get(local_key).map_or(0, |bytes| u32::from_le_bytes(bytes.try_into().unwrap())); let block_key = ProvidedTransactions::::block_provided_quantity_key(genesis, block, order); - let block = - db.get(block_key).map(|bytes| u32::from_le_bytes(bytes.try_into().unwrap())).unwrap_or(0); + let block = db.get(block_key).map_or(0, |bytes| u32::from_le_bytes(bytes.try_into().unwrap())); local >= block } pub(crate) fn tip_from_db(db: &D, genesis: [u8; 32]) -> [u8; 32] { - db.get(Self::tip_key(genesis)).map(|bytes| bytes.try_into().unwrap()).unwrap_or(genesis) + db.get(Self::tip_key(genesis)).map_or(genesis, |bytes| bytes.try_into().unwrap()) } pub(crate) fn add_transaction( &mut self, internal: bool, tx: Transaction, - schema: N::SignatureScheme, + schema: &N::SignatureScheme, ) -> Result { let db = self.db.as_ref().unwrap(); let genesis = self.genesis; @@ -177,8 +175,7 @@ impl Blockchain { if self.participants.contains(&signer) { Some( db.get(Self::next_nonce_key(&self.genesis, &signer, &order)) - .map(|bytes| u32::from_le_bytes(bytes.try_into().unwrap())) - .unwrap_or(0), + .map_or(0, |bytes| u32::from_le_bytes(bytes.try_into().unwrap())), ) } else { None @@ -211,15 +208,14 @@ impl Blockchain { .as_ref() .unwrap() .get(Self::next_nonce_key(&self.genesis, signer, order)) - .map(|bytes| u32::from_le_bytes(bytes.try_into().unwrap())) - .unwrap_or(0), + .map_or(0, |bytes| u32::from_le_bytes(bytes.try_into().unwrap())), ) } else { 
None } } - pub(crate) fn build_block(&mut self, schema: N::SignatureScheme) -> Block { + pub(crate) fn build_block(&mut self, schema: &N::SignatureScheme) -> Block { let block = Block::new( self.tip, self.provided.transactions.values().flatten().cloned().collect(), @@ -233,7 +229,7 @@ impl Blockchain { pub(crate) fn verify_block( &self, block: &Block, - schema: N::SignatureScheme, + schema: &N::SignatureScheme, allow_non_local_provided: bool, ) -> Result<(), BlockError> { let db = self.db.as_ref().unwrap(); @@ -258,8 +254,7 @@ impl Blockchain { let key = Self::next_nonce_key(&self.genesis, signer, order); let next = txn .get(&key) - .map(|next_nonce| u32::from_le_bytes(next_nonce.try_into().unwrap())) - .unwrap_or(0); + .map_or(0, |next_nonce| u32::from_le_bytes(next_nonce.try_into().unwrap())); txn.put(key, (next + 1).to_le_bytes()); Some(next) } else { @@ -272,6 +267,7 @@ impl Blockchain { provided_in_chain, allow_non_local_provided, ); + // Drop this TXN's changes as we're solely verifying the block drop(txn); res } @@ -281,7 +277,7 @@ impl Blockchain { &mut self, block: &Block, commit: Vec, - schema: N::SignatureScheme, + schema: &N::SignatureScheme, ) -> Result<(), BlockError> { self.verify_block::(block, schema, true)?; diff --git a/coordinator/tributary/src/lib.rs b/coordinator/tributary/src/lib.rs index a3d1bd705..dac7f4beb 100644 --- a/coordinator/tributary/src/lib.rs +++ b/coordinator/tributary/src/lib.rs @@ -8,7 +8,8 @@ use zeroize::Zeroizing; use ciphersuite::{Ciphersuite, Ristretto}; use scale::Decode; -use futures::{StreamExt, SinkExt, channel::mpsc::UnboundedReceiver}; +use futures_channel::mpsc::UnboundedReceiver; +use futures_util::{StreamExt, SinkExt}; use ::tendermint::{ ext::{BlockNumber, Commit, Block as BlockTrait, Network}, SignedMessageFor, SyncedBlock, SyncedBlockSender, SyncedBlockResultReceiver, MessageSender, @@ -189,7 +190,7 @@ impl Tributary { start_time }; let proposal = TendermintBlock( - blockchain.build_block::>(validators.clone()).serialize(), + blockchain.build_block::>(&validators).serialize(), ); let blockchain = Arc::new(RwLock::new(blockchain)); @@ -272,7 +273,7 @@ impl Tributary { let res = self.network.blockchain.write().await.add_transaction::>( true, tx, - self.network.signature_scheme(), + &self.network.signature_scheme(), ); if res == Ok(true) { self.network.p2p.broadcast(self.genesis, to_broadcast).await; @@ -343,7 +344,7 @@ impl Tributary { self.network.blockchain.write().await.add_transaction::>( false, tx, - self.network.signature_scheme(), + &self.network.signature_scheme(), ); log::debug!("received transaction message. 
valid new transaction: {res:?}");
    res == Ok(true)
diff --git a/coordinator/tributary/src/mempool.rs b/coordinator/tributary/src/mempool.rs
index 084d17300..344d45436 100644
--- a/coordinator/tributary/src/mempool.rs
+++ b/coordinator/tributary/src/mempool.rs
@@ -38,7 +38,6 @@ impl<D: Db, T: TransactionTrait> Mempool<D, T> {
     let tx_hash = tx.hash();
     let transaction_key = self.transaction_key(&tx_hash);
     let current_mempool_key = self.current_mempool_key();
-    #[allow(clippy::unwrap_or_default)]
     let mut current_mempool = self.db.get(&current_mempool_key).unwrap_or(vec![]);
 
     let mut txn = self.db.txn();
@@ -113,7 +112,7 @@
     blockchain_next_nonce: F,
     internal: bool,
     tx: Transaction<T>,
-    schema: N::SignatureScheme,
+    schema: &N::SignatureScheme,
     unsigned_in_chain: impl Fn([u8; 32]) -> bool,
     commit: impl Fn(u32) -> Option<Vec<u8>>,
   ) -> Result<bool, TransactionError> {
@@ -182,14 +181,14 @@
     signer: &<Ristretto as Ciphersuite>::G,
     order: Vec<u8>,
   ) -> Option<u32> {
-    self.last_nonce_in_mempool.get(&(*signer, order)).cloned().map(|nonce| nonce + 1)
+    self.last_nonce_in_mempool.get(&(*signer, order)).copied().map(|nonce| nonce + 1)
   }
 
   /// Get transactions to include in a block.
   pub(crate) fn block(&mut self) -> Vec<Transaction<T>> {
     let mut unsigned = vec![];
     let mut signed = vec![];
-    for hash in self.txs.keys().cloned().collect::<Vec<_>>() {
+    for hash in self.txs.keys().copied().collect::<Vec<_>>() {
       let tx = &self.txs[&hash];
 
       match tx.kind() {
@@ -222,7 +221,6 @@
   pub(crate) fn remove(&mut self, tx: &[u8; 32]) {
     let transaction_key = self.transaction_key(tx);
     let current_mempool_key = self.current_mempool_key();
-    #[allow(clippy::unwrap_or_default)]
     let current_mempool = self.db.get(&current_mempool_key).unwrap_or(vec![]);
 
     let mut i = 0;
diff --git a/coordinator/tributary/src/merkle.rs b/coordinator/tributary/src/merkle.rs
index e9322b70e..2a3ee3a12 100644
--- a/coordinator/tributary/src/merkle.rs
+++ b/coordinator/tributary/src/merkle.rs
@@ -17,13 +17,7 @@ pub(crate) fn merkle(hash_args: &[[u8; 32]]) -> [u8; 32] {
         [
           b"branch_hash".as_ref(),
           hashes[i].as_ref(),
-          hashes
-            .get(i + 1)
-            .map(|hash| {
-              let res: &[u8] = hash.as_ref();
-              res
-            })
-            .unwrap_or(zero.as_ref()),
+          hashes.get(i + 1).map_or(zero.as_ref(), AsRef::as_ref),
         ]
         .concat(),
       ));
@@ -33,5 +27,5 @@ pub(crate) fn merkle(hash_args: &[[u8; 32]]) -> [u8; 32] {
     hashes = interim;
   }
 
-  hashes.first().copied().map(Into::into).unwrap_or(zero)
+  hashes.first().copied().map_or(zero, Into::into)
 }
diff --git a/coordinator/tributary/src/provided.rs b/coordinator/tributary/src/provided.rs
index 822120160..103286afb 100644
--- a/coordinator/tributary/src/provided.rs
+++ b/coordinator/tributary/src/provided.rs
@@ -103,17 +103,11 @@ impl<D: Db, T: TransactionTrait> ProvidedTransactions<D, T> {
     // get local and on-chain tx numbers
     let local_key = Self::locally_provided_quantity_key(&self.genesis, order);
-    let mut local_quantity = self
-      .db
-      .get(&local_key)
-      .map(|bytes| u32::from_le_bytes(bytes.try_into().unwrap()))
-      .unwrap_or(0);
+    let mut local_quantity =
+      self.db.get(&local_key).map_or(0, |bytes| u32::from_le_bytes(bytes.try_into().unwrap()));
     let on_chain_key = Self::on_chain_provided_quantity_key(&self.genesis, order);
-    let on_chain_quantity = self
-      .db
-      .get(on_chain_key)
-      .map(|bytes| u32::from_le_bytes(bytes.try_into().unwrap()))
-      .unwrap_or(0);
+    let on_chain_quantity =
+      self.db.get(on_chain_key).map_or(0, |bytes| u32::from_le_bytes(bytes.try_into().unwrap()));
 
     let current_provided_key = self.current_provided_key();
@@ -136,7 +130,6 @@ impl<D: Db, T: TransactionTrait> ProvidedTransactions<D, T> {
       }
       txn.commit();
     } else {
-      #[allow(clippy::unwrap_or_default)]
       let mut currently_provided = txn.get(&current_provided_key).unwrap_or(vec![]);
       currently_provided.extend(tx_hash);
       txn.put(current_provided_key, currently_provided);
@@ -159,7 +152,7 @@ impl<D: Db, T: TransactionTrait> ProvidedTransactions<D, T> {
     block: [u8; 32],
     tx: [u8; 32],
   ) {
-    if let Some(next_tx) = self.transactions.get_mut(order).and_then(|queue| queue.pop_front()) {
+    if let Some(next_tx) = self.transactions.get_mut(order).and_then(VecDeque::pop_front) {
       assert_eq!(next_tx.hash(), tx);
 
       let current_provided_key = self.current_provided_key();
@@ -185,11 +178,8 @@ impl<D: Db, T: TransactionTrait> ProvidedTransactions<D, T> {
     // bump the on-chain tx number.
     let on_chain_key = Self::on_chain_provided_quantity_key(&self.genesis, order);
     let block_order_key = Self::block_provided_quantity_key(&self.genesis, &block, order);
-    let mut on_chain_quantity = self
-      .db
-      .get(&on_chain_key)
-      .map(|bytes| u32::from_le_bytes(bytes.try_into().unwrap()))
-      .unwrap_or(0);
+    let mut on_chain_quantity =
+      self.db.get(&on_chain_key).map_or(0, |bytes| u32::from_le_bytes(bytes.try_into().unwrap()));
     let this_provided_id = on_chain_quantity;
 
     txn.put(Self::on_chain_provided_key(&self.genesis, order, this_provided_id), tx);
diff --git a/coordinator/tributary/src/tendermint/mod.rs b/coordinator/tributary/src/tendermint/mod.rs
index 5662c1ed6..36f381c97 100644
--- a/coordinator/tributary/src/tendermint/mod.rs
+++ b/coordinator/tributary/src/tendermint/mod.rs
@@ -348,7 +348,7 @@ impl<D: Db, T: TransactionTrait, P: P2p> Network for TendermintNetwork<D, T, P>
       if self.blockchain.write().await.add_transaction::<Self>(
         true,
         Transaction::Tendermint(tx),
-        self.signature_scheme(),
+        &self.signature_scheme(),
       ) == Ok(true)
       {
         self.p2p.broadcast(signer.genesis, to_broadcast).await;
@@ -362,7 +362,7 @@
       .blockchain
       .read()
       .await
-      .verify_block::<Self>(&block, self.signature_scheme(), false)
+      .verify_block::<Self>(&block, &self.signature_scheme(), false)
       .map_err(|e| match e {
         BlockError::NonLocalProvided(_) => TendermintBlockError::Temporal,
         _ => {
@@ -398,7 +398,7 @@
     let block_res = self.blockchain.write().await.add_block::<Self>(
       &block,
      encoded_commit.clone(),
-      self.signature_scheme(),
+      &self.signature_scheme(),
     );
     match block_res {
       Ok(()) => {
@@ -425,7 +425,7 @@
     *self.to_rebroadcast.write().await = vec![];
 
     Some(TendermintBlock(
-      self.blockchain.write().await.build_block::<Self>(self.signature_scheme()).serialize(),
+      self.blockchain.write().await.build_block::<Self>(&self.signature_scheme()).serialize(),
     ))
   }
 }
diff --git a/coordinator/tributary/src/tendermint/tx.rs b/coordinator/tributary/src/tendermint/tx.rs
index 99d6015d2..328ff3868 100644
--- a/coordinator/tributary/src/tendermint/tx.rs
+++ b/coordinator/tributary/src/tendermint/tx.rs
@@ -88,7 +88,7 @@ fn decode_and_verify_signed_message<N: Network>(
 // re-implements an entire foreign library's checks for malicious behavior).
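A note on the `schema` change threaded through this file and `tendermint/tx.rs` below: `N::SignatureScheme` is an `Arc`'d validator set, so taking it by value forced a refcount bump at every call site (hence the `validators.clone()` noise in the tests). Borrowing it is enough, since verification only reads the scheme. A sketch of the before/after shape, with `Validators` as a stand-in type:

```rust
use std::sync::Arc;

struct Validators; // stand-in for the validator set behind N::SignatureScheme

// Before: by value, so every caller had to clone the Arc
fn verify_by_value(_schema: Arc<Validators>) {}

// After: a shared borrow suffices for read-only verification
fn verify_by_ref(_schema: &Arc<Validators>) {}

fn main() {
  let validators = Arc::new(Validators);
  verify_by_value(validators.clone()); // refcount traffic on each call
  verify_by_ref(&validators); // no clone needed
  verify_by_ref(&validators);
}
```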
pub(crate) fn verify_tendermint_tx( tx: &TendermintTx, - schema: N::SignatureScheme, + schema: &N::SignatureScheme, commit: impl Fn(u32) -> Option>, ) -> Result<(), TransactionError> { tx.verify()?; @@ -98,8 +98,8 @@ pub(crate) fn verify_tendermint_tx( TendermintTx::SlashEvidence(ev) => { match ev { Evidence::ConflictingMessages(first, second) => { - let first = decode_and_verify_signed_message::(first, &schema)?.msg; - let second = decode_and_verify_signed_message::(second, &schema)?.msg; + let first = decode_and_verify_signed_message::(first, schema)?.msg; + let second = decode_and_verify_signed_message::(second, schema)?.msg; // Make sure they're distinct messages, from the same sender, within the same block if (first == second) || (first.sender != second.sender) || (first.block != second.block) { @@ -112,8 +112,8 @@ pub(crate) fn verify_tendermint_tx( } } Evidence::ConflictingPrecommit(first, second) => { - let first = decode_and_verify_signed_message::(first, &schema)?.msg; - let second = decode_and_verify_signed_message::(second, &schema)?.msg; + let first = decode_and_verify_signed_message::(first, schema)?.msg; + let second = decode_and_verify_signed_message::(second, schema)?.msg; if (first.sender != second.sender) || (first.block != second.block) { Err(TransactionError::InvalidContent)?; @@ -136,7 +136,7 @@ pub(crate) fn verify_tendermint_tx( Err(TransactionError::InvalidContent)? } Evidence::InvalidPrecommit(msg) => { - let msg = decode_and_verify_signed_message::(msg, &schema)?.msg; + let msg = decode_and_verify_signed_message::(msg, schema)?.msg; let Data::Precommit(Some((id, sig))) = &msg.data else { Err(TransactionError::InvalidContent)? @@ -173,7 +173,7 @@ pub(crate) fn verify_tendermint_tx( } } Evidence::InvalidValidRound(msg) => { - let msg = decode_and_verify_signed_message::(msg, &schema)?.msg; + let msg = decode_and_verify_signed_message::(msg, schema)?.msg; let Data::Proposal(Some(vr), _) = &msg.data else { Err(TransactionError::InvalidContent)? 
diff --git a/coordinator/tributary/src/tests/block.rs b/coordinator/tributary/src/tests/block.rs index 2e16f6605..0df72e6da 100644 --- a/coordinator/tributary/src/tests/block.rs +++ b/coordinator/tributary/src/tests/block.rs @@ -89,7 +89,7 @@ fn empty_block() { LAST, HashMap::new(), &mut |_, _| None, - validators, + &validators, commit, unsigned_in_chain, provided_in_chain, @@ -129,7 +129,7 @@ fn duplicate_nonces() { last_nonce += 1; Some(res) }, - validators.clone(), + &validators, commit, unsigned_in_chain, provided_in_chain, diff --git a/coordinator/tributary/src/tests/blockchain.rs b/coordinator/tributary/src/tests/blockchain.rs index a7ef1e877..6103a62f4 100644 --- a/coordinator/tributary/src/tests/blockchain.rs +++ b/coordinator/tributary/src/tests/blockchain.rs @@ -44,12 +44,12 @@ fn block_addition() { let genesis = new_genesis(); let validators = Arc::new(Validators::new(genesis, vec![]).unwrap()); let (db, mut blockchain) = new_blockchain::(genesis, &[]); - let block = blockchain.build_block::(validators.clone()); + let block = blockchain.build_block::(&validators); assert_eq!(block.header.parent, genesis); assert_eq!(block.header.transactions, [0; 32]); - blockchain.verify_block::(&block, validators.clone(), false).unwrap(); - assert!(blockchain.add_block::(&block, vec![], validators).is_ok()); + blockchain.verify_block::(&block, &validators, false).unwrap(); + assert!(blockchain.add_block::(&block, vec![], &validators).is_ok()); assert_eq!(blockchain.tip(), block.hash()); assert_eq!(blockchain.block_number(), 1); assert_eq!( @@ -64,21 +64,21 @@ fn invalid_block() { let validators = Arc::new(Validators::new(genesis, vec![]).unwrap()); let (_, mut blockchain) = new_blockchain::(genesis, &[]); - let block = blockchain.build_block::(validators.clone()); + let block = blockchain.build_block::(&validators); // Mutate parent { #[allow(clippy::redundant_clone)] // False positive let mut block = block.clone(); block.header.parent = Blake2s256::digest(block.header.parent).into(); - assert!(blockchain.verify_block::(&block, validators.clone(), false).is_err()); + assert!(blockchain.verify_block::(&block, &validators, false).is_err()); } - // Mutate tranactions merkle + // Mutate transactions merkle { let mut block = block; block.header.transactions = Blake2s256::digest(block.header.transactions).into(); - assert!(blockchain.verify_block::(&block, validators.clone(), false).is_err()); + assert!(blockchain.verify_block::(&block, &validators, false).is_err()); } let key = Zeroizing::new(::F::random(&mut OsRng)); @@ -89,7 +89,7 @@ fn invalid_block() { // Manually create the block to bypass build_block's checks let block = Block::new(blockchain.tip(), vec![], vec![Transaction::Application(tx.clone())]); assert_eq!(block.header.transactions, merkle(&[tx.hash()])); - assert!(blockchain.verify_block::(&block, validators.clone(), false).is_err()); + assert!(blockchain.verify_block::(&block, &validators, false).is_err()); } // Run the rest of the tests with them as a participant @@ -99,22 +99,22 @@ fn invalid_block() { { let block = Block::new(blockchain.tip(), vec![], vec![Transaction::Application(tx.clone())]); assert_eq!(block.header.transactions, merkle(&[tx.hash()])); - blockchain.verify_block::(&block, validators.clone(), false).unwrap(); + blockchain.verify_block::(&block, &validators, false).unwrap(); } { // Add a valid transaction let (_, mut blockchain) = new_blockchain(genesis, &[tx.1.signer]); blockchain - .add_transaction::(true, Transaction::Application(tx.clone()), validators.clone()) 
+ .add_transaction::(true, Transaction::Application(tx.clone()), &validators) .unwrap(); - let mut block = blockchain.build_block::(validators.clone()); + let mut block = blockchain.build_block::(&validators); assert_eq!(block.header.transactions, merkle(&[tx.hash()])); - blockchain.verify_block::(&block, validators.clone(), false).unwrap(); + blockchain.verify_block::(&block, &validators, false).unwrap(); // And verify mutating the transactions merkle now causes a failure block.header.transactions = merkle(&[]); - assert!(blockchain.verify_block::(&block, validators.clone(), false).is_err()); + assert!(blockchain.verify_block::(&block, &validators, false).is_err()); } { @@ -122,24 +122,22 @@ fn invalid_block() { let tx = crate::tests::signed_transaction(&mut OsRng, genesis, &key, 5); // Manually create the block to bypass build_block's checks let block = Block::new(blockchain.tip(), vec![], vec![Transaction::Application(tx)]); - assert!(blockchain.verify_block::(&block, validators.clone(), false).is_err()); + assert!(blockchain.verify_block::(&block, &validators, false).is_err()); } { // Invalid signature let (_, mut blockchain) = new_blockchain(genesis, &[tx.1.signer]); - blockchain - .add_transaction::(true, Transaction::Application(tx), validators.clone()) - .unwrap(); - let mut block = blockchain.build_block::(validators.clone()); - blockchain.verify_block::(&block, validators.clone(), false).unwrap(); + blockchain.add_transaction::(true, Transaction::Application(tx), &validators).unwrap(); + let mut block = blockchain.build_block::(&validators); + blockchain.verify_block::(&block, &validators, false).unwrap(); match &mut block.transactions[0] { Transaction::Application(tx) => { tx.1.signature.s += ::F::ONE; } _ => panic!("non-signed tx found"), } - assert!(blockchain.verify_block::(&block, validators.clone(), false).is_err()); + assert!(blockchain.verify_block::(&block, &validators, false).is_err()); // Make sure this isn't because the merkle changed due to the transaction hash including the // signature (which it explicitly isn't allowed to anyways) @@ -166,12 +164,10 @@ fn signed_transaction() { panic!("tendermint tx found"); }; let next_nonce = blockchain.next_nonce(&signer, &[]).unwrap(); - blockchain - .add_transaction::(true, Transaction::Application(tx), validators.clone()) - .unwrap(); + blockchain.add_transaction::(true, Transaction::Application(tx), &validators).unwrap(); assert_eq!(next_nonce + 1, blockchain.next_nonce(&signer, &[]).unwrap()); } - let block = blockchain.build_block::(validators.clone()); + let block = blockchain.build_block::(&validators); assert_eq!(block, Block::new(blockchain.tip(), vec![], mempool.clone())); assert_eq!(blockchain.tip(), tip); assert_eq!(block.header.parent, tip); @@ -185,8 +181,8 @@ fn signed_transaction() { ); // Verify and add the block - blockchain.verify_block::(&block, validators.clone(), false).unwrap(); - assert!(blockchain.add_block::(&block, vec![], validators.clone()).is_ok()); + blockchain.verify_block::(&block, &validators, false).unwrap(); + assert!(blockchain.add_block::(&block, vec![], &validators).is_ok()); assert_eq!(blockchain.tip(), block.hash()); }; @@ -233,21 +229,21 @@ fn provided_transaction() { { // Non-provided transactions should fail verification because we don't have them locally. 
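The block that follows exercises the provided-transaction lifecycle: verification fails until the transaction has been provided locally, succeeds once it has, and attempting to include it again after it is on chain yields `ProvidedAlreadyIncluded`. A toy model of that rule (simplified types, not the real API):

```rust
use std::collections::{HashSet, VecDeque};

#[derive(Debug, PartialEq)]
enum Error {
  NonLocalProvided,
  ProvidedAlreadyIncluded,
}

struct Provided {
  local: VecDeque<[u8; 32]>,   // locally provided, in order
  on_chain: HashSet<[u8; 32]>, // already included in a block
}

impl Provided {
  fn include(&mut self, tx: [u8; 32]) -> Result<(), Error> {
    if self.on_chain.contains(&tx) {
      return Err(Error::ProvidedAlreadyIncluded);
    }
    match self.local.front() {
      // Only the next locally provided transaction may be included
      Some(next) if *next == tx => {
        self.local.pop_front();
        self.on_chain.insert(tx);
        Ok(())
      }
      _ => Err(Error::NonLocalProvided),
    }
  }
}
```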
let block = Block::new(blockchain.tip(), vec![tx.clone()], vec![]); - assert!(blockchain.verify_block::(&block, validators.clone(), false).is_err()); + assert!(blockchain.verify_block::(&block, &validators, false).is_err()); // Provided transactions should pass verification blockchain.provide_transaction(tx.clone()).unwrap(); - blockchain.verify_block::(&block, validators.clone(), false).unwrap(); + blockchain.verify_block::(&block, &validators, false).unwrap(); // add_block should work for verified blocks - assert!(blockchain.add_block::(&block, vec![], validators.clone()).is_ok()); + assert!(blockchain.add_block::(&block, vec![], &validators).is_ok()); let block = Block::new(blockchain.tip(), vec![tx.clone()], vec![]); // The provided transaction should no longer considered provided but added to chain, // causing this error assert_eq!( - blockchain.verify_block::(&block, validators.clone(), false), + blockchain.verify_block::(&block, &validators, false), Err(BlockError::ProvidedAlreadyIncluded) ); } @@ -262,11 +258,11 @@ fn provided_transaction() { // add_block DOES NOT fail for unverified provided transactions if told to add them, // since now we can have them later. let block1 = Block::new(blockchain.tip(), vec![tx1.clone(), tx3.clone()], vec![]); - assert!(blockchain.add_block::(&block1, vec![], validators.clone()).is_ok()); + assert!(blockchain.add_block::(&block1, vec![], &validators).is_ok()); // in fact, we can have many blocks that have provided txs that we don't have locally. let block2 = Block::new(blockchain.tip(), vec![tx2.clone(), tx4.clone()], vec![]); - assert!(blockchain.add_block::(&block2, vec![], validators.clone()).is_ok()); + assert!(blockchain.add_block::(&block2, vec![], &validators).is_ok()); // make sure we won't return ok for the block before we actually got the txs let TransactionKind::Provided(order) = tx1.kind() else { panic!("tx wasn't provided") }; @@ -357,11 +353,9 @@ async fn tendermint_evidence_tx() { let Transaction::Tendermint(tx) = tx else { panic!("non-tendermint tx found"); }; - blockchain - .add_transaction::(true, Transaction::Tendermint(tx), validators.clone()) - .unwrap(); + blockchain.add_transaction::(true, Transaction::Tendermint(tx), &validators).unwrap(); } - let block = blockchain.build_block::(validators.clone()); + let block = blockchain.build_block::(&validators); assert_eq!(blockchain.tip(), tip); assert_eq!(block.header.parent, tip); @@ -371,8 +365,8 @@ async fn tendermint_evidence_tx() { } // Verify and add the block - blockchain.verify_block::(&block, validators.clone(), false).unwrap(); - assert!(blockchain.add_block::(&block, vec![], validators.clone()).is_ok()); + blockchain.verify_block::(&block, &validators, false).unwrap(); + assert!(blockchain.add_block::(&block, vec![], &validators).is_ok()); assert_eq!(blockchain.tip(), block.hash()); }; @@ -467,7 +461,7 @@ async fn block_tx_ordering() { let signed_tx = Transaction::Application(SignedTx::Signed(Box::new( crate::tests::signed_transaction(&mut OsRng, genesis, &key, i), ))); - blockchain.add_transaction::(true, signed_tx.clone(), validators.clone()).unwrap(); + blockchain.add_transaction::(true, signed_tx.clone(), &validators).unwrap(); mempool.push(signed_tx); let unsigned_tx = Transaction::Tendermint( @@ -477,7 +471,7 @@ async fn block_tx_ordering() { ) .await, ); - blockchain.add_transaction::(true, unsigned_tx.clone(), validators.clone()).unwrap(); + blockchain.add_transaction::(true, unsigned_tx.clone(), &validators).unwrap(); mempool.push(unsigned_tx); let provided_tx = 
@@ -485,7 +479,7 @@ async fn block_tx_ordering() { blockchain.provide_transaction(provided_tx.clone()).unwrap(); provided_txs.push(provided_tx); } - let block = blockchain.build_block::(validators.clone()); + let block = blockchain.build_block::(&validators); assert_eq!(blockchain.tip(), tip); assert_eq!(block.header.parent, tip); @@ -509,7 +503,7 @@ async fn block_tx_ordering() { } // should be a valid block - blockchain.verify_block::(&block, validators.clone(), false).unwrap(); + blockchain.verify_block::(&block, &validators, false).unwrap(); // Unsigned before Provided { @@ -518,7 +512,7 @@ async fn block_tx_ordering() { let unsigned = block.transactions.remove(128); block.transactions.insert(0, unsigned); assert_eq!( - blockchain.verify_block::(&block, validators.clone(), false).unwrap_err(), + blockchain.verify_block::(&block, &validators, false).unwrap_err(), BlockError::WrongTransactionOrder ); } @@ -529,7 +523,7 @@ async fn block_tx_ordering() { let signed = block.transactions.remove(256); block.transactions.insert(0, signed); assert_eq!( - blockchain.verify_block::(&block, validators.clone(), false).unwrap_err(), + blockchain.verify_block::(&block, &validators, false).unwrap_err(), BlockError::WrongTransactionOrder ); } @@ -539,7 +533,7 @@ async fn block_tx_ordering() { let mut block = block; block.transactions.swap(128, 256); assert_eq!( - blockchain.verify_block::(&block, validators.clone(), false).unwrap_err(), + blockchain.verify_block::(&block, &validators, false).unwrap_err(), BlockError::WrongTransactionOrder ); } diff --git a/coordinator/tributary/src/tests/mempool.rs b/coordinator/tributary/src/tests/mempool.rs index 9d8590c2d..34ed4cf98 100644 --- a/coordinator/tributary/src/tests/mempool.rs +++ b/coordinator/tributary/src/tests/mempool.rs @@ -47,7 +47,7 @@ async fn mempool_addition() { &|_, _| Some(0), true, Transaction::Application(first_tx.clone()), - validators.clone(), + &validators, unsigned_in_chain, commit, ) @@ -63,7 +63,7 @@ async fn mempool_addition() { &|_, _| None, true, Transaction::Tendermint(evidence_tx.clone()), - validators.clone(), + &validators, unsigned_in_chain, commit, ) @@ -78,7 +78,7 @@ async fn mempool_addition() { &|_, _| Some(0), true, Transaction::Application(first_tx.clone()), - validators.clone(), + &validators, unsigned_in_chain, commit, ), @@ -89,7 +89,7 @@ async fn mempool_addition() { &|_, _| None, true, Transaction::Tendermint(evidence_tx.clone()), - validators.clone(), + &validators, unsigned_in_chain, commit, ), @@ -103,7 +103,7 @@ async fn mempool_addition() { &|_, _| Some(0), true, Transaction::Application(second_tx.clone()), - validators.clone(), + &validators, unsigned_in_chain, commit, ), @@ -115,7 +115,7 @@ async fn mempool_addition() { &|_, _| Some(0), true, Transaction::Application(second_tx.clone()), - validators.clone(), + &validators, unsigned_in_chain, commit, ), @@ -133,7 +133,7 @@ async fn mempool_addition() { &|_, _| Some(2), true, Transaction::Application(tx.clone()), - validators.clone(), + &validators, unsigned_in_chain, commit ) @@ -173,7 +173,7 @@ fn too_many_mempool() { &|_, _| Some(0), false, Transaction::Application(signed_transaction(&mut OsRng, genesis, &key, i)), - validators.clone(), + &validators, unsigned_in_chain, commit, ) @@ -190,7 +190,7 @@ fn too_many_mempool() { &key, ACCOUNT_MEMPOOL_LIMIT )), - validators.clone(), + &validators, unsigned_in_chain, commit, ), diff --git a/coordinator/tributary/src/tests/transaction/tendermint.rs b/coordinator/tributary/src/tests/transaction/tendermint.rs index 
aba077676..e701f1361 100644 --- a/coordinator/tributary/src/tests/transaction/tendermint.rs +++ b/coordinator/tributary/src/tests/transaction/tendermint.rs @@ -57,13 +57,13 @@ async fn invalid_valid_round() { // This should be invalid evidence if a valid valid round is specified let (_, tx) = valid_round_tx(None).await; - assert!(verify_tendermint_tx::(&tx, validators.clone(), commit).is_err()); + assert!(verify_tendermint_tx::(&tx, &validators, commit).is_err()); // If an invalid valid round is specified (>= current), this should be invalid evidence let (mut signed, tx) = valid_round_tx(Some(RoundNumber(0))).await; // should pass - verify_tendermint_tx::(&tx, validators.clone(), commit).unwrap(); + verify_tendermint_tx::(&tx, &validators, commit).unwrap(); // change the signature let mut random_sig = [0u8; 64]; @@ -72,7 +72,7 @@ async fn invalid_valid_round() { let tx = TendermintTx::SlashEvidence(Evidence::InvalidValidRound(signed.encode())); // should fail - assert!(verify_tendermint_tx::(&tx, validators, commit).is_err()); + assert!(verify_tendermint_tx::(&tx, &validators, commit).is_err()); } #[tokio::test] @@ -94,7 +94,7 @@ async fn invalid_precommit_signature() { }; // Empty Precommit should fail. - assert!(verify_tendermint_tx::(&precommit(None).await.1, validators.clone(), commit).is_err()); + assert!(verify_tendermint_tx::(&precommit(None).await.1, &validators, commit).is_err()); // valid precommit signature should fail. let block_id = [0x22u8; 32]; @@ -105,7 +105,7 @@ async fn invalid_precommit_signature() { assert!(verify_tendermint_tx::( &precommit(Some((block_id, signer.clone().sign(&commit_msg).await))).await.1, - validators.clone(), + &validators, commit ) .is_err()); @@ -113,14 +113,14 @@ async fn invalid_precommit_signature() { // any other signature can be used as evidence. 
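These evidence tests all follow the same two-layer rule: the envelope signature must verify, so the evidence authenticates who to slash, while the claim inside it must actually be faulty. Schematically (assumed names, standing in for `decode_and_verify_signed_message` plus the per-evidence checks):

```rust
// 1) authenticate the envelope, else "evidence" could be forged, then
// 2) confirm the inner claim describes a real fault
fn validate_evidence(envelope_ok: bool, inner_claim_valid: bool) -> Result<(), &'static str> {
  if !envelope_ok {
    // Can't attribute this to anyone; reject it outright
    return Err("unauthenticated evidence");
  }
  if inner_claim_valid {
    // The claimed misbehavior didn't happen; nothing is slashable
    return Err("claimed fault did not occur");
  }
  Ok(())
}
```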
{ let (mut signed, tx) = precommit(Some((block_id, signer.sign(&[]).await))).await; - verify_tendermint_tx::(&tx, validators.clone(), commit).unwrap(); + verify_tendermint_tx::(&tx, &validators, commit).unwrap(); // So long as we can authenticate where it came from let mut random_sig = [0u8; 64]; OsRng.fill_bytes(&mut random_sig); signed.sig = random_sig; let tx = TendermintTx::SlashEvidence(Evidence::InvalidPrecommit(signed.encode())); - assert!(verify_tendermint_tx::(&tx, validators, commit).is_err()); + assert!(verify_tendermint_tx::(&tx, &validators, commit).is_err()); } } @@ -170,10 +170,10 @@ async fn evidence_with_prevote() { // No prevote message alone should be valid as slash evidence at this time for prevote in prevote(None).await { - assert!(verify_tendermint_tx::(&prevote, validators.clone(), commit).is_err()); + assert!(verify_tendermint_tx::(&prevote, &validators, commit).is_err()); } for prevote in prevote(Some([0x22u8; 32])).await { - assert!(verify_tendermint_tx::(&prevote, validators.clone(), commit).is_err()); + assert!(verify_tendermint_tx::(&prevote, &validators, commit).is_err()); } } @@ -199,7 +199,7 @@ async fn conflicting_msgs_evidence_tx() { signed_1.encode(), signed_1.encode(), )); - assert!(verify_tendermint_tx::(&tx, validators.clone(), commit).is_err()); + assert!(verify_tendermint_tx::(&tx, &validators, commit).is_err()); // conflicting data should pass let signed_2 = signed_for_b_r(0, 0, Data::Proposal(None, TendermintBlock(vec![0x22]))).await; @@ -207,7 +207,7 @@ async fn conflicting_msgs_evidence_tx() { signed_1.encode(), signed_2.encode(), )); - verify_tendermint_tx::(&tx, validators.clone(), commit).unwrap(); + verify_tendermint_tx::(&tx, &validators, commit).unwrap(); // Except if it has a distinct round number, as we don't check cross-round conflicts // (except for Precommit) @@ -216,7 +216,7 @@ async fn conflicting_msgs_evidence_tx() { signed_1.encode(), signed_2.encode(), )); - verify_tendermint_tx::(&tx, validators.clone(), commit).unwrap_err(); + verify_tendermint_tx::(&tx, &validators, commit).unwrap_err(); // Proposals for different block numbers should also fail as evidence let signed_2 = signed_for_b_r(1, 0, Data::Proposal(None, TendermintBlock(vec![0x22]))).await; @@ -224,7 +224,7 @@ async fn conflicting_msgs_evidence_tx() { signed_1.encode(), signed_2.encode(), )); - verify_tendermint_tx::(&tx, validators.clone(), commit).unwrap_err(); + verify_tendermint_tx::(&tx, &validators, commit).unwrap_err(); } // Prevote @@ -235,7 +235,7 @@ async fn conflicting_msgs_evidence_tx() { signed_1.encode(), signed_1.encode(), )); - assert!(verify_tendermint_tx::(&tx, validators.clone(), commit).is_err()); + assert!(verify_tendermint_tx::(&tx, &validators, commit).is_err()); // conflicting data should pass let signed_2 = signed_for_b_r(0, 0, Data::Prevote(Some([0x22; 32]))).await; @@ -243,7 +243,7 @@ async fn conflicting_msgs_evidence_tx() { signed_1.encode(), signed_2.encode(), )); - verify_tendermint_tx::(&tx, validators.clone(), commit).unwrap(); + verify_tendermint_tx::(&tx, &validators, commit).unwrap(); // Except if it has a distinct round number, as we don't check cross-round conflicts // (except for Precommit) @@ -252,7 +252,7 @@ async fn conflicting_msgs_evidence_tx() { signed_1.encode(), signed_2.encode(), )); - verify_tendermint_tx::(&tx, validators.clone(), commit).unwrap_err(); + verify_tendermint_tx::(&tx, &validators, commit).unwrap_err(); // Proposals for different block numbers should also fail as evidence let signed_2 = signed_for_b_r(1, 0, 
Data::Prevote(Some([0x22; 32]))).await; @@ -260,7 +260,7 @@ async fn conflicting_msgs_evidence_tx() { signed_1.encode(), signed_2.encode(), )); - verify_tendermint_tx::(&tx, validators.clone(), commit).unwrap_err(); + verify_tendermint_tx::(&tx, &validators, commit).unwrap_err(); } // Precommit @@ -272,7 +272,7 @@ async fn conflicting_msgs_evidence_tx() { signed_1.encode(), signed_1.encode(), )); - assert!(verify_tendermint_tx::(&tx, validators.clone(), commit).is_err()); + assert!(verify_tendermint_tx::(&tx, &validators, commit).is_err()); // For precommit, the round number is ignored let signed_2 = signed_for_b_r(0, 1, Data::Precommit(Some(([0x22; 32], sig)))).await; @@ -280,7 +280,7 @@ async fn conflicting_msgs_evidence_tx() { signed_1.encode(), signed_2.encode(), )); - verify_tendermint_tx::(&tx, validators.clone(), commit).unwrap(); + verify_tendermint_tx::(&tx, &validators, commit).unwrap(); // Yet the block number isn't let signed_2 = signed_for_b_r(1, 0, Data::Precommit(Some(([0x22; 32], sig)))).await; @@ -288,7 +288,7 @@ async fn conflicting_msgs_evidence_tx() { signed_1.encode(), signed_2.encode(), )); - assert!(verify_tendermint_tx::(&tx, validators.clone(), commit).is_err()); + assert!(verify_tendermint_tx::(&tx, &validators, commit).is_err()); } // msgs from different senders should fail @@ -320,7 +320,7 @@ async fn conflicting_msgs_evidence_tx() { let validators = Arc::new(Validators::new(genesis, vec![(signer_pub, 1), (signer_pub_2, 1)]).unwrap()); - assert!(verify_tendermint_tx::(&tx, validators, commit).is_err()); + assert!(verify_tendermint_tx::(&tx, &validators, commit).is_err()); } // msgs with different steps should fail @@ -331,6 +331,6 @@ async fn conflicting_msgs_evidence_tx() { signed_1.encode(), signed_2.encode(), )); - assert!(verify_tendermint_tx::(&tx, validators.clone(), commit).is_err()); + assert!(verify_tendermint_tx::(&tx, &validators, commit).is_err()); } } diff --git a/coordinator/tributary/src/transaction.rs b/coordinator/tributary/src/transaction.rs index 6f238ef54..a773daa7a 100644 --- a/coordinator/tributary/src/transaction.rs +++ b/coordinator/tributary/src/transaction.rs @@ -179,7 +179,6 @@ pub trait Transaction: 'static + Send + Sync + Clone + Eq + Debug + ReadWrite { pub trait GAIN: FnMut(&::G, &[u8]) -> Option {} impl::G, &[u8]) -> Option> GAIN for F {} -// This will only cause mutations when the transaction is valid pub(crate) fn verify_transaction( tx: &T, genesis: [u8; 32], @@ -192,8 +191,7 @@ pub(crate) fn verify_transaction( tx.verify()?; match tx.kind() { - TransactionKind::Provided(_) => {} - TransactionKind::Unsigned => {} + TransactionKind::Provided(_) | TransactionKind::Unsigned => {} TransactionKind::Signed(order, Signed { signer, nonce, signature }) => { if let Some(next_nonce) = get_and_increment_nonce(signer, &order) { if *nonce != next_nonce { @@ -204,7 +202,7 @@ pub(crate) fn verify_transaction( Err(TransactionError::InvalidSigner)?; } - // TODO: Use Schnorr half-aggregation and a batch verification here + // TODO: Use a batch verification here if !signature.verify(*signer, tx.sig_hash(genesis)) { Err(TransactionError::InvalidSignature)?; } diff --git a/coordinator/tributary/tendermint/Cargo.toml b/coordinator/tributary/tendermint/Cargo.toml index 00578938b..ba640391e 100644 --- a/coordinator/tributary/tendermint/Cargo.toml +++ b/coordinator/tributary/tendermint/Cargo.toml @@ -7,6 +7,13 @@ repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/tender authors = ["Luke Parker "] edition = "2021" 
+[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + [dependencies] async-trait = { version = "0.1", default-features = false } thiserror = { version = "1", default-features = false } @@ -16,7 +23,8 @@ log = { version = "0.4", default-features = false, features = ["std"] } parity-scale-codec = { version = "3", default-features = false, features = ["std", "derive"] } -futures = { version = "0.3", default-features = false, features = ["std", "async-await"] } +futures-util = { version = "0.3", default-features = false, features = ["std", "async-await-macro", "sink", "channel"] } +futures-channel = { version = "0.3", default-features = false, features = ["std", "sink"] } tokio = { version = "1", default-features = false, features = ["time"] } [dev-dependencies] diff --git a/coordinator/tributary/tendermint/src/lib.rs b/coordinator/tributary/tendermint/src/lib.rs index 22a9972e5..c54160997 100644 --- a/coordinator/tributary/tendermint/src/lib.rs +++ b/coordinator/tributary/tendermint/src/lib.rs @@ -8,10 +8,10 @@ use std::{ use parity_scale_codec::{Encode, Decode}; -use futures::{ +use futures_channel::mpsc; +use futures_util::{ FutureExt, StreamExt, SinkExt, future::{self, Fuse}, - channel::mpsc, }; use tokio::time::sleep; @@ -30,7 +30,7 @@ pub mod ext; use ext::*; pub fn commit_msg(end_time: u64, id: &[u8]) -> Vec { - [&end_time.to_le_bytes(), id].concat().to_vec() + [&end_time.to_le_bytes(), id].concat() } #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode)] @@ -367,7 +367,7 @@ impl TendermintMachine { let mut queue_future = if self.queue.is_empty() { Fuse::terminated() } else { future::ready(()).fuse() }; - if let Some((our_message, msg, mut sig)) = futures::select_biased! { + if let Some((our_message, msg, mut sig)) = futures_util::select_biased! { // Handle a new block occurring externally (an external sync loop) // Has the highest priority as it makes all other futures here irrelevant msg = self.synced_block_recv.next() => { @@ -398,7 +398,7 @@ impl TendermintMachine { }, // Handle our messages - _ = queue_future => { + () = queue_future => { Some((true, self.queue.pop_front().unwrap(), None)) }, @@ -543,8 +543,7 @@ impl TendermintMachine { self.slash(sender, slash).await } - Err(TendermintError::Temporal) => (), - Err(TendermintError::AlreadyHandled) => (), + Err(TendermintError::Temporal | TendermintError::AlreadyHandled) => (), } } } @@ -627,7 +626,7 @@ impl TendermintMachine { // Uses a junk signature since message equality disregards the signature if self.block.log.has_consensus( msg.round, - Data::Precommit(Some((block.id(), self.signer.sign(&[]).await))), + &Data::Precommit(Some((block.id(), self.signer.sign(&[]).await))), ) { // If msg.round is in the future, these Precommits won't have their inner signatures // verified @@ -714,7 +713,7 @@ impl TendermintMachine { // of the round map if (self.block.round().step == Step::Prevote) && matches!(msg.data, Data::Prevote(_)) { let (participation, weight) = - self.block.log.message_instances(self.block.round().number, Data::Prevote(None)); + self.block.log.message_instances(self.block.round().number, &Data::Prevote(None)); // 34-35 if participation >= self.weights.threshold() { self.block.round_mut().set_timeout(Step::Prevote); @@ -752,7 +751,7 @@ impl TendermintMachine { if self.block.round().step == Step::Propose { // Delay error handling (triggering a slash) until after we vote. 
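The `futures` to `futures-util`/`futures-channel` split above keeps the machine's event loop intact: `select_biased!` polls branches in declaration order, so an externally synced block always preempts our own queued messages. A reduced sketch of that loop shape (toy types; the real loop also races round timeouts and gossiped messages):

```rust
use futures_channel::mpsc;
use futures_util::{future, select_biased, FutureExt, StreamExt};

// Earlier branches win ties: external sync beats our own queue.
async fn step(
  synced_block_recv: &mut mpsc::UnboundedReceiver<u64>,
  queue_empty: bool,
) -> Option<u64> {
  let mut queue_future =
    if queue_empty { future::Fuse::terminated() } else { future::ready(()).fuse() };
  select_biased! {
    block = synced_block_recv.next() => block, // highest priority
    () = queue_future => Some(0),              // then locally queued work
  }
}
```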
let (valid, err) = match self.network.validate(block).await { - Ok(_) => (true, Ok(None)), + Ok(()) => (true, Ok(None)), Err(BlockError::Temporal) => (false, Ok(None)), Err(BlockError::Fatal) => (false, { log::warn!(target: "tendermint", "Validator proposed a fatally invalid block"); @@ -767,7 +766,7 @@ impl TendermintMachine { // 23 and 29. If it's some, both are satisfied if they're for the same ID. If it's some // with different IDs, the function on 22 rejects yet the function on 28 has one other // condition - let locked = self.block.locked.as_ref().map(|(_, id)| id == &block.id()).unwrap_or(true); + let locked = self.block.locked.as_ref().map_or(true, |(_, id)| id == &block.id()); let mut vote = raw_vote.filter(|_| locked); if let Some(vr) = vr { @@ -780,7 +779,7 @@ impl TendermintMachine { ))?; } - if self.block.log.has_consensus(*vr, Data::Prevote(Some(block.id()))) { + if self.block.log.has_consensus(*vr, &Data::Prevote(Some(block.id()))) { // Allow differing locked values if the proposal has a newer valid round // This is the other condition described above if let Some((locked_round, _)) = self.block.locked.as_ref() { @@ -798,25 +797,18 @@ impl TendermintMachine { return Ok(None); } - if self - .block - .valid - .as_ref() - .map(|(round, _)| round != &self.block.round().number) - .unwrap_or(true) - { + if self.block.valid.as_ref().map_or(true, |(round, _)| round != &self.block.round().number) { // 36-43 // The run once condition is implemented above. Since valid will always be set by this, it // not being set, or only being set historically, means this has yet to be run - if self.block.log.has_consensus(self.block.round().number, Data::Prevote(Some(block.id()))) { + if self.block.log.has_consensus(self.block.round().number, &Data::Prevote(Some(block.id()))) { match self.network.validate(block).await { - Ok(_) => (), // BlockError::Temporal is due to a temporal error we have, yet a supermajority of the // network does not, Because we do not believe this block to be fatally invalid, and // because a supermajority deems it valid, accept it. - Err(BlockError::Temporal) => (), + Ok(()) | Err(BlockError::Temporal) => (), Err(BlockError::Fatal) => { log::warn!(target: "tendermint", "Validator proposed a fatally invalid block"); // TODO: Produce evidence of this for the higher level code to decide what to do with diff --git a/coordinator/tributary/tendermint/src/message_log.rs b/coordinator/tributary/tendermint/src/message_log.rs index 4af1fd1cd..85f4cf926 100644 --- a/coordinator/tributary/tendermint/src/message_log.rs +++ b/coordinator/tributary/tendermint/src/message_log.rs @@ -21,9 +21,7 @@ impl MessageLog { pub(crate) fn log(&mut self, signed: SignedMessageFor) -> Result> { let msg = &signed.msg; // Clarity, and safety around default != new edge cases - #[allow(clippy::unwrap_or_default)] let round = self.log.entry(msg.round).or_insert_with(HashMap::new); - #[allow(clippy::unwrap_or_default)] let msgs = round.entry(msg.sender).or_insert_with(HashMap::new); // Handle message replays without issue. It's only multiple messages which is malicious @@ -66,14 +64,14 @@ impl MessageLog { // For a given round, return the participating weight for this step, and the weight agreeing with // the data. 
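The function whose signature changes just below returns two weights for a round and step: the total weight that voted at all, and the weight that voted for this exact `data`. Consensus then requires the agreeing weight to clear the BFT threshold, strictly more than two-thirds of total weight. A self-contained sketch of that arithmetic (toy vote and weight maps):

```rust
use std::collections::HashMap;

// (participating, agreeing): everyone who voted at this step vs. those
// whose vote matches `data`, both weighted.
fn message_instances(
  votes: &HashMap<u16, &'static str>,
  weights: &HashMap<u16, u64>,
  data: &str,
) -> (u64, u64) {
  let mut participating = 0;
  let mut agreeing = 0;
  for (validator, vote) in votes {
    let weight = weights[validator];
    participating += weight;
    if *vote == data {
      agreeing += weight;
    }
  }
  (participating, agreeing)
}

// BFT threshold over total weight n: the smallest weight > 2n/3
fn threshold(n: u64) -> u64 {
  ((2 * n) / 3) + 1
}
```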
- pub(crate) fn message_instances(&self, round: RoundNumber, data: DataFor) -> (u64, u64) { + pub(crate) fn message_instances(&self, round: RoundNumber, data: &DataFor) -> (u64, u64) { let mut participating = 0; let mut weight = 0; for (participant, msgs) in &self.log[&round] { if let Some(msg) = msgs.get(&data.step()) { let validator_weight = self.weights.weight(*participant); participating += validator_weight; - if data == msg.msg.data { + if data == &msg.msg.data { weight += validator_weight; } } @@ -104,7 +102,7 @@ impl MessageLog { } // Check if consensus has been reached on a specific piece of data - pub(crate) fn has_consensus(&self, round: RoundNumber, data: DataFor) -> bool { + pub(crate) fn has_consensus(&self, round: RoundNumber, data: &DataFor) -> bool { let (_, weight) = self.message_instances(round, data); weight >= self.weights.threshold() } diff --git a/coordinator/tributary/tendermint/src/round.rs b/coordinator/tributary/tendermint/src/round.rs index ce2b3898d..445c27848 100644 --- a/coordinator/tributary/tendermint/src/round.rs +++ b/coordinator/tributary/tendermint/src/round.rs @@ -4,7 +4,7 @@ use std::{ collections::HashMap, }; -use futures::{FutureExt, future}; +use futures_util::{FutureExt, future}; use tokio::time::sleep; use crate::{ @@ -80,9 +80,9 @@ impl RoundData { let propose_timeout = timeout_future(Step::Propose); let prevote_timeout = timeout_future(Step::Prevote); let precommit_timeout = timeout_future(Step::Precommit); - futures::pin_mut!(propose_timeout, prevote_timeout, precommit_timeout); + futures_util::pin_mut!(propose_timeout, prevote_timeout, precommit_timeout); - futures::select_biased! { + futures_util::select_biased! { step = propose_timeout => step, step = prevote_timeout => step, step = precommit_timeout => step, diff --git a/coordinator/tributary/tendermint/tests/ext.rs b/coordinator/tributary/tendermint/tests/ext.rs index 295d9a840..e3df7e489 100644 --- a/coordinator/tributary/tendermint/tests/ext.rs +++ b/coordinator/tributary/tendermint/tests/ext.rs @@ -7,7 +7,7 @@ use async_trait::async_trait; use parity_scale_codec::{Encode, Decode}; -use futures::SinkExt; +use futures_util::sink::SinkExt; use tokio::{sync::RwLock, time::sleep}; use tendermint_machine::{ diff --git a/crypto/ciphersuite/Cargo.toml b/crypto/ciphersuite/Cargo.toml index 4641e59f0..9fcf60a6d 100644 --- a/crypto/ciphersuite/Cargo.toml +++ b/crypto/ciphersuite/Cargo.toml @@ -13,12 +13,15 @@ rust-version = "1.74" all-features = true rustdoc-args = ["--cfg", "docsrs"] +[lints] +workspace = true + [dependencies] std-shims = { path = "../../common/std-shims", version = "^0.1.1", default-features = false, optional = true } rand_core = { version = "0.6", default-features = false } -zeroize = { version = "^1.5", default-features = false } +zeroize = { version = "^1.5", default-features = false, features = ["derive"] } subtle = { version = "^2.4", default-features = false } digest = { version = "0.10", default-features = false } diff --git a/crypto/ciphersuite/src/lib.rs b/crypto/ciphersuite/src/lib.rs index 3954047d8..e5ea66453 100644 --- a/crypto/ciphersuite/src/lib.rs +++ b/crypto/ciphersuite/src/lib.rs @@ -52,7 +52,7 @@ pub trait Ciphersuite: /// Group element type. type G: Group + GroupOps + PrimeGroup + Zeroize + ConstantTimeEq; /// Hash algorithm used with this curve. - // Requires BlockSizeUser so it can be used within Hkdf which requies that. + // Requires BlockSizeUser so it can be used within Hkdf which requires that. 
type H: Send + Clone + BlockSizeUser + Digest + HashMarker + SecureDigest; /// ID for this curve. diff --git a/crypto/dalek-ff-group/Cargo.toml b/crypto/dalek-ff-group/Cargo.toml index 50c4abcf2..0fe4bce0b 100644 --- a/crypto/dalek-ff-group/Cargo.toml +++ b/crypto/dalek-ff-group/Cargo.toml @@ -13,6 +13,9 @@ rust-version = "1.65" all-features = true rustdoc-args = ["--cfg", "docsrs"] +[lints] +workspace = true + [dependencies] rustversion = "1" diff --git a/crypto/dalek-ff-group/src/field.rs b/crypto/dalek-ff-group/src/field.rs index 2e83f7bf5..b1af27114 100644 --- a/crypto/dalek-ff-group/src/field.rs +++ b/crypto/dalek-ff-group/src/field.rs @@ -222,7 +222,7 @@ impl FieldElement { FieldElement(reduce(U512::from(value.mul_wide(&value)))) } - /// Perform an exponentation. + /// Perform an exponentiation. pub fn pow(&self, other: FieldElement) -> FieldElement { let mut table = [FieldElement::ONE; 16]; table[1] = *self; diff --git a/crypto/dalek-ff-group/src/lib.rs b/crypto/dalek-ff-group/src/lib.rs index a88ef306a..dcbcacc08 100644 --- a/crypto/dalek-ff-group/src/lib.rs +++ b/crypto/dalek-ff-group/src/lib.rs @@ -50,6 +50,7 @@ fn u8_from_bool(bit_ref: &mut bool) -> u8 { let bit_ref = black_box(bit_ref); let mut bit = black_box(*bit_ref); + #[allow(clippy::cast_lossless)] let res = black_box(bit as u8); bit.zeroize(); debug_assert!((res | 1) == 1); diff --git a/crypto/dkg/Cargo.toml b/crypto/dkg/Cargo.toml index 2b8cd25db..a8d3f0a8d 100644 --- a/crypto/dkg/Cargo.toml +++ b/crypto/dkg/Cargo.toml @@ -13,6 +13,9 @@ rust-version = "1.70" all-features = true rustdoc-args = ["--cfg", "docsrs"] +[lints] +workspace = true + [dependencies] thiserror = { version = "1", default-features = false, optional = true } @@ -23,7 +26,6 @@ zeroize = { version = "^1.5", default-features = false, features = ["zeroize_der std-shims = { version = "0.1", path = "../../common/std-shims", default-features = false } borsh = { version = "1", default-features = false, features = ["derive", "de_strict_order"], optional = true } -serde = { version = "1", default-features = false, features = ["derive"], optional = true } transcript = { package = "flexible-transcript", path = "../transcript", version = "^0.3.2", default-features = false, features = ["recommended"] } chacha20 = { version = "0.9", default-features = false, features = ["zeroize"] } @@ -47,7 +49,6 @@ std = [ "std-shims/std", "borsh?/std", - "serde?/std", "transcript/std", "chacha20/std", @@ -61,6 +62,5 @@ std = [ "dleq/serialize" ] borsh = ["dep:borsh"] -serde = ["dep:serde"] tests = ["rand_core/getrandom"] default = ["std"] diff --git a/crypto/dkg/src/encryption.rs b/crypto/dkg/src/encryption.rs index 4d68929ce..51cf6b060 100644 --- a/crypto/dkg/src/encryption.rs +++ b/crypto/dkg/src/encryption.rs @@ -118,7 +118,7 @@ fn cipher(context: &str, ecdh: &Zeroizing) -> ChaCha20 { zeroize(challenge.as_mut()); // Since the key is single-use, it doesn't matter what we use for the IV - // The isssue is key + IV reuse. If we never reuse the key, we can't have the opportunity to + // The issue is key + IV reuse. If we never reuse the key, we can't have the opportunity to // reuse a nonce // Use a static IV in acknowledgement of this let mut iv = Cc20Iv::default(); @@ -226,7 +226,7 @@ impl EncryptedMessage { use ciphersuite::group::ff::PrimeField; let mut repr = ::Repr::default(); - for b in repr.as_mut().iter_mut() { + for b in repr.as_mut() { *b = 255; } // Tries to guarantee the above assumption. 
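The `cipher` comment above (single-use key, static IV) reflects the general stream-cipher contract: only the (key, nonce) pair must never repeat, so a key used exactly once can safely take a constant nonce. A minimal sketch with the `chacha20` crate, mirroring that reasoning:

```rust
use chacha20::{
  cipher::{KeyIvInit, StreamCipher},
  ChaCha20,
};

// Safe only because `key` is never reused: uniqueness of the
// (key, nonce) pair then comes entirely from the key.
fn encrypt_once(key: [u8; 32], message: &mut [u8]) {
  let iv = [0; 12]; // static IV, per the comment above
  let mut cipher = ChaCha20::new(&key.into(), &iv.into());
  cipher.apply_keystream(message); // XORs the keystream in place
}
```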
diff --git a/crypto/dkg/src/lib.rs b/crypto/dkg/src/lib.rs index fd49856f7..77a3bdbe0 100644 --- a/crypto/dkg/src/lib.rs +++ b/crypto/dkg/src/lib.rs @@ -31,8 +31,7 @@ pub mod tests; /// The ID of a participant, defined as a non-zero u16. #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Zeroize)] -#[cfg_attr(feature = "borsh", derive(borsh::BorshSerialize, borsh::BorshDeserialize))] -#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +#[cfg_attr(feature = "borsh", derive(borsh::BorshSerialize))] pub struct Participant(pub(crate) u16); impl Participant { /// Create a new Participant identifier from a u16. @@ -118,6 +117,14 @@ mod lib { Ciphersuite, }; + #[cfg(feature = "borsh")] + impl borsh::BorshDeserialize for Participant { + fn deserialize_reader(reader: &mut R) -> io::Result { + Participant::new(u16::deserialize_reader(reader)?) + .ok_or_else(|| io::Error::other("invalid participant")) + } + } + // Validate a map of values to have the expected included participants pub(crate) fn validate_map( map: &HashMap, @@ -147,8 +154,7 @@ mod lib { /// Parameters for a multisig. // These fields should not be made public as they should be static #[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)] - #[cfg_attr(feature = "borsh", derive(borsh::BorshSerialize, borsh::BorshDeserialize))] - #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] + #[cfg_attr(feature = "borsh", derive(borsh::BorshSerialize))] pub struct ThresholdParams { /// Participants needed to sign on behalf of the group. pub(crate) t: u16, @@ -189,6 +195,16 @@ mod lib { } } + #[cfg(feature = "borsh")] + impl borsh::BorshDeserialize for ThresholdParams { + fn deserialize_reader(reader: &mut R) -> io::Result { + let t = u16::deserialize_reader(reader)?; + let n = u16::deserialize_reader(reader)?; + let i = Participant::deserialize_reader(reader)?; + ThresholdParams::new(t, n, i).map_err(|e| io::Error::other(format!("{e:?}"))) + } + } + /// Calculate the lagrange coefficient for a signing set. 
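Replacing the derived `BorshDeserialize` with the manual impls above is a parse-don't-validate move: after this, deserialization cannot construct a `Participant(0)` or a `ThresholdParams` failing its own checks (e.g. one violating `t <= n`), because every decode routes through the validating constructor. The pattern in miniature, on a toy non-zero ID type:

```rust
use std::io;

use borsh::BorshDeserialize;

struct Id(u16); // invariant: non-zero

impl Id {
  fn new(value: u16) -> Option<Id> {
    if value == 0 { None } else { Some(Id(value)) }
  }
}

impl BorshDeserialize for Id {
  fn deserialize_reader<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    // Decode the raw integer, then enforce the invariant before returning
    Id::new(u16::deserialize_reader(reader)?).ok_or_else(|| io::Error::other("invalid id"))
  }
}
```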
pub fn lagrange(i: Participant, included: &[Participant]) -> F { let i_f = F::from(u64::from(u16::from(i))); @@ -241,7 +257,7 @@ mod lib { self.params.zeroize(); self.secret_share.zeroize(); self.group_key.zeroize(); - for (_, share) in self.verification_shares.iter_mut() { + for share in self.verification_shares.values_mut() { share.zeroize(); } } @@ -394,10 +410,10 @@ mod lib { self.group_key.zeroize(); self.included.zeroize(); self.secret_share.zeroize(); - for (_, share) in self.original_verification_shares.iter_mut() { + for share in self.original_verification_shares.values_mut() { share.zeroize(); } - for (_, share) in self.verification_shares.iter_mut() { + for share in self.verification_shares.values_mut() { share.zeroize(); } } @@ -468,7 +484,7 @@ mod lib { ); let mut verification_shares = self.verification_shares(); - for (i, share) in verification_shares.iter_mut() { + for (i, share) in &mut verification_shares { *share *= lagrange::(*i, &included); } diff --git a/crypto/dkg/src/promote.rs b/crypto/dkg/src/promote.rs index ac94beb6d..010abf80f 100644 --- a/crypto/dkg/src/promote.rs +++ b/crypto/dkg/src/promote.rs @@ -19,7 +19,7 @@ pub trait CiphersuitePromote { fn promote(self) -> ThresholdKeys; } -fn transcript(key: G, i: Participant) -> RecommendedTranscript { +fn transcript(key: &G, i: Participant) -> RecommendedTranscript { let mut transcript = RecommendedTranscript::new(b"DKG Generator Promotion v0.2"); transcript.append_message(b"group_key", key.to_bytes()); transcript.append_message(b"participant", i.to_bytes()); @@ -79,7 +79,7 @@ where share: C2::generator() * base.secret_share().deref(), proof: DLEqProof::prove( rng, - &mut transcript(base.core.group_key(), base.params().i), + &mut transcript(&base.core.group_key(), base.params().i), &[C1::generator(), C2::generator()], base.secret_share(), ), @@ -105,7 +105,7 @@ where proof .proof .verify( - &mut transcript(self.base.core.group_key(), i), + &mut transcript(&self.base.core.group_key(), i), &[C1::generator(), C2::generator()], &[original_shares[&i], proof.share], ) diff --git a/crypto/dkg/src/tests/frost.rs b/crypto/dkg/src/tests/frost.rs index 92f687c44..01af35626 100644 --- a/crypto/dkg/src/tests/frost.rs +++ b/crypto/dkg/src/tests/frost.rs @@ -135,10 +135,10 @@ mod literal { const TWO: Participant = Participant(2); fn test_blame( - commitment_msgs: HashMap>>, + commitment_msgs: &HashMap>>, machines: Vec>, - msg: FrostEncryptedMessage, - blame: Option>, + msg: &FrostEncryptedMessage, + blame: &Option>, ) { for machine in machines { let (additional, blamed) = machine.blame(ONE, TWO, msg.clone(), blame.clone()); @@ -188,7 +188,7 @@ mod literal { }) .collect::>(); - test_blame(commitment_msgs, machines, secret_shares[&ONE][&TWO].clone(), blame.unwrap()); + test_blame(&commitment_msgs, machines, &secret_shares[&ONE][&TWO].clone(), &blame.unwrap()); } #[test] @@ -228,7 +228,7 @@ mod literal { .collect::>(); blame.as_mut().unwrap().as_mut().unwrap().invalidate_key(); - test_blame(commitment_msgs, machines, secret_shares[&TWO][&ONE].clone(), blame.unwrap()); + test_blame(&commitment_msgs, machines, &secret_shares[&TWO][&ONE].clone(), &blame.unwrap()); } // This should be largely equivalent to the prior test @@ -263,7 +263,7 @@ mod literal { .collect::>(); blame.as_mut().unwrap().as_mut().unwrap().invalidate_dleq(); - test_blame(commitment_msgs, machines, secret_shares[&TWO][&ONE].clone(), blame.unwrap()); + test_blame(&commitment_msgs, machines, &secret_shares[&TWO][&ONE].clone(), &blame.unwrap()); } #[test] @@ -296,7 +296,7 
@@ mod literal { }) .collect::>(); - test_blame(commitment_msgs, machines, secret_shares[&ONE][&TWO].clone(), blame.unwrap()); + test_blame(&commitment_msgs, machines, &secret_shares[&ONE][&TWO].clone(), &blame.unwrap()); } #[test] @@ -329,6 +329,6 @@ mod literal { }) .collect::>(); - test_blame(commitment_msgs, machines, secret_shares[&ONE][&TWO].clone(), blame.unwrap()); + test_blame(&commitment_msgs, machines, &secret_shares[&ONE][&TWO].clone(), &blame.unwrap()); } } diff --git a/crypto/dleq/Cargo.toml b/crypto/dleq/Cargo.toml index 5afe03b39..7d8c87e9c 100644 --- a/crypto/dleq/Cargo.toml +++ b/crypto/dleq/Cargo.toml @@ -12,6 +12,9 @@ rust-version = "1.73" all-features = true rustdoc-args = ["--cfg", "docsrs"] +[lints] +workspace = true + [dependencies] rustversion = "1" diff --git a/crypto/dleq/src/cross_group/aos.rs b/crypto/dleq/src/cross_group/aos.rs index 4cba3c89e..dac3356ac 100644 --- a/crypto/dleq/src/cross_group/aos.rs +++ b/crypto/dleq/src/cross_group/aos.rs @@ -102,7 +102,7 @@ where #[allow(non_snake_case)] pub(crate) fn prove( rng: &mut R, - transcript: T, + transcript: &T, generators: (Generators, Generators), ring: &[(G0, G1)], mut actual: usize, @@ -122,7 +122,7 @@ where #[allow(non_snake_case)] let mut R = original_R; - for i in ((actual + 1) .. (actual + RING_LEN + 1)).map(|i| i % RING_LEN) { + for i in ((actual + 1) ..= (actual + RING_LEN)).map(|i| i % RING_LEN) { let e = Self::nonces(transcript.clone(), R); if i == 0 { match Re_0 { @@ -144,11 +144,10 @@ where r.0.zeroize(); r.1.zeroize(); break; - // Generate a decoy response - } else { - s[i] = (G0::Scalar::random(&mut *rng), G1::Scalar::random(&mut *rng)); } + // Generate a decoy response + s[i] = (G0::Scalar::random(&mut *rng), G1::Scalar::random(&mut *rng)); R = Self::R(generators, s[i], ring[i], e); } @@ -159,7 +158,7 @@ where pub(crate) fn verify( &self, rng: &mut R, - transcript: T, + transcript: &T, generators: (Generators, Generators), batch: &mut (BatchVerifier<(), G0>, BatchVerifier<(), G1>), ring: &[(G0, G1)], @@ -240,7 +239,7 @@ where } let mut s = [(G0::Scalar::ZERO, G1::Scalar::ZERO); RING_LEN]; - for s in s.iter_mut() { + for s in &mut s { *s = (read_scalar(r)?, read_scalar(r)?); } diff --git a/crypto/dleq/src/cross_group/bits.rs b/crypto/dleq/src/cross_group/bits.rs index 1995fad18..a5de897a0 100644 --- a/crypto/dleq/src/cross_group/bits.rs +++ b/crypto/dleq/src/cross_group/bits.rs @@ -45,12 +45,10 @@ impl BitSignature { } } - pub(crate) const fn bits(&self) -> usize { + pub(crate) const fn bits(&self) -> u8 { match self { - BitSignature::ClassicLinear => 1, - BitSignature::ConciseLinear => 2, - BitSignature::EfficientLinear => 1, - BitSignature::CompromiseLinear => 2, + BitSignature::ClassicLinear | BitSignature::EfficientLinear => 1, + BitSignature::ConciseLinear | BitSignature::CompromiseLinear => 2, } } @@ -60,10 +58,8 @@ impl BitSignature { fn aos_form(&self) -> Re { match self { - BitSignature::ClassicLinear => Re::e_default(), - BitSignature::ConciseLinear => Re::e_default(), - BitSignature::EfficientLinear => Re::R_default(), - BitSignature::CompromiseLinear => Re::R_default(), + BitSignature::ClassicLinear | BitSignature::ConciseLinear => Re::e_default(), + BitSignature::EfficientLinear | BitSignature::CompromiseLinear => Re::R_default(), } } } @@ -129,7 +125,7 @@ where let signature = Aos::prove( rng, - transcript.clone(), + transcript, generators, &Self::ring(*pow_2, commitments), usize::from(bits), @@ -155,7 +151,7 @@ where self.signature.verify( rng, - transcript.clone(), + transcript, 
generators, batch, &Self::ring(*pow_2, self.commitments), diff --git a/crypto/dleq/src/cross_group/mod.rs b/crypto/dleq/src/cross_group/mod.rs index 172648662..77569c7ca 100644 --- a/crypto/dleq/src/cross_group/mod.rs +++ b/crypto/dleq/src/cross_group/mod.rs @@ -42,6 +42,7 @@ fn u8_from_bool(bit_ref: &mut bool) -> u8 { let bit_ref = black_box(bit_ref); let mut bit = black_box(*bit_ref); + #[allow(clippy::cast_lossless)] let res = black_box(bit as u8); bit.zeroize(); debug_assert!((res | 1) == 1); @@ -278,7 +279,7 @@ where }; let capacity = usize::try_from(G0::Scalar::CAPACITY.min(G1::Scalar::CAPACITY)).unwrap(); - let bits_per_group = BitSignature::from(SIGNATURE).bits(); + let bits_per_group = usize::from(BitSignature::from(SIGNATURE).bits()); let mut pow_2 = (generators.0.primary, generators.1.primary); @@ -391,7 +392,7 @@ where generators: (Generators, Generators), ) -> Result<(G0, G1), DLEqError> { let capacity = usize::try_from(G0::Scalar::CAPACITY.min(G1::Scalar::CAPACITY)).unwrap(); - let bits_per_group = BitSignature::from(SIGNATURE).bits(); + let bits_per_group = usize::from(BitSignature::from(SIGNATURE).bits()); let has_remainder = (capacity % bits_per_group) != 0; // These shouldn't be possible, as locally created and deserialized proofs should be properly @@ -407,10 +408,8 @@ where Self::transcript(transcript, generators, keys); let batch_capacity = match BitSignature::from(SIGNATURE) { - BitSignature::ClassicLinear => 3, - BitSignature::ConciseLinear => 3, - BitSignature::EfficientLinear => (self.bits.len() + 1) * 3, - BitSignature::CompromiseLinear => (self.bits.len() + 1) * 3, + BitSignature::ClassicLinear | BitSignature::ConciseLinear => 3, + BitSignature::EfficientLinear | BitSignature::CompromiseLinear => (self.bits.len() + 1) * 3, }; let mut batch = (BatchVerifier::new(batch_capacity), BatchVerifier::new(batch_capacity)); @@ -449,7 +448,7 @@ where #[cfg(feature = "serialize")] pub fn read(r: &mut R) -> io::Result { let capacity = usize::try_from(G0::Scalar::CAPACITY.min(G1::Scalar::CAPACITY)).unwrap(); - let bits_per_group = BitSignature::from(SIGNATURE).bits(); + let bits_per_group = usize::from(BitSignature::from(SIGNATURE).bits()); let mut bits = Vec::with_capacity(capacity / bits_per_group); for _ in 0 .. 
(capacity / bits_per_group) { diff --git a/crypto/dleq/src/cross_group/scalar.rs b/crypto/dleq/src/cross_group/scalar.rs index 1b8eb4e58..8f216a882 100644 --- a/crypto/dleq/src/cross_group/scalar.rs +++ b/crypto/dleq/src/cross_group/scalar.rs @@ -20,7 +20,7 @@ pub fn scalar_normalize( let mut res1 = F0::ZERO; let mut res2 = F1::ZERO; - // Uses the bits API to ensure a consistent endianess + // Uses the bits API to ensure a consistent endianness let mut bits = scalar.to_le_bits(); scalar.zeroize(); // Convert it to big endian @@ -29,7 +29,7 @@ pub fn scalar_normalize( let mut skip = bits.len() - usize::try_from(mutual_capacity).unwrap(); // Needed to zero out the bits #[allow(unused_assignments)] - for mut bit in bits.iter_mut() { + for mut bit in &mut bits { if skip > 0 { bit.deref_mut().zeroize(); skip -= 1; diff --git a/crypto/dleq/src/lib.rs b/crypto/dleq/src/lib.rs index ae5391927..5b813b648 100644 --- a/crypto/dleq/src/lib.rs +++ b/crypto/dleq/src/lib.rs @@ -28,7 +28,7 @@ mod tests; pub(crate) fn challenge(transcript: &mut T) -> F { // From here, there are three ways to get a scalar under the ff/group API // 1: Scalar::random(ChaCha20Rng::from_seed(self.transcript.rng_seed(b"challenge"))) - // 2: Grabbing a UInt library to perform reduction by the modulus, then determining endianess + // 2: Grabbing a UInt library to perform reduction by the modulus, then determining endianness // and loading it in // 3: Iterating over each byte and manually doubling/adding. This is simplest diff --git a/crypto/dleq/src/tests/cross_group/aos.rs b/crypto/dleq/src/tests/cross_group/aos.rs index 69139a93b..cf3182890 100644 --- a/crypto/dleq/src/tests/cross_group/aos.rs +++ b/crypto/dleq/src/tests/cross_group/aos.rs @@ -11,14 +11,14 @@ use crate::{ #[allow(non_snake_case)] #[cfg(feature = "serialize")] -fn test_aos_serialization(proof: Aos, Re_0: Re) { +fn test_aos_serialization(proof: &Aos, Re_0: Re) { let mut buf = vec![]; proof.write(&mut buf).unwrap(); let deserialized = Aos::read::<&[u8]>(&mut buf.as_ref(), Re_0).unwrap(); - assert_eq!(proof, deserialized); + assert_eq!(proof, &deserialized); } -fn test_aos(default: Re) { +fn test_aos(default: &Re) { let generators = generators(); let mut ring_keys = [(::Scalar::ZERO, ::Scalar::ZERO); RING_LEN]; @@ -34,7 +34,7 @@ fn test_aos(default: Re) { for (actual, key) in ring_keys.iter_mut().enumerate() { let proof = Aos::<_, _, RING_LEN>::prove( &mut OsRng, - transcript(), + &transcript(), generators, &ring, actual, @@ -43,25 +43,25 @@ fn test_aos(default: Re) { ); let mut batch = (BatchVerifier::new(0), BatchVerifier::new(0)); - proof.verify(&mut OsRng, transcript(), generators, &mut batch, &ring).unwrap(); + proof.verify(&mut OsRng, &transcript(), generators, &mut batch, &ring).unwrap(); // For e, these should have nothing. 
For R, these should have 6 elements each which sum to 0 assert!(batch.0.verify_vartime()); assert!(batch.1.verify_vartime()); #[cfg(feature = "serialize")] - test_aos_serialization(proof, default.clone()); + test_aos_serialization(&proof, default.clone()); } } #[test] fn test_aos_e() { - test_aos::<2>(Re::e_default()); - test_aos::<4>(Re::e_default()); + test_aos::<2>(&Re::e_default()); + test_aos::<4>(&Re::e_default()); } #[allow(non_snake_case)] #[test] fn test_aos_R() { // Batch verification appreciates the longer vectors, which means not batching bits - test_aos::<2>(Re::R_default()); + test_aos::<2>(&Re::R_default()); } diff --git a/crypto/dleq/src/tests/mod.rs b/crypto/dleq/src/tests/mod.rs index c80115dee..412dfcaf3 100644 --- a/crypto/dleq/src/tests/mod.rs +++ b/crypto/dleq/src/tests/mod.rs @@ -117,7 +117,7 @@ fn test_multi_dleq() { // 0: 0 // 1: 1, 2 // 2: 2, 3, 4 - let key_generators = generators[i .. (i + i + 1)].to_vec(); + let key_generators = generators[i ..= (i + i)].to_vec(); let mut these_pub_keys = vec![]; for generator in &key_generators { these_pub_keys.push(generator * key.deref()); diff --git a/crypto/ed448/Cargo.toml b/crypto/ed448/Cargo.toml index ce8f0f5e2..2302d7b3b 100644 --- a/crypto/ed448/Cargo.toml +++ b/crypto/ed448/Cargo.toml @@ -13,6 +13,9 @@ rust-version = "1.65" all-features = true rustdoc-args = ["--cfg", "docsrs"] +[lints] +workspace = true + [dependencies] rustversion = "1" diff --git a/crypto/ed448/src/backend.rs b/crypto/ed448/src/backend.rs index ad2d571f0..83dc3fcaa 100644 --- a/crypto/ed448/src/backend.rs +++ b/crypto/ed448/src/backend.rs @@ -12,6 +12,7 @@ pub(crate) fn u8_from_bool(bit_ref: &mut bool) -> u8 { let bit_ref = black_box(bit_ref); let mut bit = black_box(*bit_ref); + #[allow(clippy::cast_lossless)] let res = black_box(bit as u8); bit.zeroize(); debug_assert!((res | 1) == 1); @@ -138,7 +139,7 @@ macro_rules! field { } impl $FieldName { - /// Perform an exponentation. + /// Perform an exponentiation. 
pub fn pow(&self, other: $FieldName) -> $FieldName { let mut table = [Self(Residue::ONE); 16]; table[1] = *self; diff --git a/crypto/ed448/src/point.rs b/crypto/ed448/src/point.rs index 3ea70b852..c3b10f791 100644 --- a/crypto/ed448/src/point.rs +++ b/crypto/ed448/src/point.rs @@ -214,7 +214,7 @@ impl Sum for Point { impl<'a> Sum<&'a Point> for Point { fn sum>(iter: I) -> Point { - Point::sum(iter.cloned()) + Point::sum(iter.copied()) } } diff --git a/crypto/ff-group-tests/Cargo.toml b/crypto/ff-group-tests/Cargo.toml index 641e5da86..bb55d5a15 100644 --- a/crypto/ff-group-tests/Cargo.toml +++ b/crypto/ff-group-tests/Cargo.toml @@ -13,6 +13,9 @@ rust-version = "1.60" all-features = true rustdoc-args = ["--cfg", "docsrs"] +[lints] +workspace = true + [dependencies] rand_core = "0.6" diff --git a/crypto/ff-group-tests/src/field.rs b/crypto/ff-group-tests/src/field.rs index e34f4c813..cece37a01 100644 --- a/crypto/ff-group-tests/src/field.rs +++ b/crypto/ff-group-tests/src/field.rs @@ -130,8 +130,8 @@ pub fn test_sqrt() { assert_eq!(root * root, has_root, "sqrt(x)^2 != x"); let check = |value: (_, _), expected: (_, F), msg| { - assert_eq!(bool::from(value.0), bool::from(expected.0), "{}", msg); - assert!((value.1 == expected.1) || (value.1 == -expected.1), "{}", msg); + assert_eq!(bool::from(value.0), bool::from(expected.0), "{msg}"); + assert!((value.1 == expected.1) || (value.1 == -expected.1), "{msg}"); }; check( F::sqrt_ratio(&has_root, &F::ONE), diff --git a/crypto/frost/Cargo.toml b/crypto/frost/Cargo.toml index 8ba3ef6ea..128a36678 100644 --- a/crypto/frost/Cargo.toml +++ b/crypto/frost/Cargo.toml @@ -13,6 +13,9 @@ rust-version = "1.74" all-features = true rustdoc-args = ["--cfg", "docsrs"] +[lints] +workspace = true + [dependencies] thiserror = "1" diff --git a/crypto/frost/src/algorithm.rs b/crypto/frost/src/algorithm.rs index 073b483ff..f2da59ea1 100644 --- a/crypto/frost/src/algorithm.rs +++ b/crypto/frost/src/algorithm.rs @@ -51,7 +51,7 @@ pub trait Algorithm: Send + Sync + Clone { /// Read an addendum from a reader. fn read_addendum(&self, reader: &mut R) -> io::Result; - /// Proccess the addendum for the specified participant. Guaranteed to be called in order. + /// Process the addendum for the specified participant. Guaranteed to be called in order. fn process_addendum( &mut self, params: &ThresholdView, @@ -184,7 +184,7 @@ impl> Algorithm fo &mut self, _: &ThresholdView, _: Participant, - _: (), + (): (), ) -> Result<(), FrostError> { Ok(()) } diff --git a/crypto/frost/src/nonce.rs b/crypto/frost/src/nonce.rs index 921480a06..8638baff8 100644 --- a/crypto/frost/src/nonce.rs +++ b/crypto/frost/src/nonce.rs @@ -256,7 +256,7 @@ impl BindingFactor { } pub(crate) fn calculate_binding_factors(&mut self, transcript: &T) { - for (l, binding) in self.0.iter_mut() { + for (l, binding) in &mut self.0 { let mut transcript = transcript.clone(); transcript.append_message(b"participant", C::F::from(u64::from(u16::from(*l))).to_repr()); // It *should* be perfectly fine to reuse a binding factor for multiple nonces diff --git a/crypto/frost/src/sign.rs b/crypto/frost/src/sign.rs index ac4c53a64..a716dc583 100644 --- a/crypto/frost/src/sign.rs +++ b/crypto/frost/src/sign.rs @@ -224,13 +224,15 @@ pub trait SignMachine: Send + Sync + Sized { /// security as your private key share. fn cache(self) -> CachedPreprocess; - /// Create a sign machine from a cached preprocess. After this, the preprocess must be deleted so - /// it's never reused. 
Any reuse would cause the signer to leak their secret share. + /// Create a sign machine from a cached preprocess. + + /// After this, the preprocess must be deleted so it's never reused. Any reuse will presumably + /// cause the signer to leak their secret share. fn from_cache( params: Self::Params, keys: Self::Keys, cache: CachedPreprocess, - ) -> Result; + ) -> (Self, Self::Preprocess); /// Read a Preprocess message. Despite taking self, this does not save the preprocess. /// It must be externally cached and passed into sign. @@ -277,9 +279,8 @@ impl> SignMachine for AlgorithmSignMachi algorithm: A, keys: ThresholdKeys, cache: CachedPreprocess, - ) -> Result { - let (machine, _) = AlgorithmMachine::new(algorithm, keys).seeded_preprocess(cache); - Ok(machine) + ) -> (Self, Self::Preprocess) { + AlgorithmMachine::new(algorithm, keys).seeded_preprocess(cache) } fn read_preprocess(&self, reader: &mut R) -> io::Result { diff --git a/crypto/frost/src/tests/literal/dalek.rs b/crypto/frost/src/tests/literal/dalek.rs index 9a11c5d2d..e9f5a0f4a 100644 --- a/crypto/frost/src/tests/literal/dalek.rs +++ b/crypto/frost/src/tests/literal/dalek.rs @@ -10,7 +10,7 @@ use crate::{ fn ristretto_vectors() { test_with_vectors::<_, curve::Ristretto, curve::IetfRistrettoHram>( &mut OsRng, - Vectors::from( + &Vectors::from( serde_json::from_str::(include_str!( "vectors/frost-ristretto255-sha512.json" )) @@ -24,7 +24,7 @@ fn ristretto_vectors() { fn ed25519_vectors() { test_with_vectors::<_, curve::Ed25519, curve::IetfEd25519Hram>( &mut OsRng, - Vectors::from( + &Vectors::from( serde_json::from_str::(include_str!("vectors/frost-ed25519-sha512.json")) .unwrap(), ), diff --git a/crypto/frost/src/tests/literal/ed448.rs b/crypto/frost/src/tests/literal/ed448.rs index 79b7679b6..95a315058 100644 --- a/crypto/frost/src/tests/literal/ed448.rs +++ b/crypto/frost/src/tests/literal/ed448.rs @@ -57,7 +57,7 @@ fn ed448_8032_vector() { fn ed448_vectors() { test_with_vectors::<_, Ed448, IetfEd448Hram>( &mut OsRng, - Vectors::from( + &Vectors::from( serde_json::from_str::(include_str!("vectors/frost-ed448-shake256.json")) .unwrap(), ), diff --git a/crypto/frost/src/tests/literal/kp256.rs b/crypto/frost/src/tests/literal/kp256.rs index 175039e4d..99bdc1570 100644 --- a/crypto/frost/src/tests/literal/kp256.rs +++ b/crypto/frost/src/tests/literal/kp256.rs @@ -13,7 +13,7 @@ use crate::curve::{P256, IetfP256Hram}; fn secp256k1_vectors() { test_with_vectors::<_, Secp256k1, IetfSecp256k1Hram>( &mut OsRng, - Vectors::from( + &Vectors::from( serde_json::from_str::(include_str!( "vectors/frost-secp256k1-sha256.json" )) @@ -27,7 +27,7 @@ fn secp256k1_vectors() { fn p256_vectors() { test_with_vectors::<_, P256, IetfP256Hram>( &mut OsRng, - Vectors::from( + &Vectors::from( serde_json::from_str::(include_str!("vectors/frost-p256-sha256.json")) .unwrap(), ), diff --git a/crypto/frost/src/tests/mod.rs b/crypto/frost/src/tests/mod.rs index e36bd7110..e457c7037 100644 --- a/crypto/frost/src/tests/mod.rs +++ b/crypto/frost/src/tests/mod.rs @@ -39,7 +39,7 @@ pub fn clone_without( /// Spawn algorithm machines for a random selection of signers, each executing the given algorithm. pub fn algorithm_machines>( rng: &mut R, - algorithm: A, + algorithm: &A, keys: &HashMap>, ) -> HashMap> { let mut included = vec![]; @@ -167,7 +167,7 @@ pub fn sign_without_caching( /// successfully. 
pub fn sign( rng: &mut R, - params: >::Params, + params: &>::Params, mut keys: HashMap>::Keys>, machines: HashMap, msg: &[u8], @@ -177,13 +177,13 @@ pub fn sign( machines, |rng, machines| { // Cache and rebuild half of the machines - let included = machines.keys().cloned().collect::>(); + let included = machines.keys().copied().collect::>(); for i in included { if (rng.next_u64() % 2) == 0 { let cache = machines.remove(&i).unwrap().cache(); machines.insert( i, - M::SignMachine::from_cache(params.clone(), keys.remove(&i).unwrap(), cache).unwrap(), + M::SignMachine::from_cache(params.clone(), keys.remove(&i).unwrap(), cache).0, ); } } @@ -195,12 +195,12 @@ pub fn sign( /// Test a basic Schnorr signature with the provided keys. pub fn test_schnorr_with_keys>( rng: &mut R, - keys: HashMap>, + keys: &HashMap>, ) { const MSG: &[u8] = b"Hello, World!"; - let machines = algorithm_machines(&mut *rng, IetfSchnorr::::ietf(), &keys); - let sig = sign(&mut *rng, IetfSchnorr::::ietf(), keys.clone(), machines, MSG); + let machines = algorithm_machines(&mut *rng, &IetfSchnorr::::ietf(), keys); + let sig = sign(&mut *rng, &IetfSchnorr::::ietf(), keys.clone(), machines, MSG); let group_key = keys[&Participant::new(1).unwrap()].group_key(); assert!(sig.verify(group_key, H::hram(&sig.R, &group_key, MSG))); } @@ -208,13 +208,13 @@ pub fn test_schnorr_with_keys>( /// Test a basic Schnorr signature. pub fn test_schnorr>(rng: &mut R) { let keys = key_gen(&mut *rng); - test_schnorr_with_keys::<_, _, H>(&mut *rng, keys) + test_schnorr_with_keys::<_, _, H>(&mut *rng, &keys) } /// Test a basic Schnorr signature, yet with MuSig. pub fn test_musig_schnorr>(rng: &mut R) { let keys = musig_key_gen(&mut *rng); - test_schnorr_with_keys::<_, _, H>(&mut *rng, keys) + test_schnorr_with_keys::<_, _, H>(&mut *rng, &keys) } /// Test an offset Schnorr signature. 
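Many of the mechanical hunks in this section are one clippy-style sweep applied across the workspace (presumably enabled by the `[lints] workspace = true` stanzas this diff adds to each crate): `for (_, v) in map.iter_mut()` becomes `for v in map.values_mut()`, `cloned()` on iterators of `Copy` items becomes `copied()`, `map(..).unwrap_or(0)` becomes `map_or(0, ..)`, and `let Ok(_) = ..` on unit results becomes `let Ok(()) = ..`. A minimal, std-only sketch of the resulting idioms (toy data, not from the codebase):

```rust
use std::collections::HashMap;

fn main() {
  let mut shares: HashMap<u16, u64> = HashMap::from([(1, 10), (2, 20)]);

  // values_mut() instead of iter_mut() with a discarded key
  for share in shares.values_mut() {
    *share = 0; // stand-in for zeroizing the share
  }

  // copied() instead of cloned() when the items are Copy
  let mut included: Vec<u16> = shares.keys().copied().collect();
  included.sort_unstable();

  // map_or(0, ..) instead of map(..).unwrap_or(0)
  let last_acknowledged: Option<u64> = None;
  let next = last_acknowledged.map_or(0, |i| i + 1);
  assert_eq!(next, 0);

  // let Ok(()) instead of let Ok(_) when the Ok variant is unit,
  // making it explicit that no value is being discarded
  let result: Result<(), &'static str> = Ok(());
  let Ok(()) = result else { return };
}
```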
@@ -231,8 +231,8 @@ pub fn test_offset_schnorr>(rng: &m assert_eq!(keys.group_key(), offset_key); } - let machines = algorithm_machines(&mut *rng, IetfSchnorr::::ietf(), &keys); - let sig = sign(&mut *rng, IetfSchnorr::::ietf(), keys.clone(), machines, MSG); + let machines = algorithm_machines(&mut *rng, &IetfSchnorr::::ietf(), &keys); + let sig = sign(&mut *rng, &IetfSchnorr::::ietf(), keys.clone(), machines, MSG); let group_key = keys[&Participant::new(1).unwrap()].group_key(); assert!(sig.verify(offset_key, H::hram(&sig.R, &group_key, MSG))); } @@ -242,7 +242,7 @@ pub fn test_schnorr_blame>(rng: &mu const MSG: &[u8] = b"Hello, World!"; let keys = key_gen(&mut *rng); - let machines = algorithm_machines(&mut *rng, IetfSchnorr::::ietf(), &keys); + let machines = algorithm_machines(&mut *rng, &IetfSchnorr::::ietf(), &keys); let (mut machines, shares) = preprocess_and_shares(&mut *rng, machines, |_, _| {}, MSG); diff --git a/crypto/frost/src/tests/nonces.rs b/crypto/frost/src/tests/nonces.rs index 8031013c5..ee060befd 100644 --- a/crypto/frost/src/tests/nonces.rs +++ b/crypto/frost/src/tests/nonces.rs @@ -82,7 +82,7 @@ impl Algorithm for MultiNonce { &mut self, _: &ThresholdView, _: Participant, - _: (), + (): (), ) -> Result<(), FrostError> { Ok(()) } @@ -154,14 +154,14 @@ impl Algorithm for MultiNonce { // 3) Provide algorithms with nonces which match the group nonces pub fn test_multi_nonce(rng: &mut R) { let keys = key_gen::(&mut *rng); - let machines = algorithm_machines(&mut *rng, MultiNonce::::new(), &keys); - sign(&mut *rng, MultiNonce::::new(), keys.clone(), machines, &[]); + let machines = algorithm_machines(&mut *rng, &MultiNonce::::new(), &keys); + sign(&mut *rng, &MultiNonce::::new(), keys.clone(), machines, &[]); } /// Test malleating a commitment for a nonce across generators causes the preprocess to error. pub fn test_invalid_commitment(rng: &mut R) { let keys = key_gen::(&mut *rng); - let machines = algorithm_machines(&mut *rng, MultiNonce::::new(), &keys); + let machines = algorithm_machines(&mut *rng, &MultiNonce::::new(), &keys); let (machines, mut preprocesses) = preprocess(&mut *rng, machines, |_, _| {}); // Select a random participant to give an invalid commitment @@ -193,7 +193,7 @@ pub fn test_invalid_commitment(rng: &mut R) { /// Test malleating the DLEq proof for a preprocess causes it to error. 
pub fn test_invalid_dleq_proof(rng: &mut R) { let keys = key_gen::(&mut *rng); - let machines = algorithm_machines(&mut *rng, MultiNonce::::new(), &keys); + let machines = algorithm_machines(&mut *rng, &MultiNonce::::new(), &keys); let (machines, mut preprocesses) = preprocess(&mut *rng, machines, |_, _| {}); // Select a random participant to give an invalid DLEq proof diff --git a/crypto/frost/src/tests/vectors.rs b/crypto/frost/src/tests/vectors.rs index f653513ec..3356a6cdd 100644 --- a/crypto/frost/src/tests/vectors.rs +++ b/crypto/frost/src/tests/vectors.rs @@ -43,7 +43,7 @@ pub struct Vectors { } // Vectors are expected to be formatted per the IETF proof of concept -// The included vectors are direcly from +// The included vectors are directly from // https://github.com/cfrg/draft-irtf-cfrg-frost/tree/draft-irtf-cfrg-frost-14/poc #[cfg(test)] impl From for Vectors { @@ -143,12 +143,12 @@ fn vectors_to_multisig_keys(vectors: &Vectors) -> HashMap>( rng: &mut R, - vectors: Vectors, + vectors: &Vectors, ) { test_ciphersuite::(rng); // Test against the vectors - let keys = vectors_to_multisig_keys::(&vectors); + let keys = vectors_to_multisig_keys::(vectors); { let group_key = ::read_G::<&[u8]>(&mut hex::decode(&vectors.group_key).unwrap().as_ref()) diff --git a/crypto/multiexp/Cargo.toml b/crypto/multiexp/Cargo.toml index 2bc9ece55..27b47ea9e 100644 --- a/crypto/multiexp/Cargo.toml +++ b/crypto/multiexp/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "multiexp" version = "0.4.0" -description = "Multiexponentation algorithms for ff/group" +description = "Multiexponentiation algorithms for ff/group" license = "MIT" repository = "https://github.com/serai-dex/serai/tree/develop/crypto/multiexp" authors = ["Luke Parker "] @@ -13,6 +13,9 @@ rust-version = "1.70" all-features = true rustdoc-args = ["--cfg", "docsrs"] +[lints] +workspace = true + [dependencies] rustversion = "1" diff --git a/crypto/multiexp/README.md b/crypto/multiexp/README.md index 4ee3c56c2..1366f7a6b 100644 --- a/crypto/multiexp/README.md +++ b/crypto/multiexp/README.md @@ -2,7 +2,7 @@ A multiexp implementation for ff/group implementing Straus and Pippenger. A batch verification API is also available via the "batch" feature, which enables -secure multiexponentation batch verification given a series of values which +secure multiexponentiation batch verification given a series of values which should sum to the identity, identifying which doesn't via binary search if they don't. diff --git a/crypto/multiexp/src/lib.rs b/crypto/multiexp/src/lib.rs index 191977b65..cf0133fca 100644 --- a/crypto/multiexp/src/lib.rs +++ b/crypto/multiexp/src/lib.rs @@ -38,6 +38,7 @@ fn u8_from_bool(bit_ref: &mut bool) -> u8 { let bit_ref = black_box(bit_ref); let mut bit = black_box(*bit_ref); + #[allow(clippy::cast_lossless)] let res = black_box(bit as u8); bit.zeroize(); debug_assert!((res | 1) == 1); @@ -172,7 +173,7 @@ fn algorithm(len: usize) -> Algorithm { } } -/// Performs a multiexponentation, automatically selecting the optimal algorithm based on the +/// Performs a multiexponentiation, automatically selecting the optimal algorithm based on the /// amount of pairs. pub fn multiexp(pairs: &[(G::Scalar, G)]) -> G where @@ -187,7 +188,7 @@ where } } -/// Performs a multiexponentation in variable time, automatically selecting the optimal algorithm +/// Performs a multiexponentiation in variable time, automatically selecting the optimal algorithm /// based on the amount of pairs. 
pub fn multiexp_vartime(pairs: &[(G::Scalar, G)]) -> G where diff --git a/crypto/multiexp/src/pippenger.rs b/crypto/multiexp/src/pippenger.rs index e182d51ea..10d7d1410 100644 --- a/crypto/multiexp/src/pippenger.rs +++ b/crypto/multiexp/src/pippenger.rs @@ -5,7 +5,7 @@ use group::Group; use crate::prep_bits; -// Pippenger's algorithm for multiexponentation, as published in the SIAM Journal on Computing +// Pippenger's algorithm for multiexponentiation, as published in the SIAM Journal on Computing // DOI: 10.1137/0209022 pub(crate) fn pippenger(pairs: &[(G::Scalar, G)], window: u8) -> G where diff --git a/crypto/multiexp/src/straus.rs b/crypto/multiexp/src/straus.rs index 4eadd2130..6f472c057 100644 --- a/crypto/multiexp/src/straus.rs +++ b/crypto/multiexp/src/straus.rs @@ -22,7 +22,7 @@ fn prep_tables(pairs: &[(G::Scalar, G)], window: u8) -> Vec> { tables } -// Straus's algorithm for multiexponentation, as published in The American Mathematical Monthly +// Straus's algorithm for multiexponentiation, as published in The American Mathematical Monthly // DOI: 10.2307/2310929 pub(crate) fn straus(pairs: &[(G::Scalar, G)], window: u8) -> G where diff --git a/crypto/schnorr/Cargo.toml b/crypto/schnorr/Cargo.toml index 06abd5715..91f8722b4 100644 --- a/crypto/schnorr/Cargo.toml +++ b/crypto/schnorr/Cargo.toml @@ -13,6 +13,9 @@ rust-version = "1.74" all-features = true rustdoc-args = ["--cfg", "docsrs"] +[lints] +workspace = true + [dependencies] std-shims = { path = "../../common/std-shims", version = "^0.1.1", default-features = false } diff --git a/crypto/schnorr/src/lib.rs b/crypto/schnorr/src/lib.rs index 77d033d05..ecca87f73 100644 --- a/crypto/schnorr/src/lib.rs +++ b/crypto/schnorr/src/lib.rs @@ -69,6 +69,7 @@ impl SchnorrSignature { /// This challenge must be properly crafted, which means being binding to the public key, nonce, /// and any message. Failure to do so will let a malicious adversary to forge signatures for /// different keys/messages. + #[allow(clippy::needless_pass_by_value)] // Prevents further-use of this single-use value pub fn sign( private_key: &Zeroizing, nonce: Zeroizing, @@ -82,7 +83,7 @@ impl SchnorrSignature { } /// Return the series of pairs whose products sum to zero for a valid signature. - /// This is inteded to be used with a multiexp. + /// This is intended to be used with a multiexp. 
pub fn batch_statements(&self, public_key: C::G, challenge: C::F) -> [(C::F, C::G); 3] { // s = r + ca // sG == R + cA diff --git a/crypto/schnorrkel/Cargo.toml b/crypto/schnorrkel/Cargo.toml index 46df8f20a..f58190707 100644 --- a/crypto/schnorrkel/Cargo.toml +++ b/crypto/schnorrkel/Cargo.toml @@ -13,6 +13,9 @@ rust-version = "1.74" all-features = true rustdoc-args = ["--cfg", "docsrs"] +[lints] +workspace = true + [dependencies] rand_core = "0.6" zeroize = "^1.5" diff --git a/crypto/schnorrkel/src/lib.rs b/crypto/schnorrkel/src/lib.rs index 7d3b33339..bb46bc024 100644 --- a/crypto/schnorrkel/src/lib.rs +++ b/crypto/schnorrkel/src/lib.rs @@ -99,7 +99,7 @@ impl Algorithm for Schnorrkel { &mut self, _: &ThresholdView, _: Participant, - _: (), + (): (), ) -> Result<(), FrostError> { Ok(()) } diff --git a/crypto/schnorrkel/src/tests.rs b/crypto/schnorrkel/src/tests.rs index 2b01ad43f..2f3c758b3 100644 --- a/crypto/schnorrkel/src/tests.rs +++ b/crypto/schnorrkel/src/tests.rs @@ -17,8 +17,9 @@ fn test() { let keys = key_gen(&mut OsRng); let key = keys[&Participant::new(1).unwrap()].group_key(); - let machines = algorithm_machines(&mut OsRng, Schnorrkel::new(CONTEXT), &keys); - let signature = sign(&mut OsRng, Schnorrkel::new(CONTEXT), keys, machines, MSG); + let algorithm = Schnorrkel::new(CONTEXT); + let machines = algorithm_machines(&mut OsRng, &algorithm, &keys); + let signature = sign(&mut OsRng, &algorithm, keys, machines, MSG); let key = PublicKey::from_bytes(key.to_bytes().as_ref()).unwrap(); key.verify(&mut SigningContext::new(CONTEXT).bytes(MSG), &signature).unwrap() diff --git a/crypto/transcript/Cargo.toml b/crypto/transcript/Cargo.toml index 920fb22f7..566ad56bf 100644 --- a/crypto/transcript/Cargo.toml +++ b/crypto/transcript/Cargo.toml @@ -13,6 +13,9 @@ rust-version = "1.73" all-features = true rustdoc-args = ["--cfg", "docsrs"] +[lints] +workspace = true + [dependencies] rustversion = "1" diff --git a/crypto/transcript/src/lib.rs b/crypto/transcript/src/lib.rs index 66d5d7cfb..3956f51d9 100644 --- a/crypto/transcript/src/lib.rs +++ b/crypto/transcript/src/lib.rs @@ -50,6 +50,7 @@ pub trait Transcript: Send + Clone { fn rng_seed(&mut self, label: &'static [u8]) -> [u8; 32]; } +#[derive(Clone, Copy)] enum DigestTranscriptMember { Name, Domain, diff --git a/deny.toml b/deny.toml index 72ffdf033..1fe2cd86e 100644 --- a/deny.toml +++ b/deny.toml @@ -65,8 +65,6 @@ exceptions = [ { allow = ["AGPL-3.0"], name = "serai-runtime" }, { allow = ["AGPL-3.0"], name = "serai-node" }, - { allow = ["AGPL-3.0"], name = "serai-client" }, - { allow = ["AGPL-3.0"], name = "mini-serai" }, { allow = ["AGPL-3.0"], name = "serai-docker-tests" }, @@ -100,4 +98,5 @@ allow-git = [ "https://github.com/serai-dex/substrate-bip39", "https://github.com/serai-dex/substrate", "https://github.com/monero-rs/base58-monero", + "https://github.com/kayabaNerve/dockertest-rs", ] diff --git a/message-queue/Cargo.toml b/message-queue/Cargo.toml index b775d6a97..9eeaa5ce2 100644 --- a/message-queue/Cargo.toml +++ b/message-queue/Cargo.toml @@ -13,6 +13,9 @@ publish = false all-features = true rustdoc-args = ["--cfg", "docsrs"] +[lints] +workspace = true + [dependencies] # Macros once_cell = { version = "1", default-features = false } @@ -37,6 +40,7 @@ env_logger = { version = "0.10", default-features = false, features = ["humantim # Uses a single threaded runtime since this shouldn't ever be CPU-bound tokio = { version = "1", default-features = false, features = ["rt", "time", "io-util", "net", "macros"] } +zalloc = { path = 
"../common/zalloc" } serai-db = { path = "../common/db", optional = true } serai-env = { path = "../common/env" } diff --git a/message-queue/src/client.rs b/message-queue/src/client.rs index e46987b6a..3aaf5a24e 100644 --- a/message-queue/src/client.rs +++ b/message-queue/src/client.rs @@ -67,10 +67,14 @@ impl MessageQueue { #[must_use] async fn send(socket: &mut TcpStream, msg: MessageQueueRequest) -> bool { let msg = borsh::to_vec(&msg).unwrap(); - let Ok(_) = socket.write_all(&u32::try_from(msg.len()).unwrap().to_le_bytes()).await else { + let Ok(()) = socket.write_all(&u32::try_from(msg.len()).unwrap().to_le_bytes()).await else { + log::warn!("couldn't send the message len"); + return false; + }; + let Ok(()) = socket.write_all(&msg).await else { + log::warn!("couldn't write the message"); return false; }; - let Ok(_) = socket.write_all(&msg).await else { return false }; true } @@ -118,20 +122,32 @@ impl MessageQueue { 'outer: loop { if !first { tokio::time::sleep(core::time::Duration::from_secs(5)).await; - continue; } first = false; - let Ok(mut socket) = TcpStream::connect(&self.url).await else { continue }; + log::trace!("opening socket to message-queue for next"); + let mut socket = match TcpStream::connect(&self.url).await { + Ok(socket) => socket, + Err(e) => { + log::warn!("couldn't connect to message-queue server: {e:?}"); + continue; + } + }; + log::trace!("opened socket for next"); loop { if !Self::send(&mut socket, msg.clone()).await { continue 'outer; } - let Ok(status) = socket.read_u8().await else { - continue 'outer; + let status = match socket.read_u8().await { + Ok(status) => status, + Err(e) => { + log::warn!("couldn't read status u8: {e:?}"); + continue 'outer; + } }; // If there wasn't a message, check again in 1s + // TODO: Use a notification system here if status == 0 { tokio::time::sleep(core::time::Duration::from_secs(1)).await; continue; @@ -143,12 +159,17 @@ impl MessageQueue { // Timeout after 5 seconds in case there's an issue with the length handling let Ok(msg) = tokio::time::timeout(core::time::Duration::from_secs(5), async { // Read the message length - let Ok(len) = socket.read_u32_le().await else { - return vec![]; + let len = match socket.read_u32_le().await { + Ok(len) => len, + Err(e) => { + log::warn!("couldn't read len: {e:?}"); + return vec![]; + } }; let mut buf = vec![0; usize::try_from(len).unwrap()]; // Read the message let Ok(_) = socket.read_exact(&mut buf).await else { + log::warn!("couldn't read the message"); return vec![]; }; buf diff --git a/message-queue/src/main.rs b/message-queue/src/main.rs index 80fe604c9..c43cc3c84 100644 --- a/message-queue/src/main.rs +++ b/message-queue/src/main.rs @@ -1,6 +1,3 @@ -mod messages; -mod queue; - pub(crate) use std::{ sync::{Arc, RwLock}, collections::HashMap, @@ -38,6 +35,13 @@ mod clippy { } pub(crate) use self::clippy::*; +mod messages; +mod queue; + +#[global_allocator] +static ALLOCATOR: zalloc::ZeroizingAlloc = + zalloc::ZeroizingAlloc(std::alloc::System); + // queue RPC method /* Queues a message to be delivered from a processor to a coordinator, or vice versa. 
@@ -54,7 +58,7 @@ pub(crate) use self::clippy::*; */ pub(crate) fn queue_message( db: &mut Db, - meta: Metadata, + meta: &Metadata, msg: Vec, sig: SchnorrSignature, ) { @@ -115,7 +119,7 @@ pub(crate) fn queue_message( pub(crate) fn get_next_message(from: Service, to: Service) -> Option { let queue_outer = QUEUES.read().unwrap(); let queue = queue_outer[&(from, to)].read().unwrap(); - let next = queue.last_acknowledged().map(|i| i + 1).unwrap_or(0); + let next = queue.last_acknowledged().map_or(0, |i| i + 1); queue.get_message(next) } @@ -246,22 +250,22 @@ async fn main() { MessageQueueRequest::Queue { meta, msg, sig } => { queue_message( &mut db, - meta, + &meta, msg, SchnorrSignature::::read(&mut sig.as_slice()).unwrap(), ); - let Ok(_) = socket.write_all(&[1]).await else { break }; + let Ok(()) = socket.write_all(&[1]).await else { break }; } MessageQueueRequest::Next { from, to } => match get_next_message(from, to) { Some(msg) => { - let Ok(_) = socket.write_all(&[1]).await else { break }; + let Ok(()) = socket.write_all(&[1]).await else { break }; let msg = borsh::to_vec(&msg).unwrap(); let len = u32::try_from(msg.len()).unwrap(); - let Ok(_) = socket.write_all(&len.to_le_bytes()).await else { break }; - let Ok(_) = socket.write_all(&msg).await else { break }; + let Ok(()) = socket.write_all(&len.to_le_bytes()).await else { break }; + let Ok(()) = socket.write_all(&msg).await else { break }; } None => { - let Ok(_) = socket.write_all(&[0]).await else { break }; + let Ok(()) = socket.write_all(&[0]).await else { break }; } }, MessageQueueRequest::Ack { from, to, id, sig } => { @@ -271,7 +275,7 @@ async fn main() { id, SchnorrSignature::::read(&mut sig.as_slice()).unwrap(), ); - let Ok(_) = socket.write_all(&[1]).await else { break }; + let Ok(()) = socket.write_all(&[1]).await else { break }; } } } diff --git a/message-queue/src/messages.rs b/message-queue/src/messages.rs index 65c18dd20..942f3ff51 100644 --- a/message-queue/src/messages.rs +++ b/message-queue/src/messages.rs @@ -61,7 +61,7 @@ pub fn ack_challenge( id: u64, nonce: ::G, ) -> ::F { - let mut transcript = RecommendedTranscript::new(b"Serai Message Queue v0.1 Ackowledgement"); + let mut transcript = RecommendedTranscript::new(b"Serai Message Queue v0.1 Acknowledgement"); transcript.domain_separate(b"metadata"); transcript.append_message(b"to", borsh::to_vec(&to).unwrap()); transcript.append_message(b"to_key", to_key.to_bytes()); diff --git a/message-queue/src/queue.rs b/message-queue/src/queue.rs index 46148d414..d8d6ca79c 100644 --- a/message-queue/src/queue.rs +++ b/message-queue/src/queue.rs @@ -16,8 +16,7 @@ impl Queue { self .0 .get(self.message_count_key()) - .map(|bytes| u64::from_le_bytes(bytes.try_into().unwrap())) - .unwrap_or(0) + .map_or(0, |bytes| u64::from_le_bytes(bytes.try_into().unwrap())) } fn last_acknowledged_key(&self) -> Vec { diff --git a/mini/Cargo.toml b/mini/Cargo.toml index fc0abf8a0..dfef7e565 100644 --- a/mini/Cargo.toml +++ b/mini/Cargo.toml @@ -13,5 +13,8 @@ publish = false all-features = true rustdoc-args = ["--cfg", "docsrs"] +[lints] +workspace = true + [dependencies] loom = "0.7" diff --git a/orchestration/Dockerfile.parts/Dockerfile.serai.build b/orchestration/Dockerfile.parts/Dockerfile.serai.build index be3af4233..8bc5a498d 100644 --- a/orchestration/Dockerfile.parts/Dockerfile.serai.build +++ b/orchestration/Dockerfile.parts/Dockerfile.serai.build @@ -1,4 +1,4 @@ -FROM rust:1.74-slim-bookworm as builder +FROM rust:1.75-slim-bookworm as builder COPY --from=mimalloc libmimalloc.so 
/usr/lib RUN echo "/usr/lib/libmimalloc.so" >> /etc/ld.so.preload @@ -24,6 +24,7 @@ ADD coordinator /serai/coordinator ADD substrate /serai/substrate ADD mini /serai/mini ADD tests /serai/tests +ADD patches /serai/patches ADD Cargo.toml /serai ADD Cargo.lock /serai ADD AGPL-3.0 /serai diff --git a/orchestration/coins/bitcoin/Dockerfile b/orchestration/coins/bitcoin/Dockerfile index 0356aa6d3..e2bd81d16 100644 --- a/orchestration/coins/bitcoin/Dockerfile +++ b/orchestration/coins/bitcoin/Dockerfile @@ -11,12 +11,12 @@ RUN git clone https://github.com/microsoft/mimalloc && \ cp ./libmimalloc-secure.so ../../../libmimalloc.so FROM alpine:latest as bitcoin -ENV BITCOIN_VERSION=25.1 +ENV BITCOIN_VERSION=26.0 RUN apk --no-cache add git gnupg # Download Bitcoin -RUN wget https://bitcoincore.org/bin/bitcoin-core-${BITCOIN_VERSION}/bitcoin-${BITCOIN_VERSION}-x86_64-linux-gnu.tar.gz \ +RUN wget https://bitcoincore.org/bin/bitcoin-core-${BITCOIN_VERSION}/bitcoin-${BITCOIN_VERSION}-$(uname -m)-linux-gnu.tar.gz \ && wget https://bitcoincore.org/bin/bitcoin-core-${BITCOIN_VERSION}/SHA256SUMS \ && wget https://bitcoincore.org/bin/bitcoin-core-${BITCOIN_VERSION}/SHA256SUMS.asc @@ -26,10 +26,10 @@ RUN git clone https://github.com/bitcoin-core/guix.sigs && \ find . -iname '*.gpg' -exec gpg --import {} \; && \ gpg --verify --status-fd 1 --verify ../../SHA256SUMS.asc ../../SHA256SUMS | grep "^\[GNUPG:\] VALIDSIG.*71A3B16735405025D447E8F274810B012346C9A6" -RUN grep bitcoin-${BITCOIN_VERSION}-x86_64-linux-gnu.tar.gz SHA256SUMS | sha256sum -c +RUN grep bitcoin-${BITCOIN_VERSION}-$(uname -m)-linux-gnu.tar.gz SHA256SUMS | sha256sum -c # Prepare Image -RUN tar xzvf bitcoin-${BITCOIN_VERSION}-x86_64-linux-gnu.tar.gz +RUN tar xzvf bitcoin-${BITCOIN_VERSION}-$(uname -m)-linux-gnu.tar.gz RUN mv bitcoin-${BITCOIN_VERSION}/bin/bitcoind . FROM debian:bookworm-slim as image diff --git a/orchestration/coins/bitcoin/Dockerfile.bitcoin b/orchestration/coins/bitcoin/Dockerfile.bitcoin index f9ca845e5..b2aca0cb5 100644 --- a/orchestration/coins/bitcoin/Dockerfile.bitcoin +++ b/orchestration/coins/bitcoin/Dockerfile.bitcoin @@ -1,11 +1,11 @@ FROM alpine:latest as bitcoin -ENV BITCOIN_VERSION=25.1 +ENV BITCOIN_VERSION=26.0 RUN apk --no-cache add git gnupg # Download Bitcoin -RUN wget https://bitcoincore.org/bin/bitcoin-core-${BITCOIN_VERSION}/bitcoin-${BITCOIN_VERSION}-x86_64-linux-gnu.tar.gz \ +RUN wget https://bitcoincore.org/bin/bitcoin-core-${BITCOIN_VERSION}/bitcoin-${BITCOIN_VERSION}-$(uname -m)-linux-gnu.tar.gz \ && wget https://bitcoincore.org/bin/bitcoin-core-${BITCOIN_VERSION}/SHA256SUMS \ && wget https://bitcoincore.org/bin/bitcoin-core-${BITCOIN_VERSION}/SHA256SUMS.asc @@ -15,8 +15,8 @@ RUN git clone https://github.com/bitcoin-core/guix.sigs && \ find . -iname '*.gpg' -exec gpg --import {} \; && \ gpg --verify --status-fd 1 --verify ../../SHA256SUMS.asc ../../SHA256SUMS | grep "^\[GNUPG:\] VALIDSIG.*71A3B16735405025D447E8F274810B012346C9A6" -RUN grep bitcoin-${BITCOIN_VERSION}-x86_64-linux-gnu.tar.gz SHA256SUMS | sha256sum -c +RUN grep bitcoin-${BITCOIN_VERSION}-$(uname -m)-linux-gnu.tar.gz SHA256SUMS | sha256sum -c # Prepare Image -RUN tar xzvf bitcoin-${BITCOIN_VERSION}-x86_64-linux-gnu.tar.gz +RUN tar xzvf bitcoin-${BITCOIN_VERSION}-$(uname -m)-linux-gnu.tar.gz RUN mv bitcoin-${BITCOIN_VERSION}/bin/bitcoind . 
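Stepping back to the message-queue client changes above: the wire protocol frames every request as a little-endian `u32` length prefix followed by the borsh-encoded body, and reads a one-byte status before any length-prefixed response. A minimal sketch of that framing with tokio, under the format shown in the diff (function names are illustrative, not from the codebase):

```rust
use tokio::{
  io::{AsyncReadExt, AsyncWriteExt},
  net::TcpStream,
};

// Write one frame: little-endian u32 length, then the message body.
async fn send_frame(socket: &mut TcpStream, msg: &[u8]) -> std::io::Result<()> {
  socket.write_all(&u32::try_from(msg.len()).unwrap().to_le_bytes()).await?;
  socket.write_all(msg).await
}

// Read one frame: the u32 length, then exactly that many bytes.
async fn recv_frame(socket: &mut TcpStream) -> std::io::Result<Vec<u8>> {
  let len = socket.read_u32_le().await?;
  let mut buf = vec![0; usize::try_from(len).unwrap()];
  socket.read_exact(&mut buf).await?;
  Ok(buf)
}
```

As in the diff's `next` loop, a reader would typically wrap the receive path in `tokio::time::timeout` so a peer sending a bogus length can't stall it indefinitely.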
diff --git a/orchestration/coordinator/Dockerfile b/orchestration/coordinator/Dockerfile index e58301fb8..f30734c56 100644 --- a/orchestration/coordinator/Dockerfile +++ b/orchestration/coordinator/Dockerfile @@ -9,7 +9,7 @@ RUN git clone https://github.com/microsoft/mimalloc && \ cmake -DMI_SECURE=ON ../.. && \ make && \ cp ./libmimalloc-secure.so ../../../libmimalloc.so -FROM rust:1.74-slim-bookworm as builder +FROM rust:1.75-slim-bookworm as builder COPY --from=mimalloc libmimalloc.so /usr/lib RUN echo "/usr/lib/libmimalloc.so" >> /etc/ld.so.preload @@ -35,6 +35,7 @@ ADD coordinator /serai/coordinator ADD substrate /serai/substrate ADD mini /serai/mini ADD tests /serai/tests +ADD patches /serai/patches ADD Cargo.toml /serai ADD Cargo.lock /serai ADD AGPL-3.0 /serai @@ -47,7 +48,7 @@ RUN --mount=type=cache,target=/root/.cargo \ --mount=type=cache,target=/usr/local/cargo/git \ --mount=type=cache,target=/serai/target \ mkdir /serai/bin && \ - cargo build -p serai-coordinator --features parity-db && \ + cargo build -p serai-coordinator --features "parity-db longer-reattempts" && \ mv /serai/target/debug/serai-coordinator /serai/bin FROM debian:bookworm-slim as image @@ -65,8 +66,8 @@ USER coordinator WORKDIR /home/coordinator # Copy the Coordinator binary and relevant license -COPY --from=builder --chown=processsor /serai/bin/serai-coordinator /bin/ -COPY --from=builder --chown=processsor /serai/AGPL-3.0 . +COPY --from=builder --chown=processor /serai/bin/serai-coordinator /bin/ +COPY --from=builder --chown=processor /serai/AGPL-3.0 . # Run coordinator CMD ["serai-coordinator"] diff --git a/orchestration/coordinator/Dockerfile.coordinator b/orchestration/coordinator/Dockerfile.coordinator index 8fca56220..8c1f6a22e 100644 --- a/orchestration/coordinator/Dockerfile.coordinator +++ b/orchestration/coordinator/Dockerfile.coordinator @@ -1,2 +1,2 @@ - cargo build -p serai-coordinator --features parity-db && \ + cargo build -p serai-coordinator --features "parity-db longer-reattempts" && \ mv /serai/target/debug/serai-coordinator /serai/bin diff --git a/orchestration/coordinator/Dockerfile.coordinator.end b/orchestration/coordinator/Dockerfile.coordinator.end index c96e49f2a..9c8bcd3dc 100644 --- a/orchestration/coordinator/Dockerfile.coordinator.end +++ b/orchestration/coordinator/Dockerfile.coordinator.end @@ -8,8 +8,8 @@ USER coordinator WORKDIR /home/coordinator # Copy the Coordinator binary and relevant license -COPY --from=builder --chown=processsor /serai/bin/serai-coordinator /bin/ -COPY --from=builder --chown=processsor /serai/AGPL-3.0 . +COPY --from=builder --chown=processor /serai/bin/serai-coordinator /bin/ +COPY --from=builder --chown=processor /serai/AGPL-3.0 . # Run coordinator CMD ["serai-coordinator"] diff --git a/orchestration/message-queue/Dockerfile b/orchestration/message-queue/Dockerfile index 0a669de69..63ee9f72e 100644 --- a/orchestration/message-queue/Dockerfile +++ b/orchestration/message-queue/Dockerfile @@ -9,7 +9,7 @@ RUN git clone https://github.com/microsoft/mimalloc && \ cmake -DMI_SECURE=ON ../.. 
&& \ make && \ cp ./libmimalloc-secure.so ../../../libmimalloc.so -FROM rust:1.74-slim-bookworm as builder +FROM rust:1.75-slim-bookworm as builder COPY --from=mimalloc libmimalloc.so /usr/lib RUN echo "/usr/lib/libmimalloc.so" >> /etc/ld.so.preload @@ -35,6 +35,7 @@ ADD coordinator /serai/coordinator ADD substrate /serai/substrate ADD mini /serai/mini ADD tests /serai/tests +ADD patches /serai/patches ADD Cargo.toml /serai ADD Cargo.lock /serai ADD AGPL-3.0 /serai diff --git a/orchestration/processor/Dockerfile.processor.end b/orchestration/processor/Dockerfile.processor.end index cd0e6e970..410ba5e80 100644 --- a/orchestration/processor/Dockerfile.processor.end +++ b/orchestration/processor/Dockerfile.processor.end @@ -8,8 +8,8 @@ USER processor WORKDIR /home/processor # Copy the Processor binary and relevant license -COPY --from=builder --chown=processsor /serai/bin/serai-processor /bin/ -COPY --from=builder --chown=processsor /serai/AGPL-3.0 . +COPY --from=builder --chown=processor /serai/bin/serai-processor /bin/ +COPY --from=builder --chown=processor /serai/AGPL-3.0 . # Run processor CMD ["serai-processor"] diff --git a/orchestration/processor/bitcoin/Dockerfile b/orchestration/processor/bitcoin/Dockerfile index 28c3d15c1..e5f287e76 100644 --- a/orchestration/processor/bitcoin/Dockerfile +++ b/orchestration/processor/bitcoin/Dockerfile @@ -9,7 +9,7 @@ RUN git clone https://github.com/microsoft/mimalloc && \ cmake -DMI_SECURE=ON ../.. && \ make && \ cp ./libmimalloc-secure.so ../../../libmimalloc.so -FROM rust:1.74-slim-bookworm as builder +FROM rust:1.75-slim-bookworm as builder COPY --from=mimalloc libmimalloc.so /usr/lib RUN echo "/usr/lib/libmimalloc.so" >> /etc/ld.so.preload @@ -35,6 +35,7 @@ ADD coordinator /serai/coordinator ADD substrate /serai/substrate ADD mini /serai/mini ADD tests /serai/tests +ADD patches /serai/patches ADD Cargo.toml /serai ADD Cargo.lock /serai ADD AGPL-3.0 /serai @@ -65,8 +66,8 @@ USER processor WORKDIR /home/processor # Copy the Processor binary and relevant license -COPY --from=builder --chown=processsor /serai/bin/serai-processor /bin/ -COPY --from=builder --chown=processsor /serai/AGPL-3.0 . +COPY --from=builder --chown=processor /serai/bin/serai-processor /bin/ +COPY --from=builder --chown=processor /serai/AGPL-3.0 . # Run processor CMD ["serai-processor"] diff --git a/orchestration/processor/monero/Dockerfile b/orchestration/processor/monero/Dockerfile index 835647f36..4cd7ed51f 100644 --- a/orchestration/processor/monero/Dockerfile +++ b/orchestration/processor/monero/Dockerfile @@ -9,7 +9,7 @@ RUN git clone https://github.com/microsoft/mimalloc && \ cmake -DMI_SECURE=ON ../.. && \ make && \ cp ./libmimalloc-secure.so ../../../libmimalloc.so -FROM rust:1.74-slim-bookworm as builder +FROM rust:1.75-slim-bookworm as builder COPY --from=mimalloc libmimalloc.so /usr/lib RUN echo "/usr/lib/libmimalloc.so" >> /etc/ld.so.preload @@ -35,6 +35,7 @@ ADD coordinator /serai/coordinator ADD substrate /serai/substrate ADD mini /serai/mini ADD tests /serai/tests +ADD patches /serai/patches ADD Cargo.toml /serai ADD Cargo.lock /serai ADD AGPL-3.0 /serai @@ -65,8 +66,8 @@ USER processor WORKDIR /home/processor # Copy the Processor binary and relevant license -COPY --from=builder --chown=processsor /serai/bin/serai-processor /bin/ -COPY --from=builder --chown=processsor /serai/AGPL-3.0 . +COPY --from=builder --chown=processor /serai/bin/serai-processor /bin/ +COPY --from=builder --chown=processor /serai/AGPL-3.0 . 
# Run processor CMD ["serai-processor"] diff --git a/orchestration/runtime/Dockerfile b/orchestration/runtime/Dockerfile index cbc825520..fd8ecf077 100644 --- a/orchestration/runtime/Dockerfile +++ b/orchestration/runtime/Dockerfile @@ -1,9 +1,9 @@ -FROM rust:1.74.0-slim-bookworm as builder +FROM rust:1.75.0-slim-bookworm as builder # Move to a Debian package snapshot RUN rm -rf /etc/apt/sources.list.d/debian.sources && \ rm -rf /var/lib/apt/lists/* && \ - echo "deb [arch=amd64] http://snapshot.debian.org/archive/debian/20230703T000000Z bookworm main" > /etc/apt/sources.list && \ + echo "deb [arch=amd64] http://snapshot.debian.org/archive/debian/20231201T000000Z bookworm main" > /etc/apt/sources.list && \ apt update # Install dependencies @@ -22,6 +22,7 @@ ADD coordinator /serai/coordinator ADD substrate /serai/substrate ADD mini /serai/mini ADD tests /serai/tests +ADD patches /serai/patches ADD Cargo.toml /serai ADD Cargo.lock /serai ADD AGPL-3.0 /serai diff --git a/orchestration/serai/Dockerfile b/orchestration/serai/Dockerfile index 291b35696..c06205f46 100644 --- a/orchestration/serai/Dockerfile +++ b/orchestration/serai/Dockerfile @@ -9,7 +9,7 @@ RUN git clone https://github.com/microsoft/mimalloc && \ cmake -DMI_SECURE=ON ../.. && \ make && \ cp ./libmimalloc-secure.so ../../../libmimalloc.so -FROM rust:1.74-slim-bookworm as builder +FROM rust:1.75-slim-bookworm as builder COPY --from=mimalloc libmimalloc.so /usr/lib RUN echo "/usr/lib/libmimalloc.so" >> /etc/ld.so.preload @@ -35,6 +35,7 @@ ADD coordinator /serai/coordinator ADD substrate /serai/substrate ADD mini /serai/mini ADD tests /serai/tests +ADD patches /serai/patches ADD Cargo.toml /serai ADD Cargo.lock /serai ADD AGPL-3.0 /serai diff --git a/patches/directories-next/Cargo.toml b/patches/directories-next/Cargo.toml new file mode 100644 index 000000000..8c2b21dce --- /dev/null +++ b/patches/directories-next/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "directories-next" +version = "2.0.0" +description = "Patch from directories-next back to directories" +license = "MIT" +repository = "https://github.com/serai-dex/serai/tree/develop/patches/directories-next" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +rust-version = "1.74" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[dependencies] +directories = "5" diff --git a/patches/directories-next/src/lib.rs b/patches/directories-next/src/lib.rs new file mode 100644 index 000000000..fb4871e65 --- /dev/null +++ b/patches/directories-next/src/lib.rs @@ -0,0 +1 @@ +pub use directories::*; diff --git a/patches/is-terminal/Cargo.toml b/patches/is-terminal/Cargo.toml new file mode 100644 index 000000000..ef8f8f137 --- /dev/null +++ b/patches/is-terminal/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "is-terminal" +version = "0.4.10" +description = "is-terminal written around std::io::IsTerminal" +license = "MIT" +repository = "https://github.com/serai-dex/serai/tree/develop/patches/is-terminal" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +rust-version = "1.70" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] diff --git a/patches/is-terminal/src/lib.rs b/patches/is-terminal/src/lib.rs new file mode 100644 index 000000000..e3ad95c21 --- /dev/null +++ b/patches/is-terminal/src/lib.rs @@ -0,0 +1 @@ +pub use std::io::IsTerminal; diff --git a/patches/mach/Cargo.toml b/patches/mach/Cargo.toml new file mode 100644 index 000000000..bf2da1ff1 --- /dev/null +++ b/patches/mach/Cargo.toml 
@@ -0,0 +1,17 @@ +[package] +name = "mach" +version = "0.3.2" +description = "Replacement for mach which uses the mach2 implementation" +license = "MIT" +repository = "https://github.com/serai-dex/serai/tree/develop/patches/mach" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +rust-version = "1.56" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[target.'cfg(any(target_os = "macos", target_os = "ios"))'.dependencies] +mach2 = "0.4" diff --git a/patches/mach/src/lib.rs b/patches/mach/src/lib.rs new file mode 100644 index 000000000..e7e1a5a22 --- /dev/null +++ b/patches/mach/src/lib.rs @@ -0,0 +1,2 @@ +#[cfg(any(target_os = "macos", target_os = "ios"))] +pub use mach2::*; diff --git a/patches/matches/Cargo.toml b/patches/matches/Cargo.toml new file mode 100644 index 000000000..b3b16e8b1 --- /dev/null +++ b/patches/matches/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "matches" +version = "0.1.10" +description = "Replacement for the matches polyfill which uses the std impl" +license = "MIT" +repository = "https://github.com/serai-dex/serai/tree/develop/patches/matches" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +rust-version = "1.56" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] diff --git a/patches/matches/src/lib.rs b/patches/matches/src/lib.rs new file mode 100644 index 000000000..bbe0e81a4 --- /dev/null +++ b/patches/matches/src/lib.rs @@ -0,0 +1 @@ +pub use std::matches; diff --git a/patches/option-ext/Cargo.toml b/patches/option-ext/Cargo.toml new file mode 100644 index 000000000..6f039c31c --- /dev/null +++ b/patches/option-ext/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "option-ext" +version = "0.2.0" +description = "Non-MPL option-ext with the exactly needed API for directories" +license = "MIT" +repository = "https://github.com/serai-dex/serai/tree/develop/patches/option-ext" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +rust-version = "1.74" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] diff --git a/patches/option-ext/src/lib.rs b/patches/option-ext/src/lib.rs new file mode 100644 index 000000000..b075111ce --- /dev/null +++ b/patches/option-ext/src/lib.rs @@ -0,0 +1,8 @@ +pub trait OptionExt { + fn contains(&self, x: &T) -> bool; +} +impl OptionExt for Option { + fn contains(&self, x: &T) -> bool { + self.as_ref() == Some(x) + } +} diff --git a/patches/proc-macro-crate/Cargo.toml b/patches/proc-macro-crate/Cargo.toml new file mode 100644 index 000000000..1b37535a0 --- /dev/null +++ b/patches/proc-macro-crate/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "proc-macro-crate" +version = "2.0.1" +description = "Patches proc-macro-crate 2 to 3" +license = "MIT" +repository = "https://github.com/serai-dex/serai/tree/develop/patches/proc-macro-crate" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +rust-version = "1.66" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[dependencies] +proc-macro-crate = "3" diff --git a/patches/proc-macro-crate/src/lib.rs b/patches/proc-macro-crate/src/lib.rs new file mode 100644 index 000000000..4232bfb0d --- /dev/null +++ b/patches/proc-macro-crate/src/lib.rs @@ -0,0 +1 @@ +pub use proc_macro_crate::*; diff --git a/processor/Cargo.toml b/processor/Cargo.toml index 6bcb37b12..a213b983c 100644 --- a/processor/Cargo.toml +++ b/processor/Cargo.toml @@ -13,6 +13,9 @@ publish = false all-features = true rustdoc-args = ["--cfg", 
"docsrs"] +[lints] +workspace = true + [dependencies] # Macros async-trait = { version = "0.1", default-features = false } @@ -51,6 +54,7 @@ log = { version = "0.4", default-features = false, features = ["std"] } env_logger = { version = "0.10", default-features = false, features = ["humantime"], optional = true } tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] } +zalloc = { path = "../common/zalloc" } serai-db = { path = "../common/db", optional = true } serai-env = { path = "../common/env", optional = true } # TODO: Replace with direct usage of primitives diff --git a/processor/messages/Cargo.toml b/processor/messages/Cargo.toml index 951ea974f..0eba999df 100644 --- a/processor/messages/Cargo.toml +++ b/processor/messages/Cargo.toml @@ -13,6 +13,9 @@ publish = false all-features = true rustdoc-args = ["--cfg", "docsrs"] +[lints] +workspace = true + [dependencies] scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } diff --git a/processor/messages/src/lib.rs b/processor/messages/src/lib.rs index fe4f8f892..f9f29790d 100644 --- a/processor/messages/src/lib.rs +++ b/processor/messages/src/lib.rs @@ -130,8 +130,8 @@ pub mod sign { pub fn session(&self) -> Session { match self { - CoordinatorMessage::Preprocesses { id, .. } => id.session, - CoordinatorMessage::Shares { id, .. } => id.session, + CoordinatorMessage::Preprocesses { id, .. } | + CoordinatorMessage::Shares { id, .. } | CoordinatorMessage::Reattempt { id } => id.session, CoordinatorMessage::Completed { session, .. } => *session, } @@ -168,7 +168,7 @@ pub mod coordinator { )] pub enum SubstrateSignableId { CosigningSubstrateBlock([u8; 32]), - Batch([u8; 5]), + Batch(u32), } #[derive(Clone, PartialEq, Eq, Hash, Debug, Encode, Decode, BorshSerialize, BorshDeserialize)] @@ -193,12 +193,7 @@ pub mod coordinator { // network *and the local node* // This synchrony obtained lets us ignore the synchrony requirement offered here pub fn required_block(&self) -> Option { - match self { - CoordinatorMessage::CosignSubstrateBlock { .. } => None, - CoordinatorMessage::SubstratePreprocesses { .. } => None, - CoordinatorMessage::SubstrateShares { .. } => None, - CoordinatorMessage::BatchReattempt { .. } => None, - } + None } } @@ -240,7 +235,7 @@ pub mod substrate { impl CoordinatorMessage { pub fn required_block(&self) -> Option { let context = match self { - CoordinatorMessage::ConfirmKeyPair { context, .. } => context, + CoordinatorMessage::ConfirmKeyPair { context, .. } | CoordinatorMessage::SubstrateBlock { context, .. } => context, }; Some(context.network_latest_finalized_block) @@ -311,7 +306,7 @@ impl_from!(substrate, ProcessorMessage, Substrate); // Intent generation code const COORDINATOR_UID: u8 = 0; -const PROCESSSOR_UID: u8 = 1; +const PROCESSOR_UID: u8 = 1; const TYPE_KEY_GEN_UID: u8 = 2; const TYPE_SIGN_UID: u8 = 3; @@ -406,7 +401,7 @@ impl ProcessorMessage { key_gen::ProcessorMessage::Blame { id, .. } => (5, id), }; - let mut res = vec![PROCESSSOR_UID, TYPE_KEY_GEN_UID, sub]; + let mut res = vec![PROCESSOR_UID, TYPE_KEY_GEN_UID, sub]; res.extend(&id.encode()); res } @@ -420,7 +415,7 @@ impl ProcessorMessage { sign::ProcessorMessage::Completed { id, .. 
@@ -420,7 +415,7 @@ impl ProcessorMessage { sign::ProcessorMessage::Completed { id, .. } => (3, id.to_vec()), }; - let mut res = vec![PROCESSSOR_UID, TYPE_SIGN_UID, sub]; + let mut res = vec![PROCESSOR_UID, TYPE_SIGN_UID, sub]; res.extend(&id); res } @@ -435,7 +430,7 @@ impl ProcessorMessage { coordinator::ProcessorMessage::CosignedBlock { block, .. } => (5, block.encode()), }; - let mut res = vec![PROCESSSOR_UID, TYPE_COORDINATOR_UID, sub]; + let mut res = vec![PROCESSOR_UID, TYPE_COORDINATOR_UID, sub]; res.extend(&id); res } @@ -448,7 +443,7 @@ impl ProcessorMessage { } }; - let mut res = vec![PROCESSSOR_UID, TYPE_SUBSTRATE_UID, sub]; + let mut res = vec![PROCESSOR_UID, TYPE_SUBSTRATE_UID, sub]; res.extend(&id); res } } diff --git a/processor/src/batch_signer.rs b/processor/src/batch_signer.rs index 9b8cb995a..6110b84fe 100644 --- a/processor/src/batch_signer.rs +++ b/processor/src/batch_signer.rs @@ -16,7 +16,6 @@ use frost_schnorrkel::Schnorrkel; use log::{info, debug, warn}; -use scale::Encode; use serai_client::{ primitives::{NetworkId, BlockHash}, in_instructions::primitives::{Batch, SignedBatch, batch_message}, @@ -26,15 +25,10 @@ use serai_client::{ use messages::coordinator::*; use crate::{Get, DbTxn, Db, create_db}; -// Generate an ID unique to a Batch -fn batch_sign_id(network: NetworkId, id: u32) -> [u8; 5] { - (network, id).encode().try_into().unwrap() -} - create_db!( BatchSignerDb { - CompletedDb: (id: [u8; 5]) -> (), - AttemptDb: (id: [u8; 5], attempt: u32) -> (), + CompletedDb: (id: u32) -> (), + AttemptDb: (id: u32, attempt: u32) -> (), BatchDb: (block: BlockHash) -> SignedBatch } ); @@ -51,14 +45,12 @@ pub struct BatchSigner { session: Session, keys: Vec<ThresholdKeys<Ristretto>>, - signable: HashMap<[u8; 5], Batch>, - attempt: HashMap<[u8; 5], u32>, + signable: HashMap<u32, Batch>, + attempt: HashMap<u32, u32>, #[allow(clippy::type_complexity)] - preprocessing: - HashMap<[u8; 5], (Vec>, Vec)>, + preprocessing: HashMap<u32, (Vec>, Vec)>, #[allow(clippy::type_complexity)] - signing: - HashMap<[u8; 5], (AlgorithmSignatureMachine, Vec)>, + signing: HashMap<u32, (AlgorithmSignatureMachine, Vec)>, } impl fmt::Debug for BatchSigner { @@ -92,7 +84,7 @@ impl BatchSigner { } } - fn verify_id(&self, id: &SubstrateSignId) -> Result<(Session, [u8; 5], u32), ()> { + fn verify_id(&self, id: &SubstrateSignId) -> Result<(Session, u32, u32), ()> { let SubstrateSignId { session, id, attempt } = id; let SubstrateSignableId::Batch(id) = id else { panic!("BatchSigner handed non-Batch") }; @@ -104,17 +96,12 @@ // rebooted OR we detected the signed batch on chain // The latter is the expected flow for batches not actively being participated in None => { - warn!("not attempting batch {} #{}", hex::encode(id), attempt); + warn!("not attempting batch {id} #{attempt}"); Err(())?; } Some(our_attempt) => { if attempt != our_attempt { - warn!( - "sent signing data for batch {} #{} yet we have attempt #{}", - hex::encode(id), - attempt, - attempt - ); + warn!("sent signing data for batch {id} #{attempt} yet we have attempt #{our_attempt}"); Err(())?; } } @@ -124,10 +111,10 @@ } #[must_use] - async fn attempt( + fn attempt( &mut self, txn: &mut D::Transaction<'_>, - id: [u8; 5], + id: u32, attempt: u32, ) -> Option<ProcessorMessage> { // See above commentary for why this doesn't emit SignedBatch @@ -138,12 +125,7 @@ // Check if we're already working on this attempt if let Some(curr_attempt) = self.attempt.get(&id) { if curr_attempt >= &attempt { - warn!( - "told to attempt {} #{} yet we're already working on {}", - hex::encode(id), - attempt, - curr_attempt - ); + warn!("told to attempt {id} #{attempt} yet we're already working on 
{curr_attempt}"); return None; } } @@ -163,7 +145,7 @@ impl BatchSigner { // Update the attempt number self.attempt.insert(id, attempt); - info!("signing batch {} #{}", hex::encode(id), attempt); + info!("signing batch {id} #{attempt}"); // If we reboot mid-sign, the current design has us abort all signs and wait for latter // attempts/new signing protocols @@ -180,9 +162,7 @@ impl BatchSigner { // TODO: This isn't complete as this txn may not be committed with the expected timing if AttemptDb::get(txn, id, attempt).is_some() { warn!( - "already attempted batch {}, attempt #{}. this is an error if we didn't reboot", - hex::encode(id), - attempt + "already attempted batch {id}, attempt #{attempt}. this is an error if we didn't reboot" ); return None; } @@ -209,13 +189,9 @@ impl BatchSigner { } #[must_use] - pub async fn sign( - &mut self, - txn: &mut D::Transaction<'_>, - batch: Batch, - ) -> Option { + pub fn sign(&mut self, txn: &mut D::Transaction<'_>, batch: Batch) -> Option { debug_assert_eq!(self.network, batch.network); - let id = batch_sign_id(batch.network, batch.id); + let id = batch.id; if CompletedDb::get(txn, id).is_some() { debug!("Sign batch order for ID we've already completed signing"); // See batch_signed for commentary on why this simply returns @@ -223,11 +199,11 @@ impl BatchSigner { } self.signable.insert(id, batch); - self.attempt(txn, id, 0).await + self.attempt(txn, id, 0) } #[must_use] - pub async fn handle( + pub fn handle( &mut self, txn: &mut D::Transaction<'_>, msg: CoordinatorMessage, @@ -246,10 +222,7 @@ impl BatchSigner { let (machines, our_preprocesses) = match self.preprocessing.remove(&id) { // Either rebooted or RPC error, or some invariant None => { - warn!( - "not preprocessing for {}. this is an error if we didn't reboot", - hex::encode(id), - ); + warn!("not preprocessing for {id}. this is an error if we didn't reboot"); return None; } Some(preprocess) => preprocess, @@ -257,7 +230,7 @@ impl BatchSigner { let mut parsed = HashMap::new(); for l in { - let mut keys = preprocesses.keys().cloned().collect::>(); + let mut keys = preprocesses.keys().copied().collect::>(); keys.sort(); keys } { @@ -344,10 +317,7 @@ impl BatchSigner { panic!("never preprocessed yet signing?"); } - warn!( - "not preprocessing for {}. this is an error if we didn't reboot", - hex::encode(id) - ); + warn!("not preprocessing for {id}. 
this is an error if we didn't reboot"); return None; } Some(signing) => signing, @@ -355,7 +325,7 @@ impl BatchSigner { let mut parsed = HashMap::new(); for l in { - let mut keys = shares.keys().cloned().collect::>(); + let mut keys = shares.keys().copied().collect::>(); keys.sort(); keys } { @@ -399,7 +369,7 @@ impl BatchSigner { }, }; - info!("signed batch {} with attempt #{}", hex::encode(id), attempt); + info!("signed batch {id} with attempt #{attempt}"); let batch = SignedBatch { batch: self.signable.remove(&id).unwrap(), signature: sig.into() }; @@ -420,21 +390,19 @@ impl BatchSigner { let SubstrateSignableId::Batch(batch_id) = id.id else { panic!("BatchReattempt passed non-Batch ID") }; - self.attempt(txn, batch_id, id.attempt).await.map(Into::into) + self.attempt(txn, batch_id, id.attempt).map(Into::into) } } } pub fn batch_signed(&mut self, txn: &mut D::Transaction<'_>, id: u32) { - let sign_id = batch_sign_id(self.network, id); - // Stop trying to sign for this batch - CompletedDb::set(txn, sign_id, &()); + CompletedDb::set(txn, id, &()); - self.signable.remove(&sign_id); - self.attempt.remove(&sign_id); - self.preprocessing.remove(&sign_id); - self.signing.remove(&sign_id); + self.signable.remove(&id); + self.attempt.remove(&id); + self.preprocessing.remove(&id); + self.signing.remove(&id); // This doesn't emit SignedBatch because it doesn't have access to the SignedBatch // This function is expected to only be called once Substrate acknowledges this block, diff --git a/processor/src/cosigner.rs b/processor/src/cosigner.rs index 551a14c50..a324da776 100644 --- a/processor/src/cosigner.rs +++ b/processor/src/cosigner.rs @@ -16,7 +16,6 @@ use frost_schnorrkel::Schnorrkel; use log::{info, warn}; -use scale::Encode; use serai_client::validator_sets::primitives::Session; use messages::coordinator::*; @@ -115,7 +114,7 @@ impl Cosigner { } #[must_use] - pub async fn handle( + pub fn handle( &mut self, txn: &mut impl DbTxn, msg: CoordinatorMessage, @@ -151,7 +150,7 @@ impl Cosigner { let mut parsed = HashMap::new(); for l in { - let mut keys = preprocesses.keys().cloned().collect::>(); + let mut keys = preprocesses.keys().copied().collect::>(); keys.sort(); keys } { @@ -242,7 +241,7 @@ impl Cosigner { let mut parsed = HashMap::new(); for l in { - let mut keys = shares.keys().cloned().collect::>(); + let mut keys = shares.keys().copied().collect::>(); keys.sort(); keys } { diff --git a/processor/src/db.rs b/processor/src/db.rs index e02051d8e..ffd7c43ad 100644 --- a/processor/src/db.rs +++ b/processor/src/db.rs @@ -32,7 +32,7 @@ impl PendingActivationsDb { } pub fn set_pending_activation( txn: &mut impl DbTxn, - block_before_queue_block: >::Id, + block_before_queue_block: &>::Id, session: Session, key_pair: KeyPair, ) { diff --git a/processor/src/key_gen.rs b/processor/src/key_gen.rs index 9cd6657f3..f1a5b47c0 100644 --- a/processor/src/key_gen.rs +++ b/processor/src/key_gen.rs @@ -16,7 +16,6 @@ use frost::{ use log::info; -use scale::Encode; use serai_client::validator_sets::primitives::{Session, KeyPair}; use messages::key_gen::*; @@ -30,7 +29,7 @@ pub struct KeyConfirmed { create_db!( KeyGenDb { - ParamsDb: (session: &Session) -> (ThresholdParams, u16), + ParamsDb: (session: &Session, attempt: u32) -> (ThresholdParams, u16), // Not scoped to the set since that'd have latter attempts overwrite former // A former attempt may become the finalized attempt, even if it doesn't in a timely manner // Overwriting its commitments would be accordingly poor @@ -90,7 +89,7 @@ impl KeysDb { fn 
confirm_keys( txn: &mut impl DbTxn, session: Session, - key_pair: KeyPair, + key_pair: &KeyPair, ) -> (Vec>, Vec>) { let (keys_vec, keys) = GeneratedKeysDb::read_keys::( txn, @@ -153,7 +152,10 @@ impl KeyGen { pub fn in_set(&self, session: &Session) -> bool { // We determine if we're in set using if we have the parameters for a session's key generation - ParamsDb::get(&self.db, session).is_some() + // The usage of 0 for the attempt is valid so long as we aren't malicious and accordingly + // aren't fatally slashed + // TODO: Revisit once we do DKG removals for being offline + ParamsDb::get(&self.db, session, 0).is_some() } #[allow(clippy::type_complexity)] @@ -173,7 +175,7 @@ impl KeyGen { KeysDb::substrate_keys_by_session::(&self.db, session) } - pub async fn handle( + pub fn handle( &mut self, txn: &mut D::Transaction<'_>, msg: CoordinatorMessage, @@ -303,7 +305,7 @@ impl KeyGen { let mut these_shares: HashMap<_, _> = substrate_shares.drain().map(|(i, share)| (i, share.serialize())).collect(); - for (i, share) in these_shares.iter_mut() { + for (i, share) in &mut these_shares { share.extend(network_shares[i].serialize()); } shares.push(these_shares); @@ -320,7 +322,7 @@ impl KeyGen { self.active_share.remove(&id.session).is_none() { // If we haven't handled this session before, save the params - ParamsDb::set(txn, &id.session, &(params, shares)); + ParamsDb::set(txn, &id.session, id.attempt, &(params, shares)); } let (machines, commitments) = key_gen_machines(id, params, shares); @@ -339,7 +341,7 @@ impl KeyGen { panic!("commitments when already handled commitments"); } - let (params, share_quantity) = ParamsDb::get(txn, &id.session).unwrap(); + let (params, share_quantity) = ParamsDb::get(txn, &id.session, id.attempt).unwrap(); // Unwrap the machines, rebuilding them if we didn't have them in our cache // We won't if the processor rebooted @@ -374,7 +376,7 @@ impl KeyGen { CoordinatorMessage::Shares { id, shares } => { info!("Received shares for {:?}", id); - let (params, share_quantity) = ParamsDb::get(txn, &id.session).unwrap(); + let (params, share_quantity) = ParamsDb::get(txn, &id.session, id.attempt).unwrap(); // Same commentary on inconsistency as above exists let (machines, our_shares) = self.active_share.remove(&id.session).unwrap_or_else(|| { @@ -515,7 +517,7 @@ impl KeyGen { } CoordinatorMessage::VerifyBlame { id, accuser, accused, share, blame } => { - let params = ParamsDb::get(txn, &id.session).unwrap().0; + let params = ParamsDb::get(txn, &id.session, id.attempt).unwrap().0; let mut share_ref = share.as_slice(); let Ok(substrate_share) = EncryptedMessage::< @@ -570,7 +572,7 @@ impl KeyGen { .unwrap() .blame(accuser, accused, network_share, network_blame); - // If thw accused was blamed for either, mark them as at fault + // If the accused was blamed for either, mark them as at fault if (substrate_blame == accused) || (network_blame == accused) { return ProcessorMessage::Blame { id, participant: accused }; } @@ -580,11 +582,13 @@ impl KeyGen { } } - pub async fn confirm( + // This should only be called if we're participating, hence taking our instance + #[allow(clippy::unused_self)] + pub fn confirm( &mut self, txn: &mut D::Transaction<'_>, session: Session, - key_pair: KeyPair, + key_pair: &KeyPair, ) -> KeyConfirmed { info!( "Confirmed key pair {} {} for {:?}", diff --git a/processor/src/main.rs b/processor/src/main.rs index a80f93dc0..8aa7ef196 100644 --- a/processor/src/main.rs +++ b/processor/src/main.rs @@ -61,6 +61,10 @@ use multisigs::{MultisigEvent, 
MultisigManager}; #[cfg(test)] mod tests; +#[global_allocator] +static ALLOCATOR: zalloc::ZeroizingAlloc<std::alloc::System> = + zalloc::ZeroizingAlloc(std::alloc::System); + // Items which are mutably borrowed by Tributary. // Any exceptions to this have to be carefully monitored in order to ensure consistency isn't // violated. @@ -199,7 +203,7 @@ async fn handle_coordinator_msg( if tributary_mutable.key_gen.in_set(&session) { // See TributaryMutable's struct definition for why this block is safe let KeyConfirmed { substrate_keys, network_keys } = - tributary_mutable.key_gen.confirm(txn, session, key_pair.clone()).await; + tributary_mutable.key_gen.confirm(txn, session, &key_pair); if session.0 == 0 { tributary_mutable.batch_signer = Some(BatchSigner::new(N::NETWORK, session, substrate_keys)); @@ -214,7 +218,7 @@ match msg.msg.clone() { CoordinatorMessage::KeyGen(msg) => { - coordinator.send(tributary_mutable.key_gen.handle(txn, msg).await).await; + coordinator.send(tributary_mutable.key_gen.handle(txn, msg)).await; } CoordinatorMessage::Sign(msg) => { @@ -232,9 +236,7 @@ CoordinatorMessage::Coordinator(msg) => { let is_batch = match msg { CoordinatorCoordinatorMessage::CosignSubstrateBlock { .. } => false, - CoordinatorCoordinatorMessage::SubstratePreprocesses { ref id, .. } => { - matches!(&id.id, SubstrateSignableId::Batch(_)) - } + CoordinatorCoordinatorMessage::SubstratePreprocesses { ref id, .. } | CoordinatorCoordinatorMessage::SubstrateShares { ref id, .. } => { matches!(&id.id, SubstrateSignableId::Batch(_)) } @@ -248,7 +250,6 @@ "coordinator told us to sign a batch when we don't currently have a Substrate signer", ) .handle(txn, msg) - .await { coordinator.send(msg).await; } @@ -272,7 +273,7 @@ } _ => { if let Some(cosigner) = tributary_mutable.cosigner.as_mut() { - if let Some(msg) = cosigner.handle(txn, msg).await { + if let Some(msg) = cosigner.handle(txn, msg) { coordinator.send(msg).await; } } else { @@ -355,7 +356,7 @@ // Set this variable so when we get the next Batch event, we can handle it PendingActivationsDb::set_pending_activation::( txn, - block_before_queue_block, + &block_before_queue_block, session, key_pair, ); @@ -429,7 +430,7 @@ for (key, id, tx, eventuality) in to_sign { if let Some(session) = SessionDb::get(txn, key.to_bytes().as_ref()) { let signer = signers.get_mut(&session).unwrap(); - if let Some(msg) = signer.sign_transaction(txn, id, tx, eventuality).await { + if let Some(msg) = signer.sign_transaction(txn, id, tx, &eventuality).await { coordinator.send(msg).await; } } @@ -521,7 +522,7 @@ async fn boot( if plan.key == network_key { let mut txn = raw_db.txn(); if let Some(msg) = - signer.sign_transaction(&mut txn, plan.id(), tx.clone(), eventuality.clone()).await + signer.sign_transaction(&mut txn, plan.id(), tx.clone(), eventuality).await { coordinator.send(msg).await; } @@ -562,6 +563,8 @@ async fn run(mut raw_db: D, network: N, mut loop { let mut txn = raw_db.txn(); + log::trace!("new db txn in run"); + let mut outer_msg = None; tokio::select! 
{ @@ -622,7 +625,7 @@ async fn run(mut raw_db: D, network: N, mut ).await; if let Some(batch_signer) = tributary_mutable.batch_signer.as_mut() { - if let Some(msg) = batch_signer.sign(&mut txn, batch).await { + if let Some(msg) = batch_signer.sign(&mut txn, batch) { coordinator.send(msg).await; } } @@ -644,7 +647,7 @@ async fn run(mut raw_db: D, network: N, mut MultisigEvent::Completed(key, id, tx) => { if let Some(session) = SessionDb::get(&txn, &key) { let signer = tributary_mutable.signers.get_mut(&session).unwrap(); - if let Some(msg) = signer.completed(&mut txn, id, tx) { + if let Some(msg) = signer.completed(&mut txn, id, &tx) { coordinator.send(msg).await; } } diff --git a/processor/src/multisigs/db.rs b/processor/src/multisigs/db.rs index 0a5fbbf08..51287a0e8 100644 --- a/processor/src/multisigs/db.rs +++ b/processor/src/multisigs/db.rs @@ -102,7 +102,7 @@ impl ResolvedDb { txn: &mut impl DbTxn, key: &[u8], plan: [u8; 32], - resolution: >::Id, + resolution: &>::Id, ) { let mut signing = SigningDb::get(txn, key).unwrap_or_default(); assert_eq!(signing.len() % 32, 0); @@ -113,7 +113,7 @@ impl ResolvedDb { let end = i + 32; if signing[start .. end] == plan { found = true; - signing = [&signing[.. start], &signing[end ..]].concat().to_vec(); + signing = [&signing[.. start], &signing[end ..]].concat(); break; } } @@ -160,7 +160,7 @@ impl PlansFromScanningDb { } impl ForwardedOutputDb { - pub fn save_forwarded_output(txn: &mut impl DbTxn, instruction: InInstructionWithBalance) { + pub fn save_forwarded_output(txn: &mut impl DbTxn, instruction: &InInstructionWithBalance) { let mut existing = Self::get(txn, instruction.balance).unwrap_or_default(); existing.extend(instruction.encode()); Self::set(txn, instruction.balance, &existing); @@ -184,7 +184,7 @@ impl ForwardedOutputDb { } impl DelayedOutputDb { - pub fn save_delayed_output(txn: &mut impl DbTxn, instruction: InInstructionWithBalance) { + pub fn save_delayed_output(txn: &mut impl DbTxn, instruction: &InInstructionWithBalance) { let mut existing = Self::get(txn).unwrap_or_default(); existing.extend(instruction.encode()); Self::set(txn, &existing); diff --git a/processor/src/multisigs/mod.rs b/processor/src/multisigs/mod.rs index 264c373fe..a6e8bbc9d 100644 --- a/processor/src/multisigs/mod.rs +++ b/processor/src/multisigs/mod.rs @@ -7,7 +7,7 @@ use scale::{Encode, Decode}; use messages::SubstrateContext; use serai_client::{ - primitives::{MAX_DATA_LEN, NetworkId, Coin, ExternalAddress, BlockHash}, + primitives::{MAX_DATA_LEN, NetworkId, Coin, ExternalAddress, BlockHash, Data}, in_instructions::primitives::{ InInstructionWithBalance, Batch, RefundableInInstruction, Shorthand, MAX_BATCH_SIZE, }, @@ -198,12 +198,12 @@ impl MultisigManager { ( MultisigManager { scanner, - existing: current_keys.first().cloned().map(|(activation_block, key)| MultisigViewer { + existing: current_keys.first().copied().map(|(activation_block, key)| MultisigViewer { activation_block, key, scheduler: schedulers.remove(0), }), - new: current_keys.get(1).cloned().map(|(activation_block, key)| MultisigViewer { + new: current_keys.get(1).copied().map(|(activation_block, key)| MultisigViewer { activation_block, key, scheduler: schedulers.remove(0), @@ -316,7 +316,7 @@ impl MultisigManager { assert_eq!(balance.coin.network(), N::NETWORK); if let Ok(address) = N::Address::try_from(address.consume()) { - payments.push(Payment { address, data: data.map(|data| data.consume()), balance }); + payments.push(Payment { address, data: data.map(Data::consume), balance }); } } 
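Several of the surrounding hunks (the refund handling below, the scanner's get_block, the scheduler's branch completion) rewrite match-on-Option/Result bindings into let-else form, which binds the happy path and diverges otherwise. A minimal standalone illustration of the equivalence (names are illustrative, not from this codebase):

// Before: bind through a match whose failure arm diverges.
fn first_line_match(s: &str) -> Option<&str> {
  let line = match s.lines().next() {
    Some(line) => line,
    None => return None,
  };
  Some(line.trim())
}

// After: let-else binds the value, or runs the diverging else block.
fn first_line_let_else(s: &str) -> Option<&str> {
  let Some(line) = s.lines().next() else { return None };
  Some(line.trim())
}

Both behave identically; the let-else form drops a level of nesting, which is all these hunks change.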
@@ -513,7 +513,7 @@ impl MultisigManager { let mut plans = vec![]; existing_outputs.retain(|output| { match output.kind() { - OutputType::External => false, + OutputType::External | OutputType::Forwarded => false, OutputType::Branch => { let scheduler = &mut self.existing.as_mut().unwrap().scheduler; // There *would* be a race condition here due to the fact we only mark a `Branch` output @@ -576,7 +576,6 @@ impl MultisigManager { } false } - OutputType::Forwarded => false, } }); plans @@ -873,7 +872,7 @@ impl MultisigManager { // letting it die out if let Some(tx) = &tx { instruction.balance.amount.0 -= tx.0.fee(); - ForwardedOutputDb::save_forwarded_output(txn, instruction); + ForwardedOutputDb::save_forwarded_output(txn, &instruction); } } else if let Some(refund_to) = refund_to { if let Ok(refund_to) = refund_to.consume().try_into() { @@ -907,9 +906,7 @@ impl MultisigManager { } let (refund_to, instruction) = instruction_from_output::(&output); - let instruction = if let Some(instruction) = instruction { - instruction - } else { + let Some(instruction) = instruction else { if let Some(refund_to) = refund_to { if let Ok(refund_to) = refund_to.consume().try_into() { plans.push(Self::refund_plan(output.clone(), refund_to)); @@ -922,7 +919,7 @@ impl MultisigManager { if Some(output.key()) == self.new.as_ref().map(|new| new.key) { match step { RotationStep::UseExisting => { - DelayedOutputDb::save_delayed_output(txn, instruction); + DelayedOutputDb::save_delayed_output(txn, &instruction); continue; } RotationStep::NewAsChange | @@ -1003,7 +1000,7 @@ impl MultisigManager { // within the block. Unknown Eventualities may have their Completed events emitted after // ScannerEvent::Block however. ScannerEvent::Completed(key, block_number, id, tx) => { - ResolvedDb::resolve_plan::(txn, &key, id, tx.id()); + ResolvedDb::resolve_plan::(txn, &key, id, &tx.id()); (block_number, MultisigEvent::Completed(key, id, tx)) } }; diff --git a/processor/src/multisigs/scanner.rs b/processor/src/multisigs/scanner.rs index f25867e75..cefa8a255 100644 --- a/processor/src/multisigs/scanner.rs +++ b/processor/src/multisigs/scanner.rs @@ -415,7 +415,7 @@ impl Scanner { ) } - async fn emit(&mut self, event: ScannerEvent) -> bool { + fn emit(&mut self, event: ScannerEvent) -> bool { if self.events.send(event).is_err() { info!("Scanner handler was dropped. 
Shutting down?"); return false; @@ -484,7 +484,7 @@ impl Scanner { let needing_ack = { let scanner_lock = scanner_hold.read().await; let scanner = scanner_lock.as_ref().unwrap(); - scanner.need_ack.front().cloned() + scanner.need_ack.front().copied() }; if let Some(needing_ack) = needing_ack { @@ -496,12 +496,9 @@ impl Scanner { } } - let block = match network.get_block(block_being_scanned).await { - Ok(block) => block, - Err(_) => { - warn!("couldn't get block {block_being_scanned}"); - break; - } + let Ok(block) = network.get_block(block_being_scanned).await else { + warn!("couldn't get block {block_being_scanned}"); + break; }; let block_id = block.id(); @@ -550,7 +547,7 @@ impl Scanner { let key_vec = key.to_bytes().as_ref().to_vec(); - // TODO: These lines are the ones which will cause a really long-lived lock acquisiton + // TODO: These lines are the ones which will cause a really long-lived lock acquisition for output in network.get_outputs(&block, key).await { assert_eq!(output.key(), key); if output.balance().amount.0 >= N::DUST { @@ -570,7 +567,7 @@ impl Scanner { completion_block_numbers.push(block_number); // This must be before the mission of ScannerEvent::Block, per commentary in mod.rs - if !scanner.emit(ScannerEvent::Completed(key_vec.clone(), block_number, id, tx)).await { + if !scanner.emit(ScannerEvent::Completed(key_vec.clone(), block_number, id, tx)) { return; } } @@ -687,10 +684,7 @@ impl Scanner { txn.commit(); // Send all outputs - if !scanner - .emit(ScannerEvent::Block { is_retirement_block, block: block_id, outputs }) - .await - { + if !scanner.emit(ScannerEvent::Block { is_retirement_block, block: block_id, outputs }) { return; } diff --git a/processor/src/multisigs/scheduler.rs b/processor/src/multisigs/scheduler.rs index 4a7d980d8..abc81a80b 100644 --- a/processor/src/multisigs/scheduler.rs +++ b/processor/src/multisigs/scheduler.rs @@ -18,7 +18,7 @@ pub struct Scheduler { key: ::G, coin: Coin, - // Serai, when it has more outputs expected than it can handle in a single tranaction, will + // Serai, when it has more outputs expected than it can handle in a single transaction, will // schedule the outputs to be handled later. 
Immediately, it just creates additional outputs // which will eventually handle those outputs // @@ -197,7 +197,6 @@ impl Scheduler { let mut add_plan = |payments| { let amount = payment_amounts(&payments); - #[allow(clippy::unwrap_or_default)] self.queued_plans.entry(amount).or_insert(VecDeque::new()).push_back(payments); amount }; @@ -322,7 +321,7 @@ impl Scheduler { // If we don't have UTXOs available, don't try to continue if self.utxos.is_empty() { - log::info!("no utxos currently avilable"); + log::info!("no utxos currently available"); return plans; } @@ -336,7 +335,7 @@ impl Scheduler { // Since we do multiple aggregation TXs at once, this will execute in logarithmic time let utxos = self.utxos.drain(..).collect::>(); let mut utxo_chunks = - utxos.chunks(N::MAX_INPUTS).map(|chunk| chunk.to_vec()).collect::>(); + utxos.chunks(N::MAX_INPUTS).map(<[::Output]>::to_vec).collect::>(); // Use the first chunk for any scheduled payments, since it has the most value let utxos = utxo_chunks.remove(0); @@ -457,10 +456,7 @@ impl Scheduler { } // If we didn't actually create this output, return, dropping the child payments - let actual = match actual { - Some(actual) => actual, - None => return, - }; + let Some(actual) = actual else { return }; // Amortize the fee amongst all payments underneath this branch { @@ -474,7 +470,7 @@ impl Scheduler { let per_payment = to_amortize / payments_len; let mut overage = to_amortize % payments_len; - for payment in payments.iter_mut() { + for payment in &mut payments { let to_subtract = per_payment + overage; // Only subtract the overage once overage = 0; @@ -499,7 +495,6 @@ impl Scheduler { return; } - #[allow(clippy::unwrap_or_default)] self.plans.entry(actual).or_insert(VecDeque::new()).push_back(payments); // TODO2: This shows how ridiculous the serialize function is diff --git a/processor/src/networks/bitcoin.rs b/processor/src/networks/bitcoin.rs index 873b3e9e1..c3cffdbea 100644 --- a/processor/src/networks/bitcoin.rs +++ b/processor/src/networks/bitcoin.rs @@ -385,7 +385,7 @@ impl Bitcoin { } } fees.sort(); - let fee = fees.get(fees.len() / 2).cloned().unwrap_or(0); + let fee = fees.get(fees.len() / 2).copied().unwrap_or(0); // The DUST constant documentation notes a relay rule practically enforcing a // 1000 sat/kilo-vbyte minimum fee. 
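The scheduler hunk above amortizes a branch's fee across its payments: an even per-payment share, plus the integer-division remainder (the overage) charged once to the first payment. A simplified standalone sketch of that arithmetic (function name illustrative; this ignores the surrounding dust handling):

// Amortizing a fee of 10 over 3 payments subtracts 3 + (10 % 3) = 4 from the
// first payment and 3 from each other one: 4 + 3 + 3 = 10, so nothing is lost
// to integer division. Assumes at least one payment.
fn amortize_fee(to_amortize: u64, payment_amounts: &mut [u64]) {
  let payments_len = u64::try_from(payment_amounts.len()).unwrap();
  let per_payment = to_amortize / payments_len;
  let mut overage = to_amortize % payments_len;
  for amount in payment_amounts.iter_mut() {
    let to_subtract = per_payment + overage;
    // Only subtract the overage once
    overage = 0;
    *amount = amount.saturating_sub(to_subtract);
  }
}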
@@ -454,7 +454,7 @@ impl Bitcoin { match BSignableTransaction::new( inputs.iter().map(|input| input.output.clone()).collect(), &payments, - change.as_ref().map(|change| change.0.clone()), + change.as_ref().map(|change| &change.0), None, fee.0, ) { @@ -462,16 +462,14 @@ impl Bitcoin { Err(TransactionError::NoInputs) => { panic!("trying to create a bitcoin transaction without inputs") } - // No outputs left and the change isn't worth enough - Err(TransactionError::NoOutputs) => Ok(None), + // No outputs left and the change isn't worth enough/not even enough funds to pay the fee + Err(TransactionError::NoOutputs | TransactionError::NotEnoughFunds) => Ok(None), // amortize_fee removes payments which fall below the dust threshold Err(TransactionError::DustPayment) => panic!("dust payment despite removing dust"), Err(TransactionError::TooMuchData) => panic!("too much data despite not specifying data"), Err(TransactionError::TooLowFee) => { panic!("created a transaction whose fee is below the minimum") } - // Mot even enough funds to pay the fee - Err(TransactionError::NotEnoughFunds) => Ok(None), Err(TransactionError::TooLargeTransaction) => { panic!("created a too large transaction despite limiting inputs/outputs") } @@ -615,14 +613,14 @@ impl Network for Bitcoin { // The output should be ~36 bytes, or 144 weight units // The overhead should be ~20 bytes at most, or 80 weight units // 684 weight units, 171 vbytes, round up to 200 - // 200 vbytes at 1 sat/weight (our current minumum fee, 4 sat/vbyte) = 800 sat fee for the + // 200 vbytes at 1 sat/weight (our current minimum fee, 4 sat/vbyte) = 800 sat fee for the // aggregation TX const COST_TO_AGGREGATE: u64 = 800; // Bitcoin has a max weight of 400,000 (MAX_STANDARD_TX_WEIGHT) // A non-SegWit TX will have 4 weight units per byte, leaving a max size of 100,000 bytes // While our inputs are entirely SegWit, such fine tuning is not necessary and could create - // issues in the future (if the size decreases or we mis-evaluate it) + // issues in the future (if the size decreases or we misevaluate it) // It also offers a minimal amount of benefit when we are able to logarithmically accumulate // inputs // For 128-byte inputs (36-byte output specification, 64-byte signature, whatever overhead) and @@ -712,7 +710,7 @@ impl Network for Bitcoin { return res; } - async fn check_block( + fn check_block( eventualities: &mut EventualitiesTracker, block: &Block, res: &mut HashMap<[u8; 32], (usize, Transaction)>, @@ -753,11 +751,11 @@ impl Network for Bitcoin { block.unwrap() }; - check_block(eventualities, &block, &mut res).await; + check_block(eventualities, &block, &mut res); } // Also check the current block - check_block(eventualities, block, &mut res).await; + check_block(eventualities, block, &mut res); assert_eq!(eventualities.block_number, this_block_num); res @@ -808,7 +806,7 @@ impl Network for Bitcoin { transaction .actual .clone() - .multisig(keys.clone(), transaction.transcript) + .multisig(&keys, transaction.transcript) .expect("used the wrong keys"), ) } diff --git a/processor/src/networks/monero.rs b/processor/src/networks/monero.rs index d2ac279d6..1659bd5a8 100644 --- a/processor/src/networks/monero.rs +++ b/processor/src/networks/monero.rs @@ -174,9 +174,9 @@ impl BlockTrait for Block { const BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW: u64 = 60; // If Monero doesn't have enough blocks to build a window, it doesn't define a network time - if (u64::try_from(self.number()).unwrap() + 1) < BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW { + if 
(self.number().unwrap() + 1) < BLOCKCHAIN_TIMESTAMP_CHECK_WINDOW { // Use the block number as the time - return self.number().try_into().unwrap(); + return self.number().unwrap(); } let mut timestamps = vec![self.header.timestamp]; @@ -194,7 +194,7 @@ impl BlockTrait for Block { timestamps.push(parent_block.header.timestamp); parent = parent_block.parent(); - if parent_block.number() == 0 { + if parent_block.number().unwrap() == 0 { break; } } @@ -212,7 +212,7 @@ impl BlockTrait for Block { // Monero also solely requires the block's time not be less than the median, it doesn't ensure // it advances the median forward // Ensure monotonicity despite both these issues by adding the block number to the median time - res + u64::try_from(self.number()).unwrap() + res + self.number().unwrap() } } @@ -229,6 +229,7 @@ impl PartialEq for Monero { } impl Eq for Monero {} +#[allow(clippy::needless_pass_by_value)] // Needed to satisfy API expectations fn map_rpc_err(err: RpcError) -> NetworkError { if let RpcError::InvalidNode(reason) = &err { log::error!("Monero RpcError::InvalidNode({reason})"); @@ -285,7 +286,7 @@ impl Monero { fees.push(tx.rct_signatures.base.fee / u64::try_from(tx.serialize().len()).unwrap()); } fees.sort(); - let fee = fees.get(fees.len() / 2).cloned().unwrap_or(0); + let fee = fees.get(fees.len() / 2).copied().unwrap_or(0); // TODO: Set a sane minimum fee Ok(Fee { per_weight: fee.max(1500000), mask: 10000 }) @@ -384,7 +385,7 @@ impl Monero { Some(Zeroizing::new(*plan_id)), inputs.clone(), payments, - Change::fingerprintable(change.as_ref().map(|change| change.clone().into())), + &Change::fingerprintable(change.as_ref().map(|change| change.clone().into())), vec![], fee_rate, ) { @@ -584,16 +585,21 @@ impl Network for Monero { if let Some((_, eventuality)) = eventualities.map.get(&tx.prefix.extra) { if eventuality.matches(&tx) { - res.insert(eventualities.map.remove(&tx.prefix.extra).unwrap().0, (block.number(), tx)); + res.insert( + eventualities.map.remove(&tx.prefix.extra).unwrap().0, + (usize::try_from(block.number().unwrap()).unwrap(), tx), + ); } } } eventualities.block_number += 1; - assert_eq!(eventualities.block_number, block.number()); + assert_eq!(eventualities.block_number, usize::try_from(block.number().unwrap()).unwrap()); } - for block_num in (eventualities.block_number + 1) .. block.number() { + for block_num in + (eventualities.block_number + 1) .. usize::try_from(block.number().unwrap()).unwrap() + { let block = { let mut block; while { @@ -611,7 +617,7 @@ impl Network for Monero { // Also check the current block check_block(self, eventualities, block, &mut res).await; - assert_eq!(eventualities.block_number, block.number()); + assert_eq!(eventualities.block_number, usize::try_from(block.number().unwrap()).unwrap()); res } @@ -657,7 +663,7 @@ impl Network for Monero { keys: ThresholdKeys, transaction: SignableTransaction, ) -> Result { - match transaction.actual.clone().multisig(keys, transaction.transcript) { + match transaction.actual.clone().multisig(&keys, transaction.transcript) { Ok(machine) => Ok(machine), Err(e) => panic!("failed to create a multisig machine for TX: {e}"), } @@ -665,7 +671,7 @@ impl Network for Monero { async fn publish_transaction(&self, tx: &Self::Transaction) -> Result<(), NetworkError> { match self.rpc.publish_transaction(tx).await { - Ok(_) => Ok(()), + Ok(()) => Ok(()), Err(RpcError::ConnectionError(e)) => { log::debug!("Monero ConnectionError: {e}"); Err(NetworkError::ConnectionError)? 
@@ -686,7 +692,7 @@ impl Network for Monero { #[cfg(test)] async fn get_block_number(&self, id: &[u8; 32]) -> usize { - self.rpc.get_block(*id).await.unwrap().number() + self.rpc.get_block(*id).await.unwrap().number().unwrap().try_into().unwrap() } #[cfg(test)] @@ -753,7 +759,7 @@ impl Network for Monero { None, inputs, vec![(address.into(), amount - fee)], - Change::fingerprintable(Some(Self::test_address().into())), + &Change::fingerprintable(Some(Self::test_address().into())), vec![], self.rpc.get_fee(protocol, FeePriority::Low).await.unwrap(), ) diff --git a/processor/src/plan.rs b/processor/src/plan.rs index 35146a9cb..3e10c7d39 100644 --- a/processor/src/plan.rs +++ b/processor/src/plan.rs @@ -77,7 +77,7 @@ impl Payment { pub struct Plan { pub key: ::G, pub inputs: Vec, - /// The payments this Plan is inteded to create. + /// The payments this Plan is intended to create. /// /// This should only contain payments leaving Serai. While it is acceptable for users to enter /// Serai's address(es) as the payment address, as that'll be handled by anything which expects @@ -99,7 +99,7 @@ impl core::fmt::Debug for Plan { .field("key", &hex::encode(self.key.to_bytes())) .field("inputs", &self.inputs) .field("payments", &self.payments) - .field("change", &self.change.as_ref().map(|change| change.to_string())) + .field("change", &self.change.as_ref().map(ToString::to_string)) .finish() } } @@ -152,7 +152,7 @@ impl Plan { let change = if let Some(change) = &self.change { change.clone().try_into().map_err(|_| { io::Error::other(format!( - "an address we said to use as change couldn't be convered to a Vec: {}", + "an address we said to use as change couldn't be converted to a Vec: {}", change.to_string(), )) })? diff --git a/processor/src/signer.rs b/processor/src/signer.rs index 3fcb0d70b..7a4fcbedb 100644 --- a/processor/src/signer.rs +++ b/processor/src/signer.rs @@ -10,7 +10,6 @@ use frost::{ use log::{info, debug, warn, error}; -use scale::Encode; use serai_client::validator_sets::primitives::Session; use messages::sign::*; @@ -98,7 +97,11 @@ impl CompletionsDb { } impl EventualityDb { - fn save_eventuality(txn: &mut impl DbTxn, id: [u8; 32], eventuality: N::Eventuality) { + fn save_eventuality( + txn: &mut impl DbTxn, + id: [u8; 32], + eventuality: &N::Eventuality, + ) { txn.put(Self::key(id), eventuality.serialize()); } @@ -114,7 +117,7 @@ impl TransactionDb { fn transaction( getter: &impl Get, - id: >::Id, + id: &>::Id, ) -> Option { Self::get(getter, id.as_ref()).map(|tx| N::Transaction::read(&mut tx.as_slice()).unwrap()) } @@ -165,7 +168,7 @@ impl Signer { log::info!("rebroadcasting {}", hex::encode(&completion)); // TODO: Don't drop the error entirely. 
Check for invariants let _ = network - .publish_transaction(&TransactionDb::transaction::(&db, completion).unwrap()) + .publish_transaction(&TransactionDb::transaction::(&db, &completion).unwrap()) .await; } } @@ -222,7 +225,7 @@ impl Signer { } #[must_use] - fn already_completed(&self, txn: &mut D::Transaction<'_>, id: [u8; 32]) -> bool { + fn already_completed(txn: &mut D::Transaction<'_>, id: [u8; 32]) -> bool { if !CompletionsDb::completions::(txn, id).is_empty() { debug!( "SignTransaction/Reattempt order for {}, which we've already completed signing", @@ -239,7 +242,7 @@ impl Signer { fn complete( &mut self, id: [u8; 32], - tx_id: >::Id, + tx_id: &>::Id, ) -> ProcessorMessage { // Assert we're actively signing for this TX assert!(self.signable.remove(&id).is_some(), "completed a TX we weren't signing for"); @@ -261,16 +264,16 @@ impl Signer { &mut self, txn: &mut D::Transaction<'_>, id: [u8; 32], - tx: N::Transaction, + tx: &N::Transaction, ) -> Option { - let first_completion = !self.already_completed(txn, id); + let first_completion = !Self::already_completed(txn, id); // Save this completion to the DB CompletedOnChainDb::complete_on_chain(txn, &id); - CompletionsDb::complete::(txn, id, &tx); + CompletionsDb::complete::(txn, id, tx); if first_completion { - Some(self.complete(id, tx.id())) + Some(self.complete(id, &tx.id())) } else { None } @@ -303,13 +306,13 @@ impl Signer { if self.network.confirm_completion(&eventuality, &tx) { info!("signer eventuality for {} resolved in TX {}", hex::encode(id), hex::encode(tx_id)); - let first_completion = !self.already_completed(txn, id); + let first_completion = !Self::already_completed(txn, id); // Save this completion to the DB CompletionsDb::complete::(txn, id, &tx); if first_completion { - return Some(self.complete(id, tx.id())); + return Some(self.complete(id, &tx.id())); } } else { warn!( @@ -338,7 +341,7 @@ impl Signer { id: [u8; 32], attempt: u32, ) -> Option { - if self.already_completed(txn, id) { + if Self::already_completed(txn, id) { return None; } @@ -428,13 +431,13 @@ impl Signer { txn: &mut D::Transaction<'_>, id: [u8; 32], tx: N::SignableTransaction, - eventuality: N::Eventuality, + eventuality: &N::Eventuality, ) -> Option { // The caller is expected to re-issue sign orders on reboot // This is solely used by the rebroadcast task ActiveSignsDb::add_active_sign(txn, &id); - if self.already_completed(txn, id) { + if Self::already_completed(txn, id) { return None; } @@ -470,7 +473,7 @@ impl Signer { let mut parsed = HashMap::new(); for l in { - let mut keys = preprocesses.keys().cloned().collect::>(); + let mut keys = preprocesses.keys().copied().collect::>(); keys.sort(); keys } { @@ -550,7 +553,7 @@ impl Signer { let mut parsed = HashMap::new(); for l in { - let mut keys = shares.keys().cloned().collect::>(); + let mut keys = shares.keys().copied().collect::>(); keys.sort(); keys } { @@ -597,7 +600,7 @@ impl Signer { } // Stop trying to sign for this TX - Some(self.complete(id.id, tx_id)) + Some(self.complete(id.id, &tx_id)) } CoordinatorMessage::Reattempt { id } => self.attempt(txn, id.id, id.attempt).await, diff --git a/processor/src/tests/addresses.rs b/processor/src/tests/addresses.rs index 8d7baa9ed..da20091be 100644 --- a/processor/src/tests/addresses.rs +++ b/processor/src/tests/addresses.rs @@ -78,7 +78,7 @@ async fn spend( pub async fn test_addresses(network: N) { let mut keys = frost::tests::key_gen::<_, N::Curve>(&mut OsRng); - for (_, keys) in keys.iter_mut() { + for keys in keys.values_mut() { 
N::tweak_keys(keys); } let key = keys[&Participant::new(1).unwrap()].group_key(); diff --git a/processor/src/tests/batch_signer.rs b/processor/src/tests/batch_signer.rs index 0564db5ae..dc45ff312 100644 --- a/processor/src/tests/batch_signer.rs +++ b/processor/src/tests/batch_signer.rs @@ -13,7 +13,6 @@ use sp_application_crypto::{RuntimePublic, sr25519::Public}; use serai_db::{DbTxn, Db, MemDb}; -use scale::Encode; #[rustfmt::skip] use serai_client::{primitives::*, in_instructions::primitives::*, validator_sets::primitives::Session}; @@ -24,8 +23,8 @@ use messages::{ }; use crate::batch_signer::BatchSigner; -#[tokio::test] -async fn test_batch_signer() { +#[test] +fn test_batch_signer() { let keys = key_gen::<_, Ristretto>(&mut OsRng); let participant_one = Participant::new(1).unwrap(); @@ -49,11 +48,8 @@ async fn test_batch_signer() { ], }; - let actual_id = SubstrateSignId { - session: Session(0), - id: SubstrateSignableId::Batch((batch.network, batch.id).encode().try_into().unwrap()), - attempt: 0, - }; + let actual_id = + SubstrateSignId { session: Session(0), id: SubstrateSignableId::Batch(batch.id), attempt: 0 }; let mut signing_set = vec![]; while signing_set.len() < usize::from(keys.values().next().unwrap().params().t()) { @@ -78,7 +74,7 @@ async fn test_batch_signer() { let mut db = MemDb::new(); let mut txn = db.txn(); - match signer.sign(&mut txn, batch.clone()).await.unwrap() { + match signer.sign(&mut txn, batch.clone()).unwrap() { // All participants should emit a preprocess coordinator::ProcessorMessage::BatchPreprocess { id, @@ -113,7 +109,6 @@ async fn test_batch_signer() { preprocesses: clone_without(&preprocesses, i), }, ) - .await .unwrap() { ProcessorMessage::Coordinator(coordinator::ProcessorMessage::SubstrateShare { @@ -141,7 +136,6 @@ async fn test_batch_signer() { shares: clone_without(&shares, i), }, ) - .await .unwrap() { ProcessorMessage::Substrate(substrate::ProcessorMessage::SignedBatch { diff --git a/processor/src/tests/cosigner.rs b/processor/src/tests/cosigner.rs index b7cc1a80a..a66161bf7 100644 --- a/processor/src/tests/cosigner.rs +++ b/processor/src/tests/cosigner.rs @@ -18,8 +18,8 @@ use serai_client::{primitives::*, validator_sets::primitives::Session}; use messages::coordinator::*; use crate::cosigner::Cosigner; -#[tokio::test] -async fn test_cosigner() { +#[test] +fn test_cosigner() { let keys = key_gen::<_, Ristretto>(&mut OsRng); let participant_one = Participant::new(1).unwrap(); @@ -88,7 +88,6 @@ async fn test_cosigner() { preprocesses: clone_without(&preprocesses, i), }, ) - .await .unwrap() { ProcessorMessage::SubstrateShare { id, shares: mut these_shares } => { @@ -113,7 +112,6 @@ async fn test_cosigner() { shares: clone_without(&shares, i), }, ) - .await .unwrap() { ProcessorMessage::CosignedBlock { block_number, block: signed_block, signature } => { diff --git a/processor/src/tests/key_gen.rs b/processor/src/tests/key_gen.rs index beb158da4..047e006ac 100644 --- a/processor/src/tests/key_gen.rs +++ b/processor/src/tests/key_gen.rs @@ -20,7 +20,7 @@ use crate::{ const ID: KeyGenId = KeyGenId { session: Session(1), attempt: 3 }; -pub async fn test_key_gen() { +pub fn test_key_gen() { let mut entropies = HashMap::new(); let mut dbs = HashMap::new(); let mut key_gens = HashMap::new(); @@ -37,18 +37,15 @@ pub async fn test_key_gen() { for i in 1 ..= 5 { let key_gen = key_gens.get_mut(&i).unwrap(); let mut txn = dbs.get_mut(&i).unwrap().txn(); - if let ProcessorMessage::Commitments { id, mut commitments } = key_gen - .handle( - &mut txn, - 
CoordinatorMessage::GenerateKey { - id: ID, - params: ThresholdParams::new(3, 5, Participant::new(u16::try_from(i).unwrap()).unwrap()) - .unwrap(), - shares: 1, - }, - ) - .await - { + if let ProcessorMessage::Commitments { id, mut commitments } = key_gen.handle( + &mut txn, + CoordinatorMessage::GenerateKey { + id: ID, + params: ThresholdParams::new(3, 5, Participant::new(u16::try_from(i).unwrap()).unwrap()) + .unwrap(), + shares: 1, + }, + ) { assert_eq!(id, ID); assert_eq!(commitments.len(), 1); all_commitments @@ -74,16 +71,10 @@ pub async fn test_key_gen() { let key_gen = key_gens.get_mut(&i).unwrap(); let mut txn = dbs.get_mut(&i).unwrap().txn(); let i = Participant::new(u16::try_from(i).unwrap()).unwrap(); - if let ProcessorMessage::Shares { id, mut shares } = key_gen - .handle( - &mut txn, - CoordinatorMessage::Commitments { - id: ID, - commitments: clone_without(&all_commitments, &i), - }, - ) - .await - { + if let ProcessorMessage::Shares { id, mut shares } = key_gen.handle( + &mut txn, + CoordinatorMessage::Commitments { id: ID, commitments: clone_without(&all_commitments, &i) }, + ) { assert_eq!(id, ID); assert_eq!(shares.len(), 1); all_shares.insert(i, shares.swap_remove(0)); @@ -102,19 +93,16 @@ pub async fn test_key_gen() { let key_gen = key_gens.get_mut(&i).unwrap(); let mut txn = dbs.get_mut(&i).unwrap().txn(); let i = Participant::new(u16::try_from(i).unwrap()).unwrap(); - if let ProcessorMessage::GeneratedKeyPair { id, substrate_key, network_key } = key_gen - .handle( - &mut txn, - CoordinatorMessage::Shares { - id: ID, - shares: vec![all_shares - .iter() - .filter_map(|(l, shares)| if i == *l { None } else { Some((*l, shares[&i].clone())) }) - .collect()], - }, - ) - .await - { + if let ProcessorMessage::GeneratedKeyPair { id, substrate_key, network_key } = key_gen.handle( + &mut txn, + CoordinatorMessage::Shares { + id: ID, + shares: vec![all_shares + .iter() + .filter_map(|(l, shares)| if i == *l { None } else { Some((*l, shares[&i].clone())) }) + .collect()], + }, + ) { assert_eq!(id, ID); if res.is_none() { res = Some((substrate_key, network_key.clone())); @@ -134,13 +122,11 @@ pub async fn test_key_gen() { for i in 1 ..= 5 { let key_gen = key_gens.get_mut(&i).unwrap(); let mut txn = dbs.get_mut(&i).unwrap().txn(); - let KeyConfirmed { mut substrate_keys, mut network_keys } = key_gen - .confirm( - &mut txn, - ID.session, - KeyPair(sr25519::Public(res.0), res.1.clone().try_into().unwrap()), - ) - .await; + let KeyConfirmed { mut substrate_keys, mut network_keys } = key_gen.confirm( + &mut txn, + ID.session, + &KeyPair(sr25519::Public(res.0), res.1.clone().try_into().unwrap()), + ); txn.commit(); assert_eq!(substrate_keys.len(), 1); diff --git a/processor/src/tests/mod.rs b/processor/src/tests/mod.rs index 2454acbbd..974be10b5 100644 --- a/processor/src/tests/mod.rs +++ b/processor/src/tests/mod.rs @@ -46,7 +46,7 @@ macro_rules! 
test_network { #[tokio::test] async fn $key_gen() { init_logger(); - test_key_gen::<$N>().await; + test_key_gen::<$N>(); } #[test] diff --git a/processor/src/tests/scanner.rs b/processor/src/tests/scanner.rs index 88cfe2933..5aad5bb51 100644 --- a/processor/src/tests/scanner.rs +++ b/processor/src/tests/scanner.rs @@ -121,7 +121,7 @@ pub async fn test_no_deadlock_in_multisig_completed(network: N) { network.get_latest_block_number().await.unwrap() + N::CONFIRMATIONS + i, { let mut keys = key_gen(&mut OsRng); - for (_, keys) in keys.iter_mut() { + for keys in keys.values_mut() { N::tweak_keys(keys); } keys[&Participant::new(1).unwrap()].group_key() @@ -154,7 +154,7 @@ pub async fn test_no_deadlock_in_multisig_completed(network: N) { } }; - // The ack_block acquisiton shows the Scanner isn't maintaining the lock on its own thread after + // The ack_block acquisition shows the Scanner isn't maintaining the lock on its own thread after // emitting the Block event // TODO: This is incomplete. Also test after emitting Completed let mut txn = db.txn(); diff --git a/processor/src/tests/signer.rs b/processor/src/tests/signer.rs index 9f76ae85f..89d57bf39 100644 --- a/processor/src/tests/signer.rs +++ b/processor/src/tests/signer.rs @@ -72,7 +72,7 @@ pub async fn sign( match signers .get_mut(&i) .unwrap() - .sign_transaction(&mut txn, actual_id.id, tx, eventuality) + .sign_transaction(&mut txn, actual_id.id, tx, &eventuality) .await { // All participants should emit a preprocess @@ -147,7 +147,7 @@ pub async fn sign( pub async fn test_signer(network: N) { let mut keys = key_gen(&mut OsRng); - for (_, keys) in keys.iter_mut() { + for keys in keys.values_mut() { N::tweak_keys(keys); } let key = keys[&Participant::new(1).unwrap()].group_key(); diff --git a/processor/src/tests/wallet.rs b/processor/src/tests/wallet.rs index 74a6dd54f..c9cc6c667 100644 --- a/processor/src/tests/wallet.rs +++ b/processor/src/tests/wallet.rs @@ -31,7 +31,7 @@ pub async fn test_wallet(network: N) { } let mut keys = key_gen(&mut OsRng); - for (_, keys) in keys.iter_mut() { + for keys in keys.values_mut() { N::tweak_keys(keys); } let key = keys[&Participant::new(1).unwrap()].group_key(); diff --git a/rust-toolchain.toml b/rust-toolchain.toml index ffac6d61a..8dac336ee 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,5 +1,5 @@ [toolchain] -channel = "1.74" +channel = "1.75" targets = ["wasm32-unknown-unknown"] profile = "minimal" components = ["rust-src", "rustfmt", "clippy"] diff --git a/substrate/abi/Cargo.toml b/substrate/abi/Cargo.toml index 0b2715766..043504868 100644 --- a/substrate/abi/Cargo.toml +++ b/substrate/abi/Cargo.toml @@ -12,6 +12,9 @@ rust-version = "1.69" all-features = true rustdoc-args = ["--cfg", "docsrs"] +[lints] +workspace = true + [dependencies] scale = { package = "parity-scale-codec", version = "3", features = ["derive"] } scale-info = { version = "2", features = ["derive"] } @@ -34,5 +37,19 @@ serai-signals-primitives = { path = "../signals/primitives", version = "0.1" } frame-support = { git = "https://github.com/serai-dex/substrate" } [features] -borsh = ["dep:borsh"] -serde = ["dep:serde"] +borsh = [ + "dep:borsh", + "serai-primitives/borsh", + "serai-coins-primitives/borsh", + "serai-validator-sets-primitives/borsh", + "serai-in-instructions-primitives/borsh", + "serai-signals-primitives/borsh", +] +serde = [ + "dep:serde", + "serai-primitives/serde", + "serai-coins-primitives/serde", + "serai-validator-sets-primitives/serde", + "serai-in-instructions-primitives/serde", + 
"serai-signals-primitives/serde", +] diff --git a/substrate/abi/src/babe.rs b/substrate/abi/src/babe.rs index b5fe89cb2..29bbee9ce 100644 --- a/substrate/abi/src/babe.rs +++ b/substrate/abi/src/babe.rs @@ -1,11 +1,11 @@ use sp_consensus_babe::EquivocationProof; -use serai_primitives::Header; +use serai_primitives::{Header, SeraiAddress}; #[derive(Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale_info::TypeInfo)] pub struct ReportEquivocation { pub equivocation_proof: Box>, - pub key_owner_proof: (), + pub key_owner_proof: SeraiAddress, } // We could define a Babe Config here and use the literal pallet_babe::Call diff --git a/substrate/abi/src/dex.rs b/substrate/abi/src/dex.rs index 5b69456ad..5136e9740 100644 --- a/substrate/abi/src/dex.rs +++ b/substrate/abi/src/dex.rs @@ -3,7 +3,6 @@ use sp_runtime::BoundedVec; use serai_primitives::*; type PoolId = Coin; -type PoolCoinId = Coin; type MaxSwapPathLength = sp_core::ConstU32<3>; #[derive(Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale_info::TypeInfo)] @@ -44,7 +43,6 @@ pub enum Event { PoolCreated { pool_id: PoolId, pool_account: SeraiAddress, - lp_token: PoolCoinId, }, LiquidityAdded { @@ -53,7 +51,6 @@ pub enum Event { pool_id: PoolId, coin_amount: SubstrateAmount, sri_amount: SubstrateAmount, - lp_token: PoolCoinId, lp_token_minted: SubstrateAmount, }, @@ -63,7 +60,6 @@ pub enum Event { pool_id: PoolId, coin_amount: SubstrateAmount, sri_amount: SubstrateAmount, - lp_token: PoolCoinId, lp_token_burned: SubstrateAmount, }, diff --git a/substrate/abi/src/grandpa.rs b/substrate/abi/src/grandpa.rs index 33c01dacb..54de8182a 100644 --- a/substrate/abi/src/grandpa.rs +++ b/substrate/abi/src/grandpa.rs @@ -5,7 +5,7 @@ use serai_primitives::{BlockNumber, SeraiAddress}; #[derive(Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale_info::TypeInfo)] pub struct ReportEquivocation { pub equivocation_proof: Box>, - pub key_owner_proof: (), + pub key_owner_proof: SeraiAddress, } #[derive(Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale_info::TypeInfo)] diff --git a/substrate/abi/src/validator_sets.rs b/substrate/abi/src/validator_sets.rs index f6244b1d8..64eb8b4a6 100644 --- a/substrate/abi/src/validator_sets.rs +++ b/substrate/abi/src/validator_sets.rs @@ -8,15 +8,10 @@ use serai_validator_sets_primitives::*; pub enum Call { set_keys { network: NetworkId, + removed_participants: Vec, key_pair: KeyPair, signature: Signature, }, - remove_participant { - network: NetworkId, - to_remove: SeraiAddress, - signers: Vec, - signature: Signature, - }, allocate { network: NetworkId, amount: Amount, diff --git a/substrate/client/Cargo.toml b/substrate/client/Cargo.toml index 6901a83ff..f97e40fba 100644 --- a/substrate/client/Cargo.toml +++ b/substrate/client/Cargo.toml @@ -13,6 +13,9 @@ rust-version = "1.74" all-features = true rustdoc-args = ["--cfg", "docsrs"] +[lints] +workspace = true + [dependencies] zeroize = "^1.5" thiserror = { version = "1", optional = true } @@ -24,6 +27,7 @@ serde_json = { version = "1", optional = true } serai-abi = { path = "../abi", version = "0.1" } +multiaddr = { version = "0.18", optional = true } sp-core = { git = "https://github.com/serai-dex/substrate", optional = true } sp-runtime = { git = "https://github.com/serai-dex/substrate", optional = true } frame-system = { git = "https://github.com/serai-dex/substrate", optional = true } @@ -53,7 +57,8 @@ dockertest = "0.4" serai-docker-tests = { path = "../../tests/docker" } [features] -serai = ["thiserror", "serde", 
"serde_json", "sp-core", "sp-runtime", "frame-system", "simple-request"] +serai = ["thiserror", "serde", "serde_json", "serai-abi/serde", "multiaddr", "sp-core", "sp-runtime", "frame-system", "simple-request"] +borsh = ["serai-abi/borsh"] networks = [] bitcoin = ["networks", "dep:bitcoin"] diff --git a/substrate/client/src/serai/mod.rs b/substrate/client/src/serai/mod.rs index be9b0cffb..59a2e7632 100644 --- a/substrate/client/src/serai/mod.rs +++ b/substrate/client/src/serai/mod.rs @@ -16,7 +16,7 @@ pub use abi::{primitives, Transaction}; use abi::*; pub use primitives::{SeraiAddress, Signature, Amount}; -use primitives::Header; +use primitives::{Header, NetworkId}; pub mod coins; pub use coins::SeraiCoins; @@ -306,6 +306,14 @@ impl Serai { pub fn as_of(&self, block: [u8; 32]) -> TemporalSerai { TemporalSerai { serai: self, block, events: RwLock::new(None) } } + + /// Return the P2P Multiaddrs for the validators of the specified network. + pub async fn p2p_validators( + &self, + network: NetworkId, + ) -> Result, SeraiError> { + self.call("p2p_validators", network).await + } } impl<'a> TemporalSerai<'a> { @@ -317,7 +325,6 @@ impl<'a> TemporalSerai<'a> { if events.is_none() { drop(events); let mut events_write = self.events.write().await; - #[allow(clippy::unwrap_or_default)] if events_write.is_none() { *events_write = Some(self.storage("System", "Events", ()).await?.unwrap_or(vec![])); } diff --git a/substrate/client/src/serai/validator_sets.rs b/substrate/client/src/serai/validator_sets.rs index be9b64b01..9b283635b 100644 --- a/substrate/client/src/serai/validator_sets.rs +++ b/substrate/client/src/serai/validator_sets.rs @@ -35,6 +35,23 @@ impl<'a> SeraiValidatorSets<'a> { .await } + pub async fn participant_removed_events(&self) -> Result, SeraiError> { + self + .0 + .events(|event| { + if let serai_abi::Event::ValidatorSets(event) = event { + if matches!(event, ValidatorSetsEvent::ParticipantRemoved { .. }) { + Some(event.clone()) + } else { + None + } + } else { + None + } + }) + .await + } + pub async fn key_gen_events(&self) -> Result, SeraiError> { self .0 @@ -109,36 +126,22 @@ impl<'a> SeraiValidatorSets<'a> { .await } - pub async fn musig_key(&self, set: ValidatorSet) -> Result, SeraiError> { - self.0.storage(PALLET, "MuSigKeys", (sp_core::hashing::twox_64(&set.encode()), set)).await - } - // TODO: Store these separately since we almost never need both at once? 
pub async fn keys(&self, set: ValidatorSet) -> Result, SeraiError> { self.0.storage(PALLET, "Keys", (sp_core::hashing::twox_64(&set.encode()), set)).await } - pub fn set_keys(network: NetworkId, key_pair: KeyPair, signature: Signature) -> Transaction { + pub fn set_keys( + network: NetworkId, + removed_participants: Vec, + key_pair: KeyPair, + signature: Signature, + ) -> Transaction { Serai::unsigned(serai_abi::Call::ValidatorSets(serai_abi::validator_sets::Call::set_keys { network, + removed_participants, key_pair, signature, })) } - - pub fn remove_participant( - network: NetworkId, - to_remove: SeraiAddress, - signers: Vec, - signature: Signature, - ) -> Transaction { - Serai::unsigned(serai_abi::Call::ValidatorSets( - serai_abi::validator_sets::Call::remove_participant { - network, - to_remove, - signers, - signature, - }, - )) - } } diff --git a/substrate/client/tests/common/in_instructions.rs b/substrate/client/tests/common/in_instructions.rs index 0435b6194..b4c248980 100644 --- a/substrate/client/tests/common/in_instructions.rs +++ b/substrate/client/tests/common/in_instructions.rs @@ -24,7 +24,7 @@ use crate::common::{tx::publish_tx, validator_sets::set_keys}; pub async fn provide_batch(serai: &Serai, batch: Batch) -> [u8; 32] { // TODO: Get the latest session let set = ValidatorSet { session: Session(0), network: batch.network }; - let pair = insecure_pair_from_name(&format!("ValidatorSet {:?}", set)); + let pair = insecure_pair_from_name(&format!("ValidatorSet {set:?}")); let keys = if let Some(keys) = serai.as_of_latest_finalized_block().await.unwrap().validator_sets().keys(set).await.unwrap() { diff --git a/substrate/client/tests/common/validator_sets.rs b/substrate/client/tests/common/validator_sets.rs index 057742dbe..22d0c005d 100644 --- a/substrate/client/tests/common/validator_sets.rs +++ b/substrate/client/tests/common/validator_sets.rs @@ -5,14 +5,14 @@ use rand_core::OsRng; use sp_core::{Pair, sr25519::Signature}; -use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto}; +use ciphersuite::{Ciphersuite, Ristretto}; use frost::dkg::musig::musig; use schnorrkel::Schnorrkel; use serai_client::{ primitives::insecure_pair_from_name, validator_sets::{ - primitives::{ValidatorSet, KeyPair, musig_context, musig_key, set_keys_message}, + primitives::{ValidatorSet, KeyPair, musig_context, set_keys_message}, ValidatorSetsEvent, }, SeraiValidatorSets, Serai, @@ -26,19 +26,6 @@ pub async fn set_keys(serai: &Serai, set: ValidatorSet, key_pair: KeyPair) -> [u let public = pair.public(); let public_key = ::read_G::<&[u8]>(&mut public.0.as_ref()).unwrap(); - assert_eq!( - serai - .as_of_latest_finalized_block() - .await - .unwrap() - .validator_sets() - .musig_key(set) - .await - .unwrap() - .unwrap(), - musig_key(set, &[public]).0 - ); - let secret_key = ::read_F::<&[u8]>( &mut pair.as_ref().secret.to_bytes()[.. 
@@ -46,33 +33,21 @@ pub async fn set_keys(serai: &Serai, set: ValidatorSet, key_pair: KeyPair) -> [u8; 32] {
   assert_eq!(Ristretto::generator() * secret_key, public_key);
   let threshold_keys =
     musig::<Ristretto>(&musig_context(set), &Zeroizing::new(secret_key), &[public_key]).unwrap();
-  assert_eq!(
-    serai
-      .as_of_latest_finalized_block()
-      .await
-      .unwrap()
-      .validator_sets()
-      .musig_key(set)
-      .await
-      .unwrap()
-      .unwrap(),
-    threshold_keys.group_key().to_bytes()
-  );
   let sig = frost::tests::sign_without_caching(
     &mut OsRng,
     frost::tests::algorithm_machines(
       &mut OsRng,
-      Schnorrkel::new(b"substrate"),
+      &Schnorrkel::new(b"substrate"),
       &HashMap::from([(threshold_keys.params().i(), threshold_keys.into())]),
     ),
-    &set_keys_message(&set, &key_pair),
+    &set_keys_message(&set, &[], &key_pair),
   );
   // Set the key pair
   let block = publish_tx(
     serai,
-    &SeraiValidatorSets::set_keys(set.network, key_pair.clone(), Signature(sig.to_bytes())),
+    &SeraiValidatorSets::set_keys(set.network, vec![], key_pair.clone(), Signature(sig.to_bytes())),
   )
   .await;
diff --git a/substrate/client/tests/dex.rs b/substrate/client/tests/dex.rs
index 5b096adff..8796fe0b1 100644
--- a/substrate/client/tests/dex.rs
+++ b/substrate/client/tests/dex.rs
@@ -38,22 +38,18 @@ serai_test!(
       DexEvent::PoolCreated {
         pool_id: Coin::Bitcoin,
         pool_account: PublicKey::from_raw(blake2_256(&Coin::Bitcoin.encode())).into(),
-        lp_token: Coin::Bitcoin,
       },
       DexEvent::PoolCreated {
         pool_id: Coin::Ether,
         pool_account: PublicKey::from_raw(blake2_256(&Coin::Ether.encode())).into(),
-        lp_token: Coin::Ether,
       },
       DexEvent::PoolCreated {
         pool_id: Coin::Dai,
         pool_account: PublicKey::from_raw(blake2_256(&Coin::Dai.encode())).into(),
-        lp_token: Coin::Dai,
       },
       DexEvent::PoolCreated {
         pool_id: Coin::Monero,
         pool_account: PublicKey::from_raw(blake2_256(&Coin::Monero.encode())).into(),
-        lp_token: Coin::Monero,
       },
     ]
   );
@@ -96,7 +92,6 @@ serai_test!(
       pool_id: Coin::Monero,
       coin_amount: coin_amount.0,
       sri_amount: sri_amount.0,
-      lp_token: Coin::Monero,
       lp_token_minted: 49_999999990000
     }]
   );
@@ -280,7 +275,6 @@ serai_test!(
       pool_id: Coin::Bitcoin,
      coin_amount: 10_000_000_000_000, // half of sent amount
      sri_amount: 6_947_918_403_646,
-      lp_token: Coin::Bitcoin,
      lp_token_minted: 8333333333332
    }]
  );
diff --git a/substrate/client/tests/dht.rs b/substrate/client/tests/dht.rs
new file mode 100644
index 000000000..deed79031
--- /dev/null
+++ b/substrate/client/tests/dht.rs
@@ -0,0 +1,59 @@
+use serai_client::{primitives::NetworkId, Serai};
+
+#[tokio::test]
+async fn dht() {
+  use dockertest::{
+    PullPolicy, StartPolicy, LogOptions, LogAction, LogPolicy, LogSource, Image,
+    TestBodySpecification, DockerTest,
+  };
+
+  serai_docker_tests::build("serai".to_string());
+
+  let handle = |name| format!("serai_client-serai_node-{name}");
+  let composition = |name| {
+    TestBodySpecification::with_image(
+      Image::with_repository("serai-dev-serai").pull_policy(PullPolicy::Never),
+    )
+    .replace_cmd(vec![
+      "serai-node".to_string(),
+      "--unsafe-rpc-external".to_string(),
+      "--rpc-cors".to_string(),
+      "all".to_string(),
+      "--chain".to_string(),
+      "local".to_string(),
+      format!("--{name}"),
+    ])
+    .set_publish_all_ports(true)
+    .set_handle(handle(name))
+    .set_start_policy(StartPolicy::Strict)
+    .set_log_options(Some(LogOptions {
+      action: LogAction::Forward,
+      policy: LogPolicy::Always,
+      source: LogSource::Both,
+    }))
+  };
+
+  let mut test = DockerTest::new().with_network(dockertest::Network::Isolated);
+  test.provide_container(composition("alice"));
test.provide_container(composition("bob")); + test.provide_container(composition("charlie")); + test.provide_container(composition("dave")); + test + .run_async(|ops| async move { + // Sleep until the Substrate RPC starts + let alice = handle("alice"); + let serai_rpc = ops.handle(&alice).host_port(9944).unwrap(); + let serai_rpc = format!("http://{}:{}", serai_rpc.0, serai_rpc.1); + // Sleep for a minute + tokio::time::sleep(core::time::Duration::from_secs(60)).await; + // Check the DHT has been populated + assert!(!Serai::new(serai_rpc.clone()) + .await + .unwrap() + .p2p_validators(NetworkId::Bitcoin) + .await + .unwrap() + .is_empty()); + }) + .await; +} diff --git a/substrate/client/tests/validator_sets.rs b/substrate/client/tests/validator_sets.rs index d66b68fb3..a487b51cb 100644 --- a/substrate/client/tests/validator_sets.rs +++ b/substrate/client/tests/validator_sets.rs @@ -5,7 +5,7 @@ use sp_core::{sr25519::Public, Pair}; use serai_client::{ primitives::{NETWORKS, NetworkId, insecure_pair_from_name}, validator_sets::{ - primitives::{Session, ValidatorSet, KeyPair, musig_key}, + primitives::{Session, ValidatorSet, KeyPair}, ValidatorSetsEvent, }, Serai, @@ -58,7 +58,6 @@ serai_test!( .collect::>(); let participants_ref: &[_] = participants.as_ref(); assert_eq!(participants_ref, [public].as_ref()); - assert_eq!(vs_serai.musig_key(set).await.unwrap().unwrap(), musig_key(set, &[public]).0); } let block = set_keys(&serai, set, key_pair.clone()).await; diff --git a/substrate/coins/pallet/Cargo.toml b/substrate/coins/pallet/Cargo.toml index afbae927a..75011add0 100644 --- a/substrate/coins/pallet/Cargo.toml +++ b/substrate/coins/pallet/Cargo.toml @@ -12,8 +12,14 @@ rust-version = "1.70" all-features = true rustdoc-args = ["--cfg", "docsrs"] +[package.metadata.cargo-machete] +ignored = ["scale", "scale-info"] + +[lints] +workspace = true + [dependencies] -parity-scale-codec = { version = "3", default-features = false, features = ["derive"] } +scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["derive"] } scale-info = { version = "2", default-features = false, features = ["derive"] } frame-system = { git = "https://github.com/serai-dex/substrate", default-features = false } diff --git a/substrate/coins/pallet/src/lib.rs b/substrate/coins/pallet/src/lib.rs index b2a83eafb..510e0edfb 100644 --- a/substrate/coins/pallet/src/lib.rs +++ b/substrate/coins/pallet/src/lib.rs @@ -12,6 +12,8 @@ impl AllowMint for () { } } +// TODO: Investigate why Substrate generates this +#[allow(clippy::cast_possible_truncation)] #[frame_support::pallet] pub mod pallet { use super::*; @@ -143,7 +145,7 @@ pub mod pallet { fn increase_balance_internal(to: Public, balance: Balance) -> Result<(), Error> { let coin = &balance.coin; - // sub amount from account + // add amount to account let new_amount = Self::balances(to, coin) .checked_add(balance.amount.0) .ok_or(Error::::AmountOverflowed)?; diff --git a/substrate/coins/primitives/Cargo.toml b/substrate/coins/primitives/Cargo.toml index 8117bb8b6..322016da1 100644 --- a/substrate/coins/primitives/Cargo.toml +++ b/substrate/coins/primitives/Cargo.toml @@ -11,6 +11,9 @@ rust-version = "1.69" all-features = true rustdoc-args = ["--cfg", "docsrs"] +[lints] +workspace = true + [dependencies] zeroize = { version = "^1.5", features = ["derive"], optional = true } diff --git a/substrate/dex/pallet/Cargo.toml b/substrate/dex/pallet/Cargo.toml index 971d5c6d8..83192d6e0 100644 --- a/substrate/dex/pallet/Cargo.toml +++ 
@@ -12,8 +12,14 @@ rust-version = "1.70"
 all-features = true
 rustdoc-args = ["--cfg", "docsrs"]
 
+[package.metadata.cargo-machete]
+ignored = ["scale", "scale-info"]
+
+[lints]
+workspace = true
+
 [dependencies]
-codec = { package = "parity-scale-codec", version = "3.6.1", default-features = false }
+scale = { package = "parity-scale-codec", version = "3.6.1", default-features = false }
 scale-info = { version = "2.5.0", default-features = false, features = ["derive"] }
 
 sp-std = { git = "https://github.com/serai-dex/substrate", default-features = false }
@@ -33,7 +39,7 @@ serai-primitives = { path = "../../primitives", default-features = false }
 [features]
 default = ["std"]
 std = [
-  "codec/std",
+  "scale/std",
   "scale-info/std",
 
   "sp-std/std",
diff --git a/substrate/dex/pallet/src/benchmarking.rs b/substrate/dex/pallet/src/benchmarking.rs
index 4fe301f61..a00b6edcd 100644
--- a/substrate/dex/pallet/src/benchmarking.rs
+++ b/substrate/dex/pallet/src/benchmarking.rs
@@ -52,9 +52,7 @@ fn create_coin<T: Config>(coin: &Coin) -> (T::AccountId, AccountIdLookupOf<T>) {
   (caller, caller_lookup)
 }
 
-fn create_coin_and_pool<T: Config>(
-  coin: &Coin,
-) -> (PoolCoinId, T::AccountId, AccountIdLookupOf<T>) {
+fn create_coin_and_pool<T: Config>(coin: &Coin) -> (Coin, T::AccountId, AccountIdLookupOf<T>) {
   let (caller, caller_lookup) = create_coin::<T>(coin);
   assert_ok!(Dex::<T>::create_pool(*coin));
diff --git a/substrate/dex/pallet/src/lib.rs b/substrate/dex/pallet/src/lib.rs
index 20753fb95..352205029 100644
--- a/substrate/dex/pallet/src/lib.rs
+++ b/substrate/dex/pallet/src/lib.rs
@@ -94,6 +94,8 @@ use sp_std::prelude::*;
 pub use types::*;
 pub use weights::WeightInfo;
 
+// TODO: Investigate why Substrate generates these
+#[allow(clippy::cast_possible_truncation, clippy::no_effect_underscore_binding)]
 #[frame_support::pallet]
 pub mod pallet {
   use super::*;
@@ -112,9 +114,6 @@ pub mod pallet {
   /// migration.
   pub type PoolId = Coin;
 
-  /// Liquidity token id is the same as pool id.
-  pub type PoolCoinId = Coin;
-
   /// LiquidityTokens Pallet as an instance of coins pallet.
   pub type LiquidityTokens<T> = coins_pallet::Pallet<T, coins_pallet::Instance1>;
@@ -149,10 +148,10 @@ pub mod pallet {
     type WeightInfo: WeightInfo;
   }
 
-  /// Map from `PoolCoinId` to `PoolInfo`. This establishes whether a pool has been officially
+  /// Map from `PoolId` to `()`. This establishes whether a pool has been officially
   /// created rather than people sending tokens directly to a pool's public account.
   #[pallet::storage]
-  pub type Pools<T: Config> = StorageMap<_, Blake2_128Concat, PoolId, PoolInfo, OptionQuery>;
+  pub type Pools<T: Config> = StorageMap<_, Blake2_128Concat, PoolId, (), OptionQuery>;
 
   #[pallet::storage]
   #[pallet::getter(fn spot_price_for_block)]
@@ -196,9 +195,6 @@ pub mod pallet {
       pool_id: PoolId,
       /// The account ID of the pool.
       pool_account: T::AccountId,
-      /// The id of the liquidity tokens that will be minted when coins are added to this
-      /// pool.
-      lp_token: PoolCoinId,
     },
 
     /// A successful call of the `AddLiquidity` extrinsic will create this event.
@@ -213,8 +209,6 @@ pub mod pallet {
       coin_amount: SubstrateAmount,
      /// The amount of the SRI that was added to the pool.
      sri_amount: SubstrateAmount,
-      /// The id of the lp token that was minted.
-      lp_token: PoolCoinId,
      /// The amount of lp tokens that were minted of that id.
      lp_token_minted: SubstrateAmount,
    },
@@ -231,8 +225,6 @@ pub mod pallet {
      coin_amount: SubstrateAmount,
      /// The amount of the second coin that was removed from the pool.
      sri_amount: SubstrateAmount,
-      /// The id of the lp token that was burned.
-      lp_token: PoolCoinId,
      /// The amount of lp tokens that were burned of that id.
      lp_token_burned: SubstrateAmount,
    },
@@ -289,8 +281,6 @@ pub mod pallet {
  pub enum Error<T> {
    /// Provided coins are equal.
    EqualCoins,
-    /// Provided coin is not supported for pool.
-    UnsupportedCoin,
    /// Pool already exists.
    PoolExists,
    /// Desired amount can't be zero.
@@ -326,22 +316,16 @@ pub mod pallet {
    ZeroLiquidity,
    /// Amount can't be zero.
    ZeroAmount,
-    /// Insufficient liquidity in the pool.
-    InsufficientLiquidity,
    /// Calculated amount out is less than provided minimum amount.
    ProvidedMinimumNotSufficientForSwap,
    /// Provided maximum amount is not sufficient for swap.
    ProvidedMaximumNotSufficientForSwap,
-    /// Only pools with native on one side are valid.
-    PoolMustContainNativeCurrency,
    /// The provided path must consists of 2 coins at least.
    InvalidPath,
    /// It was not possible to calculate path data.
    PathError,
    /// The provided path must consists of unique coins.
    NonUniquePath,
-    /// It was not possible to get or increment the Id of the pool.
-    IncorrectPoolCoinId,
    /// Unable to find an element in an array/vec that should have one-to-one correspondence
    /// with another. For example, an array of coins constituting a `path` should have a
    /// corresponding array of `amounts` along the path.
@@ -427,10 +411,9 @@ pub mod pallet {
      let pool_account = Self::get_pool_account(pool_id);
      frame_system::Pallet::<T>::inc_providers(&pool_account);
-      let pool_info = PoolInfo { lp_token: coin };
-      Pools::<T>::insert(pool_id, pool_info);
+      Pools::<T>::insert(pool_id, ());
 
-      Self::deposit_event(Event::PoolCreated { pool_id, pool_account, lp_token: coin });
+      Self::deposit_event(Event::PoolCreated { pool_id, pool_account });
 
      Ok(())
    }
@@ -474,8 +457,7 @@ pub mod pallet {
      let pool_id = Self::get_pool_id(coin, Coin::Serai).unwrap();
 
-      let maybe_pool = Pools::<T>::get(pool_id);
-      let pool = maybe_pool.as_ref().ok_or(Error::<T>::PoolNotFound)?;
+      Pools::<T>::get(pool_id).as_ref().ok_or(Error::<T>::PoolNotFound)?;
 
      let pool_account = Self::get_pool_account(pool_id);
      let sri_reserve = Self::get_balance(&pool_account, Coin::Serai);
@@ -540,7 +522,6 @@ pub mod pallet {
        pool_id,
        coin_amount,
        sri_amount,
-        lp_token: pool.lp_token,
        lp_token_minted: lp_token_amount,
      });
@@ -566,8 +547,7 @@ pub mod pallet {
      let pool_id = Self::get_pool_id(coin, Coin::Serai).unwrap();
      ensure!(lp_token_burn > 0, Error::<T>::ZeroLiquidity);
 
-      let maybe_pool = Pools::<T>::get(pool_id);
-      let pool = maybe_pool.as_ref().ok_or(Error::<T>::PoolNotFound)?;
+      Pools::<T>::get(pool_id).as_ref().ok_or(Error::<T>::PoolNotFound)?;
 
      let pool_account = Self::get_pool_account(pool_id);
      let sri_reserve = Self::get_balance(&pool_account, Coin::Serai);
@@ -609,7 +589,6 @@ pub mod pallet {
        pool_id,
        coin_amount,
        sri_amount,
-        lp_token: pool.lp_token,
        lp_token_burned: lp_token_burn,
      });
@@ -774,7 +753,7 @@ pub mod pallet {
      )?;
 
      let mut i = 0;
-      let path_len = path.len() as u32;
+      let path_len = u32::try_from(path.len()).unwrap();
      #[allow(clippy::explicit_counter_loop)]
      for coins_pair in path.windows(2) {
        if let [coin1, coin2] = coins_pair {
diff --git a/substrate/dex/pallet/src/tests.rs b/substrate/dex/pallet/src/tests.rs
index 522b65902..a1809b738 100644
--- a/substrate/dex/pallet/src/tests.rs
+++ b/substrate/dex/pallet/src/tests.rs
@@ -74,7 +74,7 @@ fn check_pool_accounts_dont_collide() {
  for coin in coins() {
    let account = Dex::get_pool_account(coin);
    if map.contains(&account) {
-      panic!("Collision at {:?}", coin);
+      panic!("Collision at {coin:?}");
    }
    map.insert(account);
  }
@@ -101,7 +101,6 @@ fn can_create_pool() {
   let coin2 = Coin::Monero;
   let pool_id = Dex::get_pool_id(coin1, coin2).unwrap();
-  let lp_token = coin2;
 
   assert_ok!(CoinsPallet::<Test>::mint(user, Balance { coin: coin1, amount: Amount(1000) }));
   assert_ok!(Dex::create_pool(coin2));
@@ -109,11 +108,7 @@ fn can_create_pool() {
   assert_eq!(
     events(),
-    [Event::<Test>::PoolCreated {
-      pool_id,
-      pool_account: Dex::get_pool_account(pool_id),
-      lp_token
-    }]
+    [Event::<Test>::PoolCreated { pool_id, pool_account: Dex::get_pool_account(pool_id) }]
   );
   assert_eq!(pools(), vec![pool_id]);
@@ -148,7 +143,6 @@ fn different_pools_should_have_different_lp_tokens() {
     [Event::<Test>::PoolCreated {
       pool_id: pool_id_1_2,
       pool_account: Dex::get_pool_account(pool_id_1_2),
-      lp_token: lp_token2_1
     }]
   );
@@ -158,7 +152,6 @@ fn different_pools_should_have_different_lp_tokens() {
     [Event::<Test>::PoolCreated {
       pool_id: pool_id_1_3,
       pool_account: Dex::get_pool_account(pool_id_1_3),
-      lp_token: lp_token3_1,
     }]
   );
@@ -195,7 +188,6 @@ fn can_add_liquidity() {
     pool_id,
     sri_amount: 10000,
     coin_amount: 10,
-    lp_token: lp_token1,
     lp_token_minted: 216,
   }));
   let pallet_account = Dex::get_pool_account(pool_id);
@@ -215,7 +207,6 @@ fn can_add_liquidity() {
     pool_id,
     sri_amount: 10000,
     coin_amount: 10,
-    lp_token: lp_token2,
     lp_token_minted: 216,
   }));
   let pallet_account = Dex::get_pool_account(pool_id);
@@ -324,7 +315,6 @@ fn can_remove_liquidity() {
     pool_id,
     sri_amount: 999990000,
     coin_amount: 99999,
-    lp_token,
     lp_token_burned: total_lp_received,
   }));
@@ -715,7 +705,6 @@ fn check_no_panic_when_try_swap_close_to_empty_pool() {
     pool_id,
     sri_amount: liquidity1,
     coin_amount: liquidity2,
-    lp_token,
     lp_token_minted,
   }));
diff --git a/substrate/dex/pallet/src/types.rs b/substrate/dex/pallet/src/types.rs
index 94dd340e4..ee344564d 100644
--- a/substrate/dex/pallet/src/types.rs
+++ b/substrate/dex/pallet/src/types.rs
@@ -20,20 +20,10 @@ use super::*;
 
-use codec::{Decode, Encode, MaxEncodedLen};
-use scale_info::TypeInfo;
-
 /// This needs to be long enough for arbitrage to occur and make holding
 /// any fake price up sufficiently unprofitable.
 pub const ORACLE_WINDOW_SIZE: u32 = 1000;
 
-/// Stores the lp_token coin id a particular pool has been assigned.
-#[derive(Decode, Encode, Default, PartialEq, Eq, MaxEncodedLen, TypeInfo)]
-pub struct PoolInfo {
-  /// Liquidity pool coin
-  pub lp_token: PoolCoinId,
-}
-
 /// Trait for providing methods to swap between the various coin classes.
 pub trait Swap {
   /// Swap exactly `amount_in` of coin `path[0]` for coin `path[1]`.
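For context on the storage simplification above: since the lp token id always equals the pool id, `Pools` no longer needs to carry a `PoolInfo` struct and only witnesses that a pool was officially created. A hypothetical helper (not part of this changeset; `Pools`, `PoolId`, `Error`, and `Config` are the pallet's own names) sketching the resulting access pattern:

// Sketch only: with the value type now `()`, presence is the only information stored,
// so callers assert existence rather than reading a struct.
fn ensure_pool_exists<T: Config>(pool_id: PoolId) -> Result<(), Error<T>> {
  Pools::<T>::get(pool_id).ok_or(Error::<T>::PoolNotFound)?;
  Ok(())
}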
diff --git a/substrate/in-instructions/pallet/Cargo.toml b/substrate/in-instructions/pallet/Cargo.toml index fc342c3bd..f313a22a6 100644 --- a/substrate/in-instructions/pallet/Cargo.toml +++ b/substrate/in-instructions/pallet/Cargo.toml @@ -12,6 +12,12 @@ rust-version = "1.74" all-features = true rustdoc-args = ["--cfg", "docsrs"] +[package.metadata.cargo-machete] +ignored = ["scale", "scale-info"] + +[lints] +workspace = true + [dependencies] scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["derive", "max-encoded-len"] } scale-info = { version = "2", default-features = false, features = ["derive"] } diff --git a/substrate/in-instructions/pallet/src/lib.rs b/substrate/in-instructions/pallet/src/lib.rs index 1bf010a8e..3ec63ae58 100644 --- a/substrate/in-instructions/pallet/src/lib.rs +++ b/substrate/in-instructions/pallet/src/lib.rs @@ -9,6 +9,8 @@ use serai_primitives::{BlockHash, NetworkId}; pub use in_instructions_primitives as primitives; use primitives::*; +// TODO: Investigate why Substrate generates these +#[allow(clippy::cast_possible_truncation, clippy::no_effect_underscore_binding)] #[frame_support::pallet] pub mod pallet { use sp_std::vec; diff --git a/substrate/in-instructions/primitives/Cargo.toml b/substrate/in-instructions/primitives/Cargo.toml index f7cadccbd..f579f59d4 100644 --- a/substrate/in-instructions/primitives/Cargo.toml +++ b/substrate/in-instructions/primitives/Cargo.toml @@ -11,6 +11,9 @@ rust-version = "1.69" all-features = true rustdoc-args = ["--cfg", "docsrs"] +[lints] +workspace = true + [dependencies] zeroize = { version = "^1.5", features = ["derive"], optional = true } diff --git a/substrate/node/Cargo.toml b/substrate/node/Cargo.toml index 5d34fa9ba..c34f70445 100644 --- a/substrate/node/Cargo.toml +++ b/substrate/node/Cargo.toml @@ -9,15 +9,17 @@ edition = "2021" publish = false rust-version = "1.74" +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + [[bin]] name = "serai-node" [dependencies] -clap = { version = "4", features = ["derive"] } - -futures = "0.3" -jsonrpsee = { version = "0.16", features = ["server"] } - sp-core = { git = "https://github.com/serai-dex/substrate" } sp-timestamp = { git = "https://github.com/serai-dex/substrate" } sp-io = { git = "https://github.com/serai-dex/substrate" } @@ -30,6 +32,12 @@ frame-benchmarking = { git = "https://github.com/serai-dex/substrate" } serai-runtime = { path = "../runtime", features = ["std"] } +clap = { version = "4", features = ["derive"] } + +futures-util = "0.3" +tokio = { version = "1", features = ["sync", "rt-multi-thread"] } +jsonrpsee = { version = "0.16", features = ["server"] } + sc-offchain = { git = "https://github.com/serai-dex/substrate" } sc-transaction-pool = { git = "https://github.com/serai-dex/substrate" } sc-transaction-pool-api = { git = "https://github.com/serai-dex/substrate" } diff --git a/substrate/node/src/command.rs b/substrate/node/src/command.rs index a61411801..91df761b5 100644 --- a/substrate/node/src/command.rs +++ b/substrate/node/src/command.rs @@ -100,7 +100,7 @@ pub fn run() -> sc_cli::Result<()> { if config.role.is_authority() { config.state_pruning = Some(PruningMode::ArchiveAll); } - service::new_full(config).await.map_err(sc_cli::Error::Service) + service::new_full(config).map_err(sc_cli::Error::Service) }), } } diff --git a/substrate/node/src/rpc.rs b/substrate/node/src/rpc.rs index 45e2391a4..f5ed25820 100644 --- a/substrate/node/src/rpc.rs +++ 
b/substrate/node/src/rpc.rs
@@ -1,16 +1,18 @@
-use std::sync::Arc;
-
-use jsonrpsee::RpcModule;
+use std::{sync::Arc, collections::HashSet};
 
 use sp_blockchain::{Error as BlockchainError, HeaderBackend, HeaderMetadata};
 use sp_block_builder::BlockBuilder;
 use sp_api::ProvideRuntimeApi;
 
 use serai_runtime::{
-  primitives::{SubstrateAmount, PublicKey},
-  Nonce, Block,
+  primitives::{NetworkId, SubstrateAmount, PublicKey},
+  Nonce, Block, SeraiRuntimeApi,
 };
 
+use tokio::sync::RwLock;
+
+use jsonrpsee::RpcModule;
+
 pub use sc_rpc_api::DenyUnsafe;
 use sc_transaction_pool_api::TransactionPool;
@@ -18,6 +20,7 @@ pub struct FullDeps<C, P> {
   pub client: Arc<C>,
   pub pool: Arc<P>,
   pub deny_unsafe: DenyUnsafe,
+  pub authority_discovery: Option<sc_authority_discovery::Service>,
 }
 
 pub fn create_full<
@@ -34,16 +37,56 @@ where
   C::Api: substrate_frame_rpc_system::AccountNonceApi<Block, PublicKey, Nonce>
     + pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi<Block, SubstrateAmount>
+    + SeraiRuntimeApi<Block>
     + BlockBuilder<Block>,
 {
   use substrate_frame_rpc_system::{System, SystemApiServer};
   use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer};
 
   let mut module = RpcModule::new(());
-  let FullDeps { client, pool, deny_unsafe } = deps;
+  let FullDeps { client, pool, deny_unsafe, authority_discovery } = deps;
   module.merge(System::new(client.clone(), pool, deny_unsafe).into_rpc())?;
-  module.merge(TransactionPayment::new(client).into_rpc())?;
+  module.merge(TransactionPayment::new(client.clone()).into_rpc())?;
+
+  if let Some(authority_discovery) = authority_discovery {
+    let mut authority_discovery_module =
+      RpcModule::new((client, RwLock::new(authority_discovery)));
+    authority_discovery_module.register_async_method(
+      "p2p_validators",
+      |params, context| async move {
+        let network: NetworkId = params.parse()?;
+        let (client, authority_discovery) = &*context;
+        let latest_block = client.info().best_hash;
+
+        let validators = client.runtime_api().validators(latest_block, network).map_err(|_| {
+          jsonrpsee::core::Error::to_call_error(std::io::Error::other(format!(
+            "couldn't get validators from the latest block, which is likely a fatal bug. {}",
+            "please report this at https://github.com/serai-dex/serai",
+          )))
+        })?;
+        let mut all_p2p_addresses = vec![];
+        for validator in validators {
+          let mut returned_addresses = authority_discovery
+            .write()
+            .await
+            .get_addresses_by_authority_id(validator.into())
+            .await
+            .unwrap_or_else(HashSet::new)
+            .into_iter();
+          // Only take a single address
+          // There should be one, there may be two if their IP address changed, and more should
+          // only occur if they have multiple proxies/an IP address changing frequently/some issue
+          // preventing consistent self-identification
+          // It isn't beneficial to use multiple addresses for a single peer here
+          if let Some(address) = returned_addresses.next() {
+            all_p2p_addresses.push(address);
+          }
+        }
+        Ok(all_p2p_addresses)
+      },
+    )?;
+    module.merge(authority_discovery_module)?;
+  }
 
   Ok(module)
 }
diff --git a/substrate/node/src/service.rs b/substrate/node/src/service.rs
index 54eed109a..5a124eeba 100644
--- a/substrate/node/src/service.rs
+++ b/substrate/node/src/service.rs
@@ -1,6 +1,6 @@
 use std::{boxed::Box, sync::Arc};
 
-use futures::stream::StreamExt;
+use futures_util::stream::StreamExt;
 
 use sp_timestamp::InherentDataProvider as TimestampInherent;
 use sp_consensus_babe::{SlotDuration, inherents::InherentDataProvider as BabeInherent};
@@ -124,7 +124,7 @@ pub fn new_partial(config: &Configuration) -> Result
-pub async fn new_full(config: Configuration) -> Result<TaskManager, ServiceError> {
+pub fn new_full(config: Configuration) -> Result<TaskManager, ServiceError> {
   let sc_service::PartialComponents {
     client,
     backend,
@@ -206,6 +206,42 @@ pub async fn new_full(config: Configuration) -> Result
+          Some(e),
+          _ => None,
+        }
+      })),
+      sc_authority_discovery::Role::PublishAndDiscover(keystore.clone()),
+      prometheus_registry.clone(),
+    );
+    task_manager.spawn_handle().spawn(
+      "authority-discovery-worker",
+      Some("networking"),
+      worker.run(),
+    );
+
+    Some(service)
+  } else {
+    None
+  };
+
   let rpc_builder = {
     let client = client.clone();
     let pool = transaction_pool.clone();
@@ -215,18 +251,15 @@ pub async fn new_full(config: Configuration) -> Result
-            Some(e),
-            _ => None,
-          }
-        })),
-
sc_authority_discovery::Role::PublishAndDiscover(keystore.clone()), - prometheus_registry.clone(), - ) - .0 - .run(), - ); - } - if enable_grandpa { task_manager.spawn_essential_handle().spawn_blocking( "grandpa-voter", diff --git a/substrate/primitives/Cargo.toml b/substrate/primitives/Cargo.toml index 4ff632125..54137aba7 100644 --- a/substrate/primitives/Cargo.toml +++ b/substrate/primitives/Cargo.toml @@ -12,6 +12,9 @@ rust-version = "1.69" all-features = true rustdoc-args = ["--cfg", "docsrs"] +[lints] +workspace = true + [dependencies] zeroize = { version = "^1.5", features = ["derive"], optional = true } diff --git a/substrate/primitives/src/networks.rs b/substrate/primitives/src/networks.rs index 51ba7e547..fd713ca1b 100644 --- a/substrate/primitives/src/networks.rs +++ b/substrate/primitives/src/networks.rs @@ -65,8 +65,7 @@ impl Coin { match self { Coin::Serai => NetworkId::Serai, Coin::Bitcoin => NetworkId::Bitcoin, - Coin::Ether => NetworkId::Ethereum, - Coin::Dai => NetworkId::Ethereum, + Coin::Ether | Coin::Dai => NetworkId::Ethereum, Coin::Monero => NetworkId::Monero, } } @@ -93,11 +92,8 @@ impl Coin { pub fn decimals(&self) -> u32 { match self { - Coin::Serai => 8, - Coin::Bitcoin => 8, // Ether and DAI have 18 decimals, yet we only track 8 in order to fit them within u64s - Coin::Ether => 8, - Coin::Dai => 8, + Coin::Serai | Coin::Bitcoin | Coin::Ether | Coin::Dai => 8, Coin::Monero => 12, } } @@ -112,7 +108,7 @@ impl Coin { // more liquidity, the only reason we'd have so many coins from a network is if there's no DEX // on-chain // There's probably no chain with so many *worthwhile* coins and no on-chain DEX -// This could probably be just 4, yet 8 is a hedge for the unforseen +// This could probably be just 4, yet 8 is a hedge for the unforeseen // If necessary, this can be increased with a fork pub const MAX_COINS_PER_NETWORK: u32 = 8; diff --git a/substrate/primitives/src/tx.rs b/substrate/primitives/src/tx.rs index bb3de1f4f..2b1e1e84a 100644 --- a/substrate/primitives/src/tx.rs +++ b/substrate/primitives/src/tx.rs @@ -46,7 +46,7 @@ mod _serde { fn deserialize>(de: D) -> Result { let bytes = sp_core::bytes::deserialize(de)?; scale::Decode::decode(&mut &bytes[..]) - .map_err(|e| serde::de::Error::custom(format!("invalid transaction: {}", e))) + .map_err(|e| serde::de::Error::custom(format!("invalid transaction: {e}"))) } } } diff --git a/substrate/runtime/Cargo.toml b/substrate/runtime/Cargo.toml index 1e905894f..9d31feb85 100644 --- a/substrate/runtime/Cargo.toml +++ b/substrate/runtime/Cargo.toml @@ -12,8 +12,16 @@ rust-version = "1.74" all-features = true rustdoc-args = ["--cfg", "docsrs"] +[package.metadata.cargo-machete] +ignored = ["scale", "scale-info"] + +[lints] +workspace = true + [dependencies] -codec = { package = "parity-scale-codec", version = "3", default-features = false, features = ["derive"] } +hashbrown = { version = "0.14", default-features = false, features = ["ahash", "inline-more"] } + +scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["derive"] } scale-info = { version = "2", default-features = false, features = ["derive"] } sp-core = { git = "https://github.com/serai-dex/substrate", default-features = false } @@ -43,6 +51,7 @@ frame-benchmarking = { git = "https://github.com/serai-dex/substrate", default-f serai-primitives = { path = "../primitives", default-features = false } pallet-timestamp = { git = "https://github.com/serai-dex/substrate", default-features = false } +pallet-authorship = { git = 
"https://github.com/serai-dex/substrate", default-features = false } pallet-transaction-payment = { git = "https://github.com/serai-dex/substrate", default-features = false } @@ -66,7 +75,7 @@ substrate-wasm-builder = { git = "https://github.com/serai-dex/substrate" } [features] std = [ - "codec/std", + "scale/std", "scale-info/std", "sp-core/std", @@ -95,6 +104,7 @@ std = [ "serai-primitives/std", "pallet-timestamp/std", + "pallet-authorship/std", "pallet-transaction-payment/std", diff --git a/substrate/runtime/src/lib.rs b/substrate/runtime/src/lib.rs index b4828cde2..f083befbd 100644 --- a/substrate/runtime/src/lib.rs +++ b/substrate/runtime/src/lib.rs @@ -6,9 +6,12 @@ #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); +use core::marker::PhantomData; + // Re-export all components pub use serai_primitives as primitives; pub use primitives::{BlockNumber, Header}; +use primitives::{NetworkId, NETWORKS}; pub use frame_system as system; pub use frame_support as support; @@ -41,7 +44,7 @@ use sp_runtime::{ create_runtime_str, generic, impl_opaque_keys, KeyTypeId, traits::{Convert, BlakeTwo256, Block as BlockT}, transaction_validity::{TransactionSource, TransactionValidity}, - Perbill, ApplyExtrinsicResult, + BoundedVec, Perbill, ApplyExtrinsicResult, }; use primitives::{PublicKey, AccountLookup, SubstrateAmount}; @@ -55,9 +58,11 @@ use support::{ parameter_types, construct_runtime, }; +use validator_sets::MembershipProof; + +use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; use babe::AuthorityId as BabeId; use grandpa::AuthorityId as GrandpaId; -use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; /// Nonce of a transaction in the chain, for a given account. pub type Nonce = u32; @@ -141,8 +146,6 @@ parameter_types! { Weight::from_parts(2u64 * WEIGHT_REF_TIME_PER_SECOND, u64::MAX), NORMAL_DISPATCH_RATIO, ); - - pub const MaxAuthorities: u32 = validator_sets::primitives::MAX_KEY_SHARES_PER_SET; } pub struct CallFilter; @@ -157,10 +160,8 @@ impl Contains for CallFilter { // All of these pallets are our own, and all of their written calls are intended to be called RuntimeCall::Coins(call) => !matches!(call, coins::Call::__Ignore(_, _)), RuntimeCall::LiquidityTokens(call) => match call { - coins::Call::transfer { .. } => true, - coins::Call::burn { .. } => true, - coins::Call::burn_with_instruction { .. } => false, - coins::Call::__Ignore(_, _) => false, + coins::Call::transfer { .. } | coins::Call::burn { .. } => true, + coins::Call::burn_with_instruction { .. } | coins::Call::__Ignore(_, _) => false, }, RuntimeCall::Dex(call) => !matches!(call, dex::Call::__Ignore(_, _)), RuntimeCall::ValidatorSets(call) => !matches!(call, validator_sets::Call::__Ignore(_, _)), @@ -168,17 +169,15 @@ impl Contains for CallFilter { RuntimeCall::Signals(call) => !matches!(call, signals::Call::__Ignore(_, _)), RuntimeCall::Babe(call) => match call { - babe::Call::report_equivocation { .. } => true, + babe::Call::report_equivocation { .. } | babe::Call::report_equivocation_unsigned { .. } => true, - babe::Call::plan_config_change { .. } => false, - babe::Call::__Ignore(_, _) => false, + babe::Call::plan_config_change { .. } | babe::Call::__Ignore(_, _) => false, }, RuntimeCall::Grandpa(call) => match call { - grandpa::Call::report_equivocation { .. } => true, + grandpa::Call::report_equivocation { .. } | grandpa::Call::report_equivocation_unsigned { .. } => true, - grandpa::Call::note_stalled { .. 
} => false, - grandpa::Call::__Ignore(_, _) => false, + grandpa::Call::note_stalled { .. } | grandpa::Call::__Ignore(_, _) => false, }, } } @@ -266,8 +265,10 @@ impl Convert> for IdentityValidatorIdOf { impl signals::Config for Runtime { type RuntimeEvent = RuntimeEvent; // 1 week + #[allow(clippy::cast_possible_truncation)] type RetirementValidityDuration = ConstU32<{ (7 * 24 * 60 * 60) / (TARGET_BLOCK_TIME as u32) }>; // 2 weeks + #[allow(clippy::cast_possible_truncation)] type RetirementLockInDuration = ConstU32<{ (2 * 7 * 24 * 60 * 60) / (TARGET_BLOCK_TIME as u32) }>; } @@ -275,20 +276,43 @@ impl in_instructions::Config for Runtime { type RuntimeEvent = RuntimeEvent; } +// for publishing equivocation evidences. +impl frame_system::offchain::SendTransactionTypes for Runtime +where + RuntimeCall: From, +{ + type Extrinsic = Transaction; + type OverarchingCall = RuntimeCall; +} + +// for validating equivocation evidences. +// The following runtime construction doesn't actually implement the pallet as doing so is +// unnecessary +// TODO: Replace the requirement on Config for a requirement on FindAuthor directly +impl pallet_authorship::Config for Runtime { + type FindAuthor = ValidatorSets; + type EventHandler = (); +} + +// Maximum number of authorities per session. +pub type MaxAuthorities = ConstU32<{ validator_sets::primitives::MAX_KEY_SHARES_PER_SET }>; + +/// Longevity of an offence report. +pub type ReportLongevity = ::EpochDuration; + impl babe::Config for Runtime { #[allow(clippy::identity_op)] type EpochDuration = ConstU64<{ 1 * DAYS }>; type ExpectedBlockTime = ConstU64<{ TARGET_BLOCK_TIME * 1000 }>; - type EpochChangeTrigger = pallet_babe::ExternalTrigger; + type EpochChangeTrigger = babe::ExternalTrigger; type DisabledValidators = ValidatorSets; type WeightInfo = (); - type MaxAuthorities = MaxAuthorities; - // TODO: Handle equivocation reports - type KeyOwnerProof = sp_core::Void; - type EquivocationReportSystem = (); + type KeyOwnerProof = MembershipProof; + type EquivocationReportSystem = + babe::EquivocationReportSystem; } impl grandpa::Config for Runtime { @@ -297,10 +321,10 @@ impl grandpa::Config for Runtime { type WeightInfo = (); type MaxAuthorities = MaxAuthorities; - // TODO: Handle equivocation reports type MaxSetIdSessionEntries = ConstU64<0>; - type KeyOwnerProof = sp_core::Void; - type EquivocationReportSystem = (); + type KeyOwnerProof = MembershipProof; + type EquivocationReportSystem = + grandpa::EquivocationReportSystem; } pub type Executive = frame_executive::Executive< @@ -354,6 +378,13 @@ mod benches { ); } +sp_api::decl_runtime_apis! { + #[api_version(1)] + pub trait SeraiRuntimeApi { + fn validators(network_id: NetworkId) -> Vec; + } +} + sp_api::impl_runtime_apis! { impl sp_api::Core for Runtime { fn version() -> RuntimeVersion { @@ -459,18 +490,22 @@ sp_api::impl_runtime_apis! { Babe::next_epoch() } + // This refers to a key being 'owned' by an authority in a system with multiple keys per + // validator + // Since we do not have such an infrastructure, we do not need this fn generate_key_ownership_proof( - _: sp_consensus_babe::Slot, - _: BabeId, + _slot: sp_consensus_babe::Slot, + _authority_id: BabeId, ) -> Option { - None + Some(sp_consensus_babe::OpaqueKeyOwnershipProof::new(vec![])) } fn submit_report_equivocation_unsigned_extrinsic( - _: sp_consensus_babe::EquivocationProof
<Block::Header>,
+        equivocation_proof: sp_consensus_babe::EquivocationProof<Block::Header>,
         _: sp_consensus_babe::OpaqueKeyOwnershipProof,
       ) -> Option<()> {
-        None
+        let proof = MembershipProof(equivocation_proof.offender.clone().into(), PhantomData);
+        Babe::submit_unsigned_equivocation_report(equivocation_proof, proof)
       }
     }
@@ -483,18 +518,19 @@ sp_api::impl_runtime_apis! {
       Grandpa::current_set_id()
     }
 
-    fn submit_report_equivocation_unsigned_extrinsic(
-      _: sp_consensus_grandpa::EquivocationProof<<Block as BlockT>::Hash, u64>,
-      _: sp_consensus_grandpa::OpaqueKeyOwnershipProof,
-    ) -> Option<()> {
-      None
-    }
-
     fn generate_key_ownership_proof(
       _set_id: sp_consensus_grandpa::SetId,
       _authority_id: GrandpaId,
     ) -> Option<sp_consensus_grandpa::OpaqueKeyOwnershipProof> {
-      None
+      Some(sp_consensus_grandpa::OpaqueKeyOwnershipProof::new(vec![]))
+    }
+
+    fn submit_report_equivocation_unsigned_extrinsic(
+      equivocation_proof: sp_consensus_grandpa::EquivocationProof<<Block as BlockT>::Hash, u64>,
+      _: sp_consensus_grandpa::OpaqueKeyOwnershipProof,
+    ) -> Option<()> {
+      let proof = MembershipProof(equivocation_proof.offender().clone().into(), PhantomData);
+      Grandpa::submit_unsigned_equivocation_report(equivocation_proof, proof)
     }
   }
@@ -533,10 +569,47 @@ sp_api::impl_runtime_apis! {
   impl sp_authority_discovery::AuthorityDiscoveryApi<Block> for Runtime {
     fn authorities() -> Vec<AuthorityDiscoveryId> {
-      Babe::authorities()
+      // Converts to `[u8; 32]` so it can be hashed
+      let serai_validators = Babe::authorities()
         .into_iter()
-        .map(|(id, _)| AuthorityDiscoveryId::from(id.into_inner()))
-        .collect()
+        .map(|(id, _)| id.into_inner().0)
+        .collect::<hashbrown::HashSet<_>>();
+      let mut all = serai_validators;
+      for network in NETWORKS {
+        if network == NetworkId::Serai {
+          continue;
+        }
+        // Returning the latest-decided, not latest and active, means the active set
+        // may fail to peer find if there isn't sufficient overlap. If a large amount of
+        // validators reboot, forcing some to successfully peer find in order for the
+        // threshold to become online again, this may cause a liveness failure.
+        //
+        // This is assumed not to matter in real life, yet it's an interesting note.
+        let participants =
+          ValidatorSets::participants_for_latest_decided_set(network)
+            .map_or(vec![], BoundedVec::into_inner);
+        for (participant, _) in participants {
+          all.insert(participant.0);
+        }
+      }
+      all.into_iter().map(|id| AuthorityDiscoveryId::from(PublicKey::from_raw(id))).collect()
+    }
+  }
+
+  impl crate::SeraiRuntimeApi<Block> for Runtime {
+    fn validators(network_id: NetworkId) -> Vec<Public> {
+      if network_id == NetworkId::Serai {
+        Babe::authorities()
+          .into_iter()
+          .map(|(id, _)| id.into_inner())
+          .collect()
+      } else {
+        ValidatorSets::participants_for_latest_decided_set(network_id)
+          .map_or(
+            vec![],
+            |vec| vec.into_inner().into_iter().map(|(validator, _)| validator).collect()
+          )
+      }
+    }
+  }
 }
diff --git a/substrate/signals/pallet/Cargo.toml b/substrate/signals/pallet/Cargo.toml
index 50b9c8a58..582a3e097 100644
--- a/substrate/signals/pallet/Cargo.toml
+++ b/substrate/signals/pallet/Cargo.toml
@@ -12,6 +12,12 @@ rust-version = "1.74"
 all-features = true
 rustdoc-args = ["--cfg", "docsrs"]
 
+[package.metadata.cargo-machete]
+ignored = ["scale", "scale-info"]
+
+[lints]
+workspace = true
+
 [dependencies]
 scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["derive"] }
 scale-info = { version = "2", default-features = false, features = ["derive"] }
diff --git a/substrate/signals/pallet/src/lib.rs b/substrate/signals/pallet/src/lib.rs
index 07f284485..3fad27c92 100644
--- a/substrate/signals/pallet/src/lib.rs
+++ b/substrate/signals/pallet/src/lib.rs
@@ -1,6 +1,11 @@
 #![cfg_attr(not(feature = "std"), no_std)]
 
-#[allow(deprecated, clippy::let_unit_value)] // TODO
+#[allow(
+  deprecated,
+  clippy::let_unit_value,
+  clippy::cast_possible_truncation,
+  clippy::ignored_unit_patterns
+)] // TODO
 #[frame_support::pallet]
 pub mod pallet {
   use scale_info::TypeInfo;
@@ -52,7 +57,7 @@ pub mod pallet {
   pub struct RegisteredRetirementSignal<T: Config> {
     in_favor_of: [u8; 32],
     registrant: T::AccountId,
-    registed_at: BlockNumberFor<T>,
+    registered_at: BlockNumberFor<T>,
   }
 
   impl<T: Config> RegisteredRetirementSignal<T> {
@@ -130,10 +135,10 @@ pub mod pallet {
     RetirementSignalLockedIn,
     RetirementSignalAlreadyRegistered,
     NotRetirementSignalRegistrant,
-    NonExistantRetirementSignal,
+    NonExistentRetirementSignal,
     ExpiredRetirementSignal,
     NotValidator,
-    RevokingNonExistantFavor,
+    RevokingNonExistentFavor,
   }
 
   // 80% threshold
@@ -144,7 +149,7 @@ pub mod pallet {
   // Returns true if this network's current set is in favor of the signal.
   //
   // Must only be called for networks which have a set decided.
- fn tally_for_network(signal_id: SignalId, network: NetworkId) -> Result> { + fn tally_for_network(signal_id: SignalId, network: NetworkId) -> bool { let this_network_session = VsPallet::::latest_decided_session(network).unwrap(); let this_set = ValidatorSet { network, session: this_network_session }; @@ -192,18 +197,18 @@ pub mod pallet { SetsInFavor::::set((signal_id, this_set), Some(())); Self::deposit_event(Event::SetInFavor { signal_id, set: this_set }); } - Ok(true) + true } else { if SetsInFavor::::contains_key((signal_id, this_set)) { // This should no longer be under the current tally SetsInFavor::::remove((signal_id, this_set)); Self::deposit_event(Event::SetNoLongerInFavor { signal_id, set: this_set }); } - Ok(false) + false } } - fn tally_for_all_networks(signal_id: SignalId) -> Result> { + fn tally_for_all_networks(signal_id: SignalId) -> bool { let mut total_in_favor_stake = 0; let mut total_allocated_stake = 0; for network in serai_primitives::NETWORKS { @@ -221,10 +226,8 @@ pub mod pallet { total_allocated_stake += network_stake.0; } - Ok( - total_in_favor_stake >= - (total_allocated_stake * REQUIREMENT_NUMERATOR).div_ceil(REQUIREMENT_DIVISOR), - ) + total_in_favor_stake >= + (total_allocated_stake * REQUIREMENT_NUMERATOR).div_ceil(REQUIREMENT_DIVISOR) } fn revoke_favor_internal( @@ -233,7 +236,7 @@ pub mod pallet { for_network: NetworkId, ) -> DispatchResult { if !Favors::::contains_key((signal_id, for_network), account) { - Err::<(), _>(Error::::RevokingNonExistantFavor)?; + Err::<(), _>(Error::::RevokingNonExistentFavor)?; } Favors::::remove((signal_id, for_network), account); Self::deposit_event(Event::::FavorRevoked { signal_id, by: account, for_network }); @@ -242,7 +245,7 @@ pub mod pallet { // Technically, this tally may make the network in favor and justify re-tallying for all // networks // Its assumed not to - Self::tally_for_network(signal_id, for_network)?; + Self::tally_for_network(signal_id, for_network); Ok(()) } } @@ -272,7 +275,7 @@ pub mod pallet { let signal = RegisteredRetirementSignal { in_favor_of, registrant: account, - registed_at: frame_system::Pallet::::block_number(), + registered_at: frame_system::Pallet::::block_number(), }; let signal_id = signal.id(); @@ -298,7 +301,7 @@ pub mod pallet { let account = ensure_signed(origin)?; let Some(registered_signal) = RegisteredRetirementSignals::::get(retirement_signal_id) else { - return Err::<(), _>(Error::::NonExistantRetirementSignal.into()); + return Err::<(), _>(Error::::NonExistentRetirementSignal.into()); }; if account != registered_signal.registrant { Err::<(), _>(Error::::NotRetirementSignalRegistrant)?; @@ -338,7 +341,7 @@ pub mod pallet { // We don't have to do this for a `Halt` signal as `Halt` doesn't have the registration // process let Some(registered_signal) = RegisteredRetirementSignals::::get(signal_id) else { - return Err::<(), _>(Error::::NonExistantRetirementSignal.into()); + return Err::<(), _>(Error::::NonExistentRetirementSignal.into()); }; // Check the signal isn't out of date @@ -347,7 +350,7 @@ pub mod pallet { // The reason to still have it is because locking in a dated runtime may cause a corrupt // blockchain and lead to a failure in system integrity // `Halt`, which doesn't have this check, at worst causes temporary downtime - if (registered_signal.registed_at + T::RetirementValidityDuration::get().into()) < + if (registered_signal.registered_at + T::RetirementValidityDuration::get().into()) < frame_system::Pallet::::block_number() { Err::<(), 
_>(Error::::ExpiredRetirementSignal)?; @@ -373,7 +376,7 @@ pub mod pallet { // Check if the network is in favor // tally_for_network expects the network to be active, which is implied by being in the // latest decided set - let network_in_favor = Self::tally_for_network(signal_id, for_network)?; + let network_in_favor = Self::tally_for_network(signal_id, for_network); // If this network is in favor, check if enough networks are // We could optimize this by only running the following code when the network is *newly* in @@ -382,7 +385,7 @@ pub mod pallet { // to each other, any new votes will cause a re-tally if network_in_favor { // If enough are, lock in the signal - if Self::tally_for_all_networks(signal_id)? { + if Self::tally_for_all_networks(signal_id) { match signal_id { SignalId::Retirement(signal_id) => { LockedInRetirement::::set(Some(( @@ -445,7 +448,7 @@ pub mod pallet { // Check this Signal exists (which would've been implied by Favors for it existing) if let SignalId::Retirement(signal_id) = signal_id { if RegisteredRetirementSignals::::get(signal_id).is_none() { - Err::<(), _>(Error::::NonExistantRetirementSignal)?; + Err::<(), _>(Error::::NonExistentRetirementSignal)?; } } } diff --git a/substrate/signals/primitives/Cargo.toml b/substrate/signals/primitives/Cargo.toml index 7e8f41bb7..1c3381459 100644 --- a/substrate/signals/primitives/Cargo.toml +++ b/substrate/signals/primitives/Cargo.toml @@ -12,6 +12,9 @@ rust-version = "1.74" all-features = true rustdoc-args = ["--cfg", "docsrs"] +[lints] +workspace = true + [dependencies] zeroize = { version = "^1.5", features = ["derive"], optional = true } diff --git a/substrate/validator-sets/pallet/Cargo.toml b/substrate/validator-sets/pallet/Cargo.toml index 1974abbed..3b5537884 100644 --- a/substrate/validator-sets/pallet/Cargo.toml +++ b/substrate/validator-sets/pallet/Cargo.toml @@ -12,7 +12,15 @@ rust-version = "1.74" all-features = true rustdoc-args = ["--cfg", "docsrs"] +[package.metadata.cargo-machete] +ignored = ["scale", "scale-info"] + +[lints] +workspace = true + [dependencies] +hashbrown = { version = "0.14", default-features = false, features = ["ahash", "inline-more"] } + scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["derive"] } scale-info = { version = "2", default-features = false, features = ["derive"] } @@ -22,6 +30,7 @@ sp-std = { git = "https://github.com/serai-dex/substrate", default-features = fa sp-application-crypto = { git = "https://github.com/serai-dex/substrate", default-features = false } sp-runtime = { git = "https://github.com/serai-dex/substrate", default-features = false } sp-session = { git = "https://github.com/serai-dex/substrate", default-features = false } +sp-staking = { git = "https://github.com/serai-dex/substrate", default-features = false } frame-system = { git = "https://github.com/serai-dex/substrate", default-features = false } frame-support = { git = "https://github.com/serai-dex/substrate", default-features = false } @@ -46,6 +55,7 @@ std = [ "sp-application-crypto/std", "sp-runtime/std", "sp-session/std", + "sp-staking/std", "frame-system/std", "frame-support/std", diff --git a/substrate/validator-sets/pallet/src/lib.rs b/substrate/validator-sets/pallet/src/lib.rs index 3b26f6ad5..c1c7b1798 100644 --- a/substrate/validator-sets/pallet/src/lib.rs +++ b/substrate/validator-sets/pallet/src/lib.rs @@ -1,33 +1,72 @@ #![cfg_attr(not(feature = "std"), no_std)] -#[allow(deprecated, clippy::let_unit_value)] // TODO +use 
core::marker::PhantomData;
+
+use scale::{Encode, Decode};
+use scale_info::TypeInfo;
+
+use sp_std::{vec, vec::Vec};
+use sp_core::sr25519::{Public, Signature};
+use sp_application_crypto::RuntimePublic;
+use sp_session::{ShouldEndSession, GetSessionNumber, GetValidatorCount};
+use sp_runtime::{KeyTypeId, ConsensusEngineId, traits::IsMember};
+use sp_staking::offence::{ReportOffence, Offence, OffenceError};
+
+use frame_system::{pallet_prelude::*, RawOrigin};
+use frame_support::{
+  pallet_prelude::*,
+  traits::{DisabledValidators, KeyOwnerProofSystem, FindAuthor},
+  BoundedVec, WeakBoundedVec, StoragePrefixedMap,
+};
+
+use serai_primitives::*;
+pub use validator_sets_primitives as primitives;
+use primitives::*;
+
+use coins_pallet::{Pallet as Coins, AllowMint};
+use dex_pallet::Pallet as Dex;
+
+use pallet_babe::{
+  Pallet as Babe, AuthorityId as BabeAuthorityId, EquivocationOffence as BabeEquivocationOffence,
+};
+use pallet_grandpa::{
+  Pallet as Grandpa, AuthorityId as GrandpaAuthorityId,
+  EquivocationOffence as GrandpaEquivocationOffence,
+};
+
+#[derive(Debug, Encode, Decode, TypeInfo, PartialEq, Eq, Clone)]
+pub struct MembershipProof<T: pallet::Config>(pub Public, pub PhantomData<T>);
+impl<T: pallet::Config> GetSessionNumber for MembershipProof<T> {
+  fn session(&self) -> u32 {
+    let current = Pallet::<T>::session(NetworkId::Serai).unwrap().0;
+    if Babe::<T>::is_member(&BabeAuthorityId::from(self.0)) {
+      current
+    } else {
+      // If it isn't in the current session, it should have been in the previous one.
+      current - 1
+    }
+  }
+}
+impl<T: pallet::Config> GetValidatorCount for MembershipProof<T> {
+  // We only implement this interface to satisfy trait requirements
+  // Although this might return the wrong count if the offender was in the previous set, we don't
+  // rely on it, and Substrate only relies on it for economic calculations we also don't rely on
+  fn validator_count(&self) -> u32 {
+    u32::try_from(Babe::<T>::authorities().len()).unwrap()
+  }
+}
+
+#[allow(
+  deprecated,
+  clippy::let_unit_value,
+  clippy::cast_possible_truncation,
+  clippy::ignored_unit_patterns
+)] // TODO
 #[frame_support::pallet]
 pub mod pallet {
   use super::*;
 
-  use scale_info::TypeInfo;
-
-  use sp_core::sr25519::{Public, Signature};
-  use sp_std::{vec, vec::Vec};
-  use sp_application_crypto::RuntimePublic;
-  use sp_session::ShouldEndSession;
-  use sp_runtime::traits::IsMember;
-
-  use frame_system::pallet_prelude::*;
-  use frame_support::{
-    pallet_prelude::*, traits::DisabledValidators, BoundedVec, WeakBoundedVec, StoragePrefixedMap,
-  };
-
-  use serai_primitives::*;
-  pub use validator_sets_primitives as primitives;
-  use primitives::*;
-
-  use coins_pallet::{Pallet as Coins, AllowMint};
-  use dex_pallet::Pallet as Dex;
-
-  use pallet_babe::{Pallet as Babe, AuthorityId as BabeAuthorityId};
-  use pallet_grandpa::{Pallet as Grandpa, AuthorityId as GrandpaAuthorityId};
-
   #[pallet::config]
   pub trait Config: frame_system::Config
@@ -45,8 +84,6 @@ pub mod pallet {
   #[pallet::genesis_config]
   #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)]
   pub struct GenesisConfig<T: Config> {
-    /// Stake requirement to join the initial validator sets.
-
     /// Networks to spawn Serai with, and the stake requirement per key share.
     ///
     /// Every participant at genesis will automatically be assumed to have this much stake.
@@ -87,8 +124,9 @@ pub mod pallet {
   #[pallet::getter(fn allocation_per_key_share)]
   pub type AllocationPerKeyShare<T: Config> =
     StorageMap<_, Identity, NetworkId, Amount, OptionQuery>;
 
-  /// The validators selected to be in-set who haven't been removed.
+  /// The validators selected to be in-set.
   #[pallet::storage]
+  #[pallet::getter(fn participants_for_latest_decided_set)]
   pub(crate) type Participants<T: Config> = StorageMap<
     _,
     Identity,
@@ -254,18 +292,25 @@ pub mod pallet {
   /// Pending deallocations, keyed by the Session they become unlocked on.
   #[pallet::storage]
-  type PendingDeallocations<T: Config> =
-    StorageMap<_, Blake2_128Concat, (NetworkId, Session, Public), Amount, OptionQuery>;
-
-  /// The MuSig key for a validator set.
-  #[pallet::storage]
-  pub type MuSigKeys<T: Config> = StorageMap<_, Twox64Concat, ValidatorSet, Public, OptionQuery>;
+  type PendingDeallocations<T: Config> = StorageDoubleMap<
+    _,
+    Blake2_128Concat,
+    (NetworkId, Public),
+    Identity,
+    Session,
+    Amount,
+    OptionQuery,
+  >;
 
   /// The generated key pair for a given validator set instance.
   #[pallet::storage]
   #[pallet::getter(fn keys)]
   pub type Keys<T: Config> = StorageMap<_, Twox64Concat, ValidatorSet, KeyPair, OptionQuery>;
 
+  /// Disabled validators.
+  #[pallet::storage]
+  pub type SeraiDisabledIndices<T: Config> = StorageMap<_, Identity, u32, Public, OptionQuery>;
+
   #[pallet::event]
   #[pallet::generate_deposit(pub(super) fn deposit_event)]
   pub enum Event<T: Config> {
@@ -307,9 +352,8 @@ pub mod pallet {
       // Update CurrentSession
       let session = {
-        let new_session = CurrentSession::<T>::get(network)
-          .map(|session| Session(session.0 + 1))
-          .unwrap_or(Session(0));
+        let new_session =
+          CurrentSession::<T>::get(network).map_or(Session(0), |session| Session(session.0 + 1));
         CurrentSession::<T>::set(network, Some(new_session));
         new_session
       };
@@ -343,13 +387,6 @@ pub mod pallet {
       let set = ValidatorSet { network, session };
       Pallet::<T>::deposit_event(Event::NewSet { set });
-      // Only set the MuSig key for non-Serai sets, as only non-Serai sets should publish keys
-      if network != NetworkId::Serai {
-        MuSigKeys::<T>::set(
-          set,
-          Some(musig_key(set, &participants.iter().map(|(id, _)| *id).collect::<Vec<_>>())),
-        );
-      }
       Participants::<T>::set(network, Some(participants.try_into().unwrap()));
     }
   }
@@ -411,26 +448,6 @@ pub mod pallet {
     }
   }
 
-  impl<T: Config> Pallet<T> {
-    fn verify_signature(
-      set: ValidatorSet,
-      key_pair: &KeyPair,
-      signature: &Signature,
-    ) -> Result<(), Error<T>> {
-      // Confirm a key hasn't been set for this set instance
-      if Keys::<T>::get(set).is_some() {
-        Err(Error::AlreadyGeneratedKeys)?
-      }
-
-      let Some(musig_key) = MuSigKeys::<T>::get(set) else { Err(Error::NonExistentValidatorSet)? };
-      if !musig_key.verify(&set_keys_message(&set, key_pair), signature) {
-        Err(Error::BadSignature)?;
-      }
-
-      Ok(())
-    }
-  }
-
   impl<T: Config> Pallet<T> {
     fn account() -> T::AccountId {
       system_address(b"ValidatorSets").into()
     }
@@ -514,6 +531,20 @@ pub mod pallet {
       Ok(())
     }
 
+    fn session_to_unlock_on_for_current_set(network: NetworkId) -> Option<Session> {
+      let mut to_unlock_on = Self::session(network)?;
+      // Move to the next session, as deallocating currently in-use stake is obviously invalid
+      to_unlock_on.0 += 1;
+      if network == NetworkId::Serai {
+        // Since the next Serai set will already have been decided, we can only deallocate one
+        // session later
+        to_unlock_on.0 += 1;
+      }
+      // Increase the session by one, creating a cooldown period
+      to_unlock_on.0 += 1;
+      Some(to_unlock_on)
+    }
+
     /// Decreases a validator's allocation to a set.
     ///
     /// Errors if the capacity provided by this allocation is in use.
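To make the deallocation schedule above concrete, a standalone sketch of the arithmetic `session_to_unlock_on_for_current_set` encodes (hypothetical numbers and helper, not part of this changeset):

// Mirrors the logic above: +1 to leave the in-use session, +1 more on Serai
// (since the next Serai set is already decided), and +1 as a cooldown.
fn unlock_session(current_session: u32, is_serai: bool) -> u32 {
  let mut to_unlock_on = current_session + 1;
  if is_serai {
    to_unlock_on += 1;
  }
  to_unlock_on + 1
}

#[test]
fn deallocation_schedule() {
  assert_eq!(unlock_session(5, false), 7); // external networks unlock two sessions out
  assert_eq!(unlock_session(5, true), 8); // Serai unlocks three sessions out
}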
@@ -588,20 +619,12 @@ pub mod pallet {
       // Set it to PendingDeallocations, letting it be released upon a future session
       // This unwrap should be fine as this account is active, meaning a session has occurred
-      let mut to_unlock_on = Self::session(network).unwrap();
-      if network == NetworkId::Serai {
-        // Since the next Serai set will already have been decided, we can only deallocate once the
-        // next set ends
-        to_unlock_on.0 += 2;
-      } else {
-        to_unlock_on.0 += 1;
-      }
-      // Increase the session by one, creating a cooldown period
-      to_unlock_on.0 += 1;
+      let to_unlock_on = Self::session_to_unlock_on_for_current_set(network).unwrap();
       let existing =
-        PendingDeallocations::<T>::get((network, to_unlock_on, account)).unwrap_or(Amount(0));
+        PendingDeallocations::<T>::get((network, account), to_unlock_on).unwrap_or(Amount(0));
       PendingDeallocations::<T>::set(
-        (network, to_unlock_on, account),
+        (network, account),
+        to_unlock_on,
         Some(Amount(existing.0 + amount.0)),
       );
@@ -658,7 +681,6 @@ pub mod pallet {
     }
 
     pub fn retire_set(set: ValidatorSet) {
-      MuSigKeys::<T>::remove(set);
       Keys::<T>::remove(set);
       Pallet::<T>::deposit_event(Event::SetRetired { set });
     }
@@ -675,11 +697,12 @@ pub mod pallet {
       if !Self::handover_completed(network, session) {
         return None;
       }
-      PendingDeallocations::<T>::take((network, session, key))
+      PendingDeallocations::<T>::take((network, key), session)
     }
 
     fn rotate_session() {
-      let prior_serai_participants = Participants::<T>::get(NetworkId::Serai)
+      // The next Serai validators, currently in the queue.
+      let now_validators = Participants::<T>::get(NetworkId::Serai)
         .expect("no Serai participants upon rotate_session");
       let prior_serai_session = Self::session(NetworkId::Serai).unwrap();
@@ -692,16 +715,14 @@ pub mod pallet {
       // Update Babe and Grandpa
       let session = prior_serai_session.0 + 1;
-      let validators = prior_serai_participants;
-      let next_validators =
-        Participants::<T>::get(NetworkId::Serai).expect("no Serai participants after new_session");
+      let next_validators = Participants::<T>::get(NetworkId::Serai).unwrap();
       Babe::<T>::enact_epoch_change(
         WeakBoundedVec::force_from(
-          validators.iter().copied().map(|(id, w)| (BabeAuthorityId::from(id), w)).collect(),
+          now_validators.iter().copied().map(|(id, w)| (BabeAuthorityId::from(id), w)).collect(),
           None,
         ),
         WeakBoundedVec::force_from(
-          next_validators.into_iter().map(|(id, w)| (BabeAuthorityId::from(id), w)).collect(),
+          next_validators.iter().copied().map(|(id, w)| (BabeAuthorityId::from(id), w)).collect(),
          None,
        ),
        Some(session),
      );
      Grandpa::<T>::new_session(
        true,
        session,
-        validators.into_iter().map(|(id, w)| (GrandpaAuthorityId::from(id), w)).collect(),
+        next_validators.into_iter().map(|(id, w)| (GrandpaAuthorityId::from(id), w)).collect(),
      );
+
+      // Clear SeraiDisabledIndices, only preserving keys still present in the new session
+      // First drain so we don't mutate as we iterate
+      let mut disabled = vec![];
+      for (_, validator) in SeraiDisabledIndices::<T>::drain() {
+        disabled.push(validator);
+      }
+      for disabled in disabled {
+        Self::disable_serai_validator(disabled);
+      }
     }
 
     /// Returns the required stake in terms SRI for a given `Balance`.
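A note on why `PendingDeallocations` became a double map keyed by `(network, key)` and then `Session`: it allows iterating or draining everything pending for a single validator, which the slashing code in the next hunk relies on via `iter_prefix` and `drain_prefix`. A hypothetical helper (not part of this changeset; the storage and primitive types are the pallet's own) sketching that access pattern:

// Sum every pending deallocation for one validator on one network.
// Illustrative only: under the new layout, one prefix walk visits all of a
// validator's pending deallocations, regardless of their unlock session.
fn total_pending<T: Config>(network: NetworkId, key: Public) -> u64 {
  PendingDeallocations::<T>::iter_prefix((network, key))
    .map(|(_session, amount)| amount.0)
    .sum()
}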
@@ -745,6 +776,75 @@ pub mod pallet {
       }
       total_required
     }
+
+    fn can_slash_serai_validator(validator: Public) -> bool {
+      // Checks if they're active or actively deallocating (letting us still slash them)
+      // We could check if they're upcoming/still allocating, yet that'd mean the equivocation is
+      // invalid (as they aren't actively signing anything) or severely dated
+      // It's not an edge case worth handling comprehensively, due to the complexity of doing so
+      Babe::<T>::is_member(&BabeAuthorityId::from(validator)) ||
+        PendingDeallocations::<T>::iter_prefix((NetworkId::Serai, validator)).next().is_some()
+    }
+
+    fn slash_serai_validator(validator: Public) {
+      let network = NetworkId::Serai;
+
+      let mut allocation = Self::allocation((network, validator)).unwrap_or(Amount(0));
+      // Reduce the current allocation to 0.
+      Self::set_allocation(network, validator, Amount(0));
+
+      // Take the pending deallocation from the current session
+      allocation.0 += PendingDeallocations::<T>::take(
+        (network, validator),
+        Self::session_to_unlock_on_for_current_set(network).unwrap(),
+      )
+      .unwrap_or(Amount(0))
+      .0;
+
+      // Reduce the TotalAllocatedStake for the network, if in set
+      // TotalAllocatedStake is the sum of allocations and pending deallocations from the current
+      // session, since pending deallocations can still be slashed and therefore still contribute
+      // to economic security, hence the allocation calculations above being above and the ones
+      // below being below
+      if InSet::<T>::contains_key(NetworkId::Serai, validator) {
+        let current_staked = Self::total_allocated_stake(network).unwrap();
+        TotalAllocatedStake::<T>::set(network, Some(current_staked - allocation));
+      }
+
+      // Clear any other pending deallocations.
+      for (_, pending) in PendingDeallocations::<T>::drain_prefix((network, validator)) {
+        allocation.0 += pending.0;
+      }
+
+      // Burn the allocation from the stake account
+      Coins::<T>::burn(
+        RawOrigin::Signed(Self::account()).into(),
+        Balance { coin: Coin::Serai, amount: allocation },
+      )
+      .unwrap();
+    }
+
+    /// Disable a Serai validator, preventing them from further authoring blocks.
+    ///
+    /// Returns true if the validator-to-disable was actually a validator.
+    /// Returns false if they weren't.
+    fn disable_serai_validator(validator: Public) -> bool {
+      if let Some(index) =
+        Babe::<T>::authorities().into_iter().position(|(id, _)| id.into_inner() == validator)
+      {
+        SeraiDisabledIndices::<T>::set(u32::try_from(index).unwrap(), Some(validator));
+
+        let session = Self::session(NetworkId::Serai).unwrap();
+        Self::deposit_event(Event::ParticipantRemoved {
+          set: ValidatorSet { network: NetworkId::Serai, session },
+          removed: validator,
+        });
+
+        true
+      } else {
+        false
+      }
+    }
   }

   #[pallet::call]
@@ -754,6 +854,7 @@
     pub fn set_keys(
       origin: OriginFor<T>,
       network: NetworkId,
+      removed_participants: Vec<Public>,
       key_pair: KeyPair,
       signature: Signature,
     ) -> DispatchResult {
@@ -763,32 +864,21 @@
       // (called by pre_dispatch) checks it
       let _ = signature;

-      let session = Self::session(NetworkId::Serai).unwrap();
-
-      let set = ValidatorSet { session, network };
+      let session = Self::session(network).unwrap();
+      let set = ValidatorSet { network, session };
       Keys::<T>::set(set, Some(key_pair.clone()));
-      Self::deposit_event(Event::KeyGen { set, key_pair });
-
-      Ok(())
-    }
-
-    #[pallet::call_index(1)]
-    #[pallet::weight(0)] // TODO
-    pub fn remove_participant(
-      origin: OriginFor<T>,
-      network: NetworkId,
-      to_remove: Public,
-      signers: Vec<Public>,
-      signature: Signature,
-    ) -> DispatchResult {
-      ensure_none(origin)?;
-
-      // Nothing occurs here as validate_unsigned does everything
-      let _ = network;
-      let _ = to_remove;
-      let _ = signers;
-      let _ = signature;
+      // This does not remove from TotalAllocatedStake or InSet in order to:
+      // 1) Not decrease the stake present in this set. This means removed participants are
+      //    still liable for the economic security of the external network. This prevents
+      //    a decided set, which is economically secure, from falling below the threshold.
+      // 2) Not allow parties removed to immediately deallocate, per commentary on deallocation
+      //    scheduling (https://github.com/serai-dex/serai/issues/394).
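
How removals interact with fault tolerance is enforced by `validate_unsigned` below: key shares are counted over the full `InSet` (removed participants included), only non-removed signers are credited, and the signers must reach `2f + 1`. A toy model of that check, where the `(bool, u16)` shape and the share counts are illustrative rather than the pallet's actual types:

    // Toy version (assumed types) of the 2f + 1 check performed in validate_unsigned below
    fn removal_is_valid(participants: &[(bool, u16)]) -> bool {
      // (removed?, key shares) per participant
      let mut all_key_shares: u32 = 0;
      let mut signing_key_shares: u32 = 0;
      for &(removed, shares) in participants {
        all_key_shares += u32::from(shares);
        if !removed {
          signing_key_shares += u32::from(shares);
        }
      }
      // f is every key share not signing off; signers must hold at least 2f + 1 shares
      let f = all_key_shares - signing_key_shares;
      signing_key_shares >= (2 * f) + 1
    }

    fn main() {
      // Removing 1 of 4 equal validators passes: 3 >= (2 * 1) + 1
      assert!(removal_is_valid(&[(false, 1), (false, 1), (false, 1), (true, 1)]));
      // Removing 2 of 4 fails: 2 < (2 * 2) + 1
      assert!(!removal_is_valid(&[(false, 1), (false, 1), (true, 1), (true, 1)]));
    }
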
+ for removed in removed_participants { + Self::deposit_event(Event::ParticipantRemoved { set, removed }); + } + Self::deposit_event(Event::KeyGen { set, key_pair }); Ok(()) } @@ -850,128 +940,79 @@ pub mod pallet { fn validate_unsigned(_: TransactionSource, call: &Self::Call) -> TransactionValidity { // Match to be exhaustive match call { - Call::set_keys { network, ref key_pair, ref signature } => { - // Don't allow the Serai set to set_keys, as they have no reason to do so - // This should already be covered by the lack of key in MuSigKeys, yet it doesn't hurt to - // be explicit - if network == &NetworkId::Serai { - Err(InvalidTransaction::Custom(0))?; - } - - let session = Self::session(NetworkId::Serai).unwrap(); - - let set = ValidatorSet { session, network: *network }; - match Self::verify_signature(set, key_pair, signature) { - Err(Error::AlreadyGeneratedKeys) => Err(InvalidTransaction::Stale)?, - Err(Error::NonExistentValidatorSet) | - Err(Error::InsufficientAllocation) | - Err(Error::NotEnoughAllocated) | - Err(Error::AllocationWouldRemoveFaultTolerance) | - Err(Error::AllocationWouldPreventFaultTolerance) | - Err(Error::DeallocationWouldRemoveParticipant) | - Err(Error::DeallocationWouldRemoveFaultTolerance) | - Err(Error::NonExistentDeallocation) | - Err(Error::NonExistentValidator) | - Err(Error::DeallocationWouldRemoveEconomicSecurity) | - Err(Error::BadSignature) => Err(InvalidTransaction::BadProof)?, - Err(Error::__Ignore(_, _)) => unreachable!(), - Ok(()) => (), - } + Call::set_keys { network, ref removed_participants, ref key_pair, ref signature } => { + let network = *network; - ValidTransaction::with_tag_prefix("ValidatorSets") - .and_provides((0, set)) - .longevity(u64::MAX) - .propagate(true) - .build() - } - Call::remove_participant { network, to_remove, signers, signature } => { - if network == &NetworkId::Serai { + // Don't allow the Serai set to set_keys, as they have no reason to do so + if network == NetworkId::Serai { Err(InvalidTransaction::Custom(0))?; } // Confirm this set has a session - let Some(current_session) = Self::session(*network) else { + let Some(current_session) = Self::session(network) else { Err(InvalidTransaction::Custom(1))? 
       };
-      // This is needed as modify storage variables of the latest decided session
-      assert_eq!(Pallet::<T>::latest_decided_session(*network), Some(current_session));
-      let set = ValidatorSet { network: *network, session: current_session };
+
+      let set = ValidatorSet { network, session: current_session };
+
+      // Confirm it has yet to set keys
       if Keys::<T>::get(set).is_some() {
-        Err(InvalidTransaction::Custom(2))?;
+        Err(InvalidTransaction::Stale)?;
       }

-      let mut participants =
+      // This is a needed precondition, as the code below reads storage variables under the
+      // assumption they're for the latest decided session
+      assert_eq!(Pallet::<T>::latest_decided_session(network), Some(current_session));
+
+      // This does not slash the removed participants as that'll be done at the end of the
+      // set's lifetime
+      let mut removed = hashbrown::HashSet::new();
+      for participant in removed_participants {
+        // Confirm this wasn't duplicated
+        if removed.contains(&participant.0) {
+          Err(InvalidTransaction::Custom(2))?;
+        }
+        removed.insert(participant.0);
+      }
+
+      let participants =
         Participants::<T>::get(network).expect("session existed without participants");

-      // Require signers be sorted to ensure no duplicates are present
-      let mut last_signer = None;
+      let mut all_key_shares = 0;
+      let mut signers = vec![];
       let mut signing_key_shares = 0;
-      for signer in signers {
-        if let Some(last_signer) = last_signer {
-          if last_signer >= signer {
-            Err(InvalidTransaction::Custom(3))?;
-          }
+      for participant in participants {
+        let participant = participant.0;
+        let shares = InSet::<T>::get(network, participant)
+          .expect("participant from Participants wasn't InSet");
+        all_key_shares += shares;
+
+        if removed.contains(&participant.0) {
+          continue;
         }
-        last_signer = Some(signer);
-
-        // Doesn't use InSet as InSet *includes* removed validators
-        // Only non-removed validators should be considered as contributing
-        let Some(shares) = participants
-          .iter()
-          .find(|(participant, _)| participant == to_remove)
-          .map(|(_, shares)| shares)
-        else {
-          Err(InvalidTransaction::Custom(4))?
-        };
+
+        signers.push(participant);
         signing_key_shares += shares;
       }

-      // Check 67% are participating in this removal
-      // This is done by iterating over InSet, which isn't mutated on removal, and reading the
-      // shares from that
-      let mut all_key_shares = 0;
-      for shares in InSet::<T>::iter_prefix_values(network) {
-        all_key_shares += shares;
-      }
-      // 2f + 1
-      if signing_key_shares < ((2 * (all_key_shares - signing_key_shares)) + 1) {
-        Err(InvalidTransaction::Custom(5))?;
+      {
+        let f = all_key_shares - signing_key_shares;
+        if signing_key_shares < ((2 * f) + 1) {
+          Err(InvalidTransaction::Custom(3))?;
+        }
       }

-      // Perform the removal
-      let Some(removal_index) =
-        participants.iter().position(|participant| &participant.0 == to_remove)
-      else {
-        Err(InvalidTransaction::Custom(6))?
-      };
-      participants.remove(removal_index);

-      // Verify the signature with the MuSig key of the signers
-      if !musig_key(set, signers)
-        .verify(&remove_participant_message(&set, *to_remove), signature)
+      // We theoretically don't need set_keys_message to bind to removed_participants, as the
+      // key we're signing with effectively already does so, yet there's no reason not to
+      if !musig_key(set, &signers)
+        .verify(&set_keys_message(&set, removed_participants, key_pair), signature)
       {
         Err(InvalidTransaction::BadProof)?;
       }

-      // Set the new MuSig key
-      MuSigKeys::<T>::set(
-        set,
-        Some(musig_key(set, &participants.iter().map(|(id, _)| *id).collect::<Vec<_>>())),
-      );
-      Participants::<T>::set(network, Some(participants));
-
-      // This does not remove from TotalAllocatedStake or InSet in order to:
-      // 1) Not decrease the stake present in this set. This means removed participants are
-      //    still liable for the economic security of the external network. This prevents
-      //    a decided set, which is economically secure, from falling below the threshold.
-      // 2) Not allow parties removed to immediately deallocate, per commentary on deallocation
-      //    scheduling (https://github.com/serai-dex/serai/issues/394).
-
-      Pallet::<T>::deposit_event(Event::ParticipantRemoved { set, removed: *to_remove });
-
       ValidTransaction::with_tag_prefix("ValidatorSets")
-        .and_provides((1, set, to_remove))
+        .and_provides(set)
         .longevity(u64::MAX)
         .propagate(true)
         .build()
@@ -1001,10 +1042,104 @@
     }
   }

+  #[rustfmt::skip]
+  impl<T: Config, V: Into<Public> + From<Public>> KeyOwnerProofSystem<(KeyTypeId, V)> for Pallet<T> {
+    type Proof = MembershipProof<T>;
+    type IdentificationTuple = Public;
+
+    fn prove(key: (KeyTypeId, V)) -> Option<Self::Proof> {
+      Some(MembershipProof(key.1.into(), PhantomData))
+    }
+
+    fn check_proof(key: (KeyTypeId, V), proof: Self::Proof) -> Option<Self::IdentificationTuple> {
+      let validator = key.1.into();
+
+      // check the offender and the proof offender are the same.
+      if validator != proof.0 {
+        return None;
+      }
+
+      // check validator is valid
+      if !Self::can_slash_serai_validator(validator) {
+        return None;
+      }
+
+      Some(validator)
+    }
+  }
+
+  impl<T: Config> ReportOffence<Public, Public, BabeEquivocationOffence<Public>> for Pallet<T> {
+    /// Report an `offence` and reward given `reporters`.
+    fn report_offence(
+      _: Vec<Public>,
+      offence: BabeEquivocationOffence<Public>,
+    ) -> Result<(), OffenceError> {
+      // slash the offender
+      let offender = offence.offender;
+      Self::slash_serai_validator(offender);
+
+      // disable it
+      Self::disable_serai_validator(offender);
+
+      Ok(())
+    }
+
+    fn is_known_offence(
+      offenders: &[Public],
+      _: &<BabeEquivocationOffence<Public> as Offence<Public>>::TimeSlot,
+    ) -> bool {
+      for offender in offenders {
+        // It's not a known offence if we can still slash them
+        if Self::can_slash_serai_validator(*offender) {
+          return false;
+        }
+      }
+      true
+    }
+  }
+
+  impl<T: Config> ReportOffence<Public, Public, GrandpaEquivocationOffence<Public>> for Pallet<T> {
+    /// Report an `offence` and reward given `reporters`.
+    fn report_offence(
+      _: Vec<Public>,
+      offence: GrandpaEquivocationOffence<Public>,
+    ) -> Result<(), OffenceError> {
+      // slash the offender
+      let offender = offence.offender;
+      Self::slash_serai_validator(offender);
+
+      // disable it
+      Self::disable_serai_validator(offender);
+
+      Ok(())
+    }
+
+    fn is_known_offence(
+      offenders: &[Public],
+      _slot: &<GrandpaEquivocationOffence<Public> as Offence<Public>>::TimeSlot,
+    ) -> bool {
+      for offender in offenders {
+        if Self::can_slash_serai_validator(*offender) {
+          return false;
+        }
+      }
+      true
+    }
+  }
+
+  impl<T: Config> FindAuthor<Public> for Pallet<T> {
+    fn find_author<'a, I>(digests: I) -> Option<Public>
+    where
+      I: 'a + IntoIterator<Item = (ConsensusEngineId, &'a [u8])>,
+    {
+      let i = Babe::<T>::find_author(digests)?;
+      Some(Babe::<T>::authorities()[i as usize].0.clone().into())
+    }
+  }
+
   impl<T: Config> DisabledValidators for Pallet<T> {
-    fn is_disabled(_: u32) -> bool {
-      // TODO
-      false
+    fn is_disabled(index: u32) -> bool {
+      SeraiDisabledIndices::<T>::get(index).is_some()
     }
   }
 }
diff --git a/substrate/validator-sets/primitives/Cargo.toml b/substrate/validator-sets/primitives/Cargo.toml
index fc37c27ac..844e61347 100644
--- a/substrate/validator-sets/primitives/Cargo.toml
+++ b/substrate/validator-sets/primitives/Cargo.toml
@@ -12,6 +12,9 @@ rust-version = "1.74"
 all-features = true
 rustdoc-args = ["--cfg", "docsrs"]

+[lints]
+workspace = true
+
 [dependencies]
 zeroize = { version = "^1.5", features = ["derive"], optional = true }
diff --git a/substrate/validator-sets/primitives/src/lib.rs b/substrate/validator-sets/primitives/src/lib.rs
index 358199b8c..df7cf18e5 100644
--- a/substrate/validator-sets/primitives/src/lib.rs
+++ b/substrate/validator-sets/primitives/src/lib.rs
@@ -98,14 +98,13 @@ pub fn musig_key(set: ValidatorSet, set_keys: &[Public]) -> Public {
   Public(dkg::musig::musig_key::<Ristretto>(&musig_context(set), &keys).unwrap().to_bytes())
 }

-/// The message for the remove_participant signature.
-pub fn remove_participant_message(set: &ValidatorSet, removed: Public) -> Vec<u8> {
-  (b"ValidatorSets-remove_participant", set, removed).encode()
-}
-
 /// The message for the set_keys signature.
-pub fn set_keys_message(set: &ValidatorSet, key_pair: &KeyPair) -> Vec { - (b"ValidatorSets-set_keys", set, key_pair).encode() +pub fn set_keys_message( + set: &ValidatorSet, + removed_participants: &[Public], + key_pair: &KeyPair, +) -> Vec { + (b"ValidatorSets-set_keys", set, removed_participants, key_pair).encode() } /// For a set of validators whose key shares may exceed the maximum, reduce until they equal the diff --git a/tests/coordinator/Cargo.toml b/tests/coordinator/Cargo.toml index 2aa805ce1..a331d484b 100644 --- a/tests/coordinator/Cargo.toml +++ b/tests/coordinator/Cargo.toml @@ -13,9 +13,14 @@ publish = false all-features = true rustdoc-args = ["--cfg", "docsrs"] +[lints] +workspace = true + [dependencies] hex = "0.4" +async-trait = "0.1" +async-recursion = "1" zeroize = { version = "1", default-features = false } rand_core = { version = "0.6", default-features = false } diff --git a/tests/coordinator/src/lib.rs b/tests/coordinator/src/lib.rs index a07d7ea40..82fad2f26 100644 --- a/tests/coordinator/src/lib.rs +++ b/tests/coordinator/src/lib.rs @@ -1,9 +1,8 @@ #![allow(clippy::needless_pass_by_ref_mut)] // False positives use std::{ - sync::{OnceLock, Arc, Mutex}, + sync::{OnceLock, Arc}, time::Duration, - fs, }; use tokio::{task::AbortHandle, sync::Mutex as AsyncMutex}; @@ -27,16 +26,11 @@ use serai_message_queue::{Service, Metadata, client::MessageQueue}; use serai_client::{primitives::Signature, Serai}; -use dockertest::{ - PullPolicy, Image, LogAction, LogPolicy, LogSource, LogOptions, StartPolicy, - TestBodySpecification, DockerOperations, -}; +use dockertest::{PullPolicy, Image, TestBodySpecification, DockerOperations}; #[cfg(test)] mod tests; -static UNIQUE_ID: OnceLock> = OnceLock::new(); - pub fn coordinator_instance( name: &str, message_queue_key: ::F, @@ -81,78 +75,6 @@ pub fn serai_composition(name: &str) -> TestBodySpecification { .set_publish_all_ports(true) } -pub type Handles = (String, String, String); -pub fn coordinator_stack( - name: &str, -) -> (Handles, ::F, Vec) { - let serai_composition = serai_composition(name); - - let (coord_key, message_queue_keys, message_queue_composition) = - serai_message_queue_tests::instance(); - - let coordinator_composition = coordinator_instance(name, coord_key); - - // Give every item in this stack a unique ID - // Uses a Mutex as we can't generate a 8-byte random ID without hitting hostname length limits - let (first, unique_id) = { - let unique_id_mutex = UNIQUE_ID.get_or_init(|| Mutex::new(0)); - let mut unique_id_lock = unique_id_mutex.lock().unwrap(); - let first = *unique_id_lock == 0; - let unique_id = *unique_id_lock; - *unique_id_lock += 1; - (first, unique_id) - }; - - let logs_path = [std::env::current_dir().unwrap().to_str().unwrap(), ".test-logs", "coordinator"] - .iter() - .collect::(); - if first { - let _ = fs::remove_dir_all(&logs_path); - fs::create_dir_all(&logs_path).expect("couldn't create logs directory"); - assert!( - fs::read_dir(&logs_path).expect("couldn't read the logs folder").next().is_none(), - "logs folder wasn't empty, despite removing it at the start of the run", - ); - } - let logs_path = logs_path.to_str().unwrap().to_string(); - - let mut compositions = vec![]; - let mut handles = vec![]; - for (name, composition) in [ - ("serai_node", serai_composition), - ("message_queue", message_queue_composition), - ("coordinator", coordinator_composition), - ] { - let handle = format!("coordinator-{name}-{unique_id}"); - - compositions.push( - 
composition.set_start_policy(StartPolicy::Strict).set_handle(handle.clone()).set_log_options( - Some(LogOptions { - action: if std::env::var("GITHUB_CI") == Ok("true".to_string()) { - LogAction::Forward - } else { - LogAction::ForwardToFile { path: logs_path.clone() } - }, - policy: LogPolicy::Always, - source: LogSource::Both, - }), - ), - ); - - handles.push(handle); - } - - let coordinator_composition = compositions.last_mut().unwrap(); - coordinator_composition.inject_container_name(handles[0].clone(), "SERAI_HOSTNAME"); - coordinator_composition.inject_container_name(handles[1].clone(), "MESSAGE_QUEUE_RPC"); - - ( - (handles[0].clone(), handles[1].clone(), handles[2].clone()), - message_queue_keys[&NetworkId::Bitcoin], - compositions, - ) -} - fn is_cosign_message(msg: &CoordinatorMessage) -> bool { matches!( msg, @@ -176,15 +98,19 @@ fn is_cosign_message(msg: &CoordinatorMessage) -> bool { ) } +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct Handles { + pub(crate) serai: String, + pub(crate) message_queue: String, +} + #[derive(Clone)] pub struct Processor { network: NetworkId, serai_rpc: String, #[allow(unused)] - message_queue_handle: String, - #[allow(unused)] - coordinator_handle: String, + handles: Handles, queue: Arc>, abort_handle: Option>, @@ -205,14 +131,14 @@ impl Processor { raw_i: u8, network: NetworkId, ops: &DockerOperations, - handles: (String, String, String), + handles: Handles, processor_key: ::F, ) -> Processor { - let message_queue_rpc = ops.handle(&handles.1).host_port(2287).unwrap(); + let message_queue_rpc = ops.handle(&handles.message_queue).host_port(2287).unwrap(); let message_queue_rpc = format!("{}:{}", message_queue_rpc.0, message_queue_rpc.1); // Sleep until the Substrate RPC starts - let serai_rpc = ops.handle(&handles.0).host_port(9944).unwrap(); + let serai_rpc = ops.handle(&handles.serai).host_port(9944).unwrap(); let serai_rpc = format!("http://{}:{}", serai_rpc.0, serai_rpc.1); // Bound execution to 60 seconds for _ in 0 .. 60 { @@ -231,8 +157,7 @@ impl Processor { network, serai_rpc, - message_queue_handle: handles.1, - coordinator_handle: handles.2, + handles, queue: Arc::new(AsyncMutex::new(( 0, @@ -269,9 +194,22 @@ impl Processor { assert_eq!(msg.id, *next_recv_id); let msg_msg = borsh::from_slice(&msg.msg).unwrap(); - if !is_cosign_message(&msg_msg) { + // Remove any BatchReattempts clogging the pipe + // TODO: Set up a wrapper around serai-client so we aren't throwing this away yet + // leave it for the tests + if matches!( + msg_msg, + messages::CoordinatorMessage::Coordinator( + messages::coordinator::CoordinatorMessage::BatchReattempt { .. } + ) + ) { + queue.ack(Service::Coordinator, msg.id).await; + *next_recv_id += 1; continue; } + if !is_cosign_message(&msg_msg) { + continue; + }; queue.ack(Service::Coordinator, msg.id).await; *next_recv_id += 1; msg_msg @@ -294,12 +232,11 @@ impl Processor { block_number, }, ) => { - let block = match id { - SubstrateSignId { - id: SubstrateSignableId::CosigningSubstrateBlock(block), - .. - } => block, - _ => panic!("CosignSubstrateBlock didn't have CosigningSubstrateBlock ID"), + let SubstrateSignId { + id: SubstrateSignableId::CosigningSubstrateBlock(block), .. + } = id + else { + panic!("CosignSubstrateBlock didn't have CosigningSubstrateBlock ID") }; let new_cosign = CurrentCosign { block_number, block }; @@ -393,17 +330,13 @@ impl Processor { *next_send_id += 1; } - /// Receive a message from the coordinator as a processor. 
- pub async fn recv_message(&mut self) -> CoordinatorMessage { + async fn recv_message_inner(&mut self) -> CoordinatorMessage { loop { tokio::task::yield_now().await; let mut queue_lock = self.queue.lock().await; let (_, next_recv_id, queue) = &mut *queue_lock; - // Set a timeout of an entire 6 minutes as cosigning may be delayed by up to 5 minutes - let msg = tokio::time::timeout(Duration::from_secs(6 * 60), queue.next(Service::Coordinator)) - .await - .unwrap(); + let msg = queue.next(Service::Coordinator).await; assert_eq!(msg.from, Service::Coordinator); assert_eq!(msg.id, *next_recv_id); @@ -419,6 +352,13 @@ impl Processor { } } + /// Receive a message from the coordinator as a processor. + pub async fn recv_message(&mut self) -> CoordinatorMessage { + // Set a timeout of 15 minutes to allow effectively any protocol to occur without a fear of + // an arbitrary timeout cutting it short + tokio::time::timeout(Duration::from_secs(15 * 60), self.recv_message_inner()).await.unwrap() + } + pub async fn set_substrate_key( &mut self, substrate_key: Zeroizing<::F>, diff --git a/tests/coordinator/src/tests/batch.rs b/tests/coordinator/src/tests/batch.rs index 501cdc0a4..67bafa24c 100644 --- a/tests/coordinator/src/tests/batch.rs +++ b/tests/coordinator/src/tests/batch.rs @@ -1,5 +1,4 @@ use std::{ - sync::Mutex, time::Duration, collections::{HashSet, HashMap}, }; @@ -38,11 +37,9 @@ pub async fn batch( substrate_key: &Zeroizing<::F>, batch: Batch, ) -> u64 { - let mut id = [0; 5]; - OsRng.fill_bytes(&mut id); - let id = SubstrateSignId { session, id: SubstrateSignableId::Batch(id), attempt: 0 }; + let id = SubstrateSignId { session, id: SubstrateSignableId::Batch(batch.id), attempt: 0 }; - for processor in processors.iter_mut() { + for processor in &mut *processors { processor .send_message(messages::substrate::ProcessorMessage::Batch { batch: batch.clone() }) .await; @@ -94,14 +91,14 @@ pub async fn batch( let known_signer_i = Participant::new(u16::from(processor_is[known_signer])).unwrap(); assert!(!preprocesses.contains_key(&known_signer_i)); - let mut participants = preprocesses.keys().cloned().collect::>(); + let mut participants = preprocesses.keys().copied().collect::>(); for (p, preprocess) in preprocesses { assert_eq!(preprocess, [u8::try_from(u16::from(p)).unwrap(); 64]); } participants.insert(known_signer_i); participants } - other => panic!("coordinator didn't send back SubstratePreprocesses: {:?}", other), + other => panic!("coordinator didn't send back SubstratePreprocesses: {other:?}"), }; for i in participants.clone() { @@ -221,9 +218,20 @@ pub async fn batch( // Verify the coordinator sends SubstrateBlock to all processors let last_block = serai.finalized_block_by_number(last_serai_block).await.unwrap().unwrap(); - for processor in processors { + for processor in &mut *processors { + // Handle a potential re-attempt message in the pipeline + let mut received = processor.recv_message().await; + if matches!( + received, + messages::CoordinatorMessage::Coordinator( + messages::coordinator::CoordinatorMessage::BatchReattempt { .. 
} + ) + ) { + received = processor.recv_message().await + } + assert_eq!( - processor.recv_message().await, + received, messages::CoordinatorMessage::Substrate( messages::substrate::CoordinatorMessage::SubstrateBlock { context: SubstrateContext { @@ -252,43 +260,21 @@ pub async fn batch( #[tokio::test] async fn batch_test() { - let _one_at_a_time = ONE_AT_A_TIME.get_or_init(|| Mutex::new(())).lock(); - let (processors, test) = new_test(); - - test - .run_async(|ops| async move { - // Wait for the Serai node to boot, and for the Tendermint chain to get past the first block - // TODO: Replace this with a Coordinator RPC - tokio::time::sleep(Duration::from_secs(150)).await; - - // Sleep even longer if in the CI due to it being slower than commodity hardware - if std::env::var("GITHUB_CI") == Ok("true".to_string()) { - tokio::time::sleep(Duration::from_secs(120)).await; - } - - // Connect to the Message Queues as the processor - let mut new_processors: Vec = vec![]; - for (i, (handles, key)) in processors.into_iter().enumerate() { - new_processors.push( - Processor::new(i.try_into().unwrap(), NetworkId::Bitcoin, &ops, handles, key).await, - ); - } - let mut processors = new_processors; - - let (processor_is, substrate_key, _) = key_gen::(&mut processors).await; - batch( - &mut processors, - &processor_is, - Session(0), - &substrate_key, - Batch { - network: NetworkId::Bitcoin, - id: 0, - block: BlockHash([0x22; 32]), - instructions: vec![], - }, - ) - .await; - }) + new_test(|mut processors: Vec| async move { + let (processor_is, substrate_key, _) = key_gen::(&mut processors).await; + batch( + &mut processors, + &processor_is, + Session(0), + &substrate_key, + Batch { + network: NetworkId::Bitcoin, + id: 0, + block: BlockHash([0x22; 32]), + instructions: vec![], + }, + ) .await; + }) + .await; } diff --git a/tests/coordinator/src/tests/key_gen.rs b/tests/coordinator/src/tests/key_gen.rs index 7e90ba1ff..8250b3bf3 100644 --- a/tests/coordinator/src/tests/key_gen.rs +++ b/tests/coordinator/src/tests/key_gen.rs @@ -1,5 +1,4 @@ use std::{ - sync::Mutex, time::{Duration, SystemTime}, collections::HashMap, }; @@ -166,7 +165,7 @@ pub async fn key_gen( } } let mut message = None; - for processor in processors.iter_mut() { + for processor in &mut *processors { let msg = processor.recv_message().await; if message.is_none() { match msg { @@ -208,7 +207,7 @@ pub async fn key_gen( KeyPair(Public(substrate_key), network_key.try_into().unwrap()) ); - for processor in processors.iter_mut() { + for processor in &mut *processors { processor.set_substrate_key(substrate_priv_key.clone()).await; } @@ -221,30 +220,8 @@ pub async fn key_gen( #[tokio::test] async fn key_gen_test() { - let _one_at_a_time = ONE_AT_A_TIME.get_or_init(|| Mutex::new(())).lock(); - let (processors, test) = new_test(); - - test - .run_async(|ops| async move { - // Wait for the Serai node to boot, and for the Tendermint chain to get past the first block - // TODO: Replace this with a Coordinator RPC - tokio::time::sleep(Duration::from_secs(150)).await; - - // Sleep even longer if in the CI due to it being slower than commodity hardware - if std::env::var("GITHUB_CI") == Ok("true".to_string()) { - tokio::time::sleep(Duration::from_secs(120)).await; - } - - // Connect to the Message Queues as the processor - let mut new_processors: Vec = vec![]; - for (i, (handles, key)) in processors.into_iter().enumerate() { - new_processors.push( - Processor::new(i.try_into().unwrap(), NetworkId::Bitcoin, &ops, handles, key).await, - ); - } - let mut 
processors = new_processors; - - key_gen::(&mut processors).await; - }) - .await; + new_test(|mut processors: Vec| async move { + key_gen::(&mut processors).await; + }) + .await; } diff --git a/tests/coordinator/src/tests/mod.rs b/tests/coordinator/src/tests/mod.rs index 0e84ec667..5f0acab66 100644 --- a/tests/coordinator/src/tests/mod.rs +++ b/tests/coordinator/src/tests/mod.rs @@ -1,8 +1,14 @@ -use std::sync::OnceLock; +use core::future::Future; +use std::{sync::OnceLock, collections::HashMap}; -use ciphersuite::Ristretto; +use tokio::sync::Mutex; -use dockertest::DockerTest; +use dockertest::{ + LogAction, LogPolicy, LogSource, LogOptions, StartPolicy, TestBodySpecification, + DockerOperations, DockerTest, +}; + +use serai_docker_tests::fresh_logs_folder; use crate::*; @@ -19,13 +25,28 @@ pub use sign::sign; pub(crate) const COORDINATORS: usize = 4; pub(crate) const THRESHOLD: usize = ((COORDINATORS * 2) / 3) + 1; -pub(crate) static ONE_AT_A_TIME: OnceLock> = OnceLock::new(); +// Provide a unique ID and ensures only one invocation occurs at a time. +static UNIQUE_ID: OnceLock> = OnceLock::new(); + +#[async_trait::async_trait] +pub(crate) trait TestBody: 'static + Send + Sync { + async fn body(&self, processors: Vec); +} +#[async_trait::async_trait] +impl) -> F> TestBody for TB { + async fn body(&self, processors: Vec) { + (self)(processors).await; + } +} + +pub(crate) async fn new_test(test_body: impl TestBody) { + let mut unique_id_lock = UNIQUE_ID.get_or_init(|| Mutex::new(0)).lock().await; -pub(crate) fn new_test() -> (Vec<(Handles, ::F)>, DockerTest) { let mut coordinators = vec![]; let mut test = DockerTest::new().with_network(dockertest::Network::Isolated); + let mut coordinator_compositions = vec![]; for i in 0 .. COORDINATORS { - let (handles, coord_key, compositions) = coordinator_stack(match i { + let name = match i { 0 => "Alice", 1 => "Bob", 2 => "Charlie", @@ -33,21 +54,166 @@ pub(crate) fn new_test() -> (Vec<(Handles, ::F)>, Dock 4 => "Eve", 5 => "Ferdie", _ => panic!("needed a 7th name for a serai node"), - }); - coordinators.push((handles, coord_key)); + }; + let serai_composition = serai_composition(name); + + let (processor_key, message_queue_keys, message_queue_composition) = + serai_message_queue_tests::instance(); + + let coordinator_composition = coordinator_instance(name, processor_key); + + // Give every item in this stack a unique ID + // Uses a Mutex as we can't generate a 8-byte random ID without hitting hostname length limits + let (first, unique_id) = { + let first = *unique_id_lock == 0; + let unique_id = *unique_id_lock; + *unique_id_lock += 1; + (first, unique_id) + }; + + let logs_path = fresh_logs_folder(first, "coordinator"); + + let mut compositions = vec![]; + let mut handles = HashMap::new(); + for (name, composition) in [ + ("serai_node", serai_composition), + ("message_queue", message_queue_composition), + ("coordinator", coordinator_composition), + ] { + let handle = format!("coordinator-{name}-{unique_id}"); + + compositions.push( + composition + .set_start_policy(StartPolicy::Strict) + .set_handle(handle.clone()) + .set_log_options(Some(LogOptions { + action: if std::env::var("GITHUB_CI") == Ok("true".to_string()) { + LogAction::Forward + } else { + LogAction::ForwardToFile { path: logs_path.clone() } + }, + policy: LogPolicy::Always, + source: LogSource::Both, + })), + ); + + handles.insert(name, handle); + } + + let processor_key = message_queue_keys[&NetworkId::Bitcoin]; + + coordinators.push(( + Handles { + serai: 
handles.remove("serai_node").unwrap(), + message_queue: handles.remove("message_queue").unwrap(), + }, + processor_key, + )); + coordinator_compositions.push(compositions.pop().unwrap()); for composition in compositions { test.provide_container(composition); } } - (coordinators, test) + + struct Context { + pending_coordinator_compositions: Mutex>, + handles_and_keys: Vec<(Handles, ::F)>, + test_body: Box, + } + static CONTEXT: OnceLock>> = OnceLock::new(); + *CONTEXT.get_or_init(|| Mutex::new(None)).lock().await = Some(Context { + pending_coordinator_compositions: Mutex::new(coordinator_compositions), + handles_and_keys: coordinators, + test_body: Box::new(test_body), + }); + + // The DockerOperations from the first invocation, containing the Message Queue servers and the + // Serai nodes. + static OUTER_OPS: OnceLock>> = OnceLock::new(); + + // Reset OUTER_OPS + *OUTER_OPS.get_or_init(|| Mutex::new(None)).lock().await = None; + + // Spawns a coordinator, if one has yet to be spawned, or else runs the test. + #[async_recursion::async_recursion] + async fn spawn_coordinator_or_run_test(inner_ops: DockerOperations) { + // If the outer operations have yet to be set, these *are* the outer operations + let outer_ops = OUTER_OPS.get().unwrap(); + if outer_ops.lock().await.is_none() { + *outer_ops.lock().await = Some(inner_ops); + } + + let context_lock = CONTEXT.get().unwrap().lock().await; + let Context { pending_coordinator_compositions, handles_and_keys: coordinators, test_body } = + context_lock.as_ref().unwrap(); + + // Check if there is a coordinator left + let maybe_coordinator = { + let mut remaining = pending_coordinator_compositions.lock().await; + let maybe_coordinator = if !remaining.is_empty() { + let handles = coordinators[coordinators.len() - remaining.len()].0.clone(); + let composition = remaining.remove(0); + Some((composition, handles)) + } else { + None + }; + drop(remaining); + maybe_coordinator + }; + + if let Some((mut composition, handles)) = maybe_coordinator { + let network = { + let outer_ops = outer_ops.lock().await; + let outer_ops = outer_ops.as_ref().unwrap(); + // Spawn it by building another DockerTest which recursively calls this function + // TODO: Spawn this outside of DockerTest so we can remove the recursion + let serai_container = outer_ops.handle(&handles.serai); + composition.modify_env("SERAI_HOSTNAME", serai_container.ip()); + let message_queue_container = outer_ops.handle(&handles.message_queue); + composition.modify_env("MESSAGE_QUEUE_RPC", message_queue_container.ip()); + + format!("container:{}", serai_container.name()) + }; + let mut test = DockerTest::new().with_network(dockertest::Network::External(network)); + test.provide_container(composition); + + drop(context_lock); + test.run_async(spawn_coordinator_or_run_test).await; + } else { + let outer_ops = outer_ops.lock().await.take().unwrap(); + + // Wait for the Serai node to boot, and for the Tendermint chain to get past the first block + // TODO: Replace this with a Coordinator RPC we can query + tokio::time::sleep(Duration::from_secs(60)).await; + + // Connect to the Message Queues as the processor + let mut processors: Vec = vec![]; + for (i, (handles, key)) in coordinators.iter().enumerate() { + processors.push( + Processor::new( + i.try_into().unwrap(), + NetworkId::Bitcoin, + &outer_ops, + handles.clone(), + *key, + ) + .await, + ); + } + + test_body.body(processors).await; + } + } + + test.run_async(spawn_coordinator_or_run_test).await; } // TODO: Don't use a pessimistic sleep // Use an 
RPC to evaluate if a condition was met, with the following time being a timeout
// https://github.com/serai-dex/serai/issues/340
pub(crate) async fn wait_for_tributary() {
-  tokio::time::sleep(Duration::from_secs(20)).await;
+  tokio::time::sleep(Duration::from_secs(15)).await;
   if std::env::var("GITHUB_CI") == Ok("true".to_string()) {
-    tokio::time::sleep(Duration::from_secs(40)).await;
+    tokio::time::sleep(Duration::from_secs(6)).await;
   }
 }
diff --git a/tests/coordinator/src/tests/sign.rs b/tests/coordinator/src/tests/sign.rs
index f2cea1dfc..e46e8890b 100644
--- a/tests/coordinator/src/tests/sign.rs
+++ b/tests/coordinator/src/tests/sign.rs
@@ -1,5 +1,4 @@
 use std::{
-  sync::Mutex,
   time::Duration,
   collections::{HashSet, HashMap},
 };
@@ -77,7 +76,7 @@ pub async fn sign(
     let known_signer_i = Participant::new(u16::from(processor_is[known_signer])).unwrap();
     assert!(!preprocesses.contains_key(&known_signer_i));

-    let mut participants = preprocesses.keys().cloned().collect::<HashSet<_>>();
+    let mut participants = preprocesses.keys().copied().collect::<HashSet<_>>();
     for (p, preprocess) in preprocesses {
       assert_eq!(preprocess, vec![u8::try_from(u16::from(p)).unwrap(); 128]);
     }
@@ -169,186 +168,161 @@ pub async fn sign(

 #[tokio::test]
 async fn sign_test() {
-  let _one_at_a_time = ONE_AT_A_TIME.get_or_init(|| Mutex::new(())).lock();
-  let (processors, test) = new_test();
-
-  test
-    .run_async(|ops| async move {
-      // Wait for the Serai node to boot, and for the Tendermint chain to get past the first block
-      // TODO: Replace this with a Coordinator RPC
-      tokio::time::sleep(Duration::from_secs(150)).await;
-
-      // Sleep even longer if in the CI due to it being slower than commodity hardware
-      if std::env::var("GITHUB_CI") == Ok("true".to_string()) {
-        tokio::time::sleep(Duration::from_secs(120)).await;
-      }
-
-      // Connect to the Message Queues as the processor
-      let mut new_processors: Vec<Processor> = vec![];
-      for (i, (handles, key)) in processors.into_iter().enumerate() {
-        new_processors.push(
-          Processor::new(i.try_into().unwrap(), NetworkId::Bitcoin, &ops, handles, key).await,
-        );
-      }
-      let mut processors = new_processors;
-
-      let (participant_is, substrate_key, _) = key_gen::<Secp256k1>(&mut processors).await;
-
-      // 'Send' external coins into Serai
-      let serai = processors[0].serai().await;
-      let (serai_pair, serai_addr) = {
-        let mut name = [0; 4];
-        OsRng.fill_bytes(&mut name);
-        let pair = insecure_pair_from_name(&hex::encode(name));
-        let address = SeraiAddress::from(pair.public());
-
-        // Fund the new account to pay for fees
-        let balance = Balance { coin: Coin::Serai, amount: Amount(1_000_000_000) };
-        serai
-          .publish(&serai.sign(
-            &insecure_pair_from_name("Ferdie"),
-            SeraiCoins::transfer(address, balance),
-            0,
-            Default::default(),
-          ))
-          .await
-          .unwrap();
-
-        (pair, address)
-      };
-
-      #[allow(clippy::inconsistent_digit_grouping)]
-      let amount = Amount(1_000_000_00);
-      let balance = Balance { coin: Coin::Bitcoin, amount };
-
-      let coin_block = BlockHash([0x33; 32]);
-      let block_included_in = batch(
-        &mut processors,
-        &participant_is,
-        Session(0),
-        &substrate_key,
-        Batch {
-          network: NetworkId::Bitcoin,
-          id: 0,
-          block: coin_block,
-          instructions: vec![InInstructionWithBalance {
-            instruction: InInstruction::Transfer(serai_addr),
-            balance,
-          }],
-        },
-      )
-      .await;
-
-      {
-        let block_included_in_hash =
-          serai.finalized_block_by_number(block_included_in).await.unwrap().unwrap().hash();
-
-        let serai = serai.as_of(block_included_in_hash);
-        let serai = serai.coins();
-        assert_eq!(
-          serai.coin_balance(Coin::Serai,
serai_addr).await.unwrap(), - Amount(1_000_000_000) - ); - - // Verify the mint occurred as expected - assert_eq!( - serai.mint_events().await.unwrap(), - vec![CoinsEvent::Mint { to: serai_addr, balance }] - ); - assert_eq!(serai.coin_supply(Coin::Bitcoin).await.unwrap(), amount); - assert_eq!(serai.coin_balance(Coin::Bitcoin, serai_addr).await.unwrap(), amount); - } - - // Trigger a burn - let out_instruction = OutInstructionWithBalance { - balance, - instruction: OutInstruction { - address: ExternalAddress::new(b"external".to_vec()).unwrap(), - data: None, - }, - }; + new_test(|mut processors: Vec| async move { + let (participant_is, substrate_key, _) = key_gen::(&mut processors).await; + + // 'Send' external coins into Serai + let serai = processors[0].serai().await; + let (serai_pair, serai_addr) = { + let mut name = [0; 4]; + OsRng.fill_bytes(&mut name); + let pair = insecure_pair_from_name(&hex::encode(name)); + let address = SeraiAddress::from(pair.public()); + + // Fund the new account to pay for fees + let balance = Balance { coin: Coin::Serai, amount: Amount(1_000_000_000) }; serai .publish(&serai.sign( - &serai_pair, - SeraiCoins::burn_with_instruction(out_instruction.clone()), + &insecure_pair_from_name("Ferdie"), + SeraiCoins::transfer(address, balance), 0, Default::default(), )) .await .unwrap(); - // TODO: We *really* need a helper for this pattern - let mut last_serai_block = block_included_in; - 'outer: for _ in 0 .. 20 { + (pair, address) + }; + + #[allow(clippy::inconsistent_digit_grouping)] + let amount = Amount(1_000_000_00); + let balance = Balance { coin: Coin::Bitcoin, amount }; + + let coin_block = BlockHash([0x33; 32]); + let block_included_in = batch( + &mut processors, + &participant_is, + Session(0), + &substrate_key, + Batch { + network: NetworkId::Bitcoin, + id: 0, + block: coin_block, + instructions: vec![InInstructionWithBalance { + instruction: InInstruction::Transfer(serai_addr), + balance, + }], + }, + ) + .await; + + { + let block_included_in_hash = + serai.finalized_block_by_number(block_included_in).await.unwrap().unwrap().hash(); + + let serai = serai.as_of(block_included_in_hash); + let serai = serai.coins(); + assert_eq!(serai.coin_balance(Coin::Serai, serai_addr).await.unwrap(), Amount(1_000_000_000)); + + // Verify the mint occurred as expected + assert_eq!( + serai.mint_events().await.unwrap(), + vec![CoinsEvent::Mint { to: serai_addr, balance }] + ); + assert_eq!(serai.coin_supply(Coin::Bitcoin).await.unwrap(), amount); + assert_eq!(serai.coin_balance(Coin::Bitcoin, serai_addr).await.unwrap(), amount); + } + + // Trigger a burn + let out_instruction = OutInstructionWithBalance { + balance, + instruction: OutInstruction { + address: ExternalAddress::new(b"external".to_vec()).unwrap(), + data: None, + }, + }; + serai + .publish(&serai.sign( + &serai_pair, + SeraiCoins::burn_with_instruction(out_instruction.clone()), + 0, + Default::default(), + )) + .await + .unwrap(); + + // TODO: We *really* need a helper for this pattern + let mut last_serai_block = block_included_in; + 'outer: for _ in 0 .. 
20 { + tokio::time::sleep(Duration::from_secs(6)).await; + if std::env::var("GITHUB_CI") == Ok("true".to_string()) { tokio::time::sleep(Duration::from_secs(6)).await; - if std::env::var("GITHUB_CI") == Ok("true".to_string()) { - tokio::time::sleep(Duration::from_secs(6)).await; - } + } - while last_serai_block <= serai.latest_finalized_block().await.unwrap().number() { - let burn_events = serai - .as_of(serai.finalized_block_by_number(last_serai_block).await.unwrap().unwrap().hash()) - .coins() - .burn_with_instruction_events() - .await - .unwrap(); - - if !burn_events.is_empty() { - assert_eq!(burn_events.len(), 1); - assert_eq!( - burn_events[0], - CoinsEvent::BurnWithInstruction { - from: serai_addr, - instruction: out_instruction.clone() - } - ); - break 'outer; - } - last_serai_block += 1; + while last_serai_block <= serai.latest_finalized_block().await.unwrap().number() { + let burn_events = serai + .as_of(serai.finalized_block_by_number(last_serai_block).await.unwrap().unwrap().hash()) + .coins() + .burn_with_instruction_events() + .await + .unwrap(); + + if !burn_events.is_empty() { + assert_eq!(burn_events.len(), 1); + assert_eq!( + burn_events[0], + CoinsEvent::BurnWithInstruction { + from: serai_addr, + instruction: out_instruction.clone() + } + ); + break 'outer; } + last_serai_block += 1; } + } - let last_serai_block = - serai.finalized_block_by_number(last_serai_block).await.unwrap().unwrap(); - let last_serai_block_hash = last_serai_block.hash(); - let serai = serai.as_of(last_serai_block_hash); - let serai = serai.coins(); - assert_eq!(serai.coin_supply(Coin::Bitcoin).await.unwrap(), Amount(0)); - assert_eq!(serai.coin_balance(Coin::Bitcoin, serai_addr).await.unwrap(), Amount(0)); - - let mut plan_id = [0; 32]; - OsRng.fill_bytes(&mut plan_id); - let plan_id = plan_id; - - // We should now get a SubstrateBlock - for processor in &mut processors { - assert_eq!( - processor.recv_message().await, - messages::CoordinatorMessage::Substrate( - messages::substrate::CoordinatorMessage::SubstrateBlock { - context: SubstrateContext { - serai_time: last_serai_block.time().unwrap() / 1000, - network_latest_finalized_block: coin_block, - }, - block: last_serai_block.number(), - burns: vec![out_instruction.clone()], - batches: vec![], - } - ) - ); - - // Send the ACK, claiming there's a plan to sign - processor - .send_message(messages::ProcessorMessage::Coordinator( - messages::coordinator::ProcessorMessage::SubstrateBlockAck { - block: last_serai_block.number(), - plans: vec![PlanMeta { session: Session(0), id: plan_id }], + let last_serai_block = + serai.finalized_block_by_number(last_serai_block).await.unwrap().unwrap(); + let last_serai_block_hash = last_serai_block.hash(); + let serai = serai.as_of(last_serai_block_hash); + let serai = serai.coins(); + assert_eq!(serai.coin_supply(Coin::Bitcoin).await.unwrap(), Amount(0)); + assert_eq!(serai.coin_balance(Coin::Bitcoin, serai_addr).await.unwrap(), Amount(0)); + + let mut plan_id = [0; 32]; + OsRng.fill_bytes(&mut plan_id); + let plan_id = plan_id; + + // We should now get a SubstrateBlock + for processor in &mut processors { + assert_eq!( + processor.recv_message().await, + messages::CoordinatorMessage::Substrate( + messages::substrate::CoordinatorMessage::SubstrateBlock { + context: SubstrateContext { + serai_time: last_serai_block.time().unwrap() / 1000, + network_latest_finalized_block: coin_block, }, - )) - .await; - } + block: last_serai_block.number(), + burns: vec![out_instruction.clone()], + batches: vec![], + } + ) + ); + + 
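
The coordinator's `SubstrateBlock` and the processor's `SubstrateBlockAck` (sent just below) pair on the block number, with the ack naming the plans the processor intends to sign. A minimal sketch of that pairing, using simplified stand-in structs rather than the real `messages` types:

    // Simplified stand-ins (assumed shapes) for the SubstrateBlock/SubstrateBlockAck handshake
    struct SubstrateBlock {
      block: u64,
    }
    struct PlanMeta {
      session: u32,
      id: [u8; 32],
    }
    struct SubstrateBlockAck {
      block: u64,
      plans: Vec<PlanMeta>,
    }

    fn ack_for(msg: &SubstrateBlock, plans: Vec<PlanMeta>) -> SubstrateBlockAck {
      // Echo the block number so the coordinator can pair the ack to its request
      SubstrateBlockAck { block: msg.block, plans }
    }

    fn main() {
      let ack = ack_for(&SubstrateBlock { block: 100 }, vec![PlanMeta { session: 0, id: [0; 32] }]);
      assert_eq!(ack.block, 100);
      assert_eq!(ack.plans.len(), 1);
    }
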
// Send the ACK, claiming there's a plan to sign + processor + .send_message(messages::ProcessorMessage::Coordinator( + messages::coordinator::ProcessorMessage::SubstrateBlockAck { + block: last_serai_block.number(), + plans: vec![PlanMeta { session: Session(0), id: plan_id }], + }, + )) + .await; + } - sign(&mut processors, &participant_is, Session(0), plan_id).await; - }) - .await; + sign(&mut processors, &participant_is, Session(0), plan_id).await; + }) + .await; } diff --git a/tests/docker/Cargo.toml b/tests/docker/Cargo.toml index 8db507730..1b3a8a6e8 100644 --- a/tests/docker/Cargo.toml +++ b/tests/docker/Cargo.toml @@ -13,5 +13,8 @@ publish = false all-features = true rustdoc-args = ["--cfg", "docsrs"] +[lints] +workspace = true + [dependencies] chrono = "0.4" diff --git a/tests/docker/src/lib.rs b/tests/docker/src/lib.rs index 572df4563..2ae847e3b 100644 --- a/tests/docker/src/lib.rs +++ b/tests/docker/src/lib.rs @@ -7,6 +7,21 @@ use std::{ process::Command, }; +pub fn fresh_logs_folder(first: bool, label: &str) -> String { + let logs_path = [std::env::current_dir().unwrap().to_str().unwrap(), ".test-logs", label] + .iter() + .collect::(); + if first { + let _ = fs::remove_dir_all(&logs_path); + fs::create_dir_all(&logs_path).expect("couldn't create logs directory"); + assert!( + fs::read_dir(&logs_path).expect("couldn't read the logs folder").next().is_none(), + "logs folder wasn't empty, despite removing it at the start of the run", + ); + } + logs_path.to_str().unwrap().to_string() +} + static BUILT: OnceLock>> = OnceLock::new(); pub fn build(name: String) { let built = BUILT.get_or_init(|| Mutex::new(HashMap::new())); @@ -70,8 +85,7 @@ pub fn build(name: String) { // Check any additionally specified paths let meta = |path: PathBuf| (path.clone(), fs::metadata(path)); let mut metadatas = match name.as_str() { - "bitcoin" => vec![], - "monero" => vec![], + "bitcoin" | "monero" => vec![], "message-queue" => vec![ meta(repo_path.join("common")), meta(repo_path.join("crypto")), @@ -94,12 +108,7 @@ pub fn build(name: String) { meta(repo_path.join("message-queue")), meta(repo_path.join("coordinator")), ], - "runtime" => vec![ - meta(repo_path.join("common")), - meta(repo_path.join("crypto")), - meta(repo_path.join("substrate")), - ], - "serai" => vec![ + "runtime" | "serai" => vec![ meta(repo_path.join("common")), meta(repo_path.join("crypto")), meta(repo_path.join("substrate")), @@ -132,7 +141,7 @@ pub fn build(name: String) { if let Some(last_modified) = last_modified { if last_modified < created_time { - println!("{} was built after the most recent source code edits, assuming built.", name); + println!("{name} was built after the most recent source code edits, assuming built."); built_lock.insert(name, true); return; } diff --git a/tests/full-stack/Cargo.toml b/tests/full-stack/Cargo.toml index aa7fc11a9..b45d7b53c 100644 --- a/tests/full-stack/Cargo.toml +++ b/tests/full-stack/Cargo.toml @@ -13,9 +13,15 @@ publish = false all-features = true rustdoc-args = ["--cfg", "docsrs"] +[lints] +workspace = true + [dependencies] hex = "0.4" +async-trait = "0.1" +async-recursion = "1" + zeroize = { version = "1", default-features = false } rand_core = { version = "0.6", default-features = false } @@ -35,6 +41,7 @@ serai-client = { path = "../../substrate/client", features = ["serai"] } tokio = { version = "1", features = ["time"] } dockertest = "0.4" +serai-docker-tests = { path = "../docker" } serai-message-queue-tests = { path = "../message-queue" } serai-processor-tests = { path = 
"../processor" } serai-coordinator-tests = { path = "../coordinator" } diff --git a/tests/full-stack/src/lib.rs b/tests/full-stack/src/lib.rs index def23519c..5e39c70d9 100644 --- a/tests/full-stack/src/lib.rs +++ b/tests/full-stack/src/lib.rs @@ -1,24 +1,14 @@ -use std::{ - sync::{OnceLock, Mutex}, - time::Duration, - fs, -}; +use std::time::Duration; -use serai_client::{primitives::NetworkId, Serai}; +use serai_client::Serai; -use dockertest::{ - LogAction, LogPolicy, LogSource, LogOptions, StartPolicy, TestBodySpecification, DockerOperations, -}; +use dockertest::DockerOperations; + +use serai_processor_tests::{RPC_USER, RPC_PASS}; #[cfg(test)] mod tests; -static UNIQUE_ID: OnceLock> = OnceLock::new(); - -use serai_processor_tests::{RPC_USER, RPC_PASS, network_instance, processor_instance}; -use serai_message_queue_tests::instance as message_queue_instance; -use serai_coordinator_tests::{coordinator_instance, serai_composition}; - #[allow(unused)] #[derive(Clone, Debug)] pub struct Handles { @@ -27,108 +17,9 @@ pub struct Handles { monero: (String, u32), monero_processor: String, message_queue: String, - coordinator: String, serai: String, } -pub fn full_stack(name: &str) -> (Handles, Vec) { - let (coord_key, message_queue_keys, message_queue_composition) = message_queue_instance(); - - let (bitcoin_composition, bitcoin_port) = network_instance(NetworkId::Bitcoin); - let bitcoin_processor_composition = - processor_instance(NetworkId::Bitcoin, bitcoin_port, message_queue_keys[&NetworkId::Bitcoin]); - - let (monero_composition, monero_port) = network_instance(NetworkId::Monero); - let monero_processor_composition = - processor_instance(NetworkId::Monero, monero_port, message_queue_keys[&NetworkId::Monero]); - - let coordinator_composition = coordinator_instance(name, coord_key); - let serai_composition = serai_composition(name); - - // Give every item in this stack a unique ID - // Uses a Mutex as we can't generate a 8-byte random ID without hitting hostname length limits - let (first, unique_id) = { - let unique_id_mutex = UNIQUE_ID.get_or_init(|| Mutex::new(0)); - let mut unique_id_lock = unique_id_mutex.lock().unwrap(); - let first = *unique_id_lock == 0; - let unique_id = *unique_id_lock; - *unique_id_lock += 1; - (first, unique_id) - }; - - let logs_path = [std::env::current_dir().unwrap().to_str().unwrap(), ".test-logs", "full-stack"] - .iter() - .collect::(); - if first { - let _ = fs::remove_dir_all(&logs_path); - fs::create_dir_all(&logs_path).expect("couldn't create logs directory"); - assert!( - fs::read_dir(&logs_path).expect("couldn't read the logs folder").next().is_none(), - "logs folder wasn't empty, despite removing it at the start of the run", - ); - } - let logs_path = logs_path.to_str().unwrap().to_string(); - - let mut compositions = vec![]; - let mut handles = vec![]; - for (name, composition) in [ - ("message_queue", message_queue_composition), - ("bitcoin", bitcoin_composition), - ("bitcoin_processor", bitcoin_processor_composition), - ("monero", monero_composition), - ("monero_processor", monero_processor_composition), - ("coordinator", coordinator_composition), - ("serai", serai_composition), - ] { - let handle = format!("full_stack-{name}-{unique_id}"); - compositions.push( - composition.set_start_policy(StartPolicy::Strict).set_handle(handle.clone()).set_log_options( - Some(LogOptions { - action: if std::env::var("GITHUB_CI") == Ok("true".to_string()) { - LogAction::Forward - } else { - LogAction::ForwardToFile { path: logs_path.clone() } - }, - policy: 
LogPolicy::Always, - source: LogSource::Both, - }), - ), - ); - handles.push(handle); - } - let handles = Handles { - message_queue: handles[0].clone(), - bitcoin: (handles[1].clone(), bitcoin_port), - bitcoin_processor: handles[2].clone(), - monero: (handles[3].clone(), monero_port), - monero_processor: handles[4].clone(), - coordinator: handles[5].clone(), - serai: handles[6].clone(), - }; - - { - let bitcoin_processor_composition = compositions.get_mut(2).unwrap(); - bitcoin_processor_composition - .inject_container_name(handles.message_queue.clone(), "MESSAGE_QUEUE_RPC"); - bitcoin_processor_composition - .inject_container_name(handles.bitcoin.0.clone(), "NETWORK_RPC_HOSTNAME"); - } - - { - let monero_processor_composition = compositions.get_mut(4).unwrap(); - monero_processor_composition - .inject_container_name(handles.message_queue.clone(), "MESSAGE_QUEUE_RPC"); - monero_processor_composition - .inject_container_name(handles.monero.0.clone(), "NETWORK_RPC_HOSTNAME"); - } - - let coordinator_composition = compositions.get_mut(5).unwrap(); - coordinator_composition.inject_container_name(handles.message_queue.clone(), "MESSAGE_QUEUE_RPC"); - coordinator_composition.inject_container_name(handles.serai.clone(), "SERAI_HOSTNAME"); - - (handles, compositions) -} - impl Handles { pub async fn serai(&self, ops: &DockerOperations) -> Serai { let serai_rpc = ops.handle(&self.serai).host_port(9944).unwrap(); diff --git a/tests/full-stack/src/tests/mint_and_burn.rs b/tests/full-stack/src/tests/mint_and_burn.rs index 856834fa9..4fe1378ed 100644 --- a/tests/full-stack/src/tests/mint_and_burn.rs +++ b/tests/full-stack/src/tests/mint_and_burn.rs @@ -25,599 +25,594 @@ use crate::tests::*; // stack tests #[tokio::test] async fn mint_and_burn_test() { - let _one_at_a_time = ONE_AT_A_TIME.get_or_init(|| Mutex::new(())).lock(); - let (handles, test) = new_test(); - - test - .run_async(|ops| async move { - let ops = Arc::new(ops); - let serai = handles[0].serai(&ops).await; - - // Helper to mine a block on each network - async fn mine_blocks( - handles: &[Handles], - ops: &DockerOperations, - producer: &mut usize, - count: usize, - ) { - static MINE_BLOCKS_CALL: OnceLock> = OnceLock::new(); - - // Only let one instance of this function run at a time - let _lock = MINE_BLOCKS_CALL.get_or_init(|| tokio::sync::Mutex::new(())).lock().await; - - // Pick a block producer via a round robin - let producer_handles = &handles[*producer]; - *producer += 1; - *producer %= handles.len(); - - // Mine a Bitcoin block - let bitcoin_blocks = { - use bitcoin_serai::bitcoin::{ - secp256k1::{SECP256K1, SecretKey}, - PrivateKey, PublicKey, - consensus::Encodable, - network::Network, - address::Address, - }; + new_test(|ops, handles: Vec| async move { + let ops = Arc::new(ops); + let serai = handles[0].serai(&ops).await; + + // Helper to mine a block on each network + async fn mine_blocks( + handles: &[Handles], + ops: &DockerOperations, + producer: &mut usize, + count: usize, + ) { + static MINE_BLOCKS_CALL: OnceLock> = OnceLock::new(); + + // Only let one instance of this function run at a time + let _lock = MINE_BLOCKS_CALL.get_or_init(|| tokio::sync::Mutex::new(())).lock().await; + + // Pick a block producer via a round robin + let producer_handles = &handles[*producer]; + *producer += 1; + *producer %= handles.len(); + + // Mine a Bitcoin block + let bitcoin_blocks = { + use bitcoin_serai::bitcoin::{ + secp256k1::{SECP256K1, SecretKey}, + PrivateKey, PublicKey, + consensus::Encodable, + network::Network, + 
address::Address, + }; - let addr = Address::p2pkh( - &PublicKey::from_private_key( - SECP256K1, - &PrivateKey::new(SecretKey::from_slice(&[0x01; 32]).unwrap(), Network::Bitcoin), - ), - Network::Regtest, - ); + let addr = Address::p2pkh( + &PublicKey::from_private_key( + SECP256K1, + &PrivateKey::new(SecretKey::from_slice(&[0x01; 32]).unwrap(), Network::Bitcoin), + ), + Network::Regtest, + ); - let rpc = producer_handles.bitcoin(ops).await; - let mut res = Vec::with_capacity(count); - for _ in 0 .. count { - let hash = rpc - .rpc_call::>("generatetoaddress", serde_json::json!([1, addr])) - .await - .unwrap() - .swap_remove(0); + let rpc = producer_handles.bitcoin(ops).await; + let mut res = Vec::with_capacity(count); + for _ in 0 .. count { + let hash = rpc + .rpc_call::>("generatetoaddress", serde_json::json!([1, addr])) + .await + .unwrap() + .swap_remove(0); + + let mut bytes = vec![]; + rpc + .get_block(&hex::decode(hash).unwrap().try_into().unwrap()) + .await + .unwrap() + .consensus_encode(&mut bytes) + .unwrap(); + res.push(serde_json::json!([hex::encode(bytes)])); + } + res + }; - let mut bytes = vec![]; - rpc - .get_block(&hex::decode(hash).unwrap().try_into().unwrap()) - .await - .unwrap() - .consensus_encode(&mut bytes) - .unwrap(); - res.push(serde_json::json!([hex::encode(bytes)])); - } - res + // Mine a Monero block + let monero_blocks = { + use curve25519_dalek::{constants::ED25519_BASEPOINT_POINT, scalar::Scalar}; + use monero_serai::wallet::{ + ViewPair, + address::{Network, AddressSpec}, }; - // Mine a Monero block - let monero_blocks = { - use curve25519_dalek::{constants::ED25519_BASEPOINT_POINT, scalar::Scalar}; - use monero_serai::wallet::{ - ViewPair, - address::{Network, AddressSpec}, - }; - - let addr = ViewPair::new(ED25519_BASEPOINT_POINT, Zeroizing::new(Scalar::ONE)) - .address(Network::Mainnet, AddressSpec::Standard) - .to_string(); + let addr = ViewPair::new(ED25519_BASEPOINT_POINT, Zeroizing::new(Scalar::ONE)) + .address(Network::Mainnet, AddressSpec::Standard) + .to_string(); - let rpc = producer_handles.monero(ops).await; - let mut res = Vec::with_capacity(count); - for _ in 0 .. count { - let block = - rpc.get_block(rpc.generate_blocks(&addr, 1).await.unwrap()[0]).await.unwrap(); + let rpc = producer_handles.monero(ops).await; + let mut res = Vec::with_capacity(count); + for _ in 0 .. count { + let block = rpc.get_block(rpc.generate_blocks(&addr, 1).await.unwrap()[0]).await.unwrap(); - let mut txs = Vec::with_capacity(block.txs.len()); - for tx in &block.txs { - txs.push(rpc.get_transaction(*tx).await.unwrap()); - } - res.push((serde_json::json!([hex::encode(block.serialize())]), txs)); + let mut txs = Vec::with_capacity(block.txs.len()); + for tx in &block.txs { + txs.push(rpc.get_transaction(*tx).await.unwrap()); } - res - }; + res.push((serde_json::json!([hex::encode(block.serialize())]), txs)); + } + res + }; - // Relay it to all other nodes - // If the producer is 0, the producer variable will be 1 since we already incremented - // it - // With 4 nodes, this will run 1 .. 4, which is the correct range - for receiver in *producer .. 
(*producer + (handles.len() - 1)) { - let receiver = receiver % handles.len(); - let handles = &handles[receiver]; - - { - let rpc = handles.bitcoin(ops).await; - for block in &bitcoin_blocks { - let _: () = rpc.rpc_call("submitblock", block.clone()).await.unwrap(); - } + // Relay it to all other nodes + // If the producer is 0, the producer variable will be 1 since we already incremented + // it + // With 4 nodes, this will run 1 .. 4, which is the correct range + for receiver in *producer .. (*producer + (handles.len() - 1)) { + let receiver = receiver % handles.len(); + let handles = &handles[receiver]; + + { + let rpc = handles.bitcoin(ops).await; + for block in &bitcoin_blocks { + let _: () = rpc.rpc_call("submitblock", block.clone()).await.unwrap(); } + } - { - let rpc = handles.monero(ops).await; - - for (block, txs) in &monero_blocks { - // Broadcast the Monero TXs, as they're not simply included with the block - for tx in txs { - // Ignore any errors since the TX already being present will return an error - let _ = rpc.publish_transaction(tx).await; - } + { + let rpc = handles.monero(ops).await; - #[derive(Debug, serde::Deserialize)] - struct EmptyResponse {} - let _: EmptyResponse = - rpc.json_rpc_call("submit_block", Some(block.clone())).await.unwrap(); + for (block, txs) in &monero_blocks { + // Broadcast the Monero TXs, as they're not simply included with the block + for tx in txs { + // Ignore any errors since the TX already being present will return an error + let _ = rpc.publish_transaction(tx).await; } - } - } - } - // Mine blocks to create mature funds - mine_blocks(&handles, &ops, &mut 0, 101).await; - - // Spawn a background task to mine blocks on Bitcoin/Monero - let keep_mining = Arc::new(Mutex::new(true)); - { - let keep_mining = keep_mining.clone(); - let existing = std::panic::take_hook(); - std::panic::set_hook(Box::new(move |panic| { - // On panic, set keep_mining to false - if let Ok(mut keep_mining) = keep_mining.lock() { - *keep_mining = false; - } else { - println!("panic which poisoned keep_mining"); + #[derive(Debug, serde::Deserialize)] + struct EmptyResponse {} + let _: EmptyResponse = + rpc.json_rpc_call("submit_block", Some(block.clone())).await.unwrap(); } - existing(panic); - })); + } } - - let mining_task = { - let ops = ops.clone(); - let handles = handles.clone(); - let keep_mining = keep_mining.clone(); - tokio::spawn(async move { - let start = Instant::now(); - let mut producer = 0; - while { - // Ensure this is deref'd to a bool, not any permutation of the lock - let keep_mining: bool = *keep_mining.lock().unwrap(); - // Bound execution to 60m - keep_mining && (Instant::now().duration_since(start) < Duration::from_secs(60 * 60)) - } { - // Mine a block every 3s - tokio::time::sleep(Duration::from_secs(3)).await; - mine_blocks(&handles, &ops, &mut producer, 1).await; - } - }) - }; - - // Get the generated keys - let (bitcoin_key_pair, monero_key_pair) = { - let key_pair = { - let serai = &serai; - move |additional, network| async move { - // If this is an additional key pair, it should've completed with the first barring - // misc latency, so only sleep up to 5 minutes - // If this is the first key pair, wait up to 10 minutes - let halt_at = if additional { 5 * 10 } else { 10 * 10 }; - let print_at = halt_at / 2; - for i in 0 .. 
-
-      // Get the generated keys
-      let (bitcoin_key_pair, monero_key_pair) = {
-        let key_pair = {
-          let serai = &serai;
-          move |additional, network| async move {
-            // If this is an additional key pair, it should've completed with the first barring
-            // misc latency, so only sleep up to 5 minutes
-            // If this is the first key pair, wait up to 10 minutes
-            let halt_at = if additional { 5 * 10 } else { 10 * 10 };
-            let print_at = halt_at / 2;
-            for i in 0 .. halt_at {
-              if let Some(key_pair) = serai
-                .as_of_latest_finalized_block()
-                .await
-                .unwrap()
-                .validator_sets()
-                .keys(ValidatorSet { network, session: Session(0) })
-                .await
-                .unwrap()
-              {
-                return key_pair;
-              }
-
-              if i == print_at {
-                println!(
-                  "waiting for {}key gen to complete, it has been {} minutes",
-                  if additional { "another " } else { "" },
-                  print_at / 10,
-                );
-              }
-              tokio::time::sleep(Duration::from_secs(6)).await;
-            }
-            panic!(
-              "{}key gen did not complete within {} minutes",
-              if additional { "another " } else { "" },
-              halt_at / 10,
-            );
-          }
-        };
-        (key_pair(false, NetworkId::Bitcoin).await, key_pair(true, NetworkId::Monero).await)
-      };
+
+    // Get the generated keys
+    let (bitcoin_key_pair, monero_key_pair) = {
+      let key_pair = {
+        let serai = &serai;
+        move |additional, network| async move {
+          // If this is an additional key pair, it should've completed with the first barring
+          // misc latency, so only sleep up to 5 minutes
+          // If this is the first key pair, wait up to 10 minutes
+          let halt_at = if additional { 5 * 10 } else { 10 * 10 };
+          let print_at = halt_at / 2;
+          for i in 0 .. halt_at {
+            if let Some(key_pair) = serai
+              .as_of_latest_finalized_block()
+              .await
+              .unwrap()
+              .validator_sets()
+              .keys(ValidatorSet { network, session: Session(0) })
+              .await
+              .unwrap()
+            {
+              return key_pair;
+            }
+
+            if i == print_at {
+              println!(
+                "waiting for {}key gen to complete, it has been {} minutes",
+                if additional { "another " } else { "" },
+                print_at / 10,
+              );
+            }
+            tokio::time::sleep(Duration::from_secs(6)).await;
+          }
+          panic!(
+            "{}key gen did not complete within {} minutes",
+            if additional { "another " } else { "" },
+            halt_at / 10,
+          );
+        }
+      };
+      (key_pair(false, NetworkId::Bitcoin).await, key_pair(true, NetworkId::Monero).await)
+    };
-
-      // Because the initial keys only become active when the network's time matches the Serai
-      // time, the Serai time is real yet the network time may be significantly delayed due to
-      // potentially being a median, mine a bunch of blocks now
-      mine_blocks(&handles, &ops, &mut 0, 100).await;
+
+    // Because the initial keys only become active when the network's time matches the Serai
+    // time, the Serai time is real yet the network time may be significantly delayed due to
+    // potentially being a median, mine a bunch of blocks now
+    mine_blocks(&handles, &ops, &mut 0, 100).await;
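Both key-gen waits are instances of one poll-until-`Some` shape: check every six seconds, log at the halfway mark, panic at the deadline. A generic tokio sketch of that shape (our own helper, not an API from this PR):

use core::{future::Future, time::Duration};

// Poll `check` every `tick`, up to `limit` attempts, returning its first Some.
async fn poll_until<T, F: Future<Output = Option<T>>>(
  mut check: impl FnMut() -> F,
  tick: Duration,
  limit: usize,
) -> T {
  for i in 0 .. limit {
    if let Some(value) = check().await {
      return value;
    }
    if i == (limit / 2) {
      println!("still waiting after {i} attempts");
    }
    tokio::time::sleep(tick).await;
  }
  panic!("condition wasn't met within {limit} attempts");
}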
-
-      // Create a Serai address to receive the sriBTC/sriXMR to
-      let (serai_pair, serai_addr) = {
-        let mut name = [0; 4];
-        OsRng.fill_bytes(&mut name);
-        let pair = insecure_pair_from_name(&hex::encode(name));
-        let address = SeraiAddress::from(pair.public());
-
-        // Fund the new account to pay for fees
-        let balance = Balance { coin: Coin::Serai, amount: Amount(1_000_000_000) };
-        serai
-          .publish(&serai.sign(
-            &insecure_pair_from_name("Ferdie"),
-            SeraiCoins::transfer(address, balance),
-            0,
-            Default::default(),
-          ))
-          .await
-          .unwrap();
-
-        (pair, address)
-      };
+
+    // Create a Serai address to receive the sriBTC/sriXMR to
+    let (serai_pair, serai_addr) = {
+      let mut name = [0; 4];
+      OsRng.fill_bytes(&mut name);
+      let pair = insecure_pair_from_name(&hex::encode(name));
+      let address = SeraiAddress::from(pair.public());
+
+      // Fund the new account to pay for fees
+      let balance = Balance { coin: Coin::Serai, amount: Amount(1_000_000_000) };
+      serai
+        .publish(&serai.sign(
+          &insecure_pair_from_name("Ferdie"),
+          SeraiCoins::transfer(address, balance),
+          0,
+          Default::default(),
+        ))
+        .await
+        .unwrap();
+
+      (pair, address)
+    };
-
-      // Send in BTC
-      {
-        use bitcoin_serai::bitcoin::{
-          secp256k1::{SECP256K1, SecretKey, Message},
-          PrivateKey, PublicKey,
-          key::{XOnlyPublicKey, TweakedPublicKey},
-          sighash::{EcdsaSighashType, SighashCache},
-          script::{PushBytesBuf, Script, ScriptBuf, Builder},
-          absolute::LockTime,
-          transaction::{Version, Transaction},
-          address::Payload,
-          Sequence, Witness, OutPoint, TxIn, Amount, TxOut, Network,
-        };
-
-        let private_key =
-          PrivateKey::new(SecretKey::from_slice(&[0x01; 32]).unwrap(), Network::Bitcoin);
-        let public_key = PublicKey::from_private_key(SECP256K1, &private_key);
-        let addr = Payload::p2pkh(&public_key);
-
-        // Use the first block's coinbase
-        let rpc = handles[0].bitcoin(&ops).await;
-        let tx =
-          rpc.get_block(&rpc.get_block_hash(1).await.unwrap()).await.unwrap().txdata.swap_remove(0);
-        #[allow(clippy::inconsistent_digit_grouping)]
-        let mut tx = Transaction {
-          version: Version(2),
-          lock_time: LockTime::ZERO,
-          input: vec![TxIn {
-            previous_output: OutPoint { txid: tx.txid(), vout: 0 },
-            script_sig: Script::new().into(),
-            sequence: Sequence(u32::MAX),
-            witness: Witness::default(),
-          }],
-          output: vec![
-            TxOut {
-              value: Amount::from_sat(1_100_000_00),
-              script_pubkey: Payload::p2tr_tweaked(TweakedPublicKey::dangerous_assume_tweaked(
-                XOnlyPublicKey::from_slice(&bitcoin_key_pair.1[1 ..]).unwrap(),
-              ))
-              .script_pubkey(),
-            },
-            TxOut {
-              // change = amount spent - fee
-              value: Amount::from_sat(tx.output[0].value.to_sat() - 1_100_000_00 - 1_000_00),
-              script_pubkey: Payload::p2tr_tweaked(TweakedPublicKey::dangerous_assume_tweaked(
-                XOnlyPublicKey::from_slice(&public_key.inner.serialize()[1 ..]).unwrap(),
-              ))
-              .script_pubkey(),
-            },
-            TxOut {
-              value: Amount::ZERO,
-              script_pubkey: ScriptBuf::new_op_return(
-                PushBytesBuf::try_from(Shorthand::transfer(None, serai_addr).encode()).unwrap(),
-              ),
-            },
-          ],
-        };
-
-        let mut der = SECP256K1
-          .sign_ecdsa_low_r(
-            &Message::from(
-              SighashCache::new(&tx)
-                .legacy_signature_hash(0, &addr.script_pubkey(), EcdsaSighashType::All.to_u32())
-                .unwrap()
-                .to_raw_hash(),
-            ),
-            &private_key.inner,
-          )
-          .serialize_der()
-          .to_vec();
-        der.push(1);
-        tx.input[0].script_sig = Builder::new()
-          .push_slice(PushBytesBuf::try_from(der).unwrap())
-          .push_key(&public_key)
-          .into_script();
-
-        rpc.send_raw_transaction(&tx).await.unwrap();
-      }
+
+    // Send in BTC
+    {
+      use bitcoin_serai::bitcoin::{
+        secp256k1::{SECP256K1, SecretKey, Message},
+        PrivateKey, PublicKey,
+        key::{XOnlyPublicKey, TweakedPublicKey},
+        sighash::{EcdsaSighashType, SighashCache},
+        script::{PushBytesBuf, Script, ScriptBuf, Builder},
+        absolute::LockTime,
+        transaction::{Version, Transaction},
+        address::Payload,
+        Sequence, Witness, OutPoint, TxIn, Amount, TxOut, Network,
+      };
+
+      let private_key =
+        PrivateKey::new(SecretKey::from_slice(&[0x01; 32]).unwrap(), Network::Bitcoin);
+      let public_key = PublicKey::from_private_key(SECP256K1, &private_key);
+      let addr = Payload::p2pkh(&public_key);
+
+      // Use the first block's coinbase
+      let rpc = handles[0].bitcoin(&ops).await;
+      let tx =
+        rpc.get_block(&rpc.get_block_hash(1).await.unwrap()).await.unwrap().txdata.swap_remove(0);
+      #[allow(clippy::inconsistent_digit_grouping)]
+      let mut tx = Transaction {
+        version: Version(2),
+        lock_time: LockTime::ZERO,
+        input: vec![TxIn {
+          previous_output: OutPoint { txid: tx.txid(), vout: 0 },
+          script_sig: Script::new().into(),
+          sequence: Sequence(u32::MAX),
+          witness: Witness::default(),
+        }],
+        output: vec![
+          TxOut {
+            value: Amount::from_sat(1_100_000_00),
+            script_pubkey: Payload::p2tr_tweaked(TweakedPublicKey::dangerous_assume_tweaked(
+              XOnlyPublicKey::from_slice(&bitcoin_key_pair.1[1 ..]).unwrap(),
+            ))
+            .script_pubkey(),
+          },
+          TxOut {
+            // change = amount spent - fee
+            value: Amount::from_sat(tx.output[0].value.to_sat() - 1_100_000_00 - 1_000_00),
+            script_pubkey: Payload::p2tr_tweaked(TweakedPublicKey::dangerous_assume_tweaked(
+              XOnlyPublicKey::from_slice(&public_key.inner.serialize()[1 ..]).unwrap(),
+            ))
+            .script_pubkey(),
+          },
+          TxOut {
+            value: Amount::ZERO,
+            script_pubkey: ScriptBuf::new_op_return(
+              PushBytesBuf::try_from(Shorthand::transfer(None, serai_addr).encode()).unwrap(),
+            ),
+          },
+        ],
+      };
+
+      let mut der = SECP256K1
+        .sign_ecdsa_low_r(
+          &Message::from(
+            SighashCache::new(&tx)
+              .legacy_signature_hash(0, &addr.script_pubkey(), EcdsaSighashType::All.to_u32())
+              .unwrap()
+              .to_raw_hash(),
+          ),
+          &private_key.inner,
+        )
+        .serialize_der()
+        .to_vec();
+      der.push(1);
+      tx.input[0].script_sig = Builder::new()
+        .push_slice(PushBytesBuf::try_from(der).unwrap())
+        .push_key(&public_key)
+        .into_script();
+
+      rpc.send_raw_transaction(&tx).await.unwrap();
+    }
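The third output is how the InInstruction rides along with the deposit: the SCALE-encoded `Shorthand` sits in a zero-value OP_RETURN. Isolated sketch using the same `bitcoin` types imported above (`data` stands in for `Shorthand::transfer(None, serai_addr).encode()`):

use bitcoin::{script::PushBytesBuf, Amount, ScriptBuf, TxOut};

// Wrap instruction bytes in a zero-value OP_RETURN output.
// try_from enforces the push-size limit on the payload.
fn instruction_output(data: Vec<u8>) -> TxOut {
  TxOut {
    value: Amount::ZERO,
    script_pubkey: ScriptBuf::new_op_return(PushBytesBuf::try_from(data).unwrap()),
  }
}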
-
-      // Send in XMR
-      {
-        use curve25519_dalek::{
-          constants::ED25519_BASEPOINT_POINT, scalar::Scalar, edwards::CompressedEdwardsY,
-        };
-        use monero_serai::{
-          Protocol,
-          transaction::Timelock,
-          wallet::{
-            ViewPair, Scanner, Decoys, Change, FeePriority, SignableTransaction,
-            address::{Network, AddressType, AddressMeta, MoneroAddress},
-          },
-        };
-
-        // Grab the first output on the chain
-        let rpc = handles[0].monero(&ops).await;
-        let view_pair = ViewPair::new(ED25519_BASEPOINT_POINT, Zeroizing::new(Scalar::ONE));
-        let mut scanner = Scanner::from_view(view_pair.clone(), Some(HashSet::new()));
-        let output = scanner
-          .scan(&rpc, &rpc.get_block_by_number(1).await.unwrap())
-          .await
-          .unwrap()
-          .swap_remove(0)
-          .unlocked(Timelock::Block(rpc.get_height().await.unwrap()))
-          .unwrap()
-          .swap_remove(0);
-
-        let decoys = Decoys::select(
-          &mut OsRng,
-          &rpc,
-          Protocol::v16.ring_len(),
-          rpc.get_height().await.unwrap() - 1,
-          &[output.clone()],
-        )
-        .await
-        .unwrap()
-        .swap_remove(0);
-
-        let tx = SignableTransaction::new(
-          Protocol::v16,
-          None,
-          vec![(output, decoys)],
-          vec![(
-            MoneroAddress::new(
-              AddressMeta::new(
-                Network::Mainnet,
-                AddressType::Featured { guaranteed: true, subaddress: false, payment_id: None },
-              ),
-              CompressedEdwardsY(monero_key_pair.1.to_vec().try_into().unwrap())
-                .decompress()
-                .unwrap(),
-              ED25519_BASEPOINT_POINT *
-                processor::additional_key::<Monero>(0).0,
-            ),
-            1_100_000_000_000,
-          )],
-          Change::new(&view_pair, false),
-          vec![Shorthand::transfer(None, serai_addr).encode()],
-          rpc.get_fee(Protocol::v16, FeePriority::Low).await.unwrap(),
-        )
-        .unwrap()
-        .sign(&mut OsRng, &Zeroizing::new(Scalar::ONE))
-        .unwrap();
-
-        rpc.publish_transaction(&tx).await.unwrap()
-      }
+
+    // Send in XMR
+    {
+      use curve25519_dalek::{
+        constants::ED25519_BASEPOINT_POINT, scalar::Scalar, edwards::CompressedEdwardsY,
+      };
+      use monero_serai::{
+        Protocol,
+        transaction::Timelock,
+        wallet::{
+          ViewPair, Scanner, Decoys, Change, FeePriority, SignableTransaction,
+          address::{Network, AddressType, AddressMeta, MoneroAddress},
+        },
+      };
+
+      // Grab the first output on the chain
+      let rpc = handles[0].monero(&ops).await;
+      let view_pair = ViewPair::new(ED25519_BASEPOINT_POINT, Zeroizing::new(Scalar::ONE));
+      let mut scanner = Scanner::from_view(view_pair.clone(), Some(HashSet::new()));
+      let output = scanner
+        .scan(&rpc, &rpc.get_block_by_number(1).await.unwrap())
+        .await
+        .unwrap()
+        .swap_remove(0)
+        .unlocked(Timelock::Block(rpc.get_height().await.unwrap()))
+        .unwrap()
+        .swap_remove(0);
+
+      let decoys = Decoys::select(
+        &mut OsRng,
+        &rpc,
+        Protocol::v16.ring_len(),
+        rpc.get_height().await.unwrap() - 1,
+        &[output.clone()],
+      )
+      .await
+      .unwrap()
+      .swap_remove(0);
+
+      let tx = SignableTransaction::new(
+        Protocol::v16,
+        None,
+        vec![(output, decoys)],
+        vec![(
+          MoneroAddress::new(
+            AddressMeta::new(
+              Network::Mainnet,
+              AddressType::Featured { guaranteed: true, subaddress: false, payment_id: None },
+            ),
+            CompressedEdwardsY(monero_key_pair.1.to_vec().try_into().unwrap())
+              .decompress()
+              .unwrap(),
+            ED25519_BASEPOINT_POINT *
+              processor::additional_key::<Monero>(0).0,
+          ),
+          1_100_000_000_000,
+        )],
+        &Change::new(&view_pair, false),
+        vec![Shorthand::transfer(None, serai_addr).encode()],
+        rpc.get_fee(Protocol::v16, FeePriority::Low).await.unwrap(),
+      )
+      .unwrap()
+      .sign(&mut OsRng, &Zeroizing::new(Scalar::ONE))
+      .unwrap();
+
+      rpc.publish_transaction(&tx).await.unwrap()
+    }
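The destination above is a "featured" address flagged `guaranteed: true`, which lets the processor scan deposits without trusting sender-derived metadata; its spend key is the network's generated key and its view key a deterministic additional key. Building such an address in isolation (the same monero-serai calls as above; `spend_key`/`view_key` are stand-ins for the test's values):

use curve25519_dalek::edwards::EdwardsPoint;
use monero_serai::wallet::address::{Network, AddressType, AddressMeta, MoneroAddress};

// Construct a guaranteed ("featured") mainnet address for the given keys.
fn guaranteed_address(spend_key: EdwardsPoint, view_key: EdwardsPoint) -> MoneroAddress {
  MoneroAddress::new(
    AddressMeta::new(
      Network::Mainnet,
      AddressType::Featured { guaranteed: true, subaddress: false, payment_id: None },
    ),
    spend_key,
    view_key,
  )
}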
-
-      // Wait for Batch publication
-      // TODO: Merge this block with the above one
-      // (take in a lambda for the specific checks to execute?)
-      {
-        let wait_for_batch = {
-          let serai = &serai;
-          move |additional, network| async move {
-            let halt_at = if additional { 5 * 10 } else { 10 * 10 };
-            let print_at = halt_at / 2;
-            for i in 0 .. halt_at {
-              if serai
-                .as_of_latest_finalized_block()
-                .await
-                .unwrap()
-                .in_instructions()
-                .last_batch_for_network(network)
-                .await
-                .unwrap()
-                .is_some()
-              {
-                return;
-              }
-
-              if i == print_at {
-                println!(
-                  "waiting for {}batch to complete, it has been {} minutes",
-                  if additional { "another " } else { "" },
-                  print_at / 10,
-                );
-              }
-              tokio::time::sleep(Duration::from_secs(6)).await;
-            }
-            panic!(
-              "{}batch did not complete within {} minutes",
-              if additional { "another " } else { "" },
-              halt_at / 10,
-            );
-          }
-        };
-        wait_for_batch(false, NetworkId::Bitcoin).await;
-        wait_for_batch(true, NetworkId::Monero).await;
-      }
+
+    // Wait for Batch publication
+    // TODO: Merge this block with the above one
+    // (take in a lambda for the specific checks to execute?)
+    {
+      let wait_for_batch = {
+        let serai = &serai;
+        move |additional, network| async move {
+          let halt_at = if additional { 5 * 10 } else { 10 * 10 };
+          let print_at = halt_at / 2;
+          for i in 0 .. halt_at {
+            if serai
+              .as_of_latest_finalized_block()
+              .await
+              .unwrap()
+              .in_instructions()
+              .last_batch_for_network(network)
+              .await
+              .unwrap()
+              .is_some()
+            {
+              return;
+            }
+
+            if i == print_at {
+              println!(
+                "waiting for {}batch to complete, it has been {} minutes",
+                if additional { "another " } else { "" },
+                print_at / 10,
+              );
+            }
+            tokio::time::sleep(Duration::from_secs(6)).await;
+          }
+          panic!(
+            "{}batch did not complete within {} minutes",
+            if additional { "another " } else { "" },
+            halt_at / 10,
+          );
+        }
+      };
+      wait_for_batch(false, NetworkId::Bitcoin).await;
+      wait_for_batch(true, NetworkId::Monero).await;
+    }
-
-      // TODO: Verify the mints
-
-      // Create a random Bitcoin/Monero address
-      let bitcoin_addr = {
-        use bitcoin_serai::bitcoin::{network::Network, key::PublicKey, address::Address};
-        // Uses Network::Bitcoin since it doesn't actually matter, Serai strips it out
-        // TODO: Move Serai to Payload from Address
-        Address::p2pkh(
-          &loop {
-            let mut bytes = [0; 33];
-            OsRng.fill_bytes(&mut bytes);
-            bytes[0] %= 4;
-            if let Ok(key) = PublicKey::from_slice(&bytes) {
-              break key;
-            }
-          },
-          Network::Bitcoin,
-        )
-      };
+
+    // TODO: Verify the mints
+
+    // Create a random Bitcoin/Monero address
+    let bitcoin_addr = {
+      use bitcoin_serai::bitcoin::{network::Network, key::PublicKey, address::Address};
+      // Uses Network::Bitcoin since it doesn't actually matter, Serai strips it out
+      // TODO: Move Serai to Payload from Address
+      Address::p2pkh(
+        &loop {
+          let mut bytes = [0; 33];
+          OsRng.fill_bytes(&mut bytes);
+          bytes[0] %= 4;
+          if let Ok(key) = PublicKey::from_slice(&bytes) {
+            break key;
+          }
+        },
+        Network::Bitcoin,
+      )
+    };
-
-      let (monero_spend, monero_view, monero_addr) = {
-        use curve25519_dalek::{constants::ED25519_BASEPOINT_TABLE, scalar::Scalar};
-        let spend = ED25519_BASEPOINT_TABLE * &Scalar::random(&mut OsRng);
-        let view = Scalar::random(&mut OsRng);
-
-        use monero_serai::wallet::address::{Network, AddressType, AddressMeta, MoneroAddress};
-        let addr = MoneroAddress::new(
-          AddressMeta::new(Network::Mainnet, AddressType::Standard),
-          spend,
-          ED25519_BASEPOINT_TABLE * &view,
-        );
-
-        (spend, view, addr)
-      };
+
+    let (monero_spend, monero_view, monero_addr) = {
+      use curve25519_dalek::{constants::ED25519_BASEPOINT_TABLE, scalar::Scalar};
+      let spend = ED25519_BASEPOINT_TABLE * &Scalar::random(&mut OsRng);
+      let view = Scalar::random(&mut OsRng);
+
+      use monero_serai::wallet::address::{Network, AddressType, AddressMeta, MoneroAddress};
+      let addr = MoneroAddress::new(
+        AddressMeta::new(Network::Mainnet, AddressType::Standard),
+        spend,
+        ED25519_BASEPOINT_TABLE * &view,
+      );
+
+      (spend, view, addr)
+    };
-
-      // Get the current blocks
-      let mut start_bitcoin_block =
-        handles[0].bitcoin(&ops).await.get_latest_block_number().await.unwrap();
-      let mut start_monero_block = handles[0].monero(&ops).await.get_height().await.unwrap();
+
+    // Get the current blocks
+    let mut start_bitcoin_block =
+      handles[0].bitcoin(&ops).await.get_latest_block_number().await.unwrap();
+    let mut start_monero_block = handles[0].monero(&ops).await.get_height().await.unwrap();
-
-      // Burn the sriBTC/sriXMR
-      {
-        let burn = {
-          let serai = &serai;
-          let serai_pair = &serai_pair;
-          move |nonce, coin, amount, address| async move {
-            let out_instruction = OutInstructionWithBalance {
-              balance: Balance { coin, amount: Amount(amount) },
-              instruction: OutInstruction { address, data: None },
-            };
-
-            serai
-              .publish(&serai.sign(
-                serai_pair,
-                SeraiCoins::burn_with_instruction(out_instruction),
-                nonce,
-                Default::default(),
-              ))
-              .await
-              .unwrap();
-          }
-        };
-
-        #[allow(clippy::inconsistent_digit_grouping)]
-        burn(
-          0,
-          Coin::Bitcoin,
-          1_000_000_00,
-          ExternalAddress::new(
-            serai_client::networks::bitcoin::Address(bitcoin_addr.clone()).try_into().unwrap(),
-          )
-          .unwrap(),
-        )
-        .await;
-
-        burn(
-          1,
-          Coin::Monero,
-          1_000_000_000_000,
-          ExternalAddress::new(
-            serai_client::networks::monero::Address::new(monero_addr).unwrap().into(),
-          )
-          .unwrap(),
-        )
-        .await;
-      }
+
+    // Burn the sriBTC/sriXMR
+    {
+      let burn = {
+        let serai = &serai;
+        let serai_pair = &serai_pair;
+        move |nonce, coin, amount, address| async move {
+          let out_instruction = OutInstructionWithBalance {
+            balance: Balance { coin, amount: Amount(amount) },
+            instruction: OutInstruction { address, data: None },
+          };
+
+          serai
+            .publish(&serai.sign(
+              serai_pair,
+              SeraiCoins::burn_with_instruction(out_instruction),
+              nonce,
+              Default::default(),
+            ))
+            .await
+            .unwrap();
+        }
+      };
+
+      #[allow(clippy::inconsistent_digit_grouping)]
+      burn(
+        0,
+        Coin::Bitcoin,
+        1_000_000_00,
+        ExternalAddress::new(
+          serai_client::networks::bitcoin::Address(bitcoin_addr.clone()).try_into().unwrap(),
+        )
+        .unwrap(),
+      )
+      .await;
+
+      burn(
+        1,
+        Coin::Monero,
+        1_000_000_000_000,
+        ExternalAddress::new(
+          serai_client::networks::monero::Address::new(monero_addr).unwrap().into(),
+        )
+        .unwrap(),
+      )
+      .await;
+    }
-
-      // TODO: Verify the burns
+
+    // TODO: Verify the burns
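The repeated `#[allow(clippy::inconsistent_digit_grouping)]` exists because these literals split the eight satoshi digits off from the whole-BTC digit (`1_100_000_00` is 1.1 BTC) rather than grouping by thousands, which clippy otherwise rejects. A self-contained illustration of the amounts involved:

// Clippy flags these literals without the allow: the grouping is meant to be
// read as BTC plus satoshi digits, not as thousands separators.
#[allow(clippy::inconsistent_digit_grouping)]
fn main() {
  const DEPOSIT_SATS: u64 = 1_100_000_00; // the 1.1 BTC sent in
  const BURN_SATS: u64 = 1_000_000_00; // the 1.0 BTC burned
  assert_eq!(DEPOSIT_SATS, 110_000_000);
  assert_eq!(DEPOSIT_SATS - BURN_SATS, 10_000_000); // 0.1 BTC of headroom for fees
}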
-
-      // Verify the received Bitcoin TX
-      #[allow(clippy::inconsistent_digit_grouping)]
-      {
-        let rpc = handles[0].bitcoin(&ops).await;
-
-        // Check for up to 15 minutes
-        let mut found = false;
-        let mut i = 0;
-        while i < (15 * 6) {
-          if let Ok(hash) = rpc.get_block_hash(start_bitcoin_block).await {
-            let block = rpc.get_block(&hash).await.unwrap();
-            start_bitcoin_block += 1;
-
-            if block.txdata.len() > 1 {
-              assert_eq!(block.txdata.len(), 2);
-              assert_eq!(block.txdata[1].output.len(), 2);
-
-              let received_output = block.txdata[1]
-                .output
-                .iter()
-                .find(|output| output.script_pubkey == bitcoin_addr.script_pubkey())
-                .unwrap();
-
-              let tx_fee = 1_100_000_00 -
-                block.txdata[1].output.iter().map(|output| output.value.to_sat()).sum::<u64>();
-
-              assert_eq!(received_output.value.to_sat(), 1_000_000_00 - tx_fee);
-              found = true;
-            }
-          } else {
-            i += 1;
-            tokio::time::sleep(Duration::from_secs(10)).await;
-          }
-        }
-        if !found {
-          panic!("couldn't find the expected Bitcoin transaction within 15 minutes");
-        }
-      }
+
+    // Verify the received Bitcoin TX
+    #[allow(clippy::inconsistent_digit_grouping)]
+    {
+      let rpc = handles[0].bitcoin(&ops).await;
+
+      // Check for up to 15 minutes
+      let mut found = false;
+      let mut i = 0;
+      while i < (15 * 6) {
+        if let Ok(hash) = rpc.get_block_hash(start_bitcoin_block).await {
+          let block = rpc.get_block(&hash).await.unwrap();
+          start_bitcoin_block += 1;
+
+          if block.txdata.len() > 1 {
+            assert_eq!(block.txdata.len(), 2);
+            assert_eq!(block.txdata[1].output.len(), 2);
+
+            let received_output = block.txdata[1]
+              .output
+              .iter()
+              .find(|output| output.script_pubkey == bitcoin_addr.script_pubkey())
+              .unwrap();
+
+            let tx_fee = 1_100_000_00 -
+              block.txdata[1].output.iter().map(|output| output.value.to_sat()).sum::<u64>();
+
+            assert_eq!(received_output.value.to_sat(), 1_000_000_00 - tx_fee);
+            found = true;
+          }
+        } else {
+          i += 1;
+          tokio::time::sleep(Duration::from_secs(10)).await;
+        }
+      }
+      if !found {
+        panic!("couldn't find the expected Bitcoin transaction within 15 minutes");
+      }
+    }
-
-      // Verify the received Monero TX
-      {
-        use monero_serai::wallet::{ViewPair, Scanner};
-        let rpc = handles[0].monero(&ops).await;
-        let mut scanner = Scanner::from_view(
-          ViewPair::new(monero_spend, Zeroizing::new(monero_view)),
-          Some(HashSet::new()),
-        );
-
-        // Check for up to 5 minutes
-        let mut found = false;
-        let mut i = 0;
-        while i < (5 * 6) {
-          if let Ok(block) = rpc.get_block_by_number(start_monero_block).await {
-            start_monero_block += 1;
-            let outputs = scanner.scan(&rpc, &block).await.unwrap();
-            if !outputs.is_empty() {
-              assert_eq!(outputs.len(), 1);
-              let outputs = outputs[0].not_locked();
-              assert_eq!(outputs.len(), 1);
-
-              assert_eq!(block.txs.len(), 1);
-              let tx = rpc.get_transaction(block.txs[0]).await.unwrap();
-              let tx_fee = tx.rct_signatures.base.fee;
-
-              assert_eq!(outputs[0].commitment().amount, 1_000_000_000_000 - tx_fee);
-              found = true;
-            }
-          } else {
-            i += 1;
-            tokio::time::sleep(Duration::from_secs(10)).await;
-          }
-        }
-        if !found {
-          panic!("couldn't find the expected Monero transaction within 5 minutes");
-        }
-      }
+
+    // Verify the received Monero TX
+    {
+      use monero_serai::wallet::{ViewPair, Scanner};
+      let rpc = handles[0].monero(&ops).await;
+      let mut scanner = Scanner::from_view(
+        ViewPair::new(monero_spend, Zeroizing::new(monero_view)),
+        Some(HashSet::new()),
+      );
+
+      // Check for up to 5 minutes
+      let mut found = false;
+      let mut i = 0;
+      while i < (5 * 6) {
+        if let Ok(block) = rpc.get_block_by_number(start_monero_block).await {
+          start_monero_block += 1;
+          let outputs = scanner.scan(&rpc, &block).await.unwrap();
+          if !outputs.is_empty() {
+            assert_eq!(outputs.len(), 1);
+            let outputs = outputs[0].not_locked();
+            assert_eq!(outputs.len(), 1);
+
+            assert_eq!(block.txs.len(), 1);
+            let tx = rpc.get_transaction(block.txs[0]).await.unwrap();
+            let tx_fee = tx.rct_signatures.base.fee;
+
+            assert_eq!(outputs[0].commitment().amount, 1_000_000_000_000 - tx_fee);
+            found = true;
+          }
+        } else {
+          i += 1;
+          tokio::time::sleep(Duration::from_secs(10)).await;
+        }
+      }
+      if !found {
+        panic!("couldn't find the expected Monero transaction within 5 minutes");
+      }
+    }
-
-      *keep_mining.lock().unwrap() = false;
-      mining_task.await.unwrap();
-    })
-    .await;
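Both verifications assert the same invariant: the user receives the burned amount minus that chain's transaction fee, with the Bitcoin fee derived as the spent input's value minus the total outputs. The arithmetic in isolation (values illustrative, not the test's actual fees):

// Mirrors the Bitcoin check above: fee = spent input - sum of outputs,
// and the payout output must equal the burned amount minus that fee.
#[allow(clippy::inconsistent_digit_grouping)]
fn main() {
  let deposit: u64 = 1_100_000_00; // the input the processor spends
  let outputs: [u64; 2] = [99_997_000, 10_000_000]; // payout + change (illustrative)
  let tx_fee = deposit - outputs.iter().sum::<u64>();
  let burned: u64 = 1_000_000_00;
  assert_eq!(outputs[0], burned - tx_fee);
}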
+
+    *keep_mining.lock().unwrap() = false;
+    mining_task.await.unwrap();
+  })
+  .await;
 }
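The harness rewrite that follows (tests/full-stack/src/tests/mod.rs) replaces `new_test() -> (Vec<Handles>, DockerTest)` with a `new_test` which receives the test body itself; via the blanket `TestBody` impl, any async closure over `(DockerOperations, Vec<Handles>)` qualifies. A sketch of the resulting call shape inside this crate (assuming `#[tokio::test]` or an equivalent runtime is available; the assertion is illustrative):

#[tokio::test]
async fn example_full_stack_test() {
  new_test(|_ops: DockerOperations, handles: Vec<Handles>| async move {
    // The full stack is up by the time the body runs
    assert_eq!(handles.len(), VALIDATORS);
  })
  .await;
}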
diff --git a/tests/full-stack/src/tests/mod.rs b/tests/full-stack/src/tests/mod.rs
index 2c6603933..31c989521 100644
--- a/tests/full-stack/src/tests/mod.rs
+++ b/tests/full-stack/src/tests/mod.rs
@@ -1,6 +1,19 @@
-use std::sync::OnceLock;
+use core::future::Future;
+use std::{sync::OnceLock, collections::HashMap};
 
-use dockertest::DockerTest;
+use tokio::sync::Mutex;
+
+use serai_client::primitives::NetworkId;
+
+use dockertest::{
+  LogAction, LogPolicy, LogSource, LogOptions, StartPolicy, TestBodySpecification,
+  DockerOperations, DockerTest,
+};
+
+use serai_docker_tests::fresh_logs_folder;
+use serai_processor_tests::{network_instance, processor_instance};
+use serai_message_queue_tests::instance as message_queue_instance;
+use serai_coordinator_tests::{coordinator_instance, serai_composition};
 
 use crate::*;
 
@@ -9,13 +22,29 @@
 mod mint_and_burn;
 
 pub(crate) const VALIDATORS: usize = 4;
 // pub(crate) const THRESHOLD: usize = ((VALIDATORS * 2) / 3) + 1;
 
-pub(crate) static ONE_AT_A_TIME: OnceLock<Mutex<()>> = OnceLock::new();
+static UNIQUE_ID: OnceLock<Mutex<u16>> = OnceLock::new();
+
+#[async_trait::async_trait]
+pub(crate) trait TestBody: 'static + Send + Sync {
+  async fn body(&self, ops: DockerOperations, handles: Vec<Handles>);
+}
+#[async_trait::async_trait]
+impl<F: Send + Future, TB: 'static + Send + Sync + Fn(DockerOperations, Vec<Handles>) -> F>
+  TestBody for TB
+{
+  async fn body(&self, ops: DockerOperations, handles: Vec<Handles>) {
+    (self)(ops, handles).await;
+  }
+}
+
+pub(crate) async fn new_test(test_body: impl TestBody) {
+  let mut unique_id_lock = UNIQUE_ID.get_or_init(|| Mutex::new(0)).lock().await;
 
-pub(crate) fn new_test() -> (Vec<Handles>, DockerTest) {
-  let mut validators = vec![];
+  let mut all_handles = vec![];
   let mut test = DockerTest::new().with_network(dockertest::Network::Isolated);
+  let mut coordinator_compositions = vec![];
   for i in 0 .. VALIDATORS {
-    let (handles, compositions) = full_stack(match i {
+    let name = match i {
       0 => "Alice",
       1 => "Bob",
       2 => "Charlie",
@@ -23,11 +52,164 @@ pub(crate) fn new_test() -> (Vec<Handles>, DockerTest) {
       4 => "Eve",
       5 => "Ferdie",
       _ => panic!("needed a 7th name for a serai node"),
-    });
-    validators.push(handles);
-    for composition in compositions {
+    };
+
+    let (coord_key, message_queue_keys, message_queue_composition) = message_queue_instance();
+
+    let (bitcoin_composition, bitcoin_port) = network_instance(NetworkId::Bitcoin);
+    let bitcoin_processor_composition =
+      processor_instance(NetworkId::Bitcoin, bitcoin_port, message_queue_keys[&NetworkId::Bitcoin]);
+
+    let (monero_composition, monero_port) = network_instance(NetworkId::Monero);
+    let monero_processor_composition =
+      processor_instance(NetworkId::Monero, monero_port, message_queue_keys[&NetworkId::Monero]);
+
+    let coordinator_composition = coordinator_instance(name, coord_key);
+    let serai_composition = serai_composition(name);
+
+    // Give every item in this stack a unique ID
+    // Uses a Mutex as we can't generate an 8-byte random ID without hitting hostname length limits
+    let (first, unique_id) = {
+      let first = *unique_id_lock == 0;
+      let unique_id = *unique_id_lock;
+      *unique_id_lock += 1;
+      (first, unique_id)
+    };
+
+    let logs_path = fresh_logs_folder(first, "full-stack");
+
+    let mut compositions = HashMap::new();
+    let mut handles = HashMap::new();
+    for (name, composition) in [
+      ("message_queue", message_queue_composition),
+      ("bitcoin", bitcoin_composition),
+      ("bitcoin_processor", bitcoin_processor_composition),
+      ("monero", monero_composition),
+      ("monero_processor", monero_processor_composition),
+      ("coordinator", coordinator_composition),
+      ("serai", serai_composition),
+    ] {
+      let handle = format!("full_stack-{name}-{unique_id}");
+      compositions.insert(
+        name,
+        composition
+          .set_start_policy(StartPolicy::Strict)
+          .set_handle(handle.clone())
+          .set_log_options(Some(LogOptions {
+            action: if std::env::var("GITHUB_CI") == Ok("true".to_string()) {
+              LogAction::Forward
+            } else {
+              LogAction::ForwardToFile { path: logs_path.clone() }
+            },
+            policy: LogPolicy::Always,
+            source: LogSource::Both,
+          })),
+      );
+      handles.insert(name, handle);
+    }
+
+    let handles = Handles {
+      message_queue: handles.remove("message_queue").unwrap(),
+      bitcoin: (handles.remove("bitcoin").unwrap(), bitcoin_port),
+      bitcoin_processor: handles.remove("bitcoin_processor").unwrap(),
+      monero: (handles.remove("monero").unwrap(), monero_port),
+      monero_processor: handles.remove("monero_processor").unwrap(),
+      serai: handles.remove("serai").unwrap(),
+    };
+
+    {
+      let bitcoin_processor_composition = compositions.get_mut("bitcoin_processor").unwrap();
+      bitcoin_processor_composition
+        .inject_container_name(handles.message_queue.clone(), "MESSAGE_QUEUE_RPC");
+      bitcoin_processor_composition
+        .inject_container_name(handles.bitcoin.0.clone(), "NETWORK_RPC_HOSTNAME");
+    }
+
+    {
+      let monero_processor_composition = compositions.get_mut("monero_processor").unwrap();
+      monero_processor_composition
+        .inject_container_name(handles.message_queue.clone(), "MESSAGE_QUEUE_RPC");
+      monero_processor_composition
+        .inject_container_name(handles.monero.0.clone(), "NETWORK_RPC_HOSTNAME");
+    }
+
+    coordinator_compositions.push(compositions.remove("coordinator").unwrap());
+
+    all_handles.push(handles);
+    for (_, composition) in compositions {
       test.provide_container(composition);
     }
   }
-  (validators, test)
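Handle generation sketched standalone: a short, process-wide counter keeps container handles unique across stacks without exceeding hostname length limits (std primitives here, where the real code uses tokio's `Mutex` and takes one ID per seven-container stack; names are illustrative):

use std::sync::{Mutex, OnceLock};

static UNIQUE_ID: OnceLock<Mutex<u16>> = OnceLock::new();

// Produce a short, unique, docker-safe handle for a composition.
fn handle_for(name: &str) -> String {
  let mut id = UNIQUE_ID.get_or_init(|| Mutex::new(0)).lock().unwrap();
  let handle = format!("full_stack-{name}-{}", *id);
  *id += 1;
  handle
}

fn main() {
  assert_eq!(handle_for("serai"), "full_stack-serai-0");
  assert_eq!(handle_for("serai"), "full_stack-serai-1");
}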
+
+  struct Context {
+    pending_coordinator_compositions: Mutex<Vec<TestBodySpecification>>,
+    handles: Vec<Handles>,
+    test_body: Box<dyn TestBody>,
+  }
+  static CONTEXT: OnceLock<Mutex<Option<Context>>> = OnceLock::new();
+  *CONTEXT.get_or_init(|| Mutex::new(None)).lock().await = Some(Context {
+    pending_coordinator_compositions: Mutex::new(coordinator_compositions),
+    handles: all_handles,
+    test_body: Box::new(test_body),
+  });
+
+  // The DockerOperations from the first invocation, containing the Message Queue servers and the
+  // Serai nodes.
+  static OUTER_OPS: OnceLock<Mutex<Option<DockerOperations>>> = OnceLock::new();
+
+  // Reset OUTER_OPS
+  *OUTER_OPS.get_or_init(|| Mutex::new(None)).lock().await = None;
+
+  // Spawns a coordinator, if one has yet to be spawned, or else runs the test.
+  #[async_recursion::async_recursion]
+  async fn spawn_coordinator_or_run_test(inner_ops: DockerOperations) {
+    // If the outer operations have yet to be set, these *are* the outer operations
+    let outer_ops = OUTER_OPS.get().unwrap();
+    if outer_ops.lock().await.is_none() {
+      *outer_ops.lock().await = Some(inner_ops);
+    }
+
+    let context_lock = CONTEXT.get().unwrap().lock().await;
+    let Context { pending_coordinator_compositions, handles, test_body } =
+      context_lock.as_ref().unwrap();
+
+    // Check if there is a coordinator left
+    let maybe_coordinator = {
+      let mut remaining = pending_coordinator_compositions.lock().await;
+      let maybe_coordinator = if !remaining.is_empty() {
+        let handles = handles[handles.len() - remaining.len()].clone();
+        let composition = remaining.remove(0);
+        Some((composition, handles))
+      } else {
+        None
+      };
+      drop(remaining);
+      maybe_coordinator
+    };
+
+    if let Some((mut composition, handles)) = maybe_coordinator {
+      let network = {
+        let outer_ops = outer_ops.lock().await;
+        let outer_ops = outer_ops.as_ref().unwrap();
+        // Spawn it by building another DockerTest which recursively calls this function
+        // TODO: Spawn this outside of DockerTest so we can remove the recursion
+        let serai_container = outer_ops.handle(&handles.serai);
+        composition.modify_env("SERAI_HOSTNAME", serai_container.ip());
+        let message_queue_container = outer_ops.handle(&handles.message_queue);
+        composition.modify_env("MESSAGE_QUEUE_RPC", message_queue_container.ip());
+
+        format!("container:{}", serai_container.name())
+      };
+      let mut test = DockerTest::new().with_network(dockertest::Network::External(network));
+      test.provide_container(composition);
+
+      drop(context_lock);
+      test.run_async(spawn_coordinator_or_run_test).await;
+    } else {
+      let outer_ops = outer_ops.lock().await.take().unwrap();
+      test_body.body(outer_ops, handles.clone()).await;
+    }
+  }
+
+  test.run_async(spawn_coordinator_or_run_test).await;
 }
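`spawn_coordinator_or_run_test` calls itself from inside `test.run_async`, and a directly recursive `async fn` would have an infinitely-sized future, hence the `async-recursion` dependency visible in the Cargo.lock changes. The minimal form of the pattern:

// The macro boxes the recursive call's future, giving the async fn a known size.
#[async_recursion::async_recursion]
async fn countdown(n: usize) {
  if n > 0 {
    println!("{n}");
    countdown(n - 1).await;
  }
}

#[tokio::main]
async fn main() {
  countdown(3).await;
}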
"../../coins/monero/generators", default-features = false } -monero-serai = { path = "../../coins/monero", default-features = false } +monero-serai = { path = "../../coins/monero", default-features = false, features = ["cache-distribution"] } diff --git a/tests/processor/Cargo.toml b/tests/processor/Cargo.toml index d7582eabb..686dbcea7 100644 --- a/tests/processor/Cargo.toml +++ b/tests/processor/Cargo.toml @@ -13,6 +13,9 @@ publish = false all-features = true rustdoc-args = ["--cfg", "docsrs"] +[lints] +workspace = true + [dependencies] hex = "0.4" diff --git a/tests/processor/src/lib.rs b/tests/processor/src/lib.rs index a318851eb..511382abb 100644 --- a/tests/processor/src/lib.rs +++ b/tests/processor/src/lib.rs @@ -314,7 +314,7 @@ impl Coordinator { let res: Option = rpc.rpc_call("submitblock", serde_json::json!([hex::encode(block)])).await.unwrap(); if let Some(err) = res { - panic!("submitblock failed: {}", err); + panic!("submitblock failed: {err}"); } } NetworkId::Ethereum => todo!(), diff --git a/tests/processor/src/networks.rs b/tests/processor/src/networks.rs index db02686e5..a54703ce9 100644 --- a/tests/processor/src/networks.rs +++ b/tests/processor/src/networks.rs @@ -361,7 +361,7 @@ impl Wallet { None, these_inputs.drain(..).zip(decoys.drain(..)).collect(), vec![(to_addr, AMOUNT)], - Change::new(view_pair, false), + &Change::new(view_pair, false), data, rpc.get_fee(Protocol::v16, FeePriority::Low).await.unwrap(), ) diff --git a/tests/processor/src/tests/batch.rs b/tests/processor/src/tests/batch.rs index 9c678b983..5729fd73e 100644 --- a/tests/processor/src/tests/batch.rs +++ b/tests/processor/src/tests/batch.rs @@ -7,7 +7,6 @@ use dkg::{Participant, tests::clone_without}; use messages::{coordinator::*, SubstrateContext}; -use scale::Encode; use serai_client::{ primitives::{ BlockHash, Amount, Balance, crypto::RuntimePublic, PublicKey, SeraiAddress, NetworkId, @@ -28,11 +27,7 @@ pub(crate) async fn recv_batch_preprocesses( batch: &Batch, attempt: u32, ) -> (SubstrateSignId, HashMap) { - let id = SubstrateSignId { - session, - id: SubstrateSignableId::Batch((batch.network, batch.id).encode().try_into().unwrap()), - attempt, - }; + let id = SubstrateSignId { session, id: SubstrateSignableId::Batch(batch.id), attempt }; let mut block = None; let mut preprocesses = HashMap::new(); @@ -285,7 +280,7 @@ fn batch_test() { // TODO: Double check how the processor handles this ID field // It should be able to assert its perfectly sequential id.attempt = attempt; - for coordinator in coordinators.iter_mut() { + for coordinator in &mut coordinators { coordinator .send_message(messages::coordinator::CoordinatorMessage::BatchReattempt { id: id.clone(), diff --git a/tests/processor/src/tests/key_gen.rs b/tests/processor/src/tests/key_gen.rs index b98ec04eb..d50c12b79 100644 --- a/tests/processor/src/tests/key_gen.rs +++ b/tests/processor/src/tests/key_gen.rs @@ -115,7 +115,7 @@ pub(crate) async fn key_gen(coordinators: &mut [Coordinator]) -> KeyPair { .await; // Confirm the key pair - // TODO: Beter document network_latest_finalized_block's genesis state, and error if a set claims + // TODO: Better document network_latest_finalized_block's genesis state, and error if a set claims // [0; 32] was finalized let context = SubstrateContext { serai_time: SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs(), diff --git a/tests/processor/src/tests/send.rs b/tests/processor/src/tests/send.rs index 986671c16..8685af047 100644 --- a/tests/processor/src/tests/send.rs +++ 
@@ -246,14 +246,14 @@ fn send_test() {
       // TODO: Double check how the processor handles this ID field
       // It should be able to assert its perfectly sequential
       id.attempt = attempt;
-      for coordinator in coordinators.iter_mut() {
+      for coordinator in &mut coordinators {
         coordinator
           .send_message(messages::sign::CoordinatorMessage::Reattempt { id: id.clone() })
           .await;
       }
       (id, preprocesses) = recv_sign_preprocesses(&mut coordinators, Session(0), attempt).await;
     }
-    let participating = preprocesses.keys().cloned().collect::<HashSet<_>>();
+    let participating = preprocesses.keys().copied().collect::<HashSet<_>>();
 
     let tx_id = sign_tx(&mut coordinators, Session(0), id.clone(), preprocesses).await;
 
@@ -272,7 +272,7 @@ fn send_test() {
     for (i, coordinator) in coordinators.iter_mut().enumerate() {
       if !participating.contains(&i) {
         coordinator.publish_transacton(&ops, &tx).await;
-        // Tell them of it as a completion of the relevant signing nodess
+        // Tell them of it as a completion of the relevant signing nodes
         coordinator
           .send_message(messages::sign::CoordinatorMessage::Completed {
             session: Session(0),
@@ -297,8 +297,8 @@ fn send_test() {
     }
 
     // TODO: Test the Eventuality from the blockchain, instead of from the coordinator
-    // TODO: Test what happenns when Completed is sent with a non-existent TX ID
-    // TODO: Test what happenns when Completed is sent with a non-completing TX ID
+    // TODO: Test what happens when Completed is sent with a non-existent TX ID
+    // TODO: Test what happens when Completed is sent with a non-completing TX ID
   });
 }
 }
diff --git a/tests/reproducible-runtime/Cargo.toml b/tests/reproducible-runtime/Cargo.toml
index 9e1d11ec0..6e024111a 100644
--- a/tests/reproducible-runtime/Cargo.toml
+++ b/tests/reproducible-runtime/Cargo.toml
@@ -13,6 +13,9 @@
 all-features = true
 rustdoc-args = ["--cfg", "docsrs"]
 
+[lints]
+workspace = true
+
 [dependencies]
 rand_core = "0.6"
 hex = "0.4"
diff --git a/tests/reproducible-runtime/src/lib.rs b/tests/reproducible-runtime/src/lib.rs
index 2a7f7f51d..3421026db 100644
--- a/tests/reproducible-runtime/src/lib.rs
+++ b/tests/reproducible-runtime/src/lib.rs
@@ -96,6 +96,6 @@ pub fn reproducibly_builds() {
     for res in res.clone() {
       identical.insert(res.unwrap());
     }
-    assert_eq!(identical.len(), 1, "got different runtime hashes {:?}", res);
+    assert_eq!(identical.len(), 1, "got different runtime hashes {res:?}");
   });
 }