diff --git a/.github/actions/bitcoin/action.yml b/.github/actions/bitcoin/action.yml index 903108680..2a6dbce3e 100644 --- a/.github/actions/bitcoin/action.yml +++ b/.github/actions/bitcoin/action.yml @@ -5,7 +5,7 @@ inputs: version: description: "Version to download and run" required: false - default: 24.0.1 + default: "27.0" runs: using: "composite" diff --git a/.github/actions/build-dependencies/action.yml b/.github/actions/build-dependencies/action.yml index 2a8e8ed8c..5994b7232 100644 --- a/.github/actions/build-dependencies/action.yml +++ b/.github/actions/build-dependencies/action.yml @@ -42,8 +42,8 @@ runs: shell: bash run: | cargo install svm-rs - svm install 0.8.16 - svm use 0.8.16 + svm install 0.8.25 + svm use 0.8.25 # - name: Cache Rust # uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 diff --git a/.github/actions/test-dependencies/action.yml b/.github/actions/test-dependencies/action.yml index a19e17046..49c2fa642 100644 --- a/.github/actions/test-dependencies/action.yml +++ b/.github/actions/test-dependencies/action.yml @@ -10,7 +10,7 @@ inputs: bitcoin-version: description: "Bitcoin version to download and run as a regtest node" required: false - default: 24.0.1 + default: "27.0" runs: using: "composite" @@ -19,9 +19,9 @@ runs: uses: ./.github/actions/build-dependencies - name: Install Foundry - uses: foundry-rs/foundry-toolchain@cb603ca0abb544f301eaed59ac0baf579aa6aecf + uses: foundry-rs/foundry-toolchain@8f1998e9878d786675189ef566a2e4bf24869773 with: - version: nightly-09fe3e041369a816365a020f715ad6f94dbce9f2 + version: nightly-f625d0fa7c51e65b4bf1e8f7931cd1c6e2e285e9 cache: false - name: Run a Monero Regtest Node diff --git a/.github/nightly-version b/.github/nightly-version index 4a8f1e33b..1852d9b51 100644 --- a/.github/nightly-version +++ b/.github/nightly-version @@ -1 +1 @@ -nightly-2024-02-07 +nightly-2024-06-01 diff --git a/.github/workflows/coins-tests.yml b/.github/workflows/coins-tests.yml index a0437c61a..f94e9fd54 
100644 --- a/.github/workflows/coins-tests.yml +++ b/.github/workflows/coins-tests.yml @@ -30,6 +30,7 @@ jobs: run: | GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \ -p bitcoin-serai \ + -p alloy-simple-request-transport \ -p ethereum-serai \ -p monero-generators \ -p monero-serai diff --git a/.github/workflows/common-tests.yml b/.github/workflows/common-tests.yml index 0135fcaff..f0545f0b1 100644 --- a/.github/workflows/common-tests.yml +++ b/.github/workflows/common-tests.yml @@ -28,4 +28,5 @@ jobs: -p std-shims \ -p zalloc \ -p serai-db \ - -p serai-env + -p serai-env \ + -p simple-request diff --git a/.github/workflows/coordinator-tests.yml b/.github/workflows/coordinator-tests.yml index 7cc4d7b36..138fd1064 100644 --- a/.github/workflows/coordinator-tests.yml +++ b/.github/workflows/coordinator-tests.yml @@ -37,4 +37,4 @@ jobs: uses: ./.github/actions/build-dependencies - name: Run coordinator Docker tests - run: cd tests/coordinator && GITHUB_CI=true RUST_BACKTRACE=1 cargo test + run: cd tests/coordinator && GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features diff --git a/.github/workflows/full-stack-tests.yml b/.github/workflows/full-stack-tests.yml index 3d1c86a19..baacf7746 100644 --- a/.github/workflows/full-stack-tests.yml +++ b/.github/workflows/full-stack-tests.yml @@ -19,4 +19,4 @@ jobs: uses: ./.github/actions/build-dependencies - name: Run Full Stack Docker tests - run: cd tests/full-stack && GITHUB_CI=true RUST_BACKTRACE=1 cargo test + run: cd tests/full-stack && GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features diff --git a/.github/workflows/message-queue-tests.yml b/.github/workflows/message-queue-tests.yml index 273af237f..7894549c2 100644 --- a/.github/workflows/message-queue-tests.yml +++ b/.github/workflows/message-queue-tests.yml @@ -33,4 +33,4 @@ jobs: uses: ./.github/actions/build-dependencies - name: Run message-queue Docker tests - run: cd tests/message-queue && GITHUB_CI=true RUST_BACKTRACE=1 cargo test + run: 
cd tests/message-queue && GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features diff --git a/.github/workflows/pages.yml b/.github/workflows/pages.yml new file mode 100644 index 000000000..7c4a1f12b --- /dev/null +++ b/.github/workflows/pages.yml @@ -0,0 +1,90 @@ +# MIT License +# +# Copyright (c) 2022 just-the-docs +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +# This workflow uses actions that are not certified by GitHub. +# They are provided by a third-party and are governed by +# separate terms of service, privacy policy, and support +# documentation. 
+ +# Sample workflow for building and deploying a Jekyll site to GitHub Pages +name: Deploy Jekyll site to Pages + +on: + push: + branches: + - "develop" + paths: + - "docs/**" + + # Allows you to run this workflow manually from the Actions tab + workflow_dispatch: + +# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages +permissions: + contents: read + pages: write + id-token: write + +# Allow one concurrent deployment +concurrency: + group: "pages" + cancel-in-progress: true + +jobs: + # Build job + build: + runs-on: ubuntu-latest + defaults: + run: + working-directory: docs + steps: + - name: Checkout + uses: actions/checkout@v3 + - name: Setup Ruby + uses: ruby/setup-ruby@v1 + with: + bundler-cache: true + cache-version: 0 + working-directory: "${{ github.workspace }}/docs" + - name: Setup Pages + id: pages + uses: actions/configure-pages@v3 + - name: Build with Jekyll + run: bundle exec jekyll build --baseurl "${{ steps.pages.outputs.base_path }}" + env: + JEKYLL_ENV: production + - name: Upload artifact + uses: actions/upload-pages-artifact@v1 + with: + path: "docs/_site/" + + # Deployment job + deploy: + environment: + name: github-pages + url: ${{ steps.deployment.outputs.page_url }} + runs-on: ubuntu-latest + needs: build + steps: + - name: Deploy to GitHub Pages + id: deployment + uses: actions/deploy-pages@v2 diff --git a/.github/workflows/processor-tests.yml b/.github/workflows/processor-tests.yml index 88f4429cf..0b5ecbbe0 100644 --- a/.github/workflows/processor-tests.yml +++ b/.github/workflows/processor-tests.yml @@ -37,4 +37,4 @@ jobs: uses: ./.github/actions/build-dependencies - name: Run processor Docker tests - run: cd tests/processor && GITHUB_CI=true RUST_BACKTRACE=1 cargo test + run: cd tests/processor && GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features diff --git a/.github/workflows/reproducible-runtime.yml b/.github/workflows/reproducible-runtime.yml index 16256ab61..d34e5ca5d 100644 --- 
a/.github/workflows/reproducible-runtime.yml +++ b/.github/workflows/reproducible-runtime.yml @@ -33,4 +33,4 @@ jobs: uses: ./.github/actions/build-dependencies - name: Run Reproducible Runtime tests - run: cd tests/reproducible-runtime && GITHUB_CI=true RUST_BACKTRACE=1 cargo test + run: cd tests/reproducible-runtime && GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 257c1dd56..e32d21196 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -43,6 +43,7 @@ jobs: -p tendermint-machine \ -p tributary-chain \ -p serai-coordinator \ + -p serai-orchestrator \ -p serai-docker-tests test-substrate: @@ -64,7 +65,9 @@ jobs: -p serai-validator-sets-pallet \ -p serai-in-instructions-primitives \ -p serai-in-instructions-pallet \ + -p serai-signals-primitives \ -p serai-signals-pallet \ + -p serai-abi \ -p serai-runtime \ -p serai-node diff --git a/Cargo.lock b/Cargo.lock index 5d4e75f91..0fc295479 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -48,9 +48,9 @@ dependencies = [ [[package]] name = "aes" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac1f845298e95f983ff1944b728ae08b8cebab80d684f0a832ed0fc74dfa27e2" +checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0" dependencies = [ "cfg-if", "cipher", @@ -73,9 +73,9 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.7" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77c3a9648d43b9cd48db467b3f87fdd6e146bcc88ab0180006cef2179fe11d01" +checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if", "getrandom", @@ -86,18 +86,370 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" dependencies = [ "memchr", ] [[package]] name = "allocator-api2" -version = "0.2.16" +version = "0.2.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" + +[[package]] +name = "alloy-consensus" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=64feb9bc51c8021ea08535694c44de84222f474e#64feb9bc51c8021ea08535694c44de84222f474e" +dependencies = [ + "alloy-eips", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "c-kzg", + "serde", +] + +[[package]] +name = "alloy-core" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7253846c7bf55147775fd66c334abc1dd0a41e97e6155577b3dc513c6e66ef2" +dependencies = [ + "alloy-primitives", +] + +[[package]] +name = "alloy-eips" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=64feb9bc51c8021ea08535694c44de84222f474e#64feb9bc51c8021ea08535694c44de84222f474e" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "c-kzg", + "once_cell", + "serde", + "sha2", +] + +[[package]] +name = "alloy-genesis" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=64feb9bc51c8021ea08535694c44de84222f474e#64feb9bc51c8021ea08535694c44de84222f474e" +dependencies = [ + "alloy-primitives", + "alloy-serde", + "serde", + "serde_json", +] + +[[package]] +name = "alloy-json-abi" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e30946aa6173020259055a44971f5cf40a7d76c931d209caeb51b333263df4f" +dependencies = [ + "alloy-primitives", + "alloy-sol-type-parser", + "serde", +] + +[[package]] +name = "alloy-json-rpc" +version = "0.1.0" +source = 
"git+https://github.com/alloy-rs/alloy?rev=64feb9bc51c8021ea08535694c44de84222f474e#64feb9bc51c8021ea08535694c44de84222f474e" +dependencies = [ + "alloy-primitives", + "serde", + "serde_json", + "thiserror", + "tracing", +] + +[[package]] +name = "alloy-network" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=64feb9bc51c8021ea08535694c44de84222f474e#64feb9bc51c8021ea08535694c44de84222f474e" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-json-rpc", + "alloy-primitives", + "alloy-rpc-types", + "alloy-signer", + "alloy-sol-types", + "async-trait", + "futures-utils-wasm", + "thiserror", +] + +[[package]] +name = "alloy-node-bindings" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=64feb9bc51c8021ea08535694c44de84222f474e#64feb9bc51c8021ea08535694c44de84222f474e" +dependencies = [ + "alloy-genesis", + "alloy-primitives", + "k256", + "serde_json", + "tempfile", + "thiserror", + "tracing", + "url", +] + +[[package]] +name = "alloy-primitives" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" +checksum = "db8aa973e647ec336810a9356af8aea787249c9d00b1525359f3db29a68d231b" +dependencies = [ + "alloy-rlp", + "bytes", + "cfg-if", + "const-hex", + "derive_more", + "hex-literal", + "itoa", + "k256", + "keccak-asm", + "proptest", + "rand", + "ruint", + "serde", + "tiny-keccak", +] + +[[package]] +name = "alloy-provider" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=64feb9bc51c8021ea08535694c44de84222f474e#64feb9bc51c8021ea08535694c44de84222f474e" +dependencies = [ + "alloy-eips", + "alloy-json-rpc", + "alloy-network", + "alloy-primitives", + "alloy-rpc-client", + "alloy-rpc-types", + "alloy-rpc-types-trace", + "alloy-transport", + "async-stream", + "async-trait", + "auto_impl", + "dashmap", + "futures", + "futures-utils-wasm", + "lru", + "pin-project", + "serde_json", + "tokio", + 
"tracing", +] + +[[package]] +name = "alloy-rlp" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d58d9f5da7b40e9bfff0b7e7816700be4019db97d4b6359fe7f94a9e22e42ac" +dependencies = [ + "alloy-rlp-derive", + "arrayvec", + "bytes", +] + +[[package]] +name = "alloy-rlp-derive" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a047897373be4bbb0224c1afdabca92648dc57a9c9ef6e7b0be3aff7a859c83" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.65", +] + +[[package]] +name = "alloy-rpc-client" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=64feb9bc51c8021ea08535694c44de84222f474e#64feb9bc51c8021ea08535694c44de84222f474e" +dependencies = [ + "alloy-json-rpc", + "alloy-transport", + "alloy-transport-http", + "futures", + "pin-project", + "serde", + "serde_json", + "tokio", + "tokio-stream", + "tower", + "tracing", +] + +[[package]] +name = "alloy-rpc-types" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=64feb9bc51c8021ea08535694c44de84222f474e#64feb9bc51c8021ea08535694c44de84222f474e" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-genesis", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "alloy-sol-types", + "itertools 0.12.1", + "serde", + "serde_json", + "thiserror", +] + +[[package]] +name = "alloy-rpc-types-trace" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=64feb9bc51c8021ea08535694c44de84222f474e#64feb9bc51c8021ea08535694c44de84222f474e" +dependencies = [ + "alloy-primitives", + "alloy-rpc-types", + "alloy-serde", + "serde", + "serde_json", +] + +[[package]] +name = "alloy-serde" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=64feb9bc51c8021ea08535694c44de84222f474e#64feb9bc51c8021ea08535694c44de84222f474e" +dependencies = [ + "alloy-primitives", + "serde", + "serde_json", +] + +[[package]] +name = "alloy-signer" +version = "0.1.0" +source = 
"git+https://github.com/alloy-rs/alloy?rev=64feb9bc51c8021ea08535694c44de84222f474e#64feb9bc51c8021ea08535694c44de84222f474e" +dependencies = [ + "alloy-primitives", + "async-trait", + "auto_impl", + "elliptic-curve", + "k256", + "thiserror", +] + +[[package]] +name = "alloy-simple-request-transport" +version = "0.1.0" +dependencies = [ + "alloy-json-rpc", + "alloy-transport", + "serde_json", + "simple-request", + "tower", +] + +[[package]] +name = "alloy-sol-macro" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7dbd17d67f3e89478c8a634416358e539e577899666c927bc3d2b1328ee9b6ca" +dependencies = [ + "alloy-sol-macro-expander", + "alloy-sol-macro-input", + "proc-macro-error", + "proc-macro2", + "quote", + "syn 2.0.65", +] + +[[package]] +name = "alloy-sol-macro-expander" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c6da95adcf4760bb4b108fefa51d50096c5e5fdd29ee72fed3e86ee414f2e34" +dependencies = [ + "alloy-json-abi", + "alloy-sol-macro-input", + "const-hex", + "heck 0.4.1", + "indexmap 2.2.6", + "proc-macro-error", + "proc-macro2", + "quote", + "syn 2.0.65", + "syn-solidity", + "tiny-keccak", +] + +[[package]] +name = "alloy-sol-macro-input" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32c8da04c1343871fb6ce5a489218f9c85323c8340a36e9106b5fc98d4dd59d5" +dependencies = [ + "alloy-json-abi", + "const-hex", + "dunce", + "heck 0.5.0", + "proc-macro2", + "quote", + "serde_json", + "syn 2.0.65", + "syn-solidity", +] + +[[package]] +name = "alloy-sol-type-parser" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "368cae4dc052cad1d8f72eb2ae0c38027116933eeb49213c200a9e9875f208d7" +dependencies = [ + "winnow 0.6.8", +] + +[[package]] +name = "alloy-sol-types" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"40a64d2d2395c1ac636b62419a7b17ec39031d6b2367e66e9acbf566e6055e9c" +dependencies = [ + "alloy-json-abi", + "alloy-primitives", + "alloy-sol-macro", + "const-hex", +] + +[[package]] +name = "alloy-transport" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=64feb9bc51c8021ea08535694c44de84222f474e#64feb9bc51c8021ea08535694c44de84222f474e" +dependencies = [ + "alloy-json-rpc", + "base64 0.22.1", + "futures-util", + "futures-utils-wasm", + "serde", + "serde_json", + "thiserror", + "tokio", + "tower", + "url", + "wasm-bindgen-futures", +] + +[[package]] +name = "alloy-transport-http" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=64feb9bc51c8021ea08535694c44de84222f474e#64feb9bc51c8021ea08535694c44de84222f474e" +dependencies = [ + "alloy-transport", + "url", +] [[package]] name = "android-tzdata" @@ -125,47 +477,48 @@ dependencies = [ [[package]] name = "anstream" -version = "0.6.11" +version = "0.6.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e2e1ebcb11de5c03c67de28a7df593d32191b44939c482e97702baaaa6ab6a5" +checksum = "418c75fa768af9c03be99d17643f93f79bbba589895012a80e3452a19ddda15b" dependencies = [ "anstyle", "anstyle-parse", "anstyle-query", "anstyle-wincon", "colorchoice", + "is_terminal_polyfill", "utf8parse", ] [[package]] name = "anstyle" -version = "1.0.6" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8901269c6307e8d93993578286ac0edf7f195079ffff5ebdeea6a59ffb7e36bc" +checksum = "038dfcf04a5feb68e9c60b21c9625a54c2c0616e79b72b0fd87075a056ae1d1b" [[package]] name = "anstyle-parse" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c75ac65da39e5fe5ab759307499ddad880d724eed2f6ce5b5e8a26f4f387928c" +checksum = "c03a11a9034d92058ceb6ee011ce58af4a9bf61491aa7e1e59ecd24bd40d22d4" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.0.2" +version 
= "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e28923312444cdd728e4738b3f9c9cac739500909bb3d3c94b43551b16517648" +checksum = "a64c907d4e79225ac72e2a354c9ce84d50ebb4586dee56c82b3ee73004f537f5" dependencies = [ "windows-sys 0.52.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.2" +version = "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cd54b81ec8d6180e24654d0b371ad22fc3dd083b6ff8ba325b72e00c87660a7" +checksum = "61a38449feb7068f52bb06c12759005cf459ee52bb4adc1d5a7c4322d716fb19" dependencies = [ "anstyle", "windows-sys 0.52.0", @@ -173,9 +526,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.79" +version = "1.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "080e9890a082662b09c1ad45f567faeeb47f22b5fb23895fbe1e651e718e25ca" +checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da" [[package]] name = "approx" @@ -192,11 +545,135 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7d5a26814d8dcb93b0e5a0ff3c6d80a8843bafb21b39e8e18a6f05471870e110" +[[package]] +name = "ark-ff" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b3235cc41ee7a12aaaf2c575a2ad7b46713a8a50bda2fc3b003a04845c05dd6" +dependencies = [ + "ark-ff-asm 0.3.0", + "ark-ff-macros 0.3.0", + "ark-serialize 0.3.0", + "ark-std 0.3.0", + "derivative", + "num-bigint", + "num-traits", + "paste", + "rustc_version 0.3.3", + "zeroize", +] + +[[package]] +name = "ark-ff" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec847af850f44ad29048935519032c33da8aa03340876d351dfab5660d2966ba" +dependencies = [ + "ark-ff-asm 0.4.2", + "ark-ff-macros 0.4.2", + "ark-serialize 0.4.2", + "ark-std 0.4.0", + "derivative", + "digest 0.10.7", + "itertools 0.10.5", + "num-bigint", + "num-traits", + "paste", + "rustc_version 0.4.0", + 
"zeroize", +] + +[[package]] +name = "ark-ff-asm" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db02d390bf6643fb404d3d22d31aee1c4bc4459600aef9113833d17e786c6e44" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-asm" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ed4aa4fe255d0bc6d79373f7e31d2ea147bcf486cba1be5ba7ea85abdb92348" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-macros" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db2fd794a08ccb318058009eefdf15bcaaaaf6f8161eb3345f907222bac38b20" +dependencies = [ + "num-bigint", + "num-traits", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-macros" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565" +dependencies = [ + "num-bigint", + "num-traits", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-serialize" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d6c2b318ee6e10f8c2853e73a83adc0ccb88995aa978d8a3408d492ab2ee671" +dependencies = [ + "ark-std 0.3.0", + "digest 0.9.0", +] + +[[package]] +name = "ark-serialize" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adb7b85a02b83d2f22f89bd5cac66c9c89474240cb6207cb1efc16d098e822a5" +dependencies = [ + "ark-std 0.4.0", + "digest 0.10.7", + "num-bigint", +] + +[[package]] +name = "ark-std" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1df2c09229cbc5a028b1d70e00fdb2acee28b1055dfb5ca73eea49c5a25c4e7c" +dependencies = [ + "num-traits", + "rand", +] + +[[package]] +name = "ark-std" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185" +dependencies = [ + "num-traits", + "rand", +] + [[package]] name = "array-bytes" -version = "6.2.2" +version = "6.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f840fb7195bcfc5e17ea40c26e5ce6d5b9ce5d584466e17703209657e459ae0" +checksum = "5d5dde061bd34119e902bbb2d9b90c5692635cf59fb91d582c2b68043f1b8293" [[package]] name = "arrayref" @@ -262,9 +739,9 @@ dependencies = [ [[package]] name = "async-io" -version = "2.3.1" +version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f97ab0c5b00a7cdbe5a371b9a782ee7be1316095885c8a4ea1daf490eb0ef65" +checksum = "dcccb0f599cfa2f8ace422d3555572f47424da5648a4382a9dd0310ff8210884" dependencies = [ "async-lock", "cfg-if", @@ -287,40 +764,40 @@ checksum = "d034b430882f8381900d3fe6f0aaa3ad94f2cb4ac519b429692a1bc2dda4ae7b" dependencies = [ "event-listener 4.0.3", "event-listener-strategy", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.14", ] [[package]] -name = "async-recursion" -version = "1.0.5" +name = "async-stream" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fd55a5ba1179988837d24ab4c7cc8ed6efdeff578ede0416b4225a5fca35bd0" +checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.48", + "async-stream-impl", + "futures-core", + "pin-project-lite 0.2.14", ] [[package]] -name = "async-trait" -version = "0.1.77" +name = "async-stream-impl" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9" +checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.65", ] [[package]] -name = "async_io_stream" -version = "0.3.3" +name = "async-trait" +version = 
"0.1.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6d7b9decdf35d8908a7e3ef02f64c5e9b1695e230154c0e8de3969142d9b94c" +checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" dependencies = [ - "futures", - "pharos", - "rustc_version", + "proc-macro2", + "quote", + "syn 2.0.65", ] [[package]] @@ -333,7 +810,7 @@ dependencies = [ "futures-sink", "futures-util", "memchr", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.14", ] [[package]] @@ -342,33 +819,33 @@ version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d9a9bf8b79a749ee0b911b91b671cc2b6c670bdbc7e3dfd537576ddc94bb2a2" dependencies = [ - "http 0.2.11", + "http 0.2.12", "log", "url", ] [[package]] name = "auto_impl" -version = "1.1.2" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "823b8bb275161044e2ac7a25879cb3e2480cb403e3943022c7c769c599b756aa" +checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.65", ] [[package]] name = "autocfg" -version = "1.1.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" [[package]] name = "backtrace" -version = "0.3.69" +version = "0.3.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" +checksum = "26b05800d2e817c8b3b4b54abd461726265fa9789ae34330622f2db9ee696f9d" dependencies = [ "addr2line 0.21.0", "cc", @@ -401,6 +878,16 @@ dependencies = [ "tiny-keccak", ] +[[package]] +name = "base58ck" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c8d66485a3a2ea485c1913c4572ce0256067a5377ac8c75c4960e1cda98605f" 
+dependencies = [ + "bitcoin-internals", + "bitcoin_hashes", +] + [[package]] name = "base64" version = "0.13.1" @@ -413,6 +900,12 @@ version = "0.21.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" +[[package]] +name = "base64" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" + [[package]] name = "base64ct" version = "1.6.0" @@ -421,9 +914,9 @@ checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" [[package]] name = "bech32" -version = "0.10.0-beta" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98f7eed2b2781a6f0b5c903471d48e15f56fb4e1165df8a9a2337fd1a59d45ea" +checksum = "d965446196e3b7decd44aa7ee49e31d630118f90ef12f97900f262eb915c951d" [[package]] name = "beef" @@ -445,35 +938,51 @@ dependencies = [ [[package]] name = "bindgen" -version = "0.65.1" +version = "0.69.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfdf7b466f9a4903edc73f95d6d2bcd5baf8ae620638762244d3f60143643cc5" +checksum = "a00dc851838a2120612785d195287475a3ac45514741da670b735818822129a0" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.5.0", "cexpr", "clang-sys", + "itertools 0.12.1", "lazy_static", "lazycell", - "peeking_take_while", - "prettyplease 0.2.16", "proc-macro2", "quote", "regex", "rustc-hash", "shlex", - "syn 2.0.48", + "syn 2.0.65", +] + +[[package]] +name = "bit-set" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0700ddab506f33b20a03b13996eccd309a48e5ff77d0d95926aa0210fb4e95f1" +dependencies = [ + "bit-vec", ] +[[package]] +name = "bit-vec" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" + [[package]] 
name = "bitcoin" -version = "0.31.1" +version = "0.32.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd00f3c09b5f21fb357abe32d29946eb8bb7a0862bae62c0b5e4a692acbbe73c" +checksum = "4bf33434c870e98ecc8608588ccc990c5daba9ba9ad39733dc85fba22c211504" dependencies = [ + "base58ck", "bech32", "bitcoin-internals", + "bitcoin-io", + "bitcoin-units", "bitcoin_hashes", - "core2 0.3.3", "hex-conservative", "hex_lit", "secp256k1", @@ -482,13 +991,19 @@ dependencies = [ [[package]] name = "bitcoin-internals" -version = "0.2.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9425c3bf7089c983facbae04de54513cce73b41c7f9ff8c845b54e7bc64ebbfb" +checksum = "30bdbe14aa07b06e6cfeffc529a1f099e5fbe249524f8125358604df99a4bed2" dependencies = [ "serde", ] +[[package]] +name = "bitcoin-io" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "340e09e8399c7bd8912f495af6aa58bea0c9214773417ffaa8f6460f93aaee56" + [[package]] name = "bitcoin-serai" version = "0.3.0" @@ -510,13 +1025,22 @@ dependencies = [ ] [[package]] -name = "bitcoin_hashes" -version = "0.13.0" +name = "bitcoin-units" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1930a4dabfebb8d7d9992db18ebe3ae2876f0a305fab206fd168df931ede293b" +checksum = "cb54da0b28892f3c52203a7191534033e051b6f4b52bc15480681b57b7e036f5" dependencies = [ "bitcoin-internals", - "core2 0.3.3", + "serde", +] + +[[package]] +name = "bitcoin_hashes" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb18c03d0db0247e147a21a6faafd5a7eb851c743db062de72018b6b7e8e4d16" +dependencies = [ + "bitcoin-io", "hex-conservative", "serde", ] @@ -529,9 +1053,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.4.2" +version = "2.5.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed570934406eb16438a4e976b1b4500774099c13b8cb96eec99f620f05090ddf" +checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1" [[package]] name = "bitvec" @@ -578,9 +1102,9 @@ dependencies = [ [[package]] name = "blake3" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0231f06152bf547e9c2b5194f247cd97aacf6dcd8b15d8e5ec0663f64580da87" +checksum = "30cca6d3674597c30ddf2c587bf8d9d65c9a84d2326d941cc79c9842dfe0ef52" dependencies = [ "arrayref", "arrayvec", @@ -629,6 +1153,18 @@ dependencies = [ "subtle", ] +[[package]] +name = "blst" +version = "0.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c94087b935a822949d3291a9989ad2b2051ea141eda0fd4e478a75f6aa3e604b" +dependencies = [ + "cc", + "glob", + "threadpool", + "zeroize", +] + [[package]] name = "bollard" version = "0.15.0" @@ -641,11 +1177,11 @@ dependencies = [ "futures-core", "futures-util", "hex", - "http 0.2.11", + "http 0.2.12", "hyper 0.14.28", "hyperlocal", "log", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.14", "serde", "serde_derive", "serde_json", @@ -671,9 +1207,9 @@ dependencies = [ [[package]] name = "borsh" -version = "1.3.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f58b559fd6448c6e2fd0adb5720cd98a2506594cafa4737ff98c396f3e82f667" +checksum = "dbe5b10e214954177fb1dc9fbd20a1a2608fe99e6c832033bdc7cea287a20d77" dependencies = [ "borsh-derive", "cfg_aliases", @@ -681,15 +1217,15 @@ dependencies = [ [[package]] name = "borsh-derive" -version = "1.3.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7aadb5b6ccbd078890f6d7003694e33816e6b784358f18e15e7e6d9f065a57cd" +checksum = "d7a8646f94ab393e43e8b35a2558b1624bed28b97ee09c5d15456e3c9463f46d" dependencies = [ "once_cell", "proc-macro-crate 3.1.0", "proc-macro2", 
"quote", - "syn 2.0.48", + "syn 2.0.65", "syn_derive", ] @@ -707,18 +1243,18 @@ dependencies = [ [[package]] name = "bs58" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5353f36341f7451062466f0b755b96ac3a9547e4d7f6b70d603fc721a7d7896" +checksum = "bf88ba1141d185c399bee5288d850d63b8369520c1eafc32a0430b5b6c287bf4" dependencies = [ "tinyvec", ] [[package]] name = "bstr" -version = "1.9.0" +version = "1.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c48f0051a4b4c5e0b6d365cd04af53aeaa209e3cc15ec2cdb69e73cc87fbd0dc" +checksum = "05efc5cfd9110c8416e471df0e96702d58690178e206e61b7173706673c93706" dependencies = [ "memchr", "serde", @@ -735,9 +1271,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.14.0" +version = "3.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec" +checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" [[package]] name = "byte-slice-cast" @@ -747,9 +1283,9 @@ checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" [[package]] name = "bytemuck" -version = "1.14.2" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea31d69bda4949c1c1562c1e6f042a1caefac98cdc8a298260a2ff41c1e2d42b" +checksum = "78834c15cb5d5efe3452d58b1e8ba890dd62d21907f867f383358198e56ebca5" [[package]] name = "byteorder" @@ -759,9 +1295,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.5.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" +checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" dependencies = [ "serde", ] @@ -777,20 +1313,34 @@ dependencies = [ "pkg-config", ] 
+[[package]] +name = "c-kzg" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdf100c4cea8f207e883ff91ca886d621d8a166cb04971dfaa9bb8fd99ed95df" +dependencies = [ + "blst", + "cc", + "glob", + "hex", + "libc", + "serde", +] + [[package]] name = "camino" -version = "1.1.6" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c59e92b5a388f549b863a7bea62612c09f24c8393560709a54558a9abdfb3b9c" +checksum = "e0ec6b951b160caa93cc0c7b209e5a3bff7aae9062213451ac99493cd844c239" dependencies = [ "serde", ] [[package]] name = "cargo-platform" -version = "0.1.6" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ceed8ef69d8518a5dda55c07425450b58a4e1946f4951eab6d7191ee86c2443d" +checksum = "24b1f0365a6c6bb4020cd05806fd0d33c44d38046b8bd7f0e40814b9763cabfc" dependencies = [ "serde", ] @@ -803,7 +1353,7 @@ checksum = "e7daec1a2a2129eeba1644b220b4647ec537b0b5d4bfd6876fcc5a540056b592" dependencies = [ "camino", "cargo-platform", - "semver 1.0.21", + "semver 1.0.23", "serde", "serde_json", "thiserror", @@ -811,12 +1361,13 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.83" +version = "1.0.98" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" +checksum = "41c270e7540d725e65ac7f1b212ac8ce349719624d7bcff99f8e2e488e8cf03f" dependencies = [ "jobserver", "libc", + "once_cell", ] [[package]] @@ -830,9 +1381,9 @@ dependencies = [ [[package]] name = "cfg-expr" -version = "0.15.6" +version = "0.15.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6100bc57b6209840798d95cb2775684849d332f7bd788db2a8c8caf7ef82a41a" +checksum = "d067ad48b8650848b989a59a86c6c36a995d02d2bf778d45c3c5d57bc2718f02" dependencies = [ "smallvec", ] @@ -875,9 +1426,9 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.33" +version = "0.4.38" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f13690e35a5e4ace198e7beea2895d29f3a9cc55015fcebe6336bd2010af9eb" +checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401" dependencies = [ "android-tzdata", "iana-time-zone", @@ -885,7 +1436,7 @@ dependencies = [ "num-traits", "serde", "wasm-bindgen", - "windows-targets 0.52.0", + "windows-targets 0.52.5", ] [[package]] @@ -894,7 +1445,7 @@ version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fd94671561e36e4e7de75f753f577edafb0e7c05d6e4547229fdf7938fbcd2c3" dependencies = [ - "core2 0.4.0", + "core2", "multibase", "multihash 0.18.1", "serde", @@ -948,9 +1499,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.4.18" +version = "4.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e578d6ec4194633722ccf9544794b71b1385c3c027efe0c55db226fc880865c" +checksum = "90bc066a67923782aa8515dbaea16946c5bcc5addbd668bb80af688e53e548a0" dependencies = [ "clap_builder", "clap_derive", @@ -958,9 +1509,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.4.18" +version = "4.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4df4df40ec50c46000231c914968278b1eb05098cf8f1b3a518a95030e71d1c7" +checksum = "ae129e2e766ae0ec03484e609954119f123cc1fe650337e155d03b022f24f7b4" dependencies = [ "anstream", "anstyle", @@ -970,21 +1521,21 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.4.7" +version = "4.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf9804afaaf59a91e75b022a30fb7229a7901f60c755489cc61c9b423b836442" +checksum = "528131438037fd55894f62d6e9f068b8f45ac57ffa77517819645d10aed04f64" dependencies = [ - "heck", + "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.65", ] [[package]] name = "clap_lex" -version = "0.6.0" +version = "0.7.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "702fc72eb24e5a1e48ce58027a675bc24edd52096d5397d4aea7c6dd9eca0bd1" +checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" [[package]] name = "codespan-reporting" @@ -998,28 +1549,29 @@ dependencies = [ [[package]] name = "colorchoice" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" +checksum = "0b6a852b24ab71dffc585bcb46eaf7959d175cb865a7152e35b348d1b2960422" [[package]] name = "concurrent-queue" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d16048cd947b08fa32c24458a22f5dc5e835264f689f4f5653210c69fd107363" +checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" dependencies = [ "crossbeam-utils", ] [[package]] name = "const-hex" -version = "1.9.1" +version = "1.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c37be52ef5e3b394db27a2341010685ad5103c72ac15ce2e9420a7e8f93f342c" +checksum = "70ff96486ccc291d36a958107caf2c0af8c78c0af7d31ae2f35ce055130de1a6" dependencies = [ "cfg-if", "cpufeatures", "hex", + "proptest", "serde", ] @@ -1031,9 +1583,9 @@ checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" [[package]] name = "const-random" -version = "0.1.17" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aaf16c9c2c612020bcfd042e170f6e32de9b9d75adb5277cdbbd2e2c8c8299a" +checksum = "87e00182fe74b066627d63b85fd550ac2998d4b0bd86bfed477a0ae4c7c71359" dependencies = [ "const-random-macro", ] @@ -1055,6 +1607,12 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7144d30dcf0fafbce74250a3963025d8d52177934239851c917d29f1df280c2" +[[package]] +name = "convert_case" +version = "0.4.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" + [[package]] name = "core-foundation" version = "0.9.4" @@ -1071,15 +1629,6 @@ version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" -[[package]] -name = "core2" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "239fa3ae9b63c2dc74bd3fa852d4792b8b305ae64eeede946265b6af62f1fff3" -dependencies = [ - "memchr", -] - [[package]] name = "core2" version = "0.4.0" @@ -1208,7 +1757,7 @@ dependencies = [ "cranelift-codegen", "cranelift-entity", "cranelift-frontend", - "itertools", + "itertools 0.10.5", "log", "smallvec", "wasmparser", @@ -1217,9 +1766,9 @@ dependencies = [ [[package]] name = "crc32fast" -version = "1.3.2" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" +checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" dependencies = [ "cfg-if", ] @@ -1245,9 +1794,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.19" +version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" +checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" [[package]] name = "crunchy" @@ -1301,7 +1850,7 @@ dependencies = [ "group", "platforms", "rand_core", - "rustc_version", + "rustc_version 0.4.0", "subtle", "zeroize", ] @@ -1314,14 +1863,14 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.65", ] [[package]] name = "cxx" -version = "1.0.115" +version = "1.0.122" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "8de00f15a6fa069c99b88c5c78c4541d0e7899a33b86f7480e23df2431fce0bc" +checksum = "bb497fad022245b29c2a0351df572e2d67c1046bcef2260ebc022aec81efea82" dependencies = [ "cc", "cxxbridge-flags", @@ -1331,9 +1880,9 @@ dependencies = [ [[package]] name = "cxx-build" -version = "1.0.115" +version = "1.0.122" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a71e1e631fa2f2f5f92e8b0d860a00c198c6771623a6cefcc863e3554f0d8d6" +checksum = "9327c7f9fbd6329a200a5d4aa6f674c60ab256525ff0084b52a889d4e4c60cee" dependencies = [ "cc", "codespan-reporting", @@ -1341,24 +1890,24 @@ dependencies = [ "proc-macro2", "quote", "scratch", - "syn 2.0.48", + "syn 2.0.65", ] [[package]] name = "cxxbridge-flags" -version = "1.0.115" +version = "1.0.122" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f3fed61d56ba497c4efef9144dfdbaa25aa58f2f6b3a7cf441d4591c583745c" +checksum = "688c799a4a846f1c0acb9f36bb9c6272d9b3d9457f3633c7753c6057270df13c" [[package]] name = "cxxbridge-macro" -version = "1.0.115" +version = "1.0.122" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8908e380a8efd42150c017b0cfa31509fc49b6d47f7cb6b33e93ffb8f4e3661e" +checksum = "928bc249a7e3cd554fd2e8e08a426e9670c50bbfc9a621653cfa9accc9641783" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.65", ] [[package]] @@ -1377,17 +1926,30 @@ dependencies = [ "zeroize", ] +[[package]] +name = "dashmap" +version = "5.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" +dependencies = [ + "cfg-if", + "hashbrown 0.14.5", + "lock_api", + "once_cell", + "parking_lot_core 0.9.10", +] + [[package]] name = "data-encoding" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e962a19be5cfc3f3bf6dd8f61eb50107f356ad6270fbb3ed41476571db78be5" +checksum = 
"e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2" [[package]] name = "data-encoding-macro" -version = "0.1.14" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20c01c06f5f429efdf2bae21eb67c28b3df3cf85b7dd2d8ef09c0838dac5d33e" +checksum = "f1559b6cba622276d6d63706db152618eeb15b89b3e4041446b05876e352e639" dependencies = [ "data-encoding", "data-encoding-macro-internal", @@ -1395,9 +1957,9 @@ dependencies = [ [[package]] name = "data-encoding-macro-internal" -version = "0.1.12" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0047d07f2c89b17dd631c80450d69841a6b5d7fb17278cbc43d7e4cfcf2576f3" +checksum = "332d754c0af53bc87c108fed664d121ecf59207ec4196041f04d6ab9002ad33f" dependencies = [ "data-encoding", "syn 1.0.109", @@ -1414,9 +1976,9 @@ dependencies = [ [[package]] name = "der" -version = "0.7.8" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c" +checksum = "f55bf8e7b65898637379c1b74eb1551107c8294ed26d855ceb9fd1a09cfc9bc0" dependencies = [ "const-oid", "zeroize", @@ -1446,6 +2008,17 @@ dependencies = [ "serde", ] +[[package]] +name = "derivative" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "derive-syn-parse" version = "0.1.5" @@ -1463,8 +2036,10 @@ version = "0.99.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ + "convert_case", "proc-macro2", "quote", + "rustc_version 0.4.0", "syn 1.0.109", ] @@ -1544,7 +2119,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", 
"quote", - "syn 2.0.48", + "syn 2.0.65", ] [[package]] @@ -1586,7 +2161,7 @@ dependencies = [ [[package]] name = "dockertest" version = "0.4.0" -source = "git+https://github.com/kayabaNerve/dockertest-rs?branch=arc#c0ea77997048f9edc9987984bbe20e43fac74e06" +source = "git+https://github.com/orcalabs/dockertest-rs?rev=4dd6ae24738aa6dc5c89444cc822ea4745517493#4dd6ae24738aa6dc5c89444cc822ea4745517493" dependencies = [ "anyhow", "async-trait", @@ -1645,9 +2220,9 @@ dependencies = [ [[package]] name = "dyn-clone" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "545b22097d44f8a9581187cdf93de7a71e4722bf51200cfaba810865b49a495d" +checksum = "0d6ef0072f8a535281e4876be788938b528e9a1d43900b82c2569af7da799125" [[package]] name = "ecdsa" @@ -1696,7 +2271,7 @@ checksum = "7d9ce6874da5d4415896cd45ffbc4d1cfc0c4f9c079427bd870742c30f2f65a9" dependencies = [ "curve25519-dalek", "ed25519", - "hashbrown 0.14.3", + "hashbrown 0.14.5", "hex", "rand_core", "sha2", @@ -1705,9 +2280,9 @@ dependencies = [ [[package]] name = "either" -version = "1.9.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" +checksum = "3dca9240753cf90908d7e4aac30f630662b02aebaa1b58a3cadabdb23385b58b" [[package]] name = "elliptic-curve" @@ -1729,40 +2304,13 @@ dependencies = [ "zeroize", ] -[[package]] -name = "encoding_rs" -version = "0.8.33" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7268b386296a025e474d5140678f75d6de9493ae55a5d709eeb9dd08149945e1" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "enr" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe81b5c06ecfdbc71dd845216f225f53b62a10cb8a16c946836a3467f701d05b" -dependencies = [ - "base64 0.21.7", - "bytes", - "hex", - "k256", - "log", - "rand", - "rlp", - "serde", - "sha3", - "zeroize", -] 
- [[package]] name = "enum-as-inner" version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c9720bba047d567ffc8a3cba48bf19126600e249ab7f128e9233e6376976a116" dependencies = [ - "heck", + "heck 0.4.1", "proc-macro2", "quote", "syn 1.0.109", @@ -1774,10 +2322,10 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5ffccbb6966c05b32ef8fbac435df276c4ae4d3dc55a8cd0eb9745e6c12f546a" dependencies = [ - "heck", + "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.65", ] [[package]] @@ -1807,204 +2355,34 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "errno" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" +checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" dependencies = [ "libc", "windows-sys 0.52.0", ] -[[package]] -name = "ethabi" -version = "18.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7413c5f74cc903ea37386a8965a936cbeb334bd270862fdece542c1b2dcbc898" -dependencies = [ - "ethereum-types", - "hex", - "once_cell", - "regex", - "serde", - "serde_json", - "sha3", - "thiserror", - "uint", -] - -[[package]] -name = "ethbloom" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c22d4b5885b6aa2fe5e8b9329fb8d232bf739e434e6b87347c63bdd00c120f60" -dependencies = [ - "crunchy", - "fixed-hash", - "impl-codec", - "impl-rlp", - "impl-serde", - "scale-info", - "tiny-keccak", -] - [[package]] name = "ethereum-serai" version = "0.1.0" dependencies = [ - "ethers-contract", - "ethers-core", - "ethers-providers", - "eyre", + "alloy-consensus", + "alloy-core", + "alloy-network", + "alloy-node-bindings", + "alloy-provider", + "alloy-rpc-client", + "alloy-rpc-types", + 
"alloy-simple-request-transport", + "alloy-sol-types", + "flexible-transcript", "group", - "hex", "k256", "modular-frost", "rand_core", - "serde", - "serde_json", - "sha2", - "sha3", - "thiserror", - "tokio", -] - -[[package]] -name = "ethereum-types" -version = "0.14.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02d215cbf040552efcbe99a38372fe80ab9d00268e20012b79fcd0f073edd8ee" -dependencies = [ - "ethbloom", - "fixed-hash", - "impl-codec", - "impl-rlp", - "impl-serde", - "primitive-types", - "scale-info", - "uint", -] - -[[package]] -name = "ethers-contract" -version = "2.0.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d79269278125006bb0552349c03593ffa9702112ca88bc7046cc669f148fb47c" -dependencies = [ - "const-hex", - "ethers-contract-abigen", - "ethers-contract-derive", - "ethers-core", - "ethers-providers", - "futures-util", - "once_cell", - "pin-project", - "serde", - "serde_json", - "thiserror", -] - -[[package]] -name = "ethers-contract-abigen" -version = "2.0.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce95a43c939b2e4e2f3191c5ad4a1f279780b8a39139c9905b43a7433531e2ab" -dependencies = [ - "Inflector", - "const-hex", - "dunce", - "ethers-core", - "eyre", - "prettyplease 0.2.16", - "proc-macro2", - "quote", - "regex", - "serde", - "serde_json", - "syn 2.0.48", - "toml 0.7.8", - "walkdir", -] - -[[package]] -name = "ethers-contract-derive" -version = "2.0.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e9ce44906fc871b3ee8c69a695ca7ec7f70e50cb379c9b9cb5e532269e492f6" -dependencies = [ - "Inflector", - "const-hex", - "ethers-contract-abigen", - "ethers-core", - "proc-macro2", - "quote", - "serde_json", - "syn 2.0.48", -] - -[[package]] -name = "ethers-core" -version = "2.0.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0a17f0708692024db9956b31d7a20163607d2745953f5ae8125ab368ba280ad" 
-dependencies = [ - "arrayvec", - "bytes", - "cargo_metadata", - "chrono", - "const-hex", - "elliptic-curve", - "ethabi", - "generic-array 0.14.7", - "k256", - "num_enum", - "once_cell", - "open-fastrlp", - "rand", - "rlp", - "serde", - "serde_json", - "strum 0.25.0", - "syn 2.0.48", - "tempfile", - "thiserror", - "tiny-keccak", - "unicode-xid", -] - -[[package]] -name = "ethers-providers" -version = "2.0.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6838fa110e57d572336178b7c79e94ff88ef976306852d8cb87d9e5b1fc7c0b5" -dependencies = [ - "async-trait", - "auto_impl", - "base64 0.21.7", - "bytes", - "const-hex", - "enr", - "ethers-core", - "futures-core", - "futures-timer", - "futures-util", - "hashers", - "http 0.2.11", - "instant", - "jsonwebtoken", - "once_cell", - "pin-project", - "reqwest", - "serde", - "serde_json", "thiserror", "tokio", - "tracing", - "tracing-futures", - "url", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", - "ws_stream_wasm", ] [[package]] @@ -2021,7 +2399,7 @@ checksum = "67b215c49b2b248c855fb73579eb1f4f26c38ffdc12973e20e07b91d78d5646e" dependencies = [ "concurrent-queue", "parking", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.14", ] [[package]] @@ -2031,7 +2409,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "958e4d70b6d5e81971bebec42271ec641e7ff4e170a6fa605f2b8a8b65cb97d3" dependencies = [ "event-listener 4.0.3", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.14", ] [[package]] @@ -2053,17 +2431,7 @@ dependencies = [ "fs-err", "proc-macro2", "quote", - "syn 2.0.48", -] - -[[package]] -name = "eyre" -version = "0.6.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cd915d99f24784cdc19fd37ef22b97e3ff0ae756c7e492e9fbfe897d61e2aec" -dependencies = [ - "indenter", - "once_cell", + "syn 2.0.65", ] [[package]] @@ -2074,9 +2442,20 @@ checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" [[package]] 
name = "fastrand" -version = "2.0.1" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a" + +[[package]] +name = "fastrlp" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" +checksum = "139834ddba373bbdd213dffe02c8d110508dcf1726c2be27e8d1f7d7e1856418" +dependencies = [ + "arrayvec", + "auto_impl", + "bytes", +] [[package]] name = "fdlimit" @@ -2114,9 +2493,9 @@ dependencies = [ [[package]] name = "fiat-crypto" -version = "0.2.6" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1676f435fc1dadde4d03e43f5d62b259e1ce5f40bd4ffb21db2b42ebe59c1382" +checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" [[package]] name = "file-per-thread-logger" @@ -2152,7 +2531,7 @@ dependencies = [ "log", "num-traits", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "scale-info", ] @@ -2322,12 +2701,12 @@ dependencies = [ "derive-syn-parse", "expander", "frame-support-procedural-tools", - "itertools", + "itertools 0.10.5", "macro_magic", "proc-macro-warning", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.65", ] [[package]] @@ -2339,7 +2718,7 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.65", ] [[package]] @@ -2349,7 +2728,7 @@ source = "git+https://github.com/serai-dex/substrate#6e3f07bf5c98a6a3ec15f2b1a46 dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.65", ] [[package]] @@ -2492,12 +2871,12 @@ checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" [[package]] name = "futures-lite" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "445ba825b27408685aaecefd65178908c36c6e96aaf6d8599419d46e624192ba" +checksum = 
"52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5" dependencies = [ "futures-core", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.14", ] [[package]] @@ -2508,7 +2887,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.65", ] [[package]] @@ -2518,7 +2897,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "35bd3cf68c183738046838e300353e4716c674dc5e56890de4826801a6622a28" dependencies = [ "futures-io", - "rustls 0.21.10", + "rustls 0.21.12", ] [[package]] @@ -2546,13 +2925,9 @@ dependencies = [ [[package]] name = "futures-timer" -version = "3.0.2" +version = "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" -dependencies = [ - "gloo-timers", - "send_wrapper 0.4.0", -] +checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" [[package]] name = "futures-util" @@ -2567,11 +2942,17 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.14", "pin-utils", "slab", ] +[[package]] +name = "futures-utils-wasm" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42012b0f064e01aa58b545fe3727f90f7dd4020f4a3ea735b50344965f5a57e9" + [[package]] name = "fxhash" version = "0.2.1" @@ -2587,7 +2968,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "27d12c0aed7f1e24276a241aadc4cb8ea9f83000f34bc062b7cc2d51e3b0fabd" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "debugid", "fxhash", "serde", @@ -2596,15 +2977,16 @@ dependencies = [ [[package]] name = "generator" -version = "0.7.5" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cc16584ff22b460a382b7feec54b23d2908d858152e5739a120b949293bd74e" +checksum = 
"186014d53bc231d0090ef8d6f03e0920c54d85a5ed22f4f2f74315ec56cf83fb" dependencies = [ "cc", + "cfg-if", "libc", "log", "rustversion", - "windows 0.48.0", + "windows 0.54.0", ] [[package]] @@ -2629,20 +3011,30 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.12" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "190092ea657667030ac6a35e305e62fc4dd69fd98ac98631e5d3a2b1575a12b5" +checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" dependencies = [ "cfg-if", "libc", "wasi", ] +[[package]] +name = "getrandom_or_panic" +version = "0.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ea1015b5a70616b688dc230cfe50c8af89d972cb132d5a622814d29773b10b9" +dependencies = [ + "rand", + "rand_core", +] + [[package]] name = "ghash" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d930750de5717d2dd0b8c0d42c076c0e884c81a73e6cab859bbd2339c71e3e40" +checksum = "f0d8a4362ccb29cb0b265253fb0a2728f592895ee6854fd9bc13f2ffda266ff1" dependencies = [ "opaque-debug", "polyval", @@ -2680,20 +3072,8 @@ dependencies = [ "aho-corasick", "bstr", "log", - "regex-automata 0.4.5", - "regex-syntax 0.8.2", -] - -[[package]] -name = "gloo-timers" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b995a66bb87bebce9a0f4a95aed01daca4872c050bfcb21653361c03bc35e5c" -dependencies = [ - "futures-channel", - "futures-core", - "js-sys", - "wasm-bindgen", + "regex-automata 0.4.6", + "regex-syntax 0.8.3", ] [[package]] @@ -2709,17 +3089,17 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.24" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb2c4422095b67ee78da96fbb51a4cc413b3b25883c7717ff7ca1ab31022c9c9" +checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" dependencies = [ "bytes", "fnv", 
"futures-core", "futures-sink", "futures-util", - "http 0.2.11", - "indexmap 2.2.2", + "http 0.2.12", + "indexmap 2.2.6", "slab", "tokio", "tokio-util", @@ -2758,48 +3138,48 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.14.3" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" dependencies = [ "ahash", "allocator-api2", ] [[package]] -name = "hashers" -version = "1.0.1" +name = "heck" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2bca93b15ea5a746f220e56587f71e73c6165eab783df9e26590069953e3c30" -dependencies = [ - "fxhash", -] +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" [[package]] name = "heck" -version = "0.4.1" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" [[package]] name = "hermit-abi" -version = "0.3.5" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0c62115964e08cb8039170eb33c1d0e2388a256930279edca206fff675f82c3" +checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" [[package]] name = "hex" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +dependencies = [ + "serde", +] [[package]] name = "hex-conservative" -version = "0.1.1" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30ed443af458ccb6d81c1e7e661545f94d3176752fb1df2f543b902a1e0f51e2" +checksum = "e1aa273bf451e37ed35ced41c71a5e2a4e29064afb104158f2514bcd71c2c986" dependencies = [ - "core2 
0.3.3", + "arrayvec", ] [[package]] @@ -2860,9 +3240,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8947b1a6fad4393052c7ba1f4cd97bed3e953a95c79c92ad9b051a04611d9fbb" +checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" dependencies = [ "bytes", "fnv", @@ -2871,9 +3251,9 @@ dependencies = [ [[package]] name = "http" -version = "1.0.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b32afd38673a8016f7c9ae69e5af41a58f81b1d31689040f2f1959594ce194ea" +checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" dependencies = [ "bytes", "fnv", @@ -2887,8 +3267,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" dependencies = [ "bytes", - "http 0.2.11", - "pin-project-lite 0.2.13", + "http 0.2.12", + "pin-project-lite 0.2.14", ] [[package]] @@ -2898,20 +3278,20 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643" dependencies = [ "bytes", - "http 1.0.0", + "http 1.1.0", ] [[package]] name = "http-body-util" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41cb79eb393015dadd30fc252023adb0b2400a0caee0fa2a077e6e21a551e840" +checksum = "0475f8b2ac86659c21b64320d5d653f9efe42acd2a4e560073ec61a155a34f1d" dependencies = [ "bytes", - "futures-util", - "http 1.0.0", + "futures-core", + "http 1.1.0", "http-body 1.0.0", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.14", ] [[package]] @@ -2949,13 +3329,13 @@ dependencies = [ "futures-core", "futures-util", "h2", - "http 0.2.11", + "http 0.2.12", "http-body 0.4.6", "httparse", "httpdate", "itoa", - "pin-project-lite 0.2.13", - "socket2 0.4.10", + "pin-project-lite 
0.2.14", + "socket2 0.5.7", "tokio", "tower-service", "tracing", @@ -2964,33 +3344,34 @@ dependencies = [ [[package]] name = "hyper" -version = "1.1.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb5aa53871fc917b1a9ed87b683a5d86db645e23acb32c2e0785a353e522fb75" +checksum = "fe575dd17d0862a9a33781c8c4696a55c320909004a67a00fb286ba8b1bc496d" dependencies = [ "bytes", "futures-channel", "futures-util", - "http 1.0.0", + "http 1.1.0", "http-body 1.0.0", "httparse", "itoa", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.14", + "smallvec", "tokio", "want", ] [[package]] name = "hyper-rustls" -version = "0.26.0" +version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0bea761b46ae2b24eb4aef630d8d1c398157b6fc29e6350ecf090a0b70c952c" +checksum = "908bb38696d7a037a01ebcc68a00634112ac2bbf8ca74e30a2c3d2f4f021302b" dependencies = [ "futures-util", - "http 1.0.0", - "hyper 1.1.0", + "http 1.1.0", + "hyper 1.3.1", "hyper-util", - "rustls 0.22.2", + "rustls 0.23.7", "rustls-native-certs", "rustls-pki-types", "tokio", @@ -3007,11 +3388,11 @@ dependencies = [ "bytes", "futures-channel", "futures-util", - "http 1.0.0", + "http 1.1.0", "http-body 1.0.0", - "hyper 1.1.0", - "pin-project-lite 0.2.13", - "socket2 0.5.5", + "hyper 1.3.1", + "pin-project-lite 0.2.14", + "socket2 0.5.7", "tokio", "tower", "tower-service", @@ -3042,7 +3423,7 @@ dependencies = [ "iana-time-zone-haiku", "js-sys", "wasm-bindgen", - "windows-core", + "windows-core 0.51.1", ] [[package]] @@ -3075,6 +3456,16 @@ dependencies = [ "unicode-normalization", ] +[[package]] +name = "idna" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" +dependencies = [ + "unicode-bidi", + "unicode-normalization", +] + [[package]] name = "if-addrs" version = "0.10.2" @@ -3114,7 +3505,7 @@ dependencies = [ "attohttpc", 
"bytes", "futures", - "http 0.2.11", + "http 0.2.12", "hyper 0.14.28", "log", "rand", @@ -3132,15 +3523,6 @@ dependencies = [ "parity-scale-codec", ] -[[package]] -name = "impl-rlp" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f28220f89297a075ddc7245cd538076ee98b01f2a9c23a53a4f1105d5a322808" -dependencies = [ - "rlp", -] - [[package]] name = "impl-serde" version = "0.4.0" @@ -3161,12 +3543,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "indenter" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" - [[package]] name = "indexmap" version = "1.9.3" @@ -3180,12 +3556,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.2.2" +version = "2.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "824b2ae422412366ba479e8111fd301f7b5faece8149317bb81925979a53f520" +checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" dependencies = [ "equivalent", - "hashbrown 0.14.3", + "hashbrown 0.14.5", "serde", ] @@ -3201,9 +3577,9 @@ dependencies = [ [[package]] name = "instant" -version = "0.1.12" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" +checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" dependencies = [ "cfg-if", ] @@ -3229,7 +3605,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2 0.5.5", + "socket2 0.5.7", "widestring", "windows-sys 0.48.0", "winreg", @@ -3245,6 +3621,12 @@ checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" name = "is-terminal" version = "0.4.10" +[[package]] +name = "is_terminal_polyfill" +version = "1.70.0" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8478577c03552c21db0e2724ffb8986a5ce7af88107e6be5d2ee6e158c12800" + [[package]] name = "itertools" version = "0.10.5" @@ -3254,26 +3636,35 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" +dependencies = [ + "either", +] + [[package]] name = "itoa" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" +checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" [[package]] name = "jobserver" -version = "0.1.27" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c37f63953c4c63420ed5fd3d6d398c719489b9f872b9fa683262f8edd363c7d" +checksum = "d2b099aaa34a9751c5bf0878add70444e1ed2dd73f347be99003d4577277de6e" dependencies = [ "libc", ] [[package]] name = "js-sys" -version = "0.3.68" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "406cda4b368d531c842222cf9d2600a9a4acce8d29423695379c6868a143a9ee" +checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" dependencies = [ "wasm-bindgen", ] @@ -3306,7 +3697,7 @@ dependencies = [ "globset", "hyper 0.14.28", "jsonrpsee-types", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "rand", "rustc-hash", "serde", @@ -3323,7 +3714,7 @@ version = "0.16.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "44e8ab85614a08792b9bff6c8feee23be78c98d0182d4c622c05256ab553892a" dependencies = [ - "heck", + "heck 0.4.1", "proc-macro-crate 1.3.1", "proc-macro2", "quote", @@ -3338,7 +3729,7 @@ checksum = "cf4d945a6008c9b03db3354fb3c83ee02d2faa9f2e755ec1dfb69c3551b8f4ba" dependencies = [ "futures-channel", 
"futures-util", - "http 0.2.11", + "http 0.2.12", "hyper 0.14.28", "jsonrpsee-core", "jsonrpsee-types", @@ -3366,20 +3757,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "jsonwebtoken" -version = "8.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" -dependencies = [ - "base64 0.21.7", - "pem", - "ring 0.16.20", - "serde", - "serde_json", - "simple_asn1", -] - [[package]] name = "k256" version = "0.13.3" @@ -3403,6 +3780,16 @@ dependencies = [ "cpufeatures", ] +[[package]] +name = "keccak-asm" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47a3633291834c4fbebf8673acbc1b04ec9d151418ff9b8e26dcd79129928758" +dependencies = [ + "digest 0.10.7", + "sha3-asm", +] + [[package]] name = "kvdb" version = "0.13.0" @@ -3419,7 +3806,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf7a85fe66f9ff9cd74e169fdd2c94c6e1e74c412c99a73b4df3200b5d3760b2" dependencies = [ "kvdb", - "parking_lot 0.12.1", + "parking_lot 0.12.2", ] [[package]] @@ -3430,9 +3817,9 @@ checksum = "b644c70b92285f66bfc2032922a79000ea30af7bc2ab31902992a5dcb9b434f6" dependencies = [ "kvdb", "num_cpus", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "regex", - "rocksdb", + "rocksdb 0.21.0", "smallvec", ] @@ -3458,18 +3845,18 @@ checksum = "884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67" [[package]] name = "libc" -version = "0.2.153" +version = "0.2.155" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" +checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" [[package]] name = "libloading" -version = "0.8.1" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c571b676ddfc9a8c12f1f3d3085a7b163966a8fd8098a90640953ce5f6170161" +checksum = 
"0c2a198fb6b0eada2a8df47933734e6d35d350665a33a3593d7164fa52c75c19" dependencies = [ "cfg-if", - "windows-sys 0.48.0", + "windows-targets 0.48.5", ] [[package]] @@ -3557,7 +3944,7 @@ dependencies = [ "multihash 0.19.1", "multistream-select", "once_cell", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "pin-project", "quick-protobuf", "rand", @@ -3579,7 +3966,7 @@ dependencies = [ "libp2p-core", "libp2p-identity", "log", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "smallvec", "trust-dns-resolver", ] @@ -3701,7 +4088,7 @@ dependencies = [ "log", "rand", "smallvec", - "socket2 0.5.5", + "socket2 0.5.7", "tokio", "trust-dns-proto 0.22.0", "void", @@ -3782,12 +4169,12 @@ dependencies = [ "libp2p-identity", "libp2p-tls", "log", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "quinn", "rand", "ring 0.16.20", - "rustls 0.21.10", - "socket2 0.5.5", + "rustls 0.21.12", + "socket2 0.5.7", "thiserror", "tokio", ] @@ -3839,11 +4226,11 @@ version = "0.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4d5ec2a3df00c7836d7696c136274c9c59705bac69133253696a6c932cd1d74" dependencies = [ - "heck", + "heck 0.4.1", "proc-macro-warning", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.65", ] [[package]] @@ -3859,7 +4246,7 @@ dependencies = [ "libp2p-core", "libp2p-identity", "log", - "socket2 0.5.5", + "socket2 0.5.7", "tokio", ] @@ -3875,7 +4262,7 @@ dependencies = [ "libp2p-identity", "rcgen", "ring 0.16.20", - "rustls 0.21.10", + "rustls 0.21.12", "rustls-webpki 0.101.7", "thiserror", "x509-parser", @@ -3907,7 +4294,7 @@ dependencies = [ "futures", "js-sys", "libp2p-core", - "send_wrapper 0.6.0", + "send_wrapper", "wasm-bindgen", "wasm-bindgen-futures", ] @@ -3924,7 +4311,7 @@ dependencies = [ "libp2p-core", "libp2p-identity", "log", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "quicksink", "rw-stream-sink", "soketto", @@ -3945,11 +4332,21 @@ dependencies = [ "yamux", ] +[[package]] +name = "libredox" +version = "0.1.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" +dependencies = [ + "bitflags 2.5.0", + "libc", +] + [[package]] name = "librocksdb-sys" -version = "0.11.0+8.1.1" +version = "0.16.0+8.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3386f101bcb4bd252d8e9d2fb41ec3b0862a15a62b478c355b2982efa469e3e" +checksum = "ce3d60bc059831dc1c83903fb45c103f75db65c5a7bf22272764d9cc683e348c" dependencies = [ "bindgen", "bzip2-sys", @@ -3958,14 +4355,14 @@ dependencies = [ "libc", "libz-sys", "lz4-sys", - "tikv-jemalloc-sys", + "zstd-sys", ] [[package]] name = "libz-sys" -version = "1.1.15" +version = "1.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "037731f5d3aaa87a5675e895b63ddff1a87624bc29f77004ea829809654e48f6" +checksum = "5e143b5e666b2695d28f6bca6497720813f699c9602dd7f5cac91008b8ada7f9" dependencies = [ "cc", "pkg-config", @@ -4007,15 +4404,15 @@ dependencies = [ [[package]] name = "linux-raw-sys" -version = "0.4.13" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" +checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" [[package]] name = "lock_api" -version = "0.4.11" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45" +checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" dependencies = [ "autocfg", "scopeguard", @@ -4023,15 +4420,15 @@ dependencies = [ [[package]] name = "log" -version = "0.4.20" +version = "0.4.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" +checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" [[package]] 
name = "loom" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e045d70ddfbc984eacfa964ded019534e8f6cbf36f6410aee0ed5cefa5a9175" +checksum = "419e0dc8046cb947daa77eb95ae174acfbddb7673b4151f56d1eed8e93fbfaca" dependencies = [ "cfg-if", "generator", @@ -4042,11 +4439,11 @@ dependencies = [ [[package]] name = "lru" -version = "0.12.2" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db2c024b41519440580066ba82aab04092b333e09066a5eb86c7c4890df31f22" +checksum = "d3262e75e648fce39813cb56ac41f3c3e3f65217ebf3844d818d1f9398cfb0dc" dependencies = [ - "hashbrown 0.14.3", + "hashbrown 0.14.5", ] [[package]] @@ -4096,7 +4493,7 @@ dependencies = [ "macro_magic_core", "macro_magic_macros", "quote", - "syn 2.0.48", + "syn 2.0.65", ] [[package]] @@ -4110,7 +4507,7 @@ dependencies = [ "macro_magic_core_macros", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.65", ] [[package]] @@ -4121,7 +4518,7 @@ checksum = "d710e1214dffbab3b5dacb21475dde7d6ed84c69ff722b3a47a782668d44fbac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.65", ] [[package]] @@ -4132,7 +4529,7 @@ checksum = "b8fb85ec1620619edf2984a7693497d4ec88a9665d8b87e942856884c92dbf2a" dependencies = [ "macro_magic_core", "quote", - "syn 2.0.48", + "syn 2.0.65", ] [[package]] @@ -4185,9 +4582,9 @@ dependencies = [ [[package]] name = "memchr" -version = "2.7.1" +version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149" +checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d" [[package]] name = "memfd" @@ -4209,9 +4606,9 @@ dependencies = [ [[package]] name = "memoffset" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" +checksum = 
"488016bfae457b036d996092f6cb448677611ce4449e970ceaf42695203f218a" dependencies = [ "autocfg", ] @@ -4237,12 +4634,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "mime" -version = "0.3.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" - [[package]] name = "mini-serai" version = "0.1.0" @@ -4274,18 +4665,18 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d811f3e15f28568be3407c8e7fdb6514c1cda3cb30683f15b6a1a1dc4ea14a7" +checksum = "87dfd01fe195c66b572b37921ad8803d010623c0aca821bea2302239d155cdae" dependencies = [ "adler", ] [[package]] name = "mio" -version = "0.8.10" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f3d0b296e374a4e6f3c7b0a1f5a51d748a0d34c85e7dc48fc3fa9a87657fe09" +checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" dependencies = [ "libc", "wasi", @@ -4327,7 +4718,6 @@ dependencies = [ "dalek-ff-group", "digest 0.10.7", "dkg", - "dleq", "flexible-transcript", "hex", "minimal-ed448", @@ -4364,7 +4754,6 @@ dependencies = [ "curve25519-dalek", "dalek-ff-group", "digest_auth", - "dleq", "flexible-transcript", "group", "hex", @@ -4441,7 +4830,7 @@ dependencies = [ "blake2b_simd", "blake2s_simd", "blake3", - "core2 0.4.0", + "core2", "digest 0.10.7", "multihash-derive 0.8.0", "sha2", @@ -4455,7 +4844,7 @@ version = "0.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "076d548d76a0e2a0d4ab471d0b1c36c577786dfc4471242035d97a12a735c492" dependencies = [ - "core2 0.4.0", + "core2", "unsigned-varint", ] @@ -4468,7 +4857,7 @@ dependencies = [ "blake2b_simd", "blake2s_simd", "blake3", - "core2 0.4.0", + "core2", "digest 0.10.7", "multihash-derive 0.9.0", "ripemd", @@ 
-4498,7 +4887,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "890e72cb7396cb99ed98c1246a97b243cc16394470d94e0bc8b0c2c11d84290e" dependencies = [ - "core2 0.4.0", + "core2", "multihash 0.19.1", "multihash-derive-impl", ] @@ -4539,9 +4928,9 @@ dependencies = [ [[package]] name = "nalgebra" -version = "0.32.3" +version = "0.32.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "307ed9b18cc2423f29e83f84fd23a8e73628727990181f18641a8b5dc2ab1caa" +checksum = "3ea4908d4f23254adda3daa60ffef0f1ac7b8c3e9a864cf3cc154b251908a2ef" dependencies = [ "approx", "matrixmultiply", @@ -4628,9 +5017,9 @@ dependencies = [ [[package]] name = "netlink-sys" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6471bf08e7ac0135876a9581bf3217ef0333c191c128d34878079f42ee150411" +checksum = "416060d346fbaf1f23f9512963e3e878f1a78e707cb699ba9215761754244307" dependencies = [ "bytes", "futures", @@ -4684,24 +5073,29 @@ dependencies = [ [[package]] name = "num-bigint" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "608e7659b5c3d7cba262d894801b9ec9d00de989e8a82bd4bef91d08da45cdc0" +checksum = "c165a9ab64cf766f73521c0dd2cfdff64f488b8f0b3e621face3462d3db536d7" dependencies = [ - "autocfg", "num-integer", "num-traits", ] [[package]] name = "num-complex" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23c6602fda94a57c990fe0df199a035d83576b496aa29f4e634a8ac6004e68a6" +checksum = "73f88a1307638156682bada9d7604135552957b7818057dcef22705b4d509495" dependencies = [ "num-traits", ] +[[package]] +name = "num-conv" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" + [[package]] name = "num-format" version = "0.4.4" @@ 
-4723,11 +5117,10 @@ dependencies = [ [[package]] name = "num-rational" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0638a1c9d0a3c0914158145bc76cff373a75a627e6ecbfb71cbe6f453a5a19b0" +checksum = "f83d14da390562dca69fc84082e73e548e1ad308d24accdedd2720017cb37824" dependencies = [ - "autocfg", "num-bigint", "num-integer", "num-traits", @@ -4735,9 +5128,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.18" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ "autocfg", "libm", @@ -4753,27 +5146,6 @@ dependencies = [ "libc", ] -[[package]] -name = "num_enum" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02339744ee7253741199f897151b38e72257d13802d4ee837285cc2990a90845" -dependencies = [ - "num_enum_derive", -] - -[[package]] -name = "num_enum_derive" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "681030a937600a36906c185595136d26abfebb4aa9c65701cefcaf8578bb982b" -dependencies = [ - "proc-macro-crate 1.3.1", - "proc-macro2", - "quote", - "syn 2.0.48", -] - [[package]] name = "object" version = "0.31.1" @@ -4812,34 +5184,9 @@ checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" [[package]] name = "opaque-debug" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" - -[[package]] -name = "open-fastrlp" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "786393f80485445794f6043fd3138854dd109cc6c4bd1a6383db304c9ce9b9ce" -dependencies = [ - "arrayvec", - "auto_impl", - "bytes", - "ethereum-types", - 
"open-fastrlp-derive", -] - -[[package]] -name = "open-fastrlp-derive" -version = "0.1.1" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "003b2be5c6c53c1cfeb0a238b8a1c3915cd410feb684457a36c10038f764bb1c" -dependencies = [ - "bytes", - "proc-macro2", - "quote", - "syn 1.0.109", -] +checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" [[package]] name = "openssl-probe" @@ -5035,7 +5382,7 @@ dependencies = [ "log", "lz4", "memmap2", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "rand", "siphasher", "snap", @@ -5044,9 +5391,9 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.6.9" +version = "3.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "881331e34fa842a2fb61cc2db9643a8fedc615e47cfcc52597d1af0db9a7e8fe" +checksum = "306800abfa29c7f16596b5970a588435e3d5b3149683d00c12b699cc19f895ee" dependencies = [ "arrayvec", "bitvec", @@ -5059,11 +5406,11 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "3.6.9" +version = "3.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be30eaf4b0a9fba5336683b38de57bb86d179a35862ba6bfcf57625d006bde5b" +checksum = "d830939c76d294956402033aee57a6da7b438f2294eb94864c37b0569053a42c" dependencies = [ - "proc-macro-crate 2.0.1", + "proc-macro-crate 3.1.0", "proc-macro2", "quote", "syn 1.0.109", @@ -5084,49 +5431,38 @@ checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" [[package]] name = "parking_lot" version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" dependencies = [ - "instant", - "lock_api", - "parking_lot_core 0.8.6", + "parking_lot 0.12.2", ] [[package]] name = "parking_lot" -version = "0.12.1" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" +checksum = "7e4af0ca4f6caed20e900d564c242b8e5d4903fdacf31d3daf527b66fe6f42fb" dependencies = [ "lock_api", - "parking_lot_core 0.9.9", + "parking_lot_core 0.9.10", ] [[package]] name = "parking_lot_core" version = "0.8.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc" dependencies = [ - "cfg-if", - "instant", - "libc", - "redox_syscall 0.2.16", - "smallvec", - "winapi", + "parking_lot_core 0.9.10", ] [[package]] name = "parking_lot_core" -version = "0.9.9" +version = "0.9.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e" +checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.4.1", + "redox_syscall 0.5.1", "smallvec", - "windows-targets 0.48.5", + "windows-targets 0.52.5", ] [[package]] @@ -5163,9 +5499,9 @@ dependencies = [ [[package]] name = "paste" -version = "1.0.14" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" +checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" [[package]] name = "pbkdf2" @@ -5188,12 +5524,6 @@ dependencies = [ "sha2", ] -[[package]] -name = "peeking_take_while" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" - [[package]] name = "pem" version = "1.1.1" @@ -5210,43 +5540,44 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] -name = "petgraph" -version = "0.6.4" +name = "pest" +version = "2.7.10" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" +checksum = "560131c633294438da9f7c4b08189194b20946c8274c6b9e38881a7874dc8ee8" dependencies = [ - "fixedbitset", - "indexmap 2.2.2", + "memchr", + "thiserror", + "ucd-trie", ] [[package]] -name = "pharos" -version = "0.5.3" +name = "petgraph" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9567389417feee6ce15dd6527a8a1ecac205ef62c2932bcf3d9f6fc5b78b414" +checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" dependencies = [ - "futures", - "rustc_version", + "fixedbitset", + "indexmap 2.2.6", ] [[package]] name = "pin-project" -version = "1.1.4" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0302c4a0442c456bd56f841aee5c3bfd17967563f6fadc9ceb9f9c23cf3807e0" +checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.4" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "266c042b60c9c76b8d53061e52b2e0d1116abc57cefc8c5cd671619a56ac3690" +checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.65", ] [[package]] @@ -5257,9 +5588,9 @@ checksum = "257b64915a082f7811703966789728173279bdebb956b143dbcd23f6f970a777" [[package]] name = "pin-project-lite" -version = "0.2.13" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" +checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" [[package]] name = "pin-utils" @@ -5279,25 +5610,26 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.29" +version = "0.3.30" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2900ede94e305130c13ddd391e0ab7cbaeb783945ae07a279c268cb05109c6cb" +checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" [[package]] name = "platforms" -version = "3.3.0" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "626dec3cac7cc0e1577a2ec3fc496277ec2baa084bebad95bb6fdbfae235f84c" +checksum = "db23d408679286588f4d4644f965003d056e3dd5abcaaa938116871d7ce2fee7" [[package]] name = "polling" -version = "3.4.0" +version = "3.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30054e72317ab98eddd8561db0f6524df3367636884b7b21b703e4b280a84a14" +checksum = "645493cf344456ef24219d02a768cf1fb92ddf8c92161679ae3d91b91a637be3" dependencies = [ "cfg-if", "concurrent-queue", - "pin-project-lite 0.2.13", + "hermit-abi", + "pin-project-lite 0.2.14", "rustix", "tracing", "windows-sys 0.52.0", @@ -5316,9 +5648,9 @@ dependencies = [ [[package]] name = "polyval" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d52cff9d1d4dee5fe6d03729099f4a310a41179e0a10dbf542039873f2e826fb" +checksum = "9d1fe60d06143b2430aa532c94cfe9e29783047f06c0d7fd359a9a51b729fa25" dependencies = [ "cfg-if", "cpufeatures", @@ -5346,7 +5678,7 @@ checksum = "59230a63c37f3e18569bdb90e4a89cbf5bf8b06fea0b84e65ea10cc4df47addd" dependencies = [ "difflib", "float-cmp", - "itertools", + "itertools 0.10.5", "normalize-line-endings", "predicates-core", "regex", @@ -5378,16 +5710,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "prettyplease" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a41cf62165e97c7f814d2221421dbb9afcbcdb0a88068e5ea206e19951c2cbb5" -dependencies = [ - "proc-macro2", - "syn 2.0.48", -] - [[package]] name = "primeorder" version = "0.13.6" @@ -5405,7 +5727,6 @@ checksum = 
"0b34d9fd68ae0b74a41b21c03c2f62847aa0ffea044eee893b4c140b37e244e2" dependencies = [ "fixed-hash", "impl-codec", - "impl-rlp", "impl-serde", "scale-info", "uint", @@ -5469,29 +5790,29 @@ checksum = "3d1eaa7fa0aa1929ffdf7eeb6eac234dde6268914a14ad44d23521ab6a9b258e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.65", ] [[package]] name = "proc-macro2" -version = "1.0.78" +version = "1.0.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2422ad645d89c99f8f3e6b88a9fdeca7fabeac836b1002371c4367c8f984aae" +checksum = "0b33eb56c327dec362a9e55b3ad14f9d2f0904fb5a5b03b513ab5465399e9f43" dependencies = [ "unicode-ident", ] [[package]] name = "prometheus" -version = "0.13.3" +version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "449811d15fbdf5ceb5c1144416066429cf82316e2ec8ce0c1f6f8a02e7bbcf8c" +checksum = "3d33c28a30771f7f96db69893f78b857f7450d7e0237e9c8fc6427a81bae7ed1" dependencies = [ "cfg-if", "fnv", "lazy_static", "memchr", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "thiserror", ] @@ -5503,7 +5824,7 @@ checksum = "3c99afa9a01501019ac3a14d71d9f94050346f55ca471ce90c799a15c58f61e2" dependencies = [ "dtoa", "itoa", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "prometheus-client-derive-encode", ] @@ -5515,7 +5836,27 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.65", +] + +[[package]] +name = "proptest" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31b476131c3c86cb68032fdc5cb6d5a1045e3e42d96b69fa599fd77701e1f5bf" +dependencies = [ + "bit-set", + "bit-vec", + "bitflags 2.5.0", + "lazy_static", + "num-traits", + "rand", + "rand_chacha", + "rand_xorshift", + "regex-syntax 0.8.3", + "rusty-fork", + "tempfile", + "unarray", ] [[package]] @@ -5535,13 +5876,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "119533552c9a7ffacc21e099c24a0ac8bb19c2a2a3f363de84cd9b844feab270" dependencies = [ "bytes", - "heck", - "itertools", + "heck 0.4.1", + "itertools 0.10.5", "lazy_static", "log", "multimap", "petgraph", - "prettyplease 0.1.25", + "prettyplease", "prost", "prost-types", "regex", @@ -5557,7 +5898,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" dependencies = [ "anyhow", - "itertools", + "itertools 0.10.5", "proc-macro2", "quote", "syn 1.0.109", @@ -5628,11 +5969,11 @@ checksum = "8cc2c5017e4b43d5995dcea317bc46c1e09404c0a9664d2908f7f02dfe943d75" dependencies = [ "bytes", "futures-io", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.14", "quinn-proto", "quinn-udp", "rustc-hash", - "rustls 0.21.10", + "rustls 0.21.12", "thiserror", "tokio", "tracing", @@ -5648,7 +5989,7 @@ dependencies = [ "rand", "ring 0.16.20", "rustc-hash", - "rustls 0.21.10", + "rustls 0.21.12", "slab", "thiserror", "tinyvec", @@ -5663,16 +6004,16 @@ checksum = "055b4e778e8feb9f93c4e439f71dc2156ef13360b432b799e179a8c4cdf0b1d7" dependencies = [ "bytes", "libc", - "socket2 0.5.5", + "socket2 0.5.7", "tracing", "windows-sys 0.48.0", ] [[package]] name = "quote" -version = "1.0.35" +version = "1.0.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" +checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" dependencies = [ "proc-macro2", ] @@ -5732,6 +6073,15 @@ dependencies = [ "rand_core", ] +[[package]] +name = "rand_xorshift" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" +dependencies = [ + "rand_core", +] + [[package]] name = "rawpointer" version = "0.2.1" @@ -5740,9 +6090,9 @@ checksum = "60a357793950651c4ed0f3f52338f53b2f809f32d83a07f72909fa13e4c6c1e3" 
[[package]] name = "rayon" -version = "1.8.1" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa7237101a77a10773db45d62004a272517633fbcc3df19d96455ede1122e051" +checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" dependencies = [ "either", "rayon-core", @@ -5772,51 +6122,51 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.2.16" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" +checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" dependencies = [ "bitflags 1.3.2", ] [[package]] name = "redox_syscall" -version = "0.4.1" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" +checksum = "469052894dcb553421e483e4209ee581a45100d31b4018de03e5a7ad86374a7e" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.5.0", ] [[package]] name = "redox_users" -version = "0.4.3" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" +checksum = "bd283d9651eeda4b2a83a43c1c91b266c40fd76ecd39a50a8c630ae69dc72891" dependencies = [ "getrandom", - "redox_syscall 0.2.16", + "libredox", "thiserror", ] [[package]] name = "ref-cast" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4846d4c50d1721b1a3bef8af76924eef20d5e723647333798c1b519b3a9473f" +checksum = "ccf0a6f84d5f1d581da8b41b47ec8600871962f2a528115b542b362d4b744931" dependencies = [ "ref-cast-impl", ] [[package]] name = "ref-cast-impl" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fddb4f8d99b0a2ebafc65a87a69a7b9875e4b1ae1f00db265d300ef7f28bccc" +checksum = 
"bcc303e793d3734489387d205e9b186fac9c6cfacedd98cbb2e8a5943595f3e6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.65", ] [[package]] @@ -5834,14 +6184,14 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.3" +version = "1.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b62dbe01f0b06f9d8dc7d49e05a0785f153b00b2c227856282f671e0318c9b15" +checksum = "c117dbdfde9c8308975b6a18d71f3f385c89461f7b3fb054288ecf2a2058ba4c" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.5", - "regex-syntax 0.8.2", + "regex-automata 0.4.6", + "regex-syntax 0.8.3", ] [[package]] @@ -5852,65 +6202,30 @@ checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" dependencies = [ "regex-syntax 0.6.29", ] - -[[package]] -name = "regex-automata" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bb987efffd3c6d0d8f5f89510bb458559eab11e4f869acb20bf845e016259cd" -dependencies = [ - "aho-corasick", - "memchr", - "regex-syntax 0.8.2", -] - -[[package]] -name = "regex-syntax" -version = "0.6.29" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" - -[[package]] -name = "regex-syntax" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" - -[[package]] -name = "reqwest" -version = "0.11.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37b1ae8d9ac08420c66222fb9096fc5de435c3c48542bc5336c51892cffafb41" -dependencies = [ - "base64 0.21.7", - "bytes", - "encoding_rs", - "futures-core", - "futures-util", - "h2", - "http 0.2.11", - "http-body 0.4.6", - "hyper 0.14.28", - "ipnet", - "js-sys", - "log", - "mime", - "once_cell", - "percent-encoding", - "pin-project-lite 0.2.13", - "serde", - "serde_json", - "serde_urlencoded", - 
"system-configuration", - "tokio", - "tower-service", - "url", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", - "winreg", + +[[package]] +name = "regex-automata" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax 0.8.3", ] +[[package]] +name = "regex-syntax" +version = "0.6.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" + +[[package]] +name = "regex-syntax" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56" + [[package]] name = "resolv-conf" version = "0.7.0" @@ -5948,16 +6263,17 @@ dependencies = [ [[package]] name = "ring" -version = "0.17.7" +version = "0.17.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "688c63d65483050968b2a8937f7995f443e27041a0f7700aa59b0822aedebb74" +checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" dependencies = [ "cc", + "cfg-if", "getrandom", "libc", "spin 0.9.8", "untrusted 0.9.0", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -5976,26 +6292,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bb919243f34364b6bd2fc10ef797edbfa75f33c252e7998527479c6d6b47e1ec" dependencies = [ "bytes", - "rlp-derive", "rustc-hex", ] [[package]] -name = "rlp-derive" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e33d7b2abe0c340d8797fe2907d3f20d3b5ea5908683618bfe80df7f621f672a" +name = "rocksdb" +version = "0.21.0" dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", + "rocksdb 0.22.0", ] [[package]] name = "rocksdb" -version = "0.21.0" +version = "0.22.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb6f170a4041d50a0ce04b0d2e14916d6ca863ea2e422689a5b694395d299ffe" +checksum = "6bd13e55d6d7b8cd0ea569161127567cd587676c99f4472f779a0279aa60a7a7" dependencies = [ "libc", "librocksdb-sys", @@ -6037,11 +6348,41 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "ruint" +version = "1.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f308135fef9fc398342da5472ce7c484529df23743fb7c734e0f3d472971e62" +dependencies = [ + "alloy-rlp", + "ark-ff 0.3.0", + "ark-ff 0.4.2", + "bytes", + "fastrlp", + "num-bigint", + "num-traits", + "parity-scale-codec", + "primitive-types", + "proptest", + "rand", + "rlp", + "ruint-macro", + "serde", + "valuable", + "zeroize", +] + +[[package]] +name = "ruint-macro" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f86854cf50259291520509879a5c294c3c9a4c334e9ff65071c51e42ef1e2343" + [[package]] name = "rustc-demangle" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" +checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" [[package]] name = "rustc-hash" @@ -6055,13 +6396,22 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" +[[package]] +name = "rustc_version" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0dfe2087c51c460008730de8b57e6a320782fbfb312e1f4d520e6c6fae155ee" +dependencies = [ + "semver 0.11.0", +] + [[package]] name = "rustc_version" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.21", + "semver 1.0.23", ] 
[[package]] @@ -6075,11 +6425,11 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.31" +version = "0.38.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ea3e1a662af26cd7a3ba09c0297a31af215563ecf42817c98df621387f4e949" +checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "errno", "libc", "linux-raw-sys", @@ -6088,25 +6438,26 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.10" +version = "0.21.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9d5a6813c0759e4609cd494e8e725babae6a2ca7b62a5536a13daaec6fcb7ba" +checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" dependencies = [ "log", - "ring 0.17.7", + "ring 0.17.8", "rustls-webpki 0.101.7", "sct", ] [[package]] name = "rustls" -version = "0.22.2" +version = "0.23.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e87c9956bd9807afa1f77e0f7594af32566e830e088a5576d27c5b6f30f49d41" +checksum = "ebbbdb961df0ad3f2652da8f3fdc4b36122f568f968f45ad3316f26c025c677b" dependencies = [ - "ring 0.17.7", + "once_cell", + "ring 0.17.8", "rustls-pki-types", - "rustls-webpki 0.102.2", + "rustls-webpki 0.102.4", "subtle", "zeroize", ] @@ -6126,19 +6477,19 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "2.0.0" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35e4980fa29e4c4b212ffb3db068a564cbf560e51d3944b7c88bd8bf5bec64f4" +checksum = "29993a25686778eb88d4189742cd713c9bce943bc54251a33509dc63cbacf73d" dependencies = [ - "base64 0.21.7", + "base64 0.22.1", "rustls-pki-types", ] [[package]] name = "rustls-pki-types" -version = "1.2.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a716eb65e3158e90e17cd93d855216e27bde02745ab842f2cab4a39dba1bacf" +checksum = 
"976295e77ce332211c0d24d92c0e83e50f5c5f046d11082cea19f3df13a3562d" [[package]] name = "rustls-webpki" @@ -6146,26 +6497,38 @@ version = "0.101.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" dependencies = [ - "ring 0.17.7", + "ring 0.17.8", "untrusted 0.9.0", ] [[package]] name = "rustls-webpki" -version = "0.102.2" +version = "0.102.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "faaa0a62740bedb9b2ef5afa303da42764c012f743917351dc9a237ea1663610" +checksum = "ff448f7e92e913c4b7d4c6d8e4540a1724b319b4152b8aef6d4cf8339712b33e" dependencies = [ - "ring 0.17.7", + "ring 0.17.8", "rustls-pki-types", "untrusted 0.9.0", ] [[package]] name = "rustversion" -version = "1.0.14" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" +checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" + +[[package]] +name = "rusty-fork" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" +dependencies = [ + "fnv", + "quick-error", + "tempfile", + "wait-timeout", +] [[package]] name = "rw-stream-sink" @@ -6180,9 +6543,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.16" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f98d2aa92eebf49b69786be48e4477826b256916e84a57ff2a4f21923b48eb4c" +checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" [[package]] name = "safe_arch" @@ -6306,7 +6669,7 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.65", ] [[package]] @@ -6357,7 +6720,7 @@ dependencies = [ "futures", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "sc-executor", 
"sc-transaction-pool-api", "sc-utils", @@ -6386,7 +6749,7 @@ dependencies = [ "log", "parity-db", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "sc-client-api", "sc-state-db", "schnellru", @@ -6410,7 +6773,7 @@ dependencies = [ "libp2p-identity", "log", "mockall", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "sc-client-api", "sc-utils", "serde", @@ -6437,7 +6800,7 @@ dependencies = [ "num-rational", "num-traits", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "sc-client-api", "sc-consensus", "sc-consensus-epochs", @@ -6488,7 +6851,7 @@ dependencies = [ "futures-timer", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "rand", "sc-block-builder", "sc-chain-spec", @@ -6543,7 +6906,7 @@ version = "0.10.0-dev" source = "git+https://github.com/serai-dex/substrate#6e3f07bf5c98a6a3ec15f2b1a46148aa8c7d737a" dependencies = [ "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "sc-executor-common", "sc-executor-wasmtime", "schnellru", @@ -6610,7 +6973,7 @@ version = "4.0.0-dev" source = "git+https://github.com/serai-dex/substrate#6e3f07bf5c98a6a3ec15f2b1a46148aa8c7d737a" dependencies = [ "array-bytes", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "serde_json", "sp-application-crypto", "sp-core", @@ -6638,7 +7001,7 @@ dependencies = [ "log", "mockall", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "partial_sort", "pin-project", "rand", @@ -6804,7 +7167,7 @@ dependencies = [ "num_cpus", "once_cell", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "rand", "sc-client-api", "sc-network", @@ -6838,7 +7201,7 @@ dependencies = [ "jsonrpsee", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "sc-block-builder", "sc-chain-spec", "sc-client-api", @@ -6883,7 +7246,7 @@ name = "sc-rpc-server" version = "4.0.0-dev" source = "git+https://github.com/serai-dex/substrate#6e3f07bf5c98a6a3ec15f2b1a46148aa8c7d737a" dependencies 
= [ - "http 0.2.11", + "http 0.2.12", "jsonrpsee", "log", "serde_json", @@ -6905,7 +7268,7 @@ dependencies = [ "jsonrpsee", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "sc-chain-spec", "sc-client-api", "sc-transaction-pool-api", @@ -6932,7 +7295,7 @@ dependencies = [ "jsonrpsee", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "pin-project", "rand", "sc-block-builder", @@ -6989,7 +7352,7 @@ source = "git+https://github.com/serai-dex/substrate#6e3f07bf5c98a6a3ec15f2b1a46 dependencies = [ "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "sp-core", ] @@ -7021,7 +7384,7 @@ dependencies = [ "futures", "libp2p", "log", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "pin-project", "rand", "sc-utils", @@ -7041,7 +7404,7 @@ dependencies = [ "lazy_static", "libc", "log", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "regex", "rustc-hash", "sc-client-api", @@ -7067,7 +7430,7 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.65", ] [[package]] @@ -7081,7 +7444,7 @@ dependencies = [ "linked-hash-map", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "sc-client-api", "sc-transaction-pool-api", "sc-utils", @@ -7122,16 +7485,16 @@ dependencies = [ "futures-timer", "lazy_static", "log", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "prometheus", "sp-arithmetic", ] [[package]] name = "scale-info" -version = "2.10.0" +version = "2.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f7d66a1128282b7ef025a8ead62a4a9fcf017382ec53b8ffbf4d7bf77bd3c60" +checksum = "eca070c12893629e2cc820a9761bedf6ce1dcddc9852984d1dc734b8bd9bd024" dependencies = [ "bitvec", "cfg-if", @@ -7143,11 +7506,11 @@ dependencies = [ [[package]] name = "scale-info-derive" -version = "2.10.0" +version = "2.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"abf2c68b89cafb3b8d918dd07b42be0da66ff202cf1155c5739a4e0c1ea0dc19" +checksum = "2d35494501194174bda522a32605929eefc9ecf7e0a326c26db1fdd85881eb62" dependencies = [ - "proc-macro-crate 1.3.1", + "proc-macro-crate 3.1.0", "proc-macro2", "quote", "syn 1.0.109", @@ -7164,9 +7527,9 @@ dependencies = [ [[package]] name = "schnellru" -version = "0.2.1" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "772575a524feeb803e5b0fcbc6dd9f367e579488197c94c6e4023aad2305774d" +checksum = "c9a8ef13a93c54d20580de1e5c413e624e53121d42fc7e2c11d10ef7f8b02367" dependencies = [ "ahash", "cfg-if", @@ -7190,15 +7553,16 @@ dependencies = [ [[package]] name = "schnorrkel" -version = "0.11.3" +version = "0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da18ffd9f2f5d01bc0b3050b37ce7728665f926b4dd1157fe3221b05737d924f" +checksum = "8de18f6d8ba0aad7045f5feae07ec29899c1112584a38509a84ad7b04451eaa0" dependencies = [ + "aead", "arrayref", "arrayvec", "curve25519-dalek", + "getrandom_or_panic", "merlin", - "rand", "rand_core", "serde_bytes", "sha2", @@ -7230,7 +7594,7 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" dependencies = [ - "ring 0.17.7", + "ring 0.17.8", "untrusted 0.9.0", ] @@ -7250,9 +7614,9 @@ dependencies = [ [[package]] name = "secp256k1" -version = "0.28.2" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d24b59d129cdadea20aea4fb2352fa053712e5d713eee47d700cd4b2bc002f10" +checksum = "0e0cc0f1cf93f4969faf3ea1c7d8a9faed25918d96affa959720823dfe86d4f3" dependencies = [ "bitcoin_hashes", "rand", @@ -7262,9 +7626,9 @@ dependencies = [ [[package]] name = "secp256k1-sys" -version = "0.9.2" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5d1746aae42c19d583c3c1a8c646bfad910498e2051c551a7f2e3c0c9fbb7eb" 
+checksum = "1433bd67156263443f14d603720b082dd3121779323fce20cba2aa07b874bc1b" dependencies = [ "cc", ] @@ -7280,11 +7644,11 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.9.2" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" +checksum = "c627723fd09706bacdb5cf41499e95098555af3c3c29d014dc3c458ef6be11c0" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.5.0", "core-foundation", "core-foundation-sys", "libc", @@ -7293,9 +7657,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.9.1" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" +checksum = "317936bbbd05227752583946b9e66d7ce3b489f84e11a94a510b4437fef407d7" dependencies = [ "core-foundation-sys", "libc", @@ -7307,14 +7671,23 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a3186ec9e65071a2095434b1f5bb24838d4e8e130f584c790f6033c79943537" dependencies = [ - "semver-parser", + "semver-parser 0.7.0", +] + +[[package]] +name = "semver" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" +dependencies = [ + "semver-parser 0.10.2", ] [[package]] name = "semver" -version = "1.0.21" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b97ed7a9823b74f99c7742f5336af7be5ecd3eeafcb1507d1fa93347b1d589b0" +checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" dependencies = [ "serde", ] @@ -7326,10 +7699,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] -name = "send_wrapper" -version = "0.4.0" +name = 
"semver-parser" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f638d531eccd6e23b980caf34876660d38e265409d8e99b397ab71eb3612fad0" +checksum = "00b0bef5b7f9e0df16536d3961cfb6e84331c065b4066afb39768d0e319411f7" +dependencies = [ + "pest", +] [[package]] name = "send_wrapper" @@ -7451,7 +7827,6 @@ dependencies = [ name = "serai-coordinator-tests" version = "0.1.0" dependencies = [ - "async-recursion", "async-trait", "blake2", "borsh", @@ -7476,7 +7851,7 @@ name = "serai-db" version = "0.1.0" dependencies = [ "parity-db", - "rocksdb", + "rocksdb 0.21.0", ] [[package]] @@ -7509,11 +7884,21 @@ dependencies = [ name = "serai-env" version = "0.1.0" +[[package]] +name = "serai-ethereum-relayer" +version = "0.1.0" +dependencies = [ + "env_logger", + "log", + "serai-db", + "serai-env", + "tokio", +] + [[package]] name = "serai-full-stack-tests" version = "0.1.0" dependencies = [ - "async-recursion", "async-trait", "bitcoin-serai", "curve25519-dalek", @@ -7632,6 +8017,8 @@ dependencies = [ "futures-util", "hex", "jsonrpsee", + "libp2p", + "log", "pallet-transaction-payment-rpc", "rand_core", "sc-authority-discovery", @@ -7706,9 +8093,11 @@ dependencies = [ "bitcoin-serai", "borsh", "ciphersuite", + "const-hex", "dalek-ff-group", "dockertest", "env_logger", + "ethereum-serai", "flexible-transcript", "frost-schnorrkel", "hex", @@ -7758,11 +8147,14 @@ dependencies = [ "curve25519-dalek", "dkg", "dockertest", + "ethereum-serai", "hex", + "k256", "monero-serai", "parity-scale-codec", "rand_core", "serai-client", + "serai-db", "serai-docker-tests", "serai-message-queue", "serai-message-queue-tests", @@ -7794,7 +8186,7 @@ dependencies = [ "frame-support", "frame-system", "frame-system-rpc-runtime-api", - "hashbrown 0.14.3", + "hashbrown 0.14.5", "pallet-authorship", "pallet-babe", "pallet-grandpa", @@ -7859,7 +8251,7 @@ version = "0.1.0" dependencies = [ "frame-support", "frame-system", - "hashbrown 0.14.3", + "hashbrown 0.14.5", 
"pallet-babe", "pallet-grandpa", "parity-scale-codec", @@ -7895,9 +8287,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.196" +version = "1.0.202" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "870026e60fa08c69f064aa766c10f10b1d62db9ccd4d0abb206472bee0ce3b32" +checksum = "226b61a0d411b2ba5ff6d7f73a476ac4f8bb900373459cd00fab8512828ba395" dependencies = [ "serde_derive", ] @@ -7913,20 +8305,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.196" +version = "1.0.202" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33c85360c95e7d137454dc81d9a4ed2b8efd8fbe19cee57357b32b9771fccb67" +checksum = "6048858004bcff69094cd972ed40a32500f153bd3be9f716b2eed2e8217c4838" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.65", ] [[package]] name = "serde_json" -version = "1.0.113" +version = "1.0.117" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69801b70b1c3dac963ecb03a364ba0ceda9cf60c71cfe475e99864759c8b8a79" +checksum = "455182ea6142b14f93f4bc5320a2b31c1f266b66a4a5c858b013302a5d8cbfc3" dependencies = [ "itoa", "ryu", @@ -7935,20 +8327,20 @@ dependencies = [ [[package]] name = "serde_repr" -version = "0.1.18" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b2e6b945e9d3df726b65d6ee24060aff8e3533d431f677a9695db04eff9dfdb" +checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.65", ] [[package]] name = "serde_spanned" -version = "0.6.5" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb3622f419d1296904700073ea6cc23ad690adbd66f13ea683df73298736f0c1" +checksum = "79e674e01f999af37c49f70a6ede167a8a60b2503e56c5599532a65baa5969a0" dependencies = [ "serde", ] @@ -7967,16 +8359,17 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.6.0" 
+version = "3.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b0ed1662c5a68664f45b76d18deb0e234aff37207086803165c961eb695e981" +checksum = "0ad483d2ab0149d5a5ebcd9972a3852711e0153d863bf5a5d0391d28883c4a20" dependencies = [ - "base64 0.21.7", + "base64 0.22.1", "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.2.2", + "indexmap 2.2.6", "serde", + "serde_derive", "serde_json", "time", ] @@ -8026,6 +8419,16 @@ dependencies = [ "keccak", ] +[[package]] +name = "sha3-asm" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9b57fd861253bff08bb1919e995f90ba8f4889de2726091c8876f3a4e823b40" +dependencies = [ + "cc", + "cfg-if", +] + [[package]] name = "sharded-slab" version = "0.1.7" @@ -8043,9 +8446,9 @@ checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "signal-hook-registry" -version = "1.4.1" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" +checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" dependencies = [ "libc", ] @@ -8079,7 +8482,7 @@ version = "0.1.0" dependencies = [ "base64ct", "http-body-util", - "hyper 1.1.0", + "hyper 1.3.1", "hyper-rustls", "hyper-util", "tokio", @@ -8087,18 +8490,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "simple_asn1" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adc4e5204eb1910f40f9cfa375f6f05b68c3abac4b6fd879c8ff5e7ae8a0a085" -dependencies = [ - "num-bigint", - "num-traits", - "thiserror", - "time", -] - [[package]] name = "siphasher" version = "0.3.11" @@ -8122,9 +8513,9 @@ checksum = "826167069c09b99d56f31e9ae5c99049e932a98c9dc2dac47645b08dbbf76ba7" [[package]] name = "smallvec" -version = "1.13.1" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e6ecd384b10a64542d77071bd64bd7b231f4ed5940fba55e98c3de13824cf3d7" +checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "snap" @@ -8143,8 +8534,8 @@ dependencies = [ "chacha20poly1305", "curve25519-dalek", "rand_core", - "ring 0.17.7", - "rustc_version", + "ring 0.17.8", + "rustc_version 0.4.0", "sha2", "subtle", ] @@ -8161,12 +8552,12 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.5" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" +checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" dependencies = [ "libc", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -8178,7 +8569,7 @@ dependencies = [ "base64 0.13.1", "bytes", "futures", - "http 0.2.11", + "http 0.2.12", "httparse", "log", "rand", @@ -8217,7 +8608,7 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.65", ] [[package]] @@ -8278,7 +8669,7 @@ dependencies = [ "futures", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "schnellru", "sp-api", "sp-consensus", @@ -8371,7 +8762,7 @@ dependencies = [ "log", "merlin", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "paste", "primitive-types", "rand", @@ -8413,7 +8804,7 @@ source = "git+https://github.com/serai-dex/substrate#6e3f07bf5c98a6a3ec15f2b1a46 dependencies = [ "quote", "sp-core-hashing", - "syn 2.0.48", + "syn 2.0.65", ] [[package]] @@ -8422,7 +8813,7 @@ version = "4.0.0-dev" source = "git+https://github.com/serai-dex/substrate#6e3f07bf5c98a6a3ec15f2b1a46148aa8c7d737a" dependencies = [ "kvdb", - "parking_lot 0.12.1", + "parking_lot 0.12.2", ] [[package]] @@ -8432,7 +8823,7 @@ source = "git+https://github.com/serai-dex/substrate#6e3f07bf5c98a6a3ec15f2b1a46 dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.65", ] [[package]] @@ 
-8499,7 +8890,7 @@ version = "0.27.0" source = "git+https://github.com/serai-dex/substrate#6e3f07bf5c98a6a3ec15f2b1a46148aa8c7d737a" dependencies = [ "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "sp-core", "sp-externalities", "thiserror", @@ -8604,7 +8995,7 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.65", ] [[package]] @@ -8644,7 +9035,7 @@ dependencies = [ "hash-db", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "rand", "smallvec", "sp-core", @@ -8716,12 +9107,12 @@ source = "git+https://github.com/serai-dex/substrate#6e3f07bf5c98a6a3ec15f2b1a46 dependencies = [ "ahash", "hash-db", - "hashbrown 0.14.3", + "hashbrown 0.14.5", "lazy_static", "memory-db", "nohash-hasher", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "scale-info", "schnellru", "sp-core", @@ -8757,7 +9148,7 @@ dependencies = [ "parity-scale-codec", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.65", ] [[package]] @@ -8818,9 +9209,9 @@ checksum = "3b9b39299b249ad65f3b7e96443bad61c02ca5cd3589f46cb6d610a0fd6c0d6a" [[package]] name = "ss58-registry" -version = "1.46.0" +version = "1.47.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1114ee5900b8569bbc8b1a014a942f937b752af4b44f4607430b5f86cedaac0" +checksum = "4743ce898933fbff7bbf414f497c459a782d496269644b3d650a398ae6a487ba" dependencies = [ "Inflector", "num-format", @@ -8875,7 +9266,7 @@ dependencies = [ name = "std-shims" version = "0.1.1" dependencies = [ - "hashbrown 0.14.3", + "hashbrown 0.14.5", "spin 0.9.8", ] @@ -8894,9 +9285,9 @@ dependencies = [ [[package]] name = "strsim" -version = "0.10.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" [[package]] name = "strum" @@ -8919,7 +9310,7 
@@ version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ - "heck", + "heck 0.4.1", "proc-macro2", "quote", "rustversion", @@ -8932,11 +9323,11 @@ version = "0.25.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "23dc1fa9ac9c169a78ba62f0b841814b7abae11bdd047b9c58f893439e309ea0" dependencies = [ - "heck", + "heck 0.4.1", "proc-macro2", "quote", "rustversion", - "syn 2.0.48", + "syn 2.0.65", ] [[package]] @@ -9024,15 +9415,27 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.48" +version = "2.0.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f3531638e407dfc0814761abb7c00a5b54992b849452a0646b7f65c9f770f3f" +checksum = "d2863d96a84c6439701d7a38f9de935ec562c8832cc55d1dde0f513b52fad106" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] +[[package]] +name = "syn-solidity" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8db114c44cf843a8bacd37a146e37987a0b823a0e8bc4fdc610c9c72ab397a5" +dependencies = [ + "paste", + "proc-macro2", + "quote", + "syn 2.0.65", +] + [[package]] name = "syn_derive" version = "0.1.8" @@ -9042,7 +9445,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.65", ] [[package]] @@ -9086,15 +9489,15 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "target-lexicon" -version = "0.12.13" +version = "0.12.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69758bda2e78f098e4ccb393021a0963bb3442eac05f135c30f61b7370bbafae" +checksum = "e1fc403891a21bcfb7c37834ba66a547a8f402146eba7265b5a6d88059c9ff2f" [[package]] name = "tempfile" -version = "3.10.0" +version = "3.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a365e8cd18e44762ef95d87f284f4b5cd04107fec2ff3052bd6a3e6069669e67" +checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" dependencies = [ "cfg-if", "fastrand", @@ -9112,6 +9515,7 @@ dependencies = [ "hex", "log", "parity-scale-codec", + "serai-db", "thiserror", "tokio", ] @@ -9133,29 +9537,29 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "thiserror" -version = "1.0.56" +version = "1.0.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54378c645627613241d077a3a79db965db602882668f9136ac42af9ecb730ad" +checksum = "c546c80d6be4bc6a00c0f01730c08df82eaa7a7a61f11d656526506112cc1709" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.56" +version = "1.0.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa0faa943b50f3db30a20aa7e265dbc66076993efed8463e8de414e5d06d3471" +checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.65", ] [[package]] name = "thread_local" -version = "1.1.7" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" +checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" dependencies = [ "cfg-if", "once_cell", @@ -9170,24 +9574,15 @@ dependencies = [ "num_cpus", ] -[[package]] -name = "tikv-jemalloc-sys" -version = "0.5.4+5.3.0-patched" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9402443cb8fd499b6f327e40565234ff34dbda27460c5b47db0db77443dd85d1" -dependencies = [ - "cc", - "libc", -] - [[package]] name = "time" -version = "0.3.31" +version = "0.3.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f657ba42c3f86e7680e53c8cd3af8abbe56b5491790b46e22e19c0d57463583e" +checksum = 
"5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" dependencies = [ "deranged", "itoa", + "num-conv", "powerfmt", "serde", "time-core", @@ -9202,10 +9597,11 @@ checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.16" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26197e33420244aeb70c3e8c78376ca46571bc4e701e4791c2cd9f57dcb3a43f" +checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" dependencies = [ + "num-conv", "time-core", ] @@ -9254,19 +9650,19 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.36.0" +version = "1.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61285f6515fa018fb2d1e46eb21223fff441ee8db5d0f1435e8ab4f5cdb80931" +checksum = "1adbebffeca75fcfd058afa480fb6c0b81e165a0323f9c9d39c9697e37c46787" dependencies = [ "backtrace", "bytes", "libc", "mio", "num_cpus", - "parking_lot 0.12.1", - "pin-project-lite 0.2.13", + "parking_lot 0.12.2", + "pin-project-lite 0.2.14", "signal-hook-registry", - "socket2 0.5.5", + "socket2 0.5.7", "tokio-macros", "windows-sys 0.48.0", ] @@ -9279,45 +9675,44 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.65", ] [[package]] name = "tokio-rustls" -version = "0.25.0" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" +checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.22.2", + "rustls 0.23.7", "rustls-pki-types", "tokio", ] [[package]] name = "tokio-stream" -version = "0.1.14" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" +checksum = "267ac89e0bec6e691e5813911606935d77c476ff49024f98abcea3e7b15e37af" dependencies = [ "futures-core", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.14", "tokio", "tokio-util", ] [[package]] name = "tokio-util" -version = "0.7.10" +version = "0.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" +checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" dependencies = [ "bytes", "futures-core", "futures-io", "futures-sink", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.14", "tokio", - "tracing", ] [[package]] @@ -9343,9 +9738,9 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.6.5" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1" +checksum = "4badfd56924ae69bcc9039335b2e017639ce3f9b001c393c1b2d1ef846ce2cbf" dependencies = [ "serde", ] @@ -9356,11 +9751,11 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.2.2", + "indexmap 2.2.6", "serde", "serde_spanned", "toml_datetime", - "winnow", + "winnow 0.5.40", ] [[package]] @@ -9369,9 +9764,9 @@ version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" dependencies = [ - "indexmap 2.2.2", + "indexmap 2.2.6", "toml_datetime", - "winnow", + "winnow 0.5.40", ] [[package]] @@ -9383,7 +9778,7 @@ dependencies = [ "futures-core", "futures-util", "pin-project", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.14", "tokio", "tower-layer", "tower-service", @@ -9396,14 +9791,14 @@ version = "0.4.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "61c5bb1d698276a2443e5ecfabc1008bf15a36c12e6a7176e7bf089ea9131140" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "bytes", "futures-core", "futures-util", - "http 0.2.11", + "http 0.2.12", "http-body 0.4.6", "http-range-header", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.14", "tower-layer", "tower-service", ] @@ -9427,7 +9822,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" dependencies = [ "log", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.14", "tracing-attributes", "tracing-core", ] @@ -9440,7 +9835,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.65", ] [[package]] @@ -9633,7 +10028,7 @@ dependencies = [ "ipconfig", "lru-cache", "once_cell", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "rand", "resolv-conf", "smallvec", @@ -9673,6 +10068,12 @@ version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" +[[package]] +name = "ucd-trie" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" + [[package]] name = "uint" version = "0.9.5" @@ -9685,6 +10086,12 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "unarray" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" + [[package]] name = "unicode-bidi" version = "0.3.15" @@ -9699,18 +10106,18 @@ checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" [[package]] name = "unicode-normalization" -version = "0.1.22" +version = "0.1.23" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" +checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" dependencies = [ "tinyvec", ] [[package]] name = "unicode-width" -version = "0.1.11" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51733f11c9c4f72aa0c160008246859e340b00807569a0da0e7a1079b27ba85" +checksum = "68f5e5f3158ecfd4b8ff6fe086db7c8467a2dfdac97fe420f2b7c4aa97af66d6" [[package]] name = "unicode-xid" @@ -9754,12 +10161,12 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" -version = "2.4.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "143b538f18257fac9cad154828a57c6bf5157e1aa604d4816b5995bf6de87ae5" +checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" dependencies = [ "form_urlencoded", - "idna 0.4.0", + "idna 0.5.0", "percent-encoding", ] @@ -9771,9 +10178,9 @@ checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" [[package]] name = "uuid" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f00cc9702ca12d3c81455259621e676d0f7251cec66a21e98fe2e9a37db93b2a" +checksum = "a183cf7feeba97b4dd1c0d46788634f6221d87fa961b305bed08c851829efcc0" [[package]] name = "valuable" @@ -9799,11 +10206,20 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" +[[package]] +name = "wait-timeout" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f200f5b12eb75f8c1ed65abd4b2db8a6e1b138a20de009dacee265a2498f3f6" +dependencies = [ + "libc", +] + [[package]] name = "walkdir" -version = "2.4.0" +version = "2.5.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71d857dc86794ca4c280d616f7da00d2dbfd8cd788846559a6813e6aa4b54ee" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" dependencies = [ "same-file", "winapi-util", @@ -9826,9 +10242,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1e124130aee3fb58c5bdd6b639a0509486b0338acaaae0c84a5124b0f588b7f" +checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -9836,24 +10252,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9e7e1900c352b609c8488ad12639a311045f40a35491fb69ba8c12f758af70b" +checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.65", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.41" +version = "0.4.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877b9c3f61ceea0e56331985743b13f3d25c406a7098d45180fb5f09bc19ed97" +checksum = "76bc14366121efc8dbb487ab05bcc9d346b3b5ec0eaa76e46594cabbe51762c0" dependencies = [ "cfg-if", "js-sys", @@ -9863,9 +10279,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b30af9e2d358182b5c7449424f017eba305ed32a7010509ede96cdc4696c46ed" +checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -9873,22 +10289,22 @@ dependencies = [ [[package]] name = 
"wasm-bindgen-macro-support" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "642f325be6301eb8107a83d12a8ac6c1e1c54345a7ef1a9261962dfefda09e66" +checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.65", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f186bd2dcf04330886ce82d6f33dd75a7bfcf69ecf5763b89fcde53b6ac9838" +checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" [[package]] name = "wasm-encoder" @@ -9969,8 +10385,8 @@ version = "0.110.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1dfcdb72d96f01e6c85b6bf20102e7423bdbaad5c337301bab2bbf253d26413c" dependencies = [ - "indexmap 2.2.2", - "semver 1.0.21", + "indexmap 2.2.6", + "semver 1.0.23", ] [[package]] @@ -9984,7 +10400,7 @@ dependencies = [ "bumpalo", "cfg-if", "fxprof-processed-profile", - "indexmap 2.2.2", + "indexmap 2.2.6", "libc", "log", "object 0.31.1", @@ -10083,7 +10499,7 @@ dependencies = [ "anyhow", "cranelift-entity", "gimli 0.27.3", - "indexmap 2.2.2", + "indexmap 2.2.6", "log", "object 0.31.1", "serde", @@ -10150,7 +10566,7 @@ dependencies = [ "anyhow", "cc", "cfg-if", - "indexmap 2.2.2", + "indexmap 2.2.6", "libc", "log", "mach", @@ -10188,14 +10604,14 @@ checksum = "ca7af9bb3ee875c4907835e607a275d10b04d15623d3aebe01afe8fbd3f85050" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.65", ] [[package]] name = "web-sys" -version = "0.3.68" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96565907687f7aceb35bc5fc03770a8a0471d82e479f25832f54a0e3f4b28446" +checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef" dependencies = [ "js-sys", 
"wasm-bindgen", @@ -10221,9 +10637,9 @@ dependencies = [ [[package]] name = "wide" -version = "0.7.15" +version = "0.7.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89beec544f246e679fc25490e3f8e08003bc4bf612068f325120dad4cea02c1c" +checksum = "21e005a4cc35784183a9e39cb22e9a9c46353ef6a7f113fd8d36ddc58c15ef3c" dependencies = [ "bytemuck", "safe_arch", @@ -10231,9 +10647,9 @@ dependencies = [ [[package]] name = "widestring" -version = "1.0.2" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "653f141f39ec16bba3c5abe400a0c60da7468261cc2cbf36805022876bc721a8" +checksum = "7219d36b6eac893fa81e84ebe06485e7dcbb616177469b142df14f1f4deb1311" [[package]] name = "winapi" @@ -10253,11 +10669,11 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.6" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596" +checksum = "4d4cc384e1e73b93bafa6fb4f1df8c41695c8a91cf9c4c64358067d15a7b6c6b" dependencies = [ - "winapi", + "windows-sys 0.52.0", ] [[package]] @@ -10268,21 +10684,22 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows" -version = "0.48.0" +version = "0.51.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" +checksum = "ca229916c5ee38c2f2bc1e9d8f04df975b4bd93f9955dc69fabb5d91270045c9" dependencies = [ + "windows-core 0.51.1", "windows-targets 0.48.5", ] [[package]] name = "windows" -version = "0.51.1" +version = "0.54.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca229916c5ee38c2f2bc1e9d8f04df975b4bd93f9955dc69fabb5d91270045c9" +checksum = "9252e5725dbed82865af151df558e754e4a3c2c30818359eb17465f1346a1b49" dependencies = [ - 
"windows-core", - "windows-targets 0.48.5", + "windows-core 0.54.0", + "windows-targets 0.52.5", ] [[package]] @@ -10294,6 +10711,25 @@ dependencies = [ "windows-targets 0.48.5", ] +[[package]] +name = "windows-core" +version = "0.54.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12661b9c89351d684a50a8a643ce5f608e20243b9fb84687800163429f161d65" +dependencies = [ + "windows-result", + "windows-targets 0.52.5", +] + +[[package]] +name = "windows-result" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "749f0da9cc72d82e600d8d2e44cadd0b9eedb9038f71a1c58556ac1c5791813b" +dependencies = [ + "windows-targets 0.52.5", +] + [[package]] name = "windows-sys" version = "0.48.0" @@ -10309,7 +10745,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.0", + "windows-targets 0.52.5", ] [[package]] @@ -10329,17 +10765,18 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.0" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" +checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb" dependencies = [ - "windows_aarch64_gnullvm 0.52.0", - "windows_aarch64_msvc 0.52.0", - "windows_i686_gnu 0.52.0", - "windows_i686_msvc 0.52.0", - "windows_x86_64_gnu 0.52.0", - "windows_x86_64_gnullvm 0.52.0", - "windows_x86_64_msvc 0.52.0", + "windows_aarch64_gnullvm 0.52.5", + "windows_aarch64_msvc 0.52.5", + "windows_i686_gnu 0.52.5", + "windows_i686_gnullvm", + "windows_i686_msvc 0.52.5", + "windows_x86_64_gnu 0.52.5", + "windows_x86_64_gnullvm 0.52.5", + "windows_x86_64_msvc 0.52.5", ] [[package]] @@ -10350,9 +10787,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] 
name = "windows_aarch64_gnullvm" -version = "0.52.0" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" +checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263" [[package]] name = "windows_aarch64_msvc" @@ -10362,9 +10799,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.0" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" +checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6" [[package]] name = "windows_i686_gnu" @@ -10374,9 +10811,15 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.0" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" +checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9" [[package]] name = "windows_i686_msvc" @@ -10386,9 +10829,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.0" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" +checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf" [[package]] name = "windows_x86_64_gnu" @@ -10398,9 +10841,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name 
= "windows_x86_64_gnu" -version = "0.52.0" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" +checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9" [[package]] name = "windows_x86_64_gnullvm" @@ -10410,9 +10853,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.0" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" +checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596" [[package]] name = "windows_x86_64_msvc" @@ -10422,19 +10865,25 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.52.0" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" +checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" [[package]] name = "winnow" -version = "0.5.39" +version = "0.5.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5389a154b01683d28c77f8f68f49dea75f0a4da32557a58f68ee51ebba472d29" +checksum = "f593a95398737aeed53e489c785df13f3618e41dbcd6718c6addbf1395aa6876" dependencies = [ "memchr", ] +[[package]] +name = "winnow" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3c52e9c97a68071b23e836c9380edae937f17b9c4667bd021973efc689f618d" + [[package]] name = "winreg" version = "0.50.0" @@ -10445,25 +10894,6 @@ dependencies = [ "windows-sys 0.48.0", ] -[[package]] -name = "ws_stream_wasm" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7999f5f4217fe3818726b66257a4475f71e74ffd190776ad053fa159e50737f5" -dependencies = [ - "async_io_stream", - "futures", - "js-sys", - "log", - "pharos", - "rustc_version", - "send_wrapper 0.6.0", - "thiserror", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", -] - [[package]] name = "wyz" version = "0.5.1" @@ -10504,9 +10934,9 @@ dependencies = [ [[package]] name = "xml-rs" -version = "0.8.19" +version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fcb9cbac069e033553e8bb871be2fbdffcab578eb25bd0f7c508cedc6dcd75a" +checksum = "791978798f0597cfc70478424c2b4fdc2b7a8024aaff78497ef00f24ef674193" [[package]] name = "xmltree" @@ -10526,7 +10956,7 @@ dependencies = [ "futures", "log", "nohash-hasher", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "pin-project", "rand", "static_assertions", @@ -10545,27 +10975,28 @@ dependencies = [ name = "zalloc" version = "0.1.0" dependencies = [ + "rustversion", "zeroize", ] [[package]] name = "zerocopy" -version = "0.7.32" +version = "0.7.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be" +checksum = "ae87e3fcd617500e5d106f0380cf7b77f3c6092aae37191433159dda23cfb087" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.32" +version = "0.7.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" +checksum = "15e934569e47891f7d9411f1a451d947a60e000ab3bd24fbb970f000387d1b3b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.65", ] [[package]] @@ -10585,14 +11016,14 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.65", ] [[package]] name = "zstd" version = "0.11.2+zstd.1.5.2" dependencies = [ - "zstd 0.12.4", + "zstd 0.13.1", ] [[package]] @@ -10601,7 
+11032,16 @@ version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a27595e173641171fc74a1232b7b1c7a7cb6e18222c11e9dfb9888fa424c53c" dependencies = [ - "zstd-safe", + "zstd-safe 6.0.6", +] + +[[package]] +name = "zstd" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d789b1514203a1120ad2429eae43a7bd32b90976a7bb8a05f7ec02fa88cc23a" +dependencies = [ + "zstd-safe 7.1.0", ] [[package]] @@ -10614,11 +11054,20 @@ dependencies = [ "zstd-sys", ] +[[package]] +name = "zstd-safe" +version = "7.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1cd99b45c6bc03a018c8b8a86025678c87e55526064e38f9df301989dce7ec0a" +dependencies = [ + "zstd-sys", +] + [[package]] name = "zstd-sys" -version = "2.0.9+zstd.1.5.5" +version = "2.0.10+zstd.1.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e16efa8a874a0481a574084d34cc26fdb3b99627480f785888deb6386506656" +checksum = "c253a4914af5bafc8fa8c86ee400827e83cf6ec01195ec1f1ed8441bf00d65aa" dependencies = [ "cc", "pkg-config", diff --git a/Cargo.toml b/Cargo.toml index 25601c46e..ce0062f01 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -2,7 +2,10 @@ resolver = "2" members = [ # Version patches + "patches/parking_lot_core", + "patches/parking_lot", "patches/zstd", + "patches/rocksdb", "patches/proc-macro-crate", # std patches @@ -35,7 +38,11 @@ members = [ "crypto/schnorrkel", "coins/bitcoin", + + "coins/ethereum/alloy-simple-request-transport", "coins/ethereum", + "coins/ethereum/relayer", + "coins/monero/generators", "coins/monero", @@ -108,10 +115,14 @@ panic = "unwind" lazy_static = { git = "https://github.com/rust-lang-nursery/lazy-static.rs", rev = "5735630d46572f1e5377c8f2ba0f79d18f53b10c" } # Needed due to dockertest's usage of `Rc`s when we need `Arc`s -dockertest = { git = "https://github.com/kayabaNerve/dockertest-rs", branch = "arc" } +dockertest = { git = 
"https://github.com/orcalabs/dockertest-rs", rev = "4dd6ae24738aa6dc5c89444cc822ea4745517493" } +parking_lot_core = { path = "patches/parking_lot_core" } +parking_lot = { path = "patches/parking_lot" } # wasmtime pulls in an old version for this zstd = { path = "patches/zstd" } +# Needed for WAL compression +rocksdb = { path = "patches/rocksdb" } # proc-macro-crate 2 binds to an old version of toml for msrv so we patch to 3 proc-macro-crate = { path = "patches/proc-macro-crate" } diff --git a/README.md b/README.md index 8f8c19829..4a8ac4d5d 100644 --- a/README.md +++ b/README.md @@ -5,13 +5,16 @@ Bitcoin, Ethereum, DAI, and Monero, offering a liquidity-pool-based trading experience. Funds are stored in an economically secured threshold-multisig wallet. -[Getting Started](docs/Getting%20Started.md) +[Getting Started](spec/Getting%20Started.md) ### Layout - `audits`: Audits for various parts of Serai. -- `docs`: Documentation on the Serai protocol. +- `spec`: The specification of the Serai protocol, both internally and as + networked. + +- `docs`: User-facing documentation on the Serai protocol. - `common`: Crates containing utilities common to a variety of areas under Serai, none neatly fitting under another category. 
diff --git a/coins/bitcoin/Cargo.toml b/coins/bitcoin/Cargo.toml index 4ff0f79a4..66fcc0140 100644 --- a/coins/bitcoin/Cargo.toml +++ b/coins/bitcoin/Cargo.toml @@ -23,7 +23,7 @@ thiserror = { version = "1", default-features = false, optional = true } zeroize = { version = "^1.5", default-features = false } rand_core = { version = "0.6", default-features = false } -bitcoin = { version = "0.31", default-features = false, features = ["no-std"] } +bitcoin = { version = "0.32", default-features = false } k256 = { version = "^0.13.1", default-features = false, features = ["arithmetic", "bits"] } @@ -36,7 +36,7 @@ serde_json = { version = "1", default-features = false, optional = true } simple-request = { path = "../../common/request", version = "0.1", default-features = false, features = ["tls", "basic-auth"], optional = true } [dev-dependencies] -secp256k1 = { version = "0.28", default-features = false, features = ["std"] } +secp256k1 = { version = "0.29", default-features = false, features = ["std"] } frost = { package = "modular-frost", path = "../../crypto/frost", features = ["tests"] } diff --git a/coins/bitcoin/src/rpc.rs b/coins/bitcoin/src/rpc.rs index 6778636bd..fb1c35d6f 100644 --- a/coins/bitcoin/src/rpc.rs +++ b/coins/bitcoin/src/rpc.rs @@ -195,13 +195,13 @@ impl Rpc { // If this was already successfully published, consider this having succeeded if let RpcError::RequestError(Error { code, .. }) = e { if code == RPC_VERIFY_ALREADY_IN_CHAIN { - return Ok(tx.txid()); + return Ok(tx.compute_txid()); } } Err(e)? 
} }; - if txid != tx.txid() { + if txid != tx.compute_txid() { Err(RpcError::InvalidResponse("returned TX ID inequals calculated TX ID"))?; } Ok(txid) @@ -215,7 +215,7 @@ impl Rpc { let tx: Transaction = encode::deserialize(&bytes) .map_err(|_| RpcError::InvalidResponse("node sent an improperly serialized transaction"))?; - let mut tx_hash = *tx.txid().as_raw_hash().as_byte_array(); + let mut tx_hash = *tx.compute_txid().as_raw_hash().as_byte_array(); tx_hash.reverse(); if hash != &tx_hash { Err(RpcError::InvalidResponse("node replied with a different transaction"))?; diff --git a/coins/bitcoin/src/tests/crypto.rs b/coins/bitcoin/src/tests/crypto.rs index 2170219c0..cfc694f40 100644 --- a/coins/bitcoin/src/tests/crypto.rs +++ b/coins/bitcoin/src/tests/crypto.rs @@ -39,7 +39,7 @@ fn test_algorithm() { .verify_schnorr( &Signature::from_slice(&sig) .expect("couldn't convert produced signature to secp256k1::Signature"), - &Message::from(Hash::hash(MESSAGE)), + &Message::from_digest_slice(Hash::hash(MESSAGE).as_ref()).unwrap(), &x_only(&keys[&Participant::new(1).unwrap()].group_key()), ) .unwrap() diff --git a/coins/bitcoin/src/wallet/mod.rs b/coins/bitcoin/src/wallet/mod.rs index 3f099faac..195182fff 100644 --- a/coins/bitcoin/src/wallet/mod.rs +++ b/coins/bitcoin/src/wallet/mod.rs @@ -4,7 +4,7 @@ use std_shims::{ io::{self, Write}, }; #[cfg(feature = "std")] -use std_shims::io::Read; +use std::io::{Read, BufReader}; use k256::{ elliptic_curve::sec1::{Tag, ToEncodedPoint}, @@ -18,8 +18,8 @@ use frost::{ }; use bitcoin::{ - consensus::encode::serialize, key::TweakedPublicKey, address::Payload, OutPoint, ScriptBuf, - TxOut, Transaction, Block, + consensus::encode::serialize, key::TweakedPublicKey, OutPoint, ScriptBuf, TxOut, Transaction, + Block, }; #[cfg(feature = "std")] use bitcoin::consensus::encode::Decodable; @@ -46,12 +46,12 @@ pub fn tweak_keys(keys: &ThresholdKeys) -> ThresholdKeys { /// Return the Taproot address payload for a public key. 
/// /// If the key is odd, this will return None. -pub fn address_payload(key: ProjectivePoint) -> Option { +pub fn p2tr_script_buf(key: ProjectivePoint) -> Option { if key.to_encoded_point(true).tag() != Tag::CompressedEvenY { return None; } - Some(Payload::p2tr_tweaked(TweakedPublicKey::dangerous_assume_tweaked(x_only(&key)))) + Some(ScriptBuf::new_p2tr_tweaked(TweakedPublicKey::dangerous_assume_tweaked(x_only(&key)))) } /// A spendable output. @@ -89,11 +89,17 @@ impl ReceivedOutput { /// Read a ReceivedOutput from a generic satisfying Read. #[cfg(feature = "std")] pub fn read(r: &mut R) -> io::Result { - Ok(ReceivedOutput { - offset: Secp256k1::read_F(r)?, - output: TxOut::consensus_decode(r).map_err(|_| io::Error::other("invalid TxOut"))?, - outpoint: OutPoint::consensus_decode(r).map_err(|_| io::Error::other("invalid OutPoint"))?, - }) + let offset = Secp256k1::read_F(r)?; + let output; + let outpoint; + { + let mut buf_r = BufReader::with_capacity(0, r); + output = + TxOut::consensus_decode(&mut buf_r).map_err(|_| io::Error::other("invalid TxOut"))?; + outpoint = + OutPoint::consensus_decode(&mut buf_r).map_err(|_| io::Error::other("invalid OutPoint"))?; + } + Ok(ReceivedOutput { offset, output, outpoint }) } /// Write a ReceivedOutput to a generic satisfying Write. @@ -124,7 +130,7 @@ impl Scanner { /// Returns None if this key can't be scanned for. 
pub fn new(key: ProjectivePoint) -> Option { let mut scripts = HashMap::new(); - scripts.insert(address_payload(key)?.script_pubkey(), Scalar::ZERO); + scripts.insert(p2tr_script_buf(key)?, Scalar::ZERO); Some(Scanner { key, scripts }) } @@ -141,9 +147,8 @@ impl Scanner { // chance of being even // That means this should terminate within a very small amount of iterations loop { - match address_payload(self.key + (ProjectivePoint::GENERATOR * offset)) { - Some(address) => { - let script = address.script_pubkey(); + match p2tr_script_buf(self.key + (ProjectivePoint::GENERATOR * offset)) { + Some(script) => { if self.scripts.contains_key(&script) { None?; } @@ -166,7 +171,7 @@ impl Scanner { res.push(ReceivedOutput { offset: *offset, output: output.clone(), - outpoint: OutPoint::new(tx.txid(), vout), + outpoint: OutPoint::new(tx.compute_txid(), vout), }); } } diff --git a/coins/bitcoin/src/wallet/send.rs b/coins/bitcoin/src/wallet/send.rs index f4cfa3b5d..1980a5548 100644 --- a/coins/bitcoin/src/wallet/send.rs +++ b/coins/bitcoin/src/wallet/send.rs @@ -18,12 +18,12 @@ use bitcoin::{ absolute::LockTime, script::{PushBytesBuf, ScriptBuf}, transaction::{Version, Transaction}, - OutPoint, Sequence, Witness, TxIn, Amount, TxOut, Address, + OutPoint, Sequence, Witness, TxIn, Amount, TxOut, }; use crate::{ crypto::Schnorr, - wallet::{ReceivedOutput, address_payload}, + wallet::{ReceivedOutput, p2tr_script_buf}, }; #[rustfmt::skip] @@ -61,7 +61,11 @@ pub struct SignableTransaction { } impl SignableTransaction { - fn calculate_weight(inputs: usize, payments: &[(Address, u64)], change: Option<&Address>) -> u64 { + fn calculate_weight( + inputs: usize, + payments: &[(ScriptBuf, u64)], + change: Option<&ScriptBuf>, + ) -> u64 { // Expand this a full transaction in order to use the bitcoin library's weight function let mut tx = Transaction { version: Version(2), @@ -86,14 +90,14 @@ impl SignableTransaction { // The script pub key is not of a fixed size and does have to be used 
here .map(|payment| TxOut { value: Amount::from_sat(payment.1), - script_pubkey: payment.0.script_pubkey(), + script_pubkey: payment.0.clone(), }) .collect(), }; if let Some(change) = change { // Use a 0 value since we're currently unsure what the change amount will be, and since // the value is fixed size (so any value could be used here) - tx.output.push(TxOut { value: Amount::ZERO, script_pubkey: change.script_pubkey() }); + tx.output.push(TxOut { value: Amount::ZERO, script_pubkey: change.clone() }); } u64::from(tx.weight()) } @@ -121,8 +125,8 @@ impl SignableTransaction { /// If data is specified, an OP_RETURN output will be added with it. pub fn new( mut inputs: Vec, - payments: &[(Address, u64)], - change: Option<&Address>, + payments: &[(ScriptBuf, u64)], + change: Option, data: Option>, fee_per_weight: u64, ) -> Result { @@ -159,10 +163,7 @@ impl SignableTransaction { let payment_sat = payments.iter().map(|payment| payment.1).sum::(); let mut tx_outs = payments .iter() - .map(|payment| TxOut { - value: Amount::from_sat(payment.1), - script_pubkey: payment.0.script_pubkey(), - }) + .map(|payment| TxOut { value: Amount::from_sat(payment.1), script_pubkey: payment.0.clone() }) .collect::>(); // Add the OP_RETURN output @@ -213,12 +214,11 @@ impl SignableTransaction { // If there's a change address, check if there's change to give it if let Some(change) = change { - let weight_with_change = Self::calculate_weight(tx_ins.len(), payments, Some(change)); + let weight_with_change = Self::calculate_weight(tx_ins.len(), payments, Some(&change)); let fee_with_change = fee_per_weight * weight_with_change; if let Some(value) = input_sat.checked_sub(payment_sat + fee_with_change) { if value >= DUST { - tx_outs - .push(TxOut { value: Amount::from_sat(value), script_pubkey: change.script_pubkey() }); + tx_outs.push(TxOut { value: Amount::from_sat(value), script_pubkey: change }); weight = weight_with_change; needed_fee = fee_with_change; } @@ -248,7 +248,7 @@ impl 
SignableTransaction { /// Returns the TX ID of the transaction this will create. pub fn txid(&self) -> [u8; 32] { - let mut res = self.tx.txid().to_byte_array(); + let mut res = self.tx.compute_txid().to_byte_array(); res.reverse(); res } @@ -288,7 +288,7 @@ impl SignableTransaction { transcript.append_message(b"signing_input", u32::try_from(i).unwrap().to_le_bytes()); let offset = keys.clone().offset(self.offsets[i]); - if address_payload(offset.group_key())?.script_pubkey() != self.prevouts[i].script_pubkey { + if p2tr_script_buf(offset.group_key())? != self.prevouts[i].script_pubkey { None?; } @@ -375,7 +375,7 @@ impl SignMachine for TransactionSignMachine { msg: &[u8], ) -> Result<(TransactionSignatureMachine, Self::SignatureShare), FrostError> { if !msg.is_empty() { - panic!("message was passed to the TransactionMachine when it generates its own"); + panic!("message was passed to the TransactionSignMachine when it generates its own"); } let commitments = (0 .. self.sigs.len()) diff --git a/coins/bitcoin/tests/wallet.rs b/coins/bitcoin/tests/wallet.rs index 9eca20c78..9db004f57 100644 --- a/coins/bitcoin/tests/wallet.rs +++ b/coins/bitcoin/tests/wallet.rs @@ -22,11 +22,10 @@ use bitcoin_serai::{ hashes::Hash as HashTrait, blockdata::opcodes::all::OP_RETURN, script::{PushBytesBuf, Instruction, Instructions, Script}, - address::NetworkChecked, OutPoint, Amount, TxOut, Transaction, Network, Address, }, wallet::{ - tweak_keys, address_payload, ReceivedOutput, Scanner, TransactionError, SignableTransaction, + tweak_keys, p2tr_script_buf, ReceivedOutput, Scanner, TransactionError, SignableTransaction, }, rpc::Rpc, }; @@ -48,7 +47,7 @@ async fn send_and_get_output(rpc: &Rpc, scanner: &Scanner, key: ProjectivePoint) "generatetoaddress", serde_json::json!([ 1, - Address::::new(Network::Regtest, address_payload(key).unwrap()) + Address::from_script(&p2tr_script_buf(key).unwrap(), Network::Regtest).unwrap() ]), ) .await @@ -69,7 +68,7 @@ async fn send_and_get_output(rpc: 
&Rpc, scanner: &Scanner, key: ProjectivePoint) assert_eq!(outputs, scanner.scan_transaction(&block.txdata[0])); assert_eq!(outputs.len(), 1); - assert_eq!(outputs[0].outpoint(), &OutPoint::new(block.txdata[0].txid(), 0)); + assert_eq!(outputs[0].outpoint(), &OutPoint::new(block.txdata[0].compute_txid(), 0)); assert_eq!(outputs[0].value(), block.txdata[0].output[0].value.to_sat()); assert_eq!( @@ -193,7 +192,7 @@ async_sequential! { assert_eq!(output.offset(), Scalar::ZERO); let inputs = vec![output]; - let addr = || Address::::new(Network::Regtest, address_payload(key).unwrap()); + let addr = || p2tr_script_buf(key).unwrap(); let payments = vec![(addr(), 1000)]; assert!(SignableTransaction::new(inputs.clone(), &payments, None, None, FEE).is_ok()); @@ -206,7 +205,7 @@ async_sequential! { // No change assert!(SignableTransaction::new(inputs.clone(), &[(addr(), 1000)], None, None, FEE).is_ok()); // Consolidation TX - assert!(SignableTransaction::new(inputs.clone(), &[], Some(&addr()), None, FEE).is_ok()); + assert!(SignableTransaction::new(inputs.clone(), &[], Some(addr()), None, FEE).is_ok()); // Data assert!(SignableTransaction::new(inputs.clone(), &[], None, Some(vec![]), FEE).is_ok()); // No outputs @@ -229,7 +228,7 @@ async_sequential! { ); assert_eq!( - SignableTransaction::new(inputs.clone(), &[], Some(&addr()), None, 0), + SignableTransaction::new(inputs.clone(), &[], Some(addr()), None, 0), Err(TransactionError::TooLowFee), ); @@ -261,20 +260,19 @@ async_sequential! 
{ // Declare payments, change, fee let payments = [ - (Address::::new(Network::Regtest, address_payload(key).unwrap()), 1005), - (Address::::new(Network::Regtest, address_payload(offset_key).unwrap()), 1007) + (p2tr_script_buf(key).unwrap(), 1005), + (p2tr_script_buf(offset_key).unwrap(), 1007) ]; let change_offset = scanner.register_offset(Scalar::random(&mut OsRng)).unwrap(); let change_key = key + (ProjectivePoint::GENERATOR * change_offset); - let change_addr = - Address::::new(Network::Regtest, address_payload(change_key).unwrap()); + let change_addr = p2tr_script_buf(change_key).unwrap(); // Create and sign the TX let tx = SignableTransaction::new( vec![output.clone(), offset_output.clone()], &payments, - Some(&change_addr), + Some(change_addr.clone()), None, FEE ).unwrap(); @@ -287,7 +285,7 @@ async_sequential! { // Ensure we can scan it let outputs = scanner.scan_transaction(&tx); for (o, output) in outputs.iter().enumerate() { - assert_eq!(output.outpoint(), &OutPoint::new(tx.txid(), u32::try_from(o).unwrap())); + assert_eq!(output.outpoint(), &OutPoint::new(tx.compute_txid(), u32::try_from(o).unwrap())); assert_eq!(&ReceivedOutput::read::<&[u8]>(&mut output.serialize().as_ref()).unwrap(), output); } @@ -299,7 +297,7 @@ async_sequential! { for ((output, scanned), payment) in tx.output.iter().zip(outputs.iter()).zip(payments.iter()) { assert_eq!( output, - &TxOut { script_pubkey: payment.0.script_pubkey(), value: Amount::from_sat(payment.1) }, + &TxOut { script_pubkey: payment.0.clone(), value: Amount::from_sat(payment.1) }, ); assert_eq!(scanned.value(), payment.1 ); } @@ -314,13 +312,13 @@ async_sequential! 
{ input_value - payments.iter().map(|payment| payment.1).sum::() - needed_fee; assert_eq!( tx.output[2], - TxOut { script_pubkey: change_addr.script_pubkey(), value: Amount::from_sat(change_amount) }, + TxOut { script_pubkey: change_addr, value: Amount::from_sat(change_amount) }, ); // This also tests send_raw_transaction and get_transaction, which the RPC test can't // effectively test rpc.send_raw_transaction(&tx).await.unwrap(); - let mut hash = *tx.txid().as_raw_hash().as_byte_array(); + let mut hash = *tx.compute_txid().as_raw_hash().as_byte_array(); hash.reverse(); assert_eq!(tx, rpc.get_transaction(&hash).await.unwrap()); assert_eq!(expected_id, hash); @@ -344,7 +342,7 @@ async_sequential! { &SignableTransaction::new( vec![output], &[], - Some(&Address::::new(Network::Regtest, address_payload(key).unwrap())), + Some(p2tr_script_buf(key).unwrap()), Some(data.clone()), FEE ).unwrap() diff --git a/coins/ethereum/.gitignore b/coins/ethereum/.gitignore index 6ff358616..2dccdce9b 100644 --- a/coins/ethereum/.gitignore +++ b/coins/ethereum/.gitignore @@ -1,3 +1,3 @@ -# solidity build outputs +# Solidity build outputs cache artifacts diff --git a/coins/ethereum/Cargo.toml b/coins/ethereum/Cargo.toml index 1d1c6dbbd..3366f0720 100644 --- a/coins/ethereum/Cargo.toml +++ b/coins/ethereum/Cargo.toml @@ -18,25 +18,32 @@ workspace = true [dependencies] thiserror = { version = "1", default-features = false } -eyre = { version = "0.6", default-features = false } -sha3 = { version = "0.10", default-features = false, features = ["std"] } +rand_core = { version = "0.6", default-features = false, features = ["std"] } + +transcript = { package = "flexible-transcript", path = "../../crypto/transcript", default-features = false, features = ["recommended"] } group = { version = "0.13", default-features = false } -k256 = { version = "^0.13.1", default-features = false, features = ["std", "ecdsa"] } -frost = { package = "modular-frost", path = "../../crypto/frost", features = 
["secp256k1", "tests"] } +k256 = { version = "^0.13.1", default-features = false, features = ["std", "ecdsa", "arithmetic"] } +frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false, features = ["secp256k1"] } + +alloy-core = { version = "0.7", default-features = false } +alloy-sol-types = { version = "0.7", default-features = false, features = ["json"] } +alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9bc51c8021ea08535694c44de84222f474e", default-features = false, features = ["k256"] } +alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9bc51c8021ea08535694c44de84222f474e", default-features = false } +alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9bc51c8021ea08535694c44de84222f474e", default-features = false } +alloy-rpc-client = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9bc51c8021ea08535694c44de84222f474e", default-features = false } +alloy-simple-request-transport = { path = "./alloy-simple-request-transport", default-features = false } +alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9bc51c8021ea08535694c44de84222f474e", default-features = false } -ethers-core = { version = "2", default-features = false } -ethers-providers = { version = "2", default-features = false } -ethers-contract = { version = "2", default-features = false, features = ["abigen", "providers"] } +alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9bc51c8021ea08535694c44de84222f474e", default-features = false, optional = true } [dev-dependencies] -rand_core = { version = "0.6", default-features = false, features = ["std"] } +frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false, features = ["tests"] } -hex = { version = "0.4", default-features = false, features = ["std"] } -serde = { version = "1", default-features = false, features = ["std"] } -serde_json = { version = "1", 
default-features = false, features = ["std"] } +tokio = { version = "1", features = ["macros"] } -sha2 = { version = "0.10", default-features = false, features = ["std"] } +alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9bc51c8021ea08535694c44de84222f474e", default-features = false } -tokio = { version = "1", features = ["macros"] } +[features] +tests = ["alloy-node-bindings", "frost/tests"] diff --git a/coins/ethereum/README.md b/coins/ethereum/README.md index 13f1f2db2..0090b26bd 100644 --- a/coins/ethereum/README.md +++ b/coins/ethereum/README.md @@ -3,6 +3,12 @@ This package contains Ethereum-related functionality, specifically deploying and interacting with Serai contracts. +While `monero-serai` and `bitcoin-serai` are general purpose libraries, +`ethereum-serai` is Serai specific. If any of the utilities are generally +desired, please fork and maintain your own copy to ensure the desired +functionality is preserved, or open an issue to request we make this library +general purpose. 
+ ### Dependencies - solc diff --git a/coins/ethereum/alloy-simple-request-transport/Cargo.toml b/coins/ethereum/alloy-simple-request-transport/Cargo.toml new file mode 100644 index 000000000..f44427f79 --- /dev/null +++ b/coins/ethereum/alloy-simple-request-transport/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "alloy-simple-request-transport" +version = "0.1.0" +description = "A transport for alloy based off simple-request" +license = "MIT" +repository = "https://github.com/serai-dex/serai/tree/develop/coins/ethereum/alloy-simple-request-transport" +authors = ["Luke Parker "] +edition = "2021" +rust-version = "1.74" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +tower = "0.4" + +serde_json = { version = "1", default-features = false } +simple-request = { path = "../../../common/request", default-features = false } + +alloy-json-rpc = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9bc51c8021ea08535694c44de84222f474e", default-features = false } +alloy-transport = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9bc51c8021ea08535694c44de84222f474e", default-features = false } + +[features] +default = ["tls"] +tls = ["simple-request/tls"] diff --git a/coins/ethereum/alloy-simple-request-transport/LICENSE b/coins/ethereum/alloy-simple-request-transport/LICENSE new file mode 100644 index 000000000..659881f1a --- /dev/null +++ b/coins/ethereum/alloy-simple-request-transport/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2024 Luke Parker + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the 
following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/coins/ethereum/alloy-simple-request-transport/README.md b/coins/ethereum/alloy-simple-request-transport/README.md new file mode 100644 index 000000000..372540d16 --- /dev/null +++ b/coins/ethereum/alloy-simple-request-transport/README.md @@ -0,0 +1,4 @@ +# Alloy Simple Request Transport + +A transport for alloy based on simple-request, a small HTTP client built around +hyper. 
diff --git a/coins/ethereum/alloy-simple-request-transport/src/lib.rs b/coins/ethereum/alloy-simple-request-transport/src/lib.rs new file mode 100644 index 000000000..93b35bc1a --- /dev/null +++ b/coins/ethereum/alloy-simple-request-transport/src/lib.rs @@ -0,0 +1,60 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] + +use core::task; +use std::io; + +use alloy_json_rpc::{RequestPacket, ResponsePacket}; +use alloy_transport::{TransportError, TransportErrorKind, TransportFut}; + +use simple_request::{hyper, Request, Client}; + +use tower::Service; + +#[derive(Clone, Debug)] +pub struct SimpleRequest { + client: Client, + url: String, +} + +impl SimpleRequest { + pub fn new(url: String) -> Self { + Self { client: Client::with_connection_pool(), url } + } +} + +impl Service for SimpleRequest { + type Response = ResponsePacket; + type Error = TransportError; + type Future = TransportFut<'static>; + + #[inline] + fn poll_ready(&mut self, _cx: &mut task::Context<'_>) -> task::Poll> { + task::Poll::Ready(Ok(())) + } + + #[inline] + fn call(&mut self, req: RequestPacket) -> Self::Future { + let inner = self.clone(); + Box::pin(async move { + let packet = req.serialize().map_err(TransportError::SerError)?; + let request = Request::from( + hyper::Request::post(&inner.url) + .header("Content-Type", "application/json") + .body(serde_json::to_vec(&packet).map_err(TransportError::SerError)?.into()) + .unwrap(), + ); + + let mut res = inner + .client + .request(request) + .await + .map_err(|e| TransportErrorKind::custom(io::Error::other(format!("{e:?}"))))? 
+ .body() + .await + .map_err(|e| TransportErrorKind::custom(io::Error::other(format!("{e:?}"))))?; + + serde_json::from_reader(&mut res).map_err(|e| TransportError::deser_err(e, "")) + }) + } +} diff --git a/coins/ethereum/build.rs b/coins/ethereum/build.rs index 2166f6ad2..38fcfe002 100644 --- a/coins/ethereum/build.rs +++ b/coins/ethereum/build.rs @@ -1,15 +1,41 @@ +use std::process::Command; + fn main() { - println!("cargo:rerun-if-changed=contracts"); - println!("cargo:rerun-if-changed=artifacts"); + println!("cargo:rerun-if-changed=contracts/*"); + println!("cargo:rerun-if-changed=artifacts/*"); + + for line in String::from_utf8(Command::new("solc").args(["--version"]).output().unwrap().stdout) + .unwrap() + .lines() + { + if let Some(version) = line.strip_prefix("Version: ") { + let version = version.split('+').next().unwrap(); + assert_eq!(version, "0.8.25"); + } + } #[rustfmt::skip] let args = [ "--base-path", ".", "-o", "./artifacts", "--overwrite", "--bin", "--abi", - "--optimize", - "./contracts/Schnorr.sol" - ]; + "--via-ir", "--optimize", + + "./contracts/IERC20.sol", - assert!(std::process::Command::new("solc").args(args).status().unwrap().success()); + "./contracts/Schnorr.sol", + "./contracts/Deployer.sol", + "./contracts/Sandbox.sol", + "./contracts/Router.sol", + + "./src/tests/contracts/Schnorr.sol", + "./src/tests/contracts/ERC20.sol", + + "--no-color", + ]; + let solc = Command::new("solc").args(args).output().unwrap(); + assert!(solc.status.success()); + for line in String::from_utf8(solc.stderr).unwrap().lines() { + assert!(!line.starts_with("Error:")); + } } diff --git a/coins/ethereum/contracts/Deployer.sol b/coins/ethereum/contracts/Deployer.sol new file mode 100644 index 000000000..475be4c1b --- /dev/null +++ b/coins/ethereum/contracts/Deployer.sol @@ -0,0 +1,52 @@ +// SPDX-License-Identifier: AGPLv3 +pragma solidity ^0.8.0; + +/* +The expected deployment process of the Router is as follows: + +1) A transaction deploying Deployer is 
made. Then, a deterministic signature is + created such that an account with an unknown private key is the creator of + the contract. Anyone can fund this address, and once anyone does, the + transaction deploying Deployer can be published by anyone. No other + transaction may be made from that account. + +2) Anyone deploys the Router through the Deployer. This uses a sequential nonce + such that meet-in-the-middle attacks, with complexity 2**80, aren't feasible. + While such attacks would still be feasible if the Deployer's address was + controllable, the usage of a deterministic signature with a NUMS method + prevents that. + +This doesn't have any denial-of-service risks and will resolve once anyone steps +forward as deployer. This does fail to guarantee an identical address across +every chain, though it enables letting anyone efficiently ask the Deployer for +the address (with the Deployer having an identical address on every chain). + +Unfortunately, guaranteeing identical addresses aren't feasible. We'd need the +Deployer contract to use a consistent salt for the Router, yet the Router must +be deployed with a specific public key for Serai. Since Ethereum isn't able to +determine a valid public key (one the result of a Serai DKG) from a dishonest +public key, we have to allow multiple deployments with Serai being the one to +determine which to use. + +The alternative would be to have a council publish the Serai key on-Ethereum, +with Serai verifying the published result. This would introduce a DoS risk in +the council not publishing the correct key/not publishing any key. 
+*/ + +contract Deployer { + event Deployment(bytes32 indexed init_code_hash, address created); + + error DeploymentFailed(); + + function deploy(bytes memory init_code) external { + address created; + assembly { + created := create(0, add(init_code, 0x20), mload(init_code)) + } + if (created == address(0)) { + revert DeploymentFailed(); + } + // These may be emitted out of order upon re-entrancy + emit Deployment(keccak256(init_code), created); + } +} diff --git a/coins/ethereum/contracts/IERC20.sol b/coins/ethereum/contracts/IERC20.sol new file mode 100644 index 000000000..70f1f93c9 --- /dev/null +++ b/coins/ethereum/contracts/IERC20.sol @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: CC0 +pragma solidity ^0.8.0; + +interface IERC20 { + event Transfer(address indexed from, address indexed to, uint256 value); + event Approval(address indexed owner, address indexed spender, uint256 value); + + function name() external view returns (string memory); + function symbol() external view returns (string memory); + function decimals() external view returns (uint8); + + function totalSupply() external view returns (uint256); + + function balanceOf(address owner) external view returns (uint256); + function transfer(address to, uint256 value) external returns (bool); + function transferFrom(address from, address to, uint256 value) external returns (bool); + + function approve(address spender, uint256 value) external returns (bool); + function allowance(address owner, address spender) external view returns (uint256); +} diff --git a/coins/ethereum/contracts/Router.sol b/coins/ethereum/contracts/Router.sol new file mode 100644 index 000000000..c5e1efa2f --- /dev/null +++ b/coins/ethereum/contracts/Router.sol @@ -0,0 +1,222 @@ +// SPDX-License-Identifier: AGPLv3 +pragma solidity ^0.8.0; + +import "./IERC20.sol"; + +import "./Schnorr.sol"; +import "./Sandbox.sol"; + +contract Router { + // Nonce is incremented for each batch of transactions executed/key update + uint256 public 
nonce; + + // Current public key's x-coordinate + // This key must always have the parity defined within the Schnorr contract + bytes32 public seraiKey; + + struct OutInstruction { + address to; + Call[] calls; + + uint256 value; + } + + struct Signature { + bytes32 c; + bytes32 s; + } + + event SeraiKeyUpdated( + uint256 indexed nonce, + bytes32 indexed key, + Signature signature + ); + event InInstruction( + address indexed from, + address indexed coin, + uint256 amount, + bytes instruction + ); + // success is a uint256 representing a bitfield of transaction successes + event Executed( + uint256 indexed nonce, + bytes32 indexed batch, + uint256 success, + Signature signature + ); + + // error types + error InvalidKey(); + error InvalidSignature(); + error InvalidAmount(); + error FailedTransfer(); + error TooManyTransactions(); + + modifier _updateSeraiKeyAtEndOfFn( + uint256 _nonce, + bytes32 key, + Signature memory sig + ) { + if ( + (key == bytes32(0)) || + ((bytes32(uint256(key) % Schnorr.Q)) != key) + ) { + revert InvalidKey(); + } + + _; + + seraiKey = key; + emit SeraiKeyUpdated(_nonce, key, sig); + } + + constructor(bytes32 _seraiKey) _updateSeraiKeyAtEndOfFn( + 0, + _seraiKey, + Signature({ c: bytes32(0), s: bytes32(0) }) + ) { + nonce = 1; + } + + // updateSeraiKey validates the given Schnorr signature against the current + // public key, and if successful, updates the contract's public key to the + // given one. 
+ function updateSeraiKey( + bytes32 _seraiKey, + Signature calldata sig + ) external _updateSeraiKeyAtEndOfFn(nonce, _seraiKey, sig) { + bytes memory message = + abi.encodePacked("updateSeraiKey", block.chainid, nonce, _seraiKey); + nonce++; + + if (!Schnorr.verify(seraiKey, message, sig.c, sig.s)) { + revert InvalidSignature(); + } + } + + function inInstruction( + address coin, + uint256 amount, + bytes memory instruction + ) external payable { + if (coin == address(0)) { + if (amount != msg.value) { + revert InvalidAmount(); + } + } else { + (bool success, bytes memory res) = + address(coin).call( + abi.encodeWithSelector( + IERC20.transferFrom.selector, + msg.sender, + address(this), + amount + ) + ); + + // Require there was nothing returned, which is done by some non-standard + // tokens, or that the ERC20 contract did in fact return true + bool nonStandardResOrTrue = + (res.length == 0) || abi.decode(res, (bool)); + if (!(success && nonStandardResOrTrue)) { + revert FailedTransfer(); + } + } + + /* + Due to fee-on-transfer tokens, emitting the amount directly is frowned upon. + The amount instructed to transfer may not actually be the amount + transferred. + + If we add nonReentrant to every single function which can effect the + balance, we can check the amount exactly matches. This prevents transfers of + less value than expected occurring, at least, not without an additional + transfer to top up the difference (which isn't routed through this contract + and accordingly isn't trying to artificially create events). + + If we don't add nonReentrant, a transfer can be started, and then a new + transfer for the difference can follow it up (again and again until a + rounding error is reached). This contract would believe all transfers were + done in full, despite each only being done in part (except for the last + one). 
+ + Given fee-on-transfer tokens aren't intended to be supported, the only + token planned to be supported is Dai and it doesn't have any fee-on-transfer + logic, fee-on-transfer tokens aren't even able to be supported at this time, + we simply classify this entire class of tokens as non-standard + implementations which induce undefined behavior. It is the Serai network's + role not to add support for any non-standard implementations. + */ + emit InInstruction(msg.sender, coin, amount, instruction); + } + + // execute accepts a list of transactions to execute as well as a signature. + // if signature verification passes, the given transactions are executed. + // if signature verification fails, this function will revert. + function execute( + OutInstruction[] calldata transactions, + Signature calldata sig + ) external { + if (transactions.length > 256) { + revert TooManyTransactions(); + } + + bytes memory message = + abi.encode("execute", block.chainid, nonce, transactions); + uint256 executed_with_nonce = nonce; + // This prevents re-entrancy from causing double spends yet does allow + // out-of-order execution via re-entrancy + nonce++; + + if (!Schnorr.verify(seraiKey, message, sig.c, sig.s)) { + revert InvalidSignature(); + } + + uint256 successes; + for (uint256 i = 0; i < transactions.length; i++) { + bool success; + + // If there are no calls, send to `to` the value + if (transactions[i].calls.length == 0) { + (success, ) = transactions[i].to.call{ + value: transactions[i].value, + gas: 5_000 + }(""); + } else { + // If there are calls, ignore `to`. 
Deploy a new Sandbox and proxy the + // calls through that + // + // We could use a single sandbox in order to reduce gas costs, yet that + // risks one person creating an approval that's hooked before another + // user's intended action executes, in order to drain their coins + // + // While technically, that would be a flaw in the sandboxed flow, this + // is robust and prevents such flaws from being possible + // + // We also don't want people to set state via the Sandbox and expect it + // future available when anyone else could set a distinct value + Sandbox sandbox = new Sandbox(); + (success, ) = address(sandbox).call{ + value: transactions[i].value, + // TODO: Have the Call specify the gas up front + gas: 350_000 + }( + abi.encodeWithSelector( + Sandbox.sandbox.selector, + transactions[i].calls + ) + ); + } + + assembly { + successes := or(successes, shl(i, success)) + } + } + emit Executed( + executed_with_nonce, + keccak256(message), + successes, + sig + ); + } +} diff --git a/coins/ethereum/contracts/Sandbox.sol b/coins/ethereum/contracts/Sandbox.sol new file mode 100644 index 000000000..a82a3afda --- /dev/null +++ b/coins/ethereum/contracts/Sandbox.sol @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: AGPLv3 +pragma solidity ^0.8.24; + +struct Call { + address to; + uint256 value; + bytes data; +} + +// A minimal sandbox focused on gas efficiency. +// +// The first call is executed if any of the calls fail, making it a fallback. +// All other calls are executed sequentially. 
+contract Sandbox { + error AlreadyCalled(); + error CallsFailed(); + + function sandbox(Call[] calldata calls) external payable { + // Prevent re-entrancy due to this executing arbitrary calls from anyone + // and anywhere + bool called; + assembly { called := tload(0) } + if (called) { + revert AlreadyCalled(); + } + assembly { tstore(0, 1) } + + // Execute the calls, starting from 1 + for (uint256 i = 1; i < calls.length; i++) { + (bool success, ) = + calls[i].to.call{ value: calls[i].value }(calls[i].data); + + // If this call failed, execute the fallback (call 0) + if (!success) { + (success, ) = + calls[0].to.call{ value: address(this).balance }(calls[0].data); + // If this call also failed, revert entirely + if (!success) { + revert CallsFailed(); + } + return; + } + } + + // We don't clear the re-entrancy guard as this contract should never be + // called again, so there's no reason to spend the effort + } +} diff --git a/coins/ethereum/contracts/Schnorr.sol b/coins/ethereum/contracts/Schnorr.sol index 3f0196b27..8edcdffd6 100644 --- a/coins/ethereum/contracts/Schnorr.sol +++ b/coins/ethereum/contracts/Schnorr.sol @@ -1,36 +1,44 @@ -//SPDX-License-Identifier: AGPLv3 +// SPDX-License-Identifier: AGPLv3 pragma solidity ^0.8.0; // see https://github.com/noot/schnorr-verify for implementation details -contract Schnorr { +library Schnorr { // secp256k1 group order uint256 constant public Q = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141; - // parity := public key y-coord parity (27 or 28) - // px := public key x-coord - // message := 32-byte message + // Fixed parity for the public keys used in this contract + // This avoids spending a word passing the parity in a similar style to + // Bitcoin's Taproot + uint8 constant public KEY_PARITY = 27; + + error InvalidSOrA(); + error MalformedSignature(); + + // px := public key x-coord, where the public key has a parity of KEY_PARITY + // message := 32-byte hash of the message + // c := schnorr 
signature challenge // s := schnorr signature - // e := schnorr signature challenge function verify( - uint8 parity, bytes32 px, - bytes32 message, - bytes32 s, - bytes32 e - ) public view returns (bool) { - // ecrecover = (m, v, r, s); - bytes32 sp = bytes32(Q - mulmod(uint256(s), uint256(px), Q)); - bytes32 ep = bytes32(Q - mulmod(uint256(e), uint256(px), Q)); + bytes memory message, + bytes32 c, + bytes32 s + ) internal pure returns (bool) { + // ecrecover = (m, v, r, s) -> key + // We instead pass the following to obtain the nonce (not the key) + // Then we hash it and verify it matches the challenge + bytes32 sa = bytes32(Q - mulmod(uint256(s), uint256(px), Q)); + bytes32 ca = bytes32(Q - mulmod(uint256(c), uint256(px), Q)); + + // For safety, we want each input to ecrecover to be 0 (sa, px, ca) + // The ecreover precomple checks `r` and `s` (`px` and `ca`) are non-zero + // That leaves us to check `sa` are non-zero + if (sa == 0) revert InvalidSOrA(); + address R = ecrecover(sa, KEY_PARITY, px, ca); + if (R == address(0)) revert MalformedSignature(); - require(sp != 0); - // the ecrecover precompile implementation checks that the `r` and `s` - // inputs are non-zero (in this case, `px` and `ep`), thus we don't need to - // check if they're zero.will make me - address R = ecrecover(sp, parity, px, ep); - require(R != address(0), "ecrecover failed"); - return e == keccak256( - abi.encodePacked(R, uint8(parity), px, block.chainid, message) - ); + // Check the signature is correct by rebuilding the challenge + return c == keccak256(abi.encodePacked(R, px, message)); } } diff --git a/coins/ethereum/relayer/Cargo.toml b/coins/ethereum/relayer/Cargo.toml new file mode 100644 index 000000000..22c200760 --- /dev/null +++ b/coins/ethereum/relayer/Cargo.toml @@ -0,0 +1,30 @@ +[package] +name = "serai-ethereum-relayer" +version = "0.1.0" +description = "A relayer for Serai's Ethereum transactions" +license = "AGPL-3.0-only" +repository = 
"https://github.com/serai-dex/serai/tree/develop/coins/ethereum/relayer" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +publish = false + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +log = { version = "0.4", default-features = false, features = ["std"] } +env_logger = { version = "0.10", default-features = false, features = ["humantime"] } + +tokio = { version = "1", default-features = false, features = ["rt", "time", "io-util", "net", "macros"] } + +serai-env = { path = "../../../common/env" } +serai-db = { path = "../../../common/db" } + +[features] +parity-db = ["serai-db/parity-db"] +rocksdb = ["serai-db/rocksdb"] diff --git a/coins/ethereum/relayer/LICENSE b/coins/ethereum/relayer/LICENSE new file mode 100644 index 000000000..26d57cbb3 --- /dev/null +++ b/coins/ethereum/relayer/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2023-2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . diff --git a/coins/ethereum/relayer/README.md b/coins/ethereum/relayer/README.md new file mode 100644 index 000000000..beed4b724 --- /dev/null +++ b/coins/ethereum/relayer/README.md @@ -0,0 +1,4 @@ +# Ethereum Transaction Relayer + +This server collects Ethereum router commands to be published, offering an RPC +to fetch them. 
diff --git a/coins/ethereum/relayer/src/main.rs b/coins/ethereum/relayer/src/main.rs new file mode 100644 index 000000000..545930040 --- /dev/null +++ b/coins/ethereum/relayer/src/main.rs @@ -0,0 +1,100 @@ +pub(crate) use tokio::{ + io::{AsyncReadExt, AsyncWriteExt}, + net::TcpListener, +}; + +use serai_db::{Get, DbTxn, Db as DbTrait}; + +#[tokio::main(flavor = "current_thread")] +async fn main() { + // Override the panic handler with one which will panic if any tokio task panics + { + let existing = std::panic::take_hook(); + std::panic::set_hook(Box::new(move |panic| { + existing(panic); + const MSG: &str = "exiting the process due to a task panicking"; + println!("{MSG}"); + log::error!("{MSG}"); + std::process::exit(1); + })); + } + + if std::env::var("RUST_LOG").is_err() { + std::env::set_var("RUST_LOG", serai_env::var("RUST_LOG").unwrap_or_else(|| "info".to_string())); + } + env_logger::init(); + + log::info!("Starting Ethereum relayer server..."); + + // Open the DB + #[allow(unused_variables, unreachable_code)] + let db = { + #[cfg(all(feature = "parity-db", feature = "rocksdb"))] + panic!("built with parity-db and rocksdb"); + #[cfg(all(feature = "parity-db", not(feature = "rocksdb")))] + let db = + serai_db::new_parity_db(&serai_env::var("DB_PATH").expect("path to DB wasn't specified")); + #[cfg(feature = "rocksdb")] + let db = + serai_db::new_rocksdb(&serai_env::var("DB_PATH").expect("path to DB wasn't specified")); + db + }; + + // Start command recipience server + // This should not be publicly exposed + // TODO: Add auth + tokio::spawn({ + let db = db.clone(); + async move { + // 5132 ^ ((b'E' << 8) | b'R') + let server = TcpListener::bind("0.0.0.0:20830").await.unwrap(); + loop { + let (mut socket, _) = server.accept().await.unwrap(); + let db = db.clone(); + tokio::spawn(async move { + let mut db = db.clone(); + loop { + let Ok(msg_len) = socket.read_u32_le().await else { break }; + let mut buf = vec![0; usize::try_from(msg_len).unwrap()]; + let 
Ok(_) = socket.read_exact(&mut buf).await else { break }; + + if buf.len() < 5 { + break; + } + let nonce = u32::from_le_bytes(buf[.. 4].try_into().unwrap()); + let mut txn = db.txn(); + txn.put(nonce.to_le_bytes(), &buf[4 ..]); + txn.commit(); + + let Ok(()) = socket.write_all(&[1]).await else { break }; + + log::info!("received signed command #{nonce}"); + } + }); + } + } + }); + + // Start command fetch server + // 5132 ^ ((b'E' << 8) | b'R') + 1 + let server = TcpListener::bind("0.0.0.0:20831").await.unwrap(); + loop { + let (mut socket, _) = server.accept().await.unwrap(); + let db = db.clone(); + tokio::spawn(async move { + let db = db.clone(); + loop { + // Nonce to get the router comamnd for + let mut buf = vec![0; 4]; + let Ok(_) = socket.read_exact(&mut buf).await else { break }; + + let command = db.get(&buf[.. 4]).unwrap_or(vec![]); + let Ok(()) = socket.write_all(&u32::try_from(command.len()).unwrap().to_le_bytes()).await + else { + break; + }; + let Ok(()) = socket.write_all(&command).await else { break }; + } + }); + } +} diff --git a/coins/ethereum/src/abi/mod.rs b/coins/ethereum/src/abi/mod.rs new file mode 100644 index 000000000..1ae233743 --- /dev/null +++ b/coins/ethereum/src/abi/mod.rs @@ -0,0 +1,37 @@ +use alloy_sol_types::sol; + +#[rustfmt::skip] +#[allow(warnings)] +#[allow(needless_pass_by_value)] +#[allow(clippy::all)] +#[allow(clippy::ignored_unit_patterns)] +#[allow(clippy::redundant_closure_for_method_calls)] +mod erc20_container { + use super::*; + sol!("contracts/IERC20.sol"); +} +pub use erc20_container::IERC20 as erc20; + +#[rustfmt::skip] +#[allow(warnings)] +#[allow(needless_pass_by_value)] +#[allow(clippy::all)] +#[allow(clippy::ignored_unit_patterns)] +#[allow(clippy::redundant_closure_for_method_calls)] +mod deployer_container { + use super::*; + sol!("contracts/Deployer.sol"); +} +pub use deployer_container::Deployer as deployer; + +#[rustfmt::skip] +#[allow(warnings)] +#[allow(needless_pass_by_value)] +#[allow(clippy::all)] 
+#[allow(clippy::ignored_unit_patterns)] +#[allow(clippy::redundant_closure_for_method_calls)] +mod router_container { + use super::*; + sol!(Router, "artifacts/Router.abi"); +} +pub use router_container::Router as router; diff --git a/coins/ethereum/src/contract.rs b/coins/ethereum/src/contract.rs deleted file mode 100644 index 80093b084..000000000 --- a/coins/ethereum/src/contract.rs +++ /dev/null @@ -1,36 +0,0 @@ -use thiserror::Error; -use eyre::{eyre, Result}; - -use ethers_providers::{Provider, Http}; -use ethers_contract::abigen; - -use crate::crypto::ProcessedSignature; - -#[derive(Error, Debug)] -pub enum EthereumError { - #[error("failed to verify Schnorr signature")] - VerificationError, -} - -abigen!(Schnorr, "./artifacts/Schnorr.abi"); - -pub async fn call_verify( - contract: &Schnorr>, - params: &ProcessedSignature, -) -> Result<()> { - if contract - .verify( - params.parity + 27, - params.px.to_bytes().into(), - params.message, - params.s.to_bytes().into(), - params.e.to_bytes().into(), - ) - .call() - .await? 
- { - Ok(()) - } else { - Err(eyre!(EthereumError::VerificationError)) - } -} diff --git a/coins/ethereum/src/crypto.rs b/coins/ethereum/src/crypto.rs index 3e9d50fa4..6ea6a0b04 100644 --- a/coins/ethereum/src/crypto.rs +++ b/coins/ethereum/src/crypto.rs @@ -1,107 +1,188 @@ -use sha3::{Digest, Keccak256}; - -use group::Group; +use group::ff::PrimeField; use k256::{ - elliptic_curve::{ - bigint::ArrayEncoding, ops::Reduce, point::DecompressPoint, sec1::ToEncodedPoint, - }, - AffinePoint, ProjectivePoint, Scalar, U256, + elliptic_curve::{ops::Reduce, point::AffineCoordinates, sec1::ToEncodedPoint}, + ProjectivePoint, Scalar, U256 as KU256, +}; +#[cfg(test)] +use k256::{elliptic_curve::point::DecompressPoint, AffinePoint}; + +use frost::{ + algorithm::{Hram, SchnorrSignature}, + curve::{Ciphersuite, Secp256k1}, }; -use frost::{algorithm::Hram, curve::Secp256k1}; +use alloy_core::primitives::{Parity, Signature as AlloySignature}; +use alloy_consensus::{SignableTransaction, Signed, TxLegacy}; -pub fn keccak256(data: &[u8]) -> [u8; 32] { - Keccak256::digest(data).into() +use crate::abi::router::{Signature as AbiSignature}; + +pub(crate) fn keccak256(data: &[u8]) -> [u8; 32] { + alloy_core::primitives::keccak256(data).into() } -pub fn hash_to_scalar(data: &[u8]) -> Scalar { - Scalar::reduce(U256::from_be_slice(&keccak256(data))) +pub(crate) fn hash_to_scalar(data: &[u8]) -> Scalar { + >::reduce_bytes(&keccak256(data).into()) } pub fn address(point: &ProjectivePoint) -> [u8; 20] { let encoded_point = point.to_encoded_point(false); - keccak256(&encoded_point.as_ref()[1 .. 65])[12 .. 32].try_into().unwrap() + // Last 20 bytes of the hash of the concatenated x and y coordinates + // We obtain the concatenated x and y coordinates via the uncompressed encoding of the point + keccak256(&encoded_point.as_ref()[1 .. 
65])[12 ..].try_into().unwrap() } -pub fn ecrecover(message: Scalar, v: u8, r: Scalar, s: Scalar) -> Option<[u8; 20]> { - if r.is_zero().into() || s.is_zero().into() { - return None; +/// Deterministically sign a transaction. +/// +/// This function panics if passed a transaction with a non-None chain ID. +pub fn deterministically_sign(tx: &TxLegacy) -> Signed { + assert!( + tx.chain_id.is_none(), + "chain ID was Some when deterministically signing a TX (causing a non-deterministic signer)" + ); + + let sig_hash = tx.signature_hash().0; + let mut r = hash_to_scalar(&[sig_hash.as_slice(), b"r"].concat()); + let mut s = hash_to_scalar(&[sig_hash.as_slice(), b"s"].concat()); + loop { + let r_bytes: [u8; 32] = r.to_repr().into(); + let s_bytes: [u8; 32] = s.to_repr().into(); + let v = Parity::NonEip155(false); + let signature = + AlloySignature::from_scalars_and_parity(r_bytes.into(), s_bytes.into(), v).unwrap(); + let tx = tx.clone().into_signed(signature); + if tx.recover_signer().is_ok() { + return tx; + } + + // Re-hash until valid + r = hash_to_scalar(r_bytes.as_ref()); + s = hash_to_scalar(s_bytes.as_ref()); } +} +/// The public key for a Schnorr-signing account. +#[allow(non_snake_case)] +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub struct PublicKey { + pub(crate) A: ProjectivePoint, + pub(crate) px: Scalar, +} + +impl PublicKey { + /// Construct a new `PublicKey`. + /// + /// This will return None if the provided point isn't eligible to be a public key (due to + /// bounds such as parity). 
#[allow(non_snake_case)] - let R = AffinePoint::decompress(&r.to_bytes(), v.into()); - #[allow(non_snake_case)] - if let Some(R) = Option::::from(R) { - #[allow(non_snake_case)] - let R = ProjectivePoint::from(R); - - let r = r.invert().unwrap(); - let u1 = ProjectivePoint::GENERATOR * (-message * r); - let u2 = R * (s * r); - let key: ProjectivePoint = u1 + u2; - if !bool::from(key.is_identity()) { - return Some(address(&key)); + pub fn new(A: ProjectivePoint) -> Option { + let affine = A.to_affine(); + // Only allow even keys to save a word within Ethereum + let is_odd = bool::from(affine.y_is_odd()); + if is_odd { + None?; + } + + let x_coord = affine.x(); + let x_coord_scalar = >::reduce_bytes(&x_coord); + // Return None if a reduction would occur + // Reductions would be incredibly unlikely and shouldn't be an issue, yet it's one less + // headache/concern to have + // This does ban a trivial amoount of public keys + if x_coord_scalar.to_repr() != x_coord { + None?; } + + Some(PublicKey { A, px: x_coord_scalar }) + } + + pub fn point(&self) -> ProjectivePoint { + self.A + } + + pub(crate) fn eth_repr(&self) -> [u8; 32] { + self.px.to_repr().into() } - None + #[cfg(test)] + pub(crate) fn from_eth_repr(repr: [u8; 32]) -> Option { + #[allow(non_snake_case)] + let A = Option::::from(AffinePoint::decompress(&repr.into(), 0.into()))?.into(); + Option::from(Scalar::from_repr(repr.into())).map(|px| PublicKey { A, px }) + } } +/// The HRAm to use for the Schnorr contract. 
#[derive(Clone, Default)] pub struct EthereumHram {} impl Hram for EthereumHram { #[allow(non_snake_case)] fn hram(R: &ProjectivePoint, A: &ProjectivePoint, m: &[u8]) -> Scalar { - let a_encoded_point = A.to_encoded_point(true); - let mut a_encoded = a_encoded_point.as_ref().to_owned(); - a_encoded[0] += 25; // Ethereum uses 27/28 for point parity + let x_coord = A.to_affine().x(); + let mut data = address(R).to_vec(); - data.append(&mut a_encoded); - data.append(&mut m.to_vec()); - Scalar::reduce(U256::from_be_slice(&keccak256(&data))) + data.extend(x_coord.as_slice()); + data.extend(m); + + >::reduce_bytes(&keccak256(&data).into()) } } -pub struct ProcessedSignature { - pub s: Scalar, - pub px: Scalar, - pub parity: u8, - pub message: [u8; 32], - pub e: Scalar, +/// A signature for the Schnorr contract. +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub struct Signature { + pub(crate) c: Scalar, + pub(crate) s: Scalar, } +impl Signature { + pub fn verify(&self, public_key: &PublicKey, message: &[u8]) -> bool { + #[allow(non_snake_case)] + let R = (Secp256k1::generator() * self.s) - (public_key.A * self.c); + EthereumHram::hram(&R, &public_key.A, message) == self.c + } -#[allow(non_snake_case)] -pub fn preprocess_signature_for_ecrecover( - m: [u8; 32], - R: &ProjectivePoint, - s: Scalar, - A: &ProjectivePoint, - chain_id: U256, -) -> (Scalar, Scalar) { - let processed_sig = process_signature_for_contract(m, R, s, A, chain_id); - let sr = processed_sig.s.mul(&processed_sig.px).negate(); - let er = processed_sig.e.mul(&processed_sig.px).negate(); - (sr, er) -} + /// Construct a new `Signature`. + /// + /// This will return None if the signature is invalid. 
+ pub fn new( + public_key: &PublicKey, + message: &[u8], + signature: SchnorrSignature, + ) -> Option { + let c = EthereumHram::hram(&signature.R, &public_key.A, message); + if !signature.verify(public_key.A, c) { + None?; + } -#[allow(non_snake_case)] -pub fn process_signature_for_contract( - m: [u8; 32], - R: &ProjectivePoint, - s: Scalar, - A: &ProjectivePoint, - chain_id: U256, -) -> ProcessedSignature { - let encoded_pk = A.to_encoded_point(true); - let px = &encoded_pk.as_ref()[1 .. 33]; - let px_scalar = Scalar::reduce(U256::from_be_slice(px)); - let e = EthereumHram::hram(R, A, &[chain_id.to_be_byte_array().as_slice(), &m].concat()); - ProcessedSignature { - s, - px: px_scalar, - parity: &encoded_pk.as_ref()[0] - 2, - #[allow(non_snake_case)] - message: m, - e, + let res = Signature { c, s: signature.s }; + assert!(res.verify(public_key, message)); + Some(res) + } + + pub fn c(&self) -> Scalar { + self.c + } + pub fn s(&self) -> Scalar { + self.s + } + + pub fn to_bytes(&self) -> [u8; 64] { + let mut res = [0; 64]; + res[.. 
32].copy_from_slice(self.c.to_repr().as_ref()); + res[32 ..].copy_from_slice(self.s.to_repr().as_ref()); + res + } + + pub fn from_bytes(bytes: [u8; 64]) -> std::io::Result { + let mut reader = bytes.as_slice(); + let c = Secp256k1::read_F(&mut reader)?; + let s = Secp256k1::read_F(&mut reader)?; + Ok(Signature { c, s }) + } +} +impl From<&Signature> for AbiSignature { + fn from(sig: &Signature) -> AbiSignature { + let c: [u8; 32] = sig.c.to_repr().into(); + let s: [u8; 32] = sig.s.to_repr().into(); + AbiSignature { c: c.into(), s: s.into() } } } diff --git a/coins/ethereum/src/deployer.rs b/coins/ethereum/src/deployer.rs new file mode 100644 index 000000000..7c0bf16cf --- /dev/null +++ b/coins/ethereum/src/deployer.rs @@ -0,0 +1,113 @@ +use std::sync::Arc; + +use alloy_core::primitives::{hex::FromHex, Address, B256, U256, Bytes, TxKind}; +use alloy_consensus::{Signed, TxLegacy}; + +use alloy_sol_types::{SolCall, SolEvent}; + +use alloy_rpc_types::{BlockNumberOrTag, Filter}; +use alloy_simple_request_transport::SimpleRequest; +use alloy_provider::{Provider, RootProvider}; + +use crate::{ + Error, + crypto::{self, keccak256, PublicKey}, + router::Router, +}; +pub use crate::abi::deployer as abi; + +/// The Deployer contract for the Router contract. +/// +/// This Deployer has a deterministic address, letting it be immediately identified on any +/// compatible chain. It then supports retrieving the Router contract's address (which isn't +/// deterministic) using a single log query. +#[derive(Clone, Debug)] +pub struct Deployer; +impl Deployer { + /// Obtain the transaction to deploy this contract, already signed. + /// + /// The account this transaction is sent from (which is populated in `from`) must be sufficiently + /// funded for this transaction to be submitted. This account has no known private key to anyone, + /// so ETH sent can be neither misappropriated nor returned. 
+ pub fn deployment_tx() -> Signed { + let bytecode = include_str!("../artifacts/Deployer.bin"); + let bytecode = + Bytes::from_hex(bytecode).expect("compiled-in Deployer bytecode wasn't valid hex"); + + let tx = TxLegacy { + chain_id: None, + nonce: 0, + gas_price: 100_000_000_000u128, + // TODO: Use a more accurate gas limit + gas_limit: 1_000_000u128, + to: TxKind::Create, + value: U256::ZERO, + input: bytecode, + }; + + crypto::deterministically_sign(&tx) + } + + /// Obtain the deterministic address for this contract. + pub fn address() -> [u8; 20] { + let deployer_deployer = + Self::deployment_tx().recover_signer().expect("deployment_tx didn't have a valid signature"); + **Address::create(&deployer_deployer, 0) + } + + /// Construct a new view of the `Deployer`. + pub async fn new(provider: Arc>) -> Result, Error> { + let address = Self::address(); + let code = provider.get_code_at(address.into()).await.map_err(|_| Error::ConnectionError)?; + // Contract has yet to be deployed + if code.is_empty() { + return Ok(None); + } + Ok(Some(Self)) + } + + /// Yield the `ContractCall` necessary to deploy the Router. + pub fn deploy_router(&self, key: &PublicKey) -> TxLegacy { + TxLegacy { + to: TxKind::Call(Self::address().into()), + input: abi::deployCall::new((Router::init_code(key).into(),)).abi_encode().into(), + gas_limit: 1_000_000, + ..Default::default() + } + } + + /// Find the first Router deployed with the specified key as its first key. + /// + /// This is the Router Serai will use, and is the only way to construct a `Router`. 
+ pub async fn find_router( + &self, + provider: Arc>, + key: &PublicKey, + ) -> Result, Error> { + let init_code = Router::init_code(key); + let init_code_hash = keccak256(&init_code); + + #[cfg(not(test))] + let to_block = BlockNumberOrTag::Finalized; + #[cfg(test)] + let to_block = BlockNumberOrTag::Latest; + + // Find the first log using this init code (where the init code is binding to the key) + // TODO: Make an abstraction for event filtering (de-duplicating common code) + let filter = + Filter::new().from_block(0).to_block(to_block).address(Address::from(Self::address())); + let filter = filter.event_signature(abi::Deployment::SIGNATURE_HASH); + let filter = filter.topic1(B256::from(init_code_hash)); + let logs = provider.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?; + + let Some(first_log) = logs.first() else { return Ok(None) }; + let router = first_log + .log_decode::() + .map_err(|_| Error::ConnectionError)? + .inner + .data + .created; + + Ok(Some(Router::new(provider, router))) + } +} diff --git a/coins/ethereum/src/erc20.rs b/coins/ethereum/src/erc20.rs new file mode 100644 index 000000000..1d8744030 --- /dev/null +++ b/coins/ethereum/src/erc20.rs @@ -0,0 +1,105 @@ +use std::{sync::Arc, collections::HashSet}; + +use alloy_core::primitives::{Address, B256, U256}; + +use alloy_sol_types::{SolInterface, SolEvent}; + +use alloy_rpc_types::Filter; +use alloy_simple_request_transport::SimpleRequest; +use alloy_provider::{Provider, RootProvider}; + +use crate::Error; +pub use crate::abi::erc20 as abi; +use abi::{IERC20Calls, Transfer, transferCall, transferFromCall}; + +#[derive(Clone, Debug)] +pub struct TopLevelErc20Transfer { + pub id: [u8; 32], + pub from: [u8; 20], + pub amount: U256, + pub data: Vec, +} + +/// A view for an ERC20 contract. +#[derive(Clone, Debug)] +pub struct Erc20(Arc>, Address); +impl Erc20 { + /// Construct a new view of the specified ERC20 contract. 
+ pub fn new(provider: Arc>, address: [u8; 20]) -> Self { + Self(provider, Address::from(&address)) + } + + pub async fn top_level_transfers( + &self, + block: u64, + to: [u8; 20], + ) -> Result, Error> { + let filter = Filter::new().from_block(block).to_block(block).address(self.1); + let filter = filter.event_signature(Transfer::SIGNATURE_HASH); + let mut to_topic = [0; 32]; + to_topic[12 ..].copy_from_slice(&to); + let filter = filter.topic2(B256::from(to_topic)); + let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?; + + let mut handled = HashSet::new(); + + let mut top_level_transfers = vec![]; + for log in logs { + // Double check the address which emitted this log + if log.address() != self.1 { + Err(Error::ConnectionError)?; + } + + let tx_id = log.transaction_hash.ok_or(Error::ConnectionError)?; + let tx = + self.0.get_transaction_by_hash(tx_id).await.ok().flatten().ok_or(Error::ConnectionError)?; + + // If this is a top-level call... + if tx.to == Some(self.1) { + // And we recognize the call... 
+ // Don't validate the encoding as this can't be re-encoded to an identical bytestring due + // to the InInstruction appended + if let Ok(call) = IERC20Calls::abi_decode(&tx.input, false) { + // Extract the top-level call's from/to/value + let (from, call_to, value) = match call { + IERC20Calls::transfer(transferCall { to: call_to, value }) => (tx.from, call_to, value), + IERC20Calls::transferFrom(transferFromCall { from, to: call_to, value }) => { + (from, call_to, value) + } + // Treat any other function selectors as unrecognized + _ => continue, + }; + + let log = log.log_decode::().map_err(|_| Error::ConnectionError)?.inner.data; + + // Ensure the top-level transfer is equivalent, and this presumably isn't a log for an + // internal transfer + if (log.from != from) || (call_to != to) || (value != log.value) { + continue; + } + + // Now that the top-level transfer is confirmed to be equivalent to the log, ensure it's + // the only log we handle + if handled.contains(&tx_id) { + continue; + } + handled.insert(tx_id); + + // Read the data appended after + let encoded = call.abi_encode(); + let data = tx.input.as_ref()[encoded.len() ..].to_vec(); + + // Push the transfer + top_level_transfers.push(TopLevelErc20Transfer { + // Since we'll only handle one log for this TX, set the ID to the TX ID + id: *tx_id, + from: *log.from.0, + amount: log.value, + data, + }); + } + } + } + Ok(top_level_transfers) + } +} diff --git a/coins/ethereum/src/lib.rs b/coins/ethereum/src/lib.rs index 75a585250..322b5f839 100644 --- a/coins/ethereum/src/lib.rs +++ b/coins/ethereum/src/lib.rs @@ -1,2 +1,35 @@ -pub mod contract; +use thiserror::Error; + +pub mod alloy { + pub use alloy_core::primitives; + pub use alloy_core as core; + pub use alloy_sol_types as sol_types; + + pub use alloy_consensus as consensus; + pub use alloy_network as network; + pub use alloy_rpc_types as rpc_types; + pub use alloy_simple_request_transport as simple_request_transport; + pub use alloy_rpc_client as 
rpc_client; + pub use alloy_provider as provider; +} + pub mod crypto; + +pub(crate) mod abi; + +pub mod erc20; +pub mod deployer; +pub mod router; + +pub mod machine; + +#[cfg(any(test, feature = "tests"))] +pub mod tests; + +#[derive(Clone, Copy, PartialEq, Eq, Debug, Error)] +pub enum Error { + #[error("failed to verify Schnorr signature")] + InvalidSignature, + #[error("couldn't make call/send TX")] + ConnectionError, +} diff --git a/coins/ethereum/src/machine.rs b/coins/ethereum/src/machine.rs new file mode 100644 index 000000000..0d5dc7a59 --- /dev/null +++ b/coins/ethereum/src/machine.rs @@ -0,0 +1,414 @@ +use std::{ + io::{self, Read}, + collections::HashMap, +}; + +use rand_core::{RngCore, CryptoRng}; + +use transcript::{Transcript, RecommendedTranscript}; + +use group::GroupEncoding; +use frost::{ + curve::{Ciphersuite, Secp256k1}, + Participant, ThresholdKeys, FrostError, + algorithm::Schnorr, + sign::*, +}; + +use alloy_core::primitives::U256; + +use crate::{ + crypto::{PublicKey, EthereumHram, Signature}, + router::{ + abi::{Call as AbiCall, OutInstruction as AbiOutInstruction}, + Router, + }, +}; + +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct Call { + pub to: [u8; 20], + pub value: U256, + pub data: Vec, +} +impl Call { + pub fn read(reader: &mut R) -> io::Result { + let mut to = [0; 20]; + reader.read_exact(&mut to)?; + + let value = { + let mut value_bytes = [0; 32]; + reader.read_exact(&mut value_bytes)?; + U256::from_le_slice(&value_bytes) + }; + + let mut data_len = { + let mut data_len = [0; 4]; + reader.read_exact(&mut data_len)?; + usize::try_from(u32::from_le_bytes(data_len)).expect("u32 couldn't fit within a usize") + }; + + // A valid DoS would be to claim a 4 GB data is present for only 4 bytes + // We read this in 1 KB chunks to only read data actually present (with a max DoS of 1 KB) + let mut data = vec![]; + while data_len > 0 { + let chunk_len = data_len.min(1024); + let mut chunk = vec![0; chunk_len]; + reader.read_exact(&mut 
chunk)?; + data.extend(&chunk); + data_len -= chunk_len; + } + + Ok(Call { to, value, data }) + } + + fn write(&self, writer: &mut W) -> io::Result<()> { + writer.write_all(&self.to)?; + writer.write_all(&self.value.as_le_bytes())?; + + let data_len = u32::try_from(self.data.len()) + .map_err(|_| io::Error::other("call data length exceeded 2**32"))?; + writer.write_all(&data_len.to_le_bytes())?; + writer.write_all(&self.data) + } +} +impl From for AbiCall { + fn from(call: Call) -> AbiCall { + AbiCall { to: call.to.into(), value: call.value, data: call.data.into() } + } +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub enum OutInstructionTarget { + Direct([u8; 20]), + Calls(Vec), +} +impl OutInstructionTarget { + fn read(reader: &mut R) -> io::Result { + let mut kind = [0xff]; + reader.read_exact(&mut kind)?; + + match kind[0] { + 0 => { + let mut addr = [0; 20]; + reader.read_exact(&mut addr)?; + Ok(OutInstructionTarget::Direct(addr)) + } + 1 => { + let mut calls_len = [0; 4]; + reader.read_exact(&mut calls_len)?; + let calls_len = u32::from_le_bytes(calls_len); + + let mut calls = vec![]; + for _ in 0 .. 
calls_len { + calls.push(Call::read(reader)?); + } + Ok(OutInstructionTarget::Calls(calls)) + } + _ => Err(io::Error::other("unrecognized OutInstructionTarget"))?, + } + } + + fn write(&self, writer: &mut W) -> io::Result<()> { + match self { + OutInstructionTarget::Direct(addr) => { + writer.write_all(&[0])?; + writer.write_all(addr)?; + } + OutInstructionTarget::Calls(calls) => { + writer.write_all(&[1])?; + let call_len = u32::try_from(calls.len()) + .map_err(|_| io::Error::other("amount of calls exceeded 2**32"))?; + writer.write_all(&call_len.to_le_bytes())?; + for call in calls { + call.write(writer)?; + } + } + } + Ok(()) + } +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct OutInstruction { + pub target: OutInstructionTarget, + pub value: U256, +} +impl OutInstruction { + fn read(reader: &mut R) -> io::Result { + let target = OutInstructionTarget::read(reader)?; + + let value = { + let mut value_bytes = [0; 32]; + reader.read_exact(&mut value_bytes)?; + U256::from_le_slice(&value_bytes) + }; + + Ok(OutInstruction { target, value }) + } + fn write(&self, writer: &mut W) -> io::Result<()> { + self.target.write(writer)?; + writer.write_all(&self.value.as_le_bytes()) + } +} +impl From for AbiOutInstruction { + fn from(instruction: OutInstruction) -> AbiOutInstruction { + match instruction.target { + OutInstructionTarget::Direct(addr) => { + AbiOutInstruction { to: addr.into(), calls: vec![], value: instruction.value } + } + OutInstructionTarget::Calls(calls) => AbiOutInstruction { + to: [0; 20].into(), + calls: calls.into_iter().map(Into::into).collect(), + value: instruction.value, + }, + } + } +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub enum RouterCommand { + UpdateSeraiKey { chain_id: U256, nonce: U256, key: PublicKey }, + Execute { chain_id: U256, nonce: U256, outs: Vec }, +} + +impl RouterCommand { + pub fn msg(&self) -> Vec { + match self { + RouterCommand::UpdateSeraiKey { chain_id, nonce, key } => { + 
Router::update_serai_key_message(*chain_id, *nonce, key) + } + RouterCommand::Execute { chain_id, nonce, outs } => Router::execute_message( + *chain_id, + *nonce, + outs.iter().map(|out| out.clone().into()).collect(), + ), + } + } + + pub fn read(reader: &mut R) -> io::Result { + let mut kind = [0xff]; + reader.read_exact(&mut kind)?; + + match kind[0] { + 0 => { + let mut chain_id = [0; 32]; + reader.read_exact(&mut chain_id)?; + + let mut nonce = [0; 32]; + reader.read_exact(&mut nonce)?; + + let key = PublicKey::new(Secp256k1::read_G(reader)?) + .ok_or(io::Error::other("key for RouterCommand doesn't have an eth representation"))?; + Ok(RouterCommand::UpdateSeraiKey { + chain_id: U256::from_le_slice(&chain_id), + nonce: U256::from_le_slice(&nonce), + key, + }) + } + 1 => { + let mut chain_id = [0; 32]; + reader.read_exact(&mut chain_id)?; + let chain_id = U256::from_le_slice(&chain_id); + + let mut nonce = [0; 32]; + reader.read_exact(&mut nonce)?; + let nonce = U256::from_le_slice(&nonce); + + let mut outs_len = [0; 4]; + reader.read_exact(&mut outs_len)?; + let outs_len = u32::from_le_bytes(outs_len); + + let mut outs = vec![]; + for _ in 0 .. 
outs_len { + outs.push(OutInstruction::read(reader)?); + } + + Ok(RouterCommand::Execute { chain_id, nonce, outs }) + } + _ => Err(io::Error::other("reading unknown type of RouterCommand"))?, + } + } + + pub fn write(&self, writer: &mut W) -> io::Result<()> { + match self { + RouterCommand::UpdateSeraiKey { chain_id, nonce, key } => { + writer.write_all(&[0])?; + writer.write_all(&chain_id.as_le_bytes())?; + writer.write_all(&nonce.as_le_bytes())?; + writer.write_all(&key.A.to_bytes()) + } + RouterCommand::Execute { chain_id, nonce, outs } => { + writer.write_all(&[1])?; + writer.write_all(&chain_id.as_le_bytes())?; + writer.write_all(&nonce.as_le_bytes())?; + writer.write_all(&u32::try_from(outs.len()).unwrap().to_le_bytes())?; + for out in outs { + out.write(writer)?; + } + Ok(()) + } + } + } + + pub fn serialize(&self) -> Vec { + let mut res = vec![]; + self.write(&mut res).unwrap(); + res + } +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct SignedRouterCommand { + command: RouterCommand, + signature: Signature, +} + +impl SignedRouterCommand { + pub fn new(key: &PublicKey, command: RouterCommand, signature: &[u8; 64]) -> Option { + let c = Secp256k1::read_F(&mut &signature[.. 32]).ok()?; + let s = Secp256k1::read_F(&mut &signature[32 ..]).ok()?; + let signature = Signature { c, s }; + + if !signature.verify(key, &command.msg()) { + None? 
+ } + Some(SignedRouterCommand { command, signature }) + } + + pub fn command(&self) -> &RouterCommand { + &self.command + } + + pub fn signature(&self) -> &Signature { + &self.signature + } + + pub fn read(reader: &mut R) -> io::Result { + let command = RouterCommand::read(reader)?; + + let mut sig = [0; 64]; + reader.read_exact(&mut sig)?; + let signature = Signature::from_bytes(sig)?; + + Ok(SignedRouterCommand { command, signature }) + } + + pub fn write(&self, writer: &mut W) -> io::Result<()> { + self.command.write(writer)?; + writer.write_all(&self.signature.to_bytes()) + } +} + +pub struct RouterCommandMachine { + key: PublicKey, + command: RouterCommand, + machine: AlgorithmMachine>, +} + +impl RouterCommandMachine { + pub fn new(keys: ThresholdKeys, command: RouterCommand) -> Option { + // The Schnorr algorithm should be fine without this, even when using the IETF variant + // If this is better and more comprehensive, we should do it, even if not necessary + let mut transcript = RecommendedTranscript::new(b"ethereum-serai RouterCommandMachine v0.1"); + let key = keys.group_key(); + transcript.append_message(b"key", key.to_bytes()); + transcript.append_message(b"command", command.serialize()); + + Some(Self { + key: PublicKey::new(key)?, + command, + machine: AlgorithmMachine::new(Schnorr::new(transcript), keys), + }) + } +} + +impl PreprocessMachine for RouterCommandMachine { + type Preprocess = Preprocess; + type Signature = SignedRouterCommand; + type SignMachine = RouterCommandSignMachine; + + fn preprocess( + self, + rng: &mut R, + ) -> (Self::SignMachine, Self::Preprocess) { + let (machine, preprocess) = self.machine.preprocess(rng); + + (RouterCommandSignMachine { key: self.key, command: self.command, machine }, preprocess) + } +} + +pub struct RouterCommandSignMachine { + key: PublicKey, + command: RouterCommand, + machine: AlgorithmSignMachine>, +} + +impl SignMachine for RouterCommandSignMachine { + type Params = (); + type Keys = ThresholdKeys; 
+ type Preprocess = Preprocess; + type SignatureShare = SignatureShare; + type SignatureMachine = RouterCommandSignatureMachine; + + fn cache(self) -> CachedPreprocess { + unimplemented!( + "RouterCommand machines don't support caching their preprocesses due to {}", + "being already bound to a specific command" + ); + } + + fn from_cache( + (): (), + _: ThresholdKeys, + _: CachedPreprocess, + ) -> (Self, Self::Preprocess) { + unimplemented!( + "RouterCommand machines don't support caching their preprocesses due to {}", + "being already bound to a specific command" + ); + } + + fn read_preprocess(&self, reader: &mut R) -> io::Result { + self.machine.read_preprocess(reader) + } + + fn sign( + self, + commitments: HashMap, + msg: &[u8], + ) -> Result<(RouterCommandSignatureMachine, Self::SignatureShare), FrostError> { + if !msg.is_empty() { + panic!("message was passed to a RouterCommand machine when it generates its own"); + } + + let (machine, share) = self.machine.sign(commitments, &self.command.msg())?; + + Ok((RouterCommandSignatureMachine { key: self.key, command: self.command, machine }, share)) + } +} + +pub struct RouterCommandSignatureMachine { + key: PublicKey, + command: RouterCommand, + machine: + AlgorithmSignatureMachine>, +} + +impl SignatureMachine for RouterCommandSignatureMachine { + type SignatureShare = SignatureShare; + + fn read_share(&self, reader: &mut R) -> io::Result { + self.machine.read_share(reader) + } + + fn complete( + self, + shares: HashMap, + ) -> Result { + let sig = self.machine.complete(shares)?; + let signature = Signature::new(&self.key, &self.command.msg(), sig) + .expect("machine produced an invalid signature"); + Ok(SignedRouterCommand { command: self.command, signature }) + } +} diff --git a/coins/ethereum/src/router.rs b/coins/ethereum/src/router.rs new file mode 100644 index 000000000..8d46b24f2 --- /dev/null +++ b/coins/ethereum/src/router.rs @@ -0,0 +1,443 @@ +use std::{sync::Arc, io, collections::HashSet}; + +use 
k256::{ + elliptic_curve::{group::GroupEncoding, sec1}, + ProjectivePoint, +}; + +use alloy_core::primitives::{hex::FromHex, Address, U256, Bytes, TxKind}; +#[cfg(test)] +use alloy_core::primitives::B256; +use alloy_consensus::TxLegacy; + +use alloy_sol_types::{SolValue, SolConstructor, SolCall, SolEvent}; + +use alloy_rpc_types::Filter; +#[cfg(test)] +use alloy_rpc_types::{BlockId, TransactionRequest, TransactionInput}; +use alloy_simple_request_transport::SimpleRequest; +use alloy_provider::{Provider, RootProvider}; + +pub use crate::{ + Error, + crypto::{PublicKey, Signature}, + abi::{erc20::Transfer, router as abi}, +}; +use abi::{SeraiKeyUpdated, InInstruction as InInstructionEvent, Executed as ExecutedEvent}; + +#[derive(Clone, PartialEq, Eq, Debug)] +pub enum Coin { + Ether, + Erc20([u8; 20]), +} + +impl Coin { + pub fn read(reader: &mut R) -> io::Result { + let mut kind = [0xff]; + reader.read_exact(&mut kind)?; + Ok(match kind[0] { + 0 => Coin::Ether, + 1 => { + let mut address = [0; 20]; + reader.read_exact(&mut address)?; + Coin::Erc20(address) + } + _ => Err(io::Error::other("unrecognized Coin type"))?, + }) + } + + pub fn write(&self, writer: &mut W) -> io::Result<()> { + match self { + Coin::Ether => writer.write_all(&[0]), + Coin::Erc20(token) => { + writer.write_all(&[1])?; + writer.write_all(token) + } + } + } +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct InInstruction { + pub id: ([u8; 32], u64), + pub from: [u8; 20], + pub coin: Coin, + pub amount: U256, + pub data: Vec, + pub key_at_end_of_block: ProjectivePoint, +} + +impl InInstruction { + pub fn read(reader: &mut R) -> io::Result { + let id = { + let mut id_hash = [0; 32]; + reader.read_exact(&mut id_hash)?; + let mut id_pos = [0; 8]; + reader.read_exact(&mut id_pos)?; + let id_pos = u64::from_le_bytes(id_pos); + (id_hash, id_pos) + }; + + let mut from = [0; 20]; + reader.read_exact(&mut from)?; + + let coin = Coin::read(reader)?; + let mut amount = [0; 32]; + 
reader.read_exact(&mut amount)?; + let amount = U256::from_le_slice(&amount); + + let mut data_len = [0; 4]; + reader.read_exact(&mut data_len)?; + let data_len = usize::try_from(u32::from_le_bytes(data_len)) + .map_err(|_| io::Error::other("InInstruction data exceeded 2**32 in length"))?; + let mut data = vec![0; data_len]; + reader.read_exact(&mut data)?; + + let mut key_at_end_of_block = ::Repr::default(); + reader.read_exact(&mut key_at_end_of_block)?; + let key_at_end_of_block = Option::from(ProjectivePoint::from_bytes(&key_at_end_of_block)) + .ok_or(io::Error::other("InInstruction had key at end of block which wasn't valid"))?; + + Ok(InInstruction { id, from, coin, amount, data, key_at_end_of_block }) + } + + pub fn write(&self, writer: &mut W) -> io::Result<()> { + writer.write_all(&self.id.0)?; + writer.write_all(&self.id.1.to_le_bytes())?; + + writer.write_all(&self.from)?; + + self.coin.write(writer)?; + writer.write_all(&self.amount.as_le_bytes())?; + + writer.write_all( + &u32::try_from(self.data.len()) + .map_err(|_| { + io::Error::other("InInstruction being written had data exceeding 2**32 in length") + })? + .to_le_bytes(), + )?; + writer.write_all(&self.data)?; + + writer.write_all(&self.key_at_end_of_block.to_bytes()) + } +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct Executed { + pub tx_id: [u8; 32], + pub nonce: u64, + pub signature: [u8; 64], +} + +/// The contract Serai uses to manage its state. 
+#[derive(Clone, Debug)] +pub struct Router(Arc>, Address); +impl Router { + pub(crate) fn code() -> Vec { + let bytecode = include_str!("../artifacts/Router.bin"); + Bytes::from_hex(bytecode).expect("compiled-in Router bytecode wasn't valid hex").to_vec() + } + + pub(crate) fn init_code(key: &PublicKey) -> Vec { + let mut bytecode = Self::code(); + // Append the constructor arguments + bytecode.extend((abi::constructorCall { _seraiKey: key.eth_repr().into() }).abi_encode()); + bytecode + } + + // This isn't pub in order to force users to use `Deployer::find_router`. + pub(crate) fn new(provider: Arc>, address: Address) -> Self { + Self(provider, address) + } + + pub fn address(&self) -> [u8; 20] { + **self.1 + } + + /// Get the key for Serai at the specified block. + #[cfg(test)] + pub async fn serai_key(&self, at: [u8; 32]) -> Result { + let call = TransactionRequest::default() + .to(self.1) + .input(TransactionInput::new(abi::seraiKeyCall::new(()).abi_encode().into())); + let bytes = self + .0 + .call(&call) + .block(BlockId::Hash(B256::from(at).into())) + .await + .map_err(|_| Error::ConnectionError)?; + let res = + abi::seraiKeyCall::abi_decode_returns(&bytes, true).map_err(|_| Error::ConnectionError)?; + PublicKey::from_eth_repr(res._0.0).ok_or(Error::ConnectionError) + } + + /// Get the message to be signed in order to update the key for Serai. + pub(crate) fn update_serai_key_message(chain_id: U256, nonce: U256, key: &PublicKey) -> Vec { + let mut buffer = b"updateSeraiKey".to_vec(); + buffer.extend(&chain_id.to_be_bytes::<32>()); + buffer.extend(&nonce.to_be_bytes::<32>()); + buffer.extend(&key.eth_repr()); + buffer + } + + /// Update the key representing Serai. 
+ pub fn update_serai_key(&self, public_key: &PublicKey, sig: &Signature) -> TxLegacy { + // TODO: Set a more accurate gas + TxLegacy { + to: TxKind::Call(self.1), + input: abi::updateSeraiKeyCall::new((public_key.eth_repr().into(), sig.into())) + .abi_encode() + .into(), + gas_limit: 100_000, + ..Default::default() + } + } + + /// Get the current nonce for the published batches. + #[cfg(test)] + pub async fn nonce(&self, at: [u8; 32]) -> Result { + let call = TransactionRequest::default() + .to(self.1) + .input(TransactionInput::new(abi::nonceCall::new(()).abi_encode().into())); + let bytes = self + .0 + .call(&call) + .block(BlockId::Hash(B256::from(at).into())) + .await + .map_err(|_| Error::ConnectionError)?; + let res = + abi::nonceCall::abi_decode_returns(&bytes, true).map_err(|_| Error::ConnectionError)?; + Ok(res._0) + } + + /// Get the message to be signed in order to update the key for Serai. + pub(crate) fn execute_message( + chain_id: U256, + nonce: U256, + outs: Vec, + ) -> Vec { + ("execute".to_string(), chain_id, nonce, outs).abi_encode_params() + } + + /// Execute a batch of `OutInstruction`s. 
+ pub fn execute(&self, outs: &[abi::OutInstruction], sig: &Signature) -> TxLegacy { + TxLegacy { + to: TxKind::Call(self.1), + input: abi::executeCall::new((outs.to_vec(), sig.into())).abi_encode().into(), + // TODO + gas_limit: 100_000 + ((200_000 + 10_000) * u128::try_from(outs.len()).unwrap()), + ..Default::default() + } + } + + pub async fn key_at_end_of_block(&self, block: u64) -> Result, Error> { + let filter = Filter::new().from_block(0).to_block(block).address(self.1); + let filter = filter.event_signature(SeraiKeyUpdated::SIGNATURE_HASH); + let all_keys = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?; + if all_keys.is_empty() { + return Ok(None); + }; + + let last_key_x_coordinate_log = all_keys.last().ok_or(Error::ConnectionError)?; + let last_key_x_coordinate = last_key_x_coordinate_log + .log_decode::() + .map_err(|_| Error::ConnectionError)? + .inner + .data + .key; + + let mut compressed_point = ::Repr::default(); + compressed_point[0] = u8::from(sec1::Tag::CompressedEvenY); + compressed_point[1 ..].copy_from_slice(last_key_x_coordinate.as_slice()); + + let key = + Option::from(ProjectivePoint::from_bytes(&compressed_point)).ok_or(Error::ConnectionError)?; + Ok(Some(key)) + } + + pub async fn in_instructions( + &self, + block: u64, + allowed_tokens: &HashSet<[u8; 20]>, + ) -> Result, Error> { + let Some(key_at_end_of_block) = self.key_at_end_of_block(block).await? 
else { + return Ok(vec![]); + }; + + let filter = Filter::new().from_block(block).to_block(block).address(self.1); + let filter = filter.event_signature(InInstructionEvent::SIGNATURE_HASH); + let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?; + + let mut transfer_check = HashSet::new(); + let mut in_instructions = vec![]; + for log in logs { + // Double check the address which emitted this log + if log.address() != self.1 { + Err(Error::ConnectionError)?; + } + + let id = ( + log.block_hash.ok_or(Error::ConnectionError)?.into(), + log.log_index.ok_or(Error::ConnectionError)?, + ); + + let tx_hash = log.transaction_hash.ok_or(Error::ConnectionError)?; + let tx = self + .0 + .get_transaction_by_hash(tx_hash) + .await + .ok() + .flatten() + .ok_or(Error::ConnectionError)?; + + let log = + log.log_decode::().map_err(|_| Error::ConnectionError)?.inner.data; + + let coin = if log.coin.0 == [0; 20] { + Coin::Ether + } else { + let token = *log.coin.0; + + if !allowed_tokens.contains(&token) { + continue; + } + + // If this also counts as a top-level transfer via the token, drop it + // + // Necessary in order to handle a potential edge case with some theoretical token + // implementations + // + // This will either let it be handled by the top-level transfer hook or will drop it + // entirely on the side of caution + if tx.to == Some(token.into()) { + continue; + } + + // Get all logs for this TX + let receipt = self + .0 + .get_transaction_receipt(tx_hash) + .await + .map_err(|_| Error::ConnectionError)? 
+ .ok_or(Error::ConnectionError)?; + let tx_logs = receipt.inner.logs(); + + // Find a matching transfer log + let mut found_transfer = false; + for tx_log in tx_logs { + let log_index = tx_log.log_index.ok_or(Error::ConnectionError)?; + // Ensure we didn't already use this transfer to check a distinct InInstruction event + if transfer_check.contains(&log_index) { + continue; + } + + // Check if this log is from the token we expected to be transferred + if tx_log.address().0 != token { + continue; + } + // Check if this is a transfer log + // https://github.com/alloy-rs/core/issues/589 + if tx_log.topics()[0] != Transfer::SIGNATURE_HASH { + continue; + } + let Ok(transfer) = Transfer::decode_log(&tx_log.inner.clone(), true) else { continue }; + // Check if this is a transfer to us for the expected amount + if (transfer.to == self.1) && (transfer.value == log.amount) { + transfer_check.insert(log_index); + found_transfer = true; + break; + } + } + if !found_transfer { + // This shouldn't be a ConnectionError + // This is an exploit, a non-conforming ERC20, or an invalid connection + // This should halt the process which is sufficient, yet this is sub-optimal + // TODO + Err(Error::ConnectionError)?; + } + + Coin::Erc20(token) + }; + + in_instructions.push(InInstruction { + id, + from: *log.from.0, + coin, + amount: log.amount, + data: log.instruction.as_ref().to_vec(), + key_at_end_of_block, + }); + } + + Ok(in_instructions) + } + + pub async fn executed_commands(&self, block: u64) -> Result, Error> { + let mut res = vec![]; + + { + let filter = Filter::new().from_block(block).to_block(block).address(self.1); + let filter = filter.event_signature(SeraiKeyUpdated::SIGNATURE_HASH); + let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?; + + for log in logs { + // Double check the address which emitted this log + if log.address() != self.1 { + Err(Error::ConnectionError)?; + } + + let tx_id = 
log.transaction_hash.ok_or(Error::ConnectionError)?.into(); + + let log = + log.log_decode::().map_err(|_| Error::ConnectionError)?.inner.data; + + let mut signature = [0; 64]; + signature[.. 32].copy_from_slice(log.signature.c.as_ref()); + signature[32 ..].copy_from_slice(log.signature.s.as_ref()); + res.push(Executed { + tx_id, + nonce: log.nonce.try_into().map_err(|_| Error::ConnectionError)?, + signature, + }); + } + } + + { + let filter = Filter::new().from_block(block).to_block(block).address(self.1); + let filter = filter.event_signature(ExecutedEvent::SIGNATURE_HASH); + let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?; + + for log in logs { + // Double check the address which emitted this log + if log.address() != self.1 { + Err(Error::ConnectionError)?; + } + + let tx_id = log.transaction_hash.ok_or(Error::ConnectionError)?.into(); + + let log = log.log_decode::().map_err(|_| Error::ConnectionError)?.inner.data; + + let mut signature = [0; 64]; + signature[.. 
32].copy_from_slice(log.signature.c.as_ref()); + signature[32 ..].copy_from_slice(log.signature.s.as_ref()); + res.push(Executed { + tx_id, + nonce: log.nonce.try_into().map_err(|_| Error::ConnectionError)?, + signature, + }); + } + } + + Ok(res) + } + + #[cfg(feature = "tests")] + pub fn key_updated_filter(&self) -> Filter { + Filter::new().address(self.1).event_signature(SeraiKeyUpdated::SIGNATURE_HASH) + } + #[cfg(feature = "tests")] + pub fn executed_filter(&self) -> Filter { + Filter::new().address(self.1).event_signature(ExecutedEvent::SIGNATURE_HASH) + } +} diff --git a/coins/ethereum/src/tests/abi/mod.rs b/coins/ethereum/src/tests/abi/mod.rs new file mode 100644 index 000000000..57ea88116 --- /dev/null +++ b/coins/ethereum/src/tests/abi/mod.rs @@ -0,0 +1,13 @@ +use alloy_sol_types::sol; + +#[rustfmt::skip] +#[allow(warnings)] +#[allow(needless_pass_by_value)] +#[allow(clippy::all)] +#[allow(clippy::ignored_unit_patterns)] +#[allow(clippy::redundant_closure_for_method_calls)] +mod schnorr_container { + use super::*; + sol!("src/tests/contracts/Schnorr.sol"); +} +pub(crate) use schnorr_container::TestSchnorr as schnorr; diff --git a/coins/ethereum/src/tests/contracts/ERC20.sol b/coins/ethereum/src/tests/contracts/ERC20.sol new file mode 100644 index 000000000..e157974c7 --- /dev/null +++ b/coins/ethereum/src/tests/contracts/ERC20.sol @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: AGPLv3 +pragma solidity ^0.8.0; + +contract TestERC20 { + event Transfer(address indexed from, address indexed to, uint256 value); + event Approval(address indexed owner, address indexed spender, uint256 value); + + function name() public pure returns (string memory) { + return "Test ERC20"; + } + function symbol() public pure returns (string memory) { + return "TEST"; + } + function decimals() public pure returns (uint8) { + return 18; + } + + function totalSupply() public pure returns (uint256) { + return 1_000_000 * 10e18; + } + + mapping(address => uint256) balances; + 
mapping(address => mapping(address => uint256)) allowances; + + constructor() { + balances[msg.sender] = totalSupply(); + } + + function balanceOf(address owner) public view returns (uint256) { + return balances[owner]; + } + function transfer(address to, uint256 value) public returns (bool) { + balances[msg.sender] -= value; + balances[to] += value; + return true; + } + function transferFrom(address from, address to, uint256 value) public returns (bool) { + allowances[from][msg.sender] -= value; + balances[from] -= value; + balances[to] += value; + return true; + } + + function approve(address spender, uint256 value) public returns (bool) { + allowances[msg.sender][spender] = value; + return true; + } + function allowance(address owner, address spender) public view returns (uint256) { + return allowances[owner][spender]; + } +} diff --git a/coins/ethereum/src/tests/contracts/Schnorr.sol b/coins/ethereum/src/tests/contracts/Schnorr.sol new file mode 100644 index 000000000..832cd2fee --- /dev/null +++ b/coins/ethereum/src/tests/contracts/Schnorr.sol @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: AGPLv3 +pragma solidity ^0.8.0; + +import "../../../contracts/Schnorr.sol"; + +contract TestSchnorr { + function verify( + bytes32 px, + bytes calldata message, + bytes32 c, + bytes32 s + ) external pure returns (bool) { + return Schnorr.verify(px, message, c, s); + } +} diff --git a/coins/ethereum/src/tests/crypto.rs b/coins/ethereum/src/tests/crypto.rs new file mode 100644 index 000000000..a668b2d6d --- /dev/null +++ b/coins/ethereum/src/tests/crypto.rs @@ -0,0 +1,105 @@ +use rand_core::OsRng; + +use group::ff::{Field, PrimeField}; +use k256::{ + ecdsa::{ + self, hazmat::SignPrimitive, signature::hazmat::PrehashVerifier, SigningKey, VerifyingKey, + }, + Scalar, ProjectivePoint, +}; + +use frost::{ + curve::{Ciphersuite, Secp256k1}, + algorithm::{Hram, IetfSchnorr}, + tests::{algorithm_machines, sign}, +}; + +use crate::{crypto::*, tests::key_gen}; + +// The ecrecover 
opcode, yet with parity replacing v +pub(crate) fn ecrecover(message: Scalar, odd_y: bool, r: Scalar, s: Scalar) -> Option<[u8; 20]> { + let sig = ecdsa::Signature::from_scalars(r, s).ok()?; + let message: [u8; 32] = message.to_repr().into(); + alloy_core::primitives::Signature::from_signature_and_parity( + sig, + alloy_core::primitives::Parity::Parity(odd_y), + ) + .ok()? + .recover_address_from_prehash(&alloy_core::primitives::B256::from(message)) + .ok() + .map(Into::into) +} + +#[test] +fn test_ecrecover() { + let private = SigningKey::random(&mut OsRng); + let public = VerifyingKey::from(&private); + + // Sign the signature + const MESSAGE: &[u8] = b"Hello, World!"; + let (sig, recovery_id) = private + .as_nonzero_scalar() + .try_sign_prehashed( + ::F::random(&mut OsRng), + &keccak256(MESSAGE).into(), + ) + .unwrap(); + + // Sanity check the signature verifies + #[allow(clippy::unit_cmp)] // Intended to assert this wasn't changed to Result + { + assert_eq!(public.verify_prehash(&keccak256(MESSAGE), &sig).unwrap(), ()); + } + + // Perform the ecrecover + assert_eq!( + ecrecover( + hash_to_scalar(MESSAGE), + u8::from(recovery_id.unwrap().is_y_odd()) == 1, + *sig.r(), + *sig.s() + ) + .unwrap(), + address(&ProjectivePoint::from(public.as_affine())) + ); +} + +// Run the sign test with the EthereumHram +#[test] +fn test_signing() { + let (keys, _) = key_gen(); + + const MESSAGE: &[u8] = b"Hello, World!"; + + let algo = IetfSchnorr::::ietf(); + let _sig = + sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE); +} + +#[allow(non_snake_case)] +pub fn preprocess_signature_for_ecrecover( + R: ProjectivePoint, + public_key: &PublicKey, + m: &[u8], + s: Scalar, +) -> (Scalar, Scalar) { + let c = EthereumHram::hram(&R, &public_key.A, m); + let sa = -(s * public_key.px); + let ca = -(c * public_key.px); + (sa, ca) +} + +#[test] +fn test_ecrecover_hack() { + let (keys, public_key) = key_gen(); + + const MESSAGE: &[u8] = b"Hello, 
World!"; + + let algo = IetfSchnorr::::ietf(); + let sig = + sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE); + + let (sa, ca) = preprocess_signature_for_ecrecover(sig.R, &public_key, MESSAGE, sig.s); + let q = ecrecover(sa, false, public_key.px, ca).unwrap(); + assert_eq!(q, address(&sig.R)); +} diff --git a/coins/ethereum/src/tests/mod.rs b/coins/ethereum/src/tests/mod.rs new file mode 100644 index 000000000..e88e90e5c --- /dev/null +++ b/coins/ethereum/src/tests/mod.rs @@ -0,0 +1,131 @@ +use std::{sync::Arc, collections::HashMap}; + +use rand_core::OsRng; + +use k256::{Scalar, ProjectivePoint}; +use frost::{curve::Secp256k1, Participant, ThresholdKeys, tests::key_gen as frost_key_gen}; + +use alloy_core::{ + primitives::{Address, U256, Bytes, TxKind}, + hex::FromHex, +}; +use alloy_consensus::{SignableTransaction, TxLegacy}; + +use alloy_rpc_types::TransactionReceipt; +use alloy_simple_request_transport::SimpleRequest; +use alloy_provider::{Provider, RootProvider}; + +use crate::crypto::{address, deterministically_sign, PublicKey}; + +#[cfg(test)] +mod crypto; + +#[cfg(test)] +mod abi; +#[cfg(test)] +mod schnorr; +#[cfg(test)] +mod router; + +pub fn key_gen() -> (HashMap>, PublicKey) { + let mut keys = frost_key_gen::<_, Secp256k1>(&mut OsRng); + let mut group_key = keys[&Participant::new(1).unwrap()].group_key(); + + let mut offset = Scalar::ZERO; + while PublicKey::new(group_key).is_none() { + offset += Scalar::ONE; + group_key += ProjectivePoint::GENERATOR; + } + for keys in keys.values_mut() { + *keys = keys.offset(offset); + } + let public_key = PublicKey::new(group_key).unwrap(); + + (keys, public_key) +} + +// TODO: Use a proper error here +pub async fn send( + provider: &RootProvider, + wallet: &k256::ecdsa::SigningKey, + mut tx: TxLegacy, +) -> Option { + let verifying_key = *wallet.verifying_key().as_affine(); + let address = Address::from(address(&verifying_key.into())); + + // 
https://github.com/alloy-rs/alloy/issues/539 + // let chain_id = provider.get_chain_id().await.unwrap(); + // tx.chain_id = Some(chain_id); + tx.chain_id = None; + tx.nonce = provider.get_transaction_count(address).await.unwrap(); + // 100 gwei + tx.gas_price = 100_000_000_000u128; + + let sig = wallet.sign_prehash_recoverable(tx.signature_hash().as_ref()).unwrap(); + assert_eq!(address, tx.clone().into_signed(sig.into()).recover_signer().unwrap()); + assert!( + provider.get_balance(address).await.unwrap() > + ((U256::from(tx.gas_price) * U256::from(tx.gas_limit)) + tx.value) + ); + + let mut bytes = vec![]; + tx.encode_with_signature_fields(&sig.into(), &mut bytes); + let pending_tx = provider.send_raw_transaction(&bytes).await.ok()?; + pending_tx.get_receipt().await.ok() +} + +pub async fn fund_account( + provider: &RootProvider, + wallet: &k256::ecdsa::SigningKey, + to_fund: Address, + value: U256, +) -> Option<()> { + let funding_tx = + TxLegacy { to: TxKind::Call(to_fund), gas_limit: 21_000, value, ..Default::default() }; + assert!(send(provider, wallet, funding_tx).await.unwrap().status()); + + Some(()) +} + +// TODO: Use a proper error here +pub async fn deploy_contract( + client: Arc>, + wallet: &k256::ecdsa::SigningKey, + name: &str, +) -> Option
{ + let hex_bin_buf = std::fs::read_to_string(format!("./artifacts/{name}.bin")).unwrap(); + let hex_bin = + if let Some(stripped) = hex_bin_buf.strip_prefix("0x") { stripped } else { &hex_bin_buf }; + let bin = Bytes::from_hex(hex_bin).unwrap(); + + let deployment_tx = TxLegacy { + chain_id: None, + nonce: 0, + // 100 gwei + gas_price: 100_000_000_000u128, + gas_limit: 1_000_000, + to: TxKind::Create, + value: U256::ZERO, + input: bin, + }; + + let deployment_tx = deterministically_sign(&deployment_tx); + + // Fund the deployer address + fund_account( + &client, + wallet, + deployment_tx.recover_signer().unwrap(), + U256::from(deployment_tx.tx().gas_limit) * U256::from(deployment_tx.tx().gas_price), + ) + .await?; + + let (deployment_tx, sig, _) = deployment_tx.into_parts(); + let mut bytes = vec![]; + deployment_tx.encode_with_signature_fields(&sig, &mut bytes); + let pending_tx = client.send_raw_transaction(&bytes).await.ok()?; + let receipt = pending_tx.get_receipt().await.ok()?; + assert!(receipt.status()); + + Some(receipt.contract_address.unwrap()) +} diff --git a/coins/ethereum/src/tests/router.rs b/coins/ethereum/src/tests/router.rs new file mode 100644 index 000000000..39a865bd9 --- /dev/null +++ b/coins/ethereum/src/tests/router.rs @@ -0,0 +1,183 @@ +use std::{convert::TryFrom, sync::Arc, collections::HashMap}; + +use rand_core::OsRng; + +use group::Group; +use k256::ProjectivePoint; +use frost::{ + curve::Secp256k1, + Participant, ThresholdKeys, + algorithm::IetfSchnorr, + tests::{algorithm_machines, sign}, +}; + +use alloy_core::primitives::{Address, U256}; + +use alloy_simple_request_transport::SimpleRequest; +use alloy_rpc_client::ClientBuilder; +use alloy_provider::{Provider, RootProvider}; + +use alloy_node_bindings::{Anvil, AnvilInstance}; + +use crate::{ + crypto::*, + deployer::Deployer, + router::{Router, abi as router}, + tests::{key_gen, send, fund_account}, +}; + +async fn setup_test() -> ( + AnvilInstance, + Arc>, + u64, + Router, + 
HashMap>, + PublicKey, +) { + let anvil = Anvil::new().spawn(); + + let provider = RootProvider::new( + ClientBuilder::default().transport(SimpleRequest::new(anvil.endpoint()), true), + ); + let chain_id = provider.get_chain_id().await.unwrap(); + let wallet = anvil.keys()[0].clone().into(); + let client = Arc::new(provider); + + // Make sure the Deployer constructor returns None, as it doesn't exist yet + assert!(Deployer::new(client.clone()).await.unwrap().is_none()); + + // Deploy the Deployer + let tx = Deployer::deployment_tx(); + fund_account( + &client, + &wallet, + tx.recover_signer().unwrap(), + U256::from(tx.tx().gas_limit) * U256::from(tx.tx().gas_price), + ) + .await + .unwrap(); + + let (tx, sig, _) = tx.into_parts(); + let mut bytes = vec![]; + tx.encode_with_signature_fields(&sig, &mut bytes); + + let pending_tx = client.send_raw_transaction(&bytes).await.unwrap(); + let receipt = pending_tx.get_receipt().await.unwrap(); + assert!(receipt.status()); + let deployer = + Deployer::new(client.clone()).await.expect("network error").expect("deployer wasn't deployed"); + + let (keys, public_key) = key_gen(); + + // Verify the Router constructor returns None, as it doesn't exist yet + assert!(deployer.find_router(client.clone(), &public_key).await.unwrap().is_none()); + + // Deploy the router + let receipt = send(&client, &anvil.keys()[0].clone().into(), deployer.deploy_router(&public_key)) + .await + .unwrap(); + assert!(receipt.status()); + let contract = deployer.find_router(client.clone(), &public_key).await.unwrap().unwrap(); + + (anvil, client, chain_id, contract, keys, public_key) +} + +async fn latest_block_hash(client: &RootProvider) -> [u8; 32] { + client + .get_block(client.get_block_number().await.unwrap().into(), false) + .await + .unwrap() + .unwrap() + .header + .hash + .unwrap() + .0 +} + +#[tokio::test] +async fn test_deploy_contract() { + let (_anvil, client, _, router, _, public_key) = setup_test().await; + + let block_hash = 
latest_block_hash(&client).await; + assert_eq!(router.serai_key(block_hash).await.unwrap(), public_key); + assert_eq!(router.nonce(block_hash).await.unwrap(), U256::try_from(1u64).unwrap()); + // TODO: Check it emitted SeraiKeyUpdated(public_key) at its genesis +} + +pub fn hash_and_sign( + keys: &HashMap>, + public_key: &PublicKey, + message: &[u8], +) -> Signature { + let algo = IetfSchnorr::::ietf(); + let sig = + sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, keys), message); + + Signature::new(public_key, message, sig).unwrap() +} + +#[tokio::test] +async fn test_router_update_serai_key() { + let (anvil, client, chain_id, contract, keys, public_key) = setup_test().await; + + let next_key = loop { + let point = ProjectivePoint::random(&mut OsRng); + let Some(next_key) = PublicKey::new(point) else { continue }; + break next_key; + }; + + let message = Router::update_serai_key_message( + U256::try_from(chain_id).unwrap(), + U256::try_from(1u64).unwrap(), + &next_key, + ); + let sig = hash_and_sign(&keys, &public_key, &message); + + let first_block_hash = latest_block_hash(&client).await; + assert_eq!(contract.serai_key(first_block_hash).await.unwrap(), public_key); + + let receipt = + send(&client, &anvil.keys()[0].clone().into(), contract.update_serai_key(&next_key, &sig)) + .await + .unwrap(); + assert!(receipt.status()); + + let second_block_hash = latest_block_hash(&client).await; + assert_eq!(contract.serai_key(second_block_hash).await.unwrap(), next_key); + // Check this does still offer the historical state + assert_eq!(contract.serai_key(first_block_hash).await.unwrap(), public_key); + // TODO: Check logs + + println!("gas used: {:?}", receipt.gas_used); + // println!("logs: {:?}", receipt.logs); +} + +#[tokio::test] +async fn test_router_execute() { + let (anvil, client, chain_id, contract, keys, public_key) = setup_test().await; + + let to = Address::from([0; 20]); + let value = U256::ZERO; + let tx = router::OutInstruction 
{ to, value, calls: vec![] }; + let txs = vec![tx]; + + let first_block_hash = latest_block_hash(&client).await; + let nonce = contract.nonce(first_block_hash).await.unwrap(); + assert_eq!(nonce, U256::try_from(1u64).unwrap()); + + let message = Router::execute_message(U256::try_from(chain_id).unwrap(), nonce, txs.clone()); + let sig = hash_and_sign(&keys, &public_key, &message); + + let receipt = + send(&client, &anvil.keys()[0].clone().into(), contract.execute(&txs, &sig)).await.unwrap(); + assert!(receipt.status()); + + let second_block_hash = latest_block_hash(&client).await; + assert_eq!(contract.nonce(second_block_hash).await.unwrap(), U256::try_from(2u64).unwrap()); + // Check this does still offer the historical state + assert_eq!(contract.nonce(first_block_hash).await.unwrap(), U256::try_from(1u64).unwrap()); + // TODO: Check logs + + println!("gas used: {:?}", receipt.gas_used); + // println!("logs: {:?}", receipt.logs); +} diff --git a/coins/ethereum/src/tests/schnorr.rs b/coins/ethereum/src/tests/schnorr.rs new file mode 100644 index 000000000..21d8b45ae --- /dev/null +++ b/coins/ethereum/src/tests/schnorr.rs @@ -0,0 +1,93 @@ +use std::sync::Arc; + +use rand_core::OsRng; + +use group::ff::PrimeField; +use k256::Scalar; + +use frost::{ + curve::Secp256k1, + algorithm::IetfSchnorr, + tests::{algorithm_machines, sign}, +}; + +use alloy_core::primitives::Address; + +use alloy_sol_types::SolCall; + +use alloy_rpc_types::{TransactionInput, TransactionRequest}; +use alloy_simple_request_transport::SimpleRequest; +use alloy_rpc_client::ClientBuilder; +use alloy_provider::{Provider, RootProvider}; + +use alloy_node_bindings::{Anvil, AnvilInstance}; + +use crate::{ + Error, + crypto::*, + tests::{key_gen, deploy_contract, abi::schnorr as abi}, +}; + +async fn setup_test() -> (AnvilInstance, Arc>, Address) { + let anvil = Anvil::new().spawn(); + + let provider = RootProvider::new( + ClientBuilder::default().transport(SimpleRequest::new(anvil.endpoint()), true), + 
); + let wallet = anvil.keys()[0].clone().into(); + let client = Arc::new(provider); + + let address = deploy_contract(client.clone(), &wallet, "TestSchnorr").await.unwrap(); + (anvil, client, address) +} + +#[tokio::test] +async fn test_deploy_contract() { + setup_test().await; +} + +pub async fn call_verify( + provider: &RootProvider, + contract: Address, + public_key: &PublicKey, + message: &[u8], + signature: &Signature, +) -> Result<(), Error> { + let px: [u8; 32] = public_key.px.to_repr().into(); + let c_bytes: [u8; 32] = signature.c.to_repr().into(); + let s_bytes: [u8; 32] = signature.s.to_repr().into(); + let call = TransactionRequest::default().to(contract).input(TransactionInput::new( + abi::verifyCall::new((px.into(), message.to_vec().into(), c_bytes.into(), s_bytes.into())) + .abi_encode() + .into(), + )); + let bytes = provider.call(&call).await.map_err(|_| Error::ConnectionError)?; + let res = + abi::verifyCall::abi_decode_returns(&bytes, true).map_err(|_| Error::ConnectionError)?; + + if res._0 { + Ok(()) + } else { + Err(Error::InvalidSignature) + } +} + +#[tokio::test] +async fn test_ecrecover_hack() { + let (_anvil, client, contract) = setup_test().await; + + let (keys, public_key) = key_gen(); + + const MESSAGE: &[u8] = b"Hello, World!"; + + let algo = IetfSchnorr::::ietf(); + let sig = + sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE); + let sig = Signature::new(&public_key, MESSAGE, sig).unwrap(); + + call_verify(&client, contract, &public_key, MESSAGE, &sig).await.unwrap(); + // Test an invalid signature fails + let mut sig = sig; + sig.s += Scalar::ONE; + assert!(call_verify(&client, contract, &public_key, MESSAGE, &sig).await.is_err()); +} diff --git a/coins/ethereum/tests/contract.rs b/coins/ethereum/tests/contract.rs deleted file mode 100644 index 378758190..000000000 --- a/coins/ethereum/tests/contract.rs +++ /dev/null @@ -1,128 +0,0 @@ -use std::{convert::TryFrom, sync::Arc, time::Duration, 
fs::File}; - -use rand_core::OsRng; - -use ::k256::{ - elliptic_curve::{bigint::ArrayEncoding, PrimeField}, - U256, -}; - -use ethers_core::{ - types::Signature, - abi::Abi, - utils::{keccak256, Anvil, AnvilInstance}, -}; -use ethers_contract::ContractFactory; -use ethers_providers::{Middleware, Provider, Http}; - -use frost::{ - curve::Secp256k1, - Participant, - algorithm::IetfSchnorr, - tests::{key_gen, algorithm_machines, sign}, -}; - -use ethereum_serai::{ - crypto, - contract::{Schnorr, call_verify}, -}; - -// TODO: Replace with a contract deployment from an unknown account, so the environment solely has -// to fund the deployer, not create/pass a wallet -pub async fn deploy_schnorr_verifier_contract( - chain_id: u32, - client: Arc>, - wallet: &k256::ecdsa::SigningKey, -) -> eyre::Result>> { - let abi: Abi = serde_json::from_reader(File::open("./artifacts/Schnorr.abi").unwrap()).unwrap(); - - let hex_bin_buf = std::fs::read_to_string("./artifacts/Schnorr.bin").unwrap(); - let hex_bin = - if let Some(stripped) = hex_bin_buf.strip_prefix("0x") { stripped } else { &hex_bin_buf }; - let bin = hex::decode(hex_bin).unwrap(); - let factory = ContractFactory::new(abi, bin.into(), client.clone()); - - let mut deployment_tx = factory.deploy(())?.tx; - deployment_tx.set_chain_id(chain_id); - deployment_tx.set_gas(500_000); - let (max_fee_per_gas, max_priority_fee_per_gas) = client.estimate_eip1559_fees(None).await?; - deployment_tx.as_eip1559_mut().unwrap().max_fee_per_gas = Some(max_fee_per_gas); - deployment_tx.as_eip1559_mut().unwrap().max_priority_fee_per_gas = Some(max_priority_fee_per_gas); - - let sig_hash = deployment_tx.sighash(); - let (sig, rid) = wallet.sign_prehash_recoverable(sig_hash.as_ref()).unwrap(); - - // EIP-155 v - let mut v = u64::from(rid.to_byte()); - assert!((v == 0) || (v == 1)); - v += u64::from((chain_id * 2) + 35); - - let r = sig.r().to_repr(); - let r_ref: &[u8] = r.as_ref(); - let s = sig.s().to_repr(); - let s_ref: &[u8] = s.as_ref(); - 
let deployment_tx = deployment_tx.rlp_signed(&Signature { r: r_ref.into(), s: s_ref.into(), v }); - - let pending_tx = client.send_raw_transaction(deployment_tx).await?; - - let mut receipt; - while { - receipt = client.get_transaction_receipt(pending_tx.tx_hash()).await?; - receipt.is_none() - } { - tokio::time::sleep(Duration::from_secs(6)).await; - } - let receipt = receipt.unwrap(); - assert!(receipt.status == Some(1.into())); - - let contract = Schnorr::new(receipt.contract_address.unwrap(), client.clone()); - Ok(contract) -} - -async fn deploy_test_contract() -> (u32, AnvilInstance, Schnorr>) { - let anvil = Anvil::new().spawn(); - - let provider = - Provider::::try_from(anvil.endpoint()).unwrap().interval(Duration::from_millis(10u64)); - let chain_id = provider.get_chainid().await.unwrap().as_u32(); - let wallet = anvil.keys()[0].clone().into(); - let client = Arc::new(provider); - - (chain_id, anvil, deploy_schnorr_verifier_contract(chain_id, client, &wallet).await.unwrap()) -} - -#[tokio::test] -async fn test_deploy_contract() { - deploy_test_contract().await; -} - -#[tokio::test] -async fn test_ecrecover_hack() { - let (chain_id, _anvil, contract) = deploy_test_contract().await; - let chain_id = U256::from(chain_id); - - let keys = key_gen::<_, Secp256k1>(&mut OsRng); - let group_key = keys[&Participant::new(1).unwrap()].group_key(); - - const MESSAGE: &[u8] = b"Hello, World!"; - let hashed_message = keccak256(MESSAGE); - - let full_message = &[chain_id.to_be_byte_array().as_slice(), &hashed_message].concat(); - - let algo = IetfSchnorr::::ietf(); - let sig = sign( - &mut OsRng, - &algo, - keys.clone(), - algorithm_machines(&mut OsRng, &algo, &keys), - full_message, - ); - let mut processed_sig = - crypto::process_signature_for_contract(hashed_message, &sig.R, sig.s, &group_key, chain_id); - - call_verify(&contract, &processed_sig).await.unwrap(); - - // test invalid signature fails - processed_sig.message[0] = 0; - assert!(call_verify(&contract, 
&processed_sig).await.is_err()); -} diff --git a/coins/ethereum/tests/crypto.rs b/coins/ethereum/tests/crypto.rs deleted file mode 100644 index f1ab08b03..000000000 --- a/coins/ethereum/tests/crypto.rs +++ /dev/null @@ -1,87 +0,0 @@ -use k256::{ - elliptic_curve::{bigint::ArrayEncoding, ops::Reduce, sec1::ToEncodedPoint}, - ProjectivePoint, Scalar, U256, -}; -use frost::{curve::Secp256k1, Participant}; - -use ethereum_serai::crypto::*; - -#[test] -fn test_ecrecover() { - use rand_core::OsRng; - use sha2::Sha256; - use sha3::{Digest, Keccak256}; - use k256::ecdsa::{hazmat::SignPrimitive, signature::DigestVerifier, SigningKey, VerifyingKey}; - - let private = SigningKey::random(&mut OsRng); - let public = VerifyingKey::from(&private); - - const MESSAGE: &[u8] = b"Hello, World!"; - let (sig, recovery_id) = private - .as_nonzero_scalar() - .try_sign_prehashed_rfc6979::(&Keccak256::digest(MESSAGE), b"") - .unwrap(); - #[allow(clippy::unit_cmp)] // Intended to assert this wasn't changed to Result - { - assert_eq!(public.verify_digest(Keccak256::new_with_prefix(MESSAGE), &sig).unwrap(), ()); - } - - assert_eq!( - ecrecover(hash_to_scalar(MESSAGE), recovery_id.unwrap().is_y_odd().into(), *sig.r(), *sig.s()) - .unwrap(), - address(&ProjectivePoint::from(public.as_affine())) - ); -} - -#[test] -fn test_signing() { - use frost::{ - algorithm::IetfSchnorr, - tests::{algorithm_machines, key_gen, sign}, - }; - use rand_core::OsRng; - - let keys = key_gen::<_, Secp256k1>(&mut OsRng); - let _group_key = keys[&Participant::new(1).unwrap()].group_key(); - - const MESSAGE: &[u8] = b"Hello, World!"; - - let algo = IetfSchnorr::::ietf(); - let _sig = - sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE); -} - -#[test] -fn test_ecrecover_hack() { - use frost::{ - algorithm::IetfSchnorr, - tests::{algorithm_machines, key_gen, sign}, - }; - use rand_core::OsRng; - - let keys = key_gen::<_, Secp256k1>(&mut OsRng); - let group_key = 
keys[&Participant::new(1).unwrap()].group_key(); - let group_key_encoded = group_key.to_encoded_point(true); - let group_key_compressed = group_key_encoded.as_ref(); - let group_key_x = Scalar::reduce(U256::from_be_slice(&group_key_compressed[1 .. 33])); - - const MESSAGE: &[u8] = b"Hello, World!"; - let hashed_message = keccak256(MESSAGE); - let chain_id = U256::ONE; - - let full_message = &[chain_id.to_be_byte_array().as_slice(), &hashed_message].concat(); - - let algo = IetfSchnorr::::ietf(); - let sig = sign( - &mut OsRng, - &algo, - keys.clone(), - algorithm_machines(&mut OsRng, &algo, &keys), - full_message, - ); - - let (sr, er) = - preprocess_signature_for_ecrecover(hashed_message, &sig.R, sig.s, &group_key, chain_id); - let q = ecrecover(sr, group_key_compressed[0] - 2, group_key_x, er).unwrap(); - assert_eq!(q, address(&sig.R)); -} diff --git a/coins/ethereum/tests/mod.rs b/coins/ethereum/tests/mod.rs deleted file mode 100644 index 257fb61f6..000000000 --- a/coins/ethereum/tests/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -mod contract; -mod crypto; diff --git a/coins/monero/Cargo.toml b/coins/monero/Cargo.toml index 9c78e431c..357803c92 100644 --- a/coins/monero/Cargo.toml +++ b/coins/monero/Cargo.toml @@ -43,7 +43,6 @@ multiexp = { path = "../../crypto/multiexp", version = "0.4", default-features = # Needed for multisig transcript = { package = "flexible-transcript", path = "../../crypto/transcript", version = "0.3", default-features = false, features = ["recommended"], optional = true } -dleq = { path = "../../crypto/dleq", version = "0.4", default-features = false, features = ["serialize"], optional = true } frost = { package = "modular-frost", path = "../../crypto/frost", version = "0.8", default-features = false, features = ["ed25519"], optional = true } monero-generators = { path = "generators", version = "0.4", default-features = false } @@ -91,7 +90,6 @@ std = [ "multiexp/std", "transcript/std", - "dleq/std", "monero-generators/std", @@ -106,7 +104,7 @@ 
std = [ cache-distribution = ["async-lock"] http-rpc = ["digest_auth", "simple-request", "tokio"] -multisig = ["transcript", "frost", "dleq", "std"] +multisig = ["transcript", "frost", "std"] binaries = ["tokio/rt-multi-thread", "tokio/macros", "http-rpc"] experimental = [] diff --git a/coins/monero/src/lib.rs b/coins/monero/src/lib.rs index 6d9c0a6b7..4e6b26d1c 100644 --- a/coins/monero/src/lib.rs +++ b/coins/monero/src/lib.rs @@ -14,7 +14,12 @@ use zeroize::{Zeroize, ZeroizeOnDrop}; use sha3::{Digest, Keccak256}; -use curve25519_dalek::{constants::ED25519_BASEPOINT_TABLE, scalar::Scalar, edwards::EdwardsPoint}; +use curve25519_dalek::{ + constants::{ED25519_BASEPOINT_TABLE, ED25519_BASEPOINT_POINT}, + scalar::Scalar, + edwards::{EdwardsPoint, VartimeEdwardsPrecomputation}, + traits::VartimePrecomputedMultiscalarMul, +}; pub use monero_generators::{H, decompress_point}; @@ -56,6 +61,13 @@ pub(crate) fn INV_EIGHT() -> Scalar { *INV_EIGHT_CELL.get_or_init(|| Scalar::from(8u8).invert()) } +static BASEPOINT_PRECOMP_CELL: OnceLock = OnceLock::new(); +#[allow(non_snake_case)] +pub(crate) fn BASEPOINT_PRECOMP() -> &'static VartimeEdwardsPrecomputation { + BASEPOINT_PRECOMP_CELL + .get_or_init(|| VartimeEdwardsPrecomputation::new([ED25519_BASEPOINT_POINT])) +} + /// Monero protocol version. 
/// /// v15 is omitted as v15 was simply v14 and v16 being active at the same time, with regards to the diff --git a/coins/monero/src/ringct/bulletproofs/mod.rs b/coins/monero/src/ringct/bulletproofs/mod.rs index df0c6ff8a..ce9f74926 100644 --- a/coins/monero/src/ringct/bulletproofs/mod.rs +++ b/coins/monero/src/ringct/bulletproofs/mod.rs @@ -91,7 +91,7 @@ impl Bulletproofs { Bulletproofs::Plus( AggregateRangeStatement::new(outputs.iter().map(|com| DfgPoint(com.calculate())).collect()) .unwrap() - .prove(rng, &Zeroizing::new(AggregateRangeWitness::new(outputs).unwrap())) + .prove(rng, &Zeroizing::new(AggregateRangeWitness::new(outputs.to_vec()).unwrap())) .unwrap(), ) }) diff --git a/coins/monero/src/ringct/bulletproofs/original.rs b/coins/monero/src/ringct/bulletproofs/original.rs index 5e50c02ea..0e841080e 100644 --- a/coins/monero/src/ringct/bulletproofs/original.rs +++ b/coins/monero/src/ringct/bulletproofs/original.rs @@ -9,7 +9,7 @@ use curve25519_dalek::{scalar::Scalar as DalekScalar, edwards::EdwardsPoint as D use group::{ff::Field, Group}; use dalek_ff_group::{ED25519_BASEPOINT_POINT as G, Scalar, EdwardsPoint}; -use multiexp::BatchVerifier; +use multiexp::{BatchVerifier, multiexp}; use crate::{Commitment, ringct::bulletproofs::core::*}; @@ -17,7 +17,20 @@ include!(concat!(env!("OUT_DIR"), "/generators.rs")); static IP12_CELL: OnceLock = OnceLock::new(); pub(crate) fn IP12() -> Scalar { - *IP12_CELL.get_or_init(|| inner_product(&ScalarVector(vec![Scalar::ONE; N]), TWO_N())) + *IP12_CELL.get_or_init(|| ScalarVector(vec![Scalar::ONE; N]).inner_product(TWO_N())) +} + +pub(crate) fn hadamard_fold( + l: &[EdwardsPoint], + r: &[EdwardsPoint], + a: Scalar, + b: Scalar, +) -> Vec { + let mut res = Vec::with_capacity(l.len() / 2); + for i in 0 .. 
l.len() { + res.push(multiexp(&[(a, l[i]), (b, r[i])])); + } + res } #[derive(Clone, PartialEq, Eq, Debug)] @@ -57,7 +70,7 @@ impl OriginalStruct { let mut cache = hash_to_scalar(&y.to_bytes()); let z = cache; - let l0 = &aL - z; + let l0 = aL - z; let l1 = sL; let mut zero_twos = Vec::with_capacity(MN); @@ -69,12 +82,12 @@ impl OriginalStruct { } let yMN = ScalarVector::powers(y, MN); - let r0 = (&(aR + z) * &yMN) + ScalarVector(zero_twos); - let r1 = yMN * sR; + let r0 = ((aR + z) * &yMN) + &ScalarVector(zero_twos); + let r1 = yMN * &sR; let (T1, T2, x, mut taux) = { - let t1 = inner_product(&l0, &r1) + inner_product(&l1, &r0); - let t2 = inner_product(&l1, &r1); + let t1 = l0.clone().inner_product(&r1) + r0.clone().inner_product(&l1); + let t2 = l1.clone().inner_product(&r1); let mut tau1 = Scalar::random(&mut *rng); let mut tau2 = Scalar::random(&mut *rng); @@ -100,10 +113,10 @@ impl OriginalStruct { taux += zpow[i + 2] * gamma; } - let l = &l0 + &(l1 * x); - let r = &r0 + &(r1 * x); + let l = l0 + &(l1 * x); + let r = r0 + &(r1 * x); - let t = inner_product(&l, &r); + let t = l.clone().inner_product(&r); let x_ip = hash_cache(&mut cache, &[x.to_bytes(), taux.to_bytes(), mu.to_bytes(), t.to_bytes()]); @@ -126,8 +139,8 @@ impl OriginalStruct { let (aL, aR) = a.split(); let (bL, bR) = b.split(); - let cL = inner_product(&aL, &bR); - let cR = inner_product(&aR, &bL); + let cL = aL.clone().inner_product(&bR); + let cR = aR.clone().inner_product(&bL); let (G_L, G_R) = G_proof.split_at(aL.len()); let (H_L, H_R) = H_proof.split_at(aL.len()); @@ -140,8 +153,8 @@ impl OriginalStruct { let w = hash_cache(&mut cache, &[L_i.compress().to_bytes(), R_i.compress().to_bytes()]); let winv = w.invert().unwrap(); - a = (aL * w) + (aR * winv); - b = (bL * winv) + (bR * w); + a = (aL * w) + &(aR * winv); + b = (bL * winv) + &(bR * w); if a.len() != 1 { G_proof = hadamard_fold(G_L, G_R, winv, w); diff --git a/coins/monero/src/ringct/bulletproofs/plus/aggregate_range_proof.rs 
b/coins/monero/src/ringct/bulletproofs/plus/aggregate_range_proof.rs index 859cb1e44..cba950142 100644 --- a/coins/monero/src/ringct/bulletproofs/plus/aggregate_range_proof.rs +++ b/coins/monero/src/ringct/bulletproofs/plus/aggregate_range_proof.rs @@ -24,7 +24,7 @@ use crate::{ }, }; -// Figure 3 +// Figure 3 of the Bulletproofs+ Paper #[derive(Clone, Debug)] pub(crate) struct AggregateRangeStatement { generators: Generators, @@ -38,24 +38,15 @@ impl Zeroize for AggregateRangeStatement { } #[derive(Clone, Debug, Zeroize, ZeroizeOnDrop)] -pub(crate) struct AggregateRangeWitness { - values: Vec, - gammas: Vec, -} +pub(crate) struct AggregateRangeWitness(Vec); impl AggregateRangeWitness { - pub(crate) fn new(commitments: &[Commitment]) -> Option { + pub(crate) fn new(commitments: Vec) -> Option { if commitments.is_empty() || (commitments.len() > MAX_M) { return None; } - let mut values = Vec::with_capacity(commitments.len()); - let mut gammas = Vec::with_capacity(commitments.len()); - for commitment in commitments { - values.push(commitment.amount); - gammas.push(Scalar(commitment.mask)); - } - Some(AggregateRangeWitness { values, gammas }) + Some(AggregateRangeWitness(commitments)) } } @@ -112,7 +103,7 @@ impl AggregateRangeStatement { let mut d = ScalarVector::new(mn); for j in 1 ..= V.len() { z_pow.push(z.pow(Scalar::from(2 * u64::try_from(j).unwrap()))); // TODO: Optimize this - d = d.add_vec(&Self::d_j(j, V.len()).mul(z_pow[j - 1])); + d = d + &(Self::d_j(j, V.len()) * (z_pow[j - 1])); } let mut ascending_y = ScalarVector(vec![y]); @@ -124,7 +115,8 @@ impl AggregateRangeStatement { let mut descending_y = ascending_y.clone(); descending_y.0.reverse(); - let d_descending_y = d.mul_vec(&descending_y); + let d_descending_y = d.clone() * &descending_y; + let d_descending_y_plus_z = d_descending_y + z; let y_mn_plus_one = descending_y[0] * y; @@ -135,9 +127,9 @@ impl AggregateRangeStatement { let neg_z = -z; let mut A_terms = Vec::with_capacity((generators.len() * 2) 
+ 2); - for (i, d_y_z) in d_descending_y.add(z).0.drain(..).enumerate() { + for (i, d_y_z) in d_descending_y_plus_z.0.iter().enumerate() { A_terms.push((neg_z, generators.generator(GeneratorsList::GBold1, i))); - A_terms.push((d_y_z, generators.generator(GeneratorsList::HBold1, i))); + A_terms.push((*d_y_z, generators.generator(GeneratorsList::HBold1, i))); } A_terms.push((y_mn_plus_one, commitment_accum)); A_terms.push(( @@ -145,7 +137,14 @@ impl AggregateRangeStatement { Generators::g(), )); - (y, d_descending_y, y_mn_plus_one, z, ScalarVector(z_pow), A + multiexp_vartime(&A_terms)) + ( + y, + d_descending_y_plus_z, + y_mn_plus_one, + z, + ScalarVector(z_pow), + A + multiexp_vartime(&A_terms), + ) } pub(crate) fn prove( @@ -154,13 +153,11 @@ impl AggregateRangeStatement { witness: &AggregateRangeWitness, ) -> Option { // Check for consistency with the witness - if self.V.len() != witness.values.len() { + if self.V.len() != witness.0.len() { return None; } - for (commitment, (value, gamma)) in - self.V.iter().zip(witness.values.iter().zip(witness.gammas.iter())) - { - if Commitment::new(**gamma, *value).calculate() != **commitment { + for (commitment, witness) in self.V.iter().zip(witness.0.iter()) { + if witness.calculate() != **commitment { return None; } } @@ -188,10 +185,16 @@ impl AggregateRangeStatement { let mut a_l = ScalarVector(Vec::with_capacity(V.len() * N)); for j in 1 ..= V.len() { d_js.push(Self::d_j(j, V.len())); - a_l.0.append(&mut u64_decompose(*witness.values.get(j - 1).unwrap_or(&0)).0); + #[allow(clippy::map_unwrap_or)] + a_l.0.append( + &mut u64_decompose( + *witness.0.get(j - 1).map(|commitment| &commitment.amount).unwrap_or(&0), + ) + .0, + ); } - let a_r = a_l.sub(Scalar::ONE); + let a_r = a_l.clone() - Scalar::ONE; let alpha = Scalar::random(&mut *rng); @@ -209,14 +212,14 @@ impl AggregateRangeStatement { // Multiply by INV_EIGHT per earlier commentary A.0 *= crate::INV_EIGHT(); - let (y, d_descending_y, y_mn_plus_one, z, z_pow, A_hat) = 
+ let (y, d_descending_y_plus_z, y_mn_plus_one, z, z_pow, A_hat) = Self::compute_A_hat(PointVector(V), &generators, &mut transcript, A); - let a_l = a_l.sub(z); - let a_r = a_r.add_vec(&d_descending_y).add(z); + let a_l = a_l - z; + let a_r = a_r + &d_descending_y_plus_z; let mut alpha = alpha; - for j in 1 ..= witness.gammas.len() { - alpha += z_pow[j - 1] * witness.gammas[j - 1] * y_mn_plus_one; + for j in 1 ..= witness.0.len() { + alpha += z_pow[j - 1] * Scalar(witness.0[j - 1].mask) * y_mn_plus_one; } Some(AggregateRangeProof { diff --git a/coins/monero/src/ringct/bulletproofs/plus/mod.rs b/coins/monero/src/ringct/bulletproofs/plus/mod.rs index 6a2d7b9c4..304178214 100644 --- a/coins/monero/src/ringct/bulletproofs/plus/mod.rs +++ b/coins/monero/src/ringct/bulletproofs/plus/mod.rs @@ -3,8 +3,7 @@ use group::Group; use dalek_ff_group::{Scalar, EdwardsPoint}; -mod scalar_vector; -pub(crate) use scalar_vector::{ScalarVector, weighted_inner_product}; +pub(crate) use crate::ringct::bulletproofs::scalar_vector::ScalarVector; mod point_vector; pub(crate) use point_vector::PointVector; diff --git a/coins/monero/src/ringct/bulletproofs/plus/scalar_vector.rs b/coins/monero/src/ringct/bulletproofs/plus/scalar_vector.rs deleted file mode 100644 index 7bc0c3f47..000000000 --- a/coins/monero/src/ringct/bulletproofs/plus/scalar_vector.rs +++ /dev/null @@ -1,114 +0,0 @@ -use core::{ - borrow::Borrow, - ops::{Index, IndexMut}, -}; -use std_shims::vec::Vec; - -use zeroize::Zeroize; - -use group::ff::Field; -use dalek_ff_group::Scalar; - -#[derive(Clone, PartialEq, Eq, Debug, Zeroize)] -pub(crate) struct ScalarVector(pub(crate) Vec); - -impl Index for ScalarVector { - type Output = Scalar; - fn index(&self, index: usize) -> &Scalar { - &self.0[index] - } -} - -impl IndexMut for ScalarVector { - fn index_mut(&mut self, index: usize) -> &mut Scalar { - &mut self.0[index] - } -} - -impl ScalarVector { - pub(crate) fn new(len: usize) -> Self { - ScalarVector(vec![Scalar::ZERO; len]) - 
} - - pub(crate) fn add(&self, scalar: impl Borrow) -> Self { - let mut res = self.clone(); - for val in &mut res.0 { - *val += scalar.borrow(); - } - res - } - - pub(crate) fn sub(&self, scalar: impl Borrow) -> Self { - let mut res = self.clone(); - for val in &mut res.0 { - *val -= scalar.borrow(); - } - res - } - - pub(crate) fn mul(&self, scalar: impl Borrow) -> Self { - let mut res = self.clone(); - for val in &mut res.0 { - *val *= scalar.borrow(); - } - res - } - - pub(crate) fn add_vec(&self, vector: &Self) -> Self { - debug_assert_eq!(self.len(), vector.len()); - let mut res = self.clone(); - for (i, val) in res.0.iter_mut().enumerate() { - *val += vector.0[i]; - } - res - } - - pub(crate) fn mul_vec(&self, vector: &Self) -> Self { - debug_assert_eq!(self.len(), vector.len()); - let mut res = self.clone(); - for (i, val) in res.0.iter_mut().enumerate() { - *val *= vector.0[i]; - } - res - } - - pub(crate) fn inner_product(&self, vector: &Self) -> Scalar { - self.mul_vec(vector).sum() - } - - pub(crate) fn powers(x: Scalar, len: usize) -> Self { - debug_assert!(len != 0); - - let mut res = Vec::with_capacity(len); - res.push(Scalar::ONE); - res.push(x); - for i in 2 .. 
len { - res.push(res[i - 1] * x); - } - res.truncate(len); - ScalarVector(res) - } - - pub(crate) fn sum(mut self) -> Scalar { - self.0.drain(..).sum() - } - - pub(crate) fn len(&self) -> usize { - self.0.len() - } - - pub(crate) fn split(mut self) -> (Self, Self) { - debug_assert!(self.len() > 1); - let r = self.0.split_off(self.0.len() / 2); - debug_assert_eq!(self.len(), r.len()); - (self, ScalarVector(r)) - } -} - -pub(crate) fn weighted_inner_product( - a: &ScalarVector, - b: &ScalarVector, - y: &ScalarVector, -) -> Scalar { - a.inner_product(&b.mul_vec(y)) -} diff --git a/coins/monero/src/ringct/bulletproofs/plus/weighted_inner_product.rs b/coins/monero/src/ringct/bulletproofs/plus/weighted_inner_product.rs index 1bc1e85da..7cb9a4df2 100644 --- a/coins/monero/src/ringct/bulletproofs/plus/weighted_inner_product.rs +++ b/coins/monero/src/ringct/bulletproofs/plus/weighted_inner_product.rs @@ -4,7 +4,7 @@ use rand_core::{RngCore, CryptoRng}; use zeroize::{Zeroize, ZeroizeOnDrop}; -use multiexp::{multiexp, multiexp_vartime, BatchVerifier}; +use multiexp::{BatchVerifier, multiexp, multiexp_vartime}; use group::{ ff::{Field, PrimeField}, GroupEncoding, @@ -12,11 +12,10 @@ use group::{ use dalek_ff_group::{Scalar, EdwardsPoint}; use crate::ringct::bulletproofs::plus::{ - ScalarVector, PointVector, GeneratorsList, Generators, padded_pow_of_2, weighted_inner_product, - transcript::*, + ScalarVector, PointVector, GeneratorsList, Generators, padded_pow_of_2, transcript::*, }; -// Figure 1 +// Figure 1 of the Bulletproofs+ paper #[derive(Clone, Debug)] pub(crate) struct WipStatement { generators: Generators, @@ -219,7 +218,7 @@ impl WipStatement { .zip(g_bold.0.iter().copied()) .chain(witness.b.0.iter().copied().zip(h_bold.0.iter().copied())) .collect::>(); - P_terms.push((weighted_inner_product(&witness.a, &witness.b, &y), g)); + P_terms.push((witness.a.clone().weighted_inner_product(&witness.b, &y), g)); P_terms.push((witness.alpha, h)); 
debug_assert_eq!(multiexp(&P_terms), P); P_terms.zeroize(); @@ -258,14 +257,13 @@ impl WipStatement { let d_l = Scalar::random(&mut *rng); let d_r = Scalar::random(&mut *rng); - let c_l = weighted_inner_product(&a1, &b2, &y); - let c_r = weighted_inner_product(&(a2.mul(y_n_hat)), &b1, &y); + let c_l = a1.clone().weighted_inner_product(&b2, &y); + let c_r = (a2.clone() * y_n_hat).weighted_inner_product(&b1, &y); // TODO: Calculate these with a batch inversion let y_inv_n_hat = y_n_hat.invert().unwrap(); - let mut L_terms = a1 - .mul(y_inv_n_hat) + let mut L_terms = (a1.clone() * y_inv_n_hat) .0 .drain(..) .zip(g_bold2.0.iter().copied()) @@ -277,8 +275,7 @@ impl WipStatement { L_vec.push(L); L_terms.zeroize(); - let mut R_terms = a2 - .mul(y_n_hat) + let mut R_terms = (a2.clone() * y_n_hat) .0 .drain(..) .zip(g_bold1.0.iter().copied()) @@ -294,8 +291,8 @@ impl WipStatement { (e, inv_e, e_square, inv_e_square, g_bold, h_bold) = Self::next_G_H(&mut transcript, g_bold1, g_bold2, h_bold1, h_bold2, L, R, y_inv_n_hat); - a = a1.mul(e).add_vec(&a2.mul(y_n_hat * inv_e)); - b = b1.mul(inv_e).add_vec(&b2.mul(e)); + a = (a1 * e) + &(a2 * (y_n_hat * inv_e)); + b = (b1 * inv_e) + &(b2 * e); alpha += (d_l * e_square) + (d_r * inv_e_square); debug_assert_eq!(g_bold.len(), a.len()); diff --git a/coins/monero/src/ringct/bulletproofs/scalar_vector.rs b/coins/monero/src/ringct/bulletproofs/scalar_vector.rs index 6f94f228d..e62883672 100644 --- a/coins/monero/src/ringct/bulletproofs/scalar_vector.rs +++ b/coins/monero/src/ringct/bulletproofs/scalar_vector.rs @@ -1,114 +1,138 @@ -use core::ops::{Add, Sub, Mul, Index}; +use core::{ + borrow::Borrow, + ops::{Index, IndexMut, Add, Sub, Mul}, +}; use std_shims::vec::Vec; use zeroize::{Zeroize, ZeroizeOnDrop}; use group::ff::Field; use dalek_ff_group::{Scalar, EdwardsPoint}; - use multiexp::multiexp; #[derive(Clone, PartialEq, Eq, Debug, Zeroize, ZeroizeOnDrop)] pub(crate) struct ScalarVector(pub(crate) Vec); -macro_rules! 
math_op { - ($Op: ident, $op: ident, $f: expr) => { - #[allow(clippy::redundant_closure_call)] - impl $Op for ScalarVector { - type Output = ScalarVector; - fn $op(self, b: Scalar) -> ScalarVector { - ScalarVector(self.0.iter().map(|a| $f((a, &b))).collect()) - } - } - #[allow(clippy::redundant_closure_call)] - impl $Op for &ScalarVector { - type Output = ScalarVector; - fn $op(self, b: Scalar) -> ScalarVector { - ScalarVector(self.0.iter().map(|a| $f((a, &b))).collect()) - } - } +impl Index for ScalarVector { + type Output = Scalar; + fn index(&self, index: usize) -> &Scalar { + &self.0[index] + } +} +impl IndexMut for ScalarVector { + fn index_mut(&mut self, index: usize) -> &mut Scalar { + &mut self.0[index] + } +} - #[allow(clippy::redundant_closure_call)] - impl $Op for ScalarVector { - type Output = ScalarVector; - fn $op(self, b: ScalarVector) -> ScalarVector { - debug_assert_eq!(self.len(), b.len()); - ScalarVector(self.0.iter().zip(b.0.iter()).map($f).collect()) - } +impl> Add for ScalarVector { + type Output = ScalarVector; + fn add(mut self, scalar: S) -> ScalarVector { + for s in &mut self.0 { + *s += scalar.borrow(); + } + self + } +} +impl> Sub for ScalarVector { + type Output = ScalarVector; + fn sub(mut self, scalar: S) -> ScalarVector { + for s in &mut self.0 { + *s -= scalar.borrow(); } + self + } +} +impl> Mul for ScalarVector { + type Output = ScalarVector; + fn mul(mut self, scalar: S) -> ScalarVector { + for s in &mut self.0 { + *s *= scalar.borrow(); + } + self + } +} - #[allow(clippy::redundant_closure_call)] - impl $Op<&ScalarVector> for &ScalarVector { - type Output = ScalarVector; - fn $op(self, b: &ScalarVector) -> ScalarVector { - debug_assert_eq!(self.len(), b.len()); - ScalarVector(self.0.iter().zip(b.0.iter()).map($f).collect()) - } +impl Add<&ScalarVector> for ScalarVector { + type Output = ScalarVector; + fn add(mut self, other: &ScalarVector) -> ScalarVector { + debug_assert_eq!(self.len(), other.len()); + for (s, o) in 
self.0.iter_mut().zip(other.0.iter()) { + *s += o; } - }; + self + } +} +impl Sub<&ScalarVector> for ScalarVector { + type Output = ScalarVector; + fn sub(mut self, other: &ScalarVector) -> ScalarVector { + debug_assert_eq!(self.len(), other.len()); + for (s, o) in self.0.iter_mut().zip(other.0.iter()) { + *s -= o; + } + self + } +} +impl Mul<&ScalarVector> for ScalarVector { + type Output = ScalarVector; + fn mul(mut self, other: &ScalarVector) -> ScalarVector { + debug_assert_eq!(self.len(), other.len()); + for (s, o) in self.0.iter_mut().zip(other.0.iter()) { + *s *= o; + } + self + } +} + +impl Mul<&[EdwardsPoint]> for &ScalarVector { + type Output = EdwardsPoint; + fn mul(self, b: &[EdwardsPoint]) -> EdwardsPoint { + debug_assert_eq!(self.len(), b.len()); + let mut multiexp_args = self.0.iter().copied().zip(b.iter().copied()).collect::>(); + let res = multiexp(&multiexp_args); + multiexp_args.zeroize(); + res + } } -math_op!(Add, add, |(a, b): (&Scalar, &Scalar)| *a + *b); -math_op!(Sub, sub, |(a, b): (&Scalar, &Scalar)| *a - *b); -math_op!(Mul, mul, |(a, b): (&Scalar, &Scalar)| *a * *b); impl ScalarVector { - pub(crate) fn new(len: usize) -> ScalarVector { + pub(crate) fn new(len: usize) -> Self { ScalarVector(vec![Scalar::ZERO; len]) } - pub(crate) fn powers(x: Scalar, len: usize) -> ScalarVector { + pub(crate) fn powers(x: Scalar, len: usize) -> Self { debug_assert!(len != 0); let mut res = Vec::with_capacity(len); res.push(Scalar::ONE); - for i in 1 .. len { + res.push(x); + for i in 2 .. 
len { res.push(res[i - 1] * x); } + res.truncate(len); ScalarVector(res) } - pub(crate) fn sum(mut self) -> Scalar { - self.0.drain(..).sum() - } - pub(crate) fn len(&self) -> usize { self.0.len() } - pub(crate) fn split(self) -> (ScalarVector, ScalarVector) { - let (l, r) = self.0.split_at(self.0.len() / 2); - (ScalarVector(l.to_vec()), ScalarVector(r.to_vec())) + pub(crate) fn sum(mut self) -> Scalar { + self.0.drain(..).sum() } -} -impl Index for ScalarVector { - type Output = Scalar; - fn index(&self, index: usize) -> &Scalar { - &self.0[index] + pub(crate) fn inner_product(self, vector: &Self) -> Scalar { + (self * vector).sum() } -} - -pub(crate) fn inner_product(a: &ScalarVector, b: &ScalarVector) -> Scalar { - (a * b).sum() -} -impl Mul<&[EdwardsPoint]> for &ScalarVector { - type Output = EdwardsPoint; - fn mul(self, b: &[EdwardsPoint]) -> EdwardsPoint { - debug_assert_eq!(self.len(), b.len()); - multiexp(&self.0.iter().copied().zip(b.iter().copied()).collect::>()) + pub(crate) fn weighted_inner_product(self, vector: &Self, y: &Self) -> Scalar { + (self * vector * y).sum() } -} -pub(crate) fn hadamard_fold( - l: &[EdwardsPoint], - r: &[EdwardsPoint], - a: Scalar, - b: Scalar, -) -> Vec { - let mut res = Vec::with_capacity(l.len() / 2); - for i in 0 .. 
l.len() { - res.push(multiexp(&[(a, l[i]), (b, r[i])])); - } - res + pub(crate) fn split(mut self) -> (Self, Self) { + debug_assert!(self.len() > 1); + let r = self.0.split_off(self.0.len() / 2); + debug_assert_eq!(self.len(), r.len()); + (self, ScalarVector(r)) + } } diff --git a/coins/monero/src/ringct/clsag/mod.rs b/coins/monero/src/ringct/clsag/mod.rs index 1290e3e38..042d964ac 100644 --- a/coins/monero/src/ringct/clsag/mod.rs +++ b/coins/monero/src/ringct/clsag/mod.rs @@ -9,17 +9,17 @@ use std_shims::{ use rand_core::{RngCore, CryptoRng}; use zeroize::{Zeroize, ZeroizeOnDrop, Zeroizing}; -use subtle::{ConstantTimeEq, Choice, CtOption}; +use subtle::{ConstantTimeEq, ConditionallySelectable}; use curve25519_dalek::{ - constants::ED25519_BASEPOINT_TABLE, + constants::{ED25519_BASEPOINT_TABLE, ED25519_BASEPOINT_POINT}, scalar::Scalar, - traits::{IsIdentity, VartimePrecomputedMultiscalarMul}, + traits::{IsIdentity, MultiscalarMul, VartimePrecomputedMultiscalarMul}, edwards::{EdwardsPoint, VartimeEdwardsPrecomputation}, }; use crate::{ - INV_EIGHT, Commitment, random_scalar, hash_to_scalar, wallet::decoys::Decoys, + INV_EIGHT, BASEPOINT_PRECOMP, Commitment, random_scalar, hash_to_scalar, wallet::decoys::Decoys, ringct::hash_to_point, serialize::*, }; @@ -27,8 +27,6 @@ use crate::{ mod multisig; #[cfg(feature = "multisig")] pub use multisig::{ClsagDetails, ClsagAddendum, ClsagMultisig}; -#[cfg(feature = "multisig")] -pub(crate) use multisig::add_key_image_share; /// Errors returned when CLSAG signing fails. #[derive(Clone, Copy, PartialEq, Eq, Debug)] @@ -100,8 +98,11 @@ fn core( ) -> ((EdwardsPoint, Scalar, Scalar), Scalar) { let n = ring.len(); - let images_precomp = VartimeEdwardsPrecomputation::new([I, D]); - let D = D * INV_EIGHT(); + let images_precomp = match A_c1 { + Mode::Sign(..) => None, + Mode::Verify(..) 
=> Some(VartimeEdwardsPrecomputation::new([I, D])), + }; + let D_INV_EIGHT = D * INV_EIGHT(); // Generate the transcript // Instead of generating multiple, a single transcript is created and then edited as needed @@ -130,7 +131,7 @@ fn core( } to_hash.extend(I.compress().to_bytes()); - to_hash.extend(D.compress().to_bytes()); + to_hash.extend(D_INV_EIGHT.compress().to_bytes()); to_hash.extend(pseudo_out.compress().to_bytes()); // mu_P with agg_0 let mu_P = hash_to_scalar(&to_hash); @@ -169,29 +170,44 @@ fn core( } // Perform the core loop - let mut c1 = CtOption::new(Scalar::ZERO, Choice::from(0)); + let mut c1 = c; for i in (start .. end).map(|i| i % n) { - // This will only execute once and shouldn't need to be constant time. Making it constant time - // removes the risk of branch prediction creating timing differences depending on ring index - // however - c1 = c1.or_else(|| CtOption::new(c, i.ct_eq(&0))); - let c_p = mu_P * c; let c_c = mu_C * c; - let L = (&s[i] * ED25519_BASEPOINT_TABLE) + (c_p * P[i]) + (c_c * C[i]); + // (s_i * G) + (c_p * P_i) + (c_c * C_i) + let L = match A_c1 { + Mode::Sign(..) => { + EdwardsPoint::multiscalar_mul([s[i], c_p, c_c], [ED25519_BASEPOINT_POINT, P[i], C[i]]) + } + Mode::Verify(..) => { + BASEPOINT_PRECOMP().vartime_mixed_multiscalar_mul([s[i]], [c_p, c_c], [P[i], C[i]]) + } + }; + let PH = hash_to_point(&P[i]); - // Shouldn't be an issue as all of the variables in this vartime statement are public - let R = (s[i] * PH) + images_precomp.vartime_multiscalar_mul([c_p, c_c]); + + // (c_p * I) + (c_c * D) + (s_i * PH) + let R = match A_c1 { + Mode::Sign(..) => EdwardsPoint::multiscalar_mul([c_p, c_c, s[i]], [I, D, &PH]), + Mode::Verify(..) 
=> { + images_precomp.as_ref().unwrap().vartime_mixed_multiscalar_mul([c_p, c_c], [s[i]], [PH]) + } + }; to_hash.truncate(((2 * n) + 3) * 32); to_hash.extend(L.compress().to_bytes()); to_hash.extend(R.compress().to_bytes()); c = hash_to_scalar(&to_hash); + + // This will only execute once and shouldn't need to be constant time. Making it constant time + // removes the risk of branch prediction creating timing differences depending on ring index + // however + c1.conditional_assign(&c, i.ct_eq(&(n - 1))); } // This first tuple is needed to continue signing, the latter is the c to be tested/worked with - ((D, c * mu_P, c * mu_C), c1.unwrap_or(c)) + ((D_INV_EIGHT, c * mu_P, c * mu_C), c1) } /// CLSAG signature, as used in Monero. @@ -261,8 +277,10 @@ impl Clsag { nonce.deref() * hash_to_point(&inputs[i].2.decoys.ring[usize::from(inputs[i].2.decoys.i)][0]), ); - clsag.s[usize::from(inputs[i].2.decoys.i)] = - (-((p * inputs[i].0.deref()) + c)) + nonce.deref(); + // Effectively r - cx, except cx is (c_p x) + (c_c z), where z is the delta between a ring + // member's commitment and our input commitment (which will only have a known discrete log + // over G if the amounts cancel out) + clsag.s[usize::from(inputs[i].2.decoys.i)] = nonce.deref() - ((p * inputs[i].0.deref()) + c); inputs[i].0.zeroize(); nonce.zeroize(); diff --git a/coins/monero/src/ringct/clsag/multisig.rs b/coins/monero/src/ringct/clsag/multisig.rs index 9cb930ce1..e9234979d 100644 --- a/coins/monero/src/ringct/clsag/multisig.rs +++ b/coins/monero/src/ringct/clsag/multisig.rs @@ -1,5 +1,8 @@ use core::{ops::Deref, fmt::Debug}; -use std_shims::io::{self, Read, Write}; +use std_shims::{ + io::{self, Read, Write}, + collections::HashMap, +}; use std::sync::{Arc, RwLock}; use rand_core::{RngCore, CryptoRng, SeedableRng}; @@ -9,11 +12,13 @@ use zeroize::{Zeroize, ZeroizeOnDrop, Zeroizing}; use curve25519_dalek::{scalar::Scalar, edwards::EdwardsPoint}; -use group::{ff::Field, Group, GroupEncoding}; +use group::{ 
+ ff::{Field, PrimeField}, + Group, GroupEncoding, +}; use transcript::{Transcript, RecommendedTranscript}; use dalek_ff_group as dfg; -use dleq::DLEqProof; use frost::{ dkg::lagrange, curve::Ed25519, @@ -26,10 +31,6 @@ use crate::ringct::{ clsag::{ClsagInput, Clsag}, }; -fn dleq_transcript() -> RecommendedTranscript { - RecommendedTranscript::new(b"monero_key_image_dleq") -} - impl ClsagInput { fn transcript(&self, transcript: &mut T) { // Doesn't domain separate as this is considered part of the larger CLSAG proof @@ -43,6 +44,7 @@ impl ClsagInput { // They're just a unreliable reference to this data which will be included in the message // if in use transcript.append_message(b"member", [u8::try_from(i).expect("ring size exceeded 255")]); + // This also transcripts the key image generator since it's derived from this key transcript.append_message(b"key", pair[0].compress().to_bytes()); transcript.append_message(b"commitment", pair[1].compress().to_bytes()) } @@ -70,13 +72,11 @@ impl ClsagDetails { #[derive(Clone, PartialEq, Eq, Zeroize, Debug)] pub struct ClsagAddendum { pub(crate) key_image: dfg::EdwardsPoint, - dleq: DLEqProof, } impl WriteAddendum for ClsagAddendum { fn write(&self, writer: &mut W) -> io::Result<()> { - writer.write_all(self.key_image.compress().to_bytes().as_ref())?; - self.dleq.write(writer) + writer.write_all(self.key_image.compress().to_bytes().as_ref()) } } @@ -97,9 +97,8 @@ pub struct ClsagMultisig { transcript: RecommendedTranscript, pub(crate) H: EdwardsPoint, - // Merged here as CLSAG needs it, passing it would be a mess, yet having it beforehand requires - // an extra round - image: EdwardsPoint, + key_image_shares: HashMap<[u8; 32], dfg::EdwardsPoint>, + image: Option, details: Arc>>, @@ -117,7 +116,8 @@ impl ClsagMultisig { transcript, H: hash_to_point(&output_key), - image: EdwardsPoint::identity(), + key_image_shares: HashMap::new(), + image: None, details, @@ -135,20 +135,6 @@ impl ClsagMultisig { } } -pub(crate) fn 
add_key_image_share( - image: &mut EdwardsPoint, - generator: EdwardsPoint, - offset: Scalar, - included: &[Participant], - participant: Participant, - share: EdwardsPoint, -) { - if image.is_identity().into() { - *image = generator * offset; - } - *image += share * lagrange::(participant, included).0; -} - impl Algorithm for ClsagMultisig { type Transcript = RecommendedTranscript; type Addendum = ClsagAddendum; @@ -160,23 +146,10 @@ impl Algorithm for ClsagMultisig { fn preprocess_addendum( &mut self, - rng: &mut R, + _rng: &mut R, keys: &ThresholdKeys, ) -> ClsagAddendum { - ClsagAddendum { - key_image: dfg::EdwardsPoint(self.H) * keys.secret_share().deref(), - dleq: DLEqProof::prove( - rng, - // Doesn't take in a larger transcript object due to the usage of this - // Every prover would immediately write their own DLEq proof, when they can only do so in - // the proper order if they want to reach consensus - // It'd be a poor API to have CLSAG define a new transcript solely to pass here, just to - // try to merge later in some form, when it should instead just merge xH (as it does) - &mut dleq_transcript(), - &[dfg::EdwardsPoint::generator(), dfg::EdwardsPoint(self.H)], - keys.secret_share(), - ), - } + ClsagAddendum { key_image: dfg::EdwardsPoint(self.H) * keys.secret_share().deref() } } fn read_addendum(&self, reader: &mut R) -> io::Result { @@ -190,7 +163,7 @@ impl Algorithm for ClsagMultisig { Err(io::Error::other("non-canonical key image"))?; } - Ok(ClsagAddendum { key_image: xH, dleq: DLEqProof::::read(reader)? 
}) + Ok(ClsagAddendum { key_image: xH }) } fn process_addendum( @@ -199,32 +172,29 @@ impl Algorithm for ClsagMultisig { l: Participant, addendum: ClsagAddendum, ) -> Result<(), FrostError> { - if self.image.is_identity().into() { + if self.image.is_none() { self.transcript.domain_separate(b"CLSAG"); + // Transcript the ring self.input().transcript(&mut self.transcript); + // Transcript the mask self.transcript.append_message(b"mask", self.mask().to_bytes()); + + // Init the image to the offset + self.image = Some(dfg::EdwardsPoint(self.H) * view.offset()); } + // Transcript this participant's contribution self.transcript.append_message(b"participant", l.to_bytes()); + self.transcript.append_message(b"key_image_share", addendum.key_image.compress().to_bytes()); - addendum - .dleq - .verify( - &mut dleq_transcript(), - &[dfg::EdwardsPoint::generator(), dfg::EdwardsPoint(self.H)], - &[view.original_verification_share(l), addendum.key_image], - ) - .map_err(|_| FrostError::InvalidPreprocess(l))?; + // Accumulate the interpolated share + let interpolated_key_image_share = + addendum.key_image * lagrange::(l, view.included()); + *self.image.as_mut().unwrap() += interpolated_key_image_share; - self.transcript.append_message(b"key_image_share", addendum.key_image.compress().to_bytes()); - add_key_image_share( - &mut self.image, - self.H, - view.offset().0, - view.included(), - l, - addendum.key_image.0, - ); + self + .key_image_shares + .insert(view.verification_share(l).to_bytes(), interpolated_key_image_share); Ok(()) } @@ -252,7 +222,7 @@ impl Algorithm for ClsagMultisig { #[allow(non_snake_case)] let (clsag, pseudo_out, p, c) = Clsag::sign_core( &mut rng, - &self.image, + &self.image.expect("verifying a share despite never processing any addendums").0, &self.input(), self.mask(), self.msg.as_ref().unwrap(), @@ -261,7 +231,8 @@ impl Algorithm for ClsagMultisig { ); self.interim = Some(Interim { p, c, clsag, pseudo_out }); - (-(dfg::Scalar(p) * 
view.secret_share().deref())) + nonces[0].deref() + // r - p x, where p is the challenge for the keys + *nonces[0] - dfg::Scalar(p) * view.secret_share().deref() } #[must_use] @@ -273,11 +244,13 @@ impl Algorithm for ClsagMultisig { ) -> Option { let interim = self.interim.as_ref().unwrap(); let mut clsag = interim.clsag.clone(); + // We produced shares as `r - p x`, yet the signature is `r - p x - c x` + // Subtract `c x` (saved as `c`) now clsag.s[usize::from(self.input().decoys.i)] = sum.0 - interim.c; if clsag .verify( &self.input().decoys.ring, - &self.image, + &self.image.expect("verifying a signature despite never processing any addendums").0, &interim.pseudo_out, self.msg.as_ref().unwrap(), ) @@ -295,10 +268,61 @@ impl Algorithm for ClsagMultisig { share: dfg::Scalar, ) -> Result, ()> { let interim = self.interim.as_ref().unwrap(); - Ok(vec![ + + // For a share `r - p x`, the following two equalities should hold: + // - `(r - p x)G == R.0 - pV`, where `V = xG` + // - `(r - p x)H == R.1 - pK`, where `K = xH` (the key image share) + // + // This is effectively a discrete log equality proof for: + // V, K over G, H + // with nonces + // R.0, R.1 + // and solution + // s + // + // Which is a batch-verifiable rewrite of the traditional CP93 proof + // (and also writable as Generalized Schnorr Protocol) + // + // That means that given a proper challenge, this alone can be certainly argued to prove the + // key image share is well-formed and the provided signature so proves for that. + + // This is a bit funky as it doesn't prove the nonces are well-formed however. They're part of + // the prover data/transcript for a CP93/GSP proof, not part of the statement. This practically + // is fine, for a variety of reasons (given a consistent `x`, a consistent `r` can be + // extracted, and the nonces as used in CLSAG are also part of its prover data/transcript). 
+ + let key_image_share = self.key_image_shares[&verification_share.to_bytes()]; + + // Hash every variable relevant here, using the hash output as the random weight + let mut weight_transcript = + RecommendedTranscript::new(b"monero-serai v0.1 ClsagMultisig::verify_share"); + weight_transcript.append_message(b"G", dfg::EdwardsPoint::generator().to_bytes()); + weight_transcript.append_message(b"H", self.H.to_bytes()); + weight_transcript.append_message(b"xG", verification_share.to_bytes()); + weight_transcript.append_message(b"xH", key_image_share.to_bytes()); + weight_transcript.append_message(b"rG", nonces[0][0].to_bytes()); + weight_transcript.append_message(b"rH", nonces[0][1].to_bytes()); + weight_transcript.append_message(b"c", dfg::Scalar(interim.p).to_repr()); + weight_transcript.append_message(b"s", share.to_repr()); + let weight = weight_transcript.challenge(b"weight"); + let weight = dfg::Scalar(Scalar::from_bytes_mod_order_wide(&weight.into())); + + let part_one = vec![ (share, dfg::EdwardsPoint::generator()), - (dfg::Scalar(interim.p), verification_share), + // -(R.0 - pV) == -R.0 + pV (-dfg::Scalar::ONE, nonces[0][0]), - ]) + (dfg::Scalar(interim.p), verification_share), + ]; + + let mut part_two = vec![ + (weight * share, dfg::EdwardsPoint(self.H)), + // -(R.1 - pK) == -R.1 + pK + (-weight, nonces[0][1]), + (weight * dfg::Scalar(interim.p), key_image_share), + ]; + + let mut all = part_one; + all.append(&mut part_two); + Ok(all) } } diff --git a/coins/monero/src/tests/bulletproofs/plus/aggregate_range_proof.rs b/coins/monero/src/tests/bulletproofs/plus/aggregate_range_proof.rs index a50b9d407..658da250e 100644 --- a/coins/monero/src/tests/bulletproofs/plus/aggregate_range_proof.rs +++ b/coins/monero/src/tests/bulletproofs/plus/aggregate_range_proof.rs @@ -21,7 +21,7 @@ fn test_aggregate_range_proof() { } let commitment_points = commitments.iter().map(|com| EdwardsPoint(com.calculate())).collect(); let statement = 
AggregateRangeStatement::new(commitment_points).unwrap(); - let witness = AggregateRangeWitness::new(&commitments).unwrap(); + let witness = AggregateRangeWitness::new(commitments).unwrap(); let proof = statement.clone().prove(&mut OsRng, &witness).unwrap(); statement.verify(&mut OsRng, &mut verifier, (), proof); diff --git a/coins/monero/src/tests/bulletproofs/plus/weighted_inner_product.rs b/coins/monero/src/tests/bulletproofs/plus/weighted_inner_product.rs index 7db2ecc8c..b0890cf87 100644 --- a/coins/monero/src/tests/bulletproofs/plus/weighted_inner_product.rs +++ b/coins/monero/src/tests/bulletproofs/plus/weighted_inner_product.rs @@ -9,7 +9,6 @@ use dalek_ff_group::{Scalar, EdwardsPoint}; use crate::ringct::bulletproofs::plus::{ ScalarVector, PointVector, GeneratorsList, Generators, weighted_inner_product::{WipStatement, WipWitness}, - weighted_inner_product, }; #[test] @@ -68,7 +67,7 @@ fn test_weighted_inner_product() { #[allow(non_snake_case)] let P = g_bold.multiexp(&a) + h_bold.multiexp(&b) + - (g * weighted_inner_product(&a, &b, &y_vec)) + + (g * a.clone().weighted_inner_product(&b, &y_vec)) + (h * alpha); let statement = WipStatement::new(generators, P, y); diff --git a/coins/monero/src/tests/clsag.rs b/coins/monero/src/tests/clsag.rs index 59e41ebf5..a17d7ba27 100644 --- a/coins/monero/src/tests/clsag.rs +++ b/coins/monero/src/tests/clsag.rs @@ -57,7 +57,7 @@ fn clsag() { } let image = generate_key_image(&secrets.0); - let (clsag, pseudo_out) = Clsag::sign( + let (mut clsag, pseudo_out) = Clsag::sign( &mut OsRng, vec![( secrets.0, @@ -76,7 +76,12 @@ fn clsag() { msg, ) .swap_remove(0); + clsag.verify(&ring, &image, &pseudo_out, &msg).unwrap(); + + // make sure verification fails if we throw a random `c1` at it. 
+ clsag.c1 = random_scalar(&mut OsRng); + assert!(clsag.verify(&ring, &image, &pseudo_out, &msg).is_err()); } } diff --git a/coins/monero/src/wallet/address.rs b/coins/monero/src/wallet/address.rs index 9c79942bc..d080488da 100644 --- a/coins/monero/src/wallet/address.rs +++ b/coins/monero/src/wallet/address.rs @@ -1,5 +1,5 @@ -use core::{marker::PhantomData, fmt::Debug}; -use std_shims::string::{String, ToString}; +use core::{marker::PhantomData, fmt}; +use std_shims::string::ToString; use zeroize::Zeroize; @@ -81,7 +81,7 @@ impl AddressType { } /// A type which returns the byte for a given address. -pub trait AddressBytes: Clone + Copy + PartialEq + Eq + Debug { +pub trait AddressBytes: Clone + Copy + PartialEq + Eq + fmt::Debug { fn network_bytes(network: Network) -> (u8, u8, u8, u8); } @@ -191,8 +191,8 @@ pub struct Address { pub view: EdwardsPoint, } -impl core::fmt::Debug for Address { - fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { +impl fmt::Debug for Address { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { fmt .debug_struct("Address") .field("meta", &self.meta) @@ -212,8 +212,8 @@ impl Zeroize for Address { } } -impl ToString for Address { - fn to_string(&self) -> String { +impl fmt::Display for Address { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let mut data = vec![self.meta.to_byte()]; data.extend(self.spend.compress().to_bytes()); data.extend(self.view.compress().to_bytes()); @@ -226,7 +226,7 @@ impl ToString for Address { if let Some(id) = self.meta.kind.payment_id() { data.extend(id); } - encode_check(&data).unwrap() + write!(f, "{}", encode_check(&data).unwrap()) } } diff --git a/coins/monero/src/wallet/scan.rs b/coins/monero/src/wallet/scan.rs index 0c2cebbdc..45bae04df 100644 --- a/coins/monero/src/wallet/scan.rs +++ b/coins/monero/src/wallet/scan.rs @@ -105,13 +105,13 @@ pub struct Metadata { /// but the payment ID will be returned here anyway: /// /// 1) If the 
payment ID is tied to an output received by a subaddress account - /// that spent Monero in the transaction (the received output is considered - /// "change" and is not considered a "payment" in this case). If there are multiple - /// spending subaddress accounts in a transaction, the highest index spent key image - /// is used to determine the spending subaddress account. + /// that spent Monero in the transaction (the received output is considered + /// "change" and is not considered a "payment" in this case). If there are multiple + /// spending subaddress accounts in a transaction, the highest index spent key image + /// is used to determine the spending subaddress account. /// /// 2) If the payment ID is the unencrypted variant and the block's hf version is - /// v12 or higher (https://github.com/serai-dex/serai/issues/512) + /// v12 or higher (https://github.com/serai-dex/serai/issues/512) pub payment_id: Option, /// Arbitrary data encoded in TX extra. pub arbitrary_data: Vec>, diff --git a/coins/monero/src/wallet/send/mod.rs b/coins/monero/src/wallet/send/mod.rs index f4ac208e3..153e6b6cf 100644 --- a/coins/monero/src/wallet/send/mod.rs +++ b/coins/monero/src/wallet/send/mod.rs @@ -364,8 +364,8 @@ impl Change { /// 1) The change in the tx is shunted to the fee (fingerprintable fee). /// /// 2) If there are 2 outputs in the tx, there would be no payment ID as is the case when the - /// reference wallet creates 2 output txs, since monero-serai doesn't know which output - /// to tie the dummy payment ID to. + /// reference wallet creates 2 output txs, since monero-serai doesn't know which output + /// to tie the dummy payment ID to. 
pub fn fingerprintable(address: Option) -> Change { Change { address, view: None } } diff --git a/coins/monero/src/wallet/send/multisig.rs b/coins/monero/src/wallet/send/multisig.rs index 02626e6a7..a5be404a2 100644 --- a/coins/monero/src/wallet/send/multisig.rs +++ b/coins/monero/src/wallet/send/multisig.rs @@ -18,6 +18,7 @@ use transcript::{Transcript, RecommendedTranscript}; use frost::{ curve::Ed25519, Participant, FrostError, ThresholdKeys, + dkg::lagrange, sign::{ Writable, Preprocess, CachedPreprocess, SignatureShare, PreprocessMachine, SignMachine, SignatureMachine, AlgorithmMachine, AlgorithmSignMachine, AlgorithmSignatureMachine, @@ -27,7 +28,7 @@ use frost::{ use crate::{ random_scalar, ringct::{ - clsag::{ClsagInput, ClsagDetails, ClsagAddendum, ClsagMultisig, add_key_image_share}, + clsag::{ClsagInput, ClsagDetails, ClsagAddendum, ClsagMultisig}, RctPrunable, }, transaction::{Input, Transaction}, @@ -261,8 +262,13 @@ impl SignMachine for TransactionSignMachine { included.push(self.i); included.sort_unstable(); - // Convert the unified commitments to a Vec of the individual commitments + // Start calculating the key images, as needed on the TX level let mut images = vec![EdwardsPoint::identity(); self.clsags.len()]; + for (image, (generator, offset)) in images.iter_mut().zip(&self.key_images) { + *image = generator * offset; + } + + // Convert the serialized nonces commitments to a parallelized Vec let mut commitments = (0 .. 
self.clsags.len()) .map(|c| { included @@ -291,14 +297,7 @@ impl SignMachine for TransactionSignMachine { // provides the easiest API overall, as this is where the TX is (which needs the key // images in its message), along with where the outputs are determined (where our // outputs may need these in order to guarantee uniqueness) - add_key_image_share( - &mut images[c], - self.key_images[c].0, - self.key_images[c].1, - &included, - *l, - preprocess.addendum.key_image.0, - ); + images[c] += preprocess.addendum.key_image.0 * lagrange::(*l, &included).0; Ok((*l, preprocess)) }) diff --git a/coins/monero/tests/wallet2_compatibility.rs b/coins/monero/tests/wallet2_compatibility.rs index 2002f3bdb..c6b589789 100644 --- a/coins/monero/tests/wallet2_compatibility.rs +++ b/coins/monero/tests/wallet2_compatibility.rs @@ -88,7 +88,7 @@ async fn from_wallet_rpc_to_self(spec: AddressSpec) { .unwrap(); let tx_hash = hex::decode(tx.tx_hash).unwrap().try_into().unwrap(); - // TODO: Needs https://github.com/monero-project/monero/pull/8882 + // TODO: Needs https://github.com/monero-project/monero/pull/9260 // let fee_rate = daemon_rpc // .get_fee(daemon_rpc.get_protocol().await.unwrap(), FeePriority::Unimportant) // .await @@ -107,7 +107,7 @@ async fn from_wallet_rpc_to_self(spec: AddressSpec) { let tx = daemon_rpc.get_transaction(tx_hash).await.unwrap(); let output = scanner.scan_transaction(&tx).not_locked().swap_remove(0); - // TODO: Needs https://github.com/monero-project/monero/pull/8882 + // TODO: Needs https://github.com/monero-project/monero/pull/9260 // runner::check_weight_and_fee(&tx, fee_rate); match spec { diff --git a/common/db/Cargo.toml b/common/db/Cargo.toml index 78d486a1b..e422b346e 100644 --- a/common/db/Cargo.toml +++ b/common/db/Cargo.toml @@ -18,7 +18,7 @@ workspace = true [dependencies] parity-db = { version = "0.4", default-features = false, optional = true } -rocksdb = { version = "0.21", default-features = false, features = ["lz4"], optional = true } 
+rocksdb = { version = "0.21", default-features = false, features = ["zstd"], optional = true } [features] parity-db = ["dep:parity-db"] diff --git a/common/db/src/parity_db.rs b/common/db/src/parity_db.rs index 06fd0c7c0..8c9134684 100644 --- a/common/db/src/parity_db.rs +++ b/common/db/src/parity_db.rs @@ -11,7 +11,7 @@ impl Get for Transaction<'_> { let mut res = self.0.get(&key); for change in &self.1 { if change.1 == key.as_ref() { - res = change.2.clone(); + res.clone_from(&change.2); } } res diff --git a/common/db/src/rocks.rs b/common/db/src/rocks.rs index 74fd33adc..6a7245632 100644 --- a/common/db/src/rocks.rs +++ b/common/db/src/rocks.rs @@ -1,42 +1,65 @@ use std::sync::Arc; -use rocksdb::{DBCompressionType, ThreadMode, SingleThreaded, Options, Transaction, TransactionDB}; +use rocksdb::{ + DBCompressionType, ThreadMode, SingleThreaded, LogLevel, WriteOptions, + Transaction as RocksTransaction, Options, OptimisticTransactionDB, +}; use crate::*; -impl Get for Transaction<'_, TransactionDB> { +pub struct Transaction<'a, T: ThreadMode>( + RocksTransaction<'a, OptimisticTransactionDB>, + &'a OptimisticTransactionDB, +); + +impl Get for Transaction<'_, T> { fn get(&self, key: impl AsRef<[u8]>) -> Option> { - self.get(key).expect("couldn't read from RocksDB via transaction") + self.0.get(key).expect("couldn't read from RocksDB via transaction") } } -impl DbTxn for Transaction<'_, TransactionDB> { +impl DbTxn for Transaction<'_, T> { fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>) { - Transaction::put(self, key, value).expect("couldn't write to RocksDB via transaction") + self.0.put(key, value).expect("couldn't write to RocksDB via transaction") } fn del(&mut self, key: impl AsRef<[u8]>) { - self.delete(key).expect("couldn't delete from RocksDB via transaction") + self.0.delete(key).expect("couldn't delete from RocksDB via transaction") } fn commit(self) { - Transaction::commit(self).expect("couldn't commit to RocksDB via transaction") + 
self.0.commit().expect("couldn't commit to RocksDB via transaction"); + self.1.flush_wal(true).expect("couldn't flush RocksDB WAL"); + self.1.flush().expect("couldn't flush RocksDB"); } } -impl Get for Arc> { +impl Get for Arc> { fn get(&self, key: impl AsRef<[u8]>) -> Option> { - TransactionDB::get(self, key).expect("couldn't read from RocksDB") + OptimisticTransactionDB::get(self, key).expect("couldn't read from RocksDB") } } -impl Db for Arc> { - type Transaction<'a> = Transaction<'a, TransactionDB>; +impl Db for Arc> { + type Transaction<'a> = Transaction<'a, T>; fn txn(&mut self) -> Self::Transaction<'_> { - self.transaction() + let mut opts = WriteOptions::default(); + opts.set_sync(true); + Transaction(self.transaction_opt(&opts, &Default::default()), &**self) } } -pub type RocksDB = Arc>; +pub type RocksDB = Arc>; pub fn new_rocksdb(path: &str) -> RocksDB { let mut options = Options::default(); options.create_if_missing(true); - options.set_compression_type(DBCompressionType::Lz4); - Arc::new(TransactionDB::open(&options, &Default::default(), path).unwrap()) + options.set_compression_type(DBCompressionType::Zstd); + + options.set_wal_compression_type(DBCompressionType::Zstd); + // 10 MB + options.set_max_total_wal_size(10 * 1024 * 1024); + options.set_wal_size_limit_mb(10); + + options.set_log_level(LogLevel::Warn); + // 1 MB + options.set_max_log_file_size(1024 * 1024); + options.set_recycle_log_file_num(1); + + Arc::new(OptimisticTransactionDB::open(&options, path).unwrap()) } diff --git a/common/request/Cargo.toml b/common/request/Cargo.toml index 0fe9ae5c8..e50180561 100644 --- a/common/request/Cargo.toml +++ b/common/request/Cargo.toml @@ -23,7 +23,7 @@ hyper-util = { version = "0.1", default-features = false, features = ["http1", " http-body-util = { version = "0.1", default-features = false } tokio = { version = "1", default-features = false } -hyper-rustls = { version = "0.26", default-features = false, features = ["http1", "ring", 
"rustls-native-certs", "native-tokio"], optional = true } +hyper-rustls = { version = "0.27", default-features = false, features = ["http1", "ring", "rustls-native-certs", "native-tokio"], optional = true } zeroize = { version = "1", optional = true } base64ct = { version = "1", features = ["alloc"], optional = true } diff --git a/common/request/src/lib.rs b/common/request/src/lib.rs index ad452a0cc..60e510193 100644 --- a/common/request/src/lib.rs +++ b/common/request/src/lib.rs @@ -55,6 +55,8 @@ impl Client { fn connector() -> Connector { let mut res = HttpConnector::new(); res.set_keepalive(Some(core::time::Duration::from_secs(60))); + res.set_nodelay(true); + res.set_reuse_address(true); #[cfg(feature = "tls")] let res = HttpsConnectorBuilder::new() .with_native_roots() @@ -68,7 +70,9 @@ impl Client { pub fn with_connection_pool() -> Client { Client { connection: Connection::ConnectionPool( - HyperClient::builder(TokioExecutor::new()).build(Self::connector()), + HyperClient::builder(TokioExecutor::new()) + .pool_idle_timeout(core::time::Duration::from_secs(60)) + .build(Self::connector()), ), } } diff --git a/common/zalloc/Cargo.toml b/common/zalloc/Cargo.toml index 1a4a6b45f..af4e7c1c8 100644 --- a/common/zalloc/Cargo.toml +++ b/common/zalloc/Cargo.toml @@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/common/zalloc" authors = ["Luke Parker "] keywords = [] edition = "2021" -rust-version = "1.60" +rust-version = "1.77.0" [package.metadata.docs.rs] all-features = true @@ -19,8 +19,10 @@ workspace = true [dependencies] zeroize = { version = "^1.5", default-features = false } +[build-dependencies] +rustversion = { version = "1", default-features = false } + [features] std = ["zeroize/std"] default = ["std"] -# Commented for now as it requires nightly and we don't use nightly -# allocator = [] +allocator = [] diff --git a/common/zalloc/build.rs b/common/zalloc/build.rs new file mode 100644 index 000000000..f3351e228 --- /dev/null +++ 
b/common/zalloc/build.rs @@ -0,0 +1,10 @@ +#[rustversion::nightly] +fn main() { + println!("cargo::rustc-check-cfg=cfg(zalloc_rustc_nightly)"); + println!("cargo::rustc-cfg=zalloc_rustc_nightly"); +} + +#[rustversion::not(nightly)] +fn main() { + println!("cargo::rustc-check-cfg=cfg(zalloc_rustc_nightly)"); +} diff --git a/common/zalloc/src/lib.rs b/common/zalloc/src/lib.rs index 0e4c1f75d..cc5562a0a 100644 --- a/common/zalloc/src/lib.rs +++ b/common/zalloc/src/lib.rs @@ -1,6 +1,6 @@ #![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(docsrs, feature(doc_auto_cfg))] -#![cfg_attr(feature = "allocator", feature(allocator_api))] +#![cfg_attr(all(zalloc_rustc_nightly, feature = "allocator"), feature(allocator_api))] //! Implementation of a Zeroizing Allocator, enabling zeroizing memory on deallocation. //! This can either be used with Box (requires nightly and the "allocator" feature) to provide the @@ -17,12 +17,12 @@ use zeroize::Zeroize; /// An allocator wrapper which zeroizes its memory on dealloc. 
pub struct ZeroizingAlloc(pub T); -#[cfg(feature = "allocator")] +#[cfg(all(zalloc_rustc_nightly, feature = "allocator"))] use core::{ ptr::NonNull, alloc::{AllocError, Allocator}, }; -#[cfg(feature = "allocator")] +#[cfg(all(zalloc_rustc_nightly, feature = "allocator"))] unsafe impl Allocator for ZeroizingAlloc { fn allocate(&self, layout: Layout) -> Result, AllocError> { self.0.allocate(layout) diff --git a/coordinator/Cargo.toml b/coordinator/Cargo.toml index 12f8e763f..ae4e2be7a 100644 --- a/coordinator/Cargo.toml +++ b/coordinator/Cargo.toml @@ -51,7 +51,7 @@ env_logger = { version = "0.10", default-features = false, features = ["humantim futures-util = { version = "0.3", default-features = false, features = ["std"] } tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] } -libp2p = { version = "0.52", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "gossipsub", "macros"] } +libp2p = { version = "0.52", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "request-response", "gossipsub", "macros"] } [dev-dependencies] tributary = { package = "tributary-chain", path = "./tributary", features = ["tests"] } diff --git a/coordinator/src/cosign_evaluator.rs b/coordinator/src/cosign_evaluator.rs index 4ce7faf77..29d9cc4b0 100644 --- a/coordinator/src/cosign_evaluator.rs +++ b/coordinator/src/cosign_evaluator.rs @@ -22,7 +22,7 @@ use serai_db::{Get, DbTxn, Db, create_db}; use processor_messages::coordinator::cosign_block_msg; use crate::{ - p2p::{CosignedBlock, P2pMessageKind, P2p}, + p2p::{CosignedBlock, GossipMessageKind, P2p}, substrate::LatestCosignedBlock, }; @@ -323,7 +323,7 @@ impl CosignEvaluator { for cosign in cosigns { let mut buf = vec![]; cosign.serialize(&mut buf).unwrap(); - P2p::broadcast(&p2p, P2pMessageKind::CosignedBlock, buf).await; + P2p::broadcast(&p2p, GossipMessageKind::CosignedBlock, buf).await; } sleep(Duration::from_secs(60)).await; } diff --git 
a/coordinator/src/db.rs b/coordinator/src/db.rs index 09eab1732..04ee9d350 100644 --- a/coordinator/src/db.rs +++ b/coordinator/src/db.rs @@ -122,7 +122,7 @@ impl QueuedBatchesDb { pub fn take(txn: &mut impl DbTxn, set: ValidatorSet) -> Vec { let batches_vec = Self::get(txn, set).unwrap_or_default(); - txn.del(&Self::key(set)); + txn.del(Self::key(set)); let mut batches: &[u8] = &batches_vec; let mut res = vec![]; diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index 690fb3426..58de348d2 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -260,7 +260,7 @@ async fn handle_processor_message( cosign_channel.send(cosigned_block).unwrap(); let mut buf = vec![]; cosigned_block.serialize(&mut buf).unwrap(); - P2p::broadcast(p2p, P2pMessageKind::CosignedBlock, buf).await; + P2p::broadcast(p2p, GossipMessageKind::CosignedBlock, buf).await; None } // This causes an action on Substrate yet not on any Tributary @@ -836,8 +836,8 @@ async fn handle_cosigns_and_batch_publication( ) { let mut tributaries = HashMap::new(); 'outer: loop { - // TODO: Create a better async flow for this, as this does still hammer this task - tokio::task::yield_now().await; + // TODO: Create a better async flow for this + tokio::time::sleep(core::time::Duration::from_millis(100)).await; match tributary_event.try_recv() { Ok(event) => match event { diff --git a/coordinator/src/p2p.rs b/coordinator/src/p2p.rs index ce6be6880..ef876f9a8 100644 --- a/coordinator/src/p2p.rs +++ b/coordinator/src/p2p.rs @@ -1,8 +1,8 @@ use core::{time::Duration, fmt}; use std::{ sync::Arc, - io::Read, - collections::HashMap, + io::{self, Read}, + collections::{HashSet, HashMap}, time::{SystemTime, Instant}, }; @@ -15,7 +15,7 @@ use serai_client::{primitives::NetworkId, validator_sets::primitives::ValidatorS use serai_db::Db; -use futures_util::StreamExt; +use futures_util::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, StreamExt}; use tokio::{ sync::{Mutex, RwLock, mpsc, broadcast}, 
time::sleep, @@ -27,12 +27,16 @@ use libp2p::{ PeerId, tcp::Config as TcpConfig, noise, yamux, + request_response::{ + Codec as RrCodecTrait, Message as RrMessage, Event as RrEvent, Config as RrConfig, + Behaviour as RrBehavior, + }, gossipsub::{ IdentTopic, FastMessageId, MessageId, MessageAuthenticity, ValidationMode, ConfigBuilder, IdentityTransform, AllowAllSubscriptionFilter, Event as GsEvent, PublishError, Behaviour as GsBehavior, }, - swarm::{NetworkBehaviour, SwarmEvent, Swarm}, + swarm::{NetworkBehaviour, SwarmEvent}, SwarmBuilder, }; @@ -40,6 +44,8 @@ pub(crate) use tributary::{ReadWrite, P2p as TributaryP2p}; use crate::{Transaction, Block, Tributary, ActiveTributary, TributaryEvent}; +// Block size limit + 1 KB of space for signatures/metadata +const MAX_LIBP2P_MESSAGE_SIZE: usize = tributary::BLOCK_SIZE_LIMIT + 1024; const LIBP2P_TOPIC: &str = "serai-coordinator"; #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, BorshSerialize, BorshDeserialize)] @@ -51,72 +57,113 @@ pub struct CosignedBlock { } #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] -pub enum P2pMessageKind { +pub enum ReqResMessageKind { KeepAlive, - Tributary([u8; 32]), Heartbeat([u8; 32]), Block([u8; 32]), - CosignedBlock, } -impl P2pMessageKind { - fn genesis(&self) -> Option<[u8; 32]> { - match self { - P2pMessageKind::KeepAlive | P2pMessageKind::CosignedBlock => None, - P2pMessageKind::Tributary(genesis) | - P2pMessageKind::Heartbeat(genesis) | - P2pMessageKind::Block(genesis) => Some(*genesis), +impl ReqResMessageKind { + pub fn read(reader: &mut R) -> Option { + let mut kind = [0; 1]; + reader.read_exact(&mut kind).ok()?; + match kind[0] { + 0 => Some(ReqResMessageKind::KeepAlive), + 1 => Some({ + let mut genesis = [0; 32]; + reader.read_exact(&mut genesis).ok()?; + ReqResMessageKind::Heartbeat(genesis) + }), + 2 => Some({ + let mut genesis = [0; 32]; + reader.read_exact(&mut genesis).ok()?; + ReqResMessageKind::Block(genesis) + }), + _ => None, } } - fn serialize(&self) -> Vec { 
+ pub fn serialize(&self) -> Vec { match self { - P2pMessageKind::KeepAlive => vec![0], - P2pMessageKind::Tributary(genesis) => { + ReqResMessageKind::KeepAlive => vec![0], + ReqResMessageKind::Heartbeat(genesis) => { let mut res = vec![1]; res.extend(genesis); res } - P2pMessageKind::Heartbeat(genesis) => { + ReqResMessageKind::Block(genesis) => { let mut res = vec![2]; res.extend(genesis); res } - P2pMessageKind::Block(genesis) => { - let mut res = vec![3]; - res.extend(genesis); - res - } - P2pMessageKind::CosignedBlock => { - vec![4] - } } } +} - fn read(reader: &mut R) -> Option { +#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] +pub enum GossipMessageKind { + Tributary([u8; 32]), + CosignedBlock, +} + +impl GossipMessageKind { + pub fn read(reader: &mut R) -> Option { let mut kind = [0; 1]; reader.read_exact(&mut kind).ok()?; match kind[0] { - 0 => Some(P2pMessageKind::KeepAlive), - 1 => Some({ - let mut genesis = [0; 32]; - reader.read_exact(&mut genesis).ok()?; - P2pMessageKind::Tributary(genesis) - }), - 2 => Some({ - let mut genesis = [0; 32]; - reader.read_exact(&mut genesis).ok()?; - P2pMessageKind::Heartbeat(genesis) - }), - 3 => Some({ + 0 => Some({ let mut genesis = [0; 32]; reader.read_exact(&mut genesis).ok()?; - P2pMessageKind::Block(genesis) + GossipMessageKind::Tributary(genesis) }), - 4 => Some(P2pMessageKind::CosignedBlock), + 1 => Some(GossipMessageKind::CosignedBlock), _ => None, } } + + pub fn serialize(&self) -> Vec { + match self { + GossipMessageKind::Tributary(genesis) => { + let mut res = vec![0]; + res.extend(genesis); + res + } + GossipMessageKind::CosignedBlock => { + vec![1] + } + } + } +} + +#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] +pub enum P2pMessageKind { + ReqRes(ReqResMessageKind), + Gossip(GossipMessageKind), +} + +impl P2pMessageKind { + fn genesis(&self) -> Option<[u8; 32]> { + match self { + P2pMessageKind::ReqRes(ReqResMessageKind::KeepAlive) | + P2pMessageKind::Gossip(GossipMessageKind::CosignedBlock) => 
None, + P2pMessageKind::ReqRes( + ReqResMessageKind::Heartbeat(genesis) | ReqResMessageKind::Block(genesis), + ) | + P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) => Some(*genesis), + } + } +} + +impl From for P2pMessageKind { + fn from(kind: ReqResMessageKind) -> P2pMessageKind { + P2pMessageKind::ReqRes(kind) + } +} + +impl From for P2pMessageKind { + fn from(kind: GossipMessageKind) -> P2pMessageKind { + P2pMessageKind::Gossip(kind) + } } #[derive(Clone, Debug)] @@ -133,17 +180,21 @@ pub trait P2p: Send + Sync + Clone + fmt::Debug + TributaryP2p { async fn subscribe(&self, set: ValidatorSet, genesis: [u8; 32]); async fn unsubscribe(&self, set: ValidatorSet, genesis: [u8; 32]); - async fn send_raw(&self, to: Self::Id, genesis: Option<[u8; 32]>, msg: Vec); - async fn broadcast_raw(&self, genesis: Option<[u8; 32]>, msg: Vec); - async fn receive_raw(&self) -> (Self::Id, Vec); + async fn send_raw(&self, to: Self::Id, msg: Vec); + async fn broadcast_raw(&self, kind: P2pMessageKind, msg: Vec); + async fn receive(&self) -> Message; - async fn send(&self, to: Self::Id, kind: P2pMessageKind, msg: Vec) { + async fn send(&self, to: Self::Id, kind: ReqResMessageKind, msg: Vec) { let mut actual_msg = kind.serialize(); actual_msg.extend(msg); - self.send_raw(to, kind.genesis(), actual_msg).await; + self.send_raw(to, actual_msg).await; } - async fn broadcast(&self, kind: P2pMessageKind, msg: Vec) { - let mut actual_msg = kind.serialize(); + async fn broadcast(&self, kind: impl Send + Into, msg: Vec) { + let kind = kind.into(); + let mut actual_msg = match kind { + P2pMessageKind::ReqRes(kind) => kind.serialize(), + P2pMessageKind::Gossip(kind) => kind.serialize(), + }; actual_msg.extend(msg); /* log::trace!( @@ -157,41 +208,70 @@ pub trait P2p: Send + Sync + Clone + fmt::Debug + TributaryP2p { } ); */ - self.broadcast_raw(kind.genesis(), actual_msg).await; + self.broadcast_raw(kind, actual_msg).await; } - async fn receive(&self) -> Message { - let (sender, kind, 
msg) = loop { - let (sender, msg) = self.receive_raw().await; - if msg.is_empty() { - log::error!("empty p2p message from {sender:?}"); - continue; - } +} - let mut msg_ref = msg.as_ref(); - let Some(kind) = P2pMessageKind::read::<&[u8]>(&mut msg_ref) else { - log::error!("invalid p2p message kind from {sender:?}"); - continue; - }; - break (sender, kind, msg_ref.to_vec()); - }; - /* - log::trace!( - "received p2p message (kind {})", - match kind { - P2pMessageKind::KeepAlive => "KeepAlive".to_string(), - P2pMessageKind::Tributary(genesis) => format!("Tributary({})", hex::encode(genesis)), - P2pMessageKind::Heartbeat(genesis) => format!("Heartbeat({})", hex::encode(genesis)), - P2pMessageKind::Block(genesis) => format!("Block({})", hex::encode(genesis)), - P2pMessageKind::CosignedBlock => "CosignedBlock".to_string(), - } - ); - */ - Message { sender, kind, msg } +#[derive(Default, Clone, Copy, PartialEq, Eq, Debug)] +struct RrCodec; +#[async_trait] +impl RrCodecTrait for RrCodec { + type Protocol = &'static str; + type Request = Vec; + type Response = Vec; + + async fn read_request( + &mut self, + _: &Self::Protocol, + io: &mut R, + ) -> io::Result> { + let mut len = [0; 4]; + io.read_exact(&mut len).await?; + let len = usize::try_from(u32::from_le_bytes(len)).expect("not a 32-bit platform?"); + if len > MAX_LIBP2P_MESSAGE_SIZE { + Err(io::Error::other("request length exceeded MAX_LIBP2P_MESSAGE_SIZE"))?; + } + // This may be a non-trivial allocation easily causable + // While we could chunk the read, meaning we only perform the allocation as bandwidth is used, + // the max message size should be sufficiently sane + let mut buf = vec![0; len]; + io.read_exact(&mut buf).await?; + Ok(buf) + } + async fn read_response( + &mut self, + proto: &Self::Protocol, + io: &mut R, + ) -> io::Result> { + self.read_request(proto, io).await + } + async fn write_request( + &mut self, + _: &Self::Protocol, + io: &mut W, + req: Vec, + ) -> io::Result<()> { + io.write_all( + 
&u32::try_from(req.len()) + .map_err(|_| io::Error::other("request length exceeded 2**32"))? + .to_le_bytes(), + ) + .await?; + io.write_all(&req).await + } + async fn write_response( + &mut self, + proto: &Self::Protocol, + io: &mut W, + res: Vec, + ) -> io::Result<()> { + self.write_request(proto, io, res).await } } #[derive(NetworkBehaviour)] struct Behavior { + reqres: RrBehavior, gossipsub: GsBehavior, } @@ -199,8 +279,9 @@ struct Behavior { #[derive(Clone)] pub struct LibP2p { subscribe: Arc>>, - broadcast: Arc, Vec)>>>, - receive: Arc)>>>, + send: Arc)>>>, + broadcast: Arc)>>>, + receive: Arc>>>, } impl fmt::Debug for LibP2p { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { @@ -211,14 +292,12 @@ impl fmt::Debug for LibP2p { impl LibP2p { #[allow(clippy::new_without_default)] pub fn new(serai: Arc) -> Self { - // Block size limit + 1 KB of space for signatures/metadata - const MAX_LIBP2P_MESSAGE_SIZE: usize = tributary::BLOCK_SIZE_LIMIT + 1024; - log::info!("creating a libp2p instance"); let throwaway_key_pair = Keypair::generate_ed25519(); let behavior = Behavior { + reqres: { RrBehavior::new([], RrConfig::default()) }, gossipsub: { let heartbeat_interval = tributary::tendermint::LATENCY_TIME / 2; let heartbeats_per_block = @@ -282,6 +361,7 @@ impl LibP2p { const PORT: u16 = 30563; // 5132 ^ (('c' << 8) | 'o') swarm.listen_on(format!("/ip4/0.0.0.0/tcp/{PORT}").parse().unwrap()).unwrap(); + let (send_send, mut send_recv) = mpsc::unbounded_channel(); let (broadcast_send, mut broadcast_recv) = mpsc::unbounded_channel(); let (receive_send, receive_recv) = mpsc::unbounded_channel(); let (subscribe_send, mut subscribe_recv) = mpsc::unbounded_channel(); @@ -290,44 +370,31 @@ impl LibP2p { IdentTopic::new(format!("{LIBP2P_TOPIC}-{}", hex::encode(set.encode()))) } - tokio::spawn({ - let mut time_of_last_p2p_message = Instant::now(); + // TODO: If a network has less than TARGET_PEERS, this will cause retries ad infinitum + const TARGET_PEERS: usize = 5; - 
#[allow(clippy::needless_pass_by_ref_mut)] // False positive - fn broadcast_raw( - p2p: &mut Swarm, - time_of_last_p2p_message: &mut Instant, - set: Option, - msg: Vec, - ) { - // Update the time of last message - *time_of_last_p2p_message = Instant::now(); - - let topic = - if let Some(set) = set { topic_for_set(set) } else { IdentTopic::new(LIBP2P_TOPIC) }; - - match p2p.behaviour_mut().gossipsub.publish(topic, msg.clone()) { - Err(PublishError::SigningError(e)) => panic!("signing error when broadcasting: {e}"), - Err(PublishError::InsufficientPeers) => { - log::warn!("failed to send p2p message due to insufficient peers") - } - Err(PublishError::MessageTooLarge) => { - panic!("tried to send a too large message: {}", hex::encode(msg)) - } - Err(PublishError::TransformFailed(e)) => panic!("IdentityTransform failed: {e}"), - Err(PublishError::Duplicate) | Ok(_) => {} - } - } + // The addrs we're currently dialing, and the networks associated with them + let dialing_peers = Arc::new(RwLock::new(HashMap::new())); + // The peers we're currently connected to, and the networks associated with them + let connected_peers = Arc::new(RwLock::new(HashMap::>::new())); + + // Find and connect to peers + let (connect_to_network_send, mut connect_to_network_recv) = + tokio::sync::mpsc::unbounded_channel(); + let (to_dial_send, mut to_dial_recv) = tokio::sync::mpsc::unbounded_channel(); + tokio::spawn({ + let dialing_peers = dialing_peers.clone(); + let connected_peers = connected_peers.clone(); + let connect_to_network_send = connect_to_network_send.clone(); async move { - let mut set_for_genesis = HashMap::new(); - let mut pending_p2p_connections = vec![]; - // Run this task ad-infinitum loop { - // Handle pending P2P connections - // TODO: Break this out onto its own task with better peer management logic? 
- { - let mut connect = |addr: Multiaddr| { + let connect = |network: NetworkId, addr: Multiaddr| { + let dialing_peers = dialing_peers.clone(); + let connected_peers = connected_peers.clone(); + let to_dial_send = to_dial_send.clone(); + let connect_to_network_send = connect_to_network_send.clone(); + async move { log::info!("found peer from substrate: {addr}"); let protocols = addr.iter().filter_map(|piece| match piece { @@ -345,42 +412,114 @@ impl LibP2p { let addr = new_addr; log::debug!("transformed found peer: {addr}"); - if let Err(e) = swarm.dial(addr) { - log::warn!("dialing peer failed: {e:?}"); - } - }; - - while let Some(network) = pending_p2p_connections.pop() { - if let Ok(mut nodes) = serai.p2p_validators(network).await { - // If there's an insufficient amount of nodes known, connect to all yet add it back - // and break - if nodes.len() < 3 { - log::warn!( - "insufficient amount of P2P nodes known for {:?}: {}", - network, - nodes.len() - ); - pending_p2p_connections.push(network); - for node in nodes { - connect(node); + let (is_fresh_dial, nets) = { + let mut dialing_peers = dialing_peers.write().await; + let is_fresh_dial = !dialing_peers.contains_key(&addr); + if is_fresh_dial { + dialing_peers.insert(addr.clone(), HashSet::new()); + } + // Associate this network with this peer + dialing_peers.get_mut(&addr).unwrap().insert(network); + + let nets = dialing_peers.get(&addr).unwrap().clone(); + (is_fresh_dial, nets) + }; + + // Spawn a task to remove this peer from 'dialing' in sixty seconds, in case dialing + // fails + // This performs cleanup and bounds the size of the map to whatever growth occurs + // within a temporal window + tokio::spawn({ + let dialing_peers = dialing_peers.clone(); + let connected_peers = connected_peers.clone(); + let connect_to_network_send = connect_to_network_send.clone(); + let addr = addr.clone(); + async move { + tokio::time::sleep(core::time::Duration::from_secs(60)).await; + let mut dialing_peers = 
dialing_peers.write().await; + if let Some(expected_nets) = dialing_peers.remove(&addr) { + log::debug!("removed addr from dialing upon timeout: {addr}"); + + // TODO: De-duplicate this below instance + // If we failed to dial and haven't gotten enough actual connections, retry + let connected_peers = connected_peers.read().await; + for net in expected_nets { + let mut remaining_peers = 0; + for nets in connected_peers.values() { + if nets.contains(&net) { + remaining_peers += 1; + } + } + // If we do not, start connecting to this network again + if remaining_peers < TARGET_PEERS { + connect_to_network_send.send(net).expect( + "couldn't send net to connect to due to disconnects (receiver dropped?)", + ); + } + } } - break; } + }); - // Randomly select up to 5 - for _ in 0 .. 5 { - if !nodes.is_empty() { - let to_connect = nodes.swap_remove( - usize::try_from(OsRng.next_u64() % u64::try_from(nodes.len()).unwrap()) - .unwrap(), - ); - connect(to_connect); - } + if is_fresh_dial { + to_dial_send.send((addr, nets)).unwrap(); + } + } + }; + + // TODO: We should also connect to random peers from random nets as needed for + // cosigning + + // Drain the chainnel, de-duplicating any networks in it + let mut connect_to_network_networks = HashSet::new(); + while let Ok(network) = connect_to_network_recv.try_recv() { + connect_to_network_networks.insert(network); + } + for network in connect_to_network_networks { + if let Ok(mut nodes) = serai.p2p_validators(network).await { + // If there's an insufficient amount of nodes known, connect to all yet add it + // back and break + if nodes.len() < TARGET_PEERS { + log::warn!( + "insufficient amount of P2P nodes known for {:?}: {}", + network, + nodes.len() + ); + // Retry this later + connect_to_network_send.send(network).unwrap(); + for node in nodes { + connect(network, node).await; + } + continue; + } + + // Randomly select up to 150% of the TARGET_PEERS + for _ in 0 .. 
((3 * TARGET_PEERS) / 2) { + if !nodes.is_empty() { + let to_connect = nodes.swap_remove( + usize::try_from(OsRng.next_u64() % u64::try_from(nodes.len()).unwrap()) + .unwrap(), + ); + connect(network, to_connect).await; } } } } + // Sleep 60 seconds before moving to the next iteration + tokio::time::sleep(core::time::Duration::from_secs(60)).await; + } + } + }); + + // Manage the actual swarm + tokio::spawn({ + let mut time_of_last_p2p_message = Instant::now(); + + async move { + let connected_peers = connected_peers.clone(); + let mut set_for_genesis = HashMap::new(); + loop { let time_since_last = Instant::now().duration_since(time_of_last_p2p_message); tokio::select! { biased; @@ -392,7 +531,7 @@ impl LibP2p { let topic = topic_for_set(set); if subscribe { log::info!("subscribing to p2p messages for {set:?}"); - pending_p2p_connections.push(set.network); + connect_to_network_send.send(set.network).unwrap(); set_for_genesis.insert(genesis, set); swarm.behaviour_mut().gossipsub.subscribe(&topic).unwrap(); } else { @@ -402,17 +541,50 @@ impl LibP2p { } } + msg = send_recv.recv() => { + let (peer, msg): (PeerId, Vec) = + msg.expect("send_recv closed. are we shutting down?"); + swarm.behaviour_mut().reqres.send_request(&peer, msg); + }, + // Handle any queued outbound messages msg = broadcast_recv.recv() => { - let (genesis, msg): (Option<[u8; 32]>, Vec) = + // Update the time of last message + time_of_last_p2p_message = Instant::now(); + + let (kind, msg): (P2pMessageKind, Vec) = msg.expect("broadcast_recv closed. 
are we shutting down?"); - let set = genesis.and_then(|genesis| set_for_genesis.get(&genesis).copied()); - broadcast_raw( - &mut swarm, - &mut time_of_last_p2p_message, - set, - msg, - ); + + if matches!(kind, P2pMessageKind::ReqRes(_)) { + // Use request/response, yet send to all connected peers + for peer_id in swarm.connected_peers().copied().collect::>() { + swarm.behaviour_mut().reqres.send_request(&peer_id, msg.clone()); + } + } else { + // Use gossipsub + + let set = + kind.genesis().and_then(|genesis| set_for_genesis.get(&genesis).copied()); + let topic = if let Some(set) = set { + topic_for_set(set) + } else { + IdentTopic::new(LIBP2P_TOPIC) + }; + + match swarm.behaviour_mut().gossipsub.publish(topic, msg.clone()) { + Err(PublishError::SigningError(e)) => { + panic!("signing error when broadcasting: {e}") + }, + Err(PublishError::InsufficientPeers) => { + log::warn!("failed to send p2p message due to insufficient peers") + } + Err(PublishError::MessageTooLarge) => { + panic!("tried to send a too large message: {}", hex::encode(msg)) + } + Err(PublishError::TransformFailed(e)) => panic!("IdentityTransform failed: {e}"), + Err(PublishError::Duplicate) | Ok(_) => {} + } + } } // Handle new incoming messages @@ -421,25 +593,124 @@ impl LibP2p { Some(SwarmEvent::Dialing { connection_id, .. }) => { log::debug!("dialing to peer in connection ID {}", &connection_id); } - Some(SwarmEvent::ConnectionEstablished { peer_id, connection_id, .. }) => { + Some(SwarmEvent::ConnectionEstablished { + peer_id, + connection_id, + endpoint, + .. 
+ }) => { + if &peer_id == swarm.local_peer_id() { + log::warn!("established a libp2p connection to ourselves"); + swarm.close_connection(connection_id); + continue; + } + + let addr = endpoint.get_remote_address(); + let nets = { + let mut dialing_peers = dialing_peers.write().await; + if let Some(nets) = dialing_peers.remove(addr) { + nets + } else { + log::debug!("connected to a peer who we didn't have within dialing"); + HashSet::new() + } + }; + { + let mut connected_peers = connected_peers.write().await; + connected_peers.insert(addr.clone(), nets); + log::debug!( - "connection established to peer {} in connection ID {}", + "connection established to peer {} in connection ID {}, connected peers: {}", &peer_id, &connection_id, + connected_peers.len(), ); - swarm.behaviour_mut().gossipsub.add_explicit_peer(&peer_id) } + } + Some(SwarmEvent::ConnectionClosed { peer_id, endpoint, .. }) => { + let mut connected_peers = connected_peers.write().await; + let Some(nets) = connected_peers.remove(endpoint.get_remote_address()) else { + log::debug!("closed connection to peer which wasn't in connected_peers"); + continue; + }; + // Downgrade to a read lock + let connected_peers = connected_peers.downgrade(); + + // For each net we lost a peer for, check if we still have sufficient peers + // overall + for net in nets { + let mut remaining_peers = 0; + for nets in connected_peers.values() { + if nets.contains(&net) { + remaining_peers += 1; + } + } + // If we do not, start connecting to this network again + if remaining_peers < TARGET_PEERS { + connect_to_network_send + .send(net) + .expect( + "couldn't send net to connect to due to disconnects (receiver dropped?)" + ); + } + } + + log::debug!( + "connection with peer {peer_id} closed, connected peers: {}", + connected_peers.len(), + ); + } + Some(SwarmEvent::Behaviour(BehaviorEvent::Reqres( + RrEvent::Message { peer, message }, + ))) => { + let message = match message { + RrMessage::Request { request, .. 
} => request, + RrMessage::Response { response, .. } => response, + }; + + let mut msg_ref = message.as_slice(); + let Some(kind) = ReqResMessageKind::read(&mut msg_ref) else { continue }; + let message = Message { + sender: peer, + kind: P2pMessageKind::ReqRes(kind), + msg: msg_ref.to_vec(), + }; + receive_send.send(message).expect("receive_send closed. are we shutting down?"); + } Some(SwarmEvent::Behaviour(BehaviorEvent::Gossipsub( GsEvent::Message { propagation_source, message, .. }, ))) => { - receive_send - .send((propagation_source, message.data)) - .expect("receive_send closed. are we shutting down?"); + let mut msg_ref = message.data.as_slice(); + let Some(kind) = GossipMessageKind::read(&mut msg_ref) else { continue }; + let message = Message { + sender: propagation_source, + kind: P2pMessageKind::Gossip(kind), + msg: msg_ref.to_vec(), + }; + receive_send.send(message).expect("receive_send closed. are we shutting down?"); } _ => {} } } + // Handle peers to dial + addr_and_nets = to_dial_recv.recv() => { + let (addr, nets) = + addr_and_nets.expect("received address was None (sender dropped?)"); + // If we've already dialed and connected to this address, don't further dial them + // Just associate these networks with them + if let Some(existing_nets) = connected_peers.write().await.get_mut(&addr) { + for net in nets { + existing_nets.insert(net); + } + continue; + } + + if let Err(e) = swarm.dial(addr) { + log::warn!("dialing peer failed: {e:?}"); + } + } + // If it's been >80s since we've published a message, publish a KeepAlive since we're // still an active service // This is useful when we have no active tributaries and accordingly aren't sending @@ -448,12 +719,13 @@ impl LibP2p { // (where a finalized block only occurs due to network activity), meaning this won't be // run () = tokio::time::sleep(Duration::from_secs(80).saturating_sub(time_since_last)) => { - broadcast_raw( - &mut swarm, - &mut time_of_last_p2p_message, - None, - 
P2pMessageKind::KeepAlive.serialize() - ); + time_of_last_p2p_message = Instant::now(); + for peer_id in swarm.connected_peers().copied().collect::>() { + swarm + .behaviour_mut() + .reqres + .send_request(&peer_id, ReqResMessageKind::KeepAlive.serialize()); + } } } } @@ -462,6 +734,7 @@ impl LibP2p { LibP2p { subscribe: Arc::new(Mutex::new(subscribe_send)), + send: Arc::new(Mutex::new(send_send)), broadcast: Arc::new(Mutex::new(broadcast_send)), receive: Arc::new(Mutex::new(receive_recv)), } @@ -490,22 +763,22 @@ impl P2p for LibP2p { .expect("subscribe_send closed. are we shutting down?"); } - async fn send_raw(&self, _: Self::Id, genesis: Option<[u8; 32]>, msg: Vec) { - self.broadcast_raw(genesis, msg).await; + async fn send_raw(&self, peer: Self::Id, msg: Vec) { + self.send.lock().await.send((peer, msg)).expect("send_send closed. are we shutting down?"); } - async fn broadcast_raw(&self, genesis: Option<[u8; 32]>, msg: Vec) { + async fn broadcast_raw(&self, kind: P2pMessageKind, msg: Vec) { self .broadcast .lock() .await - .send((genesis, msg)) + .send((kind, msg)) .expect("broadcast_send closed. are we shutting down?"); } // TODO: We only have a single handle call this. Differentiate Send/Recv to remove this constant // lock acquisition? - async fn receive_raw(&self) -> (Self::Id, Vec) { + async fn receive(&self) -> Message { self.receive.lock().await.recv().await.expect("receive_recv closed. 
are we shutting down?") } } @@ -513,7 +786,7 @@ impl P2p for LibP2p { #[async_trait] impl TributaryP2p for LibP2p { async fn broadcast(&self, genesis: [u8; 32], msg: Vec) { - ::broadcast(self, P2pMessageKind::Tributary(genesis), msg).await + ::broadcast(self, GossipMessageKind::Tributary(genesis), msg).await } } @@ -551,16 +824,12 @@ pub async fn heartbeat_tributaries_task( if SystemTime::now() > (block_time + Duration::from_secs(60)) { log::warn!("last known tributary block was over a minute ago"); let mut msg = tip.to_vec(); - // Also include the timestamp so LibP2p doesn't flag this as an old message re-circulating - let timestamp = SystemTime::now() + let time: u64 = SystemTime::now() .duration_since(SystemTime::UNIX_EPOCH) .expect("system clock is wrong") .as_secs(); - // Divide by the block time so if multiple parties send a Heartbeat, they're more likely to - // overlap - let time_unit = timestamp / u64::from(Tributary::::block_time()); - msg.extend(time_unit.to_le_bytes()); - P2p::broadcast(&p2p, P2pMessageKind::Heartbeat(tributary.genesis()), msg).await; + msg.extend(time.to_le_bytes()); + P2p::broadcast(&p2p, ReqResMessageKind::Heartbeat(tributary.genesis()), msg).await; } } @@ -592,6 +861,8 @@ pub async fn handle_p2p_task( // Subscribe to the topic for this tributary p2p.subscribe(tributary.spec.set(), genesis).await; + let spec_set = tributary.spec.set(); + // Per-Tributary P2P message handler tokio::spawn({ let p2p = p2p.clone(); @@ -602,91 +873,58 @@ pub async fn handle_p2p_task( break; }; match msg.kind { - P2pMessageKind::KeepAlive => {} + P2pMessageKind::ReqRes(ReqResMessageKind::KeepAlive) => {} - P2pMessageKind::Tributary(msg_genesis) => { - assert_eq!(msg_genesis, genesis); - log::trace!("handling message for tributary {:?}", tributary.spec.set()); - if tributary.tributary.handle_message(&msg.msg).await { - P2p::broadcast(&p2p, msg.kind, msg.msg).await; - } - } - - // TODO2: Rate limit this per timestamp - // And/or slash on Heartbeat which 
justifies a response, since the node + // TODO: Slash on Heartbeat which justifies a response, since the node // obviously was offline and we must now use our bandwidth to compensate for // them? - P2pMessageKind::Heartbeat(msg_genesis) => { + P2pMessageKind::ReqRes(ReqResMessageKind::Heartbeat(msg_genesis)) => { assert_eq!(msg_genesis, genesis); if msg.msg.len() != 40 { log::error!("validator sent invalid heartbeat"); continue; } + // Only respond to recent heartbeats + let msg_time = u64::from_le_bytes(msg.msg[32 .. 40].try_into().expect( + "length-checked heartbeat message didn't have 8 bytes for the u64", + )); + if SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .expect("system clock is wrong") + .as_secs() + .saturating_sub(msg_time) > + 10 + { + continue; + } + + log::debug!("received heartbeat with a recent timestamp"); - let p2p = p2p.clone(); - let spec = tributary.spec.clone(); let reader = tributary.tributary.reader(); + + let p2p = p2p.clone(); // Spawn a dedicated task as this may require loading large amounts of data // from disk and take a notable amount of time tokio::spawn(async move { - /* - // Have sqrt(n) nodes reply with the blocks - let mut responders = (tributary.spec.n() as f32).sqrt().floor() as u64; - // Try to have at least 3 responders - if responders < 3 { - responders = tributary.spec.n().min(3).into(); - } - */ - - /* - // Have up to three nodes respond - let responders = u64::from(spec.n().min(3)); - - // Decide which nodes will respond by using the latest block's hash as a - // mutually agreed upon entropy source - // This isn't a secure source of entropy, yet it's fine for this - let entropy = u64::from_le_bytes(reader.tip()[.. 
8].try_into().unwrap()); - // If n = 10, responders = 3, we want `start` to be 0 ..= 7 - // (so the highest is 7, 8, 9) - // entropy % (10 + 1) - 3 = entropy % 8 = 0 ..= 7 - let start = - usize::try_from(entropy % (u64::from(spec.n() + 1) - responders)) - .unwrap(); - let mut selected = false; - for validator in &spec.validators() - [start .. (start + usize::try_from(responders).unwrap())] - { - if our_key == validator.0 { - selected = true; - break; - } - } - if !selected { - log::debug!("received heartbeat and not selected to respond"); - return; - } - - log::debug!("received heartbeat and selected to respond"); - */ - - // Have every node respond - // While we could only have a subset respond, LibP2P will sync all messages - // it isn't aware of - // It's cheaper to be aware from our disk than from over the network - // TODO: Spawn a dedicated topic for this heartbeat response? let mut latest = msg.msg[.. 32].try_into().unwrap(); + let mut to_send = vec![]; while let Some(next) = reader.block_after(&latest) { - let mut res = reader.block(&next).unwrap().serialize(); - res.extend(reader.commit(&next).unwrap()); - // Also include the timestamp used within the Heartbeat - res.extend(&msg.msg[32 .. 40]); - p2p.send(msg.sender, P2pMessageKind::Block(spec.genesis()), res).await; + to_send.push(next); latest = next; } + if to_send.len() > 3 { + for next in to_send { + let mut res = reader.block(&next).unwrap().serialize(); + res.extend(reader.commit(&next).unwrap()); + // Also include the timestamp used within the Heartbeat + res.extend(&msg.msg[32 .. 
40]); + p2p.send(msg.sender, ReqResMessageKind::Block(genesis), res).await; + } + } }); } - P2pMessageKind::Block(msg_genesis) => { + P2pMessageKind::ReqRes(ReqResMessageKind::Block(msg_genesis)) => { assert_eq!(msg_genesis, genesis); let mut msg_ref: &[u8] = msg.msg.as_ref(); let Ok(block) = Block::::read(&mut msg_ref) else { @@ -705,7 +943,15 @@ pub async fn handle_p2p_task( ); } - P2pMessageKind::CosignedBlock => unreachable!(), + P2pMessageKind::Gossip(GossipMessageKind::Tributary(msg_genesis)) => { + assert_eq!(msg_genesis, genesis); + log::trace!("handling message for tributary {:?}", spec_set); + if tributary.tributary.handle_message(&msg.msg).await { + P2p::broadcast(&p2p, msg.kind, msg.msg).await; + } + } + + P2pMessageKind::Gossip(GossipMessageKind::CosignedBlock) => unreachable!(), } } } @@ -725,15 +971,16 @@ pub async fn handle_p2p_task( loop { let msg = p2p.receive().await; match msg.kind { - P2pMessageKind::KeepAlive => {} - P2pMessageKind::Tributary(genesis) | - P2pMessageKind::Heartbeat(genesis) | - P2pMessageKind::Block(genesis) => { + P2pMessageKind::ReqRes(ReqResMessageKind::KeepAlive) => {} + P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) | + P2pMessageKind::ReqRes( + ReqResMessageKind::Heartbeat(genesis) | ReqResMessageKind::Block(genesis), + ) => { if let Some(channel) = channels.read().await.get(&genesis) { channel.send(msg).unwrap(); } } - P2pMessageKind::CosignedBlock => { + P2pMessageKind::Gossip(GossipMessageKind::CosignedBlock) => { let Ok(msg) = CosignedBlock::deserialize_reader(&mut msg.msg.as_slice()) else { log::error!("received CosignedBlock message with invalidly serialized contents"); continue; diff --git a/coordinator/src/substrate/cosign.rs b/coordinator/src/substrate/cosign.rs index 2443c8116..005607632 100644 --- a/coordinator/src/substrate/cosign.rs +++ b/coordinator/src/substrate/cosign.rs @@ -41,8 +41,9 @@ enum HasEvents { create_db!( SubstrateCosignDb { + ScanCosignFrom: () -> u64, IntendedCosign: () -> 
(u64, Option), - BlockHasEvents: (block: u64) -> HasEvents, + BlockHasEventsCache: (block: u64) -> HasEvents, LatestCosignedBlock: () -> u64, } ); @@ -85,7 +86,7 @@ async fn block_has_events( serai: &Serai, block: u64, ) -> Result { - let cached = BlockHasEvents::get(txn, block); + let cached = BlockHasEventsCache::get(txn, block); match cached { None => { let serai = serai.as_of( @@ -107,8 +108,8 @@ async fn block_has_events( let has_events = if has_no_events { HasEvents::No } else { HasEvents::Yes }; - BlockHasEvents::set(txn, block, &has_events); - Ok(HasEvents::Yes) + BlockHasEventsCache::set(txn, block, &has_events); + Ok(has_events) } Some(code) => Ok(code), } @@ -135,6 +136,7 @@ async fn potentially_cosign_block( if (block_has_events == HasEvents::No) && (LatestCosignedBlock::latest_cosigned_block(txn) == (block - 1)) { + log::debug!("automatically co-signing next block ({block}) since it has no events"); LatestCosignedBlock::set(txn, &block); } @@ -178,7 +180,7 @@ async fn potentially_cosign_block( which should be cosigned). Accordingly, it is necessary to call multiple times even if `latest_number` doesn't change. 
*/ -pub async fn advance_cosign_protocol( +async fn advance_cosign_protocol_inner( db: &mut impl Db, key: &Zeroizing<::F>, serai: &Serai, @@ -203,16 +205,23 @@ pub async fn advance_cosign_protocol( let mut window_end_exclusive = last_intended_to_cosign_block + COSIGN_DISTANCE; // If we've never triggered a cosign, don't skip any cosigns based on proximity if last_intended_to_cosign_block == INITIAL_INTENDED_COSIGN { - window_end_exclusive = 0; + window_end_exclusive = 1; } + // The consensus rules for this are `last_intended_to_cosign_block + 1` + let scan_start_block = last_intended_to_cosign_block + 1; + // As a practical optimization, we don't re-scan old blocks since old blocks are independent to + // new state + let scan_start_block = scan_start_block.max(ScanCosignFrom::get(&txn).unwrap_or(1)); + // Check all blocks within the window to see if they should be cosigned // If so, we're skipping them and need to flag them as skipped so that once the window closes, we // do cosign them // We only perform this check if we haven't already marked a block as skipped since the cosign // the skipped block will cause will cosign all other blocks within this window if skipped_block.is_none() { - for b in (last_intended_to_cosign_block + 1) .. window_end_exclusive.min(latest_number) { + let window_end_inclusive = window_end_exclusive - 1; + for b in scan_start_block ..= window_end_inclusive.min(latest_number) { if block_has_events(&mut txn, serai, b).await? == HasEvents::Yes { skipped_block = Some(b); log::debug!("skipping cosigning {b} due to proximity to prior cosign"); @@ -227,7 +236,7 @@ pub async fn advance_cosign_protocol( // A list of sets which are cosigning, along with a boolean of if we're in the set let mut cosigning = vec![]; - for block in (last_intended_to_cosign_block + 1) ..= latest_number { + for block in scan_start_block ..= latest_number { let actual_block = serai .finalized_block_by_number(block) .await? 
@@ -276,6 +285,11 @@ pub async fn advance_cosign_protocol( break; } + + // If this TX is committed, always start future scanning from the next block + ScanCosignFrom::set(&mut txn, &(block + 1)); + // Since we're scanning *from* the next block, tidy the cache + BlockHasEventsCache::del(&mut txn, block); } if let Some((number, hash)) = to_cosign { @@ -297,3 +311,22 @@ pub async fn advance_cosign_protocol( Ok(()) } + +pub async fn advance_cosign_protocol( + db: &mut impl Db, + key: &Zeroizing<::F>, + serai: &Serai, + latest_number: u64, +) -> Result<(), SeraiError> { + loop { + let scan_from = ScanCosignFrom::get(db).unwrap_or(1); + // Only scan 1000 blocks at a time to limit a massive txn from forming + let scan_to = latest_number.min(scan_from + 1000); + advance_cosign_protocol_inner(db, key, serai, scan_to).await?; + // If we didn't limit the scan_to, break + if scan_to == latest_number { + break; + } + } + Ok(()) +} diff --git a/coordinator/src/substrate/mod.rs b/coordinator/src/substrate/mod.rs index 7a76353ce..fb1e3aed2 100644 --- a/coordinator/src/substrate/mod.rs +++ b/coordinator/src/substrate/mod.rs @@ -11,10 +11,7 @@ use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto}; use serai_client::{ SeraiError, Block, Serai, TemporalSerai, primitives::{BlockHash, NetworkId}, - validator_sets::{ - primitives::{ValidatorSet, amortize_excess_key_shares}, - ValidatorSetsEvent, - }, + validator_sets::{primitives::ValidatorSet, ValidatorSetsEvent}, in_instructions::InInstructionsEvent, coins::CoinsEvent, }; @@ -69,12 +66,7 @@ async fn handle_new_set( let set_participants = serai.participants(set.network).await?.expect("NewSet for set which doesn't exist"); - let mut set_data = set_participants - .into_iter() - .map(|(k, w)| (k, u16::try_from(w).unwrap())) - .collect::>(); - amortize_excess_key_shares(&mut set_data); - set_data + set_participants.into_iter().map(|(k, w)| (k, u16::try_from(w).unwrap())).collect::>() }; let time = if let Ok(time) = block.time() { 
diff --git a/coordinator/src/tests/mod.rs b/coordinator/src/tests/mod.rs index 45a62297c..db4c158fd 100644 --- a/coordinator/src/tests/mod.rs +++ b/coordinator/src/tests/mod.rs @@ -14,7 +14,7 @@ use tokio::sync::RwLock; use crate::{ processors::{Message, Processors}, - TributaryP2p, P2pMessageKind, P2p, + TributaryP2p, ReqResMessageKind, GossipMessageKind, P2pMessageKind, Message as P2pMessage, P2p, }; pub mod tributary; @@ -45,7 +45,10 @@ impl Processors for MemProcessors { #[allow(clippy::type_complexity)] #[derive(Clone, Debug)] -pub struct LocalP2p(usize, pub Arc>, Vec)>>)>>); +pub struct LocalP2p( + usize, + pub Arc>, Vec)>>)>>, +); impl LocalP2p { pub fn new(validators: usize) -> Vec { @@ -65,11 +68,13 @@ impl P2p for LocalP2p { async fn subscribe(&self, _set: ValidatorSet, _genesis: [u8; 32]) {} async fn unsubscribe(&self, _set: ValidatorSet, _genesis: [u8; 32]) {} - async fn send_raw(&self, to: Self::Id, _genesis: Option<[u8; 32]>, msg: Vec) { - self.1.write().await.1[to].push_back((self.0, msg)); + async fn send_raw(&self, to: Self::Id, msg: Vec) { + let mut msg_ref = msg.as_slice(); + let kind = ReqResMessageKind::read(&mut msg_ref).unwrap(); + self.1.write().await.1[to].push_back((self.0, P2pMessageKind::ReqRes(kind), msg_ref.to_vec())); } - async fn broadcast_raw(&self, _genesis: Option<[u8; 32]>, msg: Vec) { + async fn broadcast_raw(&self, kind: P2pMessageKind, msg: Vec) { // Content-based deduplication let mut lock = self.1.write().await; { @@ -81,19 +86,26 @@ impl P2p for LocalP2p { } let queues = &mut lock.1; + let kind_len = (match kind { + P2pMessageKind::ReqRes(kind) => kind.serialize(), + P2pMessageKind::Gossip(kind) => kind.serialize(), + }) + .len(); + let msg = msg[kind_len ..].to_vec(); + for (i, msg_queue) in queues.iter_mut().enumerate() { if i == self.0 { continue; } - msg_queue.push_back((self.0, msg.clone())); + msg_queue.push_back((self.0, kind, msg.clone())); } } - async fn receive_raw(&self) -> (Self::Id, Vec) { + async fn 
receive(&self) -> P2pMessage { // This is a cursed way to implement an async read from a Vec loop { - if let Some(res) = self.1.write().await.1[self.0].pop_front() { - return res; + if let Some((sender, kind, msg)) = self.1.write().await.1[self.0].pop_front() { + return P2pMessage { sender, kind, msg }; } tokio::time::sleep(std::time::Duration::from_millis(100)).await; } @@ -103,6 +115,11 @@ impl P2p for LocalP2p { #[async_trait] impl TributaryP2p for LocalP2p { async fn broadcast(&self, genesis: [u8; 32], msg: Vec) { - ::broadcast(self, P2pMessageKind::Tributary(genesis), msg).await + ::broadcast( + self, + P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)), + msg, + ) + .await } } diff --git a/coordinator/src/tests/tributary/chain.rs b/coordinator/src/tests/tributary/chain.rs index 360af7ecf..7fc6a0647 100644 --- a/coordinator/src/tests/tributary/chain.rs +++ b/coordinator/src/tests/tributary/chain.rs @@ -26,7 +26,7 @@ use serai_db::MemDb; use tributary::Tributary; use crate::{ - P2pMessageKind, P2p, + GossipMessageKind, P2pMessageKind, P2p, tributary::{Transaction, TributarySpec}, tests::LocalP2p, }; @@ -98,7 +98,7 @@ pub async fn run_tributaries( for (p2p, tributary) in &mut tributaries { while let Poll::Ready(msg) = poll!(p2p.receive()) { match msg.kind { - P2pMessageKind::Tributary(genesis) => { + P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) => { assert_eq!(genesis, tributary.genesis()); if tributary.handle_message(&msg.msg).await { p2p.broadcast(msg.kind, msg.msg).await; @@ -173,7 +173,7 @@ async fn tributary_test() { for (p2p, tributary) in &mut tributaries { while let Poll::Ready(msg) = poll!(p2p.receive()) { match msg.kind { - P2pMessageKind::Tributary(genesis) => { + P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) => { assert_eq!(genesis, tributary.genesis()); tributary.handle_message(&msg.msg).await; } @@ -199,7 +199,7 @@ async fn tributary_test() { for (p2p, tributary) in &mut tributaries { while let 
Poll::Ready(msg) = poll!(p2p.receive()) { match msg.kind { - P2pMessageKind::Tributary(genesis) => { + P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) => { assert_eq!(genesis, tributary.genesis()); tributary.handle_message(&msg.msg).await; } diff --git a/coordinator/src/tests/tributary/sync.rs b/coordinator/src/tests/tributary/sync.rs index 0a468c63e..18f60864d 100644 --- a/coordinator/src/tests/tributary/sync.rs +++ b/coordinator/src/tests/tributary/sync.rs @@ -116,8 +116,8 @@ async fn sync_test() { .map_err(|_| "failed to send ActiveTributary to heartbeat") .unwrap(); - // The heartbeat is once every 10 blocks - sleep(Duration::from_secs(10 * block_time)).await; + // The heartbeat is once every 10 blocks, with some limitations + sleep(Duration::from_secs(20 * block_time)).await; assert!(syncer_tributary.tip().await != spec.genesis()); // Verify it synced to the tip diff --git a/coordinator/tributary/src/lib.rs b/coordinator/tributary/src/lib.rs index 5a5df1a7a..a4c6bfe5d 100644 --- a/coordinator/tributary/src/lib.rs +++ b/coordinator/tributary/src/lib.rs @@ -1,5 +1,5 @@ use core::{marker::PhantomData, fmt::Debug}; -use std::{sync::Arc, io}; +use std::{sync::Arc, io, collections::VecDeque}; use async_trait::async_trait; @@ -59,8 +59,7 @@ pub const ACCOUNT_MEMPOOL_LIMIT: u32 = 50; pub const BLOCK_SIZE_LIMIT: usize = 3_001_000; pub(crate) const TENDERMINT_MESSAGE: u8 = 0; -pub(crate) const BLOCK_MESSAGE: u8 = 1; -pub(crate) const TRANSACTION_MESSAGE: u8 = 2; +pub(crate) const TRANSACTION_MESSAGE: u8 = 1; #[allow(clippy::large_enum_variant)] #[derive(Clone, PartialEq, Eq, Debug)] @@ -194,7 +193,7 @@ impl Tributary { ); let blockchain = Arc::new(RwLock::new(blockchain)); - let to_rebroadcast = Arc::new(RwLock::new(vec![])); + let to_rebroadcast = Arc::new(RwLock::new(VecDeque::new())); // Actively rebroadcast consensus messages to ensure they aren't prematurely dropped from the // P2P layer let p2p_meta_task_handle = Arc::new( @@ -207,7 +206,7 @@ impl 
Tributary { for msg in to_rebroadcast { p2p.broadcast(genesis, msg).await; } - tokio::time::sleep(core::time::Duration::from_secs(1)).await; + tokio::time::sleep(core::time::Duration::from_secs(60)).await; } } }) @@ -218,7 +217,15 @@ impl Tributary { TendermintNetwork { genesis, signer, validators, blockchain, to_rebroadcast, p2p }; let TendermintHandle { synced_block, synced_block_result, messages, machine } = - TendermintMachine::new(network.clone(), block_number, start_time, proposal).await; + TendermintMachine::new( + db.clone(), + network.clone(), + genesis, + block_number, + start_time, + proposal, + ) + .await; tokio::spawn(machine.run()); Some(Self { @@ -328,9 +335,6 @@ impl Tributary { // Return true if the message should be rebroadcasted. pub async fn handle_message(&self, msg: &[u8]) -> bool { - // Acquire the lock now to prevent sync_block from being run at the same time - let mut sync_block = self.synced_block_result.write().await; - match msg.first() { Some(&TRANSACTION_MESSAGE) => { let Ok(tx) = Transaction::read::<&[u8]>(&mut &msg[1 ..]) else { @@ -362,19 +366,6 @@ impl Tributary { false } - Some(&BLOCK_MESSAGE) => { - let mut msg_ref = &msg[1 ..]; - let Ok(block) = Block::::read(&mut msg_ref) else { - log::error!("received invalid block message"); - return false; - }; - let commit = msg[(msg.len() - msg_ref.len()) ..].to_vec(); - if self.sync_block_internal(block, commit, &mut sync_block).await { - log::debug!("synced block over p2p net instead of building the commit ourselves"); - } - false - } - _ => false, } } diff --git a/coordinator/tributary/src/provided.rs b/coordinator/tributary/src/provided.rs index 103286afb..27c5f3cda 100644 --- a/coordinator/tributary/src/provided.rs +++ b/coordinator/tributary/src/provided.rs @@ -74,7 +74,7 @@ impl ProvidedTransactions { panic!("provided transaction saved to disk wasn't provided"); }; - if res.transactions.get(order).is_none() { + if !res.transactions.contains_key(order) { 
res.transactions.insert(order, VecDeque::new()); } res.transactions.get_mut(order).unwrap().push_back(tx); @@ -135,7 +135,7 @@ impl ProvidedTransactions { txn.put(current_provided_key, currently_provided); txn.commit(); - if self.transactions.get(order).is_none() { + if !self.transactions.contains_key(order) { self.transactions.insert(order, VecDeque::new()); } self.transactions.get_mut(order).unwrap().push_back(tx); diff --git a/coordinator/tributary/src/tendermint/mod.rs b/coordinator/tributary/src/tendermint/mod.rs index dc62c798a..e38efa5d3 100644 --- a/coordinator/tributary/src/tendermint/mod.rs +++ b/coordinator/tributary/src/tendermint/mod.rs @@ -1,5 +1,8 @@ use core::ops::Deref; -use std::{sync::Arc, collections::HashMap}; +use std::{ + sync::Arc, + collections::{VecDeque, HashMap}, +}; use async_trait::async_trait; @@ -38,9 +41,8 @@ use tendermint::{ use tokio::sync::RwLock; use crate::{ - TENDERMINT_MESSAGE, TRANSACTION_MESSAGE, BLOCK_MESSAGE, ReadWrite, - transaction::Transaction as TransactionTrait, Transaction, BlockHeader, Block, BlockError, - Blockchain, P2p, + TENDERMINT_MESSAGE, TRANSACTION_MESSAGE, ReadWrite, transaction::Transaction as TransactionTrait, + Transaction, BlockHeader, Block, BlockError, Blockchain, P2p, }; pub mod tx; @@ -268,7 +270,7 @@ pub struct TendermintNetwork { pub(crate) validators: Arc, pub(crate) blockchain: Arc>>, - pub(crate) to_rebroadcast: Arc>>>, + pub(crate) to_rebroadcast: Arc>>>, pub(crate) p2p: P, } @@ -277,31 +279,10 @@ pub const BLOCK_PROCESSING_TIME: u32 = 999; pub const LATENCY_TIME: u32 = 1667; pub const TARGET_BLOCK_TIME: u32 = BLOCK_PROCESSING_TIME + (3 * LATENCY_TIME); -#[test] -fn assert_target_block_time() { - use serai_db::MemDb; - - #[derive(Clone, Debug)] - pub struct DummyP2p; - - #[async_trait::async_trait] - impl P2p for DummyP2p { - async fn broadcast(&self, _: [u8; 32], _: Vec) { - unimplemented!() - } - } - - // Type paremeters don't matter here since we only need to call the block_time() - // 
and it only relies on the constants of the trait implementation. block_time() is in seconds, - // TARGET_BLOCK_TIME is in milliseconds. - assert_eq!( - as Network>::block_time(), - TARGET_BLOCK_TIME / 1000 - ) -} - #[async_trait] impl Network for TendermintNetwork { + type Db = D; + type ValidatorId = [u8; 32]; type SignatureScheme = Arc; type Weights = Arc; @@ -325,19 +306,28 @@ impl Network for TendermintNetwork } async fn broadcast(&mut self, msg: SignedMessageFor) { + let mut to_broadcast = vec![TENDERMINT_MESSAGE]; + to_broadcast.extend(msg.encode()); + // Since we're broadcasting a Tendermint message, set it to be re-broadcasted every second // until the block it's trying to build is complete // If the P2P layer drops a message before all nodes obtained access, or a node had an // intermittent failure, this will ensure reconcilliation - // Resolves halts caused by timing discrepancies, which technically are violations of - // Tendermint as a BFT protocol, and shouldn't occur yet have in low-powered testing - // environments // This is atrocious if there's no content-based deduplication protocol for messages actively // being gossiped // LibP2p, as used by Serai, is configured to content-based deduplicate - let mut to_broadcast = vec![TENDERMINT_MESSAGE]; - to_broadcast.extend(msg.encode()); - self.to_rebroadcast.write().await.push(to_broadcast.clone()); + { + let mut to_rebroadcast_lock = self.to_rebroadcast.write().await; + to_rebroadcast_lock.push_back(to_broadcast.clone()); + // We should have, ideally, 3 * validators messages within a round + // Therefore, this should keep the most recent 2-rounds + // TODO: This isn't perfect. 
Each participant should just rebroadcast their latest round of + // messages + while to_rebroadcast_lock.len() > (6 * self.validators.weights.len()) { + to_rebroadcast_lock.pop_front(); + } + } + self.p2p.broadcast(self.genesis, to_broadcast).await } @@ -423,12 +413,7 @@ impl Network for TendermintNetwork ); match block_res { Ok(()) => { - // If we successfully added this block, broadcast it - // TODO: Move this under the coordinator once we set up on new block notifications? - let mut msg = serialized_block.0; - msg.insert(0, BLOCK_MESSAGE); - msg.extend(encoded_commit); - self.p2p.broadcast(self.genesis, msg).await; + // If we successfully added this block, break break; } Err(BlockError::NonLocalProvided(hash)) => { @@ -437,13 +422,14 @@ impl Network for TendermintNetwork hex::encode(hash), hex::encode(self.genesis) ); + tokio::time::sleep(core::time::Duration::from_secs(5)).await; } _ => return invalid_block(), } } // Since we've added a valid block, clear to_rebroadcast - *self.to_rebroadcast.write().await = vec![]; + *self.to_rebroadcast.write().await = VecDeque::new(); Some(TendermintBlock( self.blockchain.write().await.build_block::(&self.signature_scheme()).serialize(), diff --git a/coordinator/tributary/src/tests/mod.rs b/coordinator/tributary/src/tests/mod.rs index 7c75ac365..dcaa11a55 100644 --- a/coordinator/tributary/src/tests/mod.rs +++ b/coordinator/tributary/src/tests/mod.rs @@ -1,3 +1,6 @@ +#[cfg(test)] +mod tendermint; + mod transaction; pub use transaction::*; diff --git a/coordinator/tributary/src/tests/tendermint.rs b/coordinator/tributary/src/tests/tendermint.rs new file mode 100644 index 000000000..77dfc9e57 --- /dev/null +++ b/coordinator/tributary/src/tests/tendermint.rs @@ -0,0 +1,28 @@ +use tendermint::ext::Network; +use crate::{ + P2p, TendermintTx, + tendermint::{TARGET_BLOCK_TIME, TendermintNetwork}, +}; + +#[test] +fn assert_target_block_time() { + use serai_db::MemDb; + + #[derive(Clone, Debug)] + pub struct DummyP2p; + + 
#[async_trait::async_trait] + impl P2p for DummyP2p { + async fn broadcast(&self, _: [u8; 32], _: Vec) { + unimplemented!() + } + } + + // Type paremeters don't matter here since we only need to call the block_time() + // and it only relies on the constants of the trait implementation. block_time() is in seconds, + // TARGET_BLOCK_TIME is in milliseconds. + assert_eq!( + as Network>::block_time(), + TARGET_BLOCK_TIME / 1000 + ) +} diff --git a/coordinator/tributary/tendermint/Cargo.toml b/coordinator/tributary/tendermint/Cargo.toml index ba640391e..5a2905904 100644 --- a/coordinator/tributary/tendermint/Cargo.toml +++ b/coordinator/tributary/tendermint/Cargo.toml @@ -27,5 +27,7 @@ futures-util = { version = "0.3", default-features = false, features = ["std", " futures-channel = { version = "0.3", default-features = false, features = ["std", "sink"] } tokio = { version = "1", default-features = false, features = ["time"] } +serai-db = { path = "../../../common/db", version = "0.1", default-features = false } + [dev-dependencies] tokio = { version = "1", features = ["sync", "rt-multi-thread", "macros"] } diff --git a/coordinator/tributary/tendermint/src/block.rs b/coordinator/tributary/tendermint/src/block.rs index 8136f888d..8fc790188 100644 --- a/coordinator/tributary/tendermint/src/block.rs +++ b/coordinator/tributary/tendermint/src/block.rs @@ -3,6 +3,9 @@ use std::{ collections::{HashSet, HashMap}, }; +use parity_scale_codec::Encode; +use serai_db::{Get, DbTxn, Db}; + use crate::{ time::CanonicalInstant, ext::{RoundNumber, BlockNumber, Block, Network}, @@ -12,6 +15,9 @@ use crate::{ }; pub(crate) struct BlockData { + db: N::Db, + genesis: [u8; 32], + pub(crate) number: BlockNumber, pub(crate) validator_id: Option, pub(crate) proposal: Option, @@ -32,12 +38,17 @@ pub(crate) struct BlockData { impl BlockData { pub(crate) fn new( + db: N::Db, + genesis: [u8; 32], weights: Arc, number: BlockNumber, validator_id: Option, proposal: Option, ) -> BlockData { BlockData { 
+ db, + genesis, + number, validator_id, proposal, @@ -129,11 +140,70 @@ impl BlockData { self.round_mut().step = data.step(); // Only return a message to if we're actually a current validator - self.validator_id.map(|validator_id| Message { + let round_number = self.round().number; + let res = self.validator_id.map(|validator_id| Message { sender: validator_id, block: self.number, - round: self.round().number, + round: round_number, data, - }) + }); + + if let Some(res) = res.as_ref() { + const LATEST_BLOCK_KEY: &[u8] = b"tendermint-machine-sent_block"; + const LATEST_ROUND_KEY: &[u8] = b"tendermint-machine-sent_round"; + const PROPOSE_KEY: &[u8] = b"tendermint-machine-sent_propose"; + const PEVOTE_KEY: &[u8] = b"tendermint-machine-sent_prevote"; + const PRECOMMIT_KEY: &[u8] = b"tendermint-machine-sent_commit"; + + let genesis = self.genesis; + let key = |prefix: &[u8]| [prefix, &genesis].concat(); + + let mut txn = self.db.txn(); + + // Ensure we haven't prior sent a message for a future block/round + let last_block_or_round = |txn: &mut ::Transaction<'_>, prefix, current| { + let key = key(prefix); + let latest = + u64::from_le_bytes(txn.get(key.as_slice()).unwrap_or(vec![0; 8]).try_into().unwrap()); + if latest > current { + None?; + } + if current > latest { + txn.put(&key, current.to_le_bytes()); + return Some(true); + } + Some(false) + }; + let new_block = last_block_or_round(&mut txn, LATEST_BLOCK_KEY, self.number.0)?; + if new_block { + // Delete the latest round key + txn.del(key(LATEST_ROUND_KEY)); + } + let new_round = last_block_or_round(&mut txn, LATEST_ROUND_KEY, round_number.0.into())?; + if new_block || new_round { + // Delete the messages for the old round + txn.del(key(PROPOSE_KEY)); + txn.del(key(PEVOTE_KEY)); + txn.del(key(PRECOMMIT_KEY)); + } + + // Check we haven't sent this message within this round + let msg_key = key(match res.data.step() { + Step::Propose => PROPOSE_KEY, + Step::Prevote => PEVOTE_KEY, + Step::Precommit => PRECOMMIT_KEY, + 
}); + if txn.get(&msg_key).is_some() { + assert!(!new_block); + assert!(!new_round); + None?; + } + // Put this message to the DB + txn.put(&msg_key, res.encode()); + + txn.commit(); + } + + res } } diff --git a/coordinator/tributary/tendermint/src/ext.rs b/coordinator/tributary/tendermint/src/ext.rs index 3d13a3b3e..b3d568a23 100644 --- a/coordinator/tributary/tendermint/src/ext.rs +++ b/coordinator/tributary/tendermint/src/ext.rs @@ -212,6 +212,9 @@ pub trait Block: Send + Sync + Clone + PartialEq + Eq + Debug + Encode + Decode /// Trait representing the distributed system Tendermint is providing consensus over. #[async_trait] pub trait Network: Sized + Send + Sync { + /// The database used to back this. + type Db: serai_db::Db; + // Type used to identify validators. type ValidatorId: ValidatorId; /// Signature scheme used by validators. diff --git a/coordinator/tributary/tendermint/src/lib.rs b/coordinator/tributary/tendermint/src/lib.rs index 163db6fc7..adc6fef7f 100644 --- a/coordinator/tributary/tendermint/src/lib.rs +++ b/coordinator/tributary/tendermint/src/lib.rs @@ -231,6 +231,9 @@ pub enum SlashEvent { /// A machine executing the Tendermint protocol. 
pub struct TendermintMachine { + db: N::Db, + genesis: [u8; 32], + network: N, signer: ::Signer, validators: N::SignatureScheme, @@ -310,11 +313,16 @@ impl TendermintMachine { let time_until_round_end = round_end.instant().saturating_duration_since(Instant::now()); if time_until_round_end == Duration::ZERO { log::trace!( + target: "tendermint", "resetting when prior round ended {}ms ago", Instant::now().saturating_duration_since(round_end.instant()).as_millis(), ); } - log::trace!("sleeping until round ends in {}ms", time_until_round_end.as_millis()); + log::trace!( + target: "tendermint", + "sleeping until round ends in {}ms", + time_until_round_end.as_millis(), + ); sleep(time_until_round_end).await; // Clear our outbound message queue @@ -322,6 +330,8 @@ impl TendermintMachine { // Create the new block self.block = BlockData::new( + self.db.clone(), + self.genesis, self.weights.clone(), BlockNumber(self.block.number.0 + 1), self.signer.validator_id().await, @@ -370,7 +380,9 @@ impl TendermintMachine { /// the machine itself. The machine should have `run` called from an asynchronous task. #[allow(clippy::new_ret_no_self)] pub async fn new( + db: N::Db, network: N, + genesis: [u8; 32], last_block: BlockNumber, last_time: u64, proposal: N::Block, @@ -409,6 +421,9 @@ impl TendermintMachine { let validator_id = signer.validator_id().await; // 01-10 let mut machine = TendermintMachine { + db: db.clone(), + genesis, + network, signer, validators, @@ -420,6 +435,8 @@ impl TendermintMachine { synced_block_result_send, block: BlockData::new( + db, + genesis, weights, BlockNumber(last_block.0 + 1), validator_id, @@ -497,7 +514,7 @@ impl TendermintMachine { match step { Step::Propose => { // Slash the validator for not proposing when they should've - log::debug!(target: "tendermint", "Validator didn't propose when they should have"); + log::debug!(target: "tendermint", "validator didn't propose when they should have"); // this slash will be voted on. 
self.slash( self.weights.proposer(self.block.number, self.block.round().number), @@ -586,7 +603,11 @@ impl TendermintMachine { ); let id = block.id(); let proposal = self.network.add_block(block, commit).await; - log::trace!("added block {} (produced by machine)", hex::encode(id.as_ref())); + log::trace!( + target: "tendermint", + "added block {} (produced by machine)", + hex::encode(id.as_ref()), + ); self.reset(msg.round, proposal).await; } Err(TendermintError::Malicious(sender, evidence)) => { @@ -680,7 +701,12 @@ impl TendermintMachine { (msg.round == self.block.round().number) && (msg.data.step() == Step::Propose) { - log::trace!("received Propose for block {}, round {}", msg.block.0, msg.round.0); + log::trace!( + target: "tendermint", + "received Propose for block {}, round {}", + msg.block.0, + msg.round.0, + ); } // If this is a precommit, verify its signature @@ -698,7 +724,13 @@ impl TendermintMachine { if !self.block.log.log(signed.clone())? { return Err(TendermintError::AlreadyHandled); } - log::debug!(target: "tendermint", "received new tendermint message"); + log::trace!( + target: "tendermint", + "received new tendermint message (block: {}, round: {}, step: {:?})", + msg.block.0, + msg.round.0, + msg.data.step(), + ); // All functions, except for the finalizer and the jump, are locked to the current round @@ -745,6 +777,13 @@ impl TendermintMachine { // 55-56 // Jump, enabling processing by the below code if self.block.log.round_participation(msg.round) > self.weights.fault_threshold() { + log::debug!( + target: "tendermint", + "jumping from round {} to round {}", + self.block.round().number.0, + msg.round.0, + ); + // Jump to the new round. 
let proposer = self.round(msg.round, None); @@ -802,13 +841,26 @@ impl TendermintMachine { if (self.block.round().step == Step::Prevote) && matches!(msg.data, Data::Prevote(_)) { let (participation, weight) = self.block.log.message_instances(self.block.round().number, &Data::Prevote(None)); + let threshold_weight = self.weights.threshold(); + if participation < threshold_weight { + log::trace!( + target: "tendermint", + "progess towards setting prevote timeout, participation: {}, needed: {}", + participation, + threshold_weight, + ); + } // 34-35 - if participation >= self.weights.threshold() { + if participation >= threshold_weight { + log::trace!( + target: "tendermint", + "setting timeout for prevote due to sufficient participation", + ); self.block.round_mut().set_timeout(Step::Prevote); } // 44-46 - if weight >= self.weights.threshold() { + if weight >= threshold_weight { self.broadcast(Data::Precommit(None)); return Ok(None); } @@ -818,6 +870,10 @@ impl TendermintMachine { if matches!(msg.data, Data::Precommit(_)) && self.block.log.has_participation(self.block.round().number, Step::Precommit) { + log::trace!( + target: "tendermint", + "setting timeout for precommit due to sufficient participation", + ); self.block.round_mut().set_timeout(Step::Precommit); } diff --git a/coordinator/tributary/tendermint/src/message_log.rs b/coordinator/tributary/tendermint/src/message_log.rs index e045189bb..3959852d8 100644 --- a/coordinator/tributary/tendermint/src/message_log.rs +++ b/coordinator/tributary/tendermint/src/message_log.rs @@ -1,6 +1,5 @@ use std::{sync::Arc, collections::HashMap}; -use log::debug; use parity_scale_codec::Encode; use crate::{ext::*, RoundNumber, Step, DataFor, TendermintError, SignedMessageFor, Evidence}; @@ -27,7 +26,7 @@ impl MessageLog { let step = msg.data.step(); if let Some(existing) = msgs.get(&step) { if existing.msg.data != msg.data { - debug!( + log::debug!( target: "tendermint", "Validator sent multiple messages for the same block + 
round + step" ); diff --git a/coordinator/tributary/tendermint/tests/ext.rs b/coordinator/tributary/tendermint/tests/ext.rs index e3df7e489..3b3cf7c3b 100644 --- a/coordinator/tributary/tendermint/tests/ext.rs +++ b/coordinator/tributary/tendermint/tests/ext.rs @@ -10,6 +10,8 @@ use parity_scale_codec::{Encode, Decode}; use futures_util::sink::SinkExt; use tokio::{sync::RwLock, time::sleep}; +use serai_db::MemDb; + use tendermint_machine::{ ext::*, SignedMessageFor, SyncedBlockSender, SyncedBlockResultReceiver, MessageSender, SlashEvent, TendermintMachine, TendermintHandle, @@ -111,6 +113,8 @@ struct TestNetwork( #[async_trait] impl Network for TestNetwork { + type Db = MemDb; + type ValidatorId = TestValidatorId; type SignatureScheme = TestSignatureScheme; type Weights = TestWeights; @@ -170,7 +174,9 @@ impl TestNetwork { let i = u16::try_from(i).unwrap(); let TendermintHandle { messages, synced_block, synced_block_result, machine } = TendermintMachine::new( + MemDb::new(), TestNetwork(i, arc.clone()), + [0; 32], BlockNumber(1), start_time, TestBlock { id: 1u32.to_le_bytes(), valid: Ok(()) }, diff --git a/crypto/dalek-ff-group/Cargo.toml b/crypto/dalek-ff-group/Cargo.toml index 0fe4bce0b..d8a92194c 100644 --- a/crypto/dalek-ff-group/Cargo.toml +++ b/crypto/dalek-ff-group/Cargo.toml @@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dalek-ff-gr authors = ["Luke Parker "] keywords = ["curve25519", "ed25519", "ristretto", "dalek", "group"] edition = "2021" -rust-version = "1.65" +rust-version = "1.66" [package.metadata.docs.rs] all-features = true diff --git a/crypto/dkg/Cargo.toml b/crypto/dkg/Cargo.toml index a8d3f0a8d..bf308705e 100644 --- a/crypto/dkg/Cargo.toml +++ b/crypto/dkg/Cargo.toml @@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dkg" authors = ["Luke Parker "] keywords = ["dkg", "multisig", "threshold", "ff", "group"] edition = "2021" -rust-version = "1.70" +rust-version = "1.74" 
[package.metadata.docs.rs] all-features = true diff --git a/crypto/dleq/Cargo.toml b/crypto/dleq/Cargo.toml index 7d8c87e9c..c9d525e18 100644 --- a/crypto/dleq/Cargo.toml +++ b/crypto/dleq/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT" repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dleq" authors = ["Luke Parker "] edition = "2021" -rust-version = "1.73" +rust-version = "1.74" [package.metadata.docs.rs] all-features = true diff --git a/crypto/ed448/Cargo.toml b/crypto/ed448/Cargo.toml index 2302d7b3b..b0d0026e2 100644 --- a/crypto/ed448/Cargo.toml +++ b/crypto/ed448/Cargo.toml @@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/crypto/ed448" authors = ["Luke Parker "] keywords = ["ed448", "ff", "group"] edition = "2021" -rust-version = "1.65" +rust-version = "1.66" [package.metadata.docs.rs] all-features = true diff --git a/crypto/ed448/src/backend.rs b/crypto/ed448/src/backend.rs index 83dc3fcaa..db41e8110 100644 --- a/crypto/ed448/src/backend.rs +++ b/crypto/ed448/src/backend.rs @@ -34,7 +34,7 @@ macro_rules! math_op { impl $Op<$Other> for $Value { type Output = $Value; fn $op_fn(self, other: $Other) -> Self::Output { - Self($function(self.0, other.0)) + $Value($function(self.0, other.0)) } } impl $Assign<$Other> for $Value { @@ -45,7 +45,7 @@ macro_rules! math_op { impl<'a> $Op<&'a $Other> for $Value { type Output = $Value; fn $op_fn(self, other: &'a $Other) -> Self::Output { - Self($function(self.0, other.0)) + $Value($function(self.0, other.0)) } } impl<'a> $Assign<&'a $Other> for $Value { @@ -60,7 +60,7 @@ macro_rules! from_wrapper { ($wrapper: ident, $inner: ident, $uint: ident) => { impl From<$uint> for $wrapper { fn from(a: $uint) -> $wrapper { - Self(Residue::new(&$inner::from(a))) + $wrapper(Residue::new(&$inner::from(a))) } } }; @@ -127,7 +127,7 @@ macro_rules! 
field { impl Neg for $FieldName { type Output = $FieldName; fn neg(self) -> $FieldName { - Self(self.0.neg()) + $FieldName(self.0.neg()) } } @@ -141,13 +141,13 @@ macro_rules! field { impl $FieldName { /// Perform an exponentiation. pub fn pow(&self, other: $FieldName) -> $FieldName { - let mut table = [Self(Residue::ONE); 16]; + let mut table = [$FieldName(Residue::ONE); 16]; table[1] = *self; for i in 2 .. 16 { table[i] = table[i - 1] * self; } - let mut res = Self(Residue::ONE); + let mut res = $FieldName(Residue::ONE); let mut bits = 0; for (i, mut bit) in other.to_le_bits().iter_mut().rev().enumerate() { bits <<= 1; @@ -170,8 +170,8 @@ macro_rules! field { } impl Field for $FieldName { - const ZERO: Self = Self(Residue::ZERO); - const ONE: Self = Self(Residue::ONE); + const ZERO: Self = $FieldName(Residue::ZERO); + const ONE: Self = $FieldName(Residue::ONE); fn random(mut rng: impl RngCore) -> Self { let mut bytes = [0; 112]; @@ -188,12 +188,12 @@ macro_rules! field { fn invert(&self) -> CtOption { const NEG_2: $FieldName = - Self($ResidueType::sub(&$ResidueType::ZERO, &$ResidueType::new(&U448::from_u8(2)))); + $FieldName($ResidueType::sub(&$ResidueType::ZERO, &$ResidueType::new(&U448::from_u8(2)))); CtOption::new(self.pow(NEG_2), !self.is_zero()) } fn sqrt(&self) -> CtOption { - const MOD_1_4: $FieldName = Self($ResidueType::new( + const MOD_1_4: $FieldName = $FieldName($ResidueType::new( &$MODULUS.saturating_add(&U448::ONE).wrapping_div(&U448::from_u8(4)), )); @@ -217,14 +217,14 @@ macro_rules! 
field { const TWO_INV: Self = $FieldName($ResidueType::new(&U448::from_u8(2)).invert().0); const MULTIPLICATIVE_GENERATOR: Self = - Self(Residue::new(&U448::from_u8($MULTIPLICATIVE_GENERATOR))); + $FieldName(Residue::new(&U448::from_u8($MULTIPLICATIVE_GENERATOR))); // True for both the Ed448 Scalar field and FieldElement field const S: u32 = 1; // Both fields have their root of unity as -1 const ROOT_OF_UNITY: Self = - Self($ResidueType::sub(&$ResidueType::ZERO, &$ResidueType::new(&U448::ONE))); - const ROOT_OF_UNITY_INV: Self = Self(Self::ROOT_OF_UNITY.0.invert().0); + $FieldName($ResidueType::sub(&$ResidueType::ZERO, &$ResidueType::new(&U448::ONE))); + const ROOT_OF_UNITY_INV: Self = $FieldName(Self::ROOT_OF_UNITY.0.invert().0); const DELTA: Self = $FieldName(Residue::new(&U448::from_le_hex($DELTA))); diff --git a/crypto/frost/Cargo.toml b/crypto/frost/Cargo.toml index 128a36678..b89d5290c 100644 --- a/crypto/frost/Cargo.toml +++ b/crypto/frost/Cargo.toml @@ -38,7 +38,6 @@ ciphersuite = { path = "../ciphersuite", version = "^0.4.1", default-features = multiexp = { path = "../multiexp", version = "0.4", default-features = false, features = ["std", "batch"] } schnorr = { package = "schnorr-signatures", path = "../schnorr", version = "^0.5.1", default-features = false, features = ["std"] } -dleq = { path = "../dleq", version = "^0.4.1", default-features = false, features = ["std", "serialize"] } dkg = { path = "../dkg", version = "^0.5.1", default-features = false, features = ["std"] } diff --git a/crypto/frost/README.md b/crypto/frost/README.md index 278458443..e6ed2b0a0 100644 --- a/crypto/frost/README.md +++ b/crypto/frost/README.md @@ -10,7 +10,7 @@ integrating with existing systems. This library offers ciphersuites compatible with the [IETF draft](https://github.com/cfrg/draft-irtf-cfrg-frost). Currently, version -11 is supported. +15 is supported. 
This library was [audited by Cypher Stack in March 2023](https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf), diff --git a/crypto/frost/src/algorithm.rs b/crypto/frost/src/algorithm.rs index f2da59ea1..0b0abd6c0 100644 --- a/crypto/frost/src/algorithm.rs +++ b/crypto/frost/src/algorithm.rs @@ -39,6 +39,13 @@ pub trait Algorithm: Send + Sync + Clone { /// Obtain the list of nonces to generate, as specified by the generators to create commitments /// against per-nonce. + /// + /// The Algorithm is responsible for all transcripting of these nonce specifications/generators. + /// + /// The prover will be passed the commitments, and the commitments will be sent to all other + /// participants. No guarantees the commitments are internally consistent (have the same discrete + /// logarithm across generators) are made. Any Algorithm which specifies multiple generators for + /// a single nonce must handle that itself. fn nonces(&self) -> Vec>; /// Generate an addendum to FROST's preprocessing stage. 
diff --git a/crypto/frost/src/nonce.rs b/crypto/frost/src/nonce.rs index 8638baff8..f76f9bc48 100644 --- a/crypto/frost/src/nonce.rs +++ b/crypto/frost/src/nonce.rs @@ -1,13 +1,9 @@ // FROST defines its nonce as sum(Di, Ei * bi) -// Monero needs not just the nonce over G however, yet also over H -// Then there is a signature (a modified Chaum Pedersen proof) using multiple nonces at once // -// Accordingly, in order for this library to be robust, it supports generating an arbitrary amount -// of nonces, each against an arbitrary list of generators +// In order for this library to be robust, it supports generating an arbitrary amount of nonces, +// each against an arbitrary list of generators // // Each nonce remains of the form (d, e) and made into a proper nonce with d + (e * b) -// When representations across multiple generators are provided, a DLEq proof is also provided to -// confirm their integrity use core::ops::Deref; use std::{ @@ -24,32 +20,8 @@ use transcript::Transcript; use ciphersuite::group::{ff::PrimeField, Group, GroupEncoding}; use multiexp::multiexp_vartime; -use dleq::MultiDLEqProof; - use crate::{curve::Curve, Participant}; -// Transcript used to aggregate binomial nonces for usage within a single DLEq proof. -fn aggregation_transcript(context: &[u8]) -> T { - let mut transcript = T::new(b"FROST DLEq Aggregation v0.5"); - transcript.append_message(b"context", context); - transcript -} - -// Every participant proves for their commitments at the start of the protocol -// These proofs are verified sequentially, requiring independent transcripts -// In order to make these transcripts more robust, the FROST transcript (at time of preprocess) is -// challenged in order to create a commitment to it, carried in each independent transcript -// (effectively forking the original transcript) -// -// For FROST, as defined by the IETF, this will do nothing (and this transcript will never even be -// constructed). 
For higher level protocols, the transcript may have contextual info these proofs -// will then be bound to -fn dleq_transcript(context: &[u8]) -> T { - let mut transcript = T::new(b"FROST Commitments DLEq v0.5"); - transcript.append_message(b"context", context); - transcript -} - // Each nonce is actually a pair of random scalars, notated as d, e under the FROST paper // This is considered a single nonce as r = d + be #[derive(Clone, Zeroize)] @@ -69,7 +41,7 @@ impl GeneratorCommitments { } } -// A single nonce's commitments and relevant proofs +// A single nonce's commitments #[derive(Clone, PartialEq, Eq)] pub(crate) struct NonceCommitments { // Called generators as these commitments are indexed by generator later on @@ -121,12 +93,6 @@ impl NonceCommitments { t.append_message(b"commitment_E", commitments.0[1].to_bytes()); } } - - fn aggregation_factor(&self, context: &[u8]) -> C::F { - let mut transcript = aggregation_transcript::(context); - self.transcript(&mut transcript); - ::hash_to_F(b"dleq_aggregation", transcript.challenge(b"binding").as_ref()) - } } /// Commitments for all the nonces across all their generators. 
@@ -135,51 +101,26 @@ pub(crate) struct Commitments { // Called nonces as these commitments are indexed by nonce // So to get the commitments for the first nonce, it'd be commitments.nonces[0] pub(crate) nonces: Vec>, - // DLEq Proof proving that each set of commitments were generated using a single pair of discrete - // logarithms - pub(crate) dleq: Option>, } impl Commitments { - pub(crate) fn new( + pub(crate) fn new( rng: &mut R, secret_share: &Zeroizing, planned_nonces: &[Vec], - context: &[u8], ) -> (Vec>, Commitments) { let mut nonces = vec![]; let mut commitments = vec![]; - let mut dleq_generators = vec![]; - let mut dleq_nonces = vec![]; for generators in planned_nonces { let (nonce, these_commitments): (Nonce, _) = NonceCommitments::new(&mut *rng, secret_share, generators); - if generators.len() > 1 { - dleq_generators.push(generators.clone()); - dleq_nonces.push(Zeroizing::new( - (these_commitments.aggregation_factor::(context) * nonce.0[1].deref()) + - nonce.0[0].deref(), - )); - } - nonces.push(nonce); commitments.push(these_commitments); } - let dleq = if !dleq_generators.is_empty() { - Some(MultiDLEqProof::prove( - rng, - &mut dleq_transcript::(context), - &dleq_generators, - &dleq_nonces, - )) - } else { - None - }; - - (nonces, Commitments { nonces: commitments, dleq }) + (nonces, Commitments { nonces: commitments }) } pub(crate) fn transcript(&self, t: &mut T) { @@ -187,58 +128,20 @@ impl Commitments { for nonce in &self.nonces { nonce.transcript(t); } - - // Transcripting the DLEqs implicitly transcripts the exact generators used for the nonces in - // an exact order - // This means it shouldn't be possible for variadic generators to cause conflicts - if let Some(dleq) = &self.dleq { - t.append_message(b"dleq", dleq.serialize()); - } } - pub(crate) fn read( - reader: &mut R, - generators: &[Vec], - context: &[u8], - ) -> io::Result { + pub(crate) fn read(reader: &mut R, generators: &[Vec]) -> io::Result { let nonces = (0 .. 
generators.len()) .map(|i| NonceCommitments::read(reader, &generators[i])) .collect::>, _>>()?; - let mut dleq_generators = vec![]; - let mut dleq_nonces = vec![]; - for (generators, nonce) in generators.iter().cloned().zip(&nonces) { - if generators.len() > 1 { - let binding = nonce.aggregation_factor::(context); - let mut aggregated = vec![]; - for commitments in &nonce.generators { - aggregated.push(commitments.0[0] + (commitments.0[1] * binding)); - } - dleq_generators.push(generators); - dleq_nonces.push(aggregated); - } - } - - let dleq = if !dleq_generators.is_empty() { - let dleq = MultiDLEqProof::read(reader, dleq_generators.len())?; - dleq - .verify(&mut dleq_transcript::(context), &dleq_generators, &dleq_nonces) - .map_err(|_| io::Error::other("invalid DLEq proof"))?; - Some(dleq) - } else { - None - }; - - Ok(Commitments { nonces, dleq }) + Ok(Commitments { nonces }) } pub(crate) fn write(&self, writer: &mut W) -> io::Result<()> { for nonce in &self.nonces { nonce.write(writer)?; } - if let Some(dleq) = &self.dleq { - dleq.write(writer)?; - } Ok(()) } } diff --git a/crypto/frost/src/sign.rs b/crypto/frost/src/sign.rs index a716dc583..5115244f1 100644 --- a/crypto/frost/src/sign.rs +++ b/crypto/frost/src/sign.rs @@ -125,14 +125,8 @@ impl> AlgorithmMachine { let mut params = self.params; let mut rng = ChaCha20Rng::from_seed(*seed.0); - // Get a challenge to the existing transcript for use when proving for the commitments - let commitments_challenge = params.algorithm.transcript().challenge(b"commitments"); - let (nonces, commitments) = Commitments::new::<_, A::Transcript>( - &mut rng, - params.keys.secret_share(), - ¶ms.algorithm.nonces(), - commitments_challenge.as_ref(), - ); + let (nonces, commitments) = + Commitments::new::<_>(&mut rng, params.keys.secret_share(), ¶ms.algorithm.nonces()); let addendum = params.algorithm.preprocess_addendum(&mut rng, ¶ms.keys); let preprocess = Preprocess { commitments, addendum }; @@ -141,27 +135,18 @@ impl> 
AlgorithmMachine { let mut blame_entropy = [0; 32]; rng.fill_bytes(&mut blame_entropy); ( - AlgorithmSignMachine { - params, - seed, - commitments_challenge, - nonces, - preprocess: preprocess.clone(), - blame_entropy, - }, + AlgorithmSignMachine { params, seed, nonces, preprocess: preprocess.clone(), blame_entropy }, preprocess, ) } #[cfg(any(test, feature = "tests"))] pub(crate) fn unsafe_override_preprocess( - mut self, + self, nonces: Vec>, preprocess: Preprocess, ) -> AlgorithmSignMachine { AlgorithmSignMachine { - commitments_challenge: self.params.algorithm.transcript().challenge(b"commitments"), - params: self.params, seed: CachedPreprocess(Zeroizing::new([0; 32])), @@ -255,8 +240,6 @@ pub struct AlgorithmSignMachine> { params: Params, seed: CachedPreprocess, - #[zeroize(skip)] - commitments_challenge: ::Challenge, pub(crate) nonces: Vec>, // Skips the preprocess due to being too large a bound to feasibly enforce on users #[zeroize(skip)] @@ -285,11 +268,7 @@ impl> SignMachine for AlgorithmSignMachi fn read_preprocess(&self, reader: &mut R) -> io::Result { Ok(Preprocess { - commitments: Commitments::read::<_, A::Transcript>( - reader, - &self.params.algorithm.nonces(), - self.commitments_challenge.as_ref(), - )?, + commitments: Commitments::read::<_>(reader, &self.params.algorithm.nonces())?, addendum: self.params.algorithm.read_addendum(reader)?, }) } @@ -383,9 +362,7 @@ impl> SignMachine for AlgorithmSignMachi rho_transcript.append_message(b"message", C::hash_msg(msg)); rho_transcript.append_message( b"preprocesses", - &C::hash_commitments( - self.params.algorithm.transcript().challenge(b"preprocesses").as_ref(), - ), + C::hash_commitments(self.params.algorithm.transcript().challenge(b"preprocesses").as_ref()), ); // Generate the per-signer binding factors diff --git a/crypto/frost/src/tests/mod.rs b/crypto/frost/src/tests/mod.rs index e457c7037..f93a5fbf6 100644 --- a/crypto/frost/src/tests/mod.rs +++ b/crypto/frost/src/tests/mod.rs @@ -12,7 +12,7 @@ use 
crate::{ /// Tests for the nonce handling code. pub mod nonces; -use nonces::{test_multi_nonce, test_invalid_commitment, test_invalid_dleq_proof}; +use nonces::test_multi_nonce; /// Vectorized test suite to ensure consistency. pub mod vectors; @@ -267,6 +267,4 @@ pub fn test_ciphersuite>(rng: &mut test_schnorr_blame::(rng); test_multi_nonce::(rng); - test_invalid_commitment::(rng); - test_invalid_dleq_proof::(rng); } diff --git a/crypto/frost/src/tests/nonces.rs b/crypto/frost/src/tests/nonces.rs index ee060befd..7b1480e91 100644 --- a/crypto/frost/src/tests/nonces.rs +++ b/crypto/frost/src/tests/nonces.rs @@ -9,14 +9,12 @@ use transcript::{Transcript, RecommendedTranscript}; use ciphersuite::group::{ff::Field, Group, GroupEncoding}; -use dleq::MultiDLEqProof; pub use dkg::tests::{key_gen, recover_key}; use crate::{ Curve, Participant, ThresholdView, ThresholdKeys, FrostError, algorithm::Algorithm, - sign::{Writable, SignMachine}, - tests::{algorithm_machines, preprocess, sign}, + tests::{algorithm_machines, sign}, }; #[derive(Clone)] @@ -157,75 +155,3 @@ pub fn test_multi_nonce(rng: &mut R) { let machines = algorithm_machines(&mut *rng, &MultiNonce::::new(), &keys); sign(&mut *rng, &MultiNonce::::new(), keys.clone(), machines, &[]); } - -/// Test malleating a commitment for a nonce across generators causes the preprocess to error. 
-pub fn test_invalid_commitment(rng: &mut R) { - let keys = key_gen::(&mut *rng); - let machines = algorithm_machines(&mut *rng, &MultiNonce::::new(), &keys); - let (machines, mut preprocesses) = preprocess(&mut *rng, machines, |_, _| {}); - - // Select a random participant to give an invalid commitment - let participants = preprocesses.keys().collect::>(); - let faulty = *participants - [usize::try_from(rng.next_u64() % u64::try_from(participants.len()).unwrap()).unwrap()]; - - // Grab their preprocess - let mut preprocess = preprocesses.remove(&faulty).unwrap(); - - // Mutate one of the commitments - let nonce = - preprocess.commitments.nonces.get_mut(usize::try_from(rng.next_u64()).unwrap() % 2).unwrap(); - let generators_len = nonce.generators.len(); - nonce.generators[usize::try_from(rng.next_u64()).unwrap() % generators_len].0 - [usize::try_from(rng.next_u64()).unwrap() % 2] = C::G::random(&mut *rng); - - // The commitments are validated at time of deserialization (read_preprocess) - // Accordingly, serialize it and read it again to make sure that errors - assert!(machines - .iter() - .next() - .unwrap() - .1 - .read_preprocess::<&[u8]>(&mut preprocess.serialize().as_ref()) - .is_err()); -} - -/// Test malleating the DLEq proof for a preprocess causes it to error. 
-pub fn test_invalid_dleq_proof(rng: &mut R) { - let keys = key_gen::(&mut *rng); - let machines = algorithm_machines(&mut *rng, &MultiNonce::::new(), &keys); - let (machines, mut preprocesses) = preprocess(&mut *rng, machines, |_, _| {}); - - // Select a random participant to give an invalid DLEq proof - let participants = preprocesses.keys().collect::>(); - let faulty = *participants - [usize::try_from(rng.next_u64() % u64::try_from(participants.len()).unwrap()).unwrap()]; - - // Invalidate it by replacing it with a completely different proof - let dlogs = [Zeroizing::new(C::F::random(&mut *rng)), Zeroizing::new(C::F::random(&mut *rng))]; - let mut preprocess = preprocesses.remove(&faulty).unwrap(); - preprocess.commitments.dleq = Some(MultiDLEqProof::prove( - &mut *rng, - &mut RecommendedTranscript::new(b"Invalid DLEq Proof"), - &nonces::(), - &dlogs, - )); - - assert!(machines - .iter() - .next() - .unwrap() - .1 - .read_preprocess::<&[u8]>(&mut preprocess.serialize().as_ref()) - .is_err()); - - // Also test None for a proof will cause an error - preprocess.commitments.dleq = None; - assert!(machines - .iter() - .next() - .unwrap() - .1 - .read_preprocess::<&[u8]>(&mut preprocess.serialize().as_ref()) - .is_err()); -} diff --git a/crypto/frost/src/tests/vectors.rs b/crypto/frost/src/tests/vectors.rs index 3356a6cdd..7be6478af 100644 --- a/crypto/frost/src/tests/vectors.rs +++ b/crypto/frost/src/tests/vectors.rs @@ -14,7 +14,7 @@ use ciphersuite::group::{ff::PrimeField, GroupEncoding}; use crate::{ curve::Curve, Participant, ThresholdCore, ThresholdKeys, - algorithm::{IetfTranscript, Hram, IetfSchnorr}, + algorithm::{Hram, IetfSchnorr}, sign::{ Writable, Nonce, GeneratorCommitments, NonceCommitments, Commitments, Preprocess, PreprocessMachine, SignMachine, SignatureMachine, AlgorithmMachine, @@ -191,7 +191,6 @@ pub fn test_with_vectors>( nonces: vec![NonceCommitments { generators: vec![GeneratorCommitments(these_commitments)], }], - dleq: None, }, addendum: (), 
}; @@ -301,12 +300,8 @@ pub fn test_with_vectors>( } // Also test it at the Commitments level - let (generated_nonces, commitments) = Commitments::::new::<_, IetfTranscript>( - &mut TransparentRng(randomness), - &share, - &[vec![C::generator()]], - &[], - ); + let (generated_nonces, commitments) = + Commitments::::new::<_>(&mut TransparentRng(randomness), &share, &[vec![C::generator()]]); assert_eq!(generated_nonces.len(), 1); assert_eq!(generated_nonces[0].0, [nonces[0].clone(), nonces[1].clone()]); diff --git a/crypto/schnorr/src/tests/rfc8032.rs b/crypto/schnorr/src/tests/rfc8032.rs index 991cf450b..418f4c0e7 100644 --- a/crypto/schnorr/src/tests/rfc8032.rs +++ b/crypto/schnorr/src/tests/rfc8032.rs @@ -52,7 +52,7 @@ fn test_rfc8032() { SchnorrSignature::::read::<&[u8]>(&mut hex::decode(vector.2).unwrap().as_ref()) .unwrap(); let hram = Sha512::new_with_prefix( - &[sig.R.to_bytes().as_ref(), &key.to_bytes(), &hex::decode(vector.1).unwrap()].concat(), + [sig.R.to_bytes().as_ref(), &key.to_bytes(), &hex::decode(vector.1).unwrap()].concat(), ); assert!(sig.verify(key, Scalar::from_hash(hram))); } diff --git a/crypto/transcript/README.md b/crypto/transcript/README.md index a8772a0ae..171246934 100644 --- a/crypto/transcript/README.md +++ b/crypto/transcript/README.md @@ -3,9 +3,9 @@ Flexible Transcript is a crate offering: - `Transcript`, a trait offering functions transcripts should implement. - `DigestTranscript`, a competent transcript format instantiated against a -provided hash function. + provided hash function. - `MerlinTranscript`, a wrapper of `merlin` into the trait (available via the -`merlin` feature). + `merlin` feature). - `RecommendedTranscript`, a transcript recommended for usage in applications. Currently, this is `DigestTranscript` (available via the `recommended` feature). 
diff --git a/deny.toml b/deny.toml index 2e516b996..a3e0e3d93 100644 --- a/deny.toml +++ b/deny.toml @@ -44,6 +44,7 @@ exceptions = [ { allow = ["AGPL-3.0"], name = "serai-env" }, { allow = ["AGPL-3.0"], name = "ethereum-serai" }, + { allow = ["AGPL-3.0"], name = "serai-ethereum-relayer" }, { allow = ["AGPL-3.0"], name = "serai-message-queue" }, @@ -99,6 +100,7 @@ allow-git = [ "https://github.com/rust-lang-nursery/lazy-static.rs", "https://github.com/serai-dex/substrate-bip39", "https://github.com/serai-dex/substrate", + "https://github.com/alloy-rs/alloy", "https://github.com/monero-rs/base58-monero", - "https://github.com/kayabaNerve/dockertest-rs", + "https://github.com/orcalabs/dockertest-rs", ] diff --git a/docs/.gitignore b/docs/.gitignore new file mode 100644 index 000000000..be76315dc --- /dev/null +++ b/docs/.gitignore @@ -0,0 +1,7 @@ +_site/ +.sass-cache/ +.jekyll-cache/ +.jekyll-metadata + +.bundle/ +vendor/ diff --git a/docs/.ruby-version b/docs/.ruby-version new file mode 100644 index 000000000..8c50098d8 --- /dev/null +++ b/docs/.ruby-version @@ -0,0 +1 @@ +3.1 diff --git a/docs/Gemfile b/docs/Gemfile new file mode 100644 index 000000000..0b800b1fc --- /dev/null +++ b/docs/Gemfile @@ -0,0 +1,4 @@ +source 'https://rubygems.org' + +gem "jekyll", "~> 4.3.3" +gem "just-the-docs", "0.8.1" diff --git a/docs/Gemfile.lock b/docs/Gemfile.lock new file mode 100644 index 000000000..f6d6627d1 --- /dev/null +++ b/docs/Gemfile.lock @@ -0,0 +1,84 @@ +GEM + remote: https://rubygems.org/ + specs: + addressable (2.8.6) + public_suffix (>= 2.0.2, < 6.0) + colorator (1.1.0) + concurrent-ruby (1.2.3) + em-websocket (0.5.3) + eventmachine (>= 0.12.9) + http_parser.rb (~> 0) + eventmachine (1.2.7) + ffi (1.16.3) + forwardable-extended (2.6.0) + google-protobuf (3.25.3-x86_64-linux) + http_parser.rb (0.8.0) + i18n (1.14.5) + concurrent-ruby (~> 1.0) + jekyll (4.3.3) + addressable (~> 2.4) + colorator (~> 1.0) + em-websocket (~> 0.5) + i18n (~> 1.0) + jekyll-sass-converter 
(>= 2.0, < 4.0) + jekyll-watch (~> 2.0) + kramdown (~> 2.3, >= 2.3.1) + kramdown-parser-gfm (~> 1.0) + liquid (~> 4.0) + mercenary (>= 0.3.6, < 0.5) + pathutil (~> 0.9) + rouge (>= 3.0, < 5.0) + safe_yaml (~> 1.0) + terminal-table (>= 1.8, < 4.0) + webrick (~> 1.7) + jekyll-include-cache (0.2.1) + jekyll (>= 3.7, < 5.0) + jekyll-sass-converter (3.0.0) + sass-embedded (~> 1.54) + jekyll-seo-tag (2.8.0) + jekyll (>= 3.8, < 5.0) + jekyll-watch (2.2.1) + listen (~> 3.0) + just-the-docs (0.8.1) + jekyll (>= 3.8.5) + jekyll-include-cache + jekyll-seo-tag (>= 2.0) + rake (>= 12.3.1) + kramdown (2.4.0) + rexml + kramdown-parser-gfm (1.1.0) + kramdown (~> 2.0) + liquid (4.0.4) + listen (3.9.0) + rb-fsevent (~> 0.10, >= 0.10.3) + rb-inotify (~> 0.9, >= 0.9.10) + mercenary (0.4.0) + pathutil (0.16.2) + forwardable-extended (~> 2.6) + public_suffix (5.0.5) + rake (13.2.1) + rb-fsevent (0.11.2) + rb-inotify (0.11.1) + ffi (~> 1.0) + rexml (3.2.8) + strscan (>= 3.0.9) + rouge (4.2.1) + safe_yaml (1.0.5) + sass-embedded (1.63.6) + google-protobuf (~> 3.23) + rake (>= 13.0.0) + strscan (3.1.0) + terminal-table (3.0.2) + unicode-display_width (>= 1.1.1, < 3) + unicode-display_width (2.5.0) + webrick (1.8.1) + +PLATFORMS + x86_64-linux + +DEPENDENCIES + jekyll (~> 4.3.3) + just-the-docs (= 0.8.1) + +BUNDLED WITH + 2.2.5 diff --git a/docs/_config.yml b/docs/_config.yml new file mode 100644 index 000000000..75c8f131e --- /dev/null +++ b/docs/_config.yml @@ -0,0 +1,14 @@ +title: Serai Documentation +description: Documentation for the Serai protocol. 
+theme: just-the-docs + +url: https://docs.serai.exchange + +callouts: + warning: + title: Warning + color: red + + definition: + title: Definition + color: blue diff --git a/docs/amm/index.md b/docs/amm/index.md new file mode 100644 index 000000000..678f6ee95 --- /dev/null +++ b/docs/amm/index.md @@ -0,0 +1,19 @@ +--- +title: Automatic Market Makers +layout: default +nav_order: 2 +--- + +# Automatic Market Makers + +*text on how AMMs work* + +Serai uses a symmetric liquidity pool with the `xy=k` formula. + +Concentrated liquidity would presumably offer less slippage on swaps, and there are +[discussions to evolve to a concentrated liquidity/order book environment](https://github.com/serai-dex/serai/issues/420). +Unfortunately, it effectively requires active management of provided liquidity. +This disenfranchises small liquidity providers who may not have the knowledge +and resources necessary to perform such management. Since Serai is expected to +have a community-bootstrapped start, starting with concentrated liquidity would +accordingly be contradictory. diff --git a/docs/cross_chain/index.md b/docs/cross_chain/index.md new file mode 100644 index 000000000..a7b6a480f --- /dev/null +++ b/docs/cross_chain/index.md @@ -0,0 +1,7 @@ +--- +title: Cross-Chain Architecture +layout: default +nav_order: 3 +--- + +# Cross-Chain Architecture diff --git a/docs/economics/genesis.md b/docs/economics/genesis.md new file mode 100644 index 000000000..d4dbef042 --- /dev/null +++ b/docs/economics/genesis.md @@ -0,0 +1,6 @@ +--- +title: Genesis +layout: default +nav_order: 1 +parent: Economics +--- diff --git a/docs/economics/index.md b/docs/economics/index.md new file mode 100644 index 000000000..c8d3bde4b --- /dev/null +++ b/docs/economics/index.md @@ -0,0 +1,45 @@ +--- +title: Economics +layout: default +nav_order: 4 +has_children: true +--- + +# Economics + +Serai's economics change depending on which of three eras is currently +occurring. 
+ +## Genesis Era + +The network starts with the "Genesis" era, where the goal of the network is to +attract the liquidity necessary to facilitate swaps. This period will last for +30 days and will let anyone add liquidity to the protocol. Only with its +conclusion will SRI start being distributed. + +After the Genesis era, the network enters the "Pre-Economic Security" era. + +## Pre-Economic Security + +{: .definition-title } +> Definition: Economic Security +> +> Economic security is derived from it being unprofitable to misbehave. +> This is by the economic penalty which is presumed to occur upon misbehavior +> exceeding the value which would presumably be gained. +> Accordingly, rational actors would behave properly, causing the protocol to +> maintain its integrity. +> +> For Serai specifically, the stake required to produce unintended signatures +> must exceed the value accessible via producing unintended signatures. + +With liquidity provided, and swaps enabled, the goal is to have validators stake +sufficiently for economic security to be achieved. This is primarily via +offering freshly minted, staked SRI to would-be validators who decide to swap +external coins for their stake. + +## Post-Economic Security + +Having achieved economic security, the protocol changes its economics one last +time (barring future upgrades to the protocol) to a 'normal' state of +operations. 
diff --git a/docs/economics/post.md b/docs/economics/post.md new file mode 100644 index 000000000..4a41bd196 --- /dev/null +++ b/docs/economics/post.md @@ -0,0 +1,6 @@ +--- +title: Post-Economic Security +layout: default +nav_order: 3 +parent: Economics +--- diff --git a/docs/economics/pre.md b/docs/economics/pre.md new file mode 100644 index 000000000..d891c51d4 --- /dev/null +++ b/docs/economics/pre.md @@ -0,0 +1,6 @@ +--- +title: Pre-Economic Security +layout: default +nav_order: 2 +parent: Economics +--- diff --git a/docs/index.md b/docs/index.md new file mode 100644 index 000000000..c7685d13f --- /dev/null +++ b/docs/index.md @@ -0,0 +1,32 @@ +--- +title: Home +layout: home +nav_order: 1 +--- + +{: .warning } +This documentation site is still under active development and may have missing +sections, errors, and typos. Even once this documentation site is 'complete', it +may become out-of-date (as Serai is an evolving protocol yet to release) or have +minor errors. + +# Serai + +Serai is a fairly launched cross-chain decentralized exchange, integrating +Bitcoin (BTC), Ethereum (ETH, DAI), and Monero (XMR). + +The Serai mainnet has yet to launch, and until then, all details are subject to +change. + +Prior to the Serai mainnet launching, SRI, Serai's native coin, will not +exist. As a fairly launched project, SRI will have no ICO, no IEO, no presale, +no developers' tax/fund, and no airdrop for out-of-mainnet activity. + +Out-of-mainnet activity includes: + +- Being a community member (such as on Discord or on Twitter) +- Participating in testnets +- Contributing to the GitHub + +None of these will be awarded any airdrop. All distributions of SRI will happen +on-chain per the protocols' defined rules, based on on-chain activity. 
diff --git a/docs/infrastructure/coordinator.md b/docs/infrastructure/coordinator.md new file mode 100644 index 000000000..cf6acacab --- /dev/null +++ b/docs/infrastructure/coordinator.md @@ -0,0 +1,21 @@ +--- +title: Coordinator +layout: default +nav_order: 3 +parent: Infrastructure +--- + +# Coordinator + +The coordinator is a local service which communicates with other validators' +coordinators. It provides a verifiable broadcast layer for various consensus +messages, such as agreement on external blockchains, key generation and signing +protocols, and the latest Serai block. + +The verifiable broadcast layer is implemented via a blockchain, referred to as a +Tributary, which is agreed upon using Tendermint consensus. This consensus is +not as offered by Tendermint Core/CometBFT, as used in the Cosmos SDK +(historically/presently), yet by our own implementation designed to be used as a +library and not as another daemon. Tributaries are ephemeral, only used by the +current validators, and deleted upon the next epoch. All of the results from it +are verifiable via the external network and the Serai blockchain alone. diff --git a/docs/infrastructure/index.md b/docs/infrastructure/index.md new file mode 100644 index 000000000..2db1a7913 --- /dev/null +++ b/docs/infrastructure/index.md @@ -0,0 +1,6 @@ +--- +title: Infrastructure +layout: default +nav_order: 6 +has_children: true +--- diff --git a/docs/infrastructure/message_queue.md b/docs/infrastructure/message_queue.md new file mode 100644 index 000000000..4ce21277f --- /dev/null +++ b/docs/infrastructure/message_queue.md @@ -0,0 +1,29 @@ +--- +title: Message Queue +layout: default +nav_order: 1 +parent: Infrastructure +--- + +# Message Queue + +The Message Queue is a microservice to authenticate and relay messages between +services. It offers just three functions: + +1) Queue a message. + +2) Receive the next message. + +3) Acknowledge a message, removing it from the queue. 
+ +This ensures messages are delivered between services, with their order +preserved. This also ensures that if a service reboots while handling a message, +it'll still handle the message once rebooted (and the message will not be lost). + +The Message Queue also aims to offer increased liveliness and performance. +If services directly communicated, the rate at which one service could operate +would always be bottlenecked by the service it communicates with. If the +receiving service ever went offline, the sending service wouldn't be able to +deliver messages until the receiver came back online, halting its own work. By +defining a dedicated microservice, with a lack of complex logic, it's much less +likely to go offline or suffer from degraded performance. diff --git a/docs/infrastructure/processor.md b/docs/infrastructure/processor.md new file mode 100644 index 000000000..ca49120e5 --- /dev/null +++ b/docs/infrastructure/processor.md @@ -0,0 +1,21 @@ +--- +title: Processor +layout: default +nav_order: 2 +parent: Infrastructure +--- + +# Processor + +The processor performs several important tasks with regards to the external +network. Each of them are documented in the following sections. 
+ +## Key Generation + +## Scanning + +## Signing Batches + +## Planning Transactions + +## Cosigning diff --git a/docs/infrastructure/serai.md b/docs/infrastructure/serai.md new file mode 100644 index 000000000..bbd04aa0f --- /dev/null +++ b/docs/infrastructure/serai.md @@ -0,0 +1,6 @@ +--- +title: Serai +layout: default +nav_order: 4 +parent: Infrastructure +--- diff --git a/docs/integrating/index.md b/docs/integrating/index.md new file mode 100644 index 000000000..58a6ea067 --- /dev/null +++ b/docs/integrating/index.md @@ -0,0 +1,6 @@ +--- +title: Integrating with Serai +layout: default +nav_order: 7 +has_children: true +--- diff --git a/docs/integrations/Ethereum.md b/docs/integrations/Ethereum.md deleted file mode 100644 index e66a1f5b4..000000000 --- a/docs/integrations/Ethereum.md +++ /dev/null @@ -1,38 +0,0 @@ -# Ethereum - -### Addresses - -Ethereum addresses are 20-byte hashes. - -### In Instructions - -Ethereum In Instructions are present via being appended to the calldata -transferring funds to Serai. `origin` is automatically set to the party from -which funds are being transferred. For an ERC20, this is `from`. For ETH, this -is the caller. - -### Out Instructions - -`data` is limited to 512 bytes. - -If `data` is provided, the Ethereum Router will call a contract-calling child -contract in order to sandbox it. The first byte of `data` designates which child -child contract to call. After this byte is read, `data` is solely considered as -`data`, post its first byte. The child contract is sent the funds before this -call is performed. - -##### Child Contract 0 - -This contract is intended to enable connecting with other protocols, and should -be used to convert withdrawn assets to other assets on Ethereum. - - 1) Transfers the asset to `destination`. - 2) Calls `destination` with `data`. - -##### Child Contract 1 - -This contract is intended to enable authenticated calls from Serai. - - 1) Transfers the asset to `destination`. 
- 2) Calls `destination` with `data[.. 4], serai_address, data[4 ..]`, where -`serai_address` is the address which triggered this Out Instruction. diff --git a/docs/protocol_changes/index.md b/docs/protocol_changes/index.md new file mode 100644 index 000000000..263e2cd78 --- /dev/null +++ b/docs/protocol_changes/index.md @@ -0,0 +1,44 @@ +--- +title: Protocol Changes +layout: default +nav_order: 5 +--- + +# Protocol Changes + +The protocol has no central authority nor organization nor actors (such as +liquidity providers/validators) who can compel new protocol rules. The Serai +protocol is as-written with all granted functionality and declared rules +present. + +Validators are explicitly granted the ability to signal for two things to occur: + +### 1) Halt another validator set. + +This will presumably occur if another validator set turns malicious and is the +expected incident response in order to apply an economic penalty of ideally +greater value than damage wrecked. Halting a validator set prevents further +publication of `Batch`s, preventing improper actions on the Serai blockchain, +and preventing validators from unstaking (as unstaking only occurs once future +validator sets have accepted responsibility, and accepting responsibility +requires `Batch` publication). This effectively burns the malicious validators' +stake. + +### 2) Retire the protocol. + +A supermajority of validators may favor a signal (an opaque 32-byte ID). A +common signal gaining sufficient favor will cause the protocol to stop producing +blocks in two weeks. + +Nodes will presumably, as individual entities, hard fork to new consensus rules. +These rules presumably will remove the rule to stop producing blocks in two +weeks, they may declare new validators, and they may declare new functionality +entirely. 
+ +While nodes individually hard fork, across every hard fork the state of the +various `sriXYZ` coins (such as `sriBTC`, `sriETH`, `sriDAI`, and `sriXMR`) +remains intact (unless the new rules modify such state). These coins can still +be burned with instructions (unless the new rules prevent that) and if a +validator set doesn't send `XYZ` as expected, they can be halted (effectively +burning their `SRI` stake). Accordingly, every node decides if, and how, to +participate going forward, with the abilities and powers they declare themselves to have. diff --git a/docs/protocol_changes/signals.md b/docs/protocol_changes/signals.md new file mode 100644 index 000000000..e69de29bb diff --git a/docs/validator/index.md b/docs/validator/index.md new file mode 100644 index 000000000..753d5976b --- /dev/null +++ b/docs/validator/index.md @@ -0,0 +1,6 @@ +--- +title: Running a Validator +layout: default +nav_order: 8 +has_children: true +--- diff --git a/orchestration/dev/coins/ethereum-relayer/.folder b/orchestration/dev/coins/ethereum-relayer/.folder new file mode 100644 index 000000000..675d44382 --- /dev/null +++ b/orchestration/dev/coins/ethereum-relayer/.folder @@ -0,0 +1,11 @@ +#!/bin/sh + +RPC_USER="${RPC_USER:=serai}" +RPC_PASS="${RPC_PASS:=seraidex}" + +# Run Monero +monerod --non-interactive --regtest --offline --fixed-difficulty=1 \ + --no-zmq --rpc-bind-ip=0.0.0.0 --rpc-bind-port=18081 --confirm-external-bind \ + --rpc-access-control-origins "*" --disable-rpc-ban \ + --rpc-login=$RPC_USER:$RPC_PASS \ + $1 diff --git a/orchestration/dev/coins/ethereum/run.sh b/orchestration/dev/coins/ethereum/run.sh index 0b86ff69d..464f4c6ee 100755 --- a/orchestration/dev/coins/ethereum/run.sh +++ b/orchestration/dev/coins/ethereum/run.sh @@ -1,6 +1,3 @@ #!/bin/sh -geth --dev --networkid 5208 --datadir "eth-devnet" \ - --http --http.api "web3,net,eth,miner" \ - --http.addr 0.0.0.0 --http.port 8545 \ - --http.vhosts="*" --http.corsdomain "*" +~/.foundry/bin/anvil --host 0.0.0.0 --no-cors
--no-mining --slots-in-an-epoch 32 --silent diff --git a/orchestration/runtime/Dockerfile b/orchestration/runtime/Dockerfile index 4df698426..2801f070b 100644 --- a/orchestration/runtime/Dockerfile +++ b/orchestration/runtime/Dockerfile @@ -1,17 +1,20 @@ -FROM --platform=linux/amd64 rust:1.76.0-slim-bookworm as builder +# rust:1.77.0-slim-bookworm as of March 22nd, 2024 (GMT) +FROM --platform=linux/amd64 rust@sha256:e785e4aa81f87bc1ee02fa2026ffbc491e0410bdaf6652cea74884373f452664 as deterministic # Move to a Debian package snapshot RUN rm -rf /etc/apt/sources.list.d/debian.sources && \ rm -rf /var/lib/apt/lists/* && \ - echo "deb [arch=amd64] http://snapshot.debian.org/archive/debian/20240201T000000Z bookworm main" > /etc/apt/sources.list && \ + echo "deb [arch=amd64] http://snapshot.debian.org/archive/debian/20240301T000000Z bookworm main" > /etc/apt/sources.list && \ apt update # Install dependencies -RUN apt install clang -y +RUN apt update && apt upgrade && apt install clang -y # Add the wasm toolchain RUN rustup target add wasm32-unknown-unknown +FROM deterministic + # Add files for build ADD patches /serai/patches ADD common /serai/common @@ -30,3 +33,8 @@ ADD Cargo.lock /serai ADD AGPL-3.0 /serai WORKDIR /serai + +# Build the runtime, copying it to the volume if it exists +CMD cargo build --release -p serai-runtime && \ + mkdir -p /volume && \ + cp /serai/target/release/wbuild/serai-runtime/serai_runtime.wasm /volume/serai.wasm diff --git a/orchestration/src/coins/bitcoin.rs b/orchestration/src/coins/bitcoin.rs index 13c86bad7..94686244b 100644 --- a/orchestration/src/coins/bitcoin.rs +++ b/orchestration/src/coins/bitcoin.rs @@ -1,4 +1,4 @@ -use std::{path::Path}; +use std::path::Path; use crate::{Network, Os, mimalloc, os, write_dockerfile}; @@ -7,7 +7,7 @@ pub fn bitcoin(orchestration_path: &Path, network: Network) { const DOWNLOAD_BITCOIN: &str = r#" FROM alpine:latest as bitcoin -ENV BITCOIN_VERSION=26.0 +ENV BITCOIN_VERSION=27.0 RUN apk --no-cache add 
git gnupg @@ -43,8 +43,7 @@ CMD ["/run.sh"] network.label() ); - let run = - os(Os::Debian, "RUN mkdir /volume && chown bitcoin:bitcoin /volume", "bitcoin") + &run_bitcoin; + let run = os(Os::Debian, "", "bitcoin") + &run_bitcoin; let res = setup + &run; let mut bitcoin_path = orchestration_path.to_path_buf(); diff --git a/orchestration/src/coins/ethereum.rs b/orchestration/src/coins/ethereum.rs deleted file mode 100644 index 2e15d3709..000000000 --- a/orchestration/src/coins/ethereum.rs +++ /dev/null @@ -1,5 +0,0 @@ -use std::path::Path; - -pub fn ethereum(_orchestration_path: &Path) { - // TODO -} diff --git a/orchestration/src/coins/ethereum/consensus/lighthouse.rs b/orchestration/src/coins/ethereum/consensus/lighthouse.rs new file mode 100644 index 000000000..add9728bb --- /dev/null +++ b/orchestration/src/coins/ethereum/consensus/lighthouse.rs @@ -0,0 +1,36 @@ +use crate::Network; + +pub fn lighthouse(network: Network) -> (String, String, String) { + assert_ne!(network, Network::Dev); + + #[rustfmt::skip] + const DOWNLOAD_LIGHTHOUSE: &str = r#" +FROM alpine:latest as lighthouse + +ENV LIGHTHOUSE_VERSION=5.1.3 + +RUN apk --no-cache add git gnupg + +# Download lighthouse +RUN wget https://github.com/sigp/lighthouse/releases/download/v${LIGHTHOUSE_VERSION}/lighthouse-v${LIGHTHOUSE_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz +RUN wget https://github.com/sigp/lighthouse/releases/download/v${LIGHTHOUSE_VERSION}/lighthouse-v${LIGHTHOUSE_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz.asc + +# Verify the signature +gpg --keyserver keyserver.ubuntu.com --recv-keys 15E66D941F697E28F49381F426416DC3F30674B0 +gpg --verify lighthouse-v${LIGHTHOUSE_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz.asc lighthouse-v${LIGHTHOUSE_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz + +# Extract lighthouse +RUN tar xvf lighthouse-v${LIGHTHOUSE_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz +"#; + + let run_lighthouse = format!( + r#" +COPY --from=lighthouse --chown=ethereum lighthouse /bin + 
+ADD /orchestration/{}/coins/ethereum/consensus/lighthouse/run.sh /consensus_layer.sh +"#, + network.label() + ); + + (DOWNLOAD_LIGHTHOUSE.to_string(), String::new(), run_lighthouse) +} diff --git a/orchestration/src/coins/ethereum/consensus/mod.rs b/orchestration/src/coins/ethereum/consensus/mod.rs new file mode 100644 index 000000000..4f64c0d89 --- /dev/null +++ b/orchestration/src/coins/ethereum/consensus/mod.rs @@ -0,0 +1,6 @@ +mod lighthouse; +#[allow(unused)] +pub use lighthouse::lighthouse; + +mod nimbus; +pub use nimbus::nimbus; diff --git a/orchestration/src/coins/ethereum/consensus/nimbus.rs b/orchestration/src/coins/ethereum/consensus/nimbus.rs new file mode 100644 index 000000000..07006aa94 --- /dev/null +++ b/orchestration/src/coins/ethereum/consensus/nimbus.rs @@ -0,0 +1,49 @@ +use crate::Network; + +pub fn nimbus(network: Network) -> (String, String, String) { + assert_ne!(network, Network::Dev); + + let platform = match std::env::consts::ARCH { + "x86_64" => "amd64", + "arm" => "arm32v7", + "aarch64" => "arm64v8", + _ => panic!("unsupported platform"), + }; + + #[rustfmt::skip] + let checksum = match platform { + "amd64" => "5da10222cfb555ce2e3820ece12e8e30318945e3ed4b2b88d295963c879daeee071623c47926f880f3db89ce537fd47c6b26fe37e47aafbae3222b58bcec2fba", + "arm32v7" => "7055da77bfa1186ee2e7ce2a48b923d45ccb039592f529c58d93d55a62bca46566ada451bd7497c3ae691260544f0faf303602afd85ccc18388fdfdac0bb2b45", + "arm64v8" => "1a68f44598462abfade0dbeb6adf10b52614ba03605a8bf487b99493deb41468317926ef2d657479fcc26fce640aeebdbd880956beec3fb110b5abc97bd83556", + _ => panic!("unsupported platform"), + }; + + #[rustfmt::skip] + let download_nimbus = format!(r#" +FROM alpine:latest as nimbus + +ENV NIMBUS_VERSION=24.3.0 +ENV NIMBUS_COMMIT=dc19b082 + +# Download nimbus +RUN wget https://github.com/status-im/nimbus-eth2/releases/download/v${{NIMBUS_VERSION}}/nimbus-eth2_Linux_{platform}_${{NIMBUS_VERSION}}_${{NIMBUS_COMMIT}}.tar.gz + +# Extract nimbus +RUN tar xvf 
nimbus-eth2_Linux_{platform}_${{NIMBUS_VERSION}}_${{NIMBUS_COMMIT}}.tar.gz +RUN mv nimbus-eth2_Linux_{platform}_${{NIMBUS_VERSION}}_${{NIMBUS_COMMIT}}/build/nimbus_beacon_node ./nimbus + +# Verify the checksum +RUN sha512sum nimbus | grep {checksum} +"#); + + let run_nimbus = format!( + r#" +COPY --from=nimbus --chown=ethereum nimbus /bin + +ADD /orchestration/{}/coins/ethereum/consensus/nimbus/run.sh /consensus_layer.sh +"#, + network.label() + ); + + (download_nimbus, String::new(), run_nimbus) +} diff --git a/orchestration/src/coins/ethereum/execution/anvil.rs b/orchestration/src/coins/ethereum/execution/anvil.rs new file mode 100644 index 000000000..53d894eca --- /dev/null +++ b/orchestration/src/coins/ethereum/execution/anvil.rs @@ -0,0 +1,14 @@ +use crate::Network; + +pub fn anvil(network: Network) -> (String, String, String) { + assert_eq!(network, Network::Dev); + + const ANVIL_SETUP: &str = r#" +RUN curl -L https://foundry.paradigm.xyz | bash || exit 0 +RUN ~/.foundry/bin/foundryup + +EXPOSE 8545 +"#; + + (String::new(), "RUN apt install git curl -y".to_string(), ANVIL_SETUP.to_string()) +} diff --git a/orchestration/src/coins/ethereum/execution/mod.rs b/orchestration/src/coins/ethereum/execution/mod.rs new file mode 100644 index 000000000..3db59c844 --- /dev/null +++ b/orchestration/src/coins/ethereum/execution/mod.rs @@ -0,0 +1,5 @@ +mod reth; +pub use reth::reth; + +mod anvil; +pub use anvil::anvil; diff --git a/orchestration/src/coins/ethereum/execution/reth.rs b/orchestration/src/coins/ethereum/execution/reth.rs new file mode 100644 index 000000000..8c80a9faa --- /dev/null +++ b/orchestration/src/coins/ethereum/execution/reth.rs @@ -0,0 +1,38 @@ +use crate::Network; + +pub fn reth(network: Network) -> (String, String, String) { + assert_ne!(network, Network::Dev); + + #[rustfmt::skip] + const DOWNLOAD_RETH: &str = r#" +FROM alpine:latest as reth + +ENV RETH_VERSION=0.2.0-beta.6 + +RUN apk --no-cache add git gnupg + +# Download reth +RUN wget 
https://github.com/paradigmxyz/reth/releases/download/v${RETH_VERSION}/reth-v${RETH_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz +RUN wget https://github.com/paradigmxyz/reth/releases/download/v${RETH_VERSION}/reth-v${RETH_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz.asc + +# Verify the signature +gpg --keyserver keyserver.ubuntu.com --recv-keys A3AE097C89093A124049DF1F5391A3C4100530B4 +gpg --verify reth-v${RETH_VERSION}-$(uname -m).tar.gz.asc reth-v${RETH_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz + +# Extract reth +RUN tar xvf reth-v${RETH_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz +"#; + + let run_reth = format!( + r#" +COPY --from=reth --chown=ethereum reth /bin + +EXPOSE 30303 9001 8545 + +ADD /orchestration/{}/coins/ethereum/execution/reth/run.sh /execution_layer.sh +"#, + network.label() + ); + + (DOWNLOAD_RETH.to_string(), String::new(), run_reth) +} diff --git a/orchestration/src/coins/ethereum/mod.rs b/orchestration/src/coins/ethereum/mod.rs new file mode 100644 index 000000000..a06318c07 --- /dev/null +++ b/orchestration/src/coins/ethereum/mod.rs @@ -0,0 +1,43 @@ +use std::path::Path; + +use crate::{Network, Os, mimalloc, os, write_dockerfile}; + +mod execution; +use execution::*; + +mod consensus; +use consensus::*; + +pub fn ethereum(orchestration_path: &Path, network: Network) { + let ((el_download, el_run_as_root, el_run), (cl_download, cl_run_as_root, cl_run)) = + if network == Network::Dev { + (anvil(network), (String::new(), String::new(), String::new())) + } else { + // TODO: Select an EL/CL based off a RNG seeded from the public key + (reth(network), nimbus(network)) + }; + + let download = mimalloc(Os::Alpine).to_string() + &el_download + &cl_download; + + let run = format!( + r#" +ADD /orchestration/{}/coins/ethereum/run.sh /run.sh +CMD ["/run.sh"] +"#, + network.label() + ); + let run = mimalloc(Os::Debian).to_string() + + &os(Os::Debian, &(el_run_as_root + "\r\n" + &cl_run_as_root), "ethereum") + + &el_run + + &cl_run + + &run; + + 
let res = download + &run; + + let mut ethereum_path = orchestration_path.to_path_buf(); + ethereum_path.push("coins"); + ethereum_path.push("ethereum"); + ethereum_path.push("Dockerfile"); + + write_dockerfile(ethereum_path, &res); +} diff --git a/orchestration/src/coins/monero.rs b/orchestration/src/coins/monero.rs index f64f0a047..c21bc6107 100644 --- a/orchestration/src/coins/monero.rs +++ b/orchestration/src/coins/monero.rs @@ -1,4 +1,4 @@ -use std::{path::Path}; +use std::path::Path; use crate::{Network, Os, mimalloc, write_dockerfile}; @@ -55,12 +55,9 @@ CMD ["/run.sh"] network.label(), ); - let run = crate::os( - os, - &("RUN mkdir /volume && chown monero /volume\r\n".to_string() + - if os == Os::Alpine { "RUN apk --no-cache add gcompat" } else { "" }), - "monero", - ) + &run_monero; + let run = + crate::os(os, if os == Os::Alpine { "RUN apk --no-cache add gcompat" } else { "" }, "monero") + + &run_monero; let res = setup + &run; let mut monero_path = orchestration_path.to_path_buf(); diff --git a/orchestration/src/coordinator.rs b/orchestration/src/coordinator.rs index a8556a009..26058886e 100644 --- a/orchestration/src/coordinator.rs +++ b/orchestration/src/coordinator.rs @@ -1,4 +1,4 @@ -use std::{path::Path}; +use std::path::Path; use zeroize::Zeroizing; @@ -11,12 +11,13 @@ pub fn coordinator( orchestration_path: &Path, network: Network, coordinator_key: Zeroizing<::F>, - serai_key: Zeroizing<::F>, + serai_key: &Zeroizing<::F>, ) { let db = network.db(); let longer_reattempts = if network == Network::Dev { "longer-reattempts" } else { "" }; let setup = mimalloc(Os::Debian).to_string() + &build_serai_service( + "", network.release(), &format!("{db} {longer_reattempts}"), "serai-coordinator", @@ -27,13 +28,16 @@ pub fn coordinator( RUN apt install -y ca-certificates "#; + #[rustfmt::skip] + const DEFAULT_RUST_LOG: &str = "info,serai_coordinator=debug,tributary_chain=debug,tendermint=debug,libp2p_gossipsub::behaviour=error"; + let env_vars = [ 
("MESSAGE_QUEUE_RPC", format!("serai-{}-message-queue", network.label())), ("MESSAGE_QUEUE_KEY", hex::encode(coordinator_key.to_repr())), - ("DB_PATH", "./coordinator-db".to_string()), + ("DB_PATH", "/volume/coordinator-db".to_string()), ("SERAI_KEY", hex::encode(serai_key.to_repr())), ("SERAI_HOSTNAME", format!("serai-{}-serai", network.label())), - ("RUST_LOG", "serai_coordinator=debug,tributary_chain=debug,tendermint=debug".to_string()), + ("RUST_LOG", DEFAULT_RUST_LOG.to_string()), ]; let mut env_vars_str = String::new(); for (env_var, value) in env_vars { diff --git a/orchestration/src/ethereum_relayer.rs b/orchestration/src/ethereum_relayer.rs new file mode 100644 index 000000000..523d3c62c --- /dev/null +++ b/orchestration/src/ethereum_relayer.rs @@ -0,0 +1,39 @@ +use std::path::Path; + +use crate::{Network, Os, mimalloc, os, build_serai_service, write_dockerfile}; + +pub fn ethereum_relayer(orchestration_path: &Path, network: Network) { + let setup = mimalloc(Os::Debian).to_string() + + &build_serai_service("", network.release(), network.db(), "serai-ethereum-relayer"); + + let env_vars = [ + ("DB_PATH", "/volume/ethereum-relayer-db".to_string()), + ("RUST_LOG", "info,serai_ethereum_relayer=trace".to_string()), + ]; + let mut env_vars_str = String::new(); + for (env_var, value) in env_vars { + env_vars_str += &format!(r#"{env_var}=${{{env_var}:="{value}"}} "#); + } + + let run_ethereum_relayer = format!( + r#" +# Copy the relayer server binary and relevant license +COPY --from=builder --chown=ethereumrelayer /serai/bin/serai-ethereum-relayer /bin + +# Run ethereum-relayer +EXPOSE 20830 +EXPOSE 20831 +CMD {env_vars_str} serai-ethereum-relayer +"# + ); + + let run = os(Os::Debian, "", "ethereumrelayer") + &run_ethereum_relayer; + let res = setup + &run; + + let mut ethereum_relayer_path = orchestration_path.to_path_buf(); + ethereum_relayer_path.push("coins"); + ethereum_relayer_path.push("ethereum-relayer"); + ethereum_relayer_path.push("Dockerfile"); + + 
write_dockerfile(ethereum_relayer_path, &res); +} diff --git a/orchestration/src/main.rs b/orchestration/src/main.rs index a3643e7ab..e8ea7654f 100644 --- a/orchestration/src/main.rs +++ b/orchestration/src/main.rs @@ -2,7 +2,14 @@ // TODO: Generate keys for a validator and the infra use core::ops::Deref; -use std::{collections::HashSet, env, path::PathBuf, io::Write, fs, process::Command}; +use std::{ + collections::{HashSet, HashMap}, + env, + path::PathBuf, + io::Write, + fs, + process::{Stdio, Command}, +}; use zeroize::Zeroizing; @@ -25,6 +32,9 @@ use mimalloc::mimalloc; mod coins; use coins::*; +mod ethereum_relayer; +use ethereum_relayer::ethereum_relayer; + mod message_queue; use message_queue::message_queue; @@ -89,8 +99,12 @@ ENV LD_PRELOAD=libmimalloc.so RUN apk update && apk upgrade -# System user (not a human), shell of nologin, no password assigned -RUN adduser -S -s /sbin/nologin -D {user} +RUN adduser --system --shell /sbin/nologin --disabled-password {user} +RUN addgroup {user} +RUN addgroup {user} {user} + +# Make the /volume directory and transfer it to the user +RUN mkdir /volume && chown {user}:{user} /volume {additional_root} @@ -110,7 +124,10 @@ RUN echo "/usr/lib/libmimalloc.so" >> /etc/ld.so.preload RUN apt update && apt upgrade -y && apt autoremove -y && apt clean -RUN useradd --system --create-home --shell /sbin/nologin {user} +RUN useradd --system --user-group --create-home --shell /sbin/nologin {user} + +# Make the /volume directory and transfer it to the user +RUN mkdir /volume && chown {user}:{user} /volume {additional_root} @@ -123,13 +140,13 @@ WORKDIR /home/{user} } } -fn build_serai_service(release: bool, features: &str, package: &str) -> String { +fn build_serai_service(prelude: &str, release: bool, features: &str, package: &str) -> String { let profile = if release { "release" } else { "debug" }; let profile_flag = if release { "--release" } else { "" }; format!( r#" -FROM rust:1.76-slim-bookworm as builder +FROM 
rust:1.77-slim-bookworm as builder COPY --from=mimalloc-debian libmimalloc.so /usr/lib RUN echo "/usr/lib/libmimalloc.so" >> /etc/ld.so.preload @@ -145,6 +162,8 @@ RUN apt install -y make protobuf-compiler # Add the wasm toolchain RUN rustup target add wasm32-unknown-unknown +{prelude} + # Add files for build ADD patches /serai/patches ADD common /serai/common @@ -199,38 +218,72 @@ fn orchestration_path(network: Network) -> PathBuf { orchestration_path } +type InfrastructureKeys = + HashMap<&'static str, (Zeroizing<::F>, ::G)>; +fn infrastructure_keys(network: Network) -> InfrastructureKeys { + // Generate entropy for the infrastructure keys + + let entropy = if network == Network::Dev { + // Don't use actual entropy if this is a dev environment + Zeroizing::new([0; 32]) + } else { + let path = home::home_dir() + .unwrap() + .join(".serai") + .join(network.label()) + .join("infrastructure_keys_entropy"); + // Check if there's existing entropy + if let Ok(entropy) = fs::read(&path).map(Zeroizing::new) { + assert_eq!(entropy.len(), 32, "entropy saved to disk wasn't 32 bytes"); + let mut res = Zeroizing::new([0; 32]); + res.copy_from_slice(entropy.as_ref()); + res + } else { + // If there isn't, generate fresh entropy + let mut res = Zeroizing::new([0; 32]); + OsRng.fill_bytes(res.as_mut()); + fs::write(&path, &res).unwrap(); + res + } + }; + + let mut transcript = + RecommendedTranscript::new(b"Serai Orchestrator Infrastructure Keys Transcript"); + transcript.append_message(b"network", network.label().as_bytes()); + transcript.append_message(b"entropy", entropy); + let mut rng = ChaCha20Rng::from_seed(transcript.rng_seed(b"infrastructure_keys")); + + let mut key_pair = || { + let key = Zeroizing::new(::F::random(&mut rng)); + let public = Ristretto::generator() * key.deref(); + (key, public) + }; + + HashMap::from([ + ("coordinator", key_pair()), + ("bitcoin", key_pair()), + ("ethereum", key_pair()), + ("monero", key_pair()), + ]) +} + fn dockerfiles(network: 
Network) { let orchestration_path = orchestration_path(network); bitcoin(&orchestration_path, network); - ethereum(&orchestration_path); + ethereum(&orchestration_path, network); monero(&orchestration_path, network); if network == Network::Dev { monero_wallet_rpc(&orchestration_path); } - // TODO: Generate infra keys in key_gen, yet service entropy here? + let mut infrastructure_keys = infrastructure_keys(network); + let coordinator_key = infrastructure_keys.remove("coordinator").unwrap(); + let bitcoin_key = infrastructure_keys.remove("bitcoin").unwrap(); + let ethereum_key = infrastructure_keys.remove("ethereum").unwrap(); + let monero_key = infrastructure_keys.remove("monero").unwrap(); - // Generate entropy for the infrastructure keys - let mut entropy = Zeroizing::new([0; 32]); - // Only use actual entropy if this isn't a development environment - if network != Network::Dev { - OsRng.fill_bytes(entropy.as_mut()); - } - let mut transcript = RecommendedTranscript::new(b"Serai Orchestrator Transcript"); - transcript.append_message(b"entropy", entropy); - let mut new_rng = |label| ChaCha20Rng::from_seed(transcript.rng_seed(label)); - - let mut message_queue_keys_rng = new_rng(b"message_queue_keys"); - let mut key_pair = || { - let key = Zeroizing::new(::F::random(&mut message_queue_keys_rng)); - let public = Ristretto::generator() * key.deref(); - (key, public) - }; - let coordinator_key = key_pair(); - let bitcoin_key = key_pair(); - let ethereum_key = key_pair(); - let monero_key = key_pair(); + ethereum_relayer(&orchestration_path, network); message_queue( &orchestration_path, @@ -241,10 +294,9 @@ fn dockerfiles(network: Network) { monero_key.1, ); - let mut processor_entropy_rng = new_rng(b"processor_entropy"); - let mut new_entropy = || { + let new_entropy = || { let mut res = Zeroizing::new([0; 32]); - processor_entropy_rng.fill_bytes(res.as_mut()); + OsRng.fill_bytes(res.as_mut()); res }; processor( @@ -276,9 +328,9 @@ fn dockerfiles(network: Network) { 
Zeroizing::new(::F::from_repr(*serai_key_repr).unwrap()) }; - coordinator(&orchestration_path, network, coordinator_key.0, serai_key); + coordinator(&orchestration_path, network, coordinator_key.0, &serai_key); - serai(&orchestration_path, network); + serai(&orchestration_path, network, &serai_key); } fn key_gen(network: Network) { @@ -316,6 +368,7 @@ fn start(network: Network, services: HashSet) { let name = match service.as_ref() { "serai" => "serai", "coordinator" => "coordinator", + "ethereum-relayer" => "ethereum-relayer", "message-queue" => "message-queue", "bitcoin-daemon" => "bitcoin", "bitcoin-processor" => "bitcoin-processor", @@ -325,6 +378,81 @@ fn start(network: Network, services: HashSet) { _ => panic!("starting unrecognized service"), }; + // If we're building the Serai service, first build the runtime + let serai_runtime_volume = format!("serai-{}-runtime-volume", network.label()); + if name == "serai" { + // Check if it's built by checking if the volume has the expected runtime file + let wasm_build_container_name = format!("serai-{}-runtime", network.label()); + let built = || { + if let Ok(state_and_status) = Command::new("docker") + .arg("inspect") + .arg("-f") + .arg("{{.State.Status}}:{{.State.ExitCode}}") + .arg(&wasm_build_container_name) + .output() + { + if let Ok(state_and_status) = String::from_utf8(state_and_status.stdout) { + return state_and_status.trim() == "exited:0"; + } + } + false + }; + + if !built() { + let mut repo_path = env::current_exe().unwrap(); + repo_path.pop(); + if repo_path.as_path().ends_with("deps") { + repo_path.pop(); + } + assert!(repo_path.as_path().ends_with("debug") || repo_path.as_path().ends_with("release")); + repo_path.pop(); + assert!(repo_path.as_path().ends_with("target")); + repo_path.pop(); + + // Build the image to build the runtime + if !Command::new("docker") + .current_dir(&repo_path) + .arg("build") + .arg("-f") + .arg("orchestration/runtime/Dockerfile") + .arg(".") + .arg("-t") + 
.arg(format!("serai-{}-runtime-img", network.label())) + .spawn() + .unwrap() + .wait() + .unwrap() + .success() + { + panic!("failed to build runtime image"); + } + + // Run the image, building the runtime + println!("Building the Serai runtime"); + let container_name = format!("serai-{}-runtime", network.label()); + let _ = + Command::new("docker").arg("rm").arg("-f").arg(&container_name).spawn().unwrap().wait(); + let _ = Command::new("docker") + .arg("run") + .arg("--name") + .arg(container_name) + .arg("--volume") + .arg(format!("{serai_runtime_volume}:/volume")) + .arg(format!("serai-{}-runtime-img", network.label())) + .spawn(); + + // Wait until its built + let mut ticks = 0; + while !built() { + std::thread::sleep(core::time::Duration::from_secs(60)); + ticks += 1; + if ticks > 6 * 60 { + panic!("couldn't build the runtime after 6 hours") + } + } + } + } + // Build it println!("Building {service}"); docker::build(&orchestration_path(network), network, name); @@ -335,6 +463,10 @@ fn start(network: Network, services: HashSet) { .arg("container") .arg("inspect") .arg(&docker_name) + // Use null for all IO to silence 'container does not exist' + .stdin(Stdio::null()) + .stdout(Stdio::null()) + .stderr(Stdio::null()) .status() .unwrap() .success() @@ -346,25 +478,57 @@ fn start(network: Network, services: HashSet) { let command = command.arg("create").arg("--name").arg(&docker_name); let command = command.arg("--network").arg("serai"); let command = command.arg("--restart").arg("always"); + let command = command.arg("--log-opt").arg("max-size=100m"); + let command = command.arg("--log-opt").arg("max-file=3"); + let command = if network == Network::Dev { + command + } else { + // Assign a persistent volume if this isn't for Dev + command.arg("--volume").arg(volume) + }; let command = match name { "bitcoin" => { + // Expose the RPC for tests if network == Network::Dev { command.arg("-p").arg("8332:8332") } else { - command.arg("--volume").arg(volume) + command } 
} + "ethereum-relayer" => { + // Expose the router command fetch server + command.arg("-p").arg("20831:20831") + } "monero" => { + // Expose the RPC for tests if network == Network::Dev { command.arg("-p").arg("18081:18081") } else { - command.arg("--volume").arg(volume) + command } } "monero-wallet-rpc" => { assert_eq!(network, Network::Dev, "monero-wallet-rpc is only for dev"); + // Expose the RPC for tests command.arg("-p").arg("18082:18082") } + "coordinator" => { + if network == Network::Dev { + command + } else { + // Publish the port + command.arg("-p").arg("30563:30563") + } + } + "serai" => { + let command = command.arg("--volume").arg(format!("{serai_runtime_volume}:/runtime")); + if network == Network::Dev { + command + } else { + // Publish the port + command.arg("-p").arg("30333:30333") + } + } _ => command, }; assert!( @@ -388,10 +552,10 @@ Serai Orchestrator v0.0.1 Commands: key_gen *network* - Generates a key for the validator. + Generate a key for the validator. setup *network* - Generate infrastructure keys and the Dockerfiles for every Serai service. + Generate the Dockerfiles for every Serai service. start *network* [service1, service2...] Start the specified services for the specified network ("dev" or "testnet"). 
@@ -401,6 +565,9 @@ Commands: - `message-queue` - `bitcoin-daemon` - `bitcoin-processor` + - `ethereum-daemon` + - `ethereum-processor` + - `ethereum-relayer` - `monero-daemon` - `monero-processor` - `monero-wallet-rpc` (if "dev") @@ -433,6 +600,9 @@ Commands: Some("start") => { let mut services = HashSet::new(); for arg in args { + if arg == "ethereum-processor" { + services.insert("ethereum-relayer".to_string()); + } if let Some(ext_network) = arg.strip_suffix("-processor") { services.insert(ext_network.to_string() + "-daemon"); } diff --git a/orchestration/src/message_queue.rs b/orchestration/src/message_queue.rs index ef6bdcbfc..ea97a6198 100644 --- a/orchestration/src/message_queue.rs +++ b/orchestration/src/message_queue.rs @@ -1,4 +1,4 @@ -use std::{path::Path}; +use std::path::Path; use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto}; @@ -13,15 +13,15 @@ pub fn message_queue( monero_key: ::G, ) { let setup = mimalloc(Os::Debian).to_string() + - &build_serai_service(network.release(), network.db(), "serai-message-queue"); + &build_serai_service("", network.release(), network.db(), "serai-message-queue"); let env_vars = [ ("COORDINATOR_KEY", hex::encode(coordinator_key.to_bytes())), ("BITCOIN_KEY", hex::encode(bitcoin_key.to_bytes())), ("ETHEREUM_KEY", hex::encode(ethereum_key.to_bytes())), ("MONERO_KEY", hex::encode(monero_key.to_bytes())), - ("DB_PATH", "./message-queue-db".to_string()), - ("RUST_LOG", "serai_message_queue=trace".to_string()), + ("DB_PATH", "/volume/message-queue-db".to_string()), + ("RUST_LOG", "info,serai_message_queue=trace".to_string()), ]; let mut env_vars_str = String::new(); for (env_var, value) in env_vars { diff --git a/orchestration/src/processor.rs b/orchestration/src/processor.rs index e2afde09d..cefe6455b 100644 --- a/orchestration/src/processor.rs +++ b/orchestration/src/processor.rs @@ -1,4 +1,4 @@ -use std::{path::Path}; +use std::path::Path; use zeroize::Zeroizing; @@ -17,6 +17,15 @@ pub fn processor( ) { let 
setup = mimalloc(Os::Debian).to_string() + &build_serai_service( + if coin == "ethereum" { + r#" +RUN cargo install svm-rs +RUN svm install 0.8.25 +RUN svm use 0.8.25 +"# + } else { + "" + }, network.release(), &format!("binaries {} {coin}", network.db()), "serai-processor", @@ -32,24 +41,32 @@ RUN apt install -y ca-certificates const RPC_PASS: &str = "seraidex"; // TODO: Isolate networks let hostname = format!("serai-{}-{coin}", network.label()); - let port = match coin { - "bitcoin" => 8332, - "ethereum" => return, // TODO - "monero" => 18081, - _ => panic!("unrecognized external network"), - }; + let port = format!( + "{}", + match coin { + "bitcoin" => 8332, + "ethereum" => 8545, + "monero" => 18081, + _ => panic!("unrecognized external network"), + } + ); - let env_vars = [ - ("MESSAGE_QUEUE_RPC", format!("serai-{}-message_queue", network.label())), + let mut env_vars = vec![ + ("MESSAGE_QUEUE_RPC", format!("serai-{}-message-queue", network.label())), ("MESSAGE_QUEUE_KEY", hex::encode(coin_key.to_repr())), ("ENTROPY", hex::encode(entropy.as_ref())), ("NETWORK", coin.to_string()), ("NETWORK_RPC_LOGIN", format!("{RPC_USER}:{RPC_PASS}")), ("NETWORK_RPC_HOSTNAME", hostname), - ("NETWORK_RPC_PORT", format!("{port}")), - ("DB_PATH", "./processor-db".to_string()), - ("RUST_LOG", "serai_processor=debug".to_string()), + ("NETWORK_RPC_PORT", port), + ("DB_PATH", "/volume/processor-db".to_string()), + ("RUST_LOG", "info,serai_processor=debug".to_string()), ]; + if coin == "ethereum" { + env_vars + .push(("ETHEREUM_RELAYER_HOSTNAME", format!("serai-{}-ethereum-relayer", network.label()))); + env_vars.push(("ETHEREUM_RELAYER_PORT", "20830".to_string())); + } let mut env_vars_str = String::new(); for (env_var, value) in env_vars { env_vars_str += &format!(r#"{env_var}=${{{env_var}:="{value}"}} "#); diff --git a/orchestration/src/serai.rs b/orchestration/src/serai.rs index a3382acbe..e2f96f6ad 100644 --- a/orchestration/src/serai.rs +++ b/orchestration/src/serai.rs @@ -1,14 
+1,26 @@ -use std::{path::Path}; +use std::path::Path; + +use zeroize::Zeroizing; +use ciphersuite::{group::ff::PrimeField, Ciphersuite, Ristretto}; use crate::{Network, Os, mimalloc, os, build_serai_service, write_dockerfile}; -pub fn serai(orchestration_path: &Path, network: Network) { +pub fn serai( + orchestration_path: &Path, + network: Network, + serai_key: &Zeroizing<::F>, +) { // Always builds in release for performance reasons - let setup = mimalloc(Os::Debian).to_string() + &build_serai_service(true, "", "serai-node"); + let setup = mimalloc(Os::Debian).to_string() + &build_serai_service("", true, "", "serai-node"); let setup_fast_epoch = - mimalloc(Os::Debian).to_string() + &build_serai_service(true, "fast-epoch", "serai-node"); + mimalloc(Os::Debian).to_string() + &build_serai_service("", true, "fast-epoch", "serai-node"); + + let env_vars = [("KEY", hex::encode(serai_key.to_repr()))]; + let mut env_vars_str = String::new(); + for (env_var, value) in env_vars { + env_vars_str += &format!(r#"{env_var}=${{{env_var}:="{value}"}} "#); + } - // TODO: Review the ports exposed here let run_serai = format!( r#" # Copy the Serai binary and relevant license @@ -16,12 +28,12 @@ COPY --from=builder --chown=serai /serai/bin/serai-node /bin/ COPY --from=builder --chown=serai /serai/AGPL-3.0 . 
# Run the Serai node -EXPOSE 30333 9615 9933 9944 +EXPOSE 30333 9944 ADD /orchestration/{}/serai/run.sh / -CMD ["/run.sh"] +CMD {env_vars_str} "/run.sh" "#, - network.label() + network.label(), ); let run = os(Os::Debian, "", "serai") + &run_serai; diff --git a/orchestration/testnet/coins/ethereum-relayer/.folder b/orchestration/testnet/coins/ethereum-relayer/.folder new file mode 100644 index 000000000..675d44382 --- /dev/null +++ b/orchestration/testnet/coins/ethereum-relayer/.folder @@ -0,0 +1,11 @@ +#!/bin/sh + +RPC_USER="${RPC_USER:=serai}" +RPC_PASS="${RPC_PASS:=seraidex}" + +# Run Monero +monerod --non-interactive --regtest --offline --fixed-difficulty=1 \ + --no-zmq --rpc-bind-ip=0.0.0.0 --rpc-bind-port=18081 --confirm-external-bind \ + --rpc-access-control-origins "*" --disable-rpc-ban \ + --rpc-login=$RPC_USER:$RPC_PASS \ + $1 diff --git a/orchestration/testnet/coins/ethereum/consensus/lighthouse/run.sh b/orchestration/testnet/coins/ethereum/consensus/lighthouse/run.sh new file mode 100755 index 000000000..1b3857bfe --- /dev/null +++ b/orchestration/testnet/coins/ethereum/consensus/lighthouse/run.sh @@ -0,0 +1,3 @@ +#!/bin/sh + +RUST_LOG=info lighthouse bn --execution-endpoint http://localhost:8551 --execution-jwt /home/ethereum/.jwt diff --git a/orchestration/testnet/coins/ethereum/consensus/nimbus/run.sh b/orchestration/testnet/coins/ethereum/consensus/nimbus/run.sh new file mode 100755 index 000000000..2bb8d868b --- /dev/null +++ b/orchestration/testnet/coins/ethereum/consensus/nimbus/run.sh @@ -0,0 +1,3 @@ +#!/bin/sh + +exit 1 diff --git a/orchestration/testnet/coins/ethereum/execution/geth/run.sh b/orchestration/testnet/coins/ethereum/execution/geth/run.sh new file mode 100755 index 000000000..fee4a57c1 --- /dev/null +++ b/orchestration/testnet/coins/ethereum/execution/geth/run.sh @@ -0,0 +1,8 @@ +#!/bin/sh + +#geth --dev --networkid 5208 \ +# --http --http.api "web3,net,eth,miner" \ +# --http.addr 0.0.0.0 --http.port 8545 \ +# --http.vhosts="*" 
--http.corsdomain "*" + +exit 1 diff --git a/orchestration/testnet/coins/ethereum/execution/reth/run.sh b/orchestration/testnet/coins/ethereum/execution/reth/run.sh new file mode 100755 index 000000000..5be8924a3 --- /dev/null +++ b/orchestration/testnet/coins/ethereum/execution/reth/run.sh @@ -0,0 +1,3 @@ +#!/bin/sh + +RUST_LOG=info reth node --authrpc.jwtsecret /home/ethereum/.jwt diff --git a/orchestration/testnet/coins/ethereum/run.sh b/orchestration/testnet/coins/ethereum/run.sh index 2bb8d868b..82b8ff580 100755 --- a/orchestration/testnet/coins/ethereum/run.sh +++ b/orchestration/testnet/coins/ethereum/run.sh @@ -1,3 +1 @@ -#!/bin/sh - -exit 1 +/execution_layer.sh & /consensus_layer.sh diff --git a/orchestration/testnet/serai/run.sh b/orchestration/testnet/serai/run.sh index 2bb8d868b..ab3b59dfa 100755 --- a/orchestration/testnet/serai/run.sh +++ b/orchestration/testnet/serai/run.sh @@ -1,3 +1,3 @@ #!/bin/sh -exit 1 +serai-node --base-path /volume --unsafe-rpc-external --rpc-cors all --chain testnet --validator diff --git a/patches/parking_lot/Cargo.toml b/patches/parking_lot/Cargo.toml new file mode 100644 index 000000000..957b19bff --- /dev/null +++ b/patches/parking_lot/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "parking_lot" +version = "0.11.2" +description = "parking_lot which patches to the latest update" +license = "MIT" +repository = "https://github.com/serai-dex/serai/tree/develop/patches/parking_lot" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +rust-version = "1.70" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[dependencies] +parking_lot = "0.12" diff --git a/patches/parking_lot/src/lib.rs b/patches/parking_lot/src/lib.rs new file mode 100644 index 000000000..df10a74d9 --- /dev/null +++ b/patches/parking_lot/src/lib.rs @@ -0,0 +1 @@ +pub use parking_lot::*; diff --git a/patches/parking_lot_core/Cargo.toml b/patches/parking_lot_core/Cargo.toml new file mode 100644 index 
000000000..37dcc7035 --- /dev/null +++ b/patches/parking_lot_core/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "parking_lot_core" +version = "0.8.6" +description = "parking_lot_core which patches to the latest update" +license = "MIT" +repository = "https://github.com/serai-dex/serai/tree/develop/patches/parking_lot_core" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +rust-version = "1.70" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[dependencies] +parking_lot_core = "0.9" diff --git a/patches/parking_lot_core/src/lib.rs b/patches/parking_lot_core/src/lib.rs new file mode 100644 index 000000000..bfecbfd8a --- /dev/null +++ b/patches/parking_lot_core/src/lib.rs @@ -0,0 +1 @@ +pub use parking_lot_core::*; diff --git a/patches/rocksdb/Cargo.toml b/patches/rocksdb/Cargo.toml new file mode 100644 index 000000000..3a92fafcd --- /dev/null +++ b/patches/rocksdb/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "rocksdb" +version = "0.21.0" +description = "rocksdb which patches to the latest update" +license = "MIT" +repository = "https://github.com/serai-dex/serai/tree/develop/patches/rocksdb" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +rust-version = "1.70" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[dependencies] +rocksdb = { version = "0.22", default-features = false } + +[features] +jemalloc = [] +snappy = ["rocksdb/snappy"] +lz4 = ["rocksdb/lz4"] +zstd = ["rocksdb/zstd"] +zlib = ["rocksdb/zlib"] +bzip2 = ["rocksdb/bzip2"] +default = ["snappy", "lz4", "zstd", "zlib", "bzip2"] diff --git a/patches/rocksdb/src/lib.rs b/patches/rocksdb/src/lib.rs new file mode 100644 index 000000000..bd209ce8e --- /dev/null +++ b/patches/rocksdb/src/lib.rs @@ -0,0 +1 @@ +pub use rocksdb::*; diff --git a/patches/zstd/Cargo.toml b/patches/zstd/Cargo.toml index f7bf11d6b..0d1368e4c 100644 --- a/patches/zstd/Cargo.toml +++ b/patches/zstd/Cargo.toml @@ -14,4 +14,4 @@ 
all-features = true rustdoc-args = ["--cfg", "docsrs"] [dependencies] -zstd = "0.12" +zstd = "0.13" diff --git a/processor/Cargo.toml b/processor/Cargo.toml index 73a34efea..cc0108488 100644 --- a/processor/Cargo.toml +++ b/processor/Cargo.toml @@ -28,6 +28,7 @@ rand_core = { version = "0.6", default-features = false, features = ["std", "get rand_chacha = { version = "0.3", default-features = false, features = ["std"] } # Encoders +const-hex = { version = "1", default-features = false } hex = { version = "0.4", default-features = false, features = ["std"] } scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } @@ -40,11 +41,16 @@ transcript = { package = "flexible-transcript", path = "../crypto/transcript", d frost = { package = "modular-frost", path = "../crypto/frost", default-features = false, features = ["ristretto"] } frost-schnorrkel = { path = "../crypto/schnorrkel", default-features = false } -# Bitcoin -secp256k1 = { version = "0.28", default-features = false, features = ["std", "global-context", "rand-std"], optional = true } +# Bitcoin/Ethereum k256 = { version = "^0.13.1", default-features = false, features = ["std"], optional = true } + +# Bitcoin +secp256k1 = { version = "0.29", default-features = false, features = ["std", "global-context", "rand-std"], optional = true } bitcoin-serai = { path = "../coins/bitcoin", default-features = false, features = ["std"], optional = true } +# Ethereum +ethereum-serai = { path = "../coins/ethereum", default-features = false, optional = true } + # Monero dalek-ff-group = { path = "../crypto/dalek-ff-group", default-features = false, features = ["std"], optional = true } monero-serai = { path = "../coins/monero", default-features = false, features = ["std", "http-rpc", "multisig"], optional = true } @@ -55,12 +61,12 @@ env_logger = { version = "0.10", default-features 
= false, features = ["humantim tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] } zalloc = { path = "../common/zalloc" } -serai-db = { path = "../common/db", optional = true } +serai-db = { path = "../common/db" } serai-env = { path = "../common/env", optional = true } # TODO: Replace with direct usage of primitives serai-client = { path = "../substrate/client", default-features = false, features = ["serai"] } -messages = { package = "serai-processor-messages", path = "./messages", optional = true } +messages = { package = "serai-processor-messages", path = "./messages" } message-queue = { package = "serai-message-queue", path = "../message-queue", optional = true } @@ -69,6 +75,8 @@ frost = { package = "modular-frost", path = "../crypto/frost", features = ["test sp-application-crypto = { git = "https://github.com/serai-dex/substrate", default-features = false, features = ["std"] } +ethereum-serai = { path = "../coins/ethereum", default-features = false, features = ["tests"] } + dockertest = "0.4" serai-docker-tests = { path = "../tests/docker" } @@ -76,9 +84,11 @@ serai-docker-tests = { path = "../tests/docker" } secp256k1 = ["k256", "frost/secp256k1"] bitcoin = ["dep:secp256k1", "secp256k1", "bitcoin-serai", "serai-client/bitcoin"] +ethereum = ["secp256k1", "ethereum-serai/tests"] + ed25519 = ["dalek-ff-group", "frost/ed25519"] monero = ["ed25519", "monero-serai", "serai-client/monero"] -binaries = ["env_logger", "serai-env", "messages", "message-queue"] +binaries = ["env_logger", "serai-env", "message-queue"] parity-db = ["serai-db/parity-db"] rocksdb = ["serai-db/rocksdb"] diff --git a/processor/src/key_gen.rs b/processor/src/key_gen.rs index f1a5b47c0..6976e225d 100644 --- a/processor/src/key_gen.rs +++ b/processor/src/key_gen.rs @@ -512,6 +512,7 @@ impl KeyGen { ProcessorMessage::GeneratedKeyPair { id, substrate_key: generated_substrate_key.unwrap().to_bytes(), + // TODO: This can be made more 
efficient since tweaked keys may be a subset of keys network_key: generated_network_key.unwrap().to_bytes().as_ref().to_vec(), } } diff --git a/processor/src/lib.rs b/processor/src/lib.rs index 378b852da..19f67508b 100644 --- a/processor/src/lib.rs +++ b/processor/src/lib.rs @@ -1,7 +1,15 @@ +#![allow(dead_code)] + mod plan; pub use plan::*; +mod db; +pub(crate) use db::*; + +mod key_gen; + pub mod networks; +pub(crate) mod multisigs; mod additional_key; pub use additional_key::additional_key; diff --git a/processor/src/main.rs b/processor/src/main.rs index a4e9552de..e0d97aa68 100644 --- a/processor/src/main.rs +++ b/processor/src/main.rs @@ -31,6 +31,8 @@ mod networks; use networks::{Block, Network}; #[cfg(feature = "bitcoin")] use networks::Bitcoin; +#[cfg(feature = "ethereum")] +use networks::Ethereum; #[cfg(feature = "monero")] use networks::Monero; @@ -735,6 +737,7 @@ async fn main() { }; let network_id = match env::var("NETWORK").expect("network wasn't specified").as_str() { "bitcoin" => NetworkId::Bitcoin, + "ethereum" => NetworkId::Ethereum, "monero" => NetworkId::Monero, _ => panic!("unrecognized network"), }; @@ -744,6 +747,16 @@ async fn main() { match network_id { #[cfg(feature = "bitcoin")] NetworkId::Bitcoin => run(db, Bitcoin::new(url).await, coordinator).await, + #[cfg(feature = "ethereum")] + NetworkId::Ethereum => { + let relayer_hostname = env::var("ETHEREUM_RELAYER_HOSTNAME") + .expect("ethereum relayer hostname wasn't specified") + .to_string(); + let relayer_port = + env::var("ETHEREUM_RELAYER_PORT").expect("ethereum relayer port wasn't specified"); + let relayer_url = relayer_hostname + ":" + &relayer_port; + run(db.clone(), Ethereum::new(db, url, relayer_url).await, coordinator).await + } #[cfg(feature = "monero")] NetworkId::Monero => run(db, Monero::new(url).await, coordinator).await, _ => panic!("spawning a processor for an unsupported network"), diff --git a/processor/src/multisigs/db.rs b/processor/src/multisigs/db.rs index 
51287a0e8..3d1d13bdf 100644 --- a/processor/src/multisigs/db.rs +++ b/processor/src/multisigs/db.rs @@ -1,3 +1,5 @@ +use std::io; + use ciphersuite::Ciphersuite; pub use serai_db::*; @@ -6,9 +8,59 @@ use serai_client::{primitives::Balance, in_instructions::primitives::InInstructi use crate::{ Get, Plan, - networks::{Transaction, Network}, + networks::{Output, Transaction, Network}, }; +#[derive(Clone, PartialEq, Eq, Debug)] +pub enum PlanFromScanning { + Refund(N::Output, N::Address), + Forward(N::Output), +} + +impl PlanFromScanning { + fn read(reader: &mut R) -> io::Result { + let mut kind = [0xff]; + reader.read_exact(&mut kind)?; + match kind[0] { + 0 => { + let output = N::Output::read(reader)?; + + let mut address_vec_len = [0; 4]; + reader.read_exact(&mut address_vec_len)?; + let mut address_vec = + vec![0; usize::try_from(u32::from_le_bytes(address_vec_len)).unwrap()]; + reader.read_exact(&mut address_vec)?; + let address = + N::Address::try_from(address_vec).map_err(|_| "invalid address saved to disk").unwrap(); + + Ok(PlanFromScanning::Refund(output, address)) + } + 1 => { + let output = N::Output::read(reader)?; + Ok(PlanFromScanning::Forward(output)) + } + _ => panic!("reading unrecognized PlanFromScanning"), + } + } + fn write(&self, writer: &mut W) -> io::Result<()> { + match self { + PlanFromScanning::Refund(output, address) => { + writer.write_all(&[0])?; + output.write(writer)?; + + let address_vec: Vec = + address.clone().try_into().map_err(|_| "invalid address being refunded to").unwrap(); + writer.write_all(&u32::try_from(address_vec.len()).unwrap().to_le_bytes())?; + writer.write_all(&address_vec) + } + PlanFromScanning::Forward(output) => { + writer.write_all(&[1])?; + output.write(writer) + } + } + } +} + create_db!( MultisigsDb { NextBatchDb: () -> u32, @@ -80,7 +132,11 @@ impl PlanDb { ) -> bool { let plan = Plan::::read::<&[u8]>(&mut &Self::get(getter, &id).unwrap()[8 ..]).unwrap(); assert_eq!(plan.id(), id); - (key == plan.key) && 
(Some(N::change_address(plan.key)) == plan.change) + if let Some(change) = N::change_address(plan.key) { + (key == plan.key) && (Some(change) == plan.change) + } else { + false + } } } @@ -130,7 +186,7 @@ impl PlansFromScanningDb { pub fn set_plans_from_scanning( txn: &mut impl DbTxn, block_number: usize, - plans: Vec>, + plans: Vec>, ) { let mut buf = vec![]; for plan in plans { @@ -142,13 +198,13 @@ impl PlansFromScanningDb { pub fn take_plans_from_scanning( txn: &mut impl DbTxn, block_number: usize, - ) -> Option>> { + ) -> Option>> { let block_number = u64::try_from(block_number).unwrap(); let res = Self::get(txn, block_number).map(|plans| { let mut plans_ref = plans.as_slice(); let mut res = vec![]; while !plans_ref.is_empty() { - res.push(Plan::::read(&mut plans_ref).unwrap()); + res.push(PlanFromScanning::::read(&mut plans_ref).unwrap()); } res }); @@ -175,7 +231,7 @@ impl ForwardedOutputDb { let res = InInstructionWithBalance::decode(&mut outputs_ref).unwrap(); assert!(outputs_ref.len() < outputs.len()); if outputs_ref.is_empty() { - txn.del(&Self::key(balance)); + txn.del(Self::key(balance)); } else { Self::set(txn, balance, &outputs); } diff --git a/processor/src/multisigs/mod.rs b/processor/src/multisigs/mod.rs index a6e8bbc9d..12f017151 100644 --- a/processor/src/multisigs/mod.rs +++ b/processor/src/multisigs/mod.rs @@ -7,7 +7,7 @@ use scale::{Encode, Decode}; use messages::SubstrateContext; use serai_client::{ - primitives::{MAX_DATA_LEN, NetworkId, Coin, ExternalAddress, BlockHash, Data}, + primitives::{MAX_DATA_LEN, ExternalAddress, BlockHash, Data}, in_instructions::primitives::{ InInstructionWithBalance, Batch, RefundableInInstruction, Shorthand, MAX_BATCH_SIZE, }, @@ -28,15 +28,12 @@ use scanner::{ScannerEvent, ScannerHandle, Scanner}; mod db; use db::*; -#[cfg(not(test))] -mod scheduler; -#[cfg(test)] -pub mod scheduler; +pub(crate) mod scheduler; use scheduler::Scheduler; use crate::{ Get, Db, Payment, Plan, - networks::{OutputType, Output, 
Transaction, SignableTransaction, Block, PreparedSend, Network}, + networks::{OutputType, Output, SignableTransaction, Eventuality, Block, PreparedSend, Network}, }; // InInstructionWithBalance from an external output @@ -66,9 +63,22 @@ fn instruction_from_output( return (presumed_origin, None); } - let Ok(shorthand) = Shorthand::decode(&mut data) else { return (presumed_origin, None) }; - let Ok(instruction) = RefundableInInstruction::try_from(shorthand) else { - return (presumed_origin, None); + let shorthand = match Shorthand::decode(&mut data) { + Ok(shorthand) => shorthand, + Err(e) => { + info!("data in output {} wasn't valid shorthand: {e:?}", hex::encode(output.id())); + return (presumed_origin, None); + } + }; + let instruction = match RefundableInInstruction::try_from(shorthand) { + Ok(instruction) => instruction, + Err(e) => { + info!( + "shorthand in output {} wasn't convertible to a RefundableInInstruction: {e:?}", + hex::encode(output.id()) + ); + return (presumed_origin, None); + } }; let mut balance = output.balance(); @@ -95,6 +105,8 @@ enum RotationStep { ClosingExisting, } +// This explicitly shouldn't take the database as we prepare Plans we won't execute for fee +// estimates async fn prepare_send( network: &N, block_number: usize, @@ -122,7 +134,7 @@ async fn prepare_send( pub struct MultisigViewer { activation_block: usize, key: ::G, - scheduler: Scheduler, + scheduler: N::Scheduler, } #[allow(clippy::type_complexity)] @@ -131,7 +143,7 @@ pub enum MultisigEvent { // Batches to publish Batches(Option<(::G, ::G)>, Vec), // Eventuality completion found on-chain - Completed(Vec, [u8; 32], N::Transaction), + Completed(Vec, [u8; 32], ::Completion), } pub struct MultisigManager { @@ -157,20 +169,7 @@ impl MultisigManager { assert!(current_keys.len() <= 2); let mut actively_signing = vec![]; for (_, key) in ¤t_keys { - schedulers.push( - Scheduler::from_db( - raw_db, - *key, - match N::NETWORK { - NetworkId::Serai => panic!("adding a key for Serai"), 
- NetworkId::Bitcoin => Coin::Bitcoin, - // TODO: This is incomplete to DAI - NetworkId::Ethereum => Coin::Ether, - NetworkId::Monero => Coin::Monero, - }, - ) - .unwrap(), - ); + schedulers.push(N::Scheduler::from_db(raw_db, *key, N::NETWORK).unwrap()); // Load any TXs being actively signed let key = key.to_bytes(); @@ -245,17 +244,7 @@ impl MultisigManager { let viewer = Some(MultisigViewer { activation_block, key: external_key, - scheduler: Scheduler::::new::( - txn, - external_key, - match N::NETWORK { - NetworkId::Serai => panic!("adding a key for Serai"), - NetworkId::Bitcoin => Coin::Bitcoin, - // TODO: This is incomplete to DAI - NetworkId::Ethereum => Coin::Ether, - NetworkId::Monero => Coin::Monero, - }, - ), + scheduler: N::Scheduler::new::(txn, external_key, N::NETWORK), }); if self.existing.is_none() { @@ -352,48 +341,30 @@ impl MultisigManager { (existing_outputs, new_outputs) } - fn refund_plan(output: N::Output, refund_to: N::Address) -> Plan { + fn refund_plan( + scheduler: &mut N::Scheduler, + txn: &mut D::Transaction<'_>, + output: N::Output, + refund_to: N::Address, + ) -> Plan { log::info!("creating refund plan for {}", hex::encode(output.id())); assert_eq!(output.kind(), OutputType::External); - Plan { - key: output.key(), - // Uses a payment as this will still be successfully sent due to fee amortization, - // and because change is currently always a Serai key - payments: vec![Payment { address: refund_to, data: None, balance: output.balance() }], - inputs: vec![output], - change: None, - } + scheduler.refund_plan::(txn, output, refund_to) } - fn forward_plan(&self, output: N::Output) -> Plan { + // Returns the plan for forwarding if one is needed. + // Returns None if one is not needed to forward this output. 
+ fn forward_plan(&mut self, txn: &mut D::Transaction<'_>, output: &N::Output) -> Option> { log::info!("creating forwarding plan for {}", hex::encode(output.id())); - - /* - Sending a Plan, with arbitrary data proxying the InInstruction, would require adding - a flow for networks which drop their data to still embed arbitrary data. It'd also have - edge cases causing failures (we'd need to manually provide the origin if it was implied, - which may exceed the encoding limit). - - Instead, we save the InInstruction as we scan this output. Then, when the output is - successfully forwarded, we simply read it from the local database. This also saves the - costs of embedding arbitrary data. - - Since we can't rely on the Eventuality system to detect if it's a forwarded transaction, - due to the asynchonicity of the Eventuality system, we instead interpret an Forwarded - output which has an amount associated with an InInstruction which was forwarded as having - been forwarded. - */ - - Plan { - key: self.existing.as_ref().unwrap().key, - payments: vec![Payment { - address: N::forward_address(self.new.as_ref().unwrap().key), - data: None, - balance: output.balance(), - }], - inputs: vec![output], - change: None, + let res = self.existing.as_mut().unwrap().scheduler.forward_plan::( + txn, + output.clone(), + self.new.as_ref().expect("forwarding plan yet no new multisig").key, + ); + if res.is_none() { + log::info!("no forwarding plan was necessary for {}", hex::encode(output.id())); } + res } // Filter newly received outputs due to the step being RotationStep::ClosingExisting. 
@@ -605,7 +576,31 @@ impl MultisigManager { block_number { // Load plans crated when we scanned the block - plans = PlansFromScanningDb::take_plans_from_scanning::(txn, block_number).unwrap(); + let scanning_plans = + PlansFromScanningDb::take_plans_from_scanning::(txn, block_number).unwrap(); + // Expand into actual plans + plans = scanning_plans + .into_iter() + .map(|plan| match plan { + PlanFromScanning::Refund(output, refund_to) => { + let existing = self.existing.as_mut().unwrap(); + if output.key() == existing.key { + Self::refund_plan(&mut existing.scheduler, txn, output, refund_to) + } else { + let new = self + .new + .as_mut() + .expect("new multisig didn't expect yet output wasn't for existing multisig"); + assert_eq!(output.key(), new.key, "output wasn't for existing nor new multisig"); + Self::refund_plan(&mut new.scheduler, txn, output, refund_to) + } + } + PlanFromScanning::Forward(output) => self + .forward_plan(txn, &output) + .expect("supposed to forward an output yet no forwarding plan"), + }) + .collect(); + for plan in &plans { plans_from_scanning.insert(plan.id()); } @@ -665,13 +660,23 @@ impl MultisigManager { }); for plan in &plans { - if plan.change == Some(N::change_address(plan.key)) { - // Assert these are only created during the expected step - match *step { - RotationStep::UseExisting => {} - RotationStep::NewAsChange | - RotationStep::ForwardFromExisting | - RotationStep::ClosingExisting => panic!("change was set to self despite rotating"), + // This first equality should 'never meaningfully' be false + // All created plans so far are by the existing multisig EXCEPT: + // A) If we created a refund plan from the new multisig (yet that wouldn't have change) + // B) The existing Scheduler returned a Plan for the new key (yet that happens with the SC + // scheduler, yet that doesn't have change) + // Despite being 'unnecessary' now, it's better to explicitly ensure and be robust + if plan.key == self.existing.as_ref().unwrap().key { + if 
let Some(change) = N::change_address(plan.key) { + if plan.change == Some(change) { + // Assert these (self-change) are only created during the expected step + match *step { + RotationStep::UseExisting => {} + RotationStep::NewAsChange | + RotationStep::ForwardFromExisting | + RotationStep::ClosingExisting => panic!("change was set to self despite rotating"), + } + } } } } @@ -853,15 +858,20 @@ impl MultisigManager { let plans_at_start = plans.len(); let (refund_to, instruction) = instruction_from_output::(output); if let Some(mut instruction) = instruction { - // Build a dedicated Plan forwarding this - let forward_plan = self.forward_plan(output.clone()); - plans.push(forward_plan.clone()); + let Some(shimmed_plan) = N::Scheduler::shim_forward_plan( + output.clone(), + self.new.as_ref().expect("forwarding from existing yet no new multisig").key, + ) else { + // If this network doesn't need forwarding, report the output now + return true; + }; + plans.push(PlanFromScanning::::Forward(output.clone())); // Set the instruction for this output to be returned // We need to set it under the amount it's forwarded with, so prepare its forwarding // TX to determine the fees involved let PreparedSend { tx, post_fee_branches: _, operating_costs } = - prepare_send(network, block_number, forward_plan, 0).await; + prepare_send(network, block_number, shimmed_plan, 0).await; // operating_costs should not increase in a forwarding TX assert_eq!(operating_costs, 0); @@ -872,12 +882,28 @@ impl MultisigManager { // letting it die out if let Some(tx) = &tx { instruction.balance.amount.0 -= tx.0.fee(); + + /* + Sending a Plan, with arbitrary data proxying the InInstruction, would require + adding a flow for networks which drop their data to still embed arbitrary data. + It'd also have edge cases causing failures (we'd need to manually provide the + origin if it was implied, which may exceed the encoding limit). + + Instead, we save the InInstruction as we scan this output. 
Then, when the + output is successfully forwarded, we simply read it from the local database. + This also saves the costs of embedding arbitrary data. + + Since we can't rely on the Eventuality system to detect if it's a forwarded + transaction, due to the asynchronicity of the Eventuality system, we instead + interpret a Forwarded output which has an amount associated with an + InInstruction which was forwarded as having been forwarded. + */ ForwardedOutputDb::save_forwarded_output(txn, &instruction); } } else if let Some(refund_to) = refund_to { if let Ok(refund_to) = refund_to.consume().try_into() { // Build a dedicated Plan refunding this - plans.push(Self::refund_plan(output.clone(), refund_to)); + plans.push(PlanFromScanning::Refund(output.clone(), refund_to)); } } @@ -909,7 +935,7 @@ impl MultisigManager { let Some(instruction) = instruction else { if let Some(refund_to) = refund_to { if let Ok(refund_to) = refund_to.consume().try_into() { - plans.push(Self::refund_plan(output.clone(), refund_to)); + plans.push(PlanFromScanning::Refund(output.clone(), refund_to)); } } continue; @@ -999,9 +1025,9 @@ impl MultisigManager { // This must be emitted before ScannerEvent::Block for all completions of known Eventualities // within the block. Unknown Eventualities may have their Completed events emitted after // ScannerEvent::Block however.
- ScannerEvent::Completed(key, block_number, id, tx) => { - ResolvedDb::resolve_plan::(txn, &key, id, &tx.id()); - (block_number, MultisigEvent::Completed(key, id, tx)) + ScannerEvent::Completed(key, block_number, id, tx_id, completion) => { + ResolvedDb::resolve_plan::(txn, &key, id, &tx_id); + (block_number, MultisigEvent::Completed(key, id, completion)) } }; diff --git a/processor/src/multisigs/scanner.rs b/processor/src/multisigs/scanner.rs index cefa8a255..1b25e1086 100644 --- a/processor/src/multisigs/scanner.rs +++ b/processor/src/multisigs/scanner.rs @@ -17,15 +17,26 @@ use tokio::{ use crate::{ Get, DbTxn, Db, - networks::{Output, Transaction, EventualitiesTracker, Block, Network}, + networks::{Output, Transaction, Eventuality, EventualitiesTracker, Block, Network}, }; #[derive(Clone, Debug)] pub enum ScannerEvent { // Block scanned - Block { is_retirement_block: bool, block: >::Id, outputs: Vec }, + Block { + is_retirement_block: bool, + block: >::Id, + outputs: Vec, + }, // Eventuality completion found on-chain - Completed(Vec, usize, [u8; 32], N::Transaction), + // TODO: Move this from a tuple + Completed( + Vec, + usize, + [u8; 32], + >::Id, + ::Completion, + ), } pub type ScannerEventChannel = mpsc::UnboundedReceiver>; @@ -268,6 +279,8 @@ impl ScannerHandle { activation_number: usize, key: ::G, ) { + info!("Registering key {} in scanner at {activation_number}", hex::encode(key.to_bytes())); + let mut scanner_lock = self.scanner.write().await; let scanner = scanner_lock.as_mut().unwrap(); assert!( @@ -275,8 +288,6 @@ impl ScannerHandle { "activation block of new keys was already scanned", ); - info!("Registering key {} in scanner at {activation_number}", hex::encode(key.to_bytes())); - if scanner.keys.is_empty() { assert!(scanner.ram_scanned.is_none()); scanner.ram_scanned = Some(activation_number); @@ -555,19 +566,25 @@ impl Scanner { } } - for (id, (block_number, tx)) in network + for (id, (block_number, tx, completion)) in network 
.get_eventuality_completions(scanner.eventualities.get_mut(&key_vec).unwrap(), &block) .await { info!( "eventuality {} resolved by {}, as found on chain", hex::encode(id), - hex::encode(&tx.id()) + hex::encode(tx.as_ref()) ); completion_block_numbers.push(block_number); // This must be before the mission of ScannerEvent::Block, per commentary in mod.rs - if !scanner.emit(ScannerEvent::Completed(key_vec.clone(), block_number, id, tx)) { + if !scanner.emit(ScannerEvent::Completed( + key_vec.clone(), + block_number, + id, + tx, + completion, + )) { return; } } diff --git a/processor/src/multisigs/scheduler/mod.rs b/processor/src/multisigs/scheduler/mod.rs new file mode 100644 index 000000000..26c940fe8 --- /dev/null +++ b/processor/src/multisigs/scheduler/mod.rs @@ -0,0 +1,96 @@ +use core::fmt::Debug; +use std::io; + +use ciphersuite::Ciphersuite; + +use serai_client::primitives::{NetworkId, Balance}; + +use crate::{networks::Network, Db, Payment, Plan}; + +pub(crate) mod utxo; +pub(crate) mod smart_contract; + +pub trait SchedulerAddendum: Send + Clone + PartialEq + Debug { + fn read(reader: &mut R) -> io::Result; + fn write(&self, writer: &mut W) -> io::Result<()>; +} + +impl SchedulerAddendum for () { + fn read(_: &mut R) -> io::Result { + Ok(()) + } + fn write(&self, _: &mut W) -> io::Result<()> { + Ok(()) + } +} + +pub trait Scheduler: Sized + Clone + PartialEq + Debug { + type Addendum: SchedulerAddendum; + + /// Check if this Scheduler is empty. + fn empty(&self) -> bool; + + /// Create a new Scheduler. + fn new( + txn: &mut D::Transaction<'_>, + key: ::G, + network: NetworkId, + ) -> Self; + + /// Load a Scheduler from the DB. + fn from_db( + db: &D, + key: ::G, + network: NetworkId, + ) -> io::Result; + + /// Check if a branch is usable. + fn can_use_branch(&self, balance: Balance) -> bool; + + /// Schedule a series of outputs/payments. 
+ fn schedule( + &mut self, + txn: &mut D::Transaction<'_>, + utxos: Vec, + payments: Vec>, + // TODO: Tighten this to multisig_for_any_change + key_for_any_change: ::G, + force_spend: bool, + ) -> Vec>; + + /// Consume all payments still pending within this Scheduler, without scheduling them. + fn consume_payments(&mut self, txn: &mut D::Transaction<'_>) -> Vec>; + + /// Note a branch output as having been created, with the amount it was actually created with, + /// or not having been created due to being too small. + fn created_output( + &mut self, + txn: &mut D::Transaction<'_>, + expected: u64, + actual: Option, + ); + + /// Refund a specific output. + fn refund_plan( + &mut self, + txn: &mut D::Transaction<'_>, + output: N::Output, + refund_to: N::Address, + ) -> Plan; + + /// Shim the forwarding Plan as necessary to obtain a fee estimate. + /// + /// If this Scheduler is for a Network which requires forwarding, this must return Some with a + /// plan with identical fee behavior. If forwarding isn't necessary, returns None. + fn shim_forward_plan(output: N::Output, to: ::G) -> Option>; + + /// Forward a specific output to the new multisig. + /// + /// Returns None if no forwarding is necessary. Must return Some if forwarding is necessary. 
+ fn forward_plan( + &mut self, + txn: &mut D::Transaction<'_>, + output: N::Output, + to: ::G, + ) -> Option>; +} diff --git a/processor/src/multisigs/scheduler/smart_contract.rs b/processor/src/multisigs/scheduler/smart_contract.rs new file mode 100644 index 000000000..3da8acf48 --- /dev/null +++ b/processor/src/multisigs/scheduler/smart_contract.rs @@ -0,0 +1,208 @@ +use std::{io, collections::HashSet}; + +use ciphersuite::{group::GroupEncoding, Ciphersuite}; + +use serai_client::primitives::{NetworkId, Coin, Balance}; + +use crate::{ + Get, DbTxn, Db, Payment, Plan, create_db, + networks::{Output, Network}, + multisigs::scheduler::{SchedulerAddendum, Scheduler as SchedulerTrait}, +}; + +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct Scheduler { + key: ::G, + coins: HashSet, + rotated: bool, +} + +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub enum Addendum { + Nonce(u64), + RotateTo { nonce: u64, new_key: ::G }, +} + +impl SchedulerAddendum for Addendum { + fn read(reader: &mut R) -> io::Result { + let mut kind = [0xff]; + reader.read_exact(&mut kind)?; + match kind[0] { + 0 => { + let mut nonce = [0; 8]; + reader.read_exact(&mut nonce)?; + Ok(Addendum::Nonce(u64::from_le_bytes(nonce))) + } + 1 => { + let mut nonce = [0; 8]; + reader.read_exact(&mut nonce)?; + let nonce = u64::from_le_bytes(nonce); + + let new_key = N::Curve::read_G(reader)?; + Ok(Addendum::RotateTo { nonce, new_key }) + } + _ => Err(io::Error::other("reading unknown Addendum type"))?, + } + } + fn write(&self, writer: &mut W) -> io::Result<()> { + match self { + Addendum::Nonce(nonce) => { + writer.write_all(&[0])?; + writer.write_all(&nonce.to_le_bytes()) + } + Addendum::RotateTo { nonce, new_key } => { + writer.write_all(&[1])?; + writer.write_all(&nonce.to_le_bytes())?; + writer.write_all(new_key.to_bytes().as_ref()) + } + } + } +} + +create_db! 
{ + SchedulerDb { + LastNonce: () -> u64, + RotatedTo: (key: &[u8]) -> Vec, + } +} + +impl> SchedulerTrait for Scheduler { + type Addendum = Addendum; + + /// Check if this Scheduler is empty. + fn empty(&self) -> bool { + self.rotated + } + + /// Create a new Scheduler. + fn new( + _txn: &mut D::Transaction<'_>, + key: ::G, + network: NetworkId, + ) -> Self { + assert!(N::branch_address(key).is_none()); + assert!(N::change_address(key).is_none()); + assert!(N::forward_address(key).is_none()); + + Scheduler { key, coins: network.coins().iter().copied().collect(), rotated: false } + } + + /// Load a Scheduler from the DB. + fn from_db( + db: &D, + key: ::G, + network: NetworkId, + ) -> io::Result { + Ok(Scheduler { + key, + coins: network.coins().iter().copied().collect(), + rotated: RotatedTo::get(db, key.to_bytes().as_ref()).is_some(), + }) + } + + fn can_use_branch(&self, _balance: Balance) -> bool { + false + } + + fn schedule( + &mut self, + txn: &mut D::Transaction<'_>, + utxos: Vec, + payments: Vec>, + key_for_any_change: ::G, + force_spend: bool, + ) -> Vec> { + for utxo in utxos { + assert!(self.coins.contains(&utxo.balance().coin)); + } + + let mut nonce = LastNonce::get(txn).unwrap_or(1); + let mut plans = vec![]; + for chunk in payments.as_slice().chunks(N::MAX_OUTPUTS) { + // Once we rotate, all further payments should be scheduled via the new multisig + assert!(!self.rotated); + plans.push(Plan { + key: self.key, + inputs: vec![], + payments: chunk.to_vec(), + change: None, + scheduler_addendum: Addendum::Nonce(nonce), + }); + nonce += 1; + } + + // If we're supposed to rotate to the new key, create an empty Plan which will signify the key + // update + if force_spend && (!self.rotated) { + plans.push(Plan { + key: self.key, + inputs: vec![], + payments: vec![], + change: None, + scheduler_addendum: Addendum::RotateTo { nonce, new_key: key_for_any_change }, + }); + nonce += 1; + self.rotated = true; + RotatedTo::set( + txn, + 
self.key.to_bytes().as_ref(), + &key_for_any_change.to_bytes().as_ref().to_vec(), + ); + } + + LastNonce::set(txn, &nonce); + + plans + } + + fn consume_payments(&mut self, _txn: &mut D::Transaction<'_>) -> Vec> { + vec![] + } + + fn created_output( + &mut self, + _txn: &mut D::Transaction<'_>, + _expected: u64, + _actual: Option, + ) { + panic!("Smart Contract Scheduler created a Branch output") + } + + /// Refund a specific output. + fn refund_plan( + &mut self, + txn: &mut D::Transaction<'_>, + output: N::Output, + refund_to: N::Address, + ) -> Plan { + let current_key = RotatedTo::get(txn, self.key.to_bytes().as_ref()) + .and_then(|key_bytes| ::read_G(&mut key_bytes.as_slice()).ok()) + .unwrap_or(self.key); + + let nonce = LastNonce::get(txn).map_or(1, |nonce| nonce + 1); + LastNonce::set(txn, &(nonce + 1)); + Plan { + key: current_key, + inputs: vec![], + payments: vec![Payment { address: refund_to, data: None, balance: output.balance() }], + change: None, + scheduler_addendum: Addendum::Nonce(nonce), + } + } + + fn shim_forward_plan(_output: N::Output, _to: ::G) -> Option> { + None + } + + /// Forward a specific output to the new multisig. + /// + /// Returns None if no forwarding is necessary. 
+ fn forward_plan( + &mut self, + _txn: &mut D::Transaction<'_>, + _output: N::Output, + _to: ::G, + ) -> Option> { + None + } +} diff --git a/processor/src/multisigs/scheduler.rs b/processor/src/multisigs/scheduler/utxo.rs similarity index 78% rename from processor/src/multisigs/scheduler.rs rename to processor/src/multisigs/scheduler/utxo.rs index abc81a80b..1865cab91 100644 --- a/processor/src/multisigs/scheduler.rs +++ b/processor/src/multisigs/scheduler/utxo.rs @@ -5,16 +5,17 @@ use std::{ use ciphersuite::{group::GroupEncoding, Ciphersuite}; -use serai_client::primitives::{Coin, Amount, Balance}; +use serai_client::primitives::{NetworkId, Coin, Amount, Balance}; use crate::{ - networks::{OutputType, Output, Network}, DbTxn, Db, Payment, Plan, + networks::{OutputType, Output, Network, UtxoNetwork}, + multisigs::scheduler::Scheduler as SchedulerTrait, }; -/// Stateless, deterministic output/payment manager. -#[derive(PartialEq, Eq, Debug)] -pub struct Scheduler { +/// Deterministic output/payment manager. 
+#[derive(Clone, PartialEq, Eq, Debug)] +pub struct Scheduler { key: ::G, coin: Coin, @@ -46,7 +47,7 @@ fn scheduler_key(key: &G) -> Vec { D::key(b"SCHEDULER", b"scheduler", key.to_bytes()) } -impl Scheduler { +impl> Scheduler { pub fn empty(&self) -> bool { self.queued_plans.is_empty() && self.plans.is_empty() && @@ -144,8 +145,18 @@ impl Scheduler { pub fn new( txn: &mut D::Transaction<'_>, key: ::G, - coin: Coin, + network: NetworkId, ) -> Self { + assert!(N::branch_address(key).is_some()); + assert!(N::change_address(key).is_some()); + assert!(N::forward_address(key).is_some()); + + let coin = { + let coins = network.coins(); + assert_eq!(coins.len(), 1); + coins[0] + }; + let res = Scheduler { key, coin, @@ -159,7 +170,17 @@ impl Scheduler { res } - pub fn from_db(db: &D, key: ::G, coin: Coin) -> io::Result { + pub fn from_db( + db: &D, + key: ::G, + network: NetworkId, + ) -> io::Result { + let coin = { + let coins = network.coins(); + assert_eq!(coins.len(), 1); + coins[0] + }; + let scheduler = db.get(scheduler_key::(&key)).unwrap_or_else(|| { panic!("loading scheduler from DB without scheduler for {}", hex::encode(key.to_bytes())) }); @@ -201,7 +222,7 @@ impl Scheduler { amount }; - let branch_address = N::branch_address(self.key); + let branch_address = N::branch_address(self.key).unwrap(); // If we have more payments than we can handle in a single TX, create plans for them // TODO2: This isn't perfect. For 258 outputs, and a MAX_OUTPUTS of 16, this will create: @@ -237,7 +258,8 @@ impl Scheduler { key: self.key, inputs, payments, - change: Some(N::change_address(key_for_any_change)).filter(|_| change), + change: Some(N::change_address(key_for_any_change).unwrap()).filter(|_| change), + scheduler_addendum: (), } } @@ -305,7 +327,7 @@ impl Scheduler { its *own* branch address, since created_output is called on the signer's Scheduler. 
*/ { - let branch_address = N::branch_address(self.key); + let branch_address = N::branch_address(self.key).unwrap(); payments = payments.drain(..).filter(|payment| payment.address != branch_address).collect::>(); } @@ -357,7 +379,8 @@ impl Scheduler { key: self.key, inputs: chunk, payments: vec![], - change: Some(N::change_address(key_for_any_change)), + change: Some(N::change_address(key_for_any_change).unwrap()), + scheduler_addendum: (), }) } @@ -403,12 +426,13 @@ impl Scheduler { key: self.key, inputs: self.utxos.drain(..).collect::>(), payments: vec![], - change: Some(N::change_address(key_for_any_change)), + change: Some(N::change_address(key_for_any_change).unwrap()), + scheduler_addendum: (), }); } // If there's a UTXO to restore, restore it - // This is down now as if there is a to_restore output, and it was inserted into self.utxos + // This is done now as if there is a to_restore output, and it was inserted into self.utxos // earlier, self.utxos.len() may become `N::MAX_INPUTS + 1` // The prior block requires the len to be `<= N::MAX_INPUTS` if let Some(to_restore) = to_restore { @@ -418,9 +442,10 @@ impl Scheduler { txn.put(scheduler_key::(&self.key), self.serialize()); log::info!( - "created {} plans containing {} payments to sign", + "created {} plans containing {} payments to sign, with {} payments pending scheduling", plans.len(), payments_at_start - self.payments.len(), + self.payments.len(), ); plans } @@ -435,9 +460,6 @@ impl Scheduler { // Note a branch output as having been created, with the amount it was actually created with, // or not having been created due to being too small - // This can be called whenever, so long as it's properly ordered - // (it's independent to Serai/the chain we're scheduling over, yet still expects outputs to be - // created in the same order Plans are returned in) pub fn created_output( &mut self, txn: &mut D::Transaction<'_>, @@ -501,3 +523,109 @@ impl Scheduler { txn.put(scheduler_key::(&self.key), 
self.serialize()); } } + +impl> SchedulerTrait for Scheduler { + type Addendum = (); + + /// Check if this Scheduler is empty. + fn empty(&self) -> bool { + Scheduler::empty(self) + } + + /// Create a new Scheduler. + fn new( + txn: &mut D::Transaction<'_>, + key: ::G, + network: NetworkId, + ) -> Self { + Scheduler::new::(txn, key, network) + } + + /// Load a Scheduler from the DB. + fn from_db( + db: &D, + key: ::G, + network: NetworkId, + ) -> io::Result { + Scheduler::from_db::(db, key, network) + } + + /// Check if a branch is usable. + fn can_use_branch(&self, balance: Balance) -> bool { + Scheduler::can_use_branch(self, balance) + } + + /// Schedule a series of outputs/payments. + fn schedule( + &mut self, + txn: &mut D::Transaction<'_>, + utxos: Vec, + payments: Vec>, + key_for_any_change: ::G, + force_spend: bool, + ) -> Vec> { + Scheduler::schedule::(self, txn, utxos, payments, key_for_any_change, force_spend) + } + + /// Consume all payments still pending within this Scheduler, without scheduling them. + fn consume_payments(&mut self, txn: &mut D::Transaction<'_>) -> Vec> { + Scheduler::consume_payments::(self, txn) + } + + /// Note a branch output as having been created, with the amount it was actually created with, + /// or not having been created due to being too small. + // TODO: Move this to Balance. 
+ fn created_output( + &mut self, + txn: &mut D::Transaction<'_>, + expected: u64, + actual: Option, + ) { + Scheduler::created_output::(self, txn, expected, actual) + } + + fn refund_plan( + &mut self, + _: &mut D::Transaction<'_>, + output: N::Output, + refund_to: N::Address, + ) -> Plan { + let output_id = output.id().as_ref().to_vec(); + let res = Plan { + key: output.key(), + // Uses a payment as this will still be successfully sent due to fee amortization, + // and because change is currently always a Serai key + payments: vec![Payment { address: refund_to, data: None, balance: output.balance() }], + inputs: vec![output], + change: None, + scheduler_addendum: (), + }; + log::info!("refund plan for {} has ID {}", hex::encode(output_id), hex::encode(res.id())); + res + } + + fn shim_forward_plan(output: N::Output, to: ::G) -> Option> { + Some(Plan { + key: output.key(), + payments: vec![Payment { + address: N::forward_address(to).unwrap(), + data: None, + balance: output.balance(), + }], + inputs: vec![output], + change: None, + scheduler_addendum: (), + }) + } + + fn forward_plan( + &mut self, + _: &mut D::Transaction<'_>, + output: N::Output, + to: ::G, + ) -> Option> { + assert_eq!(self.key, output.key()); + // Call shim as shim returns the actual + Self::shim_forward_plan(output, to) + } +} diff --git a/processor/src/networks/bitcoin.rs b/processor/src/networks/bitcoin.rs index 606a3e123..183444b12 100644 --- a/processor/src/networks/bitcoin.rs +++ b/processor/src/networks/bitcoin.rs @@ -20,12 +20,11 @@ use bitcoin_serai::{ key::{Parity, XOnlyPublicKey}, consensus::{Encodable, Decodable}, script::Instruction, - address::{NetworkChecked, Address as BAddress}, - Transaction, Block, Network as BNetwork, ScriptBuf, + Transaction, Block, ScriptBuf, opcodes::all::{OP_SHA256, OP_EQUALVERIFY}, }, wallet::{ - tweak_keys, address_payload, ReceivedOutput, Scanner, TransactionError, + tweak_keys, p2tr_script_buf, ReceivedOutput, Scanner, TransactionError, 
SignableTransaction as BSignableTransaction, TransactionMachine, }, rpc::{RpcError, Rpc}, @@ -52,9 +51,10 @@ use crate::{ networks::{ NetworkError, Block as BlockTrait, OutputType, Output as OutputTrait, Transaction as TransactionTrait, SignableTransaction as SignableTransactionTrait, - Eventuality as EventualityTrait, EventualitiesTracker, Network, + Eventuality as EventualityTrait, EventualitiesTracker, Network, UtxoNetwork, }, Payment, + multisigs::scheduler::utxo::Scheduler, }; #[derive(Clone, PartialEq, Eq, Debug)] @@ -174,18 +174,10 @@ pub struct Fee(u64); impl TransactionTrait for Transaction { type Id = [u8; 32]; fn id(&self) -> Self::Id { - let mut hash = *self.txid().as_raw_hash().as_byte_array(); + let mut hash = *self.compute_txid().as_raw_hash().as_byte_array(); hash.reverse(); hash } - fn serialize(&self) -> Vec { - let mut buf = vec![]; - self.consensus_encode(&mut buf).unwrap(); - buf - } - fn read(reader: &mut R) -> io::Result { - Transaction::consensus_decode(reader).map_err(|e| io::Error::other(format!("{e}"))) - } #[cfg(test)] async fn fee(&self, network: &Bitcoin) -> u64 { @@ -209,7 +201,23 @@ impl TransactionTrait for Transaction { #[derive(Clone, PartialEq, Eq, Debug)] pub struct Eventuality([u8; 32]); +#[derive(Clone, PartialEq, Eq, Default, Debug)] +pub struct EmptyClaim; +impl AsRef<[u8]> for EmptyClaim { + fn as_ref(&self) -> &[u8] { + &[] + } +} +impl AsMut<[u8]> for EmptyClaim { + fn as_mut(&mut self) -> &mut [u8] { + &mut [] + } +} + impl EventualityTrait for Eventuality { + type Claim = EmptyClaim; + type Completion = Transaction; + fn lookup(&self) -> Vec { self.0.to_vec() } @@ -224,6 +232,19 @@ impl EventualityTrait for Eventuality { fn serialize(&self) -> Vec { self.0.to_vec() } + + fn claim(_: &Transaction) -> EmptyClaim { + EmptyClaim + } + fn serialize_completion(completion: &Transaction) -> Vec { + let mut buf = vec![]; + completion.consensus_encode(&mut buf).unwrap(); + buf + } + fn read_completion(reader: &mut R) -> 
io::Result { + Transaction::consensus_decode(&mut io::BufReader::with_capacity(0, reader)) + .map_err(|e| io::Error::other(format!("{e}"))) + } } #[derive(Clone, Debug)] @@ -374,8 +395,12 @@ impl Bitcoin { for input in &tx.input { let mut input_tx = input.previous_output.txid.to_raw_hash().to_byte_array(); input_tx.reverse(); - in_value += self.get_transaction(&input_tx).await?.output - [usize::try_from(input.previous_output.vout).unwrap()] + in_value += self + .rpc + .get_transaction(&input_tx) + .await + .map_err(|_| NetworkError::ConnectionError)? + .output[usize::try_from(input.previous_output.vout).unwrap()] .value .to_sat(); } @@ -428,7 +453,7 @@ impl Bitcoin { match BSignableTransaction::new( inputs.iter().map(|input| input.output.clone()).collect(), &payments, - change.as_ref().map(AsRef::as_ref), + change.clone().map(Into::into), None, fee.0, ) { @@ -492,7 +517,7 @@ impl Bitcoin { if witness.len() >= 2 { let redeem_script = ScriptBuf::from_bytes(witness.last().unwrap().clone()); if Self::segwit_data_pattern(&redeem_script) == Some(true) { - data = witness[witness.len() - 2].clone(); // len() - 1 is the redeem_script + data.clone_from(&witness[witness.len() - 2]); // len() - 1 is the redeem_script break; } } @@ -509,12 +534,14 @@ impl Bitcoin { input_index: usize, private_key: &PrivateKey, ) -> ScriptBuf { + use bitcoin_serai::bitcoin::{Network as BNetwork, Address as BAddress}; + let public_key = PublicKey::from_private_key(SECP256K1, private_key); - let main_addr = BAddress::p2pkh(&public_key, BNetwork::Regtest); + let main_addr = BAddress::p2pkh(public_key, BNetwork::Regtest); let mut der = SECP256K1 .sign_ecdsa_low_r( - &Message::from( + &Message::from_digest_slice( SighashCache::new(tx) .legacy_signature_hash( input_index, @@ -522,8 +549,10 @@ impl Bitcoin { EcdsaSighashType::All.to_u32(), ) .unwrap() - .to_raw_hash(), - ), + .to_raw_hash() + .as_ref(), + ) + .unwrap(), &private_key.inner, ) .serialize_der() @@ -537,6 +566,27 @@ impl Bitcoin { } } +// 
Bitcoin has a max weight of 400,000 (MAX_STANDARD_TX_WEIGHT) +// A non-SegWit TX will have 4 weight units per byte, leaving a max size of 100,000 bytes +// While our inputs are entirely SegWit, such fine tuning is not necessary and could create +// issues in the future (if the size decreases or we misevaluate it) +// It also offers a minimal amount of benefit when we are able to logarithmically accumulate +// inputs +// For 128-byte inputs (36-byte output specification, 64-byte signature, whatever overhead) and +// 64-byte outputs (40-byte script, 8-byte amount, whatever overhead), they together take up 192 +// bytes +// 100,000 / 192 = 520 +// 520 * 192 leaves 160 bytes of overhead for the transaction structure itself +const MAX_INPUTS: usize = 520; +const MAX_OUTPUTS: usize = 520; + +fn address_from_key(key: ProjectivePoint) -> Address { + Address::new( + p2tr_script_buf(key).expect("creating address from key which isn't properly tweaked"), + ) + .expect("couldn't create Serai-representable address for P2TR script") +} + #[async_trait] impl Network for Bitcoin { type Curve = Secp256k1; @@ -549,6 +599,8 @@ impl Network for Bitcoin { type Eventuality = Eventuality; type TransactionMachine = TransactionMachine; + type Scheduler = Scheduler; + type Address = Address; const NETWORK: NetworkId = NetworkId::Bitcoin; @@ -598,19 +650,7 @@ impl Network for Bitcoin { // aggregation TX const COST_TO_AGGREGATE: u64 = 800; - // Bitcoin has a max weight of 400,000 (MAX_STANDARD_TX_WEIGHT) - // A non-SegWit TX will have 4 weight units per byte, leaving a max size of 100,000 bytes - // While our inputs are entirely SegWit, such fine tuning is not necessary and could create - // issues in the future (if the size decreases or we misevaluate it) - // It also offers a minimal amount of benefit when we are able to logarithmically accumulate - // inputs - // For 128-byte inputs (36-byte output specification, 64-byte signature, whatever overhead) and - // 64-byte outputs (40-byte 
script, 8-byte amount, whatever overhead), they together take up 192 - // bytes - // 100,000 / 192 = 520 - // 520 * 192 leaves 160 bytes of overhead for the transaction structure itself - const MAX_INPUTS: usize = 520; - const MAX_OUTPUTS: usize = 520; + const MAX_OUTPUTS: usize = MAX_OUTPUTS; fn tweak_keys(keys: &mut ThresholdKeys) { *keys = tweak_keys(keys); @@ -618,24 +658,24 @@ impl Network for Bitcoin { scanner(keys.group_key()); } - fn external_address(key: ProjectivePoint) -> Address { - Address::new(BAddress::::new(BNetwork::Bitcoin, address_payload(key).unwrap())) - .unwrap() + #[cfg(test)] + async fn external_address(&self, key: ProjectivePoint) -> Address { + address_from_key(key) } - fn branch_address(key: ProjectivePoint) -> Address { + fn branch_address(key: ProjectivePoint) -> Option
{ let (_, offsets, _) = scanner(key); - Self::external_address(key + (ProjectivePoint::GENERATOR * offsets[&OutputType::Branch])) + Some(address_from_key(key + (ProjectivePoint::GENERATOR * offsets[&OutputType::Branch]))) } - fn change_address(key: ProjectivePoint) -> Address { + fn change_address(key: ProjectivePoint) -> Option
{ let (_, offsets, _) = scanner(key); - Self::external_address(key + (ProjectivePoint::GENERATOR * offsets[&OutputType::Change])) + Some(address_from_key(key + (ProjectivePoint::GENERATOR * offsets[&OutputType::Change]))) } - fn forward_address(key: ProjectivePoint) -> Address { + fn forward_address(key: ProjectivePoint) -> Option
{ let (_, offsets, _) = scanner(key); - Self::external_address(key + (ProjectivePoint::GENERATOR * offsets[&OutputType::Forwarded])) + Some(address_from_key(key + (ProjectivePoint::GENERATOR * offsets[&OutputType::Forwarded]))) } async fn get_latest_block_number(&self) -> Result { @@ -682,7 +722,7 @@ impl Network for Bitcoin { spent_tx.reverse(); let mut tx; while { - tx = self.get_transaction(&spent_tx).await; + tx = self.rpc.get_transaction(&spent_tx).await; tx.is_err() } { log::error!("couldn't get transaction from bitcoin node: {tx:?}"); @@ -690,16 +730,14 @@ impl Network for Bitcoin { } tx.unwrap().output.swap_remove(usize::try_from(input.previous_output.vout).unwrap()) }; - BAddress::from_script(&spent_output.script_pubkey, BNetwork::Bitcoin) - .ok() - .and_then(Address::new) + Address::new(spent_output.script_pubkey) }; let data = Self::extract_serai_data(tx); for output in &mut outputs { if output.kind == OutputType::External { - output.data = data.clone(); + output.data.clone_from(&data); } - output.presumed_origin = presumed_origin.clone(); + output.presumed_origin.clone_from(&presumed_origin); } } @@ -710,7 +748,7 @@ impl Network for Bitcoin { &self, eventualities: &mut EventualitiesTracker, block: &Self::Block, - ) -> HashMap<[u8; 32], (usize, Transaction)> { + ) -> HashMap<[u8; 32], (usize, [u8; 32], Transaction)> { let mut res = HashMap::new(); if eventualities.map.is_empty() { return res; @@ -719,11 +757,11 @@ impl Network for Bitcoin { fn check_block( eventualities: &mut EventualitiesTracker, block: &Block, - res: &mut HashMap<[u8; 32], (usize, Transaction)>, + res: &mut HashMap<[u8; 32], (usize, [u8; 32], Transaction)>, ) { for tx in &block.txdata[1 ..] 
{ if let Some((plan, _)) = eventualities.map.remove(tx.id().as_slice()) { - res.insert(plan, (eventualities.block_number, tx.clone())); + res.insert(plan, (eventualities.block_number, tx.id(), tx.clone())); } } @@ -770,7 +808,6 @@ impl Network for Bitcoin { async fn needed_fee( &self, block_number: usize, - _: &[u8; 32], inputs: &[Output], payments: &[Payment], change: &Option
, @@ -787,9 +824,11 @@ impl Network for Bitcoin { &self, block_number: usize, plan_id: &[u8; 32], + _key: ProjectivePoint, inputs: &[Output], payments: &[Payment], change: &Option
, + (): &(), ) -> Result, NetworkError> { Ok(self.make_signable_transaction(block_number, inputs, payments, change, false).await?.map( |signable| { @@ -803,7 +842,7 @@ impl Network for Bitcoin { )) } - async fn attempt_send( + async fn attempt_sign( &self, keys: ThresholdKeys, transaction: Self::SignableTransaction, @@ -817,23 +856,25 @@ impl Network for Bitcoin { ) } - async fn publish_transaction(&self, tx: &Self::Transaction) -> Result<(), NetworkError> { + async fn publish_completion(&self, tx: &Transaction) -> Result<(), NetworkError> { match self.rpc.send_raw_transaction(tx).await { Ok(_) => (), Err(RpcError::ConnectionError) => Err(NetworkError::ConnectionError)?, // TODO: Distinguish already in pool vs double spend (other signing attempt succeeded) vs // invalid transaction - Err(e) => panic!("failed to publish TX {}: {e}", tx.txid()), + Err(e) => panic!("failed to publish TX {}: {e}", tx.compute_txid()), } Ok(()) } - async fn get_transaction(&self, id: &[u8; 32]) -> Result { - self.rpc.get_transaction(id).await.map_err(|_| NetworkError::ConnectionError) - } - - fn confirm_completion(&self, eventuality: &Self::Eventuality, tx: &Transaction) -> bool { - eventuality.0 == tx.id() + async fn confirm_completion( + &self, + eventuality: &Self::Eventuality, + _: &EmptyClaim, + ) -> Result, NetworkError> { + Ok(Some( + self.rpc.get_transaction(&eventuality.0).await.map_err(|_| NetworkError::ConnectionError)?, + )) } #[cfg(test)] @@ -841,8 +882,24 @@ impl Network for Bitcoin { self.rpc.get_block_number(id).await.unwrap() } + #[cfg(test)] + async fn check_eventuality_by_claim( + &self, + eventuality: &Self::Eventuality, + _: &EmptyClaim, + ) -> bool { + self.rpc.get_transaction(&eventuality.0).await.is_ok() + } + + #[cfg(test)] + async fn get_transaction_by_eventuality(&self, _: usize, id: &Eventuality) -> Transaction { + self.rpc.get_transaction(&id.0).await.unwrap() + } + #[cfg(test)] async fn mine_block(&self) { + use bitcoin_serai::bitcoin::{Network as BNetwork, 
Address as BAddress}; + self .rpc .rpc_call::>( @@ -855,10 +912,12 @@ impl Network for Bitcoin { #[cfg(test)] async fn test_send(&self, address: Address) -> Block { + use bitcoin_serai::bitcoin::{Network as BNetwork, Address as BAddress}; + let secret_key = SecretKey::new(&mut rand_core::OsRng); let private_key = PrivateKey::new(secret_key, BNetwork::Regtest); let public_key = PublicKey::from_private_key(SECP256K1, &private_key); - let main_addr = BAddress::p2pkh(&public_key, BNetwork::Regtest); + let main_addr = BAddress::p2pkh(public_key, BNetwork::Regtest); let new_block = self.get_latest_block_number().await.unwrap() + 1; self @@ -872,14 +931,14 @@ impl Network for Bitcoin { version: Version(2), lock_time: LockTime::ZERO, input: vec![TxIn { - previous_output: OutPoint { txid: tx.txid(), vout: 0 }, + previous_output: OutPoint { txid: tx.compute_txid(), vout: 0 }, script_sig: Script::new().into(), sequence: Sequence(u32::MAX), witness: Witness::default(), }], output: vec![TxOut { value: tx.output[0].value - BAmount::from_sat(10000), - script_pubkey: address.as_ref().script_pubkey(), + script_pubkey: address.clone().into(), }], }; tx.input[0].script_sig = Self::sign_btc_input_for_p2pkh(&tx, 0, &private_key); @@ -892,3 +951,7 @@ impl Network for Bitcoin { self.get_block(block).await.unwrap() } } + +impl UtxoNetwork for Bitcoin { + const MAX_INPUTS: usize = MAX_INPUTS; +} diff --git a/processor/src/networks/ethereum.rs b/processor/src/networks/ethereum.rs new file mode 100644 index 000000000..b1965bae7 --- /dev/null +++ b/processor/src/networks/ethereum.rs @@ -0,0 +1,939 @@ +use core::{fmt, time::Duration}; +use std::{ + sync::Arc, + collections::{HashSet, HashMap}, + io, +}; + +use async_trait::async_trait; + +use ciphersuite::{group::GroupEncoding, Ciphersuite, Secp256k1}; +use frost::ThresholdKeys; + +use ethereum_serai::{ + alloy::{ + primitives::U256, + rpc_types::{BlockNumberOrTag, Transaction}, + simple_request_transport::SimpleRequest, + 
rpc_client::ClientBuilder, + provider::{Provider, RootProvider}, + }, + crypto::{PublicKey, Signature}, + erc20::Erc20, + deployer::Deployer, + router::{Router, Coin as EthereumCoin, InInstruction as EthereumInInstruction}, + machine::*, +}; +#[cfg(test)] +use ethereum_serai::alloy::primitives::B256; + +use tokio::{ + time::sleep, + sync::{RwLock, RwLockReadGuard}, +}; +#[cfg(not(test))] +use tokio::{ + io::{AsyncReadExt, AsyncWriteExt}, + net::TcpStream, +}; + +use serai_client::{ + primitives::{Coin, Amount, Balance, NetworkId}, + validator_sets::primitives::Session, +}; + +use crate::{ + Db, Payment, + networks::{ + OutputType, Output, Transaction as TransactionTrait, SignableTransaction, Block, + Eventuality as EventualityTrait, EventualitiesTracker, NetworkError, Network, + }, + key_gen::NetworkKeyDb, + multisigs::scheduler::{ + Scheduler as SchedulerTrait, + smart_contract::{Addendum, Scheduler}, + }, +}; + +#[cfg(not(test))] +const DAI: [u8; 20] = + match const_hex::const_decode_to_array(b"0x6B175474E89094C44Da98b954EedeAC495271d0F") { + Ok(res) => res, + Err(_) => panic!("invalid non-test DAI hex address"), + }; +#[cfg(test)] // TODO +const DAI: [u8; 20] = + match const_hex::const_decode_to_array(b"0000000000000000000000000000000000000000") { + Ok(res) => res, + Err(_) => panic!("invalid test DAI hex address"), + }; + +fn coin_to_serai_coin(coin: &EthereumCoin) -> Option { + match coin { + EthereumCoin::Ether => Some(Coin::Ether), + EthereumCoin::Erc20(token) => { + if *token == DAI { + return Some(Coin::Dai); + } + None + } + } +} + +fn amount_to_serai_amount(coin: Coin, amount: U256) -> Amount { + assert_eq!(coin.network(), NetworkId::Ethereum); + assert_eq!(coin.decimals(), 8); + // Remove 10 decimals so we go from 18 decimals to 8 decimals + let divisor = U256::from(10_000_000_000u64); + // This is valid up to 184b, which is assumed for the coins allowed + Amount(u64::try_from(amount / divisor).unwrap()) +} + +fn balance_to_ethereum_amount(balance: 
Balance) -> U256 { + assert_eq!(balance.coin.network(), NetworkId::Ethereum); + assert_eq!(balance.coin.decimals(), 8); + // Restore 10 decimals so we go from 8 decimals to 18 decimals + let factor = U256::from(10_000_000_000u64); + U256::from(balance.amount.0) * factor +} + +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub struct Address(pub [u8; 20]); +impl TryFrom> for Address { + type Error = (); + fn try_from(bytes: Vec) -> Result { + if bytes.len() != 20 { + Err(())?; + } + let mut res = [0; 20]; + res.copy_from_slice(&bytes); + Ok(Address(res)) + } +} +impl TryInto> for Address { + type Error = (); + fn try_into(self) -> Result, ()> { + Ok(self.0.to_vec()) + } +} + +impl fmt::Display for Address { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + ethereum_serai::alloy::primitives::Address::from(self.0).fmt(f) + } +} + +impl SignableTransaction for RouterCommand { + fn fee(&self) -> u64 { + // Return a fee of 0 as we'll handle amortization on our end + 0 + } +} + +#[async_trait] +impl TransactionTrait> for Transaction { + type Id = [u8; 32]; + fn id(&self) -> Self::Id { + self.hash.0 + } + + #[cfg(test)] + async fn fee(&self, _network: &Ethereum) -> u64 { + // Return a fee of 0 as we'll handle amortization on our end + 0 + } +} + +// We use 32-block Epochs to represent blocks. +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub struct Epoch { + // The hash of the block which ended the prior Epoch. + prior_end_hash: [u8; 32], + // The first block number within this Epoch. + start: u64, + // The hash of the last block within this Epoch. + end_hash: [u8; 32], + // The monotonic time for this Epoch. 
+ time: u64, +} + +impl Epoch { + fn end(&self) -> u64 { + self.start + 31 + } +} + +#[async_trait] +impl Block> for Epoch { + type Id = [u8; 32]; + fn id(&self) -> [u8; 32] { + self.end_hash + } + fn parent(&self) -> [u8; 32] { + self.prior_end_hash + } + async fn time(&self, _: &Ethereum) -> u64 { + self.time + } +} + +impl Output> for EthereumInInstruction { + type Id = [u8; 32]; + + fn kind(&self) -> OutputType { + OutputType::External + } + + fn id(&self) -> Self::Id { + let mut id = [0; 40]; + id[.. 32].copy_from_slice(&self.id.0); + id[32 ..].copy_from_slice(&self.id.1.to_le_bytes()); + *ethereum_serai::alloy::primitives::keccak256(id) + } + fn tx_id(&self) -> [u8; 32] { + self.id.0 + } + fn key(&self) -> ::G { + self.key_at_end_of_block + } + + fn presumed_origin(&self) -> Option
{ + Some(Address(self.from)) + } + + fn balance(&self) -> Balance { + let coin = coin_to_serai_coin(&self.coin).unwrap_or_else(|| { + panic!( + "requesting coin for an EthereumInInstruction with a coin {}", + "we don't handle. this never should have been yielded" + ) + }); + Balance { coin, amount: amount_to_serai_amount(coin, self.amount) } + } + fn data(&self) -> &[u8] { + &self.data + } + + fn write(&self, writer: &mut W) -> io::Result<()> { + EthereumInInstruction::write(self, writer) + } + fn read(reader: &mut R) -> io::Result { + EthereumInInstruction::read(reader) + } +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct Claim { + signature: [u8; 64], +} +impl AsRef<[u8]> for Claim { + fn as_ref(&self) -> &[u8] { + &self.signature + } +} +impl AsMut<[u8]> for Claim { + fn as_mut(&mut self) -> &mut [u8] { + &mut self.signature + } +} +impl Default for Claim { + fn default() -> Self { + Self { signature: [0; 64] } + } +} +impl From<&Signature> for Claim { + fn from(sig: &Signature) -> Self { + Self { signature: sig.to_bytes() } + } +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct Eventuality(PublicKey, RouterCommand); +impl EventualityTrait for Eventuality { + type Claim = Claim; + type Completion = SignedRouterCommand; + + fn lookup(&self) -> Vec { + match self.1 { + RouterCommand::UpdateSeraiKey { nonce, .. } | RouterCommand::Execute { nonce, .. 
} => { + nonce.as_le_bytes().to_vec() + } + } + } + + fn read(reader: &mut R) -> io::Result { + let point = Secp256k1::read_G(reader)?; + let command = RouterCommand::read(reader)?; + Ok(Eventuality( + PublicKey::new(point).ok_or(io::Error::other("unusable key within Eventuality"))?, + command, + )) + } + fn serialize(&self) -> Vec { + let mut res = vec![]; + res.extend(self.0.point().to_bytes().as_slice()); + self.1.write(&mut res).unwrap(); + res + } + + fn claim(completion: &Self::Completion) -> Self::Claim { + Claim::from(completion.signature()) + } + fn serialize_completion(completion: &Self::Completion) -> Vec { + let mut res = vec![]; + completion.write(&mut res).unwrap(); + res + } + fn read_completion(reader: &mut R) -> io::Result { + SignedRouterCommand::read(reader) + } +} + +#[derive(Clone)] +pub struct Ethereum { + // This DB is solely used to access the first key generated, as needed to determine the Router's + // address. Accordingly, all methods present are consistent to a Serai chain with a finalized + // first key (regardless of local state), and this is safe. 
+ db: D, + #[cfg_attr(test, allow(unused))] + relayer_url: String, + provider: Arc>, + deployer: Deployer, + router: Arc>>, +} +impl PartialEq for Ethereum { + fn eq(&self, _other: &Ethereum) -> bool { + true + } +} +impl fmt::Debug for Ethereum { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt + .debug_struct("Ethereum") + .field("deployer", &self.deployer) + .field("router", &self.router) + .finish_non_exhaustive() + } +} +impl Ethereum { + pub async fn new(db: D, daemon_url: String, relayer_url: String) -> Self { + let provider = Arc::new(RootProvider::new( + ClientBuilder::default().transport(SimpleRequest::new(daemon_url), true), + )); + + let mut deployer = Deployer::new(provider.clone()).await; + while !matches!(deployer, Ok(Some(_))) { + log::error!("Deployer wasn't deployed yet or networking error"); + sleep(Duration::from_secs(5)).await; + deployer = Deployer::new(provider.clone()).await; + } + let deployer = deployer.unwrap().unwrap(); + + dbg!(&relayer_url); + dbg!(relayer_url.len()); + Ethereum { db, relayer_url, provider, deployer, router: Arc::new(RwLock::new(None)) } + } + + // Obtain a reference to the Router, sleeping until it's deployed if it hasn't already been. + // This is guaranteed to return Some. 
+ pub async fn router(&self) -> RwLockReadGuard<'_, Option> { + // If we've already instantiated the Router, return a read reference + { + let router = self.router.read().await; + if router.is_some() { + return router; + } + } + + // Instantiate it + let mut router = self.router.write().await; + // If another attempt beat us to it, return + if router.is_some() { + drop(router); + return self.router.read().await; + } + + // Get the first key from the DB + let first_key = + NetworkKeyDb::get(&self.db, Session(0)).expect("getting outputs before confirming a key"); + let key = Secp256k1::read_G(&mut first_key.as_slice()).unwrap(); + let public_key = PublicKey::new(key).unwrap(); + + // Find the router + let mut found = self.deployer.find_router(self.provider.clone(), &public_key).await; + while !matches!(found, Ok(Some(_))) { + log::error!("Router wasn't deployed yet or networking error"); + sleep(Duration::from_secs(5)).await; + found = self.deployer.find_router(self.provider.clone(), &public_key).await; + } + + // Set it + *router = Some(found.unwrap().unwrap()); + + // Downgrade to a read lock + // Explicitly doesn't use `downgrade` so that another pending write txn can realize it's no + // longer necessary + drop(router); + self.router.read().await + } +} + +#[async_trait] +impl Network for Ethereum { + type Curve = Secp256k1; + + type Transaction = Transaction; + type Block = Epoch; + + type Output = EthereumInInstruction; + type SignableTransaction = RouterCommand; + type Eventuality = Eventuality; + type TransactionMachine = RouterCommandMachine; + + type Scheduler = Scheduler; + + type Address = Address; + + const NETWORK: NetworkId = NetworkId::Ethereum; + const ID: &'static str = "Ethereum"; + const ESTIMATED_BLOCK_TIME_IN_SECONDS: usize = 32 * 12; + const CONFIRMATIONS: usize = 1; + + const DUST: u64 = 0; // TODO + + const COST_TO_AGGREGATE: u64 = 0; + + // TODO: usize::max, with a merkle tree in the router + const MAX_OUTPUTS: usize = 256; + + fn 
tweak_keys(keys: &mut ThresholdKeys) { + while PublicKey::new(keys.group_key()).is_none() { + *keys = keys.offset(::F::ONE); + } + } + + #[cfg(test)] + async fn external_address(&self, _key: ::G) -> Address { + Address(self.router().await.as_ref().unwrap().address()) + } + + fn branch_address(_key: ::G) -> Option
{ + None + } + + fn change_address(_key: ::G) -> Option
{ + None + } + + fn forward_address(_key: ::G) -> Option
{ + None + } + + async fn get_latest_block_number(&self) -> Result { + let actual_number = self + .provider + .get_block(BlockNumberOrTag::Finalized.into(), false) + .await + .map_err(|_| NetworkError::ConnectionError)? + .ok_or(NetworkError::ConnectionError)? + .header + .number + .unwrap(); + // Error if there hasn't been a full epoch yet + if actual_number < 32 { + Err(NetworkError::ConnectionError)? + } + // If this is 33, the division will return 1, yet 1 is the epoch in progress + let latest_full_epoch = (actual_number / 32).saturating_sub(1); + Ok(latest_full_epoch.try_into().unwrap()) + } + + async fn get_block(&self, number: usize) -> Result { + let latest_finalized = self.get_latest_block_number().await?; + if number > latest_finalized { + Err(NetworkError::ConnectionError)? + } + + let start = number * 32; + let prior_end_hash = if start == 0 { + [0; 32] + } else { + self + .provider + .get_block(u64::try_from(start - 1).unwrap().into(), false) + .await + .ok() + .flatten() + .ok_or(NetworkError::ConnectionError)? + .header + .hash + .unwrap() + .into() + }; + + let end_header = self + .provider + .get_block(u64::try_from(start + 31).unwrap().into(), false) + .await + .ok() + .flatten() + .ok_or(NetworkError::ConnectionError)? 
+ .header; + + let end_hash = end_header.hash.unwrap().into(); + let time = end_header.timestamp; + + Ok(Epoch { prior_end_hash, start: start.try_into().unwrap(), end_hash, time }) + } + + async fn get_outputs( + &self, + block: &Self::Block, + _: ::G, + ) -> Vec { + let router = self.router().await; + let router = router.as_ref().unwrap(); + // Grab the key at the end of the epoch + let key_at_end_of_block = loop { + match router.key_at_end_of_block(block.start + 31).await { + Ok(Some(key)) => break key, + Ok(None) => return vec![], + Err(e) => { + log::error!("couldn't connect to router for the key at the end of the block: {e:?}"); + sleep(Duration::from_secs(5)).await; + continue; + } + } + }; + + let mut all_events = vec![]; + let mut top_level_txids = HashSet::new(); + for erc20_addr in [DAI] { + let erc20 = Erc20::new(self.provider.clone(), erc20_addr); + + for block in block.start .. (block.start + 32) { + let transfers = loop { + match erc20.top_level_transfers(block, router.address()).await { + Ok(transfers) => break transfers, + Err(e) => { + log::error!("couldn't connect to Ethereum node for the top-level transfers: {e:?}"); + sleep(Duration::from_secs(5)).await; + continue; + } + } + }; + + for transfer in transfers { + top_level_txids.insert(transfer.id); + all_events.push(EthereumInInstruction { + id: (transfer.id, 0), + from: transfer.from, + coin: EthereumCoin::Erc20(erc20_addr), + amount: transfer.amount, + data: transfer.data, + key_at_end_of_block, + }); + } + } + } + + for block in block.start .. 
(block.start + 32) { + let mut events = router.in_instructions(block, &HashSet::from([DAI])).await; + while let Err(e) = events { + log::error!("couldn't connect to Ethereum node for the Router's events: {e:?}"); + sleep(Duration::from_secs(5)).await; + events = router.in_instructions(block, &HashSet::from([DAI])).await; + } + let mut events = events.unwrap(); + for event in &mut events { + // A transaction should either be a top-level transfer or a Router InInstruction + if top_level_txids.contains(&event.id.0) { + panic!("top-level transfer had {} and router had {:?}", hex::encode(event.id.0), event); + } + // Overwrite the key at end of block to key at end of epoch + event.key_at_end_of_block = key_at_end_of_block; + } + all_events.extend(events); + } + + for event in &all_events { + assert!( + coin_to_serai_coin(&event.coin).is_some(), + "router yielded events for unrecognized coins" + ); + } + all_events + } + + async fn get_eventuality_completions( + &self, + eventualities: &mut EventualitiesTracker, + block: &Self::Block, + ) -> HashMap< + [u8; 32], + ( + usize, + >::Id, + ::Completion, + ), + > { + let mut res = HashMap::new(); + if eventualities.map.is_empty() { + return res; + } + + let router = self.router().await; + let router = router.as_ref().unwrap(); + + let past_scanned_epoch = loop { + match self.get_block(eventualities.block_number).await { + Ok(block) => break block, + Err(e) => log::error!("couldn't get the last scanned block in the tracker: {}", e), + } + sleep(Duration::from_secs(10)).await; + }; + assert_eq!( + past_scanned_epoch.start / 32, + u64::try_from(eventualities.block_number).unwrap(), + "assumption of tracker block number's relation to epoch start is incorrect" + ); + + // Iterate from after the epoch number in the tracker to the end of this epoch + for block_num in (past_scanned_epoch.end() + 1) ..= block.end() { + let executed = loop { + match router.executed_commands(block_num).await { + Ok(executed) => break executed, + Err(e) 
=> log::error!("couldn't get the executed commands in block {block_num}: {e}"), + } + sleep(Duration::from_secs(10)).await; + }; + + for executed in executed { + let lookup = executed.nonce.to_le_bytes().to_vec(); + if let Some((plan_id, eventuality)) = eventualities.map.get(&lookup) { + if let Some(command) = + SignedRouterCommand::new(&eventuality.0, eventuality.1.clone(), &executed.signature) + { + res.insert(*plan_id, (block_num.try_into().unwrap(), executed.tx_id, command)); + eventualities.map.remove(&lookup); + } + } + } + } + eventualities.block_number = (block.start / 32).try_into().unwrap(); + + res + } + + async fn needed_fee( + &self, + _block_number: usize, + inputs: &[Self::Output], + _payments: &[Payment], + _change: &Option, + ) -> Result, NetworkError> { + assert_eq!(inputs.len(), 0); + // Claim no fee is needed so we can perform amortization ourselves + Ok(Some(0)) + } + + async fn signable_transaction( + &self, + _block_number: usize, + _plan_id: &[u8; 32], + key: ::G, + inputs: &[Self::Output], + payments: &[Payment], + change: &Option, + scheduler_addendum: &>::Addendum, + ) -> Result, NetworkError> { + assert_eq!(inputs.len(), 0); + assert!(change.is_none()); + let chain_id = self.provider.get_chain_id().await.map_err(|_| NetworkError::ConnectionError)?; + + // TODO: Perform fee amortization (in scheduler? + // TODO: Make this function internal and have needed_fee properly return None as expected? + // TODO: signable_transaction is written as cannot return None if needed_fee returns Some + // TODO: Why can this return None at all if it isn't allowed to return None? 
+ + let command = match scheduler_addendum { + Addendum::Nonce(nonce) => RouterCommand::Execute { + chain_id: U256::try_from(chain_id).unwrap(), + nonce: U256::try_from(*nonce).unwrap(), + outs: payments + .iter() + .filter_map(|payment| { + Some(OutInstruction { + target: if let Some(data) = payment.data.as_ref() { + // This introspects the Call serialization format, expecting the first 20 bytes to + // be the address + // This avoids wasting the 20-bytes allocated within address + let full_data = [payment.address.0.as_slice(), data].concat(); + let mut reader = full_data.as_slice(); + + let mut calls = vec![]; + while !reader.is_empty() { + calls.push(Call::read(&mut reader).ok()?) + } + // The above must have executed at least once since reader contains the address + assert_eq!(calls[0].to, payment.address.0); + + OutInstructionTarget::Calls(calls) + } else { + OutInstructionTarget::Direct(payment.address.0) + }, + value: { + assert_eq!(payment.balance.coin, Coin::Ether); // TODO + balance_to_ethereum_amount(payment.balance) + }, + }) + }) + .collect(), + }, + Addendum::RotateTo { nonce, new_key } => { + assert!(payments.is_empty()); + RouterCommand::UpdateSeraiKey { + chain_id: U256::try_from(chain_id).unwrap(), + nonce: U256::try_from(*nonce).unwrap(), + key: PublicKey::new(*new_key).expect("new key wasn't a valid ETH public key"), + } + } + }; + Ok(Some(( + command.clone(), + Eventuality(PublicKey::new(key).expect("key wasn't a valid ETH public key"), command), + ))) + } + + async fn attempt_sign( + &self, + keys: ThresholdKeys, + transaction: Self::SignableTransaction, + ) -> Result { + Ok( + RouterCommandMachine::new(keys, transaction) + .expect("keys weren't usable to sign router commands"), + ) + } + + async fn publish_completion( + &self, + completion: &::Completion, + ) -> Result<(), NetworkError> { + // Publish this to the dedicated TX server for a solver to actually publish + #[cfg(not(test))] + { + let mut msg = vec![]; + match completion.command() { 
+ RouterCommand::UpdateSeraiKey { nonce, .. } | RouterCommand::Execute { nonce, .. } => { + msg.extend(&u32::try_from(nonce).unwrap().to_le_bytes()); + } + } + completion.write(&mut msg).unwrap(); + + let Ok(mut socket) = TcpStream::connect(&self.relayer_url).await else { + log::warn!("couldn't connect to the relayer server"); + Err(NetworkError::ConnectionError)? + }; + let Ok(()) = socket.write_all(&u32::try_from(msg.len()).unwrap().to_le_bytes()).await else { + log::warn!("couldn't send the message's len to the relayer server"); + Err(NetworkError::ConnectionError)? + }; + let Ok(()) = socket.write_all(&msg).await else { + log::warn!("couldn't write the message to the relayer server"); + Err(NetworkError::ConnectionError)? + }; + if socket.read_u8().await.ok() != Some(1) { + log::warn!("didn't get the ack from the relayer server"); + Err(NetworkError::ConnectionError)?; + } + + Ok(()) + } + + // Publish this using a dummy account we fund with magic RPC commands + #[cfg(test)] + { + let router = self.router().await; + let router = router.as_ref().unwrap(); + + let mut tx = match completion.command() { + RouterCommand::UpdateSeraiKey { key, .. } => { + router.update_serai_key(key, completion.signature()) + } + RouterCommand::Execute { outs, .. 
} => router.execute( + &outs.iter().cloned().map(Into::into).collect::>(), + completion.signature(), + ), + }; + tx.gas_limit = 1_000_000u64.into(); + tx.gas_price = 1_000_000_000u64.into(); + let tx = ethereum_serai::crypto::deterministically_sign(&tx); + + if self.provider.get_transaction_by_hash(*tx.hash()).await.unwrap().is_none() { + self + .provider + .raw_request::<_, ()>( + "anvil_setBalance".into(), + [ + tx.recover_signer().unwrap().to_string(), + (U256::from(tx.tx().gas_limit) * U256::from(tx.tx().gas_price)).to_string(), + ], + ) + .await + .unwrap(); + + let (tx, sig, _) = tx.into_parts(); + let mut bytes = vec![]; + tx.encode_with_signature_fields(&sig, &mut bytes); + let pending_tx = self.provider.send_raw_transaction(&bytes).await.unwrap(); + self.mine_block().await; + assert!(pending_tx.get_receipt().await.unwrap().status()); + } + + Ok(()) + } + } + + async fn confirm_completion( + &self, + eventuality: &Self::Eventuality, + claim: &::Claim, + ) -> Result::Completion>, NetworkError> { + Ok(SignedRouterCommand::new(&eventuality.0, eventuality.1.clone(), &claim.signature)) + } + + #[cfg(test)] + async fn get_block_number(&self, id: &>::Id) -> usize { + self + .provider + .get_block(B256::from(*id).into(), false) + .await + .unwrap() + .unwrap() + .header + .number + .unwrap() + .try_into() + .unwrap() + } + + #[cfg(test)] + async fn check_eventuality_by_claim( + &self, + eventuality: &Self::Eventuality, + claim: &::Claim, + ) -> bool { + SignedRouterCommand::new(&eventuality.0, eventuality.1.clone(), &claim.signature).is_some() + } + + #[cfg(test)] + async fn get_transaction_by_eventuality( + &self, + block: usize, + eventuality: &Self::Eventuality, + ) -> Self::Transaction { + // We mine 96 blocks to ensure the 32 blocks relevant are finalized + // Back-check the prior two epochs in response to this + // TODO: Review why this is sub(3) and not sub(2) + for block in block.saturating_sub(3) ..= block { + match eventuality.1 { + 
RouterCommand::UpdateSeraiKey { nonce, .. } | RouterCommand::Execute { nonce, .. } => { + let router = self.router().await; + let router = router.as_ref().unwrap(); + + let block = u64::try_from(block).unwrap(); + let filter = router + .key_updated_filter() + .from_block(block * 32) + .to_block(((block + 1) * 32) - 1) + .topic1(nonce); + let logs = self.provider.get_logs(&filter).await.unwrap(); + if let Some(log) = logs.first() { + return self + .provider + .get_transaction_by_hash(log.clone().transaction_hash.unwrap()) + .await + .unwrap() + .unwrap(); + }; + + let filter = router + .executed_filter() + .from_block(block * 32) + .to_block(((block + 1) * 32) - 1) + .topic1(nonce); + let logs = self.provider.get_logs(&filter).await.unwrap(); + if logs.is_empty() { + continue; + } + return self + .provider + .get_transaction_by_hash(logs[0].transaction_hash.unwrap()) + .await + .unwrap() + .unwrap(); + } + } + } + panic!("couldn't find completion in any three of checked blocks"); + } + + #[cfg(test)] + async fn mine_block(&self) { + self.provider.raw_request::<_, ()>("anvil_mine".into(), [96]).await.unwrap(); + } + + #[cfg(test)] + async fn test_send(&self, send_to: Self::Address) -> Self::Block { + use rand_core::OsRng; + use ciphersuite::group::ff::Field; + use ethereum_serai::alloy::sol_types::SolCall; + + let key = ::F::random(&mut OsRng); + let address = ethereum_serai::crypto::address(&(Secp256k1::generator() * key)); + + // Set a 1.1 ETH balance + self + .provider + .raw_request::<_, ()>( + "anvil_setBalance".into(), + [Address(address).to_string(), "1100000000000000000".into()], + ) + .await + .unwrap(); + + let value = U256::from_str_radix("1000000000000000000", 10).unwrap(); + let tx = ethereum_serai::alloy::consensus::TxLegacy { + chain_id: None, + nonce: 0, + gas_price: 1_000_000_000u128, + gas_limit: 200_000u128, + to: ethereum_serai::alloy::primitives::TxKind::Call(send_to.0.into()), + // 1 ETH + value, + input: 
ethereum_serai::router::abi::inInstructionCall::new(( + [0; 20].into(), + value, + vec![].into(), + )) + .abi_encode() + .into(), + }; + + use ethereum_serai::alloy::consensus::SignableTransaction; + let sig = k256::ecdsa::SigningKey::from(k256::elliptic_curve::NonZeroScalar::new(key).unwrap()) + .sign_prehash_recoverable(tx.signature_hash().as_ref()) + .unwrap(); + + let mut bytes = vec![]; + tx.encode_with_signature_fields(&sig.into(), &mut bytes); + let pending_tx = self.provider.send_raw_transaction(&bytes).await.ok().unwrap(); + + // Mine an epoch containing this TX + self.mine_block().await; + assert!(pending_tx.get_receipt().await.unwrap().status()); + // Yield the freshly mined block + self.get_block(self.get_latest_block_number().await.unwrap()).await.unwrap() + } +} diff --git a/processor/src/networks/mod.rs b/processor/src/networks/mod.rs index d77d43f15..ee3cd24af 100644 --- a/processor/src/networks/mod.rs +++ b/processor/src/networks/mod.rs @@ -21,12 +21,17 @@ pub mod bitcoin; #[cfg(feature = "bitcoin")] pub use self::bitcoin::Bitcoin; +#[cfg(feature = "ethereum")] +pub mod ethereum; +#[cfg(feature = "ethereum")] +pub use ethereum::Ethereum; + #[cfg(feature = "monero")] pub mod monero; #[cfg(feature = "monero")] pub use monero::Monero; -use crate::{Payment, Plan}; +use crate::{Payment, Plan, multisigs::scheduler::Scheduler}; #[derive(Clone, Copy, Error, Debug)] pub enum NetworkError { @@ -105,7 +110,7 @@ pub trait Output: Send + Sync + Sized + Clone + PartialEq + Eq + Deb fn kind(&self) -> OutputType; fn id(&self) -> Self::Id; - fn tx_id(&self) -> >::Id; + fn tx_id(&self) -> >::Id; // TODO: Review use of fn key(&self) -> ::G; fn presumed_origin(&self) -> Option; @@ -118,25 +123,33 @@ pub trait Output: Send + Sync + Sized + Clone + PartialEq + Eq + Deb } #[async_trait] -pub trait Transaction: Send + Sync + Sized + Clone + Debug { +pub trait Transaction: Send + Sync + Sized + Clone + PartialEq + Debug { type Id: 'static + Id; fn id(&self) -> Self::Id; - 
fn serialize(&self) -> Vec; - fn read(reader: &mut R) -> io::Result; - + // TODO: Move to Balance #[cfg(test)] async fn fee(&self, network: &N) -> u64; } pub trait SignableTransaction: Send + Sync + Clone + Debug { + // TODO: Move to Balance fn fee(&self) -> u64; } -pub trait Eventuality: Send + Sync + Clone + Debug { +pub trait Eventuality: Send + Sync + Clone + PartialEq + Debug { + type Claim: Send + Sync + Clone + PartialEq + Default + AsRef<[u8]> + AsMut<[u8]> + Debug; + type Completion: Send + Sync + Clone + PartialEq + Debug; + fn lookup(&self) -> Vec; fn read(reader: &mut R) -> io::Result; fn serialize(&self) -> Vec; + + fn claim(completion: &Self::Completion) -> Self::Claim; + + // TODO: Make a dedicated Completion trait + fn serialize_completion(completion: &Self::Completion) -> Vec; + fn read_completion(reader: &mut R) -> io::Result; } #[derive(Clone, PartialEq, Eq, Debug)] @@ -211,7 +224,7 @@ fn drop_branches( ) -> Vec { let mut branch_outputs = vec![]; for payment in payments { - if payment.address == N::branch_address(key) { + if Some(&payment.address) == N::branch_address(key).as_ref() { branch_outputs.push(PostFeeBranch { expected: payment.balance.amount.0, actual: None }); } } @@ -227,12 +240,12 @@ pub struct PreparedSend { } #[async_trait] -pub trait Network: 'static + Send + Sync + Clone + PartialEq + Eq + Debug { +pub trait Network: 'static + Send + Sync + Clone + PartialEq + Debug { /// The elliptic curve used for this network. type Curve: Curve; /// The type representing the transaction for this network. - type Transaction: Transaction; + type Transaction: Transaction; // TODO: Review use of /// The type representing the block for this network. type Block: Block; @@ -246,7 +259,12 @@ pub trait Network: 'static + Send + Sync + Clone + PartialEq + Eq + Debug { /// This must be binding to both the outputs expected and the plan ID. type Eventuality: Eventuality; /// The FROST machine to sign a transaction. 
- type TransactionMachine: PreprocessMachine; + type TransactionMachine: PreprocessMachine< + Signature = ::Completion, + >; + + /// The scheduler for this network. + type Scheduler: Scheduler; /// The type representing an address. // This should NOT be a String, yet a tailored type representing an efficient binary encoding, @@ -269,10 +287,6 @@ pub trait Network: 'static + Send + Sync + Clone + PartialEq + Eq + Debug { const ESTIMATED_BLOCK_TIME_IN_SECONDS: usize; /// The amount of confirmations required to consider a block 'final'. const CONFIRMATIONS: usize; - /// The maximum amount of inputs which will fit in a TX. - /// This should be equal to MAX_OUTPUTS unless one is specifically limited. - /// A TX with MAX_INPUTS and MAX_OUTPUTS must not exceed the max size. - const MAX_INPUTS: usize; /// The maximum amount of outputs which will fit in a TX. /// This should be equal to MAX_INPUTS unless one is specifically limited. /// A TX with MAX_INPUTS and MAX_OUTPUTS must not exceed the max size. @@ -293,13 +307,16 @@ pub trait Network: 'static + Send + Sync + Clone + PartialEq + Eq + Debug { fn tweak_keys(key: &mut ThresholdKeys); /// Address for the given group key to receive external coins to. - fn external_address(key: ::G) -> Self::Address; + #[cfg(test)] + async fn external_address(&self, key: ::G) -> Self::Address; /// Address for the given group key to use for scheduled branches. - fn branch_address(key: ::G) -> Self::Address; + fn branch_address(key: ::G) -> Option; /// Address for the given group key to use for change. - fn change_address(key: ::G) -> Self::Address; + fn change_address(key: ::G) -> Option; /// Address for forwarded outputs from prior multisigs. - fn forward_address(key: ::G) -> Self::Address; + /// + /// forward_address must only return None if explicit forwarding isn't necessary. + fn forward_address(key: ::G) -> Option; /// Get the latest block's number. 
async fn get_latest_block_number(&self) -> Result; @@ -349,13 +366,24 @@ pub trait Network: 'static + Send + Sync + Clone + PartialEq + Eq + Debug { /// registered eventualities may have been completed in. /// /// This may panic if not fed a block greater than the tracker's block number. + /// + /// Plan ID -> (block number, TX ID, completion) // TODO: get_eventuality_completions_internal + provided get_eventuality_completions for common // code + // TODO: Consider having this return the Transaction + the Completion? + // Or Transaction with extract_completion? async fn get_eventuality_completions( &self, eventualities: &mut EventualitiesTracker, block: &Self::Block, - ) -> HashMap<[u8; 32], (usize, Self::Transaction)>; + ) -> HashMap< + [u8; 32], + ( + usize, + >::Id, + ::Completion, + ), + >; /// Returns the needed fee to fulfill this Plan at this fee rate. /// @@ -363,7 +391,6 @@ pub trait Network: 'static + Send + Sync + Clone + PartialEq + Eq + Debug { async fn needed_fee( &self, block_number: usize, - plan_id: &[u8; 32], inputs: &[Self::Output], payments: &[Payment], change: &Option, @@ -375,16 +402,25 @@ pub trait Network: 'static + Send + Sync + Clone + PartialEq + Eq + Debug { /// 1) Call needed_fee /// 2) If the Plan is fulfillable, amortize the fee /// 3) Call signable_transaction *which MUST NOT return None if the above was done properly* + /// + /// This takes a destructured Plan as some of these arguments are malleated from the original + /// Plan. + // TODO: Explicit AmortizedPlan? + #[allow(clippy::too_many_arguments)] async fn signable_transaction( &self, block_number: usize, plan_id: &[u8; 32], + key: ::G, inputs: &[Self::Output], payments: &[Payment], change: &Option, + scheduler_addendum: &>::Addendum, ) -> Result, NetworkError>; /// Prepare a SignableTransaction for a transaction. + /// + /// This must not persist anything as we will prepare Plans we never intend to execute. 
async fn prepare_send( &self, block_number: usize, @@ -395,13 +431,15 @@ pub trait Network: 'static + Send + Sync + Clone + PartialEq + Eq + Debug { assert!((!plan.payments.is_empty()) || plan.change.is_some()); let plan_id = plan.id(); - let Plan { key, inputs, mut payments, change } = plan; - let theoretical_change_amount = + let Plan { key, inputs, mut payments, change, scheduler_addendum } = plan; + let theoretical_change_amount = if change.is_some() { inputs.iter().map(|input| input.balance().amount.0).sum::() - - payments.iter().map(|payment| payment.balance.amount.0).sum::(); + payments.iter().map(|payment| payment.balance.amount.0).sum::() + } else { + 0 + }; - let Some(tx_fee) = self.needed_fee(block_number, &plan_id, &inputs, &payments, &change).await? - else { + let Some(tx_fee) = self.needed_fee(block_number, &inputs, &payments, &change).await? else { // This Plan is not fulfillable // TODO: Have Plan explicitly distinguish payments and branches in two separate Vecs? return Ok(PreparedSend { @@ -466,7 +504,7 @@ pub trait Network: 'static + Send + Sync + Clone + PartialEq + Eq + Debug { // Note the branch outputs' new values let mut branch_outputs = vec![]; for (initial_amount, payment) in initial_payment_amounts.into_iter().zip(&payments) { - if payment.address == Self::branch_address(key) { + if Some(&payment.address) == Self::branch_address(key).as_ref() { branch_outputs.push(PostFeeBranch { expected: initial_amount, actual: if payment.balance.amount.0 == 0 { @@ -508,11 +546,20 @@ pub trait Network: 'static + Send + Sync + Clone + PartialEq + Eq + Debug { ) })(); - let Some(tx) = - self.signable_transaction(block_number, &plan_id, &inputs, &payments, &change).await? + let Some(tx) = self + .signable_transaction( + block_number, + &plan_id, + key, + &inputs, + &payments, + &change, + &scheduler_addendum, + ) + .await? else { panic!( - "{}. {}: {}, {}: {:?}, {}: {:?}, {}: {:?}, {}: {}", + "{}. 
{}: {}, {}: {:?}, {}: {:?}, {}: {:?}, {}: {}, {}: {:?}", "signable_transaction returned None for a TX we prior successfully calculated the fee for", "id", hex::encode(plan_id), @@ -524,6 +571,8 @@ pub trait Network: 'static + Send + Sync + Clone + PartialEq + Eq + Debug { change, "successfully amoritized fee", tx_fee, + "scheduler's addendum", + scheduler_addendum, ) }; @@ -546,31 +595,49 @@ pub trait Network: 'static + Send + Sync + Clone + PartialEq + Eq + Debug { } /// Attempt to sign a SignableTransaction. - async fn attempt_send( + async fn attempt_sign( &self, keys: ThresholdKeys, transaction: Self::SignableTransaction, ) -> Result; - /// Publish a transaction. - async fn publish_transaction(&self, tx: &Self::Transaction) -> Result<(), NetworkError>; - - /// Get a transaction by its ID. - async fn get_transaction( + /// Publish a completion. + async fn publish_completion( &self, - id: &>::Id, - ) -> Result; + completion: &::Completion, + ) -> Result<(), NetworkError>; - /// Confirm a plan was completed by the specified transaction. - // This is allowed to take shortcuts. - // This may assume an honest multisig, solely checking the inputs specified were spent. - // This may solely check the outputs are equivalent *so long as it's locked to the plan ID*. - fn confirm_completion(&self, eventuality: &Self::Eventuality, tx: &Self::Transaction) -> bool; + /// Confirm a plan was completed by the specified transaction, per our bounds. + /// + /// Returns Err if there was an error with the confirmation methodology. + /// Returns Ok(None) if this is not a valid completion. + /// Returns Ok(Some(_)) with the completion if it's valid. + async fn confirm_completion( + &self, + eventuality: &Self::Eventuality, + claim: &::Claim, + ) -> Result::Completion>, NetworkError>; /// Get a block's number by its ID. #[cfg(test)] async fn get_block_number(&self, id: &>::Id) -> usize; + /// Check an Eventuality is fulfilled by a claim. 
+ #[cfg(test)] + async fn check_eventuality_by_claim( + &self, + eventuality: &Self::Eventuality, + claim: &::Claim, + ) -> bool; + + /// Get a transaction by the Eventuality it completes. + #[cfg(test)] + async fn get_transaction_by_eventuality( + &self, + block: usize, + eventuality: &Self::Eventuality, + ) -> Self::Transaction; + #[cfg(test)] async fn mine_block(&self); @@ -579,3 +646,10 @@ pub trait Network: 'static + Send + Sync + Clone + PartialEq + Eq + Debug { #[cfg(test)] async fn test_send(&self, key: Self::Address) -> Self::Block; } + +pub trait UtxoNetwork: Network { + /// The maximum amount of inputs which will fit in a TX. + /// This should be equal to MAX_OUTPUTS unless one is specifically limited. + /// A TX with MAX_INPUTS and MAX_OUTPUTS must not exceed the max size. + const MAX_INPUTS: usize; +} diff --git a/processor/src/networks/monero.rs b/processor/src/networks/monero.rs index 8d58ee1a2..8d4d17606 100644 --- a/processor/src/networks/monero.rs +++ b/processor/src/networks/monero.rs @@ -39,8 +39,9 @@ use crate::{ networks::{ NetworkError, Block as BlockTrait, OutputType, Output as OutputTrait, Transaction as TransactionTrait, SignableTransaction as SignableTransactionTrait, - Eventuality as EventualityTrait, EventualitiesTracker, Network, + Eventuality as EventualityTrait, EventualitiesTracker, Network, UtxoNetwork, }, + multisigs::scheduler::utxo::Scheduler, }; #[derive(Clone, PartialEq, Eq, Debug)] @@ -117,12 +118,6 @@ impl TransactionTrait for Transaction { fn id(&self) -> Self::Id { self.hash() } - fn serialize(&self) -> Vec { - self.serialize() - } - fn read(reader: &mut R) -> io::Result { - Transaction::read(reader) - } #[cfg(test)] async fn fee(&self, _: &Monero) -> u64 { @@ -131,6 +126,9 @@ impl TransactionTrait for Transaction { } impl EventualityTrait for Eventuality { + type Claim = [u8; 32]; + type Completion = Transaction; + // Use the TX extra to look up potential matches // While anyone can forge this, a transaction with distinct 
outputs won't actually match // Extra includess the one time keys which are derived from the plan ID, so a collision here is a @@ -145,6 +143,16 @@ impl EventualityTrait for Eventuality { fn serialize(&self) -> Vec { self.serialize() } + + fn claim(tx: &Transaction) -> [u8; 32] { + tx.id() + } + fn serialize_completion(completion: &Transaction) -> Vec { + completion.serialize() + } + fn read_completion(reader: &mut R) -> io::Result { + Transaction::read(reader) + } } #[derive(Clone, Debug)] @@ -274,7 +282,8 @@ impl Monero { async fn median_fee(&self, block: &Block) -> Result { let mut fees = vec![]; for tx_hash in &block.txs { - let tx = self.get_transaction(tx_hash).await?; + let tx = + self.rpc.get_transaction(*tx_hash).await.map_err(|_| NetworkError::ConnectionError)?; // Only consider fees from RCT transactions, else the fee property read wouldn't be accurate if tx.rct_signatures.rct_type() != RctType::Null { continue; @@ -454,6 +463,8 @@ impl Network for Monero { type Eventuality = Eventuality; type TransactionMachine = TransactionMachine; + type Scheduler = Scheduler; + type Address = Address; const NETWORK: NetworkId = NetworkId::Monero; @@ -461,11 +472,6 @@ impl Network for Monero { const ESTIMATED_BLOCK_TIME_IN_SECONDS: usize = 120; const CONFIRMATIONS: usize = 10; - // wallet2 will not create a transaction larger than 100kb, and Monero won't relay a transaction - // larger than 150kb. 
This fits within the 100kb mark - // Technically, it can be ~124, yet a small bit of buffer is appreciated - // TODO: Test creating a TX this big - const MAX_INPUTS: usize = 120; const MAX_OUTPUTS: usize = 16; // 0.01 XMR @@ -478,20 +484,21 @@ impl Network for Monero { // Monero doesn't require/benefit from tweaking fn tweak_keys(_: &mut ThresholdKeys) {} - fn external_address(key: EdwardsPoint) -> Address { + #[cfg(test)] + async fn external_address(&self, key: EdwardsPoint) -> Address { Self::address_internal(key, EXTERNAL_SUBADDRESS) } - fn branch_address(key: EdwardsPoint) -> Address { - Self::address_internal(key, BRANCH_SUBADDRESS) + fn branch_address(key: EdwardsPoint) -> Option
{ + Some(Self::address_internal(key, BRANCH_SUBADDRESS)) } - fn change_address(key: EdwardsPoint) -> Address { - Self::address_internal(key, CHANGE_SUBADDRESS) + fn change_address(key: EdwardsPoint) -> Option
{ + Some(Self::address_internal(key, CHANGE_SUBADDRESS)) } - fn forward_address(key: EdwardsPoint) -> Address { - Self::address_internal(key, FORWARD_SUBADDRESS) + fn forward_address(key: EdwardsPoint) -> Option
{ + Some(Self::address_internal(key, FORWARD_SUBADDRESS)) } async fn get_latest_block_number(&self) -> Result { @@ -558,7 +565,7 @@ impl Network for Monero { &self, eventualities: &mut EventualitiesTracker, block: &Block, - ) -> HashMap<[u8; 32], (usize, Transaction)> { + ) -> HashMap<[u8; 32], (usize, [u8; 32], Transaction)> { let mut res = HashMap::new(); if eventualities.map.is_empty() { return res; @@ -568,13 +575,13 @@ impl Network for Monero { network: &Monero, eventualities: &mut EventualitiesTracker, block: &Block, - res: &mut HashMap<[u8; 32], (usize, Transaction)>, + res: &mut HashMap<[u8; 32], (usize, [u8; 32], Transaction)>, ) { for hash in &block.txs { let tx = { let mut tx; while { - tx = network.get_transaction(hash).await; + tx = network.rpc.get_transaction(*hash).await; tx.is_err() } { log::error!("couldn't get transaction {}: {}", hex::encode(hash), tx.err().unwrap()); @@ -587,7 +594,7 @@ impl Network for Monero { if eventuality.matches(&tx) { res.insert( eventualities.map.remove(&tx.prefix.extra).unwrap().0, - (usize::try_from(block.number().unwrap()).unwrap(), tx), + (usize::try_from(block.number().unwrap()).unwrap(), tx.id(), tx), ); } } @@ -625,14 +632,13 @@ impl Network for Monero { async fn needed_fee( &self, block_number: usize, - plan_id: &[u8; 32], inputs: &[Output], payments: &[Payment], change: &Option
, ) -> Result, NetworkError> { Ok( self - .make_signable_transaction(block_number, plan_id, inputs, payments, change, true) + .make_signable_transaction(block_number, &[0; 32], inputs, payments, change, true) .await? .map(|(_, signable)| signable.fee()), ) @@ -642,9 +648,11 @@ impl Network for Monero { &self, block_number: usize, plan_id: &[u8; 32], + _key: EdwardsPoint, inputs: &[Output], payments: &[Payment], change: &Option
, + (): &(), ) -> Result, NetworkError> { Ok( self @@ -658,7 +666,7 @@ impl Network for Monero { ) } - async fn attempt_send( + async fn attempt_sign( &self, keys: ThresholdKeys, transaction: SignableTransaction, @@ -669,7 +677,7 @@ impl Network for Monero { } } - async fn publish_transaction(&self, tx: &Self::Transaction) -> Result<(), NetworkError> { + async fn publish_completion(&self, tx: &Transaction) -> Result<(), NetworkError> { match self.rpc.publish_transaction(tx).await { Ok(()) => Ok(()), Err(RpcError::ConnectionError(e)) => { @@ -682,12 +690,17 @@ impl Network for Monero { } } - async fn get_transaction(&self, id: &[u8; 32]) -> Result { - self.rpc.get_transaction(*id).await.map_err(map_rpc_err) - } - - fn confirm_completion(&self, eventuality: &Eventuality, tx: &Transaction) -> bool { - eventuality.matches(tx) + async fn confirm_completion( + &self, + eventuality: &Eventuality, + id: &[u8; 32], + ) -> Result, NetworkError> { + let tx = self.rpc.get_transaction(*id).await.map_err(map_rpc_err)?; + if eventuality.matches(&tx) { + Ok(Some(tx)) + } else { + Ok(None) + } } #[cfg(test)] @@ -695,6 +708,31 @@ impl Network for Monero { self.rpc.get_block(*id).await.unwrap().number().unwrap().try_into().unwrap() } + #[cfg(test)] + async fn check_eventuality_by_claim( + &self, + eventuality: &Self::Eventuality, + claim: &[u8; 32], + ) -> bool { + return eventuality.matches(&self.rpc.get_transaction(*claim).await.unwrap()); + } + + #[cfg(test)] + async fn get_transaction_by_eventuality( + &self, + block: usize, + eventuality: &Eventuality, + ) -> Transaction { + let block = self.rpc.get_block_by_number(block).await.unwrap(); + for tx in &block.txs { + let tx = self.rpc.get_transaction(*tx).await.unwrap(); + if eventuality.matches(&tx) { + return tx; + } + } + panic!("block didn't have a transaction for this eventuality") + } + #[cfg(test)] async fn mine_block(&self) { // https://github.com/serai-dex/serai/issues/198 @@ -775,3 +813,11 @@ impl Network for Monero { 
self.get_block(block).await.unwrap() } } + +impl UtxoNetwork for Monero { + // wallet2 will not create a transaction larger than 100kb, and Monero won't relay a transaction + // larger than 150kb. This fits within the 100kb mark + // Technically, it can be ~124, yet a small bit of buffer is appreciated + // TODO: Test creating a TX this big + const MAX_INPUTS: usize = 120; +} diff --git a/processor/src/plan.rs b/processor/src/plan.rs index 3e10c7d39..58a8a5e11 100644 --- a/processor/src/plan.rs +++ b/processor/src/plan.rs @@ -8,7 +8,10 @@ use frost::curve::Ciphersuite; use serai_client::primitives::Balance; -use crate::networks::{Output, Network}; +use crate::{ + networks::{Output, Network}, + multisigs::scheduler::{SchedulerAddendum, Scheduler}, +}; #[derive(Clone, PartialEq, Eq, Debug)] pub struct Payment { @@ -73,7 +76,7 @@ impl Payment { } } -#[derive(Clone, PartialEq, Eq)] +#[derive(Clone, PartialEq)] pub struct Plan { pub key: ::G, pub inputs: Vec, @@ -90,7 +93,11 @@ pub struct Plan { /// This MUST contain a Serai address. Operating costs may be deducted from the payments in this /// Plan on the premise that the change address is Serai's, and accordingly, Serai will recoup /// the operating costs. + // + // TODO: Consider moving to ::G? pub change: Option, + /// The scheduler's additional data. 
+ pub scheduler_addendum: >::Addendum, } impl core::fmt::Debug for Plan { fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { @@ -100,6 +107,7 @@ impl core::fmt::Debug for Plan { .field("inputs", &self.inputs) .field("payments", &self.payments) .field("change", &self.change.as_ref().map(ToString::to_string)) + .field("scheduler_addendum", &self.scheduler_addendum) .finish() } } @@ -125,6 +133,10 @@ impl Plan { transcript.append_message(b"change", change.to_string()); } + let mut addendum_bytes = vec![]; + self.scheduler_addendum.write(&mut addendum_bytes).unwrap(); + transcript.append_message(b"scheduler_addendum", addendum_bytes); + transcript } @@ -161,7 +173,8 @@ impl Plan { }; assert!(serai_client::primitives::MAX_ADDRESS_LEN <= u8::MAX.into()); writer.write_all(&[u8::try_from(change.len()).unwrap()])?; - writer.write_all(&change) + writer.write_all(&change)?; + self.scheduler_addendum.write(writer) } pub fn read(reader: &mut R) -> io::Result { @@ -193,6 +206,7 @@ impl Plan { })?) 
}; - Ok(Plan { key, inputs, payments, change }) + let scheduler_addendum = >::Addendum::read(reader)?; + Ok(Plan { key, inputs, payments, change, scheduler_addendum }) } } diff --git a/processor/src/signer.rs b/processor/src/signer.rs index 7a4fcbedb..cab0bceb1 100644 --- a/processor/src/signer.rs +++ b/processor/src/signer.rs @@ -2,7 +2,6 @@ use core::{marker::PhantomData, fmt}; use std::collections::HashMap; use rand_core::OsRng; -use ciphersuite::group::GroupEncoding; use frost::{ ThresholdKeys, FrostError, sign::{Writable, PreprocessMachine, SignMachine, SignatureMachine}, @@ -17,7 +16,7 @@ pub use serai_db::*; use crate::{ Get, DbTxn, Db, - networks::{Transaction, Eventuality, Network}, + networks::{Eventuality, Network}, }; create_db!( @@ -25,7 +24,7 @@ create_db!( CompletionsDb: (id: [u8; 32]) -> Vec, EventualityDb: (id: [u8; 32]) -> Vec, AttemptDb: (id: &SignId) -> (), - TransactionDb: (id: &[u8]) -> Vec, + CompletionDb: (claim: &[u8]) -> Vec, ActiveSignsDb: () -> Vec<[u8; 32]>, CompletedOnChainDb: (id: &[u8; 32]) -> (), } @@ -59,12 +58,20 @@ impl CompletionsDb { fn completions( getter: &impl Get, id: [u8; 32], - ) -> Vec<>::Id> { - let completions = Self::get(getter, id).unwrap_or_default(); + ) -> Vec<::Claim> { + let Some(completions) = Self::get(getter, id) else { return vec![] }; + + // If this was set yet is empty, it's because it's the encoding of a claim with a length of 0 + if completions.is_empty() { + let default = ::Claim::default(); + assert_eq!(default.as_ref().len(), 0); + return vec![default]; + } + let mut completions_ref = completions.as_slice(); let mut res = vec![]; while !completions_ref.is_empty() { - let mut id = >::Id::default(); + let mut id = ::Claim::default(); let id_len = id.as_ref().len(); id.as_mut().copy_from_slice(&completions_ref[.. 
id_len]); completions_ref = &completions_ref[id_len ..]; @@ -73,25 +80,37 @@ impl CompletionsDb { res } - fn complete(txn: &mut impl DbTxn, id: [u8; 32], tx: &N::Transaction) { - let tx_id = tx.id(); - // Transactions can be completed by multiple signatures + fn complete( + txn: &mut impl DbTxn, + id: [u8; 32], + completion: &::Completion, + ) { + // Completions can be completed by multiple signatures // Save every solution in order to be robust - TransactionDb::save_transaction::(txn, tx); + CompletionDb::save_completion::(txn, completion); + + let claim = N::Eventuality::claim(completion); + let claim: &[u8] = claim.as_ref(); + + // If claim has a 0-byte encoding, the set key, even if empty, is the claim + if claim.is_empty() { + Self::set(txn, id, &vec![]); + return; + } + let mut existing = Self::get(txn, id).unwrap_or_default(); - // Don't add this TX if it's already present - let tx_len = tx_id.as_ref().len(); - assert_eq!(existing.len() % tx_len, 0); + assert_eq!(existing.len() % claim.len(), 0); + // Don't add this completion if it's already present let mut i = 0; while i < existing.len() { - if &existing[i .. (i + tx_len)] == tx_id.as_ref() { + if &existing[i .. 
(i + claim.len())] == claim { return; } - i += tx_len; + i += claim.len(); } - existing.extend(tx_id.as_ref()); + existing.extend(claim); Self::set(txn, id, &existing); } } @@ -110,25 +129,33 @@ impl EventualityDb { } } -impl TransactionDb { - fn save_transaction(txn: &mut impl DbTxn, tx: &N::Transaction) { - Self::set(txn, tx.id().as_ref(), &tx.serialize()); +impl CompletionDb { + fn save_completion( + txn: &mut impl DbTxn, + completion: &::Completion, + ) { + let claim = N::Eventuality::claim(completion); + let claim: &[u8] = claim.as_ref(); + Self::set(txn, claim, &N::Eventuality::serialize_completion(completion)); } - fn transaction( + fn completion( getter: &impl Get, - id: &>::Id, - ) -> Option { - Self::get(getter, id.as_ref()).map(|tx| N::Transaction::read(&mut tx.as_slice()).unwrap()) + claim: &::Claim, + ) -> Option<::Completion> { + Self::get(getter, claim.as_ref()) + .map(|completion| N::Eventuality::read_completion::<&[u8]>(&mut completion.as_ref()).unwrap()) } } type PreprocessFor = <::TransactionMachine as PreprocessMachine>::Preprocess; type SignMachineFor = <::TransactionMachine as PreprocessMachine>::SignMachine; -type SignatureShareFor = - as SignMachine<::Transaction>>::SignatureShare; -type SignatureMachineFor = - as SignMachine<::Transaction>>::SignatureMachine; +type SignatureShareFor = as SignMachine< + <::Eventuality as Eventuality>::Completion, +>>::SignatureShare; +type SignatureMachineFor = as SignMachine< + <::Eventuality as Eventuality>::Completion, +>>::SignatureMachine; pub struct Signer { db: PhantomData, @@ -164,12 +191,11 @@ impl Signer { log::info!("rebroadcasting transactions for plans whose completions yet to be confirmed..."); loop { for active in ActiveSignsDb::get(&db).unwrap_or_default() { - for completion in CompletionsDb::completions::(&db, active) { - log::info!("rebroadcasting {}", hex::encode(&completion)); + for claim in CompletionsDb::completions::(&db, active) { + log::info!("rebroadcasting completion with claim 
{}", hex::encode(claim.as_ref())); // TODO: Don't drop the error entirely. Check for invariants - let _ = network - .publish_transaction(&TransactionDb::transaction::(&db, &completion).unwrap()) - .await; + let _ = + network.publish_completion(&CompletionDb::completion::(&db, &claim).unwrap()).await; } } // Only run every five minutes so we aren't frequently loading tens to hundreds of KB from @@ -242,7 +268,7 @@ impl Signer { fn complete( &mut self, id: [u8; 32], - tx_id: &>::Id, + claim: &::Claim, ) -> ProcessorMessage { // Assert we're actively signing for this TX assert!(self.signable.remove(&id).is_some(), "completed a TX we weren't signing for"); @@ -256,7 +282,7 @@ impl Signer { self.signing.remove(&id); // Emit the event for it - ProcessorMessage::Completed { session: self.session, id, tx: tx_id.as_ref().to_vec() } + ProcessorMessage::Completed { session: self.session, id, tx: claim.as_ref().to_vec() } } #[must_use] @@ -264,16 +290,16 @@ impl Signer { &mut self, txn: &mut D::Transaction<'_>, id: [u8; 32], - tx: &N::Transaction, + completion: &::Completion, ) -> Option { let first_completion = !Self::already_completed(txn, id); // Save this completion to the DB CompletedOnChainDb::complete_on_chain(txn, &id); - CompletionsDb::complete::(txn, id, tx); + CompletionsDb::complete::(txn, id, completion); if first_completion { - Some(self.complete(id, &tx.id())) + Some(self.complete(id, &N::Eventuality::claim(completion))) } else { None } @@ -286,49 +312,50 @@ impl Signer { &mut self, txn: &mut D::Transaction<'_>, id: [u8; 32], - tx_id: &>::Id, + claim: &::Claim, ) -> Option { if let Some(eventuality) = EventualityDb::eventuality::(txn, id) { - // Transaction hasn't hit our mempool/was dropped for a different signature - // The latter can happen given certain latency conditions/a single malicious signer - // In the case of a single malicious signer, they can drag multiple honest validators down - // with them, so we unfortunately can't slash on this case - let 
Ok(tx) = self.network.get_transaction(tx_id).await else { - warn!( - "a validator claimed {} completed {} yet we didn't have that TX in our mempool {}", - hex::encode(tx_id), - hex::encode(id), - "(or had another connectivity issue)", - ); - return None; - }; - - if self.network.confirm_completion(&eventuality, &tx) { - info!("signer eventuality for {} resolved in TX {}", hex::encode(id), hex::encode(tx_id)); + match self.network.confirm_completion(&eventuality, claim).await { + Ok(Some(completion)) => { + info!( + "signer eventuality for {} resolved in {}", + hex::encode(id), + hex::encode(claim.as_ref()) + ); - let first_completion = !Self::already_completed(txn, id); + let first_completion = !Self::already_completed(txn, id); - // Save this completion to the DB - CompletionsDb::complete::(txn, id, &tx); + // Save this completion to the DB + CompletionsDb::complete::(txn, id, &completion); - if first_completion { - return Some(self.complete(id, &tx.id())); + if first_completion { + return Some(self.complete(id, claim)); + } + } + Ok(None) => { + warn!( + "a validator claimed {} completed {} when it did not", + hex::encode(claim.as_ref()), + hex::encode(id), + ); + } + Err(_) => { + // Transaction hasn't hit our mempool/was dropped for a different signature + // The latter can happen given certain latency conditions/a single malicious signer + // In the case of a single malicious signer, they can drag multiple honest validators down + // with them, so we unfortunately can't slash on this case + warn!( + "a validator claimed {} completed {} yet we couldn't check that claim", + hex::encode(claim.as_ref()), + hex::encode(id), + ); } - } else { - warn!( - "a validator claimed {} completed {} when it did not", - hex::encode(tx_id), - hex::encode(id) - ); } } else { - // If we don't have this in RAM, it should be because we already finished signing it - assert!(!CompletionsDb::completions::(txn, id).is_empty()); - info!( - "signer {} informed of the eventuality 
completion for plan {}, {}", - hex::encode(self.keys[0].group_key().to_bytes()), + warn!( + "informed of completion {} for eventuality {}, when we didn't have that eventuality", + hex::encode(claim.as_ref()), hex::encode(id), - "which we already marked as completed", ); } None @@ -405,7 +432,7 @@ impl Signer { let mut preprocesses = vec![]; let mut serialized_preprocesses = vec![]; for keys in &self.keys { - let machine = match self.network.attempt_send(keys.clone(), tx.clone()).await { + let machine = match self.network.attempt_sign(keys.clone(), tx.clone()).await { Err(e) => { error!("failed to attempt {}, #{}: {:?}", hex::encode(id.id), id.attempt, e); return None; @@ -572,7 +599,7 @@ impl Signer { assert!(shares.insert(self.keys[i].params().i(), our_share).is_none()); } - let tx = match machine.complete(shares) { + let completion = match machine.complete(shares) { Ok(res) => res, Err(e) => match e { FrostError::InternalError(_) | @@ -588,40 +615,39 @@ impl Signer { }, }; - // Save the transaction in case it's needed for recovery - CompletionsDb::complete::(txn, id.id, &tx); + // Save the completion in case it's needed for recovery + CompletionsDb::complete::(txn, id.id, &completion); // Publish it - let tx_id = tx.id(); - if let Err(e) = self.network.publish_transaction(&tx).await { - error!("couldn't publish {:?}: {:?}", tx, e); + if let Err(e) = self.network.publish_completion(&completion).await { + error!("couldn't publish completion for plan {}: {:?}", hex::encode(id.id), e); } else { - info!("published {} for plan {}", hex::encode(&tx_id), hex::encode(id.id)); + info!("published completion for plan {}", hex::encode(id.id)); } // Stop trying to sign for this TX - Some(self.complete(id.id, &tx_id)) + Some(self.complete(id.id, &N::Eventuality::claim(&completion))) } CoordinatorMessage::Reattempt { id } => self.attempt(txn, id.id, id.attempt).await, - CoordinatorMessage::Completed { session: _, id, tx: mut tx_vec } => { - let mut tx = >::Id::default(); - if 
tx.as_ref().len() != tx_vec.len() { - let true_len = tx_vec.len(); - tx_vec.truncate(2 * tx.as_ref().len()); + CoordinatorMessage::Completed { session: _, id, tx: mut claim_vec } => { + let mut claim = ::Claim::default(); + if claim.as_ref().len() != claim_vec.len() { + let true_len = claim_vec.len(); + claim_vec.truncate(2 * claim.as_ref().len()); warn!( "a validator claimed {}... (actual length {}) completed {} yet {}", - hex::encode(&tx_vec), + hex::encode(&claim_vec), true_len, hex::encode(id), - "that's not a valid TX ID", + "that's not a valid Claim", ); return None; } - tx.as_mut().copy_from_slice(&tx_vec); + claim.as_mut().copy_from_slice(&claim_vec); - self.claimed_eventuality_completion(txn, id, &tx).await + self.claimed_eventuality_completion(txn, id, &claim).await } } } diff --git a/processor/src/tests/addresses.rs b/processor/src/tests/addresses.rs index da20091be..3d4d6d4c1 100644 --- a/processor/src/tests/addresses.rs +++ b/processor/src/tests/addresses.rs @@ -1,4 +1,4 @@ -use core::time::Duration; +use core::{time::Duration, pin::Pin, future::Future}; use std::collections::HashMap; use rand_core::OsRng; @@ -13,18 +13,23 @@ use serai_db::{DbTxn, MemDb}; use crate::{ Plan, Db, - networks::{OutputType, Output, Block, Network}, - multisigs::scanner::{ScannerEvent, Scanner, ScannerHandle}, + networks::{OutputType, Output, Block, UtxoNetwork}, + multisigs::{ + scheduler::Scheduler, + scanner::{ScannerEvent, Scanner, ScannerHandle}, + }, tests::sign, }; -async fn spend( +async fn spend( db: &mut D, network: &N, keys: &HashMap>, scanner: &mut ScannerHandle, outputs: Vec, -) { +) where + >::Addendum: From<()>, +{ let key = keys[&Participant::new(1).unwrap()].group_key(); let mut keys_txs = HashMap::new(); @@ -41,7 +46,8 @@ async fn spend( key, inputs: outputs.clone(), payments: vec![], - change: Some(N::change_address(key)), + change: Some(N::change_address(key).unwrap()), + scheduler_addendum: ().into(), }, 0, ) @@ -70,25 +76,31 @@ async fn spend( 
scanner.release_lock().await; txn.commit(); } - ScannerEvent::Completed(_, _, _, _) => { + ScannerEvent::Completed(_, _, _, _, _) => { panic!("unexpectedly got eventuality completion"); } } } -pub async fn test_addresses(network: N) { +pub async fn test_addresses( + new_network: impl Fn(MemDb) -> Pin>>, +) where + >::Addendum: From<()>, +{ let mut keys = frost::tests::key_gen::<_, N::Curve>(&mut OsRng); for keys in keys.values_mut() { N::tweak_keys(keys); } let key = keys[&Participant::new(1).unwrap()].group_key(); + let mut db = MemDb::new(); + let network = new_network(db.clone()).await; + // Mine blocks so there's a confirmed block for _ in 0 .. N::CONFIRMATIONS { network.mine_block().await; } - let mut db = MemDb::new(); let (mut scanner, current_keys) = Scanner::new(network.clone(), db.clone()); assert!(current_keys.is_empty()); let mut txn = db.txn(); @@ -101,10 +113,10 @@ pub async fn test_addresses(network: N) { // Receive funds to the various addresses and make sure they're properly identified let mut received_outputs = vec![]; for (kind, address) in [ - (OutputType::External, N::external_address(key)), - (OutputType::Branch, N::branch_address(key)), - (OutputType::Change, N::change_address(key)), - (OutputType::Forwarded, N::forward_address(key)), + (OutputType::External, N::external_address(&network, key).await), + (OutputType::Branch, N::branch_address(key).unwrap()), + (OutputType::Change, N::change_address(key).unwrap()), + (OutputType::Forwarded, N::forward_address(key).unwrap()), ] { let block_id = network.test_send(address).await.id(); @@ -123,7 +135,7 @@ pub async fn test_addresses(network: N) { txn.commit(); received_outputs.extend(outputs); } - ScannerEvent::Completed(_, _, _, _) => { + ScannerEvent::Completed(_, _, _, _, _) => { panic!("unexpectedly got eventuality completion"); } }; diff --git a/processor/src/tests/literal/mod.rs b/processor/src/tests/literal/mod.rs index 192214eb1..d45649d59 100644 --- a/processor/src/tests/literal/mod.rs +++ 
b/processor/src/tests/literal/mod.rs @@ -3,6 +3,8 @@ use dockertest::{ TestBodySpecification, DockerOperations, DockerTest, }; +use serai_db::MemDb; + #[cfg(feature = "bitcoin")] mod bitcoin { use std::sync::Arc; @@ -33,8 +35,6 @@ mod bitcoin { sync::Mutex, }; - use serai_db::MemDb; - use super::*; use crate::{ networks::{Network, Bitcoin, Output, OutputType, Block}, @@ -57,7 +57,7 @@ mod bitcoin { fn test_receive_data_from_input() { let docker = spawn_bitcoin(); docker.run(|ops| async move { - let btc = bitcoin(&ops).await; + let btc = bitcoin(&ops).await(MemDb::new()).await; // generate a multisig address to receive the coins let mut keys = frost::tests::key_gen::<_, ::Curve>(&mut OsRng) @@ -65,12 +65,12 @@ mod bitcoin { .unwrap(); ::tweak_keys(&mut keys); let group_key = keys.group_key(); - let serai_btc_address = ::external_address(group_key); + let serai_btc_address = ::external_address(&btc, group_key).await; // btc key pair to send from let private_key = PrivateKey::new(SecretKey::new(&mut rand_core::OsRng), BNetwork::Regtest); let public_key = PublicKey::from_private_key(SECP256K1, &private_key); - let main_addr = BAddress::p2pkh(&public_key, BNetwork::Regtest); + let main_addr = BAddress::p2pkh(public_key, BNetwork::Regtest); // get unlocked coins let new_block = btc.get_latest_block_number().await.unwrap() + 1; @@ -107,7 +107,7 @@ mod bitcoin { version: Version(2), lock_time: LockTime::ZERO, input: vec![TxIn { - previous_output: OutPoint { txid: tx.txid(), vout: 0 }, + previous_output: OutPoint { txid: tx.compute_txid(), vout: 0 }, script_sig: Script::new().into(), sequence: Sequence(u32::MAX), witness: Witness::default(), @@ -128,14 +128,14 @@ mod bitcoin { version: Version(2), lock_time: LockTime::ZERO, input: vec![TxIn { - previous_output: OutPoint { txid: tx.txid(), vout: 0 }, + previous_output: OutPoint { txid: tx.compute_txid(), vout: 0 }, script_sig: Script::new().into(), sequence: Sequence(u32::MAX), witness: Witness::new(), }], output: vec![TxOut 
{ value: tx.output[0].value - BAmount::from_sat(10000), - script_pubkey: serai_btc_address.as_ref().script_pubkey(), + script_pubkey: serai_btc_address.into(), }], }; @@ -143,12 +143,14 @@ mod bitcoin { // This is the standard script with an extra argument of the InInstruction let mut sig = SECP256K1 .sign_ecdsa_low_r( - &Message::from( + &Message::from_digest_slice( SighashCache::new(&tx) .p2wsh_signature_hash(0, &script, initial_output_value, EcdsaSighashType::All) .unwrap() - .to_raw_hash(), - ), + .to_raw_hash() + .as_ref(), + ) + .unwrap(), &private_key.inner, ) .serialize_der() @@ -208,23 +210,26 @@ mod bitcoin { test } - async fn bitcoin(ops: &DockerOperations) -> Bitcoin { + async fn bitcoin( + ops: &DockerOperations, + ) -> impl Fn(MemDb) -> Pin>> { let handle = ops.handle("serai-dev-bitcoin").host_port(8332).unwrap(); - let bitcoin = Bitcoin::new(format!("http://serai:seraidex@{}:{}", handle.0, handle.1)).await; + let url = format!("http://serai:seraidex@{}:{}", handle.0, handle.1); + let bitcoin = Bitcoin::new(url.clone()).await; bitcoin.fresh_chain().await; - bitcoin + move |_db| Box::pin(Bitcoin::new(url.clone())) } - test_network!( + test_utxo_network!( Bitcoin, spawn_bitcoin, bitcoin, bitcoin_key_gen, bitcoin_scanner, + bitcoin_no_deadlock_in_multisig_completed, bitcoin_signer, bitcoin_wallet, bitcoin_addresses, - bitcoin_no_deadlock_in_multisig_completed, ); } @@ -252,24 +257,185 @@ mod monero { test } - async fn monero(ops: &DockerOperations) -> Monero { + async fn monero( + ops: &DockerOperations, + ) -> impl Fn(MemDb) -> Pin>> { let handle = ops.handle("serai-dev-monero").host_port(18081).unwrap(); - let monero = Monero::new(format!("http://serai:seraidex@{}:{}", handle.0, handle.1)).await; + let url = format!("http://serai:seraidex@{}:{}", handle.0, handle.1); + let monero = Monero::new(url.clone()).await; while monero.get_latest_block_number().await.unwrap() < 150 { monero.mine_block().await; } - monero + move |_db| 
Box::pin(Monero::new(url.clone())) } - test_network!( + test_utxo_network!( Monero, spawn_monero, monero, monero_key_gen, monero_scanner, + monero_no_deadlock_in_multisig_completed, monero_signer, monero_wallet, monero_addresses, - monero_no_deadlock_in_multisig_completed, + ); +} + +#[cfg(feature = "ethereum")] +mod ethereum { + use super::*; + + use ciphersuite::{Ciphersuite, Secp256k1}; + + use serai_client::validator_sets::primitives::Session; + + use crate::networks::Ethereum; + + fn spawn_ethereum() -> DockerTest { + serai_docker_tests::build("ethereum".to_string()); + + let composition = TestBodySpecification::with_image( + Image::with_repository("serai-dev-ethereum").pull_policy(PullPolicy::Never), + ) + .set_start_policy(StartPolicy::Strict) + .set_log_options(Some(LogOptions { + action: LogAction::Forward, + policy: LogPolicy::OnError, + source: LogSource::Both, + })) + .set_publish_all_ports(true); + + let mut test = DockerTest::new(); + test.provide_container(composition); + test + } + + async fn ethereum( + ops: &DockerOperations, + ) -> impl Fn(MemDb) -> Pin>>> { + use std::sync::Arc; + use ethereum_serai::{ + alloy::{ + primitives::U256, + simple_request_transport::SimpleRequest, + rpc_client::ClientBuilder, + provider::{Provider, RootProvider}, + }, + deployer::Deployer, + }; + + let handle = ops.handle("serai-dev-ethereum").host_port(8545).unwrap(); + let url = format!("http://{}:{}", handle.0, handle.1); + tokio::time::sleep(core::time::Duration::from_secs(15)).await; + + { + let provider = Arc::new(RootProvider::new( + ClientBuilder::default().transport(SimpleRequest::new(url.clone()), true), + )); + provider.raw_request::<_, ()>("evm_setAutomine".into(), [false]).await.unwrap(); + provider.raw_request::<_, ()>("anvil_mine".into(), [96]).await.unwrap(); + + // Perform deployment + { + // Make sure the Deployer constructor returns None, as it doesn't exist yet + assert!(Deployer::new(provider.clone()).await.unwrap().is_none()); + + // Deploy the 
Deployer + let tx = Deployer::deployment_tx(); + + provider + .raw_request::<_, ()>( + "anvil_setBalance".into(), + [ + tx.recover_signer().unwrap().to_string(), + (U256::from(tx.tx().gas_limit) * U256::from(tx.tx().gas_price)).to_string(), + ], + ) + .await + .unwrap(); + + let (tx, sig, _) = tx.into_parts(); + let mut bytes = vec![]; + tx.encode_with_signature_fields(&sig, &mut bytes); + + let pending_tx = provider.send_raw_transaction(&bytes).await.unwrap(); + provider.raw_request::<_, ()>("anvil_mine".into(), [96]).await.unwrap(); + //tokio::time::sleep(core::time::Duration::from_secs(15)).await; + let receipt = pending_tx.get_receipt().await.unwrap(); + assert!(receipt.status()); + + let _ = Deployer::new(provider.clone()) + .await + .expect("network error") + .expect("deployer wasn't deployed"); + } + } + + move |db| { + let url = url.clone(); + Box::pin(async move { + { + let db = db.clone(); + let url = url.clone(); + // Spawn a task to deploy the proper Router when the time comes + tokio::spawn(async move { + let key = loop { + let Some(key) = crate::key_gen::NetworkKeyDb::get(&db, Session(0)) else { + tokio::time::sleep(core::time::Duration::from_secs(1)).await; + continue; + }; + break ethereum_serai::crypto::PublicKey::new( + Secp256k1::read_G(&mut key.as_slice()).unwrap(), + ) + .unwrap(); + }; + let provider = Arc::new(RootProvider::new( + ClientBuilder::default().transport(SimpleRequest::new(url.clone()), true), + )); + let deployer = Deployer::new(provider.clone()).await.unwrap().unwrap(); + + let mut tx = deployer.deploy_router(&key); + tx.gas_limit = 1_000_000u64.into(); + tx.gas_price = 1_000_000_000u64.into(); + let tx = ethereum_serai::crypto::deterministically_sign(&tx); + + provider + .raw_request::<_, ()>( + "anvil_setBalance".into(), + [ + tx.recover_signer().unwrap().to_string(), + (U256::from(tx.tx().gas_limit) * U256::from(tx.tx().gas_price)).to_string(), + ], + ) + .await + .unwrap(); + + let (tx, sig, _) = tx.into_parts(); + let mut 
bytes = vec![]; + tx.encode_with_signature_fields(&sig, &mut bytes); + let pending_tx = provider.send_raw_transaction(&bytes).await.unwrap(); + provider.raw_request::<_, ()>("anvil_mine".into(), [96]).await.unwrap(); + let receipt = pending_tx.get_receipt().await.unwrap(); + assert!(receipt.status()); + + let _router = deployer.find_router(provider.clone(), &key).await.unwrap().unwrap(); + }); + } + + Ethereum::new(db, url.clone(), String::new()).await + }) + } + } + + test_network!( + Ethereum, + spawn_ethereum, + ethereum, + ethereum_key_gen, + ethereum_scanner, + ethereum_no_deadlock_in_multisig_completed, + ethereum_signer, + ethereum_wallet, ); } diff --git a/processor/src/tests/mod.rs b/processor/src/tests/mod.rs index 974be10b5..7ab57bdef 100644 --- a/processor/src/tests/mod.rs +++ b/processor/src/tests/mod.rs @@ -1,22 +1,18 @@ use std::sync::OnceLock; mod key_gen; -pub(crate) use key_gen::test_key_gen; mod scanner; -pub(crate) use scanner::{test_scanner, test_no_deadlock_in_multisig_completed}; mod signer; -pub(crate) use signer::{sign, test_signer}; +pub(crate) use signer::sign; mod cosigner; mod batch_signer; mod wallet; -pub(crate) use wallet::test_wallet; mod addresses; -pub(crate) use addresses::test_addresses; // Effective Once static INIT_LOGGER_CELL: OnceLock<()> = OnceLock::new(); @@ -27,22 +23,25 @@ fn init_logger() { #[macro_export] macro_rules! 
test_network { ( - $N: ident, + $N: ty, $docker: ident, $network: ident, $key_gen: ident, $scanner: ident, + $no_deadlock_in_multisig_completed: ident, $signer: ident, $wallet: ident, - $addresses: ident, - $no_deadlock_in_multisig_completed: ident, ) => { + use core::{pin::Pin, future::Future}; use $crate::tests::{ - init_logger, test_key_gen, test_scanner, test_no_deadlock_in_multisig_completed, test_signer, - test_wallet, test_addresses, + init_logger, + key_gen::test_key_gen, + scanner::{test_scanner, test_no_deadlock_in_multisig_completed}, + signer::test_signer, + wallet::test_wallet, }; - // This doesn't interact with a node and accordingly doesn't need to be run + // This doesn't interact with a node and accordingly doesn't need to be spawn one #[tokio::test] async fn $key_gen() { init_logger(); @@ -54,43 +53,76 @@ macro_rules! test_network { init_logger(); let docker = $docker(); docker.run(|ops| async move { - test_scanner($network(&ops).await).await; + let new_network = $network(&ops).await; + test_scanner(new_network).await; }); } #[test] - fn $signer() { + fn $no_deadlock_in_multisig_completed() { init_logger(); let docker = $docker(); docker.run(|ops| async move { - test_signer($network(&ops).await).await; + let new_network = $network(&ops).await; + test_no_deadlock_in_multisig_completed(new_network).await; }); } #[test] - fn $wallet() { + fn $signer() { init_logger(); let docker = $docker(); docker.run(|ops| async move { - test_wallet($network(&ops).await).await; + let new_network = $network(&ops).await; + test_signer(new_network).await; }); } #[test] - fn $addresses() { + fn $wallet() { init_logger(); let docker = $docker(); docker.run(|ops| async move { - test_addresses($network(&ops).await).await; + let new_network = $network(&ops).await; + test_wallet(new_network).await; }); } + }; +} + +#[macro_export] +macro_rules! 
test_utxo_network { + ( + $N: ty, + $docker: ident, + $network: ident, + $key_gen: ident, + $scanner: ident, + $no_deadlock_in_multisig_completed: ident, + $signer: ident, + $wallet: ident, + $addresses: ident, + ) => { + use $crate::tests::addresses::test_addresses; + + test_network!( + $N, + $docker, + $network, + $key_gen, + $scanner, + $no_deadlock_in_multisig_completed, + $signer, + $wallet, + ); #[test] - fn $no_deadlock_in_multisig_completed() { + fn $addresses() { init_logger(); let docker = $docker(); docker.run(|ops| async move { - test_no_deadlock_in_multisig_completed($network(&ops).await).await; + let new_network = $network(&ops).await; + test_addresses(new_network).await; }); } }; diff --git a/processor/src/tests/scanner.rs b/processor/src/tests/scanner.rs index 5aad5bb51..6421c499a 100644 --- a/processor/src/tests/scanner.rs +++ b/processor/src/tests/scanner.rs @@ -1,17 +1,19 @@ -use core::time::Duration; +use core::{pin::Pin, time::Duration, future::Future}; use std::sync::Arc; -use ciphersuite::Ciphersuite; use rand_core::OsRng; +use ciphersuite::{group::GroupEncoding, Ciphersuite}; use frost::{Participant, tests::key_gen}; use tokio::{sync::Mutex, time::timeout}; use serai_db::{DbTxn, Db, MemDb}; +use serai_client::validator_sets::primitives::Session; use crate::{ networks::{OutputType, Output, Block, Network}, + key_gen::NetworkKeyDb, multisigs::scanner::{ScannerEvent, Scanner, ScannerHandle}, }; @@ -40,23 +42,32 @@ pub async fn new_scanner( scanner } -pub async fn test_scanner(network: N) { +pub async fn test_scanner( + new_network: impl Fn(MemDb) -> Pin>>, +) { let mut keys = frost::tests::key_gen::<_, N::Curve>(&mut OsRng).remove(&Participant::new(1).unwrap()).unwrap(); N::tweak_keys(&mut keys); let group_key = keys.group_key(); + let mut db = MemDb::new(); + { + let mut txn = db.txn(); + NetworkKeyDb::set(&mut txn, Session(0), &group_key.to_bytes().as_ref().to_vec()); + txn.commit(); + } + let network = new_network(db.clone()).await; + // 
Mine blocks so there's a confirmed block for _ in 0 .. N::CONFIRMATIONS { network.mine_block().await; } - let db = MemDb::new(); let first = Arc::new(Mutex::new(true)); let scanner = new_scanner(&network, &db, group_key, &first).await; // Receive funds - let block = network.test_send(N::external_address(keys.group_key())).await; + let block = network.test_send(N::external_address(&network, keys.group_key()).await).await; let block_id = block.id(); // Verify the Scanner picked them up @@ -71,7 +82,7 @@ pub async fn test_scanner(network: N) { assert_eq!(outputs[0].kind(), OutputType::External); outputs } - ScannerEvent::Completed(_, _, _, _) => { + ScannerEvent::Completed(_, _, _, _, _) => { panic!("unexpectedly got eventuality completion"); } }; @@ -101,40 +112,63 @@ pub async fn test_scanner(network: N) { .is_err()); } -pub async fn test_no_deadlock_in_multisig_completed(network: N) { +pub async fn test_no_deadlock_in_multisig_completed( + new_network: impl Fn(MemDb) -> Pin>>, +) { + // This test scans two blocks then acknowledges one, yet a network with one confirm won't scan + // two blocks before the first is acknowledged (due to the look-ahead limit) + if N::CONFIRMATIONS <= 1 { + return; + } + + let mut db = MemDb::new(); + let network = new_network(db.clone()).await; + // Mine blocks so there's a confirmed block for _ in 0 .. N::CONFIRMATIONS { network.mine_block().await; } - let mut db = MemDb::new(); let (mut scanner, current_keys) = Scanner::new(network.clone(), db.clone()); assert!(current_keys.is_empty()); - let mut txn = db.txn(); // Register keys to cause Block events at CONFIRMATIONS (dropped since first keys), // CONFIRMATIONS + 1, and CONFIRMATIONS + 2 for i in 0 .. 
3 { + let key = { + let mut keys = key_gen(&mut OsRng); + for keys in keys.values_mut() { + N::tweak_keys(keys); + } + let key = keys[&Participant::new(1).unwrap()].group_key(); + if i == 0 { + let mut txn = db.txn(); + NetworkKeyDb::set(&mut txn, Session(0), &key.to_bytes().as_ref().to_vec()); + txn.commit(); + + // Sleep for 5 seconds as setting the Network key value will trigger an async task for + // Ethereum + tokio::time::sleep(Duration::from_secs(5)).await; + } + key + }; + + let mut txn = db.txn(); scanner .register_key( &mut txn, network.get_latest_block_number().await.unwrap() + N::CONFIRMATIONS + i, - { - let mut keys = key_gen(&mut OsRng); - for keys in keys.values_mut() { - N::tweak_keys(keys); - } - keys[&Participant::new(1).unwrap()].group_key() - }, + key, ) .await; + txn.commit(); } - txn.commit(); for _ in 0 .. (3 * N::CONFIRMATIONS) { network.mine_block().await; } + // Block for the second set of keys registered let block_id = match timeout(Duration::from_secs(30), scanner.events.recv()).await.unwrap().unwrap() { ScannerEvent::Block { is_retirement_block, block, outputs: _ } => { @@ -142,14 +176,15 @@ pub async fn test_no_deadlock_in_multisig_completed(network: N) { assert!(!is_retirement_block); block } - ScannerEvent::Completed(_, _, _, _) => { + ScannerEvent::Completed(_, _, _, _, _) => { panic!("unexpectedly got eventuality completion"); } }; + // Block for the third set of keys registered match timeout(Duration::from_secs(30), scanner.events.recv()).await.unwrap().unwrap() { ScannerEvent::Block { .. 
} => {} - ScannerEvent::Completed(_, _, _, _) => { + ScannerEvent::Completed(_, _, _, _, _) => { panic!("unexpectedly got eventuality completion"); } }; diff --git a/processor/src/tests/signer.rs b/processor/src/tests/signer.rs index 89d57bf39..77307ef26 100644 --- a/processor/src/tests/signer.rs +++ b/processor/src/tests/signer.rs @@ -1,7 +1,9 @@ +use core::{pin::Pin, future::Future}; use std::collections::HashMap; use rand_core::{RngCore, OsRng}; +use ciphersuite::group::GroupEncoding; use frost::{ Participant, ThresholdKeys, dkg::tests::{key_gen, clone_without}, @@ -16,8 +18,10 @@ use serai_client::{ use messages::sign::*; use crate::{ - Payment, Plan, - networks::{Output, Transaction, Network}, + Payment, + networks::{Output, Transaction, Eventuality, Network}, + key_gen::NetworkKeyDb, + multisigs::scheduler::Scheduler, signer::Signer, }; @@ -29,7 +33,7 @@ pub async fn sign( Participant, (ThresholdKeys, (N::SignableTransaction, N::Eventuality)), >, -) -> >::Id { +) -> ::Claim { let actual_id = SignId { session, id: [0xaa; 32], attempt: 0 }; let mut keys = HashMap::new(); @@ -65,14 +69,15 @@ pub async fn sign( let mut preprocesses = HashMap::new(); + let mut eventuality = None; for i in 1 ..= signers.len() { let i = Participant::new(u16::try_from(i).unwrap()).unwrap(); - let (tx, eventuality) = txs.remove(&i).unwrap(); + let (tx, this_eventuality) = txs.remove(&i).unwrap(); let mut txn = dbs.get_mut(&i).unwrap().txn(); match signers .get_mut(&i) .unwrap() - .sign_transaction(&mut txn, actual_id.id, tx, &eventuality) + .sign_transaction(&mut txn, actual_id.id, tx, &this_eventuality) .await { // All participants should emit a preprocess @@ -86,6 +91,11 @@ pub async fn sign( _ => panic!("didn't get preprocess back"), } txn.commit(); + + if eventuality.is_none() { + eventuality = Some(this_eventuality.clone()); + } + assert_eq!(eventuality, Some(this_eventuality)); } let mut shares = HashMap::new(); @@ -140,78 +150,97 @@ pub async fn sign( txn.commit(); } - let mut 
typed_tx_id = >::Id::default(); - typed_tx_id.as_mut().copy_from_slice(tx_id.unwrap().as_ref()); - typed_tx_id + let mut typed_claim = ::Claim::default(); + typed_claim.as_mut().copy_from_slice(tx_id.unwrap().as_ref()); + assert!(network.check_eventuality_by_claim(&eventuality.unwrap(), &typed_claim).await); + typed_claim } -pub async fn test_signer(network: N) { +pub async fn test_signer( + new_network: impl Fn(MemDb) -> Pin>>, +) { let mut keys = key_gen(&mut OsRng); for keys in keys.values_mut() { N::tweak_keys(keys); } let key = keys[&Participant::new(1).unwrap()].group_key(); - let outputs = network.get_outputs(&network.test_send(N::external_address(key)).await, key).await; + let mut db = MemDb::new(); + { + let mut txn = db.txn(); + NetworkKeyDb::set(&mut txn, Session(0), &key.to_bytes().as_ref().to_vec()); + txn.commit(); + } + let network = new_network(db.clone()).await; + + let outputs = network + .get_outputs(&network.test_send(N::external_address(&network, key).await).await, key) + .await; let sync_block = network.get_latest_block_number().await.unwrap() - N::CONFIRMATIONS; - let amount = 2 * N::DUST; + let amount = (2 * N::DUST) + 1000; + let plan = { + let mut txn = db.txn(); + let mut scheduler = N::Scheduler::new::(&mut txn, key, N::NETWORK); + let payments = vec![Payment { + address: N::external_address(&network, key).await, + data: None, + balance: Balance { + coin: match N::NETWORK { + NetworkId::Serai => panic!("test_signer called with Serai"), + NetworkId::Bitcoin => Coin::Bitcoin, + NetworkId::Ethereum => Coin::Ether, + NetworkId::Monero => Coin::Monero, + }, + amount: Amount(amount), + }, + }]; + let mut plans = scheduler.schedule::(&mut txn, outputs.clone(), payments, key, false); + assert_eq!(plans.len(), 1); + plans.swap_remove(0) + }; + let mut keys_txs = HashMap::new(); let mut eventualities = vec![]; for (i, keys) in keys.drain() { - let (signable, eventuality) = network - .prepare_send( - sync_block, - Plan { - key, - inputs: 
outputs.clone(), - payments: vec![Payment { - address: N::external_address(key), - data: None, - balance: Balance { - coin: match N::NETWORK { - NetworkId::Serai => panic!("test_signer called with Serai"), - NetworkId::Bitcoin => Coin::Bitcoin, - NetworkId::Ethereum => Coin::Ether, - NetworkId::Monero => Coin::Monero, - }, - amount: Amount(amount), - }, - }], - change: Some(N::change_address(key)), - }, - 0, - ) - .await - .unwrap() - .tx - .unwrap(); + let (signable, eventuality) = + network.prepare_send(sync_block, plan.clone(), 0).await.unwrap().tx.unwrap(); eventualities.push(eventuality.clone()); keys_txs.insert(i, (keys, (signable, eventuality))); } - // The signer may not publish the TX if it has a connection error - // It doesn't fail in this case - let txid = sign(network.clone(), Session(0), keys_txs).await; - let tx = network.get_transaction(&txid).await.unwrap(); - assert_eq!(tx.id(), txid); + let claim = sign(network.clone(), Session(0), keys_txs).await; + // Mine a block, and scan it, to ensure that the TX actually made it on chain network.mine_block().await; + let block_number = network.get_latest_block_number().await.unwrap(); + let tx = network.get_transaction_by_eventuality(block_number, &eventualities[0]).await; let outputs = network .get_outputs( &network.get_block(network.get_latest_block_number().await.unwrap()).await.unwrap(), key, ) .await; - assert_eq!(outputs.len(), 2); - // Adjust the amount for the fees - let amount = amount - tx.fee(&network).await; - // Check either output since Monero will randomize its output order - assert!((outputs[0].balance().amount.0 == amount) || (outputs[1].balance().amount.0 == amount)); + // Don't run if Ethereum as the received output will revert by the contract + // (and therefore not actually exist) + if N::NETWORK != NetworkId::Ethereum { + assert_eq!(outputs.len(), 1 + usize::from(u8::from(plan.change.is_some()))); + // Adjust the amount for the fees + let amount = amount - tx.fee(&network).await; + if 
plan.change.is_some() { + // Check either output since Monero will randomize its output order + assert!( + (outputs[0].balance().amount.0 == amount) || (outputs[1].balance().amount.0 == amount) + ); + } else { + assert!(outputs[0].balance().amount.0 == amount); + } + } // Check the eventualities pass for eventuality in eventualities { - assert!(network.confirm_completion(&eventuality, &tx)); + let completion = network.confirm_completion(&eventuality, &claim).await.unwrap().unwrap(); + assert_eq!(N::Eventuality::claim(&completion), claim); } } diff --git a/processor/src/tests/wallet.rs b/processor/src/tests/wallet.rs index c9cc6c667..86a27349d 100644 --- a/processor/src/tests/wallet.rs +++ b/processor/src/tests/wallet.rs @@ -1,7 +1,9 @@ -use std::{time::Duration, collections::HashMap}; +use core::{time::Duration, pin::Pin, future::Future}; +use std::collections::HashMap; use rand_core::OsRng; +use ciphersuite::group::GroupEncoding; use frost::{Participant, dkg::tests::key_gen}; use tokio::time::timeout; @@ -15,21 +17,19 @@ use serai_client::{ use crate::{ Payment, Plan, - networks::{Output, Transaction, Block, Network}, + networks::{Output, Transaction, Eventuality, Block, Network}, + key_gen::NetworkKeyDb, multisigs::{ scanner::{ScannerEvent, Scanner}, - scheduler::Scheduler, + scheduler::{self, Scheduler}, }, tests::sign, }; // Tests the Scanner, Scheduler, and Signer together -pub async fn test_wallet(network: N) { - // Mine blocks so there's a confirmed block - for _ in 0 .. 
N::CONFIRMATIONS { - network.mine_block().await; - } - +pub async fn test_wallet( + new_network: impl Fn(MemDb) -> Pin>>, +) { let mut keys = key_gen(&mut OsRng); for keys in keys.values_mut() { N::tweak_keys(keys); @@ -37,6 +37,18 @@ pub async fn test_wallet(network: N) { let key = keys[&Participant::new(1).unwrap()].group_key(); let mut db = MemDb::new(); + { + let mut txn = db.txn(); + NetworkKeyDb::set(&mut txn, Session(0), &key.to_bytes().as_ref().to_vec()); + txn.commit(); + } + let network = new_network(db.clone()).await; + + // Mine blocks so there's a confirmed block + for _ in 0 .. N::CONFIRMATIONS { + network.mine_block().await; + } + let (mut scanner, current_keys) = Scanner::new(network.clone(), db.clone()); assert!(current_keys.is_empty()); let (block_id, outputs) = { @@ -47,7 +59,7 @@ pub async fn test_wallet(network: N) { network.mine_block().await; } - let block = network.test_send(N::external_address(key)).await; + let block = network.test_send(N::external_address(&network, key).await).await; let block_id = block.id(); match timeout(Duration::from_secs(30), scanner.events.recv()).await.unwrap().unwrap() { @@ -58,7 +70,7 @@ pub async fn test_wallet(network: N) { assert_eq!(outputs.len(), 1); (block_id, outputs) } - ScannerEvent::Completed(_, _, _, _) => { + ScannerEvent::Completed(_, _, _, _, _) => { panic!("unexpectedly got eventuality completion"); } } @@ -69,22 +81,13 @@ pub async fn test_wallet(network: N) { txn.commit(); let mut txn = db.txn(); - let mut scheduler = Scheduler::new::( - &mut txn, - key, - match N::NETWORK { - NetworkId::Serai => panic!("test_wallet called with Serai"), - NetworkId::Bitcoin => Coin::Bitcoin, - NetworkId::Ethereum => Coin::Ether, - NetworkId::Monero => Coin::Monero, - }, - ); + let mut scheduler = N::Scheduler::new::(&mut txn, key, N::NETWORK); let amount = 2 * N::DUST; let plans = scheduler.schedule::( &mut txn, outputs.clone(), vec![Payment { - address: N::external_address(key), + address: 
N::external_address(&network, key).await, data: None, balance: Balance { coin: match N::NETWORK { @@ -100,27 +103,32 @@ pub async fn test_wallet(network: N) { false, ); txn.commit(); + assert_eq!(plans.len(), 1); + assert_eq!(plans[0].key, key); + if std::any::TypeId::of::() == + std::any::TypeId::of::>() + { + assert_eq!(plans[0].inputs, vec![]); + } else { + assert_eq!(plans[0].inputs, outputs); + } assert_eq!( - plans, - vec![Plan { - key, - inputs: outputs.clone(), - payments: vec![Payment { - address: N::external_address(key), - data: None, - balance: Balance { - coin: match N::NETWORK { - NetworkId::Serai => panic!("test_wallet called with Serai"), - NetworkId::Bitcoin => Coin::Bitcoin, - NetworkId::Ethereum => Coin::Ether, - NetworkId::Monero => Coin::Monero, - }, - amount: Amount(amount), - } - }], - change: Some(N::change_address(key)), + plans[0].payments, + vec![Payment { + address: N::external_address(&network, key).await, + data: None, + balance: Balance { + coin: match N::NETWORK { + NetworkId::Serai => panic!("test_wallet called with Serai"), + NetworkId::Bitcoin => Coin::Bitcoin, + NetworkId::Ethereum => Coin::Ether, + NetworkId::Monero => Coin::Monero, + }, + amount: Amount(amount), + } }] ); + assert_eq!(plans[0].change, N::change_address(key)); { let mut buf = vec![]; @@ -143,39 +151,55 @@ pub async fn test_wallet(network: N) { keys_txs.insert(i, (keys, (signable, eventuality))); } - let txid = sign(network.clone(), Session(0), keys_txs).await; - let tx = network.get_transaction(&txid).await.unwrap(); + let claim = sign(network.clone(), Session(0), keys_txs).await; network.mine_block().await; let block_number = network.get_latest_block_number().await.unwrap(); + let tx = network.get_transaction_by_eventuality(block_number, &eventualities[0]).await; let block = network.get_block(block_number).await.unwrap(); let outputs = network.get_outputs(&block, key).await; - assert_eq!(outputs.len(), 2); - let amount = amount - tx.fee(&network).await; - 
assert!((outputs[0].balance().amount.0 == amount) || (outputs[1].balance().amount.0 == amount)); + + // Don't run if Ethereum as the received output will revert by the contract + // (and therefore not actually exist) + if N::NETWORK != NetworkId::Ethereum { + assert_eq!(outputs.len(), 1 + usize::from(u8::from(plans[0].change.is_some()))); + // Adjust the amount for the fees + let amount = amount - tx.fee(&network).await; + if plans[0].change.is_some() { + // Check either output since Monero will randomize its output order + assert!( + (outputs[0].balance().amount.0 == amount) || (outputs[1].balance().amount.0 == amount) + ); + } else { + assert!(outputs[0].balance().amount.0 == amount); + } + } for eventuality in eventualities { - assert!(network.confirm_completion(&eventuality, &tx)); + let completion = network.confirm_completion(&eventuality, &claim).await.unwrap().unwrap(); + assert_eq!(N::Eventuality::claim(&completion), claim); } for _ in 1 .. N::CONFIRMATIONS { network.mine_block().await; } - match timeout(Duration::from_secs(30), scanner.events.recv()).await.unwrap().unwrap() { - ScannerEvent::Block { is_retirement_block, block: block_id, outputs: these_outputs } => { - scanner.multisig_completed.send(false).unwrap(); - assert!(!is_retirement_block); - assert_eq!(block_id, block.id()); - assert_eq!(these_outputs, outputs); - } - ScannerEvent::Completed(_, _, _, _) => { - panic!("unexpectedly got eventuality completion"); + if N::NETWORK != NetworkId::Ethereum { + match timeout(Duration::from_secs(30), scanner.events.recv()).await.unwrap().unwrap() { + ScannerEvent::Block { is_retirement_block, block: block_id, outputs: these_outputs } => { + scanner.multisig_completed.send(false).unwrap(); + assert!(!is_retirement_block); + assert_eq!(block_id, block.id()); + assert_eq!(these_outputs, outputs); + } + ScannerEvent::Completed(_, _, _, _, _) => { + panic!("unexpectedly got eventuality completion"); + } } - } - // Check the Scanner DB can reload the outputs - 
let mut txn = db.txn(); - assert_eq!(scanner.ack_block(&mut txn, block.id()).await.1, outputs); - scanner.release_lock().await; - txn.commit(); + // Check the Scanner DB can reload the outputs + let mut txn = db.txn(); + assert_eq!(scanner.ack_block(&mut txn, block.id()).await.1, outputs); + scanner.release_lock().await; + txn.commit(); + } } diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 227b9c218..77a0cea27 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,5 +1,5 @@ [toolchain] -channel = "1.76" +channel = "1.77" targets = ["wasm32-unknown-unknown"] profile = "minimal" components = ["rust-src", "rustfmt", "clippy"] diff --git a/docs/DKG Exclusions.md b/spec/DKG Exclusions.md similarity index 100% rename from docs/DKG Exclusions.md rename to spec/DKG Exclusions.md diff --git a/docs/Getting Started.md b/spec/Getting Started.md similarity index 95% rename from docs/Getting Started.md rename to spec/Getting Started.md index 0034d69d4..c2530b2ac 100644 --- a/docs/Getting Started.md +++ b/spec/Getting Started.md @@ -36,16 +36,16 @@ rustup target add wasm32-unknown-unknown --toolchain nightly ``` cargo install svm-rs -svm install 0.8.16 -svm use 0.8.16 +svm install 0.8.25 +svm use 0.8.25 ``` ### Install Solidity Compiler Version Manager ``` cargo install svm-rs -svm install 0.8.16 -svm use 0.8.16 +svm install 0.8.25 +svm use 0.8.25 ``` ### Install foundry (for tests) diff --git a/docs/Serai.md b/spec/Serai.md similarity index 100% rename from docs/Serai.md rename to spec/Serai.md diff --git a/docs/coordinator/Coordinator.md b/spec/coordinator/Coordinator.md similarity index 100% rename from docs/coordinator/Coordinator.md rename to spec/coordinator/Coordinator.md diff --git a/docs/coordinator/Tributary.md b/spec/coordinator/Tributary.md similarity index 100% rename from docs/coordinator/Tributary.md rename to spec/coordinator/Tributary.md diff --git a/docs/cryptography/Distributed Key Generation.md b/spec/cryptography/Distributed Key 
Generation.md similarity index 100% rename from docs/cryptography/Distributed Key Generation.md rename to spec/cryptography/Distributed Key Generation.md diff --git a/docs/cryptography/FROST.md b/spec/cryptography/FROST.md similarity index 100% rename from docs/cryptography/FROST.md rename to spec/cryptography/FROST.md diff --git a/docs/integrations/Bitcoin.md b/spec/integrations/Bitcoin.md similarity index 100% rename from docs/integrations/Bitcoin.md rename to spec/integrations/Bitcoin.md diff --git a/spec/integrations/Ethereum.md b/spec/integrations/Ethereum.md new file mode 100644 index 000000000..1e1f3ba19 --- /dev/null +++ b/spec/integrations/Ethereum.md @@ -0,0 +1,27 @@ +# Ethereum + +### Addresses + +Ethereum addresses are 20-byte hashes, identical to Ethereum proper. + +### In Instructions + +In Instructions may be created in one of two ways. + +1) Have an EOA call `transfer` or `transferFrom` on an ERC20, appending the + encoded InInstruction directly after the calldata. `origin` defaults to the + party transferred from. +2) Call `inInstruction` on the Router. `origin` defaults to `msg.sender`. + +### Out Instructions + +`data` is limited to 512 bytes. + +If `data` isn't provided or is malformed, ETH transfers will execute with 5,000 +gas and token transfers with 100,000 gas. + +If `data` is provided and well-formed, `destination` is ignored and the Ethereum +Router will construct and call a new contract to proxy the contained calls. The +transfer executes to the constructed contract as above, before the constructed +contract is called with the calls inside `data`. The sandboxed execution has a +gas limit of 350,000. 
diff --git a/docs/integrations/Instructions.md b/spec/integrations/Instructions.md similarity index 100% rename from docs/integrations/Instructions.md rename to spec/integrations/Instructions.md diff --git a/docs/integrations/Monero.md b/spec/integrations/Monero.md similarity index 100% rename from docs/integrations/Monero.md rename to spec/integrations/Monero.md diff --git a/docs/media/icon.svg b/spec/media/icon.svg similarity index 100% rename from docs/media/icon.svg rename to spec/media/icon.svg diff --git a/docs/policy/Canonical Chain.md b/spec/policy/Canonical Chain.md similarity index 100% rename from docs/policy/Canonical Chain.md rename to spec/policy/Canonical Chain.md diff --git a/docs/processor/Multisig Rotation.md b/spec/processor/Multisig Rotation.md similarity index 100% rename from docs/processor/Multisig Rotation.md rename to spec/processor/Multisig Rotation.md diff --git a/docs/processor/Processor.md b/spec/processor/Processor.md similarity index 100% rename from docs/processor/Processor.md rename to spec/processor/Processor.md diff --git a/docs/processor/Scanning.md b/spec/processor/Scanning.md similarity index 100% rename from docs/processor/Scanning.md rename to spec/processor/Scanning.md diff --git a/docs/processor/UTXO Management.md b/spec/processor/UTXO Management.md similarity index 100% rename from docs/processor/UTXO Management.md rename to spec/processor/UTXO Management.md diff --git a/docs/protocol/Constants.md b/spec/protocol/Constants.md similarity index 100% rename from docs/protocol/Constants.md rename to spec/protocol/Constants.md diff --git a/docs/protocol/In Instructions.md b/spec/protocol/In Instructions.md similarity index 100% rename from docs/protocol/In Instructions.md rename to spec/protocol/In Instructions.md diff --git a/docs/protocol/Validator Sets.md b/spec/protocol/Validator Sets.md similarity index 100% rename from docs/protocol/Validator Sets.md rename to spec/protocol/Validator Sets.md diff --git 
a/substrate/abi/Cargo.toml b/substrate/abi/Cargo.toml index 043504868..3aac979ac 100644 --- a/substrate/abi/Cargo.toml +++ b/substrate/abi/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT" repository = "https://github.com/serai-dex/serai/tree/develop/substrate/abi" authors = ["Luke Parker "] edition = "2021" -rust-version = "1.69" +rust-version = "1.74" [package.metadata.docs.rs] all-features = true diff --git a/substrate/client/Cargo.toml b/substrate/client/Cargo.toml index f97e40fba..0eeb3a2f5 100644 --- a/substrate/client/Cargo.toml +++ b/substrate/client/Cargo.toml @@ -36,7 +36,7 @@ async-lock = "3" simple-request = { path = "../../common/request", version = "0.1", optional = true } -bitcoin = { version = "0.31", optional = true } +bitcoin = { version = "0.32", optional = true } ciphersuite = { path = "../../crypto/ciphersuite", version = "0.4", optional = true } monero-serai = { path = "../../coins/monero", version = "0.1.4-alpha", optional = true } diff --git a/substrate/client/src/networks/bitcoin.rs b/substrate/client/src/networks/bitcoin.rs index 42cf41bfc..502bfb440 100644 --- a/substrate/client/src/networks/bitcoin.rs +++ b/substrate/client/src/networks/bitcoin.rs @@ -1,4 +1,4 @@ -use core::str::FromStr; +use core::{str::FromStr, fmt}; use scale::{Encode, Decode}; @@ -6,38 +6,46 @@ use bitcoin::{ hashes::{Hash as HashTrait, hash160::Hash}, PubkeyHash, ScriptHash, network::Network, - WitnessVersion, WitnessProgram, - address::{Error, Payload, NetworkChecked, Address as BAddressGeneric}, + WitnessVersion, WitnessProgram, ScriptBuf, + address::{AddressType, NetworkChecked, Address as BAddress}, }; -type BAddress = BAddressGeneric; - #[derive(Clone, Eq, Debug)] -pub struct Address(BAddress); +pub struct Address(ScriptBuf); impl PartialEq for Address { fn eq(&self, other: &Self) -> bool { - // Since Serai defines the Bitcoin-address specification as a variant of the payload alone, - // define equivalency as the payload alone - self.0.payload() == other.0.payload() + 
// Since Serai defines the Bitcoin-address specification as a variant of the script alone, + // define equivalency as the script alone + self.0 == other.0 + } +} + +impl From
for ScriptBuf { + fn from(addr: Address) -> ScriptBuf { + addr.0 } } impl FromStr for Address { - type Err = Error; - fn from_str(str: &str) -> Result { + type Err = (); + fn from_str(str: &str) -> Result { Address::new( - BAddressGeneric::from_str(str) - .map_err(|_| Error::UnrecognizedScript)? - .require_network(Network::Bitcoin)?, + BAddress::from_str(str) + .map_err(|_| ())? + .require_network(Network::Bitcoin) + .map_err(|_| ())? + .script_pubkey(), ) - .ok_or(Error::UnrecognizedScript) + .ok_or(()) } } -impl ToString for Address { - fn to_string(&self) -> String { - self.0.to_string() +impl fmt::Display for Address { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + BAddress::::from_script(&self.0, Network::Bitcoin) + .map_err(|_| fmt::Error)? + .fmt(f) } } @@ -54,55 +62,52 @@ enum EncodedAddress { impl TryFrom> for Address { type Error = (); fn try_from(data: Vec) -> Result { - Ok(Address(BAddress::new( - Network::Bitcoin, - match EncodedAddress::decode(&mut data.as_ref()).map_err(|_| ())? { - EncodedAddress::P2PKH(hash) => { - Payload::PubkeyHash(PubkeyHash::from_raw_hash(Hash::from_byte_array(hash))) - } - EncodedAddress::P2SH(hash) => { - Payload::ScriptHash(ScriptHash::from_raw_hash(Hash::from_byte_array(hash))) - } - EncodedAddress::P2WPKH(hash) => { - Payload::WitnessProgram(WitnessProgram::new(WitnessVersion::V0, hash).unwrap()) - } - EncodedAddress::P2WSH(hash) => { - Payload::WitnessProgram(WitnessProgram::new(WitnessVersion::V0, hash).unwrap()) - } - EncodedAddress::P2TR(key) => { - Payload::WitnessProgram(WitnessProgram::new(WitnessVersion::V1, key).unwrap()) - } - }, - ))) + Ok(Address(match EncodedAddress::decode(&mut data.as_ref()).map_err(|_| ())? 
{ + EncodedAddress::P2PKH(hash) => { + ScriptBuf::new_p2pkh(&PubkeyHash::from_raw_hash(Hash::from_byte_array(hash))) + } + EncodedAddress::P2SH(hash) => { + ScriptBuf::new_p2sh(&ScriptHash::from_raw_hash(Hash::from_byte_array(hash))) + } + EncodedAddress::P2WPKH(hash) => { + ScriptBuf::new_witness_program(&WitnessProgram::new(WitnessVersion::V0, &hash).unwrap()) + } + EncodedAddress::P2WSH(hash) => { + ScriptBuf::new_witness_program(&WitnessProgram::new(WitnessVersion::V0, &hash).unwrap()) + } + EncodedAddress::P2TR(key) => { + ScriptBuf::new_witness_program(&WitnessProgram::new(WitnessVersion::V1, &key).unwrap()) + } + })) } } fn try_to_vec(addr: &Address) -> Result, ()> { + let parsed_addr = + BAddress::::from_script(&addr.0, Network::Bitcoin).map_err(|_| ())?; Ok( - (match addr.0.payload() { - Payload::PubkeyHash(hash) => EncodedAddress::P2PKH(*hash.as_raw_hash().as_byte_array()), - Payload::ScriptHash(hash) => EncodedAddress::P2SH(*hash.as_raw_hash().as_byte_array()), - Payload::WitnessProgram(program) => match program.version() { - WitnessVersion::V0 => { - let program = program.program(); - if program.len() == 20 { - let mut buf = [0; 20]; - buf.copy_from_slice(program.as_ref()); - EncodedAddress::P2WPKH(buf) - } else if program.len() == 32 { - let mut buf = [0; 32]; - buf.copy_from_slice(program.as_ref()); - EncodedAddress::P2WSH(buf) - } else { - Err(())? - } - } - WitnessVersion::V1 => { - let program_ref: &[u8] = program.program().as_ref(); - EncodedAddress::P2TR(program_ref.try_into().map_err(|_| ())?) 
- } - _ => Err(())?, - }, + (match parsed_addr.address_type() { + Some(AddressType::P2pkh) => { + EncodedAddress::P2PKH(*parsed_addr.pubkey_hash().unwrap().as_raw_hash().as_byte_array()) + } + Some(AddressType::P2sh) => { + EncodedAddress::P2SH(*parsed_addr.script_hash().unwrap().as_raw_hash().as_byte_array()) + } + Some(AddressType::P2wpkh) => { + let program = parsed_addr.witness_program().ok_or(())?; + let program = program.program().as_bytes(); + EncodedAddress::P2WPKH(program.try_into().map_err(|_| ())?) + } + Some(AddressType::P2wsh) => { + let program = parsed_addr.witness_program().ok_or(())?; + let program = program.program().as_bytes(); + EncodedAddress::P2WSH(program.try_into().map_err(|_| ())?) + } + Some(AddressType::P2tr) => { + let program = parsed_addr.witness_program().ok_or(())?; + let program = program.program().as_bytes(); + EncodedAddress::P2TR(program.try_into().map_err(|_| ())?) + } _ => Err(())?, }) .encode(), @@ -116,20 +121,8 @@ impl From
for Vec { } } -impl From
for BAddress { - fn from(addr: Address) -> BAddress { - addr.0 - } -} - -impl AsRef for Address { - fn as_ref(&self) -> &BAddress { - &self.0 - } -} - impl Address { - pub fn new(address: BAddress) -> Option { + pub fn new(address: ScriptBuf) -> Option { let res = Self(address); if try_to_vec(&res).is_ok() { return Some(res); diff --git a/substrate/client/src/networks/monero.rs b/substrate/client/src/networks/monero.rs index e2268ec2e..5b43860e9 100644 --- a/substrate/client/src/networks/monero.rs +++ b/substrate/client/src/networks/monero.rs @@ -1,4 +1,4 @@ -use core::str::FromStr; +use core::{str::FromStr, fmt}; use scale::{Encode, Decode}; @@ -24,9 +24,9 @@ impl FromStr for Address { } } -impl ToString for Address { - fn to_string(&self) -> String { - self.0.to_string() +impl fmt::Display for Address { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.0.fmt(f) } } diff --git a/substrate/client/tests/common/mod.rs b/substrate/client/tests/common/mod.rs index d887b0b1d..d7e8436b2 100644 --- a/substrate/client/tests/common/mod.rs +++ b/substrate/client/tests/common/mod.rs @@ -29,7 +29,12 @@ macro_rules! 
serai_test { "--rpc-cors".to_string(), "all".to_string(), ]) - .replace_env(HashMap::from([("RUST_LOG".to_string(), "runtime=debug".to_string())])) + .replace_env( + HashMap::from([ + ("RUST_LOG".to_string(), "runtime=debug".to_string()), + ("KEY".to_string(), " ".to_string()), + ]) + ) .set_publish_all_ports(true) .set_handle(handle) .set_start_policy(StartPolicy::Strict) diff --git a/substrate/client/tests/dht.rs b/substrate/client/tests/dht.rs index 2fd40b126..82450e461 100644 --- a/substrate/client/tests/dht.rs +++ b/substrate/client/tests/dht.rs @@ -14,7 +14,9 @@ async fn dht() { TestBodySpecification::with_image( Image::with_repository("serai-dev-serai").pull_policy(PullPolicy::Never), ) - .replace_env([("SERAI_NAME".to_string(), name.to_string())].into()) + .replace_env( + [("SERAI_NAME".to_string(), name.to_string()), ("KEY".to_string(), " ".to_string())].into(), + ) .set_publish_all_ports(true) .set_handle(handle(name)) .set_start_policy(StartPolicy::Strict) diff --git a/substrate/client/tests/validator_sets.rs b/substrate/client/tests/validator_sets.rs index 4b0e55304..4e5aa6e80 100644 --- a/substrate/client/tests/validator_sets.rs +++ b/substrate/client/tests/validator_sets.rs @@ -102,7 +102,10 @@ async fn validator_set_rotation() { "local".to_string(), format!("--{name}"), ]) - .replace_env(HashMap::from([("RUST_LOG=runtime".to_string(), "debug".to_string())])) + .replace_env(HashMap::from([ + ("RUST_LOG".to_string(), "runtime=debug".to_string()), + ("KEY".to_string(), " ".to_string()), + ])) .set_publish_all_ports(true) .set_handle(handle(name)) .set_start_policy(StartPolicy::Strict) diff --git a/substrate/coins/pallet/Cargo.toml b/substrate/coins/pallet/Cargo.toml index 75011add0..2aba1fbd1 100644 --- a/substrate/coins/pallet/Cargo.toml +++ b/substrate/coins/pallet/Cargo.toml @@ -6,7 +6,7 @@ license = "AGPL-3.0-only" repository = "https://github.com/serai-dex/serai/tree/develop/substrate/coins/pallet" authors = ["Akil Demir "] edition = "2021" 
-rust-version = "1.70" +rust-version = "1.74" [package.metadata.docs.rs] all-features = true @@ -49,6 +49,9 @@ std = [ "coins-primitives/std", ] +# TODO +try-runtime = [] + runtime-benchmarks = [ "frame-system/runtime-benchmarks", "frame-support/runtime-benchmarks", diff --git a/substrate/coins/primitives/Cargo.toml b/substrate/coins/primitives/Cargo.toml index 322016da1..ec906929d 100644 --- a/substrate/coins/primitives/Cargo.toml +++ b/substrate/coins/primitives/Cargo.toml @@ -5,7 +5,7 @@ description = "Serai coins primitives" license = "MIT" authors = ["Luke Parker "] edition = "2021" -rust-version = "1.69" +rust-version = "1.74" [package.metadata.docs.rs] all-features = true diff --git a/substrate/dex/pallet/Cargo.toml b/substrate/dex/pallet/Cargo.toml index 6a2eadb8a..e2ed39288 100644 --- a/substrate/dex/pallet/Cargo.toml +++ b/substrate/dex/pallet/Cargo.toml @@ -6,7 +6,7 @@ license = "AGPL-3.0-only" repository = "https://github.com/serai-dex/serai/tree/develop/substrate/dex/pallet" authors = ["Parity Technologies , Akil Demir "] edition = "2021" -rust-version = "1.70" +rust-version = "1.74" [package.metadata.docs.rs] all-features = true diff --git a/substrate/dex/pallet/src/benchmarking.rs b/substrate/dex/pallet/src/benchmarking.rs index a00b6edcd..fb23b12a4 100644 --- a/substrate/dex/pallet/src/benchmarking.rs +++ b/substrate/dex/pallet/src/benchmarking.rs @@ -43,7 +43,7 @@ fn create_coin(coin: &Coin) -> (T::AccountId, AccountIdLookupOf) { let caller_lookup = T::Lookup::unlookup(caller); assert_ok!(Coins::::mint( caller, - Balance { coin: Coin::native(), amount: Amount(SubstrateAmount::max_value().div(1000u64)) } + Balance { coin: Coin::native(), amount: Amount(SubstrateAmount::MAX.div(1000u64)) } )); assert_ok!(Coins::::mint( caller, diff --git a/substrate/in-instructions/pallet/Cargo.toml b/substrate/in-instructions/pallet/Cargo.toml index f313a22a6..676d11f5e 100644 --- a/substrate/in-instructions/pallet/Cargo.toml +++ 
b/substrate/in-instructions/pallet/Cargo.toml @@ -60,3 +60,6 @@ std = [ "validator-sets-pallet/std", ] default = ["std"] + +# TODO +try-runtime = [] diff --git a/substrate/in-instructions/pallet/src/lib.rs b/substrate/in-instructions/pallet/src/lib.rs index 3ec63ae58..955a54df7 100644 --- a/substrate/in-instructions/pallet/src/lib.rs +++ b/substrate/in-instructions/pallet/src/lib.rs @@ -10,7 +10,7 @@ pub use in_instructions_primitives as primitives; use primitives::*; // TODO: Investigate why Substrate generates these -#[allow(clippy::cast_possible_truncation, clippy::no_effect_underscore_binding)] +#[allow(clippy::cast_possible_truncation, clippy::no_effect_underscore_binding, clippy::empty_docs)] #[frame_support::pallet] pub mod pallet { use sp_std::vec; diff --git a/substrate/in-instructions/primitives/Cargo.toml b/substrate/in-instructions/primitives/Cargo.toml index f579f59d4..545511347 100644 --- a/substrate/in-instructions/primitives/Cargo.toml +++ b/substrate/in-instructions/primitives/Cargo.toml @@ -5,7 +5,7 @@ description = "Serai instructions library, enabling encoding and decoding" license = "MIT" authors = ["Luke Parker "] edition = "2021" -rust-version = "1.69" +rust-version = "1.74" [package.metadata.docs.rs] all-features = true diff --git a/substrate/node/Cargo.toml b/substrate/node/Cargo.toml index e35bc0ea2..0e551c72b 100644 --- a/substrate/node/Cargo.toml +++ b/substrate/node/Cargo.toml @@ -20,12 +20,15 @@ workspace = true name = "serai-node" [dependencies] -zeroize = "1" rand_core = "0.6" +zeroize = "1" hex = "0.4" +log = "0.4" schnorrkel = "0.11" +libp2p = "0.52" + sp-core = { git = "https://github.com/serai-dex/substrate" } sp-keystore = { git = "https://github.com/serai-dex/substrate" } sp-timestamp = { git = "https://github.com/serai-dex/substrate" } diff --git a/substrate/node/src/chain_spec.rs b/substrate/node/src/chain_spec.rs index 042f51783..e66ee4a6d 100644 --- a/substrate/node/src/chain_spec.rs +++ b/substrate/node/src/chain_spec.rs 
@@ -1,6 +1,7 @@ use core::marker::PhantomData; +use std::collections::HashSet; -use sp_core::Pair as PairTrait; +use sp_core::{Decode, Pair as PairTrait, sr25519::Public}; use sc_service::ChainType; @@ -15,7 +16,18 @@ fn account_from_name(name: &'static str) -> PublicKey { insecure_pair_from_name(name).public() } -fn testnet_genesis( +fn wasm_binary() -> Vec { + // TODO: Accept a config of runtime path + const WASM_PATH: &str = "/runtime/serai.wasm"; + if let Ok(binary) = std::fs::read(WASM_PATH) { + log::info!("using {WASM_PATH}"); + return binary; + } + log::info!("using built-in wasm"); + WASM_BINARY.ok_or("compiled in wasm not available").unwrap().to_vec() +} + +fn devnet_genesis( wasm_binary: &[u8], validators: &[&'static str], endowed_accounts: Vec, @@ -64,18 +76,69 @@ fn testnet_genesis( } } -pub fn development_config() -> Result { - let wasm_binary = WASM_BINARY.ok_or("Development wasm not available")?; +fn testnet_genesis(wasm_binary: &[u8], validators: Vec<&'static str>) -> RuntimeGenesisConfig { + let validators = validators + .into_iter() + .map(|validator| Public::decode(&mut hex::decode(validator).unwrap().as_slice()).unwrap()) + .collect::>(); + + assert_eq!(validators.iter().collect::>().len(), validators.len()); + + RuntimeGenesisConfig { + system: SystemConfig { code: wasm_binary.to_vec(), _config: PhantomData }, + + transaction_payment: Default::default(), + + coins: CoinsConfig { + accounts: validators + .iter() + .map(|a| (*a, Balance { coin: Coin::Serai, amount: Amount(5_000_000 * 10_u64.pow(8)) })) + .collect(), + _ignore: Default::default(), + }, + + dex: DexConfig { + pools: vec![Coin::Bitcoin, Coin::Ether, Coin::Dai, Coin::Monero], + _ignore: Default::default(), + }, + + validator_sets: ValidatorSetsConfig { + networks: serai_runtime::primitives::NETWORKS + .iter() + .map(|network| match network { + NetworkId::Serai => (NetworkId::Serai, Amount(50_000 * 10_u64.pow(8))), + NetworkId::Bitcoin => (NetworkId::Bitcoin, Amount(1_000_000 * 
10_u64.pow(8))), + NetworkId::Ethereum => (NetworkId::Ethereum, Amount(1_000_000 * 10_u64.pow(8))), + NetworkId::Monero => (NetworkId::Monero, Amount(100_000 * 10_u64.pow(8))), + }) + .collect(), + participants: validators.clone(), + }, + signals: SignalsConfig::default(), + babe: BabeConfig { + authorities: validators.iter().map(|validator| ((*validator).into(), 1)).collect(), + epoch_config: Some(BABE_GENESIS_EPOCH_CONFIG), + _config: PhantomData, + }, + grandpa: GrandpaConfig { + authorities: validators.into_iter().map(|validator| (validator.into(), 1)).collect(), + _config: PhantomData, + }, + } +} + +pub fn development_config() -> ChainSpec { + let wasm_binary = wasm_binary(); - Ok(ChainSpec::from_genesis( + ChainSpec::from_genesis( // Name "Development Network", // ID "devnet", ChainType::Development, - || { - testnet_genesis( - wasm_binary, + move || { + devnet_genesis( + &wasm_binary, &["Alice"], vec![ account_from_name("Alice"), @@ -92,28 +155,28 @@ pub fn development_config() -> Result { // Telemetry None, // Protocol ID - Some("serai"), + Some("serai-devnet"), // Fork ID None, // Properties None, // Extensions None, - )) + ) } -pub fn testnet_config() -> Result { - let wasm_binary = WASM_BINARY.ok_or("Testnet wasm not available")?; +pub fn local_config() -> ChainSpec { + let wasm_binary = wasm_binary(); - Ok(ChainSpec::from_genesis( + ChainSpec::from_genesis( // Name "Local Test Network", // ID "local", ChainType::Local, - || { - testnet_genesis( - wasm_binary, + move || { + devnet_genesis( + &wasm_binary, &["Alice", "Bob", "Charlie", "Dave"], vec![ account_from_name("Alice"), @@ -130,12 +193,48 @@ pub fn testnet_config() -> Result { // Telemetry None, // Protocol ID - Some("serai"), + Some("serai-local"), // Fork ID None, // Properties None, // Extensions None, - )) + ) +} + +pub fn testnet_config() -> ChainSpec { + let wasm_binary = wasm_binary(); + + ChainSpec::from_genesis( + // Name + "Test Network 2", + // ID + "testnet-2", + ChainType::Live, + 
move || { + let _ = testnet_genesis(&wasm_binary, vec![]); + todo!() + }, + // Bootnodes + vec![], + // Telemetry + None, + // Protocol ID + Some("serai-testnet-2"), + // Fork ID + None, + // Properties + None, + // Extensions + None, + ) +} + +pub fn bootnode_multiaddrs(id: &str) -> Vec { + match id { + "devnet" | "local" => vec![], + "testnet-2" => todo!(), + _ => panic!("unrecognized network ID"), + } } diff --git a/substrate/node/src/command.rs b/substrate/node/src/command.rs index 3588f95f0..71eee0479 100644 --- a/substrate/node/src/command.rs +++ b/substrate/node/src/command.rs @@ -39,8 +39,9 @@ impl SubstrateCli for Cli { fn load_spec(&self, id: &str) -> Result, String> { match id { - "dev" | "devnet" => Ok(Box::new(chain_spec::development_config()?)), - "local" => Ok(Box::new(chain_spec::testnet_config()?)), + "dev" | "devnet" => Ok(Box::new(chain_spec::development_config())), + "local" => Ok(Box::new(chain_spec::local_config())), + "testnet" => Ok(Box::new(chain_spec::testnet_config())), _ => panic!("Unknown network ID"), } } diff --git a/substrate/node/src/keystore.rs b/substrate/node/src/keystore.rs index 333cd9c3a..c313773a2 100644 --- a/substrate/node/src/keystore.rs +++ b/substrate/node/src/keystore.rs @@ -1,5 +1,4 @@ use zeroize::Zeroize; -use rand_core::RngCore; use sp_core::{crypto::*, ed25519, sr25519}; use sp_keystore::*; @@ -9,12 +8,14 @@ pub struct Keystore(sr25519::Pair); impl Keystore { pub fn from_env() -> Option { let mut key_hex = serai_env::var("KEY")?; + if key_hex.trim().is_empty() { + None?; + } let mut key = hex::decode(&key_hex).expect("KEY from environment wasn't hex"); key_hex.zeroize(); assert_eq!(key.len(), 32, "KEY from environment wasn't 32 bytes"); - key.extend([0; 32]); - rand_core::OsRng.fill_bytes(&mut key[32 ..]); + key.extend(sp_core::blake2_256(&key)); let res = Self(sr25519::Pair::from(schnorrkel::SecretKey::from_bytes(&key).unwrap())); key.zeroize(); diff --git a/substrate/node/src/rpc.rs b/substrate/node/src/rpc.rs 
index f5ed25820..b818c7981 100644 --- a/substrate/node/src/rpc.rs +++ b/substrate/node/src/rpc.rs @@ -1,5 +1,7 @@ use std::{sync::Arc, collections::HashSet}; +use rand_core::{RngCore, OsRng}; + use sp_blockchain::{Error as BlockchainError, HeaderBackend, HeaderMetadata}; use sp_block_builder::BlockBuilder; use sp_api::ProvideRuntimeApi; @@ -17,6 +19,7 @@ pub use sc_rpc_api::DenyUnsafe; use sc_transaction_pool_api::TransactionPool; pub struct FullDeps { + pub id: String, pub client: Arc, pub pool: Arc

, pub deny_unsafe: DenyUnsafe, @@ -44,18 +47,19 @@ where use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer}; let mut module = RpcModule::new(()); - let FullDeps { client, pool, deny_unsafe, authority_discovery } = deps; + let FullDeps { id, client, pool, deny_unsafe, authority_discovery } = deps; module.merge(System::new(client.clone(), pool, deny_unsafe).into_rpc())?; module.merge(TransactionPayment::new(client.clone()).into_rpc())?; if let Some(authority_discovery) = authority_discovery { - let mut authority_discovery_module = RpcModule::new((client, RwLock::new(authority_discovery))); + let mut authority_discovery_module = + RpcModule::new((id, client, RwLock::new(authority_discovery))); authority_discovery_module.register_async_method( "p2p_validators", |params, context| async move { let network: NetworkId = params.parse()?; - let (client, authority_discovery) = &*context; + let (id, client, authority_discovery) = &*context; let latest_block = client.info().best_hash; let validators = client.runtime_api().validators(latest_block, network).map_err(|_| { @@ -64,7 +68,9 @@ where "please report this at https://github.com/serai-dex/serai", ))) })?; - let mut all_p2p_addresses = vec![]; + // Always return the protocol's bootnodes + let mut all_p2p_addresses = crate::chain_spec::bootnode_multiaddrs(id); + // Additionally returns validators found over the DHT for validator in validators { let mut returned_addresses = authority_discovery .write() @@ -72,14 +78,19 @@ where .get_addresses_by_authority_id(validator.into()) .await .unwrap_or_else(HashSet::new) - .into_iter(); - // Only take a single address + .into_iter() + .collect::>(); + // Randomly select an address // There should be one, there may be two if their IP address changed, and more should only // occur if they have multiple proxies/an IP address changing frequently/some issue // preventing consistent self-identification // It isn't beneficial to use multiple addresses for a 
single peer here - if let Some(address) = returned_addresses.next() { - all_p2p_addresses.push(address); + if !returned_addresses.is_empty() { + all_p2p_addresses.push( + returned_addresses.remove( + usize::try_from(OsRng.next_u64() >> 32).unwrap() % returned_addresses.len(), + ), + ); } } Ok(all_p2p_addresses) diff --git a/substrate/node/src/service.rs b/substrate/node/src/service.rs index 686e4c39a..5f76decf0 100644 --- a/substrate/node/src/service.rs +++ b/substrate/node/src/service.rs @@ -161,7 +161,7 @@ pub fn new_partial( )) } -pub fn new_full(config: Configuration) -> Result { +pub fn new_full(mut config: Configuration) -> Result { let ( sc_service::PartialComponents { client, @@ -176,6 +176,11 @@ pub fn new_full(config: Configuration) -> Result { keystore_container, ) = new_partial(&config)?; + config.network.node_name = "serai".to_string(); + config.network.client_version = "0.1.0".to_string(); + config.network.listen_addresses = + vec!["/ip4/0.0.0.0/tcp/30333".parse().unwrap(), "/ip6/::/tcp/30333".parse().unwrap()]; + let mut net_config = sc_network::config::FullNetworkConfiguration::new(&config.network); let grandpa_protocol_name = grandpa::protocol_standard_name(&client.block_hash(0).unwrap().unwrap(), &config.chain_spec); @@ -203,6 +208,59 @@ pub fn new_full(config: Configuration) -> Result { warp_sync_params: Some(WarpSyncParams::WithProvider(warp_sync)), })?; + task_manager.spawn_handle().spawn("bootnodes", "bootnodes", { + let network = network.clone(); + let id = config.chain_spec.id().to_string(); + + async move { + // Transforms the above Multiaddrs into MultiaddrWithPeerIds + // While the PeerIds *should* be known in advance and hardcoded, that data wasn't collected in + // time and this fine for a testnet + let bootnodes = || async { + use libp2p::{Transport as TransportTrait, tcp::tokio::Transport, noise::Config}; + + let bootnode_multiaddrs = crate::chain_spec::bootnode_multiaddrs(&id); + + let mut tasks = vec![]; + for multiaddr in 
bootnode_multiaddrs { + tasks.push(tokio::time::timeout( + core::time::Duration::from_secs(10), + tokio::task::spawn(async move { + let Ok(noise) = Config::new(&sc_network::Keypair::generate_ed25519()) else { None? }; + let mut transport = Transport::default() + .upgrade(libp2p::core::upgrade::Version::V1) + .authenticate(noise) + .multiplex(libp2p::yamux::Config::default()); + let Ok(transport) = transport.dial(multiaddr.clone()) else { None? }; + let Ok((peer_id, _)) = transport.await else { None? }; + Some(sc_network::config::MultiaddrWithPeerId { multiaddr, peer_id }) + }), + )); + } + + let mut res = vec![]; + for task in tasks { + if let Ok(Ok(Some(bootnode))) = task.await { + res.push(bootnode); + } + } + res + }; + + use sc_network::{NetworkStatusProvider, NetworkPeers}; + loop { + if let Ok(status) = network.status().await { + if status.num_connected_peers < 3 { + for bootnode in bootnodes().await { + let _ = network.add_reserved_peer(bootnode); + } + } + } + tokio::time::sleep(core::time::Duration::from_secs(60)).await; + } + } + }); + if config.offchain_worker.enabled { task_manager.spawn_handle().spawn( "offchain-workers-runner", @@ -258,11 +316,13 @@ pub fn new_full(config: Configuration) -> Result { }; let rpc_builder = { + let id = config.chain_spec.id().to_string(); let client = client.clone(); let pool = transaction_pool.clone(); Box::new(move |deny_unsafe, _| { crate::rpc::create_full(crate::rpc::FullDeps { + id: id.clone(), client: client.clone(), pool: pool.clone(), deny_unsafe, diff --git a/substrate/primitives/Cargo.toml b/substrate/primitives/Cargo.toml index 22fc47097..0e1e8f387 100644 --- a/substrate/primitives/Cargo.toml +++ b/substrate/primitives/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT" repository = "https://github.com/serai-dex/serai/tree/develop/substrate/primitives" authors = ["Luke Parker "] edition = "2021" -rust-version = "1.69" +rust-version = "1.74" [package.metadata.docs.rs] all-features = true diff --git 
a/substrate/runtime/src/lib.rs b/substrate/runtime/src/lib.rs index d7c67f476..9a534a725 100644 --- a/substrate/runtime/src/lib.rs +++ b/substrate/runtime/src/lib.rs @@ -314,12 +314,10 @@ pub type ReportLongevity = ::EpochDuration; impl babe::Config for Runtime { #[cfg(feature = "fast-epoch")] - #[allow(clippy::identity_op)] - type EpochDuration = ConstU64<{ DAYS / (24 * 2) }>; // 30 minutes + type EpochDuration = ConstU64<{ MINUTES / 2 }>; // 30 seconds #[cfg(not(feature = "fast-epoch"))] - #[allow(clippy::identity_op)] - type EpochDuration = ConstU64<{ DAYS }>; + type EpochDuration = ConstU64<{ 4 * 7 * DAYS }>; type ExpectedBlockTime = ConstU64<{ TARGET_BLOCK_TIME * 1000 }>; type EpochChangeTrigger = babe::ExternalTrigger; diff --git a/substrate/signals/pallet/Cargo.toml b/substrate/signals/pallet/Cargo.toml index 582a3e097..e06b5e6bb 100644 --- a/substrate/signals/pallet/Cargo.toml +++ b/substrate/signals/pallet/Cargo.toml @@ -57,4 +57,7 @@ runtime-benchmarks = [ "frame-support/runtime-benchmarks", ] +# TODO +try-runtime = [] + default = ["std"] diff --git a/substrate/signals/pallet/src/lib.rs b/substrate/signals/pallet/src/lib.rs index 3fad27c92..54d6086ac 100644 --- a/substrate/signals/pallet/src/lib.rs +++ b/substrate/signals/pallet/src/lib.rs @@ -142,6 +142,7 @@ pub mod pallet { } // 80% threshold + // TODO: Use 34% for halting a set (not 80%) const REQUIREMENT_NUMERATOR: u64 = 4; const REQUIREMENT_DIVISOR: u64 = 5; diff --git a/substrate/validator-sets/pallet/Cargo.toml b/substrate/validator-sets/pallet/Cargo.toml index 3b5537884..dd67d1bc3 100644 --- a/substrate/validator-sets/pallet/Cargo.toml +++ b/substrate/validator-sets/pallet/Cargo.toml @@ -70,6 +70,9 @@ std = [ "dex-pallet/std", ] +# TODO +try-runtime = [] + runtime-benchmarks = [ "frame-system/runtime-benchmarks", "frame-support/runtime-benchmarks", diff --git a/substrate/validator-sets/pallet/src/lib.rs b/substrate/validator-sets/pallet/src/lib.rs index 76c07e1cb..c852c4ce2 100644 --- 
a/substrate/validator-sets/pallet/src/lib.rs +++ b/substrate/validator-sets/pallet/src/lib.rs @@ -363,21 +363,26 @@ pub mod pallet { let allocation_per_key_share = Self::allocation_per_key_share(network).unwrap().0; - let mut iter = SortedAllocationsIter::::new(network); let mut participants = vec![]; - let mut key_shares = 0; let mut total_stake = 0; - while key_shares < u64::from(MAX_KEY_SHARES_PER_SET) { - let Some((key, amount)) = iter.next() else { break }; + { + let mut iter = SortedAllocationsIter::::new(network); + let mut key_shares = 0; + while key_shares < u64::from(MAX_KEY_SHARES_PER_SET) { + let Some((key, amount)) = iter.next() else { break }; + + let these_key_shares = + (amount.0 / allocation_per_key_share).min(u64::from(MAX_KEY_SHARES_PER_SET)); + participants.push((key, these_key_shares)); - let these_key_shares = amount.0 / allocation_per_key_share; - InSet::::set(network, key, Some(these_key_shares)); - participants.push((key, these_key_shares)); + key_shares += these_key_shares; + total_stake += amount.0; + } + amortize_excess_key_shares(&mut participants); + } - // This can technically set key_shares to a value exceeding MAX_KEY_SHARES_PER_SET - // Off-chain, the key shares per validator will be accordingly adjusted - key_shares += these_key_shares; - total_stake += amount.0; + for (key, shares) in &participants { + InSet::::set(network, key, Some(*shares)); } TotalAllocatedStake::::set(network, Some(Amount(total_stake))); @@ -472,7 +477,7 @@ pub mod pallet { let Some(top) = top else { return false }; - // key_shares may be over MAX_KEY_SHARES_PER_SET, which will cause an off-chain reduction of + // key_shares may be over MAX_KEY_SHARES_PER_SET, which will cause a round robin reduction of // each validator's key shares until their sum is MAX_KEY_SHARES_PER_SET // post_amortization_key_shares_for_top_validator yields what the top validator's key shares // would be after such a reduction, letting us evaluate this correctly diff --git 
a/substrate/validator-sets/primitives/src/lib.rs b/substrate/validator-sets/primitives/src/lib.rs index 644b19e18..c900b0a99 100644 --- a/substrate/validator-sets/primitives/src/lib.rs +++ b/substrate/validator-sets/primitives/src/lib.rs @@ -115,11 +115,11 @@ pub fn report_slashes_message(set: &ValidatorSet, slashes: &[(Public, u32)]) -> /// maximum. /// /// Reduction occurs by reducing each validator in a reverse round-robin. -pub fn amortize_excess_key_shares(validators: &mut [(Public, u16)]) { - let total_key_shares = validators.iter().map(|(_, shares)| shares).sum::(); - for i in 0 .. usize::from( - total_key_shares.saturating_sub(u16::try_from(MAX_KEY_SHARES_PER_SET).unwrap()), - ) { +pub fn amortize_excess_key_shares(validators: &mut [(Public, u64)]) { + let total_key_shares = validators.iter().map(|(_, shares)| shares).sum::(); + for i in 0 .. usize::try_from(total_key_shares.saturating_sub(u64::from(MAX_KEY_SHARES_PER_SET))) + .unwrap() + { validators[validators.len() - ((i % validators.len()) + 1)].1 -= 1; } } diff --git a/tests/coordinator/Cargo.toml b/tests/coordinator/Cargo.toml index a331d484b..f5bc6426e 100644 --- a/tests/coordinator/Cargo.toml +++ b/tests/coordinator/Cargo.toml @@ -20,7 +20,6 @@ workspace = true hex = "0.4" async-trait = "0.1" -async-recursion = "1" zeroize = { version = "1", default-features = false } rand_core = { version = "0.6", default-features = false } diff --git a/tests/coordinator/src/lib.rs b/tests/coordinator/src/lib.rs index da0e28dd0..0baa84a39 100644 --- a/tests/coordinator/src/lib.rs +++ b/tests/coordinator/src/lib.rs @@ -5,7 +5,10 @@ use std::{ time::Duration, }; -use tokio::{task::AbortHandle, sync::Mutex as AsyncMutex}; +use tokio::{ + task::AbortHandle, + sync::{Mutex as AsyncMutex, mpsc}, +}; use rand_core::{RngCore, OsRng}; @@ -58,21 +61,21 @@ pub fn coordinator_instance( } pub fn serai_composition(name: &str, fast_epoch: bool) -> TestBodySpecification { - if fast_epoch { + (if fast_epoch { 
serai_docker_tests::build("serai-fast-epoch".to_string()); TestBodySpecification::with_image( Image::with_repository("serai-dev-serai-fast-epoch").pull_policy(PullPolicy::Never), ) - .replace_env([("SERAI_NAME".to_string(), name.to_lowercase())].into()) - .set_publish_all_ports(true) } else { serai_docker_tests::build("serai".to_string()); TestBodySpecification::with_image( Image::with_repository("serai-dev-serai").pull_policy(PullPolicy::Never), ) - .replace_env([("SERAI_NAME".to_string(), name.to_lowercase())].into()) - .set_publish_all_ports(true) - } + }) + .replace_env( + [("SERAI_NAME".to_string(), name.to_lowercase()), ("KEY".to_string(), " ".to_string())].into(), + ) + .set_publish_all_ports(true) } fn is_cosign_message(msg: &CoordinatorMessage) -> bool { @@ -104,7 +107,6 @@ pub struct Handles { pub(crate) message_queue: String, } -#[derive(Clone)] pub struct Processor { network: NetworkId, @@ -112,7 +114,8 @@ pub struct Processor { #[allow(unused)] handles: Handles, - queue: Arc>, + msgs: mpsc::UnboundedReceiver, + queue_for_sending: MessageQueue, abort_handle: Option>, substrate_key: Arc::F>>>>, @@ -153,156 +156,173 @@ impl Processor { // The Serai RPC may or may not be started // Assume it is and continue, so if it's a few seconds late, it's still within tolerance + // Create the queue + let mut queue = ( + 0, + Arc::new(MessageQueue::new( + Service::Processor(network), + message_queue_rpc.clone(), + Zeroizing::new(processor_key), + )), + ); + + let (msg_send, msg_recv) = mpsc::unbounded_channel(); + + let substrate_key = Arc::new(AsyncMutex::new(None)); let mut res = Processor { network, serai_rpc, handles, - queue: Arc::new(AsyncMutex::new(( - 0, - 0, - MessageQueue::new( - Service::Processor(network), - message_queue_rpc, - Zeroizing::new(processor_key), - ), - ))), + queue_for_sending: MessageQueue::new( + Service::Processor(network), + message_queue_rpc, + Zeroizing::new(processor_key), + ), + msgs: msg_recv, abort_handle: None, - substrate_key: 
Arc::new(AsyncMutex::new(None)), + substrate_key: substrate_key.clone(), }; - // Handle any cosigns which come up - res.abort_handle = Some(Arc::new( - tokio::spawn({ - let mut res = res.clone(); - async move { - loop { - tokio::task::yield_now().await; - - let msg = { - let mut queue_lock = res.queue.lock().await; - let (_, next_recv_id, queue) = &mut *queue_lock; - let Ok(msg) = - tokio::time::timeout(Duration::from_secs(1), queue.next(Service::Coordinator)) - .await + // Spawn a task to handle cosigns and forward messages as appropriate + let abort_handle = tokio::spawn({ + async move { + loop { + // Get new messages + let (next_recv_id, queue) = &mut queue; + let msg = queue.next(Service::Coordinator).await; + assert_eq!(msg.from, Service::Coordinator); + assert_eq!(msg.id, *next_recv_id); + queue.ack(Service::Coordinator, msg.id).await; + *next_recv_id += 1; + + let msg_msg = borsh::from_slice(&msg.msg).unwrap(); + + // Remove any BatchReattempts clogging the pipe + // TODO: Set up a wrapper around serai-client so we aren't throwing this away yet + // leave it for the tests + if matches!( + msg_msg, + messages::CoordinatorMessage::Coordinator( + messages::coordinator::CoordinatorMessage::BatchReattempt { .. 
} + ) + ) { + continue; + } + + if !is_cosign_message(&msg_msg) { + msg_send.send(msg_msg).unwrap(); + continue; + } + let msg = msg_msg; + + let send_message = |msg: ProcessorMessage| async move { + queue + .queue( + Metadata { + from: Service::Processor(network), + to: Service::Coordinator, + intent: msg.intent(), + }, + borsh::to_vec(&msg).unwrap(), + ) + .await; + }; + + struct CurrentCosign { + block_number: u64, + block: [u8; 32], + } + static CURRENT_COSIGN: OnceLock>> = OnceLock::new(); + let mut current_cosign = + CURRENT_COSIGN.get_or_init(|| AsyncMutex::new(None)).lock().await; + match msg { + // If this is a CosignSubstrateBlock, reset the CurrentCosign + // While technically, each processor should individually track the current cosign, + // this is fine for current testing purposes + CoordinatorMessage::Coordinator( + messages::coordinator::CoordinatorMessage::CosignSubstrateBlock { id, block_number }, + ) => { + let SubstrateSignId { + id: SubstrateSignableId::CosigningSubstrateBlock(block), .. + } = id else { - continue; + panic!("CosignSubstrateBlock didn't have CosigningSubstrateBlock ID") }; - assert_eq!(msg.from, Service::Coordinator); - assert_eq!(msg.id, *next_recv_id); - - let msg_msg = borsh::from_slice(&msg.msg).unwrap(); - // Remove any BatchReattempts clogging the pipe - // TODO: Set up a wrapper around serai-client so we aren't throwing this away yet - // leave it for the tests - if matches!( - msg_msg, - messages::CoordinatorMessage::Coordinator( - messages::coordinator::CoordinatorMessage::BatchReattempt { .. 
} - ) - ) { - queue.ack(Service::Coordinator, msg.id).await; - *next_recv_id += 1; - continue; + + let new_cosign = CurrentCosign { block_number, block }; + if current_cosign.is_none() || (current_cosign.as_ref().unwrap().block != block) { + *current_cosign = Some(new_cosign); } - if !is_cosign_message(&msg_msg) { - continue; - }; - queue.ack(Service::Coordinator, msg.id).await; - *next_recv_id += 1; - msg_msg - }; - - struct CurrentCosign { - block_number: u64, - block: [u8; 32], + send_message( + messages::coordinator::ProcessorMessage::CosignPreprocess { + id: id.clone(), + preprocesses: vec![[raw_i; 64]], + } + .into(), + ) + .await; } - static CURRENT_COSIGN: OnceLock>> = OnceLock::new(); - let mut current_cosign = - CURRENT_COSIGN.get_or_init(|| AsyncMutex::new(None)).lock().await; - match msg { - // If this is a CosignSubstrateBlock, reset the CurrentCosign - // While technically, each processor should individually track the current cosign, - // this is fine for current testing purposes - CoordinatorMessage::Coordinator( - messages::coordinator::CoordinatorMessage::CosignSubstrateBlock { + CoordinatorMessage::Coordinator( + messages::coordinator::CoordinatorMessage::SubstratePreprocesses { id, .. }, + ) => { + // TODO: Assert the ID matches CURRENT_COSIGN + // TODO: Verify the received preprocesses + send_message( + messages::coordinator::ProcessorMessage::SubstrateShare { id, + shares: vec![[raw_i; 32]], + } + .into(), + ) + .await; + } + CoordinatorMessage::Coordinator( + messages::coordinator::CoordinatorMessage::SubstrateShares { .. 
}, + ) => { + // TODO: Assert the ID matches CURRENT_COSIGN + // TODO: Verify the shares + + let block_number = current_cosign.as_ref().unwrap().block_number; + let block = current_cosign.as_ref().unwrap().block; + + let substrate_key = substrate_key.lock().await.clone().unwrap(); + + // Expand to a key pair as Schnorrkel expects + // It's the private key + 32-bytes of entropy for nonces + the public key + let mut schnorrkel_key_pair = [0; 96]; + schnorrkel_key_pair[.. 32].copy_from_slice(&substrate_key.to_repr()); + OsRng.fill_bytes(&mut schnorrkel_key_pair[32 .. 64]); + schnorrkel_key_pair[64 ..].copy_from_slice( + &(::generator() * *substrate_key).to_bytes(), + ); + let signature = Signature( + schnorrkel::keys::Keypair::from_bytes(&schnorrkel_key_pair) + .unwrap() + .sign_simple(b"substrate", &cosign_block_msg(block_number, block)) + .to_bytes(), + ); + + send_message( + messages::coordinator::ProcessorMessage::CosignedBlock { block_number, - }, - ) => { - let SubstrateSignId { - id: SubstrateSignableId::CosigningSubstrateBlock(block), .. - } = id - else { - panic!("CosignSubstrateBlock didn't have CosigningSubstrateBlock ID") - }; - - let new_cosign = CurrentCosign { block_number, block }; - if current_cosign.is_none() || (current_cosign.as_ref().unwrap().block != block) { - *current_cosign = Some(new_cosign); + block, + signature: signature.0.to_vec(), } - res - .send_message(messages::coordinator::ProcessorMessage::CosignPreprocess { - id: id.clone(), - preprocesses: vec![[raw_i; 64]], - }) - .await; - } - CoordinatorMessage::Coordinator( - messages::coordinator::CoordinatorMessage::SubstratePreprocesses { id, .. }, - ) => { - // TODO: Assert the ID matches CURRENT_COSIGN - // TODO: Verify the received preprocesses - res - .send_message(messages::coordinator::ProcessorMessage::SubstrateShare { - id, - shares: vec![[raw_i; 32]], - }) - .await; - } - CoordinatorMessage::Coordinator( - messages::coordinator::CoordinatorMessage::SubstrateShares { .. 
}, - ) => { - // TODO: Assert the ID matches CURRENT_COSIGN - // TODO: Verify the shares - - let block_number = current_cosign.as_ref().unwrap().block_number; - let block = current_cosign.as_ref().unwrap().block; - - let substrate_key = res.substrate_key.lock().await.clone().unwrap(); - - // Expand to a key pair as Schnorrkel expects - // It's the private key + 32-bytes of entropy for nonces + the public key - let mut schnorrkel_key_pair = [0; 96]; - schnorrkel_key_pair[.. 32].copy_from_slice(&substrate_key.to_repr()); - OsRng.fill_bytes(&mut schnorrkel_key_pair[32 .. 64]); - schnorrkel_key_pair[64 ..].copy_from_slice( - &(::generator() * *substrate_key).to_bytes(), - ); - let signature = Signature( - schnorrkel::keys::Keypair::from_bytes(&schnorrkel_key_pair) - .unwrap() - .sign_simple(b"substrate", &cosign_block_msg(block_number, block)) - .to_bytes(), - ); - - res - .send_message(messages::coordinator::ProcessorMessage::CosignedBlock { - block_number, - block, - signature: signature.0.to_vec(), - }) - .await; - } - _ => panic!("unexpected message passed is_cosign_message"), + .into(), + ) + .await; } + _ => panic!("unexpected message passed is_cosign_message"), } } - }) - .abort_handle(), - )); + } + }) + .abort_handle(); + + res.abort_handle = Some(Arc::new(abort_handle)); res } @@ -315,9 +335,8 @@ impl Processor { pub async fn send_message(&mut self, msg: impl Into) { let msg: ProcessorMessage = msg.into(); - let mut queue_lock = self.queue.lock().await; - let (next_send_id, _, queue) = &mut *queue_lock; - queue + self + .queue_for_sending .queue( Metadata { from: Service::Processor(self.network), @@ -327,36 +346,13 @@ impl Processor { borsh::to_vec(&msg).unwrap(), ) .await; - *next_send_id += 1; - } - - async fn recv_message_inner(&mut self) -> CoordinatorMessage { - loop { - tokio::task::yield_now().await; - - let mut queue_lock = self.queue.lock().await; - let (_, next_recv_id, queue) = &mut *queue_lock; - let msg = queue.next(Service::Coordinator).await; - 
assert_eq!(msg.from, Service::Coordinator); - assert_eq!(msg.id, *next_recv_id); - - // If this is a cosign message, let the cosign task handle it - let msg_msg = borsh::from_slice(&msg.msg).unwrap(); - if is_cosign_message(&msg_msg) { - continue; - } - - queue.ack(Service::Coordinator, msg.id).await; - *next_recv_id += 1; - return msg_msg; - } } /// Receive a message from the coordinator as a processor. pub async fn recv_message(&mut self) -> CoordinatorMessage { // Set a timeout of 30 minutes to allow effectively any protocol to occur without a fear of // an arbitrary timeout cutting it short - tokio::time::timeout(Duration::from_secs(30 * 60), self.recv_message_inner()).await.unwrap() + tokio::time::timeout(Duration::from_secs(20 * 60), self.msgs.recv()).await.unwrap().unwrap() } pub async fn set_substrate_key( diff --git a/tests/coordinator/src/tests/batch.rs b/tests/coordinator/src/tests/batch.rs index 3c4ecaa42..9cbd4b29a 100644 --- a/tests/coordinator/src/tests/batch.rs +++ b/tests/coordinator/src/tests/batch.rs @@ -245,7 +245,7 @@ pub async fn batch( ) ); - // Send the ack as expected, though it shouldn't trigger any observable behavior + // Send the ack as expected processor .send_message(messages::ProcessorMessage::Coordinator( messages::coordinator::ProcessorMessage::SubstrateBlockAck { diff --git a/tests/coordinator/src/tests/mod.rs b/tests/coordinator/src/tests/mod.rs index 95b8f682d..d7ba21c22 100644 --- a/tests/coordinator/src/tests/mod.rs +++ b/tests/coordinator/src/tests/mod.rs @@ -137,7 +137,6 @@ pub(crate) async fn new_test(test_body: impl TestBody, fast_epoch: bool) { *OUTER_OPS.get_or_init(|| Mutex::new(None)).lock().await = None; // Spawns a coordinator, if one has yet to be spawned, or else runs the test. 
- #[async_recursion::async_recursion] async fn spawn_coordinator_or_run_test(inner_ops: DockerOperations) { // If the outer operations have yet to be set, these *are* the outer operations let outer_ops = OUTER_OPS.get().unwrap(); @@ -180,7 +179,10 @@ pub(crate) async fn new_test(test_body: impl TestBody, fast_epoch: bool) { test.provide_container(composition); drop(context_lock); - test.run_async(spawn_coordinator_or_run_test).await; + fn recurse(ops: DockerOperations) -> core::pin::Pin>> { + Box::pin(spawn_coordinator_or_run_test(ops)) + } + test.run_async(recurse).await; } else { let outer_ops = outer_ops.lock().await.take().unwrap(); diff --git a/tests/docker/src/lib.rs b/tests/docker/src/lib.rs index ee68b9795..986a1793c 100644 --- a/tests/docker/src/lib.rs +++ b/tests/docker/src/lib.rs @@ -85,7 +85,7 @@ pub fn build(name: String) { } let mut dockerfile_path = orchestration_path.clone(); - if HashSet::from(["bitcoin", "ethereum", "monero"]).contains(name.as_str()) { + if HashSet::from(["bitcoin", "ethereum", "ethereum-relayer", "monero"]).contains(name.as_str()) { dockerfile_path = dockerfile_path.join("coins"); } if name.contains("-processor") { @@ -124,7 +124,8 @@ pub fn build(name: String) { // Check any additionally specified paths let meta = |path: PathBuf| (path.clone(), fs::metadata(path)); let mut metadatas = match name.as_str() { - "bitcoin" | "monero" => vec![], + "bitcoin" | "ethereum" | "monero" => vec![], + "ethereum-relayer" => vec![meta(repo_path.join("common")), meta(repo_path.join("coins"))], "message-queue" => vec![ meta(repo_path.join("common")), meta(repo_path.join("crypto")), diff --git a/tests/full-stack/Cargo.toml b/tests/full-stack/Cargo.toml index b45d7b53c..58e6de28c 100644 --- a/tests/full-stack/Cargo.toml +++ b/tests/full-stack/Cargo.toml @@ -20,7 +20,6 @@ workspace = true hex = "0.4" async-trait = "0.1" -async-recursion = "1" zeroize = { version = "1", default-features = false } rand_core = { version = "0.6", default-features = 
false } diff --git a/tests/full-stack/src/tests/mint_and_burn.rs b/tests/full-stack/src/tests/mint_and_burn.rs index 51b8156cf..4093e47dd 100644 --- a/tests/full-stack/src/tests/mint_and_burn.rs +++ b/tests/full-stack/src/tests/mint_and_burn.rs @@ -57,7 +57,7 @@ async fn mint_and_burn_test() { }; let addr = Address::p2pkh( - &PublicKey::from_private_key( + PublicKey::from_private_key( SECP256K1, &PrivateKey::new(SecretKey::from_slice(&[0x01; 32]).unwrap(), Network::Bitcoin), ), @@ -266,14 +266,13 @@ async fn mint_and_burn_test() { script::{PushBytesBuf, Script, ScriptBuf, Builder}, absolute::LockTime, transaction::{Version, Transaction}, - address::Payload, - Sequence, Witness, OutPoint, TxIn, Amount, TxOut, Network, + Sequence, Witness, OutPoint, TxIn, Amount, TxOut, Network, Address, }; let private_key = PrivateKey::new(SecretKey::from_slice(&[0x01; 32]).unwrap(), Network::Bitcoin); let public_key = PublicKey::from_private_key(SECP256K1, &private_key); - let addr = Payload::p2pkh(&public_key); + let addr = Address::p2pkh(public_key, Network::Bitcoin); // Use the first block's coinbase let rpc = handles[0].bitcoin(&ops).await; @@ -284,7 +283,7 @@ async fn mint_and_burn_test() { version: Version(2), lock_time: LockTime::ZERO, input: vec![TxIn { - previous_output: OutPoint { txid: tx.txid(), vout: 0 }, + previous_output: OutPoint { txid: tx.compute_txid(), vout: 0 }, script_sig: Script::new().into(), sequence: Sequence(u32::MAX), witness: Witness::default(), @@ -292,17 +291,23 @@ async fn mint_and_burn_test() { output: vec![ TxOut { value: Amount::from_sat(1_100_000_00), - script_pubkey: Payload::p2tr_tweaked(TweakedPublicKey::dangerous_assume_tweaked( - XOnlyPublicKey::from_slice(&bitcoin_key_pair.1[1 ..]).unwrap(), - )) + script_pubkey: Address::p2tr_tweaked( + TweakedPublicKey::dangerous_assume_tweaked( + XOnlyPublicKey::from_slice(&bitcoin_key_pair.1[1 ..]).unwrap(), + ), + Network::Bitcoin, + ) .script_pubkey(), }, TxOut { // change = amount spent - fee value: 
Amount::from_sat(tx.output[0].value.to_sat() - 1_100_000_00 - 1_000_00), - script_pubkey: Payload::p2tr_tweaked(TweakedPublicKey::dangerous_assume_tweaked( - XOnlyPublicKey::from_slice(&public_key.inner.serialize()[1 ..]).unwrap(), - )) + script_pubkey: Address::p2tr_tweaked( + TweakedPublicKey::dangerous_assume_tweaked( + XOnlyPublicKey::from_slice(&public_key.inner.serialize()[1 ..]).unwrap(), + ), + Network::Bitcoin, + ) .script_pubkey(), }, TxOut { @@ -316,12 +321,14 @@ async fn mint_and_burn_test() { let mut der = SECP256K1 .sign_ecdsa_low_r( - &Message::from( + &Message::from_digest_slice( SighashCache::new(&tx) .legacy_signature_hash(0, &addr.script_pubkey(), EcdsaSighashType::All.to_u32()) .unwrap() - .to_raw_hash(), - ), + .to_raw_hash() + .as_ref(), + ) + .unwrap(), &private_key.inner, ) .serialize_der() @@ -447,19 +454,17 @@ async fn mint_and_burn_test() { // Create a random Bitcoin/Monero address let bitcoin_addr = { - use bitcoin_serai::bitcoin::{network::Network, key::PublicKey, address::Address}; - // Uses Network::Bitcoin since it doesn't actually matter, Serai strips it out - // TODO: Move Serai to Payload from Address - Address::p2pkh( - &loop { + use bitcoin_serai::bitcoin::{key::PublicKey, ScriptBuf}; + ScriptBuf::new_p2pkh( + &(loop { let mut bytes = [0; 33]; OsRng.fill_bytes(&mut bytes); bytes[0] %= 4; if let Ok(key) = PublicKey::from_slice(&bytes) { break key; } - }, - Network::Bitcoin, + }) + .pubkey_hash(), ) }; @@ -552,7 +557,7 @@ async fn mint_and_burn_test() { let received_output = block.txdata[1] .output .iter() - .find(|output| output.script_pubkey == bitcoin_addr.script_pubkey()) + .find(|output| output.script_pubkey == bitcoin_addr) .unwrap(); let tx_fee = 1_100_000_00 - diff --git a/tests/full-stack/src/tests/mod.rs b/tests/full-stack/src/tests/mod.rs index 95f840f40..7d92070ef 100644 --- a/tests/full-stack/src/tests/mod.rs +++ b/tests/full-stack/src/tests/mod.rs @@ -57,12 +57,16 @@ pub(crate) async fn new_test(test_body: impl 
TestBody) { let (coord_key, message_queue_keys, message_queue_composition) = message_queue_instance(); let (bitcoin_composition, bitcoin_port) = network_instance(NetworkId::Bitcoin); - let bitcoin_processor_composition = + let mut bitcoin_processor_composition = processor_instance(NetworkId::Bitcoin, bitcoin_port, message_queue_keys[&NetworkId::Bitcoin]); + assert_eq!(bitcoin_processor_composition.len(), 1); + let bitcoin_processor_composition = bitcoin_processor_composition.swap_remove(0); let (monero_composition, monero_port) = network_instance(NetworkId::Monero); - let monero_processor_composition = + let mut monero_processor_composition = processor_instance(NetworkId::Monero, monero_port, message_queue_keys[&NetworkId::Monero]); + assert_eq!(monero_processor_composition.len(), 1); + let monero_processor_composition = monero_processor_composition.swap_remove(0); let coordinator_composition = coordinator_instance(name, coord_key); let serai_composition = serai_composition(name, false); @@ -161,54 +165,57 @@ pub(crate) async fn new_test(test_body: impl TestBody) { *OUTER_OPS.get_or_init(|| Mutex::new(None)).lock().await = None; // Spawns a coordinator, if one has yet to be spawned, or else runs the test. 
- #[async_recursion::async_recursion] - async fn spawn_coordinator_or_run_test(inner_ops: DockerOperations) { - // If the outer operations have yet to be set, these *are* the outer operations - let outer_ops = OUTER_OPS.get().unwrap(); - if outer_ops.lock().await.is_none() { - *outer_ops.lock().await = Some(inner_ops); - } - - let context_lock = CONTEXT.get().unwrap().lock().await; - let Context { pending_coordinator_compositions, handles, test_body } = - context_lock.as_ref().unwrap(); - - // Check if there is a coordinator left - let maybe_coordinator = { - let mut remaining = pending_coordinator_compositions.lock().await; - let maybe_coordinator = if !remaining.is_empty() { - let handles = handles[handles.len() - remaining.len()].clone(); - let composition = remaining.remove(0); - Some((composition, handles)) - } else { - None + pub(crate) fn spawn_coordinator_or_run_test( + inner_ops: DockerOperations, + ) -> core::pin::Pin>> { + Box::pin(async { + // If the outer operations have yet to be set, these *are* the outer operations + let outer_ops = OUTER_OPS.get().unwrap(); + if outer_ops.lock().await.is_none() { + *outer_ops.lock().await = Some(inner_ops); + } + + let context_lock = CONTEXT.get().unwrap().lock().await; + let Context { pending_coordinator_compositions, handles, test_body } = + context_lock.as_ref().unwrap(); + + // Check if there is a coordinator left + let maybe_coordinator = { + let mut remaining = pending_coordinator_compositions.lock().await; + let maybe_coordinator = if !remaining.is_empty() { + let handles = handles[handles.len() - remaining.len()].clone(); + let composition = remaining.remove(0); + Some((composition, handles)) + } else { + None + }; + drop(remaining); + maybe_coordinator }; - drop(remaining); - maybe_coordinator - }; - if let Some((mut composition, handles)) = maybe_coordinator { - let network = { - let outer_ops = outer_ops.lock().await; - let outer_ops = outer_ops.as_ref().unwrap(); - // Spawn it by building another 
DockerTest which recursively calls this function - // TODO: Spawn this outside of DockerTest so we can remove the recursion - let serai_container = outer_ops.handle(&handles.serai); - composition.modify_env("SERAI_HOSTNAME", serai_container.ip()); - let message_queue_container = outer_ops.handle(&handles.message_queue); - composition.modify_env("MESSAGE_QUEUE_RPC", message_queue_container.ip()); - - format!("container:{}", serai_container.name()) - }; - let mut test = DockerTest::new().with_network(dockertest::Network::External(network)); - test.provide_container(composition); - - drop(context_lock); - test.run_async(spawn_coordinator_or_run_test).await; - } else { - let outer_ops = outer_ops.lock().await.take().unwrap(); - test_body.body(outer_ops, handles.clone()).await; - } + if let Some((mut composition, handles)) = maybe_coordinator { + let network = { + let outer_ops = outer_ops.lock().await; + let outer_ops = outer_ops.as_ref().unwrap(); + // Spawn it by building another DockerTest which recursively calls this function + // TODO: Spawn this outside of DockerTest so we can remove the recursion + let serai_container = outer_ops.handle(&handles.serai); + composition.modify_env("SERAI_HOSTNAME", serai_container.ip()); + let message_queue_container = outer_ops.handle(&handles.message_queue); + composition.modify_env("MESSAGE_QUEUE_RPC", message_queue_container.ip()); + + format!("container:{}", serai_container.name()) + }; + let mut test = DockerTest::new().with_network(dockertest::Network::External(network)); + test.provide_container(composition); + + drop(context_lock); + test.run_async(spawn_coordinator_or_run_test).await; + } else { + let outer_ops = outer_ops.lock().await.take().unwrap(); + test_body.body(outer_ops, handles.clone()).await; + } + }) } test.run_async(spawn_coordinator_or_run_test).await; diff --git a/tests/processor/Cargo.toml b/tests/processor/Cargo.toml index 686dbcea7..e46312c59 100644 --- a/tests/processor/Cargo.toml +++ 
b/tests/processor/Cargo.toml @@ -23,16 +23,21 @@ zeroize = { version = "1", default-features = false } rand_core = { version = "0.6", default-features = false, features = ["getrandom"] } curve25519-dalek = "4" -ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, features = ["ristretto"] } +ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, features = ["secp256k1", "ristretto"] } dkg = { path = "../../crypto/dkg", default-features = false, features = ["tests"] } bitcoin-serai = { path = "../../coins/bitcoin" } + +k256 = "0.13" +ethereum-serai = { path = "../../coins/ethereum" } + monero-serai = { path = "../../coins/monero" } messages = { package = "serai-processor-messages", path = "../../processor/messages" } scale = { package = "parity-scale-codec", version = "3" } serai-client = { path = "../../substrate/client" } +serai-db = { path = "../../common/db", default-features = false } serai-message-queue = { path = "../../message-queue" } borsh = { version = "1", features = ["de_strict_order"] } @@ -41,7 +46,7 @@ serde_json = { version = "1", default-features = false } tokio = { version = "1", features = ["time"] } -processor = { package = "serai-processor", path = "../../processor", features = ["bitcoin", "monero"] } +processor = { package = "serai-processor", path = "../../processor", features = ["bitcoin", "ethereum", "monero"] } dockertest = "0.4" serai-docker-tests = { path = "../docker" } diff --git a/tests/processor/src/lib.rs b/tests/processor/src/lib.rs index 511382abb..1964e641c 100644 --- a/tests/processor/src/lib.rs +++ b/tests/processor/src/lib.rs @@ -28,7 +28,7 @@ pub fn processor_instance( network: NetworkId, port: u32, message_queue_key: ::F, -) -> TestBodySpecification { +) -> Vec { let mut entropy = [0; 32]; OsRng.fill_bytes(&mut entropy); @@ -41,7 +41,7 @@ pub fn processor_instance( let image = format!("{network_str}-processor"); serai_docker_tests::build(image.clone()); - 
TestBodySpecification::with_image( + let mut res = vec![TestBodySpecification::with_image( Image::with_repository(format!("serai-dev-{image}")).pull_policy(PullPolicy::Never), ) .replace_env( @@ -55,19 +55,40 @@ pub fn processor_instance( ("RUST_LOG".to_string(), "serai_processor=trace,".to_string()), ] .into(), - ) + )]; + + if network == NetworkId::Ethereum { + serai_docker_tests::build("ethereum-relayer".to_string()); + res.push( + TestBodySpecification::with_image( + Image::with_repository("serai-dev-ethereum-relayer".to_string()) + .pull_policy(PullPolicy::Never), + ) + .replace_env( + [ + ("DB_PATH".to_string(), "./ethereum-relayer-db".to_string()), + ("RUST_LOG".to_string(), "serai_ethereum_relayer=trace,".to_string()), + ] + .into(), + ) + .set_publish_all_ports(true), + ); + } + + res } -pub type Handles = (String, String, String); +pub type Handles = (String, String, String, String); pub fn processor_stack( network: NetworkId, + network_hostname_override: Option, ) -> (Handles, ::F, Vec) { let (network_composition, network_rpc_port) = network_instance(network); let (coord_key, message_queue_keys, message_queue_composition) = serai_message_queue_tests::instance(); - let processor_composition = + let mut processor_compositions = processor_instance(network, network_rpc_port, message_queue_keys[&network]); // Give every item in this stack a unique ID @@ -83,7 +104,7 @@ pub fn processor_stack( let mut compositions = vec![]; let mut handles = vec![]; for (name, composition) in [ - ( + Some(( match network { NetworkId::Serai => unreachable!(), NetworkId::Bitcoin => "bitcoin", @@ -91,10 +112,14 @@ pub fn processor_stack( NetworkId::Monero => "monero", }, network_composition, - ), - ("message_queue", message_queue_composition), - ("processor", processor_composition), - ] { + )), + Some(("message_queue", message_queue_composition)), + Some(("processor", processor_compositions.remove(0))), + processor_compositions.pop().map(|composition| ("relayer", composition)), + 
] + .into_iter() + .flatten() + { let handle = format!("processor-{name}-{unique_id}"); compositions.push( composition.set_start_policy(StartPolicy::Strict).set_handle(handle.clone()).set_log_options( @@ -112,11 +137,27 @@ pub fn processor_stack( handles.push(handle); } - let processor_composition = compositions.last_mut().unwrap(); - processor_composition.inject_container_name(handles[0].clone(), "NETWORK_RPC_HOSTNAME"); + let processor_composition = compositions.get_mut(2).unwrap(); + processor_composition.inject_container_name( + network_hostname_override.unwrap_or_else(|| handles[0].clone()), + "NETWORK_RPC_HOSTNAME", + ); + if let Some(hostname) = handles.get(3) { + processor_composition.inject_container_name(hostname, "ETHEREUM_RELAYER_HOSTNAME"); + processor_composition.modify_env("ETHEREUM_RELAYER_PORT", "20830"); + } processor_composition.inject_container_name(handles[1].clone(), "MESSAGE_QUEUE_RPC"); - ((handles[0].clone(), handles[1].clone(), handles[2].clone()), coord_key, compositions) + ( + ( + handles[0].clone(), + handles[1].clone(), + handles[2].clone(), + handles.get(3).cloned().unwrap_or(String::new()), + ), + coord_key, + compositions, + ) } #[derive(serde::Deserialize, Debug)] @@ -130,6 +171,7 @@ pub struct Coordinator { message_queue_handle: String, #[allow(unused)] processor_handle: String, + relayer_handle: String, next_send_id: u64, next_recv_id: u64, @@ -140,7 +182,7 @@ impl Coordinator { pub fn new( network: NetworkId, ops: &DockerOperations, - handles: (String, String, String), + handles: Handles, coord_key: ::F, ) -> Coordinator { let rpc = ops.handle(&handles.1).host_port(2287).unwrap(); @@ -152,6 +194,7 @@ impl Coordinator { network_handle: handles.0, message_queue_handle: handles.1, processor_handle: handles.2, + relayer_handle: handles.3, next_send_id: 0, next_recv_id: 0, @@ -181,7 +224,55 @@ impl Coordinator { break; } } - NetworkId::Ethereum => todo!(), + NetworkId::Ethereum => { + use std::sync::Arc; + use ethereum_serai::{ + 
alloy::{ + simple_request_transport::SimpleRequest, + rpc_client::ClientBuilder, + provider::{Provider, RootProvider}, + network::Ethereum, + }, + deployer::Deployer, + }; + + let provider = Arc::new(RootProvider::<_, Ethereum>::new( + ClientBuilder::default().transport(SimpleRequest::new(rpc_url.clone()), true), + )); + + if handle + .block_on(provider.raw_request::<_, ()>("evm_setAutomine".into(), [false])) + .is_ok() + { + handle.block_on(async { + // Deploy the deployer + let tx = Deployer::deployment_tx(); + let signer = tx.recover_signer().unwrap(); + let (tx, sig, _) = tx.into_parts(); + + provider + .raw_request::<_, ()>( + "anvil_setBalance".into(), + [signer.to_string(), (tx.gas_limit * tx.gas_price).to_string()], + ) + .await + .unwrap(); + + let mut bytes = vec![]; + tx.encode_with_signature_fields(&sig, &mut bytes); + let _ = provider.send_raw_transaction(&bytes).await.unwrap(); + + provider.raw_request::<_, ()>("anvil_mine".into(), [96]).await.unwrap(); + + let _ = Deployer::new(provider.clone()).await.unwrap().unwrap(); + + // Sleep until the actual time is ahead of whatever time is in the epoch we just + // mined + tokio::time::sleep(core::time::Duration::from_secs(30)).await; + }); + break; + } + } NetworkId::Monero => { use monero_serai::rpc::HttpRpc; @@ -271,7 +362,45 @@ impl Coordinator { block.consensus_encode(&mut block_buf).unwrap(); (hash, block_buf) } - NetworkId::Ethereum => todo!(), + NetworkId::Ethereum => { + use ethereum_serai::alloy::{ + simple_request_transport::SimpleRequest, + rpc_types::BlockNumberOrTag, + rpc_client::ClientBuilder, + provider::{Provider, RootProvider}, + network::Ethereum, + }; + + let provider = RootProvider::<_, Ethereum>::new( + ClientBuilder::default().transport(SimpleRequest::new(rpc_url.clone()), true), + ); + let start = provider + .get_block(BlockNumberOrTag::Latest.into(), false) + .await + .unwrap() + .unwrap() + .header + .number + .unwrap(); + // We mine 96 blocks to mine one epoch, then cause its 
finalization + provider.raw_request::<_, ()>("anvil_mine".into(), [96]).await.unwrap(); + let end_of_epoch = start + 31; + let hash = provider + .get_block(BlockNumberOrTag::Number(end_of_epoch).into(), false) + .await + .unwrap() + .unwrap() + .header + .hash + .unwrap(); + + let state = provider + .raw_request::<_, String>("anvil_dumpState".into(), ()) + .await + .unwrap() + .into_bytes(); + (hash.into(), state) + } NetworkId::Monero => { use curve25519_dalek::{constants::ED25519_BASEPOINT_POINT, scalar::Scalar}; use monero_serai::{ @@ -303,39 +432,6 @@ impl Coordinator { } } - pub async fn broadcast_block(&self, ops: &DockerOperations, block: &[u8]) { - let rpc_url = network_rpc(self.network, ops, &self.network_handle); - match self.network { - NetworkId::Bitcoin => { - use bitcoin_serai::rpc::Rpc; - - let rpc = - Rpc::new(rpc_url).await.expect("couldn't connect to the coordinator's Bitcoin RPC"); - let res: Option = - rpc.rpc_call("submitblock", serde_json::json!([hex::encode(block)])).await.unwrap(); - if let Some(err) = res { - panic!("submitblock failed: {err}"); - } - } - NetworkId::Ethereum => todo!(), - NetworkId::Monero => { - use monero_serai::rpc::HttpRpc; - - let rpc = - HttpRpc::new(rpc_url).await.expect("couldn't connect to the coordinator's Monero RPC"); - let res: serde_json::Value = rpc - .json_rpc_call("submit_block", Some(serde_json::json!([hex::encode(block)]))) - .await - .unwrap(); - let err = res.get("error"); - if err.is_some() && (err.unwrap() != &serde_json::Value::Null) { - panic!("failed to submit Monero block: {res}"); - } - } - NetworkId::Serai => panic!("processor tests broadcasting block to Serai"), - } - } - pub async fn sync(&self, ops: &DockerOperations, others: &[Coordinator]) { let rpc_url = network_rpc(self.network, ops, &self.network_handle); match self.network { @@ -345,13 +441,11 @@ impl Coordinator { let rpc = Rpc::new(rpc_url).await.expect("couldn't connect to the Bitcoin RPC"); let to = 
rpc.get_latest_block_number().await.unwrap(); for coordinator in others { - let from = Rpc::new(network_rpc(self.network, ops, &coordinator.network_handle)) + let other_rpc = Rpc::new(network_rpc(self.network, ops, &coordinator.network_handle)) .await - .expect("couldn't connect to the Bitcoin RPC") - .get_latest_block_number() - .await - .unwrap() + - 1; + .expect("couldn't connect to the Bitcoin RPC"); + let from = other_rpc.get_latest_block_number().await.unwrap() + 1; + for b in from ..= to { let mut buf = vec![]; rpc @@ -360,30 +454,92 @@ impl Coordinator { .unwrap() .consensus_encode(&mut buf) .unwrap(); - coordinator.broadcast_block(ops, &buf).await; + + let res: Option = other_rpc + .rpc_call("submitblock", serde_json::json!([hex::encode(buf)])) + .await + .unwrap(); + if let Some(err) = res { + panic!("submitblock failed: {err}"); + } } } } - NetworkId::Ethereum => todo!(), + NetworkId::Ethereum => { + use ethereum_serai::alloy::{ + simple_request_transport::SimpleRequest, + rpc_types::BlockNumberOrTag, + rpc_client::ClientBuilder, + provider::{Provider, RootProvider}, + network::Ethereum, + }; + + let (expected_number, state) = { + let provider = RootProvider::<_, Ethereum>::new( + ClientBuilder::default().transport(SimpleRequest::new(rpc_url.clone()), true), + ); + + let expected_number = provider + .get_block(BlockNumberOrTag::Latest.into(), false) + .await + .unwrap() + .unwrap() + .header + .number; + ( + expected_number, + provider.raw_request::<_, String>("anvil_dumpState".into(), ()).await.unwrap(), + ) + }; + + for coordinator in others { + let rpc_url = network_rpc(coordinator.network, ops, &coordinator.network_handle); + let provider = RootProvider::<_, Ethereum>::new( + ClientBuilder::default().transport(SimpleRequest::new(rpc_url.clone()), true), + ); + assert!(provider + .raw_request::<_, bool>("anvil_loadState".into(), &[&state]) + .await + .unwrap()); + + let new_number = provider + .get_block(BlockNumberOrTag::Latest.into(), false) + 
.await + .unwrap() + .unwrap() + .header + .number; + + // TODO: https://github.com/foundry-rs/foundry/issues/7955 + let _ = expected_number; + let _ = new_number; + //assert_eq!(expected_number, new_number); + } + } NetworkId::Monero => { use monero_serai::rpc::HttpRpc; let rpc = HttpRpc::new(rpc_url).await.expect("couldn't connect to the Monero RPC"); let to = rpc.get_height().await.unwrap(); for coordinator in others { - let from = HttpRpc::new(network_rpc(self.network, ops, &coordinator.network_handle)) - .await - .expect("couldn't connect to the Monero RPC") - .get_height() - .await - .unwrap(); + let other_rpc = + HttpRpc::new(network_rpc(coordinator.network, ops, &coordinator.network_handle)) + .await + .expect("couldn't connect to the Monero RPC"); + + let from = other_rpc.get_height().await.unwrap(); for b in from .. to { - coordinator - .broadcast_block( - ops, - &rpc.get_block(rpc.get_block_hash(b).await.unwrap()).await.unwrap().serialize(), - ) - .await; + let block = + rpc.get_block(rpc.get_block_hash(b).await.unwrap()).await.unwrap().serialize(); + + let res: serde_json::Value = other_rpc + .json_rpc_call("submit_block", Some(serde_json::json!([hex::encode(block)]))) + .await + .unwrap(); + let err = res.get("error"); + if err.is_some() && (err.unwrap() != &serde_json::Value::Null) { + panic!("failed to submit Monero block: {res}"); + } } } } @@ -391,7 +547,7 @@ impl Coordinator { } } - pub async fn publish_transacton(&self, ops: &DockerOperations, tx: &[u8]) { + pub async fn publish_transaction(&self, ops: &DockerOperations, tx: &[u8]) { let rpc_url = network_rpc(self.network, ops, &self.network_handle); match self.network { NetworkId::Bitcoin => { @@ -404,7 +560,19 @@ impl Coordinator { Rpc::new(rpc_url).await.expect("couldn't connect to the coordinator's Bitcoin RPC"); rpc.send_raw_transaction(&Transaction::consensus_decode(&mut &*tx).unwrap()).await.unwrap(); } - NetworkId::Ethereum => todo!(), + NetworkId::Ethereum => { + use 
ethereum_serai::alloy::{ + simple_request_transport::SimpleRequest, + rpc_client::ClientBuilder, + provider::{Provider, RootProvider}, + network::Ethereum, + }; + + let provider = RootProvider::<_, Ethereum>::new( + ClientBuilder::default().transport(SimpleRequest::new(rpc_url.clone()), true), + ); + let _ = provider.send_raw_transaction(tx).await.unwrap(); + } NetworkId::Monero => { use monero_serai::{transaction::Transaction, rpc::HttpRpc}; @@ -416,7 +584,19 @@ impl Coordinator { } } - pub async fn get_transaction(&self, ops: &DockerOperations, tx: &[u8]) -> Option> { + pub async fn publish_eventuality_completion(&self, ops: &DockerOperations, tx: &[u8]) { + match self.network { + NetworkId::Bitcoin | NetworkId::Monero => self.publish_transaction(ops, tx).await, + NetworkId::Ethereum => (), + NetworkId::Serai => panic!("processor tests broadcasting block to Serai"), + } + } + + pub async fn get_published_transaction( + &self, + ops: &DockerOperations, + tx: &[u8], + ) -> Option> { let rpc_url = network_rpc(self.network, ops, &self.network_handle); match self.network { NetworkId::Bitcoin => { @@ -424,8 +604,15 @@ impl Coordinator { let rpc = Rpc::new(rpc_url).await.expect("couldn't connect to the coordinator's Bitcoin RPC"); + + // Bitcoin publishes a 0-byte TX ID to reduce variables + // Accordingly, read the mempool to find the (presumed relevant) TX + let entries: Vec = + rpc.rpc_call("getrawmempool", serde_json::json!([false])).await.unwrap(); + assert_eq!(entries.len(), 1, "more than one entry in the mempool, so unclear which to get"); + let mut hash = [0; 32]; - hash.copy_from_slice(tx); + hash.copy_from_slice(&hex::decode(&entries[0]).unwrap()); if let Ok(tx) = rpc.get_transaction(&hash).await { let mut buf = vec![]; tx.consensus_encode(&mut buf).unwrap(); @@ -434,7 +621,56 @@ impl Coordinator { None } } - NetworkId::Ethereum => todo!(), + NetworkId::Ethereum => { + /* + let provider = RootProvider::<_, Ethereum>::new( + 
ClientBuilder::default().transport(SimpleRequest::new(rpc_url.clone()), true), + ); + let mut hash = [0; 32]; + hash.copy_from_slice(tx); + let tx = provider.get_transaction_by_hash(hash.into()).await.unwrap()?; + let (tx, sig, _) = Signed::::try_from(tx).unwrap().into_parts(); + let mut bytes = vec![]; + tx.encode_with_signature_fields(&sig, &mut bytes); + Some(bytes) + */ + + // This is being passed a signature. We need to check the relayer has a TX with this + // signature + + use tokio::{ + io::{AsyncReadExt, AsyncWriteExt}, + net::TcpStream, + }; + + let (ip, port) = ops.handle(&self.relayer_handle).host_port(20831).unwrap(); + let relayer_url = format!("{ip}:{port}"); + + let mut socket = TcpStream::connect(&relayer_url).await.unwrap(); + // Iterate over every published command + for i in 1 .. u32::MAX { + socket.write_all(&i.to_le_bytes()).await.unwrap(); + + let mut recvd_len = [0; 4]; + socket.read_exact(&mut recvd_len).await.unwrap(); + if recvd_len == [0; 4] { + break; + } + + let mut msg = vec![0; usize::try_from(u32::from_le_bytes(recvd_len)).unwrap()]; + socket.read_exact(&mut msg).await.unwrap(); + for start_pos in 0 .. msg.len() { + if (start_pos + tx.len()) > msg.len() { + break; + } + if &msg[start_pos .. 
(start_pos + tx.len())] == tx { + return Some(msg); + } + } + } + + None + } NetworkId::Monero => { use monero_serai::rpc::HttpRpc; diff --git a/tests/processor/src/networks.rs b/tests/processor/src/networks.rs index 882b9e895..9af339b74 100644 --- a/tests/processor/src/networks.rs +++ b/tests/processor/src/networks.rs @@ -19,6 +19,7 @@ pub const RPC_USER: &str = "serai"; pub const RPC_PASS: &str = "seraidex"; pub const BTC_PORT: u32 = 8332; +pub const ETH_PORT: u32 = 8545; pub const XMR_PORT: u32 = 18081; pub fn bitcoin_instance() -> (TestBodySpecification, u32) { @@ -31,6 +32,17 @@ pub fn bitcoin_instance() -> (TestBodySpecification, u32) { (composition, BTC_PORT) } +pub fn ethereum_instance() -> (TestBodySpecification, u32) { + serai_docker_tests::build("ethereum".to_string()); + + let composition = TestBodySpecification::with_image( + Image::with_repository("serai-dev-ethereum").pull_policy(PullPolicy::Never), + ) + .set_start_policy(StartPolicy::Strict) + .set_publish_all_ports(true); + (composition, ETH_PORT) +} + pub fn monero_instance() -> (TestBodySpecification, u32) { serai_docker_tests::build("monero".to_string()); @@ -45,7 +57,7 @@ pub fn monero_instance() -> (TestBodySpecification, u32) { pub fn network_instance(network: NetworkId) -> (TestBodySpecification, u32) { match network { NetworkId::Bitcoin => bitcoin_instance(), - NetworkId::Ethereum => todo!(), + NetworkId::Ethereum => ethereum_instance(), NetworkId::Monero => monero_instance(), NetworkId::Serai => { panic!("Serai is not a valid network to spawn an instance of for a processor") @@ -58,7 +70,7 @@ pub fn network_rpc(network: NetworkId, ops: &DockerOperations, handle: &str) -> .handle(handle) .host_port(match network { NetworkId::Bitcoin => BTC_PORT, - NetworkId::Ethereum => todo!(), + NetworkId::Ethereum => ETH_PORT, NetworkId::Monero => XMR_PORT, NetworkId::Serai => panic!("getting port for external network yet it was Serai"), }) @@ -70,7 +82,7 @@ pub fn confirmations(network: NetworkId) -> 
usize { use processor::networks::*; match network { NetworkId::Bitcoin => Bitcoin::CONFIRMATIONS, - NetworkId::Ethereum => todo!(), + NetworkId::Ethereum => Ethereum::::CONFIRMATIONS, NetworkId::Monero => Monero::CONFIRMATIONS, NetworkId::Serai => panic!("getting confirmations required for Serai"), } @@ -83,6 +95,11 @@ pub enum Wallet { public_key: bitcoin_serai::bitcoin::PublicKey, input_tx: bitcoin_serai::bitcoin::Transaction, }, + Ethereum { + rpc_url: String, + key: ::F, + nonce: u64, + }, Monero { handle: String, spend_key: Zeroizing, @@ -109,7 +126,7 @@ impl Wallet { let secret_key = SecretKey::new(&mut rand_core::OsRng); let private_key = PrivateKey::new(secret_key, Network::Regtest); let public_key = PublicKey::from_private_key(SECP256K1, &private_key); - let main_addr = Address::p2pkh(&public_key, Network::Regtest); + let main_addr = Address::p2pkh(public_key, Network::Regtest); let rpc = Rpc::new(rpc_url).await.expect("couldn't connect to the Bitcoin RPC"); @@ -138,7 +155,37 @@ impl Wallet { Wallet::Bitcoin { private_key, public_key, input_tx: funds } } - NetworkId::Ethereum => todo!(), + NetworkId::Ethereum => { + use ciphersuite::{group::ff::Field, Secp256k1}; + use ethereum_serai::alloy::{ + primitives::{U256, Address}, + simple_request_transport::SimpleRequest, + rpc_client::ClientBuilder, + provider::{Provider, RootProvider}, + network::Ethereum, + }; + + let key = ::F::random(&mut OsRng); + let address = + ethereum_serai::crypto::address(&(::generator() * key)); + + let provider = RootProvider::<_, Ethereum>::new( + ClientBuilder::default().transport(SimpleRequest::new(rpc_url.clone()), true), + ); + + provider + .raw_request::<_, ()>( + "anvil_setBalance".into(), + [Address(address.into()).to_string(), { + let nine_decimals = U256::from(1_000_000_000u64); + (U256::from(100u64) * nine_decimals * nine_decimals).to_string() + }], + ) + .await + .unwrap(); + + Wallet::Ethereum { rpc_url: rpc_url.clone(), key, nonce: 0 } + } NetworkId::Monero => { use 
curve25519_dalek::{constants::ED25519_BASEPOINT_POINT, scalar::Scalar}; @@ -211,7 +258,6 @@ impl Wallet { consensus::Encodable, sighash::{EcdsaSighashType, SighashCache}, script::{PushBytesBuf, Script, ScriptBuf, Builder}, - address::Payload, OutPoint, Sequence, Witness, TxIn, Amount, TxOut, absolute::LockTime, transaction::{Version, Transaction}, @@ -222,7 +268,7 @@ impl Wallet { version: Version(2), lock_time: LockTime::ZERO, input: vec![TxIn { - previous_output: OutPoint { txid: input_tx.txid(), vout: 0 }, + previous_output: OutPoint { txid: input_tx.compute_txid(), vout: 0 }, script_sig: Script::new().into(), sequence: Sequence(u32::MAX), witness: Witness::default(), @@ -234,10 +280,11 @@ impl Wallet { }, TxOut { value: Amount::from_sat(AMOUNT), - script_pubkey: Payload::p2tr_tweaked(TweakedPublicKey::dangerous_assume_tweaked( - XOnlyPublicKey::from_slice(&to[1 ..]).unwrap(), - )) - .script_pubkey(), + script_pubkey: ScriptBuf::new_p2tr_tweaked( + TweakedPublicKey::dangerous_assume_tweaked( + XOnlyPublicKey::from_slice(&to[1 ..]).unwrap(), + ), + ), }, ], }; @@ -256,7 +303,7 @@ impl Wallet { let mut der = SECP256K1 .sign_ecdsa_low_r( - &Message::from( + &Message::from_digest_slice( SighashCache::new(&tx) .legacy_signature_hash( 0, @@ -264,8 +311,10 @@ impl Wallet { EcdsaSighashType::All.to_u32(), ) .unwrap() - .to_raw_hash(), - ), + .to_raw_hash() + .as_ref(), + ) + .unwrap(), &private_key.inner, ) .serialize_der() @@ -282,6 +331,109 @@ impl Wallet { (buf, Balance { coin: Coin::Bitcoin, amount: Amount(AMOUNT) }) } + Wallet::Ethereum { rpc_url, key, ref mut nonce } => { + use std::sync::Arc; + use ethereum_serai::{ + alloy::{ + primitives::{U256, TxKind}, + sol_types::SolCall, + simple_request_transport::SimpleRequest, + consensus::{TxLegacy, SignableTransaction}, + rpc_client::ClientBuilder, + provider::{Provider, RootProvider}, + network::Ethereum, + }, + crypto::PublicKey, + deployer::Deployer, + }; + + let eight_decimals = U256::from(100_000_000u64); + let 
nine_decimals = eight_decimals * U256::from(10u64); + let eighteen_decimals = nine_decimals * nine_decimals; + let one_eth = eighteen_decimals; + + let provider = Arc::new(RootProvider::<_, Ethereum>::new( + ClientBuilder::default().transport(SimpleRequest::new(rpc_url.clone()), true), + )); + + let to_as_key = PublicKey::new( + ::read_G(&mut to.as_slice()).unwrap(), + ) + .unwrap(); + let router_addr = { + // Find the deployer + let deployer = Deployer::new(provider.clone()).await.unwrap().unwrap(); + + // Find the router, deploying if non-existent + let router = if let Some(router) = + deployer.find_router(provider.clone(), &to_as_key).await.unwrap() + { + router + } else { + let mut tx = deployer.deploy_router(&to_as_key); + tx.gas_price = 1_000_000_000u64.into(); + let tx = ethereum_serai::crypto::deterministically_sign(&tx); + let signer = tx.recover_signer().unwrap(); + let (tx, sig, _) = tx.into_parts(); + + provider + .raw_request::<_, ()>( + "anvil_setBalance".into(), + [signer.to_string(), (tx.gas_limit * tx.gas_price).to_string()], + ) + .await + .unwrap(); + + let mut bytes = vec![]; + tx.encode_with_signature_fields(&sig, &mut bytes); + let _ = provider.send_raw_transaction(&bytes).await.unwrap(); + + provider.raw_request::<_, ()>("anvil_mine".into(), [96]).await.unwrap(); + + deployer.find_router(provider.clone(), &to_as_key).await.unwrap().unwrap() + }; + + router.address() + }; + + let tx = TxLegacy { + chain_id: None, + nonce: *nonce, + gas_price: 1_000_000_000u128, + gas_limit: 200_000u128, + to: TxKind::Call(router_addr.into()), + // 1 ETH + value: one_eth, + input: ethereum_serai::router::abi::inInstructionCall::new(( + [0; 20].into(), + one_eth, + if let Some(instruction) = instruction { + Shorthand::Raw(RefundableInInstruction { origin: None, instruction }).encode().into() + } else { + vec![].into() + }, + )) + .abi_encode() + .into(), + }; + + *nonce += 1; + + let sig = + 
k256::ecdsa::SigningKey::from(k256::elliptic_curve::NonZeroScalar::new(*key).unwrap()) + .sign_prehash_recoverable(tx.signature_hash().as_ref()) + .unwrap(); + + let mut bytes = vec![]; + tx.encode_with_signature_fields(&sig.into(), &mut bytes); + + // We drop the bottom 10 decimals + ( + bytes, + Balance { coin: Coin::Ether, amount: Amount(u64::try_from(eight_decimals).unwrap()) }, + ) + } + Wallet::Monero { handle, ref spend_key, ref view_pair, ref mut inputs } => { use curve25519_dalek::constants::ED25519_BASEPOINT_POINT; use monero_serai::{ @@ -366,14 +518,18 @@ impl Wallet { match self { Wallet::Bitcoin { public_key, .. } => { - use bitcoin_serai::bitcoin::{Network, Address}; + use bitcoin_serai::bitcoin::ScriptBuf; ExternalAddress::new( - networks::bitcoin::Address::new(Address::p2pkh(public_key, Network::Regtest)) + networks::bitcoin::Address::new(ScriptBuf::new_p2pkh(&public_key.pubkey_hash())) .unwrap() .into(), ) .unwrap() } + Wallet::Ethereum { key, .. } => ExternalAddress::new( + ethereum_serai::crypto::address(&(ciphersuite::Secp256k1::generator() * key)).into(), + ) + .unwrap(), Wallet::Monero { view_pair, .. 
} => { use monero_serai::wallet::address::{Network, AddressSpec}; ExternalAddress::new( diff --git a/tests/processor/src/tests/batch.rs b/tests/processor/src/tests/batch.rs index 5729fd73e..6170270ac 100644 --- a/tests/processor/src/tests/batch.rs +++ b/tests/processor/src/tests/batch.rs @@ -17,7 +17,8 @@ use serai_client::{ validator_sets::primitives::Session, }; -use processor::networks::{Network, Bitcoin, Monero}; +use serai_db::MemDb; +use processor::networks::{Network, Bitcoin, Ethereum, Monero}; use crate::{*, tests::*}; @@ -188,7 +189,7 @@ pub(crate) async fn substrate_block( #[test] fn batch_test() { - for network in [NetworkId::Bitcoin, NetworkId::Monero] { + for network in [NetworkId::Bitcoin, NetworkId::Ethereum, NetworkId::Monero] { let (coordinators, test) = new_test(network); test.run(|ops| async move { @@ -228,7 +229,7 @@ fn batch_test() { let (tx, balance_sent) = wallet.send_to_address(&ops, &key_pair.1, instruction.clone()).await; for coordinator in &mut coordinators { - coordinator.publish_transacton(&ops, &tx).await; + coordinator.publish_transaction(&ops, &tx).await; } // Put the TX past the confirmation depth @@ -245,6 +246,8 @@ fn batch_test() { // The scanner works on a 5s interval, so this leaves a few s for any processing/latency tokio::time::sleep(Duration::from_secs(10)).await; + println!("sent in transaction. 
with in instruction: {}", instruction.is_some()); + let expected_batch = Batch { network, id: i, @@ -256,10 +259,11 @@ fn batch_test() { coin: balance_sent.coin, amount: Amount( balance_sent.amount.0 - - (2 * if network == NetworkId::Bitcoin { - Bitcoin::COST_TO_AGGREGATE - } else { - Monero::COST_TO_AGGREGATE + (2 * match network { + NetworkId::Bitcoin => Bitcoin::COST_TO_AGGREGATE, + NetworkId::Ethereum => Ethereum::::COST_TO_AGGREGATE, + NetworkId::Monero => Monero::COST_TO_AGGREGATE, + NetworkId::Serai => panic!("minted for Serai?"), }), ), }, @@ -272,6 +276,8 @@ fn batch_test() { }, }; + println!("receiving batch preprocesses..."); + // Make sure the processors picked it up by checking they're trying to sign a batch for it let (mut id, mut preprocesses) = recv_batch_preprocesses(&mut coordinators, Session(0), &expected_batch, 0).await; @@ -291,6 +297,8 @@ fn batch_test() { recv_batch_preprocesses(&mut coordinators, Session(0), &expected_batch, attempt).await; } + println!("signing batch..."); + // Continue with signing the batch let batch = sign_batch(&mut coordinators, key_pair.0 .0, id, preprocesses).await; diff --git a/tests/processor/src/tests/key_gen.rs b/tests/processor/src/tests/key_gen.rs index d50c12b79..7dea0bfd5 100644 --- a/tests/processor/src/tests/key_gen.rs +++ b/tests/processor/src/tests/key_gen.rs @@ -144,7 +144,7 @@ pub(crate) async fn key_gen(coordinators: &mut [Coordinator]) -> KeyPair { #[test] fn key_gen_test() { - for network in [NetworkId::Bitcoin, NetworkId::Monero] { + for network in [NetworkId::Bitcoin, NetworkId::Ethereum, NetworkId::Monero] { let (coordinators, test) = new_test(network); test.run(|ops| async move { diff --git a/tests/processor/src/tests/mod.rs b/tests/processor/src/tests/mod.rs index 54a17020f..afda97d5e 100644 --- a/tests/processor/src/tests/mod.rs +++ b/tests/processor/src/tests/mod.rs @@ -20,8 +20,14 @@ pub(crate) const THRESHOLD: usize = ((COORDINATORS * 2) / 3) + 1; fn new_test(network: NetworkId) -> 
(Vec<(Handles, ::F)>, DockerTest) { let mut coordinators = vec![]; let mut test = DockerTest::new().with_network(dockertest::Network::Isolated); + let mut eth_handle = None; for _ in 0 .. COORDINATORS { - let (handles, coord_key, compositions) = processor_stack(network); + let (handles, coord_key, compositions) = processor_stack(network, eth_handle.clone()); + // TODO: Remove this once https://github.com/foundry-rs/foundry/issues/7955 + // This has all processors share an Ethereum node until we can sync controlled nodes + if network == NetworkId::Ethereum { + eth_handle = eth_handle.or_else(|| Some(handles.0.clone())); + } coordinators.push((handles, coord_key)); for composition in compositions { test.provide_container(composition); diff --git a/tests/processor/src/tests/send.rs b/tests/processor/src/tests/send.rs index 8685af047..62e80c095 100644 --- a/tests/processor/src/tests/send.rs +++ b/tests/processor/src/tests/send.rs @@ -8,12 +8,15 @@ use dkg::{Participant, tests::clone_without}; use messages::{sign::SignId, SubstrateContext}; use serai_client::{ - primitives::{BlockHash, NetworkId}, + primitives::{BlockHash, NetworkId, Amount, Balance, SeraiAddress}, coins::primitives::{OutInstruction, OutInstructionWithBalance}, - in_instructions::primitives::Batch, + in_instructions::primitives::{InInstruction, InInstructionWithBalance, Batch}, validator_sets::primitives::Session, }; +use serai_db::MemDb; +use processor::networks::{Network, Bitcoin, Ethereum, Monero}; + use crate::{*, tests::*}; #[allow(unused)] @@ -144,7 +147,7 @@ pub(crate) async fn sign_tx( #[test] fn send_test() { - for network in [NetworkId::Bitcoin, NetworkId::Monero] { + for network in [NetworkId::Bitcoin, NetworkId::Ethereum, NetworkId::Monero] { let (coordinators, test) = new_test(network); test.run(|ops| async move { @@ -173,9 +176,13 @@ fn send_test() { coordinators[0].sync(&ops, &coordinators[1 ..]).await; // Send into the processor's wallet - let (tx, balance_sent) = 
wallet.send_to_address(&ops, &key_pair.1, None).await; + let mut serai_address = [0; 32]; + OsRng.fill_bytes(&mut serai_address); + let instruction = InInstruction::Transfer(SeraiAddress(serai_address)); + let (tx, balance_sent) = + wallet.send_to_address(&ops, &key_pair.1, Some(instruction.clone())).await; for coordinator in &mut coordinators { - coordinator.publish_transacton(&ops, &tx).await; + coordinator.publish_transaction(&ops, &tx).await; } // Put the TX past the confirmation depth @@ -192,8 +199,25 @@ fn send_test() { // The scanner works on a 5s interval, so this leaves a few s for any processing/latency tokio::time::sleep(Duration::from_secs(10)).await; - let expected_batch = - Batch { network, id: 0, block: BlockHash(block_with_tx.unwrap()), instructions: vec![] }; + let amount_minted = Amount( + balance_sent.amount.0 - + (2 * match network { + NetworkId::Bitcoin => Bitcoin::COST_TO_AGGREGATE, + NetworkId::Ethereum => Ethereum::::COST_TO_AGGREGATE, + NetworkId::Monero => Monero::COST_TO_AGGREGATE, + NetworkId::Serai => panic!("minted for Serai?"), + }), + ); + + let expected_batch = Batch { + network, + id: 0, + block: BlockHash(block_with_tx.unwrap()), + instructions: vec![InInstructionWithBalance { + instruction, + balance: Balance { coin: balance_sent.coin, amount: amount_minted }, + }], + }; // Make sure the proceessors picked it up by checking they're trying to sign a batch for it let (id, preprocesses) = @@ -221,7 +245,7 @@ fn send_test() { block: substrate_block_num, burns: vec![OutInstructionWithBalance { instruction: OutInstruction { address: wallet.address(), data: None }, - balance: balance_sent, + balance: Balance { coin: balance_sent.coin, amount: amount_minted }, }], batches: vec![batch.batch.id], }, @@ -261,17 +285,17 @@ fn send_test() { let participating = participating.iter().map(|p| usize::from(u16::from(*p) - 1)).collect::>(); for participant in &participating { - assert!(coordinators[*participant].get_transaction(&ops, 
&tx_id).await.is_some()); + assert!(coordinators[*participant].get_published_transaction(&ops, &tx_id).await.is_some()); } // Publish this transaction to the left out nodes let tx = coordinators[*participating.iter().next().unwrap()] - .get_transaction(&ops, &tx_id) + .get_published_transaction(&ops, &tx_id) .await .unwrap(); for (i, coordinator) in coordinators.iter_mut().enumerate() { if !participating.contains(&i) { - coordinator.publish_transacton(&ops, &tx).await; + coordinator.publish_eventuality_completion(&ops, &tx).await; // Tell them of it as a completion of the relevant signing nodes coordinator .send_message(messages::sign::CoordinatorMessage::Completed {