diff --git a/.github/workflows/cd.yml b/.github/workflows/cd.yml index d08df142033..9baef8ad1ee 100644 --- a/.github/workflows/cd.yml +++ b/.github/workflows/cd.yml @@ -47,10 +47,10 @@ jobs: - uses: actions-rs/toolchain@v1 with: profile: minimal - toolchain: nightly-2022-11-14 + toolchain: nightly-2022-12-24 target: ${{ matrix.target }} override: true - - uses: Swatinem/rust-cache@v1 + - uses: Swatinem/rust-cache@v2 - uses: actions-rs/cargo@v1 if: matrix.platform != 'arm64' with: diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 1e25304dc1b..f19c49ff100 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -4,7 +4,7 @@ on: push: branches: [main, staging, trying] pull_request: - branches: [main] + branches: [main, 'testnet_*'] types: - opened - reopened @@ -27,19 +27,23 @@ jobs: - uses: actions-rs/toolchain@v1 with: profile: minimal - toolchain: nightly-2022-11-14 + toolchain: nightly-2022-12-24 components: rustfmt override: true - - uses: Swatinem/rust-cache@v1 + - uses: Swatinem/rust-cache@v2 + with: + shared-key: "sanity" + save-if: ${{ github.ref == 'refs/heads/main' }} - uses: actions-rs/cargo@v1 with: command: fmt args: --all -- --check - + check: if: github.ref != 'refs/heads/staging' needs: sanity runs-on: ubuntu-latest + continue-on-error: true steps: - uses: actions/checkout@v2 with: @@ -47,54 +51,18 @@ jobs: - uses: actions-rs/toolchain@v1 with: profile: minimal - toolchain: nightly-2022-11-14 - override: true - - uses: Swatinem/rust-cache@v1 - - uses: actions-rs/cargo@v1 - with: - command: check - - test: - if: github.ref != 'refs/heads/staging' - needs: check - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 + toolchain: nightly-2022-12-24 + - uses: Swatinem/rust-cache@v2 with: - submodules: "recursive" - - uses: actions-rs/toolchain@v1 - with: - profile: minimal - toolchain: nightly-2022-11-14 - override: true - - uses: Swatinem/rust-cache@v1 + shared-key: "check" + save-if: ${{ github.ref == 
'refs/heads/main' }} - uses: actions-rs/cargo@v1 with: - command: test - args: --lib - - doctest: - if: github.ref != 'refs/heads/staging' - needs: check - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - with: - submodules: "recursive" - - uses: actions-rs/toolchain@v1 - with: - profile: minimal - toolchain: nightly-2022-11-14 - override: true - - uses: Swatinem/rust-cache@v1 - - uses: actions-rs/cargo@v1 - with: - command: test - args: --doc + command: check clippy: if: github.ref != 'refs/heads/staging' - needs: check + needs: sanity runs-on: ubuntu-latest continue-on-error: true steps: @@ -104,10 +72,13 @@ jobs: - uses: actions-rs/toolchain@v1 with: profile: minimal - toolchain: nightly-2022-11-14 + toolchain: nightly-2022-12-24 components: clippy override: true - - uses: Swatinem/rust-cache@v1 + - uses: Swatinem/rust-cache@v2 + with: + shared-key: "clippy" + save-if: ${{ github.ref == 'refs/heads/main' }} - uses: actions-rs/clippy-check@v1 with: token: ${{ secrets.GITHUB_TOKEN }} @@ -116,7 +87,6 @@ jobs: # Full cross-platform tests required by bors to merge on main branch full: - if: github.ref == 'refs/heads/staging' name: full needs: sanity runs-on: ${{ matrix.os }} @@ -133,9 +103,12 @@ jobs: - uses: actions-rs/toolchain@v1 with: profile: minimal - toolchain: nightly-2022-11-14 + toolchain: nightly-2022-12-24 override: true - - uses: Swatinem/rust-cache@v1 + - uses: Swatinem/rust-cache@v2 + with: + shared-key: "massa" + save-if: ${{ github.ref == 'refs/heads/main' }} - uses: actions-rs/cargo@v1 with: command: install @@ -143,10 +116,9 @@ jobs: - uses: actions-rs/cargo@v1 with: command: nextest - args: run --retries 2 + args: run --features testing --retries 10 build: # quick hack because bors wrongly detect matrix jobs status - if: github.ref == 'refs/heads/staging' needs: full runs-on: ubuntu-latest steps: @@ -154,7 +126,6 @@ jobs: doc: runs-on: ubuntu-latest - if: github.ref == 'refs/heads/main' steps: - uses: actions/checkout@v2 with: @@ 
-162,9 +133,13 @@ jobs: - uses: actions-rs/toolchain@v1 with: profile: minimal - toolchain: nightly-2022-11-14 + toolchain: nightly-2022-12-24 components: rustfmt override: true + - uses: Swatinem/rust-cache@v2 + with: + shared-key: "doc" + save-if: ${{ github.ref == 'refs/heads/main' }} - uses: actions-rs/cargo@v1 with: command: doc diff --git a/.gitignore b/.gitignore index 31644ff391d..a9f7bcc542f 100644 --- a/.gitignore +++ b/.gitignore @@ -3,6 +3,7 @@ .envrc .idea .vscode +.fleet *.swp* **/Cargo.lock **/target diff --git a/.gitpod.yml b/.gitpod.yml index 8342849cda6..f6d7fbc3e9a 100644 --- a/.gitpod.yml +++ b/.gitpod.yml @@ -6,6 +6,7 @@ ports: - port: 31245 - port: 33034 - port: 33035 + - port: 33036 tasks: - init: cargo build diff --git a/Cargo.lock b/Cargo.lock index eb7527d8917..fd3e22a226a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,11 +4,11 @@ version = 3 [[package]] name = "addr2line" -version = "0.17.0" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9ecd88a8c8378ca913a680cd98f0f13ac67383d35993f86c90a70e3f137816b" +checksum = "a76fd60b23679b7d19bd066031410fb7e458ccc5e958eb5c325888ce4baedc97" dependencies = [ - "gimli", + "gimli 0.27.0", ] [[package]] @@ -87,6 +87,21 @@ dependencies = [ "alloc-no-stdlib", ] +[[package]] +name = "android_system_properties" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", +] + +[[package]] +name = "anes" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" + [[package]] name = "ansi_term" version = "0.12.1" @@ -98,9 +113,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.66" +version = "1.0.68" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"216261ddc8289130e551ddcd5ce8a064710c0d064a4d2895c67151c92b5443f6" +checksum = "2cb2f989d18dd141ab8ae82f64d1a8cdd37e0840f73a406896cf5e99502fab61" [[package]] name = "arrayref" @@ -116,9 +131,8 @@ checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" [[package]] name = "as-ffi-bindings" -version = "0.2.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2208edd363e0fa2147e52cf05a9ed66cb56636e91fc92a78ba0a9193b977b049" +version = "0.2.5" +source = "git+https://github.com/massalabs/as-ffi-bindings.git?tag=v0.3.0#d40a1586953d396508ef739f39f48e1a18e0b0cc" dependencies = [ "anyhow", "wasmer", @@ -162,9 +176,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.58" +version = "0.1.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e805d94e6b5001b651426cf4cd446b1ab5f319d27bab5c644f61de0a804360c" +checksum = "677d1d8ab452a3936018a687b20e6f7cf5363d713b732b8884001317b0e48aa3" dependencies = [ "proc-macro2", "quote", @@ -177,7 +191,7 @@ version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" dependencies = [ - "hermit-abi", + "hermit-abi 0.1.19", "libc", "winapi", ] @@ -190,16 +204,16 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "backtrace" -version = "0.3.66" +version = "0.3.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cab84319d616cfb654d03394f38ab7e6f0919e181b1b57e1fd15e7fb4077d9a7" +checksum = "233d376d6d185f2a3093e58f283f60f880315b6c60075b01f36b3b85154564ca" dependencies = [ "addr2line", "cc", "cfg-if", "libc", - "miniz_oxide 0.5.4", - "object 0.29.0", + "miniz_oxide", + "object", "rustc-demangle", ] @@ -428,11 +442,17 @@ dependencies = [ "pkg-config", ] +[[package]] +name = "cast" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" + [[package]] name = "cc" -version = "1.0.77" +version = "1.0.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9f73505338f7d905b19d18738976aae232eb46b8efc15554ffc56deb5d9ebe4" +checksum = "a20104e2335ce8a659d6dd92a51a767a0c062599c73b343fd152cb401e828c3d" dependencies = [ "jobserver", ] @@ -452,6 +472,46 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "chrono" +version = "0.4.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16b0a3d9ed01224b22057780a37bb8c5dbfe1be8ba48678e7bf57ec4b385411f" +dependencies = [ + "iana-time-zone", + "num-integer", + "num-traits", + "serde", + "winapi", +] + +[[package]] +name = "ciborium" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0c137568cc60b904a7724001b35ce2630fd00d5d84805fbb608ab89509d788f" +dependencies = [ + "ciborium-io", + "ciborium-ll", + "serde", +] + +[[package]] +name = "ciborium-io" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "346de753af073cc87b52b2083a506b38ac176a44cfb05497b622e27be899b369" + +[[package]] +name = "ciborium-ll" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "213030a2b5a4e0c0892b6652260cf6ccac84827b83a85a534e178e3906c4cf1b" +dependencies = [ + "ciborium-io", + "half", +] + [[package]] name = "cipher" version = "0.4.3" @@ -482,12 +542,33 @@ dependencies = [ "ansi_term", "atty", "bitflags", - "strsim", - "textwrap", + "strsim 0.8.0", + "textwrap 0.11.0", "unicode-width", "vec_map", ] +[[package]] +name = "clap" +version = "3.2.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71655c45cb9845d3270c9d6df84ebe72b4dad3c2ba3f7023ad47c144e4e473a5" +dependencies = 
[ + "bitflags", + "clap_lex", + "indexmap", + "textwrap 0.16.0", +] + +[[package]] +name = "clap_lex" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5" +dependencies = [ + "os_str_bytes", +] + [[package]] name = "clipboard-win" version = "4.4.2" @@ -500,20 +581,20 @@ dependencies = [ ] [[package]] -name = "combine" -version = "4.6.6" +name = "codespan-reporting" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35ed6e9d84f0b51a7f52daf1c7d71dd136fd7a3f41a8462b8cdb8c78d920fad4" +checksum = "3538270d33cc669650c4b093848450d380def10c331d38c768e34cac80576e6e" dependencies = [ - "bytes", - "memchr", + "termcolor", + "unicode-width", ] [[package]] name = "config" -version = "0.13.2" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11f1667b8320afa80d69d8bbe40830df2c8a06003d86f73d8e003b2c48df416d" +checksum = "d379af7f68bfc21714c6c7dea883544201741d2ce8274bb12fa54f89507f52a7" dependencies = [ "async-trait", "json5", @@ -564,19 +645,6 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" -[[package]] -name = "cornetto" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a47c2a04089d9de013f44c298c551919be450885957facba37c7831826fc63f" -dependencies = [ - "anyhow", - "lazy_static", - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "corosensei" version = "0.1.3" @@ -601,56 +669,57 @@ dependencies = [ [[package]] name = "cranelift-bforest" -version = "0.82.3" +version = "0.86.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38faa2a16616c8e78a18d37b4726b98bfd2de192f2fdc8a39ddf568a408a0f75" +checksum = "529ffacce2249ac60edba2941672dfedf3d96558b415d0d8083cd007456e0f55" 
dependencies = [ "cranelift-entity", ] [[package]] name = "cranelift-codegen" -version = "0.82.3" +version = "0.86.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26f192472a3ba23860afd07d2b0217dc628f21fcc72617aa1336d98e1671f33b" +checksum = "427d105f617efc8cb55f8d036a7fded2e227892d8780b4985e5551f8d27c4a92" dependencies = [ "cranelift-bforest", "cranelift-codegen-meta", "cranelift-codegen-shared", "cranelift-entity", - "gimli", + "cranelift-isle", + "gimli 0.26.2", "log", - "regalloc", + "regalloc2", "smallvec", "target-lexicon", ] [[package]] name = "cranelift-codegen-meta" -version = "0.82.3" +version = "0.86.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f32ddb89e9b89d3d9b36a5b7d7ea3261c98235a76ac95ba46826b8ec40b1a24" +checksum = "551674bed85b838d45358e3eab4f0ffaa6790c70dc08184204b9a54b41cdb7d1" dependencies = [ "cranelift-codegen-shared", ] [[package]] name = "cranelift-codegen-shared" -version = "0.82.3" +version = "0.86.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01fd0d9f288cc1b42d9333b7a776b17e278fc888c28e6a0f09b5573d45a150bc" +checksum = "2b3a63ae57498c3eb495360944a33571754241e15e47e3bcae6082f40fec5866" [[package]] name = "cranelift-entity" -version = "0.82.3" +version = "0.86.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e3bfe172b83167604601faf9dc60453e0d0a93415b57a9c4d1a7ae6849185cf" +checksum = "11aa8aa624c72cc1c94ea3d0739fa61248260b5b14d3646f51593a88d67f3e6e" [[package]] name = "cranelift-frontend" -version = "0.82.3" +version = "0.86.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a006e3e32d80ce0e4ba7f1f9ddf66066d052a8c884a110b91d05404d6ce26dce" +checksum = "544ee8f4d1c9559c9aa6d46e7aaeac4a13856d620561094f35527356c7d21bd0" dependencies = [ "cranelift-codegen", "log", @@ -658,6 +727,12 @@ dependencies = [ "target-lexicon", ] +[[package]] +name = "cranelift-isle" +version = "0.86.1" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed16b14363d929b8c37e3c557d0a7396791b383ecc302141643c054343170aad" + [[package]] name = "crc32fast" version = "1.3.2" @@ -667,6 +742,42 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "criterion" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7c76e09c1aae2bc52b3d2f29e13c6572553b30c4aa1b8a49fd70de6412654cb" +dependencies = [ + "anes", + "atty", + "cast", + "ciborium", + "clap 3.2.23", + "criterion-plot", + "itertools", + "lazy_static", + "num-traits", + "oorandom", + "plotters", + "rayon", + "regex", + "serde", + "serde_derive", + "serde_json", + "tinytemplate", + "walkdir", +] + +[[package]] +name = "criterion-plot" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" +dependencies = [ + "cast", + "itertools", +] + [[package]] name = "crossbeam-channel" version = "0.5.6" @@ -753,6 +864,50 @@ dependencies = [ "zeroize", ] +[[package]] +name = "cxx" +version = "1.0.85" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5add3fc1717409d029b20c5b6903fc0c0b02fa6741d820054f4a2efa5e5816fd" +dependencies = [ + "cc", + "cxxbridge-flags", + "cxxbridge-macro", + "link-cplusplus", +] + +[[package]] +name = "cxx-build" +version = "1.0.85" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4c87959ba14bc6fbc61df77c3fcfe180fc32b93538c4f1031dd802ccb5f2ff0" +dependencies = [ + "cc", + "codespan-reporting", + "once_cell", + "proc-macro2", + "quote", + "scratch", + "syn", +] + +[[package]] +name = "cxxbridge-flags" +version = "1.0.85" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69a3e162fde4e594ed2b07d0f83c6c67b745e7f28ce58c6df5e6b6bef99dfb59" + +[[package]] +name = "cxxbridge-macro" +version = "1.0.85" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e7e2adeb6a0d4a282e581096b06e1791532b7d576dcde5ccd9382acf55db8e6" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "darling" version = "0.14.2" @@ -773,6 +928,7 @@ dependencies = [ "ident_case", "proc-macro2", "quote", + "strsim 0.10.0", "syn", ] @@ -985,9 +1141,9 @@ dependencies = [ [[package]] name = "enum-map" -version = "2.4.1" +version = "2.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5a56d54c8dd9b3ad34752ed197a4eb2a6601bc010808eb097a04a58ae4c43e1" +checksum = "50c25992259941eb7e57b936157961b217a4fc8597829ddef0596d6c3cd86e1a" dependencies = [ "enum-map-derive", "serde", @@ -995,9 +1151,9 @@ dependencies = [ [[package]] name = "enum-map-derive" -version = "0.10.0" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9045e2676cd5af83c3b167d917b0a5c90a4d8e266e2683d6631b235c457fc27" +checksum = "2a4da76b3b6116d758c7ba93f7ec6a35d2e2cf24feda76c6e38a375f4d5c59f2" dependencies = [ "proc-macro2", "quote", @@ -1027,9 +1183,9 @@ dependencies = [ [[package]] name = "erased-serde" -version = "0.3.23" +version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54558e0ba96fbe24280072642eceb9d7d442e32c7ec0ea9e7ecd7b4ea2cf4e11" +checksum = "e4ca605381c017ec7a5fef5e548f1cfaa419ed0f6df6367339300db74c92aa7d" dependencies = [ "serde", ] @@ -1110,7 +1266,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8a2db397cb1c8772f31494cb8917e48cd1e64f0fa7efac59fbd741a0a8ce841" dependencies = [ "crc32fast", - "miniz_oxide 0.6.2", + "miniz_oxide", ] [[package]] @@ -1119,6 +1275,21 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "function_name" +version = "0.3.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1ab577a896d09940b5fe12ec5ae71f9d8211fff62c919c03a3750a9901e98a7" +dependencies = [ + "function_name-proc-macro", +] + +[[package]] +name = "function_name-proc-macro" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "673464e1e314dd67a0fd9544abc99e8eb28d0c7e3b69b033bcff9b2d00b87333" + [[package]] name = "funty" version = "2.0.0" @@ -1239,6 +1410,15 @@ dependencies = [ "slab", ] +[[package]] +name = "fxhash" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c" +dependencies = [ + "byteorder", +] + [[package]] name = "generic-array" version = "0.14.6" @@ -1292,6 +1472,12 @@ dependencies = [ "stable_deref_trait", ] +[[package]] +name = "gimli" +version = "0.27.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dec7af912d60cdbd3677c1af9352ebae6fb8394d165568a2234df0fa00f87793" + [[package]] name = "glob" version = "0.3.0" @@ -1313,9 +1499,9 @@ dependencies = [ [[package]] name = "gloo-net" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec897194fb9ac576c708f63d35604bc58f2a262b8cec0fabfed26f3991255f21" +checksum = "9050ff8617e950288d7bf7f300707639fdeda5ca0d0ecf380cff448cfd52f4a6" dependencies = [ "futures-channel", "futures-core", @@ -1333,9 +1519,9 @@ dependencies = [ [[package]] name = "gloo-timers" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fb7d06c1c8cc2a29bee7ec961009a0b2caa0793ee4900c2ffb348734ba1c8f9" +checksum = "98c4a8d6391675c6b2ee1a6c8d06e8e2d03605c44cec1270675985a4c2a5500b" dependencies = [ "futures-channel", "futures-core", @@ -1345,9 +1531,9 @@ dependencies = [ [[package]] name = "gloo-utils" -version = "0.1.5" +version = "0.1.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "40913a05c8297adca04392f707b1e73b12ba7b8eab7244a4961580b1fd34063c" +checksum = "a8e8fc851e9c7b9852508bc6e3f690f452f474417e8545ec9857b7f7377036b5" dependencies = [ "js-sys", "serde", @@ -1375,6 +1561,12 @@ dependencies = [ "tracing", ] +[[package]] +name = "half" +version = "1.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" + [[package]] name = "hashbrown" version = "0.11.2" @@ -1427,6 +1619,21 @@ dependencies = [ "libc", ] +[[package]] +name = "hermit-abi" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee512640fe35acbfb4bb779db6f0d80704c2cacfa2e39b601ef3e3f47d1ae4c7" +dependencies = [ + "libc", +] + +[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" + [[package]] name = "hmac" version = "0.12.1" @@ -1508,9 +1715,9 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.23.1" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59df7c4e19c950e6e0e868dcc0a300b09a9b88e9ec55bd879ca819087a77355d" +checksum = "1788965e61b367cd03a62950836d5cd41560c3577d90e40e0819373194d1661c" dependencies = [ "http", "hyper", @@ -1522,6 +1729,30 @@ dependencies = [ "webpki-roots", ] +[[package]] +name = "iana-time-zone" +version = "0.1.53" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64c122667b287044802d6ce17ee2ddf13207ed924c712de9a66a5814d5b64765" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "wasm-bindgen", + "winapi", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"0703ae284fc167426161c2e3f1da3ea71d94b21bedbcc9494e92b28e334e3dca" +dependencies = [ + "cxx", + "cxx-build", +] + [[package]] name = "ident_case" version = "1.0.1" @@ -1587,9 +1818,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.4" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4217ad341ebadf8d8e724e264f13e593e0648f5b3e94b3896a5df283be015ecc" +checksum = "fad582f4b9e86b6caa621cabeb0963332d92eea04729ab12892c2533951e6440" [[package]] name = "jobserver" @@ -1622,9 +1853,9 @@ dependencies = [ [[package]] name = "jsonrpsee" -version = "0.16.1" +version = "0.16.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5af9646e616e37c61093ef85e25bd883ae0c22e2fa1e6eedfe590048247116e3" +checksum = "7d291e3a5818a2384645fd9756362e6d89cf0541b0b916fa7702ea4a9833608e" dependencies = [ "jsonrpsee-client-transport", "jsonrpsee-core", @@ -1639,9 +1870,9 @@ dependencies = [ [[package]] name = "jsonrpsee-client-transport" -version = "0.16.1" +version = "0.16.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e85cfc9c2f17eab237fdfa2efe5c1608fd06a90e1e0d7fd7b10f2d0e153f375" +checksum = "965de52763f2004bc91ac5bcec504192440f0b568a5d621c59d9dbd6f886c3fb" dependencies = [ "anyhow", "futures-channel", @@ -1664,9 +1895,9 @@ dependencies = [ [[package]] name = "jsonrpsee-core" -version = "0.16.1" +version = "0.16.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "673d68136e2f0f67323bab95b3a7177df26ac21ddbf395fc32d60f30fe5a1364" +checksum = "a4e70b4439a751a5de7dd5ed55eacff78ebf4ffe0fc009cb1ebb11417f5b536b" dependencies = [ "anyhow", "arrayvec", @@ -1693,9 +1924,9 @@ dependencies = [ [[package]] name = "jsonrpsee-http-client" -version = "0.16.1" +version = "0.16.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42007820863ab29f3adeacf43886ef54abaedb35bc33dada25771db4e1f94de4" +checksum = 
"cc345b0a43c6bc49b947ebeb936e886a419ee3d894421790c969cc56040542ad" dependencies = [ "async-trait", "hyper", @@ -1712,9 +1943,9 @@ dependencies = [ [[package]] name = "jsonrpsee-proc-macros" -version = "0.16.1" +version = "0.16.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ed8b96f9d2d6a984fd75784ac8bfed994ee40980626b85791782dcd13ffb7ac" +checksum = "baa6da1e4199c10d7b1d0a6e5e8bd8e55f351163b6f4b3cbb044672a69bd4c1c" dependencies = [ "heck 0.4.0", "proc-macro-crate 1.2.1", @@ -1725,9 +1956,9 @@ dependencies = [ [[package]] name = "jsonrpsee-server" -version = "0.16.1" +version = "0.16.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a78f34520019321bd466d00620606db2f40827362d0185b3b95040328eb502f6" +checksum = "1fb69dad85df79527c019659a992498d03f8495390496da2f07e6c24c2b356fc" dependencies = [ "futures-channel", "futures-util", @@ -1747,9 +1978,9 @@ dependencies = [ [[package]] name = "jsonrpsee-types" -version = "0.16.1" +version = "0.16.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7985a27ee315c7c8c5c5033ac133e9472aec881edfd947780f5a9970efb7cbbf" +checksum = "5bd522fe1ce3702fd94812965d7bb7a3364b1c9aba743944c5a00529aae80f8c" dependencies = [ "anyhow", "beef", @@ -1761,9 +1992,9 @@ dependencies = [ [[package]] name = "jsonrpsee-wasm-client" -version = "0.16.1" +version = "0.16.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46811fcec615d8e58228e7e281b3238693b26da1eb2469ac208af40a217bc8d9" +checksum = "a77310456f43c6c89bcba1f6b2fc2a28300da7c341f320f5128f8c83cc63232d" dependencies = [ "jsonrpsee-client-transport", "jsonrpsee-core", @@ -1772,9 +2003,9 @@ dependencies = [ [[package]] name = "jsonrpsee-ws-client" -version = "0.16.1" +version = "0.16.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "480fc9922f10b8fca3f07c07c51e137ddcf13fd60a304f117cfaa9e9bf41c60b" +checksum = 
"0b83daeecfc6517cfe210df24e570fb06213533dfb990318fae781f4c7119dd9" dependencies = [ "http", "jsonrpsee-client-transport", @@ -1811,9 +2042,9 @@ checksum = "884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67" [[package]] name = "libc" -version = "0.2.137" +version = "0.2.139" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc7fcc620a3bff7cdd7a365be3376c97191aeaccc2a603e600951e452615bf89" +checksum = "201de327520df007757c1f0adce6e827fe8562fbc28bfd9c15571c66ca1f5f79" [[package]] name = "libloading" @@ -1857,6 +2088,15 @@ dependencies = [ "vcpkg", ] +[[package]] +name = "link-cplusplus" +version = "1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecd207c9c713c34f95a097a5b029ac2ce6010530c7b49d7fea24d977dede04f5" +dependencies = [ + "cc", +] + [[package]] name = "linked-hash-map" version = "0.5.6" @@ -1865,9 +2105,9 @@ checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "linux-raw-sys" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f9f08d8963a6c613f4b1a78f4f4a4dbfadf8e6545b2d72861731e4858b8b47f" +checksum = "f051f77a7c8e6957c0696eac88f26b0117e54f52d3fc682ab19397a8812846a4" [[package]] name = "lock_api" @@ -1894,7 +2134,6 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b6a72dfa44fe15b5e76b94307eeb2ff995a8c5b283b55008940c02e0c5b634d" dependencies = [ - "indexmap", "loupe-derive", "rustversion", ] @@ -1993,14 +2232,14 @@ dependencies = [ [[package]] name = "massa-sc-runtime" -version = "0.9.0" -source = "git+https://github.com/massalabs/massa-sc-runtime?tag=v0.9.0#d082983e73f19d236aed24ac2e4607414d43368b" +version = "0.10.0" +source = "git+https://github.com/massalabs/massa-sc-runtime#04082b4986753160f9c3a075daf9f6f772d73c29" dependencies = [ "anyhow", "as-ffi-bindings", "base64", - "cornetto", - "lazy_static", + "displaydoc", + 
"function_name", "loupe", "more-asserts 0.3.1", "parking_lot", @@ -2009,9 +2248,9 @@ dependencies = [ "serde", "serde_json", "serial_test 0.8.0", + "thiserror", "wasmer", "wasmer-compiler-singlepass", - "wasmer-engine-universal", "wasmer-middlewares", "wasmer-types", ] @@ -2043,6 +2282,7 @@ dependencies = [ "serde_json", "thiserror", "tokio", + "tokio-stream", "tower", "tower-http", "tracing", @@ -2056,6 +2296,7 @@ dependencies = [ "futures", "lazy_static", "massa_hash", + "massa_ledger_exports", "massa_logging", "massa_models", "massa_serialization", @@ -2067,7 +2308,7 @@ dependencies = [ "rand 0.8.5", "serde", "serde_json", - "serial_test 0.9.0", + "serial_test 0.10.0", "thiserror", "tracing", ] @@ -2103,7 +2344,7 @@ dependencies = [ "rand 0.8.5", "serde", "serde_json", - "serial_test 0.9.0", + "serial_test 0.10.0", "tempfile", "thiserror", "tokio", @@ -2132,6 +2373,7 @@ version = "0.1.0" dependencies = [ "crossbeam-channel", "displaydoc", + "jsonrpsee", "massa_execution_exports", "massa_hash", "massa_models", @@ -2146,6 +2388,7 @@ dependencies = [ "serde", "serde_json", "thiserror", + "tokio", ] [[package]] @@ -2182,6 +2425,7 @@ name = "massa_execution_exports" version = "0.1.0" dependencies = [ "displaydoc", + "massa-sc-runtime", "massa_final_state", "massa_hash", "massa_ledger_exports", @@ -2198,6 +2442,7 @@ name = "massa_execution_worker" version = "0.1.0" dependencies = [ "anyhow", + "criterion", "massa-sc-runtime", "massa_async_pool", "massa_executed_ops", @@ -2217,7 +2462,7 @@ dependencies = [ "rand 0.8.5", "rand_xoshiro", "serde_json", - "serial_test 0.9.0", + "serial_test 0.10.0", "tempfile", "tracing", ] @@ -2268,7 +2513,7 @@ dependencies = [ "parking_lot", "serde", "serde_json", - "serial_test 0.9.0", + "serial_test 0.10.0", "tracing", ] @@ -2302,7 +2547,7 @@ dependencies = [ "nom", "serde", "serde_json", - "serial_test 0.9.0", + "serial_test 0.10.0", "thiserror", ] @@ -2343,7 +2588,7 @@ version = "0.1.0" dependencies = [ "pretty_assertions", 
"serde_json", - "serial_test 0.9.0", + "serial_test 0.10.0", "tracing", ] @@ -2366,7 +2611,9 @@ dependencies = [ "num_enum", "rust_decimal", "serde", - "serial_test 0.9.0", + "serde_with", + "serial_test 0.10.0", + "strum", "thiserror", ] @@ -2409,7 +2656,7 @@ dependencies = [ "rand 0.8.5", "serde", "serde_json", - "serial_test 0.9.0", + "serial_test 0.10.0", "tempfile", "tokio", "tracing", @@ -2523,7 +2770,7 @@ dependencies = [ "massa_time", "rayon", "serde_json", - "serial_test 0.9.0", + "serial_test 0.10.0", "tokio", "tracing", ] @@ -2561,7 +2808,7 @@ dependencies = [ "rand 0.7.3", "serde", "serde_json", - "serial_test 0.9.0", + "serial_test 0.10.0", "thiserror", ] @@ -2673,15 +2920,6 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" -[[package]] -name = "miniz_oxide" -version = "0.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96590ba8f175222643a85693f33d26e9c8a015f599c216509b1a6894af675d34" -dependencies = [ - "adler", -] - [[package]] name = "miniz_oxide" version = "0.6.2" @@ -2726,9 +2964,9 @@ dependencies = [ [[package]] name = "nix" -version = "0.24.2" +version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "195cdbc1741b8134346d515b3a56a1c94b0912758009cfd53f99ea0f57b065fc" +checksum = "fa52e972a9a719cecb6864fb88568781eb706bac2cd1d4f04a648542dbf78069" dependencies = [ "bitflags", "cfg-if", @@ -2745,6 +2983,16 @@ dependencies = [ "minimal-lexical", ] +[[package]] +name = "nom8" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75d908f0297c3526d34e478d438b07eefe3d7b0416494d7ffccb17f1c7f7262c" +dependencies = [ + "memchr", + "minimal-lexical", +] + [[package]] name = "nu-ansi-term" version = "0.46.0" @@ -2837,11 +3085,11 @@ dependencies = [ [[package]] name = "num_cpus" -version = "1.14.0" +version = "1.15.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6058e64324c71e02bc2b150e4f3bc8286db6c83092132ffa3f6b1eab0f9def5" +checksum = "0fac9e2da13b5eb447a6ce3d392f23a29d8694bff781bf03a16cd9ac8697593b" dependencies = [ - "hermit-abi", + "hermit-abi 0.2.6", "libc", ] @@ -2868,21 +3116,9 @@ dependencies = [ [[package]] name = "object" -version = "0.28.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e42c982f2d955fac81dd7e1d0e1426a7d702acd9c98d19ab01083a6a0328c424" -dependencies = [ - "crc32fast", - "hashbrown 0.11.2", - "indexmap", - "memchr", -] - -[[package]] -name = "object" -version = "0.29.0" +version = "0.30.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21158b2c33aa6d4561f1c0a6ea283ca92bc54802a93b263e910746d679a7eb53" +checksum = "239da7f290cfa979f43f85a8efeee9a8a76d0827c356d37f9d3d7254d6b537fb" dependencies = [ "memchr", ] @@ -2893,6 +3129,12 @@ version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "86f0b0d4bf799edbc74508c1e8bf170ff5f41238e5f8225603ca7caaae2b7860" +[[package]] +name = "oorandom" +version = "11.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" + [[package]] name = "opaque-debug" version = "0.3.0" @@ -2915,6 +3157,12 @@ dependencies = [ "hashbrown 0.12.3", ] +[[package]] +name = "os_str_bytes" +version = "6.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b7820b9daea5457c9f21c69448905d723fbd21136ccf521748f23fd49e723ee" + [[package]] name = "output_vt100" version = "0.1.3" @@ -2948,9 +3196,9 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.4" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dc9e0dc2adc1c69d09143aff38d3d30c5c3f0df0dad82e6d25547af174ebec0" +checksum = 
"7ff9f3fef3968a3ec5945535ed654cb38ff72d7495a25619e2247fb15a2ed9ba" dependencies = [ "backtrace", "cfg-if", @@ -3032,9 +3280,9 @@ checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" [[package]] name = "pest" -version = "2.5.0" +version = "2.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f400b0f7905bf702f9f3dc3df5a121b16c54e9e8012c082905fdf09a931861a" +checksum = "0f6e86fb9e7026527a0d46bc308b841d73170ef8f443e1807f6ef88526a816d4" dependencies = [ "thiserror", "ucd-trie", @@ -3042,9 +3290,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.5.0" +version = "2.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "423c2ba011d6e27b02b482a3707c773d19aec65cc024637aec44e19652e66f63" +checksum = "96504449aa860c8dcde14f9fba5c58dc6658688ca1fe363589d6327b8662c603" dependencies = [ "pest", "pest_generator", @@ -3052,9 +3300,9 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.5.0" +version = "2.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e64e6c2c85031c02fdbd9e5c72845445ca0a724d419aa0bc068ac620c9935c1" +checksum = "798e0220d1111ae63d66cb66a5dcb3fc2d986d520b98e49e1852bfdb11d7c5e7" dependencies = [ "pest", "pest_meta", @@ -3065,9 +3313,9 @@ dependencies = [ [[package]] name = "pest_meta" -version = "2.5.0" +version = "2.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57959b91f0a133f89a68be874a5c88ed689c19cd729ecdb5d762ebf16c64d662" +checksum = "984298b75898e30a843e278a9f2452c31e349a073a0ce6fd950a12a74464e065" dependencies = [ "once_cell", "pest", @@ -3122,6 +3370,34 @@ version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160" +[[package]] +name = "plotters" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"2538b639e642295546c50fcd545198c9d64ee2a38620a628724a3b266d5fbf97" +dependencies = [ + "num-traits", + "plotters-backend", + "plotters-svg", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "plotters-backend" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "193228616381fecdc1224c62e96946dfbc73ff4384fba576e052ff8c1bea8142" + +[[package]] +name = "plotters-svg" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9a81d2759aae1dae668f783c308bc5c8ebd191ff4184aaa1b37f65a6ae5a56f" +dependencies = [ + "plotters-backend", +] + [[package]] name = "polyval" version = "0.6.0" @@ -3198,9 +3474,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.47" +version = "1.0.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ea3d908b0e36316caf9e9e2c4625cdde190a7e6f440d794667ed17a1855e725" +checksum = "57a8eca9f9c4ffde41714334dee777596264c7825420f521abc92b5b5deb63a5" dependencies = [ "unicode-ident", ] @@ -3227,9 +3503,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.21" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179" +checksum = "8856d8364d252a14d474036ea1358d63c9e6965c8e5c1885c18f73d70bff9c7b" dependencies = [ "proc-macro2", ] @@ -3342,11 +3618,10 @@ dependencies = [ [[package]] name = "rayon" -version = "1.6.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e060280438193c554f654141c9ea9417886713b7acd75974c85b18a69a88e0b" +checksum = "6db3a213adf02b3bcfd2d3846bb41cb22857d131789e01df434fb7e7bc0759b7" dependencies = [ - "crossbeam-deque", "either", "rayon-core", ] @@ -3384,13 +3659,14 @@ dependencies = [ ] [[package]] -name = "regalloc" -version = "0.0.34" +name = "regalloc2" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "62446b1d3ebf980bdc68837700af1d77b37bc430e524bf95319c6eada2a4cc02" +checksum = "d43a209257d978ef079f3d446331d0f1794f5e0fc19b306a199983857833a779" dependencies = [ + "fxhash", "log", - "rustc-hash", + "slice-group-by", "smallvec", ] @@ -3464,6 +3740,7 @@ checksum = "cec2b3485b07d96ddfd3134767b8a447b45ea4eb91448d0a35180ec0ffd5ed15" dependencies = [ "bytecheck", "hashbrown 0.12.3", + "indexmap", "ptr_meta", "rend", "rkyv_derive", @@ -3544,9 +3821,9 @@ checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] name = "rustix" -version = "0.36.3" +version = "0.36.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b1fbb4dfc4eb1d390c02df47760bb19a84bb80b301ecc947ab5406394d8223e" +checksum = "4feacf7db682c6c329c4ede12649cd36ecab0f3be5b7d74e6a20304725db4549" dependencies = [ "bitflags", "errno", @@ -3591,9 +3868,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.9" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97477e48b4cf8603ad5f7aaf897467cf42ab4218a38ef76fb14c2d6773a6d6a8" +checksum = "5583e89e108996506031660fe09baa5011b9dd0341b89029313006d1fb508d70" [[package]] name = "rustyline" @@ -3631,9 +3908,18 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.11" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b4b9743ed687d4b4bcedf9ff5eaa7398495ae14e61cba0a295704edbc7decde" + +[[package]] +name = "same-file" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4501abdff3ae82a1c1b477a17252eb69cee9e66eb915c1abaa4f44d873df9f09" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", +] [[package]] name = "schannel" @@ -3651,6 +3937,12 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" +[[package]] +name = "scratch" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddccb15bcce173023b3fedd9436f882a0739b8dfb45e4f6b6002bee5929f61b2" + [[package]] name = "sct" version = "0.7.0" @@ -3698,27 +3990,29 @@ checksum = "f638d531eccd6e23b980caf34876660d38e265409d8e99b397ab71eb3612fad0" [[package]] name = "serde" -version = "1.0.148" +version = "1.0.152" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e53f64bb4ba0191d6d0676e1b141ca55047d83b74f5607e6d8eb88126c52c2dc" +checksum = "bb7d1f0d3021d347a83e556fc4683dea2ea09d87bccdf88ff5c12545d89d5efb" dependencies = [ "serde_derive", ] [[package]] -name = "serde_bytes" -version = "0.11.7" +name = "serde-wasm-bindgen" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfc50e8183eeeb6178dcb167ae34a8051d63535023ae38b5d8d12beae193d37b" +checksum = "e3b4c031cd0d9014307d82b8abf653c0290fbdaeb4c02d00c63cf52f728628bf" dependencies = [ + "js-sys", "serde", + "wasm-bindgen", ] [[package]] name = "serde_derive" -version = "1.0.148" +version = "1.0.152" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a55492425aa53521babf6137309e7d34c20bbfbbfcfe2c7f3a047fd1f6b92c0c" +checksum = "af487d118eecd09402d70a5d72551860e788df87b464af30e5ea6a38c75c541e" dependencies = [ "proc-macro2", "quote", @@ -3727,9 +4021,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.89" +version = "1.0.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "020ff22c755c2ed3f8cf162dbb41a7268d934702f3ed3631656ea597e08fc3db" +checksum = "877c235533714907a8c2464236f5c4b2a17262ef1bd71f38f35ea592c8da6883" dependencies = [ "itoa", "ryu", @@ -3747,6 +4041,34 @@ dependencies = [ "thiserror", ] +[[package]] +name = "serde_with" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "25bf4a5a814902cd1014dbccfa4d4560fb8432c779471e96e035602519f82eef" +dependencies = [ + "base64", + "chrono", + "hex", + "indexmap", + "serde", + "serde_json", + "serde_with_macros", + "time", +] + +[[package]] +name = "serde_with_macros" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3452b4c0f6c1e357f73fdb87cd1efabaa12acf328c7a528e252893baeb3f4aa" +dependencies = [ + "darling", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "serial_test" version = "0.8.0" @@ -3762,16 +4084,16 @@ dependencies = [ [[package]] name = "serial_test" -version = "0.9.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92761393ee4dc3ff8f4af487bd58f4307c9329bbedea02cac0089ad9c411e153" +checksum = "1c789ec87f4687d022a2405cf46e0cd6284889f1839de292cadeb6c6019506f2" dependencies = [ "dashmap", "futures", "lazy_static", "log", "parking_lot", - "serial_test_derive 0.9.0", + "serial_test_derive 0.10.0", ] [[package]] @@ -3789,11 +4111,10 @@ dependencies = [ [[package]] name = "serial_test_derive" -version = "0.9.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b6f5d1c3087fb119617cff2966fe3808a80e5eb59a8c1601d5994d66f4346a5" +checksum = "b64f9e531ce97c88b4778aad0ceee079216071cffec6ac9b904277f8f92e7fe3" dependencies = [ - "proc-macro-error", "proc-macro2", "quote", "syn", @@ -3886,6 +4207,12 @@ dependencies = [ "autocfg", ] +[[package]] +name = "slice-group-by" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03b634d87b960ab1a38c4fe143b508576f075e7c978bfad18217645ebfdfa2ec" + [[package]] name = "smallvec" version = "1.10.0" @@ -3942,13 +4269,19 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" +[[package]] +name = "strsim" +version = "0.10.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" + [[package]] name = "structopt" version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c6b5c64445ba8094a6ab0c3cd2ad323e07171012d9c98b0b15651daf1787a10" dependencies = [ - "clap", + "clap 2.34.0", "lazy_static", "paw", "structopt-derive", @@ -3972,6 +4305,9 @@ name = "strum" version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "063e6045c0e62079840579a7e47a355ae92f60eb74daaf156fb1e84ba164e63f" +dependencies = [ + "strum_macros", +] [[package]] name = "strum_macros" @@ -3994,9 +4330,9 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "syn" -version = "1.0.104" +version = "1.0.107" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ae548ec36cf198c0ef7710d3c230987c2d6d7bd98ad6edc0274462724c585ce" +checksum = "1f4064b5b16e03ae50984a5a8ed5d4f8803e6bc1fd170a3cda91a1be4b18e3f5" dependencies = [ "proc-macro2", "quote", @@ -4041,6 +4377,15 @@ dependencies = [ "winapi", ] +[[package]] +name = "termcolor" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755" +dependencies = [ + "winapi-util", +] + [[package]] name = "terminal_size" version = "0.1.17" @@ -4060,20 +4405,26 @@ dependencies = [ "unicode-width", ] +[[package]] +name = "textwrap" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" + [[package]] name = "thiserror" -version = "1.0.37" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10deb33631e3c9018b9baf9dcbbc4f737320d2b576bac10f6aefa048fa407e3e" +checksum = 
"6a9cd18aa97d5c45c6603caea1da6628790b37f7a34b6ca89522331c5180fed0" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.37" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "982d17546b47146b28f7c22e3d08465f6b8903d0ea13c1660d9d84a6e7adcdbb" +checksum = "1fb327af4685e4d03fa8cbcf1716380da910eeb2bb8be417e7f9fd3fb164f36f" dependencies = [ "proc-macro2", "quote", @@ -4127,11 +4478,21 @@ dependencies = [ "time-core", ] +[[package]] +name = "tinytemplate" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc" +dependencies = [ + "serde", + "serde_json", +] + [[package]] name = "tokio" -version = "1.22.0" +version = "1.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d76ce4a75fb488c605c54bf610f221cea8b0dafb53333c1a67e8ee199dcd2ae3" +checksum = "eab6d665857cc6ca78d6e80303a02cea7a7851e85dfbd77cbdc09bd129f1ef46" dependencies = [ "autocfg", "bytes", @@ -4144,14 +4505,14 @@ dependencies = [ "signal-hook-registry", "socket2", "tokio-macros", - "winapi", + "windows-sys 0.42.0", ] [[package]] name = "tokio-macros" -version = "1.8.0" +version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9724f9a975fb987ef7a3cd9be0350edcbe130698af5b8f7a631e23d42d052484" +checksum = "d266c00fde287f55d3f1c3e96c500c362a2b8c695076ec180f27918820bc6df8" dependencies = [ "proc-macro2", "quote", @@ -4178,6 +4539,7 @@ dependencies = [ "futures-core", "pin-project-lite", "tokio", + "tokio-util", ] [[package]] @@ -4197,9 +4559,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.5.9" +version = "0.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d82e1a7758622a465f8cee077614c73484dac5b836c02ff6a40d5d1010324d7" +checksum = "1333c76748e868a4d9d1017b5ab53171dfd095f70c712fdb4653a406547f598f" dependencies = 
[ "serde", ] @@ -4212,13 +4574,13 @@ checksum = "808b51e57d0ef8f71115d8f3a01e7d3750d01c79cac4b3eda910f4389fdf92fd" [[package]] name = "toml_edit" -version = "0.15.0" +version = "0.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1541ba70885967e662f69d31ab3aeca7b1aaecfcd58679590b893e9239c3646" +checksum = "5c040d7eb2b695a2a39048f9d8e7ee865ef1c57cd9c44ba9b4a4d389095f7e6a" dependencies = [ - "combine", "indexmap", "itertools", + "nom8", "toml_datetime", ] @@ -4245,9 +4607,9 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c530c8675c1dbf98facee631536fa116b5fb6382d7dd6dc1b118d970eafe3ba" +checksum = "f873044bf02dd1e8239e9c1293ea39dad76dc594ec16185d0a1bf31d8dc8d858" dependencies = [ "async-compression", "base64", @@ -4352,9 +4714,9 @@ checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" [[package]] name = "typenum" -version = "1.15.0" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" +checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" [[package]] name = "ucd-trie" @@ -4373,9 +4735,9 @@ dependencies = [ [[package]] name = "unicode-ident" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3" +checksum = "84a22b9f218b40614adcb3f4ff08b703773ad44fa9423e4e0d346d5db86e4ebc" [[package]] name = "unicode-segmentation" @@ -4464,6 +4826,17 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca" +[[package]] +name = "walkdir" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56" +dependencies = [ + "same-file", + "winapi", + "winapi-util", +] + [[package]] name = "want" version = "0.3.0" @@ -4493,8 +4866,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eaf9f5aceeec8be17c128b2e93e031fb8a4d469bb9c4ae2d7dc1888b26887268" dependencies = [ "cfg-if", - "serde", - "serde_json", "wasm-bindgen-macro", ] @@ -4513,6 +4884,29 @@ dependencies = [ "wasm-bindgen-shared", ] +[[package]] +name = "wasm-bindgen-downcast" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5dac026d43bcca6e7ce1c0956ba68f59edf6403e8e930a5d891be72c31a44340" +dependencies = [ + "js-sys", + "once_cell", + "wasm-bindgen", + "wasm-bindgen-downcast-macros", +] + +[[package]] +name = "wasm-bindgen-downcast-macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c5020cfa87c7cecefef118055d44e3c1fc122c7ec25701d528ee458a0b45f38f" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "wasm-bindgen-futures" version = "0.4.33" @@ -4565,73 +4959,64 @@ dependencies = [ [[package]] name = "wasmer" -version = "2.3.0" +version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea8d8361c9d006ea3d7797de7bd6b1492ffd0f91a22430cfda6c1658ad57bedf" +checksum = "740f96c9e5d49f0056d716977657f3f7f8eea9923b41f46d1046946707aa038f" dependencies = [ + "bytes", "cfg-if", "indexmap", "js-sys", - "loupe", "more-asserts 0.2.2", + "serde", + "serde-wasm-bindgen", "target-lexicon", "thiserror", "wasm-bindgen", - "wasmer-artifact", + "wasm-bindgen-downcast", "wasmer-compiler", "wasmer-compiler-cranelift", "wasmer-derive", - "wasmer-engine", - "wasmer-engine-dylib", - "wasmer-engine-universal", "wasmer-types", "wasmer-vm", "wat", "winapi", ] -[[package]] -name = "wasmer-artifact" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "7aaf9428c29c1d8ad2ac0e45889ba8a568a835e33fd058964e5e500f2f7ce325" -dependencies = [ - "enumset", - "loupe", - "thiserror", - "wasmer-compiler", - "wasmer-types", -] - [[package]] name = "wasmer-compiler" -version = "2.3.0" +version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e67a6cd866aed456656db2cfea96c18baabbd33f676578482b85c51e1ee19d2c" +checksum = "001d072dd9823e5a06052621eadb531627b4a508d74b67da4590a3d5d9332dc8" dependencies = [ + "backtrace", + "cfg-if", + "enum-iterator", "enumset", - "loupe", - "rkyv", - "serde", - "serde_bytes", + "lazy_static", + "leb128", + "memmap2", + "more-asserts 0.2.2", + "region", + "rustc-demangle", "smallvec", - "target-lexicon", "thiserror", "wasmer-types", + "wasmer-vm", "wasmparser", + "winapi", ] [[package]] name = "wasmer-compiler-cranelift" -version = "2.3.0" +version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48be2f9f6495f08649e4f8b946a2cbbe119faf5a654aa1457f9504a99d23dae0" +checksum = "2974856a7ce40eb033efc9db3d480845385c27079b6e33ce51751f2f3c67e9bd" dependencies = [ "cranelift-codegen", "cranelift-entity", "cranelift-frontend", - "gimli", - "loupe", + "gimli 0.26.2", "more-asserts 0.2.2", "rayon", "smallvec", @@ -4643,16 +5028,16 @@ dependencies = [ [[package]] name = "wasmer-compiler-singlepass" -version = "2.3.0" +version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29ca2a35204d8befa85062bc7aac259a8db8070b801b8a783770ba58231d729e" +checksum = "1c6baae9a0b87050564178fc34138411682aeb725b57255b9b03735d6620d065" dependencies = [ "byteorder", "dynasm", "dynasmrt", - "gimli", + "enumset", + "gimli 0.26.2", "lazy_static", - "loupe", "more-asserts 0.2.2", "rayon", "smallvec", @@ -4662,9 +5047,9 @@ dependencies = [ [[package]] name = "wasmer-derive" -version = "2.3.0" +version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"00e50405cc2a2f74ff574584710a5f2c1d5c93744acce2ca0866084739284b51" +checksum = "36b23b52272494369a1f96428f0056425a85a66154610c988d971bbace8230f1" dependencies = [ "proc-macro-error", "proc-macro2", @@ -4672,136 +5057,37 @@ dependencies = [ "syn", ] -[[package]] -name = "wasmer-engine" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f98f010978c244db431b392aeab0661df7ea0822343334f8f2a920763548e45" -dependencies = [ - "backtrace", - "enumset", - "lazy_static", - "loupe", - "memmap2", - "more-asserts 0.2.2", - "rustc-demangle", - "serde", - "serde_bytes", - "target-lexicon", - "thiserror", - "wasmer-artifact", - "wasmer-compiler", - "wasmer-types", - "wasmer-vm", -] - -[[package]] -name = "wasmer-engine-dylib" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad0358af9c154724587731175553805648d9acb8f6657880d165e378672b7e53" -dependencies = [ - "cfg-if", - "enum-iterator", - "enumset", - "leb128", - "libloading", - "loupe", - "object 0.28.4", - "rkyv", - "serde", - "tempfile", - "tracing", - "wasmer-artifact", - "wasmer-compiler", - "wasmer-engine", - "wasmer-object", - "wasmer-types", - "wasmer-vm", - "which", -] - -[[package]] -name = "wasmer-engine-universal" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "440dc3d93c9ca47865a4f4edd037ea81bf983b5796b59b3d712d844b32dbef15" -dependencies = [ - "cfg-if", - "enumset", - "leb128", - "loupe", - "region", - "rkyv", - "wasmer-compiler", - "wasmer-engine", - "wasmer-engine-universal-artifact", - "wasmer-types", - "wasmer-vm", - "winapi", -] - -[[package]] -name = "wasmer-engine-universal-artifact" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68f1db3f54152657eb6e86c44b66525ff7801dad8328fe677da48dd06af9ad41" -dependencies = [ - "enum-iterator", - "enumset", - "loupe", - "rkyv", - "thiserror", - "wasmer-artifact", - 
"wasmer-compiler", - "wasmer-types", -] - [[package]] name = "wasmer-middlewares" -version = "2.3.0" +version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7812438ed2f37203a37007cdb5332b8475cb2b16e15d51299b2647894e9ed3a" +checksum = "3ebe29eb090b5212606a2f295ded55d44f38f65ff9cfa85795127f77e119a729" dependencies = [ - "loupe", "wasmer", "wasmer-types", "wasmer-vm", ] -[[package]] -name = "wasmer-object" -version = "2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d831335ff3a44ecf451303f6f891175c642488036b92ceceb24ac8623a8fa8b" -dependencies = [ - "object 0.28.4", - "thiserror", - "wasmer-compiler", - "wasmer-types", -] - [[package]] name = "wasmer-types" -version = "2.3.0" +version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39df01ea05dc0a9bab67e054c7cb01521e53b35a7bb90bd02eca564ed0b2667f" +checksum = "3bc6cd7a2d2d3bd901ff491f131188c1030694350685279e16e1233b9922846b" dependencies = [ - "backtrace", "enum-iterator", + "enumset", "indexmap", - "loupe", "more-asserts 0.2.2", "rkyv", - "serde", + "target-lexicon", "thiserror", ] [[package]] name = "wasmer-vm" -version = "2.3.0" +version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30d965fa61f4dc4cdb35a54daaf7ecec3563fbb94154a6c35433f879466247dd" +checksum = "e67d0cd6c0ef4985d1ce9c7d7cccf34e910804417a230fa16ab7ee904efb4c34" dependencies = [ "backtrace", "cc", @@ -4811,16 +5097,12 @@ dependencies = [ "indexmap", "lazy_static", "libc", - "loupe", "mach", "memoffset 0.6.5", "more-asserts 0.2.2", "region", - "rkyv", "scopeguard", - "serde", "thiserror", - "wasmer-artifact", "wasmer-types", "winapi", ] @@ -4874,24 +5156,13 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.22.5" +version = "0.22.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "368bfe657969fb01238bb756d351dcade285e0f6fcbd36dcb23359a5169975be" 
+checksum = "b6c71e40d7d2c34a5106301fb632274ca37242cd0c9d3e64dbece371a40a2d87" dependencies = [ "webpki", ] -[[package]] -name = "which" -version = "4.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c831fbbee9e129a8cf93e7747a82da9d95ba8e16621cae60ec2cdc849bacb7b" -dependencies = [ - "either", - "libc", - "once_cell", -] - [[package]] name = "winapi" version = "0.3.9" @@ -4908,6 +5179,15 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" +[[package]] +name = "winapi-util" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" +dependencies = [ + "winapi", +] + [[package]] name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" @@ -5092,9 +5372,9 @@ dependencies = [ [[package]] name = "zeroize_derive" -version = "1.3.2" +version = "1.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f8f187641dad4f680d25c4bfc4225b418165984179f26ca76ec4fb6441d3a17" +checksum = "44bf07cb3e50ea2003396695d58bf46bc9887a1f362260446fad6bc4e79bd36c" dependencies = [ "proc-macro2", "quote", @@ -5104,9 +5384,9 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "2.0.3+zstd.1.5.2" +version = "2.0.4+zstd.1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44ccf97612ac95f3ccb89b2d7346b345e52f1c3019be4984f0455fb4ba991f8a" +checksum = "4fa202f2ef00074143e219d15b62ffc317d17cc33909feac471c044087cad7b0" dependencies = [ "cc", "libc", diff --git a/Cargo.toml b/Cargo.toml index 4c01e663b8e..6eb3224f43e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -42,7 +42,6 @@ resolver = "2" [profile.dev.package."*"] opt-level = 3 # Speed-up the CI - # # Features # # * testing: enable some tests specific exports. 
diff --git a/README.md b/README.md index 36a1e13ea51..f951e1b8fe2 100644 --- a/README.md +++ b/README.md @@ -19,8 +19,7 @@ published in this [technical paper](https://arxiv.org/pdf/1803.09029). It shows that throughput of 10'000 transactions per second is reached even in a fully decentralized network with thousands of nodes. -An easy-to-read blog post introduction with videos is written -[here](https://massa.net/blog/introduction/). +You can watch a short introduction video [here](https://www.youtube.com/watch?v=HbILgK1Wh-4). We are now releasing the **Massa testnet** in this GitHub repository, with its explorer available at . diff --git a/massa-api/Cargo.toml b/massa-api/Cargo.toml index 6d6fb383eac..0a5190689fc 100644 --- a/massa-api/Cargo.toml +++ b/massa-api/Cargo.toml @@ -7,7 +7,7 @@ edition = "2021" [dependencies] displaydoc = "0.2" -jsonrpsee = { version = "0.16.1", features = ["server", "macros"] } +jsonrpsee = { version = "0.16.2", features = ["server", "macros"] } async-trait = "0.1.58" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0.87" @@ -16,6 +16,7 @@ tower = { version = "0.4.13", features = ["full"] } hyper = "0.14.20" thiserror = "1.0" tokio = { version = "1.21", features = ["full"] } +tokio-stream = { version = "0.1", features = ["sync"] } tracing = "0.1" itertools = "0.10" parking_lot = { version = "0.12", features = ["deadlock_detection"] } diff --git a/massa-api/src/api.rs b/massa-api/src/api.rs new file mode 100644 index 00000000000..5e9f2ea4d19 --- /dev/null +++ b/massa-api/src/api.rs @@ -0,0 +1,91 @@ +//! Copyright (c) 2022 MASSA LABS +//! 
Json RPC API for a massa-node +use std::net::SocketAddr; + +use crate::api_trait::MassaApiServer; +use crate::{APIConfig, ApiServer, ApiV2, StopHandle, API}; +use async_trait::async_trait; +use jsonrpsee::core::error::SubscriptionClosed; +use jsonrpsee::core::{Error as JsonRpseeError, RpcResult}; +use jsonrpsee::types::SubscriptionResult; +use jsonrpsee::SubscriptionSink; +use massa_consensus_exports::ConsensusChannels; +use massa_models::version::Version; +use massa_protocol_exports::ProtocolSenders; +use serde::Serialize; +use tokio_stream::wrappers::BroadcastStream; + +impl API { + /// generate a new massa API + pub fn new( + consensus_channels: ConsensusChannels, + protocol_senders: ProtocolSenders, + api_settings: APIConfig, + version: Version, + ) -> Self { + API(ApiV2 { + consensus_channels, + protocol_senders, + api_settings, + version, + }) + } +} + +#[async_trait] +impl ApiServer for API { + async fn serve( + self, + url: &SocketAddr, + api_config: &APIConfig, + ) -> Result { + crate::serve(self.into_rpc(), url, api_config).await + } +} + +#[doc(hidden)] +#[async_trait] +impl MassaApiServer for API { + async fn get_version(&self) -> RpcResult { + Ok(self.0.version) + } + + fn subscribe_new_blocks(&self, sink: SubscriptionSink) -> SubscriptionResult { + broadcast_via_ws(self.0.consensus_channels.block_sender.clone(), sink); + Ok(()) + } + + fn subscribe_new_blocks_headers(&self, sink: SubscriptionSink) -> SubscriptionResult { + broadcast_via_ws(self.0.consensus_channels.block_header_sender.clone(), sink); + Ok(()) + } + + fn subscribe_new_filled_blocks(&self, sink: SubscriptionSink) -> SubscriptionResult { + broadcast_via_ws(self.0.consensus_channels.filled_block_sender.clone(), sink); + Ok(()) + } + + fn subscribe_new_operations(&self, sink: SubscriptionSink) -> SubscriptionResult { + broadcast_via_ws(self.0.protocol_senders.operation_sender.clone(), sink); + Ok(()) + } +} + +/// Brodcast the stream(sender) content via a WebSocket +fn broadcast_via_ws( + 
sender: tokio::sync::broadcast::Sender, + mut sink: SubscriptionSink, +) { + let rx = BroadcastStream::new(sender.subscribe()); + tokio::spawn(async move { + match sink.pipe_from_try_stream(rx).await { + SubscriptionClosed::Success => { + sink.close(SubscriptionClosed::Success); + } + SubscriptionClosed::RemotePeerAborted => (), + SubscriptionClosed::Failed(err) => { + sink.close(err); + } + }; + }); +} diff --git a/massa-api/src/api_trait.rs b/massa-api/src/api_trait.rs new file mode 100644 index 00000000000..baf6f90e05b --- /dev/null +++ b/massa-api/src/api_trait.rs @@ -0,0 +1,45 @@ +//! Copyright (c) 2022 MASSA LABS +//! Json RPC API for a massa-node +use jsonrpsee::core::RpcResult; +use jsonrpsee::proc_macros::rpc; +use massa_models::version::Version; + +/// Exposed API methods +#[rpc(server)] +pub trait MassaApi { + /// Get Massa node version. + #[method(name = "get_version")] + async fn get_version(&self) -> RpcResult; + + /// New produced block. + #[subscription( + name = "subscribe_new_blocks" => "new_blocks", + unsubscribe = "unsubscribe_new_blocks", + item = Block + )] + fn subscribe_new_blocks(&self); + + /// New produced blocks headers. + #[subscription( + name = "subscribe_new_blocks_headers" => "new_blocks_headers", + unsubscribe = "unsubscribe_new_blocks_headers", + item = BlockHeader + )] + fn subscribe_new_blocks_headers(&self); + + /// New produced block with operations content. + #[subscription( + name = "subscribe_new_filled_blocks" => "new_filled_blocks", + unsubscribe = "unsubscribe_new_filled_blocks", + item = FilledBlock + )] + fn subscribe_new_filled_blocks(&self); + + /// New produced operations. 
+ #[subscription( + name = "subscribe_new_operations" => "new_operations", + unsubscribe = "unsubscribe_new_operations", + item = Operation + )] + fn subscribe_new_operations(&self); +} diff --git a/massa-api/src/config.rs b/massa-api/src/config.rs index 086dcc1909d..6f1348d7e9b 100644 --- a/massa-api/src/config.rs +++ b/massa-api/src/config.rs @@ -16,10 +16,16 @@ pub struct APIConfig { pub bind_private: SocketAddr, /// bind for the public API pub bind_public: SocketAddr, + /// bind for the Massa API + pub bind_api: SocketAddr, /// max argument count pub max_arguments: u64, /// openrpc specification path pub openrpc_spec_path: PathBuf, + /// bootstrap whitelist path + pub bootstrap_whitelist_path: PathBuf, + /// bootstrap blacklist path + pub bootstrap_blacklist_path: PathBuf, /// maximum size in bytes of a request. pub max_request_body_size: u32, /// maximum size in bytes of a response. diff --git a/massa-api/src/error.rs b/massa-api/src/error.rs index 958b4def26f..2ce7d9efd54 100644 --- a/massa-api/src/error.rs +++ b/massa-api/src/error.rs @@ -5,6 +5,7 @@ use jsonrpsee::{ core::Error as JsonRpseeError, types::error::{CallError, ErrorObject}, }; + use massa_consensus_exports::error::ConsensusError; use massa_execution_exports::ExecutionError; use massa_hash::MassaHashError; @@ -13,10 +14,9 @@ use massa_network_exports::NetworkError; use massa_protocol_exports::ProtocolError; use massa_time::TimeError; use massa_wallet::WalletError; -use thiserror::Error; #[non_exhaustive] -#[derive(Display, Error, Debug)] +#[derive(Display, thiserror::Error, Debug)] pub enum ApiError { /// Send channel error: {0} SendChannelError(String), diff --git a/massa-api/src/lib.rs b/massa-api/src/lib.rs index 9033f4426a9..b6b72e5333e 100644 --- a/massa-api/src/lib.rs +++ b/massa-api/src/lib.rs @@ -3,12 +3,14 @@ #![feature(async_closure)] #![warn(missing_docs)] #![warn(unused_crate_dependencies)] +use crate::api_trait::MassaApiServer; use crate::error::ApiError::WrongAPI; use hyper::Method; 
use jsonrpsee::core::{Error as JsonRpseeError, RpcResult}; use jsonrpsee::proc_macros::rpc; use jsonrpsee::server::{AllowHosts, ServerBuilder, ServerHandle}; -use massa_consensus_exports::ConsensusController; +use jsonrpsee::RpcModule; +use massa_consensus_exports::{ConsensusChannels, ConsensusController}; use massa_execution_exports::ExecutionController; use massa_models::api::{ AddressInfo, BlockInfo, BlockSummary, DatastoreEntryInput, DatastoreEntryOutput, @@ -32,7 +34,7 @@ use massa_models::{ use massa_network_exports::{NetworkCommandSender, NetworkConfig}; use massa_pool_exports::PoolController; use massa_pos_exports::SelectorController; -use massa_protocol_exports::ProtocolCommandSender; +use massa_protocol_exports::{ProtocolCommandSender, ProtocolSenders}; use massa_storage::Storage; use massa_wallet::Wallet; use parking_lot::RwLock; @@ -44,6 +46,8 @@ use tower_http::cors::{Any, CorsLayer}; use tokio::sync::mpsc; use tracing::{info, warn}; +mod api; +mod api_trait; mod config; mod error; mod private; @@ -72,8 +76,6 @@ pub struct Public { pub version: Version, /// link to the network component pub network_command_sender: NetworkCommandSender, - /// compensation milliseconds (used to sync time with bootstrap server) - pub compensation_millis: i64, /// our node id pub node_id: NodeId, } @@ -92,6 +94,18 @@ pub struct Private { pub node_wallet: Arc>, } +/// API v2 content +pub struct ApiV2 { + /// link(channels) to the consensus component + pub consensus_channels: ConsensusChannels, + /// link(channels) to the protocol component + pub protocol_senders: ProtocolSenders, + /// API settings + pub api_settings: APIConfig, + /// node version + pub version: Version, +} + /// The API wrapper pub struct API(T); @@ -106,8 +120,19 @@ pub trait RpcServer: MassaRpcServer { ) -> Result; } -async fn serve( - api: impl MassaRpcServer, +/// Used to manage the API +#[async_trait::async_trait] +pub trait ApiServer: MassaApiServer { + /// Start the API + async fn serve( + self, + 
url: &SocketAddr, + api_config: &APIConfig, + ) -> Result; +} + +async fn serve( + api: RpcModule, url: &SocketAddr, api_config: &APIConfig, ) -> Result { @@ -134,8 +159,8 @@ async fn serve( server_builder = server_builder.http_only(); } else if api_config.enable_ws && !api_config.enable_http { server_builder = server_builder.ws_only() - } else { - panic!("wrong server configuration, you can't disable both http and ws") + } else if !api_config.enable_http && !api_config.enable_ws { + panic!("wrong server configuration, you can't disable both http and ws"); } let cors = CorsLayer::new() @@ -153,7 +178,7 @@ async fn serve( .await .expect("failed to build server"); - let server_handler = server.start(api.into_rpc()).expect("server start failed"); + let server_handler = server.start(api).expect("server start failed"); let stop_handler = StopHandle { server_handler }; Ok(stop_handler) @@ -226,17 +251,50 @@ pub trait MassaRpc { #[method(name = "node_ban_by_id")] async fn node_ban_by_id(&self, arg: Vec) -> RpcResult<()>; - /// whitelist given IP address. + /// Returns node peers whitelist IP address(es). + #[method(name = "node_peers_whitelist")] + async fn node_peers_whitelist(&self) -> RpcResult>; + + /// Add IP address(es) to node peers whitelist. /// No confirmation to expect. /// Note: If the ip was unknown it adds it to the known peers, otherwise it updates the peer type - #[method(name = "node_whitelist")] - async fn node_whitelist(&self, arg: Vec) -> RpcResult<()>; + #[method(name = "node_add_to_peers_whitelist")] + async fn node_add_to_peers_whitelist(&self, arg: Vec) -> RpcResult<()>; - /// remove from whitelist given IP address. + /// Remove from peers whitelist given IP address(es). /// keep it as standard /// No confirmation to expect. 
- #[method(name = "node_remove_from_whitelist")] - async fn node_remove_from_whitelist(&self, arg: Vec) -> RpcResult<()>; + #[method(name = "node_remove_from_peers_whitelist")] + async fn node_remove_from_peers_whitelist(&self, arg: Vec) -> RpcResult<()>; + + /// Returns node bootstrap whitelist IP address(es). + #[method(name = "node_bootstrap_whitelist")] + async fn node_bootstrap_whitelist(&self) -> RpcResult>; + + /// Allow everyone to bootstrap from the node. + /// remove bootstrap whitelist configuration file. + #[method(name = "node_bootstrap_whitelist_allow_all")] + async fn node_bootstrap_whitelist_allow_all(&self) -> RpcResult<()>; + + /// Add IP address(es) to node bootstrap whitelist. + #[method(name = "node_add_to_bootstrap_whitelist")] + async fn node_add_to_bootstrap_whitelist(&self, arg: Vec) -> RpcResult<()>; + + /// Remove IP address(es) from bootstrap whitelist. + #[method(name = "node_remove_from_bootstrap_whitelist")] + async fn node_remove_from_bootstrap_whitelist(&self, arg: Vec) -> RpcResult<()>; + + /// Returns node bootstrap blacklist IP address(es). + #[method(name = "node_bootstrap_blacklist")] + async fn node_bootstrap_blacklist(&self) -> RpcResult>; + + /// Add IP address(es) to node bootstrap blacklist. + #[method(name = "node_add_to_bootstrap_blacklist")] + async fn node_add_to_bootstrap_blacklist(&self, arg: Vec) -> RpcResult<()>; + + /// Remove IP address(es) from bootstrap blacklist. + #[method(name = "node_remove_from_bootstrap_blacklist")] + async fn node_remove_from_bootstrap_blacklist(&self, arg: Vec) -> RpcResult<()>; /// Unban given IP address(es). /// No confirmation to expect. @@ -260,17 +318,17 @@ pub trait MassaRpc { #[method(name = "get_stakers")] async fn get_stakers(&self) -> RpcResult>; - /// Returns operations information associated to a given list of operations' IDs. + /// Returns operation(s) information associated to a given list of operation(s) ID(s).
#[method(name = "get_operations")] async fn get_operations(&self, arg: Vec) -> RpcResult>; - /// Get endorsements (not yet implemented). + /// Returns endorsement(s) information associated to a given list of endorsement(s) ID(s) #[method(name = "get_endorsements")] async fn get_endorsements(&self, arg: Vec) -> RpcResult>; - /// Get information on a block given its hash. - #[method(name = "get_block")] - async fn get_block(&self, arg: BlockId) -> RpcResult; + /// Returns block(s) information associated to a given list of block(s) ID(s) + #[method(name = "get_blocks")] + async fn get_blocks(&self, arg: Vec) -> RpcResult>; /// Get information on the block at a slot in the blockclique. /// If there is no block at this slot a `None` is returned. @@ -315,7 +373,3 @@ pub trait MassaRpc { fn wrong_api() -> RpcResult { Err((WrongAPI).into()) } - -fn _jsonrpsee_assert(_method: &str, _request: Value, _response: Value) { - // TODO: jsonrpsee_client_transports::RawClient::call_method ... see #1182 -} diff --git a/massa-api/src/private.rs b/massa-api/src/private.rs index 161b2610053..c1048cac6a6 100644 --- a/massa-api/src/private.rs +++ b/massa-api/src/private.rs @@ -5,12 +5,13 @@ use crate::error::ApiError; use crate::{MassaRpcServer, Private, RpcServer, StopHandle, Value, API}; use async_trait::async_trait; +use itertools::Itertools; use jsonrpsee::core::{Error as JsonRpseeError, RpcResult}; use massa_execution_exports::ExecutionController; use massa_models::api::{ AddressInfo, BlockInfo, BlockSummary, DatastoreEntryInput, DatastoreEntryOutput, - EndorsementInfo, EventFilter, NodeStatus, OperationInfo, OperationInput, - ReadOnlyBytecodeExecution, ReadOnlyCall, TimeInterval, + EndorsementInfo, EventFilter, ListType, NodeStatus, OperationInfo, OperationInput, + ReadOnlyBytecodeExecution, ReadOnlyCall, ScrudOperation, TimeInterval, }; use massa_models::clique::Clique; use massa_models::composite::PubkeySig; @@ -30,7 +31,10 @@ use massa_signature::KeyPair; use 
massa_wallet::Wallet; use parking_lot::RwLock; +use std::collections::BTreeSet; +use std::fs::{remove_file, OpenOptions}; use std::net::{IpAddr, SocketAddr}; +use std::path::PathBuf; use std::str::FromStr; use std::sync::Arc; use tokio::sync::mpsc; @@ -64,7 +68,7 @@ impl RpcServer for API { url: &SocketAddr, settings: &APIConfig, ) -> Result { - crate::serve(self, url, settings).await + crate::serve(self.into_rpc(), url, settings).await } } @@ -81,10 +85,10 @@ impl MassaRpcServer for API { async fn node_sign_message(&self, message: Vec) -> RpcResult { let network_command_sender = self.0.network_command_sender.clone(); - match network_command_sender.node_sign_message(message).await { - Ok(public_key_signature) => return Ok(public_key_signature), - Err(e) => return Err(ApiError::from(e).into()), - }; + network_command_sender + .node_sign_message(message) + .await + .map_err(|e| ApiError::NetworkError(e).into()) } async fn add_staking_secret_keys(&self, secret_keys: Vec) -> RpcResult<()> { @@ -95,10 +99,10 @@ impl MassaRpcServer for API { let node_wallet = self.0.node_wallet.clone(); let mut w_wallet = node_wallet.write(); - match w_wallet.add_keypairs(keypairs) { - Ok(_) => return Ok(()), - Err(e) => return Err(ApiError::from(e).into()), - }; + w_wallet + .add_keypairs(keypairs) + .map(|_| ()) + .map_err(|e| ApiError::WalletError(e).into()) } async fn execute_read_only_bytecode( @@ -118,48 +122,47 @@ impl MassaRpcServer for API { async fn remove_staking_addresses(&self, addresses: Vec
) -> RpcResult<()> { let node_wallet = self.0.node_wallet.clone(); let mut w_wallet = node_wallet.write(); - match w_wallet.remove_addresses(&addresses) { - Ok(()) => return Ok(()), - Err(e) => return Err(ApiError::from(e).into()), - }; + w_wallet + .remove_addresses(&addresses) + .map_err(|e| ApiError::WalletError(e).into()) } async fn get_staking_addresses(&self) -> RpcResult> { let node_wallet = self.0.node_wallet.clone(); - let addresses_set = node_wallet.write().get_wallet_address_list(); - Ok(addresses_set) + let w_wallet = node_wallet.read(); + Ok(w_wallet.get_wallet_address_list()) } async fn node_ban_by_ip(&self, ips: Vec) -> RpcResult<()> { let network_command_sender = self.0.network_command_sender.clone(); - match network_command_sender.node_ban_by_ips(ips).await { - Ok(()) => return Ok(()), - Err(e) => return Err(ApiError::from(e).into()), - }; + network_command_sender + .node_ban_by_ips(ips) + .await + .map_err(|e| ApiError::NetworkError(e).into()) } async fn node_ban_by_id(&self, ids: Vec) -> RpcResult<()> { let network_command_sender = self.0.network_command_sender.clone(); - match network_command_sender.node_ban_by_ids(ids).await { - Ok(()) => return Ok(()), - Err(e) => return Err(ApiError::from(e).into()), - }; + network_command_sender + .node_ban_by_ids(ids) + .await + .map_err(|e| ApiError::NetworkError(e).into()) } async fn node_unban_by_id(&self, ids: Vec) -> RpcResult<()> { let network_command_sender = self.0.network_command_sender.clone(); - match network_command_sender.node_unban_by_ids(ids).await { - Ok(()) => return Ok(()), - Err(e) => return Err(ApiError::from(e).into()), - }; + network_command_sender + .node_unban_by_ids(ids) + .await + .map_err(|e| ApiError::NetworkError(e).into()) } async fn node_unban_by_ip(&self, ips: Vec) -> RpcResult<()> { let network_command_sender = self.0.network_command_sender.clone(); - match network_command_sender.node_unban_ips(ips).await { - Ok(()) => return Ok(()), - Err(e) => return 
Err(ApiError::from(e).into()), - }; + network_command_sender + .node_unban_ips(ips) + .await + .map_err(|e| ApiError::NetworkError(e).into()) } async fn get_status(&self) -> RpcResult { @@ -182,8 +185,8 @@ impl MassaRpcServer for API { crate::wrong_api::>() } - async fn get_block(&self, _: BlockId) -> RpcResult { - crate::wrong_api::() + async fn get_blocks(&self, _: Vec) -> RpcResult> { + crate::wrong_api::>() } async fn get_blockclique_block_by_slot(&self, _: Slot) -> RpcResult> { @@ -213,23 +216,227 @@ impl MassaRpcServer for API { crate::wrong_api::>() } - async fn node_whitelist(&self, ips: Vec) -> RpcResult<()> { + async fn node_peers_whitelist(&self) -> RpcResult> { let network_command_sender = self.0.network_command_sender.clone(); - match network_command_sender.whitelist(ips).await { - Ok(()) => return Ok(()), - Err(e) => return Err(ApiError::from(e).into()), - }; + match network_command_sender.get_peers().await { + Ok(peers) => Ok(peers.peers.into_keys().sorted().collect::>()), + Err(e) => Err(ApiError::NetworkError(e).into()), + } } - async fn node_remove_from_whitelist(&self, ips: Vec) -> RpcResult<()> { + async fn node_add_to_peers_whitelist(&self, ips: Vec) -> RpcResult<()> { let network_command_sender = self.0.network_command_sender.clone(); - match network_command_sender.remove_from_whitelist(ips).await { - Ok(()) => return Ok(()), - Err(e) => return Err(ApiError::from(e).into()), - }; + network_command_sender + .add_to_whitelist(ips) + .await + .map_err(|e| ApiError::NetworkError(e).into()) + } + + async fn node_remove_from_peers_whitelist(&self, ips: Vec) -> RpcResult<()> { + let network_command_sender = self.0.network_command_sender.clone(); + network_command_sender + .remove_from_whitelist(ips) + .await + .map_err(|e| ApiError::NetworkError(e).into()) + } + + async fn node_bootstrap_whitelist(&self) -> RpcResult> { + read_ips_from_jsonfile( + self.0.api_settings.bootstrap_whitelist_path.clone(), + &ListType::Whitelist, + ) + } + + async fn 
node_bootstrap_whitelist_allow_all(&self) -> RpcResult<()> { + remove_file(self.0.api_settings.bootstrap_whitelist_path.clone()).map_err(|e| { + ApiError::InternalServerError(format!( + "failed to delete bootsrap whitelist configuration file: {}", + e + )) + .into() + }) + } + + async fn node_add_to_bootstrap_whitelist(&self, ips: Vec) -> RpcResult<()> { + run_scrud_operation( + self.0.api_settings.bootstrap_whitelist_path.clone(), + ips, + ListType::Whitelist, + ScrudOperation::Create, + ) + } + + async fn node_remove_from_bootstrap_whitelist(&self, ips: Vec) -> RpcResult<()> { + run_scrud_operation( + self.0.api_settings.bootstrap_whitelist_path.clone(), + ips, + ListType::Whitelist, + ScrudOperation::Delete, + ) + } + + async fn node_bootstrap_blacklist(&self) -> RpcResult> { + read_ips_from_jsonfile( + self.0.api_settings.bootstrap_blacklist_path.clone(), + &ListType::Blacklist, + ) + } + + async fn node_add_to_bootstrap_blacklist(&self, ips: Vec) -> RpcResult<()> { + run_scrud_operation( + self.0.api_settings.bootstrap_blacklist_path.clone(), + ips, + ListType::Blacklist, + ScrudOperation::Create, + ) + } + + async fn node_remove_from_bootstrap_blacklist(&self, ips: Vec) -> RpcResult<()> { + run_scrud_operation( + self.0.api_settings.bootstrap_blacklist_path.clone(), + ips, + ListType::Blacklist, + ScrudOperation::Delete, + ) } async fn get_openrpc_spec(&self) -> RpcResult { crate::wrong_api::() } } + +/// Run Search, Create, Read, Update, Delete operation on bootsrap list of IP(s) +fn run_scrud_operation( + bootstrap_list_file: PathBuf, + ips: Vec, + list_type: ListType, + scrud_operation: ScrudOperation, +) -> RpcResult<()> { + match scrud_operation { + ScrudOperation::Create => get_file_len(bootstrap_list_file.clone(), &list_type, true) + .and_then(|length| { + if length == 0 { + write_ips_to_jsonfile(bootstrap_list_file, BTreeSet::from_iter(ips), &list_type) + } else { + read_ips_from_jsonfile(bootstrap_list_file.clone(), &list_type) + 
.map(BTreeSet::from_iter) + .and_then(|mut list_ips: BTreeSet| { + list_ips.extend(ips); + write_ips_to_jsonfile(bootstrap_list_file, list_ips, &list_type) + }) + } + }), + ScrudOperation::Delete => get_file_len(bootstrap_list_file.clone(), &list_type, false) + .and_then(|length| { + if length == 0 { + Err(ApiError::InternalServerError(format!( + "failed, bootsrap {} configuration file is empty", + list_type + )) + .into()) + } else { + read_ips_from_jsonfile(bootstrap_list_file.clone(), &list_type) + .map(BTreeSet::from_iter) + .and_then(|mut list_ips: BTreeSet| { + if list_ips.is_empty() { + return Err(ApiError::InternalServerError(format!( + "failed to execute delete operation, bootsrap {} is empty", + list_type + )) + .into()); + } + ips.into_iter().for_each(|ip| { + list_ips.remove(&ip); + }); + write_ips_to_jsonfile(bootstrap_list_file, list_ips, &list_type) + }) + } + }), + _ => Err(ApiError::BadRequest(format!( + "failed operation {} is not supported on {}", + list_type, scrud_operation + )) + .into()), + } +} + +/// Get length of the given file if it exists(or create it if requested) +fn get_file_len( + bootstrap_list_file: PathBuf, + list_type: &ListType, + create: bool, +) -> RpcResult { + OpenOptions::new() + .read(true) + .write(true) + .create(create) + .open(bootstrap_list_file) + .map_err(|e| { + ApiError::InternalServerError(format!( + "failed to read bootsrap {} configuration file: {}", + list_type, e + )) + .into() + }) + .and_then(|file| match file.metadata() { + Ok(metadata) => Ok(metadata.len()), + Err(e) => Err(ApiError::InternalServerError(format!( + "failed to read bootsrap {} configuration file metadata: {}", + list_type, e + )) + .into()), + }) +} + +/// Read bootsrap list IP(s) from json file +fn read_ips_from_jsonfile( + bootstrap_list_file: PathBuf, + list_type: &ListType, +) -> RpcResult> { + std::fs::read_to_string(bootstrap_list_file) + .map_err(|e| { + ApiError::InternalServerError(format!( + "failed to read bootsrap {} 
configuration file: {}", + list_type, e + )) + .into() + }) + .and_then(|bootsrap_list_str| { + serde_json::from_str(&bootsrap_list_str).map_err(|e| { + ApiError::InternalServerError(format!( + "failed to parse bootsrap {} configuration file: {}", + list_type, e + )) + .into() + }) + }) +} + +/// Write bootsrap list IP(s) from json file +fn write_ips_to_jsonfile( + bootstrap_list_file: PathBuf, + ips: BTreeSet, + list_type: &ListType, +) -> RpcResult<()> { + OpenOptions::new() + .write(true) + .create(true) + .truncate(true) + .open(bootstrap_list_file) + .map_err(|e| { + ApiError::InternalServerError(format!( + "failed to create bootsrap {} configuration file: {}", + list_type, e + )) + .into() + }) + .and_then(|file| { + serde_json::to_writer_pretty(file, &ips).map_err(|e| { + ApiError::InternalServerError(format!( + "failed to write bootsrap {} configuration file: {}", + list_type, e + )) + .into() + }) + }) +} diff --git a/massa-api/src/public.rs b/massa-api/src/public.rs index 8d40ac9511d..58b82824d8d 100644 --- a/massa-api/src/public.rs +++ b/massa-api/src/public.rs @@ -68,7 +68,6 @@ impl API { network_settings: NetworkConfig, version: Version, network_command_sender: NetworkCommandSender, - compensation_millis: i64, node_id: NodeId, storage: Storage, ) -> Self { @@ -80,7 +79,6 @@ impl API { version, network_command_sender, protocol_command_sender, - compensation_millis, node_id, execution_controller, selector_controller, @@ -96,7 +94,7 @@ impl RpcServer for API { url: &SocketAddr, api_config: &APIConfig, ) -> Result { - crate::serve(self, url, api_config).await + crate::serve(self.into_rpc(), url, api_config).await } } @@ -184,7 +182,7 @@ impl MassaRpcServer for API { .map_or_else(|_| Slot::new(0, 0), |v| v.out.slot), result: result.as_ref().map_or_else( |err| ReadOnlyResult::Error(format!("readonly call failed: {}", err)), - |_| ReadOnlyResult::Ok, + |res| ReadOnlyResult::Ok(res.call_result.clone()), ), gas_cost: result.as_ref().map_or_else(|_| 0, |v| 
v.gas_cost), output_events: result @@ -259,7 +257,7 @@ impl MassaRpcServer for API { .map_or_else(|_| Slot::new(0, 0), |v| v.out.slot), result: result.as_ref().map_or_else( |err| ReadOnlyResult::Error(format!("readonly call failed: {}", err)), - |_| ReadOnlyResult::Ok, + |res| ReadOnlyResult::Ok(res.call_result.clone()), ), gas_cost: result.as_ref().map_or_else(|_| 0, |v| v.gas_cost), output_events: result @@ -304,13 +302,12 @@ impl MassaRpcServer for API { let network_config = self.0.network_settings.clone(); let version = self.0.version; let api_settings = self.0.api_settings.clone(); - let compensation_millis = self.0.compensation_millis; let pool_command_sender = self.0.pool_command_sender.clone(); let node_id = self.0.node_id; let config = CompactConfig::default(); - let now = match MassaTime::now(compensation_millis) { + let now = match MassaTime::now() { Ok(now) => now, - Err(e) => return Err(ApiError::from(e).into()), + Err(e) => return Err(ApiError::TimeError(e).into()), }; let last_slot_result = get_latest_block_slot_at_timestamp( @@ -321,14 +318,14 @@ impl MassaRpcServer for API { ); let last_slot = match last_slot_result { Ok(last_slot) => last_slot, - Err(e) => return Err(ApiError::from(e).into()), + Err(e) => return Err(ApiError::ModelsError(e).into()), }; let execution_stats = execution_controller.get_stats(); let consensus_stats_result = consensus_controller.get_stats(); let consensus_stats = match consensus_stats_result { Ok(consensus_stats) => consensus_stats, - Err(e) => return Err(ApiError::from(e).into()), + Err(e) => return Err(ApiError::ConsensusError(e).into()), }; let (network_stats_result, peers_result) = tokio::join!( @@ -338,12 +335,12 @@ impl MassaRpcServer for API { let network_stats = match network_stats_result { Ok(network_stats) => network_stats, - Err(e) => return Err(ApiError::from(e).into()), + Err(e) => return Err(ApiError::NetworkError(e).into()), }; let peers = match peers_result { Ok(peers) => peers, - Err(e) => return 
Err(ApiError::from(e).into()), + Err(e) => return Err(ApiError::NetworkError(e).into()), }; let pool_stats = ( @@ -357,7 +354,7 @@ impl MassaRpcServer for API { let next_slot = match next_slot_result { Ok(next_slot) => next_slot, - Err(e) => return Err(ApiError::from(e).into()), + Err(e) => return Err(ApiError::ModelsError(e).into()), }; let connected_nodes = peers @@ -397,11 +394,10 @@ impl MassaRpcServer for API { async fn get_stakers(&self) -> RpcResult> { let execution_controller = self.0.execution_controller.clone(); let cfg = self.0.api_settings.clone(); - let compensation_millis = self.0.compensation_millis; - let now = match MassaTime::now(compensation_millis) { + let now = match MassaTime::now() { Ok(now) => now, - Err(e) => return Err(ApiError::from(e).into()), + Err(e) => return Err(ApiError::TimeError(e).into()), }; let latest_block_slot_at_timestamp_result = get_latest_block_slot_at_timestamp( @@ -415,7 +411,7 @@ impl MassaRpcServer for API { Ok(curr_cycle) => curr_cycle .unwrap_or_else(|| Slot::new(0, 0)) .get_cycle(cfg.periods_per_cycle), - Err(e) => return Err(ApiError::from(e).into()), + Err(e) => return Err(ApiError::ModelsError(e).into()), }; let mut staker_vec = execution_controller @@ -584,40 +580,45 @@ impl MassaRpcServer for API { Ok(res) } - /// gets a block. Returns None if not found + /// gets a block(s). 
Returns nothing if not found /// only active blocks are returned - async fn get_block(&self, id: BlockId) -> RpcResult { + async fn get_blocks(&self, ids: Vec) -> RpcResult> { let consensus_controller = self.0.consensus_controller.clone(); let storage = self.0.storage.clone_without_refs(); - let block = match storage.read_blocks().get(&id).cloned() { - Some(b) => b.content, - None => { - return Ok(BlockInfo { id, content: None }); - } - }; - - let graph_status = consensus_controller - .get_block_statuses(&[id]) + let blocks = ids .into_iter() - .next() - .expect("expected get_block_statuses to return one element"); - - let is_final = graph_status == BlockGraphStatus::Final; - let is_in_blockclique = graph_status == BlockGraphStatus::ActiveInBlockclique; - let is_candidate = graph_status == BlockGraphStatus::ActiveInBlockclique - || graph_status == BlockGraphStatus::ActiveInAlternativeCliques; - let is_discarded = graph_status == BlockGraphStatus::Discarded; - - Ok(BlockInfo { - id, - content: Some(BlockInfoContent { - is_final, - is_in_blockclique, - is_candidate, - is_discarded, - block, - }), - }) + .filter_map(|id| { + if let Some(wrapped_block) = storage.read_blocks().get(&id).cloned() { + if let Some(graph_status) = consensus_controller + .get_block_statuses(&[id]) + .into_iter() + .next() + { + let is_final = graph_status == BlockGraphStatus::Final; + let is_in_blockclique = + graph_status == BlockGraphStatus::ActiveInBlockclique; + let is_candidate = graph_status == BlockGraphStatus::ActiveInBlockclique + || graph_status == BlockGraphStatus::ActiveInAlternativeCliques; + let is_discarded = graph_status == BlockGraphStatus::Discarded; + + return Some(BlockInfo { + id, + content: Some(BlockInfoContent { + is_final, + is_in_blockclique, + is_candidate, + is_discarded, + block: wrapped_block.content, + }), + }); + } + } + + None + }) + .collect::>(); + + Ok(blocks) } async fn get_blockclique_block_by_slot(&self, slot: Slot) -> RpcResult> { @@ -655,12 +656,12 @@ 
impl MassaRpcServer for API { let (start_slot, end_slot) = match time_range_to_slot_range_result { Ok(time_range_to_slot_range) => time_range_to_slot_range, - Err(e) => return Err(ApiError::from(e).into()), + Err(e) => return Err(ApiError::ModelsError(e).into()), }; let graph = match consensus_controller.get_block_graph_status(start_slot, end_slot) { Ok(graph) => graph, - Err(e) => return Err(ApiError::from(e).into()), + Err(e) => return Err(ApiError::ConsensusError(e).into()), }; let mut res = Vec::with_capacity(graph.active_blocks.len()); @@ -765,7 +766,6 @@ impl MassaRpcServer for API { self.0.api_settings.thread_count, self.0.api_settings.t0, self.0.api_settings.genesis_timestamp, - self.0.compensation_millis, ) .expect("could not get latest current slot") .unwrap_or_else(|| Slot::new(0, 0)); @@ -892,7 +892,7 @@ impl MassaRpcServer for API { Ok(operation) => { let _verify_signature = match operation.verify_signature() { Ok(()) => (), - Err(e) => return Err(ApiError::from(e).into()), + Err(e) => return Err(ApiError::ModelsError(e).into()), }; Ok(operation) } @@ -931,11 +931,43 @@ impl MassaRpcServer for API { Ok(events) } - async fn node_whitelist(&self, _: Vec) -> RpcResult<()> { + async fn node_peers_whitelist(&self) -> RpcResult> { + crate::wrong_api::>() + } + + async fn node_add_to_peers_whitelist(&self, _: Vec) -> RpcResult<()> { + crate::wrong_api::<()>() + } + + async fn node_remove_from_peers_whitelist(&self, _: Vec) -> RpcResult<()> { + crate::wrong_api::<()>() + } + + async fn node_bootstrap_whitelist(&self) -> RpcResult> { + crate::wrong_api::>() + } + + async fn node_bootstrap_whitelist_allow_all(&self) -> RpcResult<()> { + crate::wrong_api::<()>() + } + + async fn node_add_to_bootstrap_whitelist(&self, _: Vec) -> RpcResult<()> { + crate::wrong_api::<()>() + } + + async fn node_remove_from_bootstrap_whitelist(&self, _: Vec) -> RpcResult<()> { + crate::wrong_api::<()>() + } + + async fn node_bootstrap_blacklist(&self) -> RpcResult> { + 
crate::wrong_api::>() + } + + async fn node_add_to_bootstrap_blacklist(&self, _: Vec) -> RpcResult<()> { crate::wrong_api::<()>() } - async fn node_remove_from_whitelist(&self, _: Vec) -> RpcResult<()> { + async fn node_remove_from_bootstrap_blacklist(&self, _: Vec) -> RpcResult<()> { crate::wrong_api::<()>() } diff --git a/massa-async-pool/Cargo.toml b/massa-async-pool/Cargo.toml index 0af55657d3e..d428ccda643 100644 --- a/massa-async-pool/Cargo.toml +++ b/massa-async-pool/Cargo.toml @@ -17,6 +17,7 @@ tracing = "0.1" rand = "0.8" # custom modules massa_hash = { path = "../massa-hash" } +massa_ledger_exports = { path = "../massa-ledger-exports" } massa_logging = { path = "../massa-logging" } massa_models = { path = "../massa-models" } massa_serialization = { path = "../massa-serialization" } @@ -25,7 +26,7 @@ massa_time = { path = "../massa-time" } [dev-dependencies] pretty_assertions = "1.2" -serial_test = "0.9" +serial_test = "0.10" # for more information on what are the following features used for, see the cargo.toml at workspace level [features] diff --git a/massa-async-pool/src/changes.rs b/massa-async-pool/src/changes.rs index bdbb79719cd..13073dafdc1 100644 --- a/massa-async-pool/src/changes.rs +++ b/massa-async-pool/src/changes.rs @@ -24,10 +24,20 @@ pub enum Change { /// an item with identifier T and value U is added Add(T, U), + /// an item with identifier T is ready to be executed + Activate(T), + /// an item with identifier T is deleted Delete(T), } +#[repr(u32)] +enum ChangeId { + Add = 0, + Activate = 1, + Delete = 2, +} + /// represents a list of additions and deletions to the asynchronous message pool #[derive(Default, Debug, Clone, PartialEq, Eq)] pub struct AsyncPoolChanges(pub Vec>); @@ -76,6 +86,7 @@ impl Serializer for AsyncPoolChangesSerializer { /// Slot::new(2, 0), /// Slot::new(3, 0), /// vec![1, 2, 3, 4], + /// None /// ); /// let changes: AsyncPoolChanges = AsyncPoolChanges(vec![Change::Add(message.compute_id(), message)]); /// let mut 
serialized = Vec::new(); @@ -96,12 +107,16 @@ impl Serializer for AsyncPoolChangesSerializer { for change in &value.0 { match change { Change::Add(id, message) => { - buffer.push(0); + buffer.push(ChangeId::Add as u8); self.id_serializer.serialize(id, buffer)?; self.message_serializer.serialize(message, buffer)?; } + Change::Activate(id) => { + buffer.push(ChangeId::Activate as u8); + self.id_serializer.serialize(id, buffer)?; + } Change::Delete(id) => { - buffer.push(1); + buffer.push(ChangeId::Delete as u8); self.id_serializer.serialize(id, buffer)?; } } @@ -117,7 +132,12 @@ pub struct AsyncPoolChangesDeserializer { } impl AsyncPoolChangesDeserializer { - pub fn new(thread_count: u8, max_async_pool_changes: u64, max_async_message_data: u64) -> Self { + pub fn new( + thread_count: u8, + max_async_pool_changes: u64, + max_async_message_data: u64, + max_key_length: u32, + ) -> Self { Self { async_pool_changes_length: U64VarIntDeserializer::new( Included(u64::MIN), @@ -127,6 +147,7 @@ impl AsyncPoolChangesDeserializer { message_deserializer: AsyncMessageDeserializer::new( thread_count, max_async_message_data, + max_key_length, ), } } @@ -139,7 +160,7 @@ impl Deserializer for AsyncPoolChangesDeserializer { /// use massa_serialization::{Serializer, Deserializer, DeserializeError}; /// use massa_models::{address::Address, amount::Amount, slot::Slot}; /// use std::str::FromStr; - /// use massa_async_pool::{AsyncMessage, Change, AsyncPoolChanges, AsyncPoolChangesSerializer, AsyncPoolChangesDeserializer}; + /// use massa_async_pool::{AsyncMessage, AsyncMessageTrigger, Change, AsyncPoolChanges, AsyncPoolChangesSerializer, AsyncPoolChangesDeserializer}; /// /// let message = AsyncMessage::new_with_hash( /// Slot::new(1, 0), @@ -153,11 +174,15 @@ impl Deserializer for AsyncPoolChangesDeserializer { /// Slot::new(2, 0), /// Slot::new(3, 0), /// vec![1, 2, 3, 4], + /// Some(AsyncMessageTrigger { + /// address: 
Address::from_str("A12dG5xP1RDEB5ocdHkymNVvvSJmUL9BgHwCksDowqmGWxfpm93x").unwrap(), + /// datastore_key: Some(vec![1, 2, 3, 4]), + /// }) /// ); /// let changes: AsyncPoolChanges = AsyncPoolChanges(vec![Change::Add(message.compute_id(), message)]); /// let mut serialized = Vec::new(); /// let serializer = AsyncPoolChangesSerializer::new(); - /// let deserializer = AsyncPoolChangesDeserializer::new(32, 100000, 100000); + /// let deserializer = AsyncPoolChangesDeserializer::new(32, 100000, 100000, 100000); /// serializer.serialize(&changes, &mut serialized).unwrap(); /// let (rest, changes_deser) = deserializer.deserialize::(&serialized).unwrap(); /// assert!(rest.is_empty()); @@ -237,4 +262,12 @@ impl AsyncPoolChanges { pub fn push_delete(&mut self, msg_id: AsyncMessageId) { self.0.push(Change::Delete(msg_id)); } + + /// Pushes a message activation to the list of changes. + /// + /// Arguments: + /// * `msg_id`: ID of the message to push as ready to be executed to the list of changes + pub fn push_activate(&mut self, msg_id: AsyncMessageId) { + self.0.push(Change::Activate(msg_id)); + } } diff --git a/massa-async-pool/src/lib.rs b/massa-async-pool/src/lib.rs index 8a2d8b222a4..b564bb65990 100644 --- a/massa-async-pool/src/lib.rs +++ b/massa-async-pool/src/lib.rs @@ -86,6 +86,7 @@ //! See `test_exports/mod.rs` for details. 
#![feature(btree_drain_filter)] +#![feature(let_chains)] #![feature(drain_filter)] mod changes; @@ -99,7 +100,7 @@ pub use changes::{ pub use config::AsyncPoolConfig; pub use message::{ AsyncMessage, AsyncMessageDeserializer, AsyncMessageId, AsyncMessageIdDeserializer, - AsyncMessageIdSerializer, AsyncMessageSerializer, + AsyncMessageIdSerializer, AsyncMessageSerializer, AsyncMessageTrigger, }; pub use pool::{AsyncPool, AsyncPoolDeserializer, AsyncPoolSerializer}; diff --git a/massa-async-pool/src/message.rs b/massa-async-pool/src/message.rs index 21e967efd28..7e14c586509 100644 --- a/massa-async-pool/src/message.rs +++ b/massa-async-pool/src/message.rs @@ -3,7 +3,7 @@ //! This file defines the structure representing an asynchronous message use massa_hash::Hash; -use massa_models::address::AddressDeserializer; +use massa_models::address::{AddressDeserializer, AddressSerializer}; use massa_models::amount::{AmountDeserializer, AmountSerializer}; use massa_models::slot::{SlotDeserializer, SlotSerializer}; use massa_models::{ @@ -13,7 +13,8 @@ use massa_models::{ slot::Slot, }; use massa_serialization::{ - Deserializer, SerializeError, Serializer, U64VarIntDeserializer, U64VarIntSerializer, + Deserializer, OptionDeserializer, OptionSerializer, SerializeError, Serializer, + U64VarIntDeserializer, U64VarIntSerializer, }; use nom::error::{context, ContextError, ParseError}; use nom::multi::length_data; @@ -70,6 +71,7 @@ impl Serializer for AsyncMessageIdSerializer { /// Slot::new(2, 0), /// Slot::new(3, 0), /// vec![1, 2, 3, 4], + /// None /// ); /// let id: AsyncMessageId = message.compute_id(); /// let mut serialized = Vec::new(); @@ -128,6 +130,7 @@ impl Deserializer for AsyncMessageIdDeserializer { /// Slot::new(2, 0), /// Slot::new(3, 0), /// vec![1, 2, 3, 4], + /// None /// ); /// let id: AsyncMessageId = message.compute_id(); /// let mut serialized = Vec::new(); @@ -164,6 +167,86 @@ impl Deserializer for AsyncMessageIdDeserializer { } } +/// Structure defining a 
trigger for an asynchronous message +#[derive(Clone, Debug, Eq, PartialEq, Serialize, Deserialize)] +pub struct AsyncMessageTrigger { + /// Filter on the address + pub address: Address, + + /// Filter on the datastore key + pub datastore_key: Option>, +} + +/// Serializer for a trigger for an asynchronous message +struct AsyncMessageTriggerSerializer { + address_serializer: AddressSerializer, + key_serializer: OptionSerializer, VecU8Serializer>, +} + +impl AsyncMessageTriggerSerializer { + pub fn new() -> Self { + Self { + address_serializer: AddressSerializer::new(), + key_serializer: OptionSerializer::new(VecU8Serializer::new()), + } + } +} + +impl Serializer for AsyncMessageTriggerSerializer { + fn serialize( + &self, + value: &AsyncMessageTrigger, + buffer: &mut Vec, + ) -> Result<(), SerializeError> { + self.address_serializer.serialize(&value.address, buffer)?; + self.key_serializer + .serialize(&value.datastore_key, buffer)?; + Ok(()) + } +} + +/// Deserializer for a trigger for an asynchronous message +struct AsyncMessageTriggerDeserializer { + address_deserializer: AddressDeserializer, + key_serializer: OptionDeserializer, VecU8Deserializer>, +} + +impl AsyncMessageTriggerDeserializer { + pub fn new(max_key_length: u32) -> Self { + Self { + address_deserializer: AddressDeserializer::new(), + key_serializer: OptionDeserializer::new(VecU8Deserializer::new( + Included(0), + Excluded(max_key_length as u64), + )), + } + } +} + +impl Deserializer for AsyncMessageTriggerDeserializer { + fn deserialize<'a, E: ParseError<&'a [u8]> + ContextError<&'a [u8]>>( + &self, + buffer: &'a [u8], + ) -> IResult<&'a [u8], AsyncMessageTrigger, E> { + context( + "Failed AsyncMessageTrigger deserialization", + tuple(( + context("Failed address deserialization", |input| { + self.address_deserializer.deserialize(input) + }), + context("Failed datastore_key deserialization", |input| { + self.key_serializer.deserialize(input) + }), + )), + ) + .map(|(address, datastore_key)| 
AsyncMessageTrigger { + address, + datastore_key, + }) + .parse(buffer) + } +} + /// Structure defining an asynchronous smart contract message #[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] pub struct AsyncMessage { @@ -204,6 +287,13 @@ pub struct AsyncMessage { /// Raw payload data of the message pub data: Vec, + /// Trigger that define whenever a message can be executed + pub trigger: Option, + + /// Boolean that determine if the message can be executed. For messages without filter this boolean is always true. + /// For messages with filter, this boolean is true if the filter has been matched between `validity_start` and current slot. + pub can_be_executed: bool, + /// Hash of the message pub hash: Hash, } @@ -223,6 +313,7 @@ impl AsyncMessage { validity_start: Slot, validity_end: Slot, data: Vec, + trigger: Option, ) -> Self { let async_message_ser = AsyncMessageSerializer::new(); let mut buffer = Vec::new(); @@ -238,6 +329,8 @@ impl AsyncMessage { validity_start, validity_end, data, + can_be_executed: trigger.is_none(), + trigger, // placeholder hash to serialize the message, replaced below hash: Hash::from_bytes(&[0; 32]), }; @@ -257,6 +350,16 @@ impl AsyncMessage { self.emission_index, ) } + + /// Recompute the hash of the message. 
Must be used each time we modify one field + pub fn compute_hash(&mut self) { + let async_message_ser = AsyncMessageSerializer::new(); + let mut buffer = Vec::new(); + async_message_ser.serialize(self, &mut buffer).expect( + "critical: asynchronous message serialization should never fail in recompute hash", + ); + self.hash = Hash::compute_from(&buffer); + } } pub struct AsyncMessageSerializer { @@ -264,6 +367,7 @@ pub struct AsyncMessageSerializer { amount_serializer: AmountSerializer, u64_serializer: U64VarIntSerializer, vec_u8_serializer: VecU8Serializer, + trigger_serializer: OptionSerializer, } impl AsyncMessageSerializer { @@ -273,6 +377,7 @@ impl AsyncMessageSerializer { amount_serializer: AmountSerializer::new(), u64_serializer: U64VarIntSerializer::new(), vec_u8_serializer: VecU8Serializer::new(), + trigger_serializer: OptionSerializer::new(AsyncMessageTriggerSerializer::new()), } } } @@ -286,7 +391,7 @@ impl Default for AsyncMessageSerializer { impl Serializer for AsyncMessageSerializer { /// ## Example /// ``` - /// use massa_async_pool::{AsyncMessage, AsyncMessageSerializer}; + /// use massa_async_pool::{AsyncMessage, AsyncMessageSerializer, AsyncMessageTrigger}; /// use massa_models::{address::Address, amount::Amount, slot::Slot}; /// use massa_serialization::Serializer; /// use std::str::FromStr; @@ -303,6 +408,10 @@ impl Serializer for AsyncMessageSerializer { /// Slot::new(2, 0), /// Slot::new(3, 0), /// vec![1, 2, 3, 4], + /// Some(AsyncMessageTrigger { + /// address: Address::from_str("A12dG5xP1RDEB5ocdHkymNVvvSJmUL9BgHwCksDowqmGWxfpm93x").unwrap(), + /// datastore_key: Some(vec![1, 2, 3, 4]) + /// }) /// ); /// let mut buffer = Vec::new(); /// let message_serializer = AsyncMessageSerializer::new(); @@ -335,6 +444,7 @@ impl Serializer for AsyncMessageSerializer { self.slot_serializer .serialize(&value.validity_end, buffer)?; self.vec_u8_serializer.serialize(&value.data, buffer)?; + self.trigger_serializer.serialize(&value.trigger, buffer)?; Ok(()) 
} } @@ -346,10 +456,11 @@ pub struct AsyncMessageDeserializer { max_gas_deserializer: U64VarIntDeserializer, data_deserializer: VecU8Deserializer, address_deserializer: AddressDeserializer, + trigger_deserializer: OptionDeserializer, } impl AsyncMessageDeserializer { - pub fn new(thread_count: u8, max_async_message_data: u64) -> Self { + pub fn new(thread_count: u8, max_async_message_data: u64, max_key_length: u32) -> Self { Self { slot_deserializer: SlotDeserializer::new( (Included(0), Included(u64::MAX)), @@ -369,6 +480,9 @@ impl AsyncMessageDeserializer { Included(max_async_message_data), ), address_deserializer: AddressDeserializer::new(), + trigger_deserializer: OptionDeserializer::new(AsyncMessageTriggerDeserializer::new( + max_key_length, + )), } } } @@ -376,7 +490,7 @@ impl AsyncMessageDeserializer { impl Deserializer for AsyncMessageDeserializer { /// ## Example /// ``` - /// use massa_async_pool::{AsyncMessage, AsyncMessageSerializer, AsyncMessageDeserializer}; + /// use massa_async_pool::{AsyncMessage, AsyncMessageSerializer, AsyncMessageDeserializer, AsyncMessageTrigger}; /// use massa_models::{address::Address, amount::Amount, slot::Slot}; /// use massa_serialization::{Serializer, Deserializer, DeserializeError}; /// use std::str::FromStr; @@ -393,11 +507,15 @@ impl Deserializer for AsyncMessageDeserializer { /// Slot::new(2, 0), /// Slot::new(3, 0), /// vec![1, 2, 3, 4], + /// Some(AsyncMessageTrigger { + /// address: Address::from_str("A12dG5xP1RDEB5ocdHkymNVvvSJmUL9BgHwCksDowqmGWxfpm93x").unwrap(), + /// datastore_key: Some(vec![1, 2, 3, 4]), + /// }) /// ); /// let message_serializer = AsyncMessageSerializer::new(); /// let mut serialized = Vec::new(); /// message_serializer.serialize(&message, &mut serialized).unwrap(); - /// let message_deserializer = AsyncMessageDeserializer::new(32, 100000); + /// let message_deserializer = AsyncMessageDeserializer::new(32, 100000, 255); /// let (rest, message_deserialized) = 
message_deserializer.deserialize::(&serialized).unwrap(); /// assert!(rest.is_empty()); /// assert_eq!(message, message_deserialized); @@ -458,6 +576,9 @@ impl Deserializer for AsyncMessageDeserializer { context("Failed data deserialization", |input| { self.data_deserializer.deserialize(input) }), + context("Failed filter deserialization", |input| { + self.trigger_deserializer.deserialize(input) + }), )), ) .map( @@ -473,6 +594,7 @@ impl Deserializer for AsyncMessageDeserializer { validity_start, validity_end, data, + filter, )| { AsyncMessage::new_with_hash( emission_slot, @@ -486,6 +608,7 @@ impl Deserializer for AsyncMessageDeserializer { validity_start, validity_end, data, + filter, ) }, ) @@ -501,11 +624,13 @@ mod tests { use massa_models::{ address::Address, amount::Amount, - config::{MAX_ASYNC_MESSAGE_DATA, THREAD_COUNT}, + config::{MAX_ASYNC_MESSAGE_DATA, MAX_DATASTORE_KEY_LENGTH, THREAD_COUNT}, slot::Slot, }; use std::str::FromStr; + use super::AsyncMessageTrigger; + #[test] fn bad_serialization_version() { let message = AsyncMessage::new_with_hash( @@ -520,14 +645,22 @@ mod tests { Slot::new(2, 0), Slot::new(3, 0), vec![1, 2, 3, 4], + Some(AsyncMessageTrigger { + address: Address::from_str("A12htxRWiEm8jDJpJptr6cwEhWNcCSFWstN1MLSa96DDkVM9Y42G") + .unwrap(), + datastore_key: None, + }), ); let message_serializer = AsyncMessageSerializer::new(); let mut serialized = Vec::new(); message_serializer .serialize(&message, &mut serialized) .unwrap(); - let message_deserializer = - AsyncMessageDeserializer::new(THREAD_COUNT, MAX_ASYNC_MESSAGE_DATA); + let message_deserializer = AsyncMessageDeserializer::new( + THREAD_COUNT, + MAX_ASYNC_MESSAGE_DATA, + MAX_DATASTORE_KEY_LENGTH as u32, + ); serialized[1] = 50; message_deserializer .deserialize::(&serialized) diff --git a/massa-async-pool/src/pool.rs b/massa-async-pool/src/pool.rs index eb35656248f..abb09eac061 100644 --- a/massa-async-pool/src/pool.rs +++ b/massa-async-pool/src/pool.rs @@ -7,9 +7,10 @@ use crate::{ 
config::AsyncPoolConfig, message::{AsyncMessage, AsyncMessageId}, AsyncMessageDeserializer, AsyncMessageIdDeserializer, AsyncMessageIdSerializer, - AsyncMessageSerializer, + AsyncMessageSerializer, AsyncMessageTrigger, }; use massa_hash::{Hash, HASH_SIZE_BYTES}; +use massa_ledger_exports::LedgerChanges; use massa_models::{slot::Slot, streaming_step::StreamingStep}; use massa_serialization::{ Deserializer, SerializeError, Serializer, U64VarIntDeserializer, U64VarIntSerializer, @@ -65,6 +66,15 @@ impl AsyncPool { } } + Change::Activate(message_id) => { + if let Some(message) = self.messages.get_mut(message_id) { + self.hash ^= message.hash; + message.can_be_executed = true; + message.compute_hash(); + self.hash ^= message.hash; + } + } + // delete a message from the pool Change::Delete(message_id) => { if let Some(removed_message) = self.messages.remove(message_id) { @@ -88,11 +98,17 @@ impl AsyncPool { /// * expired messages from the pool, in priority order (from highest to lowest priority) /// * expired messages from `new_messages` (in the order they appear in `new_messages`) /// * excess messages after inserting all remaining `new_messages`, in priority order (from highest to lowest priority) + /// The list of message that their trigger has been triggered. + #[allow(clippy::type_complexity)] pub fn settle_slot( &mut self, slot: &Slot, new_messages: &mut Vec<(AsyncMessageId, AsyncMessage)>, - ) -> Vec<(AsyncMessageId, AsyncMessage)> { + ledger_changes: &LedgerChanges, + ) -> ( + Vec<(AsyncMessageId, AsyncMessage)>, + Vec<(AsyncMessageId, AsyncMessage)>, + ) { // Filter out all messages for which the validity end is expired. // Note that the validity_end bound is NOT included in the validity interval of the message. 
let mut eliminated: Vec<_> = self @@ -113,7 +129,15 @@ impl AsyncPool { for _ in 0..excess_count { eliminated.push(self.messages.pop_last().unwrap()); // will not panic (checked at excess_count computation) } - eliminated + let mut triggered = Vec::new(); + for (id, message) in self.messages.iter_mut() { + if let Some(filter) = &message.trigger && !message.can_be_executed && is_triggered(filter, ledger_changes) + { + message.can_be_executed = true; + triggered.push((*id, message.clone())); + } + } + (eliminated, triggered) } /// Takes the best possible batch of messages to execute, with gas limits and slot validity filtering. @@ -139,6 +163,7 @@ impl AsyncPool { if available_gas >= message.max_gas && slot >= message.validity_start && slot < message.validity_end + && message.can_be_executed { available_gas -= message.max_gas; true @@ -211,6 +236,11 @@ impl AsyncPool { } } +/// Check in the ledger changes if a message trigger has been triggered +fn is_triggered(filter: &AsyncMessageTrigger, ledger_changes: &LedgerChanges) -> bool { + ledger_changes.has_changes(&filter.address, filter.datastore_key.clone()) +} + /// Serializer for `AsyncPool` pub struct AsyncPoolSerializer { u64_serializer: U64VarIntSerializer, @@ -267,6 +297,7 @@ impl AsyncPoolDeserializer { thread_count: u8, max_async_pool_length: u64, max_async_message_data: u64, + max_key_length: u32, ) -> AsyncPoolDeserializer { AsyncPoolDeserializer { u64_deserializer: U64VarIntDeserializer::new( @@ -277,6 +308,7 @@ impl AsyncPoolDeserializer { async_message_deserializer: AsyncMessageDeserializer::new( thread_count, max_async_message_data, + max_key_length, ), } } @@ -335,6 +367,7 @@ fn test_take_batch() { Slot::new(1, 0), Slot::new(3, 0), Vec::new(), + None, ); pool.messages.insert(message.compute_id(), message); } diff --git a/massa-async-pool/src/test_exports/bootstrap.rs b/massa-async-pool/src/test_exports/bootstrap.rs index a0d4175d32c..56130bf4bd0 100644 --- a/massa-async-pool/src/test_exports/bootstrap.rs 
+++ b/massa-async-pool/src/test_exports/bootstrap.rs @@ -38,6 +38,7 @@ pub fn get_random_message(fee: Option) -> AsyncMessage { Slot::new(2, 0), Slot::new(4, 0), vec![1, 2, 3], + None, ) } diff --git a/massa-bootstrap/Cargo.toml b/massa-bootstrap/Cargo.toml index b894ea828eb..4ebf71d2d0a 100644 --- a/massa-bootstrap/Cargo.toml +++ b/massa-bootstrap/Cargo.toml @@ -41,7 +41,7 @@ massa_time = { path = "../massa-time" } [dev-dependencies] bitvec = { version = "1.0", features = ["serde"] } -serial_test = "0.9" +serial_test = "0.10" massa_final_state = { path = "../massa-final-state", features = ["testing"] } massa_async_pool = { path = "../massa-async-pool", features = ["testing"] } massa_ledger_worker = { path = "../massa-ledger-worker", features = [ diff --git a/massa-bootstrap/src/client.rs b/massa-bootstrap/src/client.rs index e1d0c1ea8f5..1768b67b0d2 100644 --- a/massa-bootstrap/src/client.rs +++ b/massa-bootstrap/src/client.rs @@ -166,7 +166,7 @@ async fn stream_final_state_and_consensus( last_ops_step: StreamingStep::Started, last_consensus_step: StreamingStep::Started, }; - panic!("Bootstrap failed, try to bootstrap again."); + return Err(BootstrapError::GeneralError(String::from("Slot too old"))); } BootstrapServerMessage::BootstrapError { error } => { return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, error).into()) @@ -216,7 +216,7 @@ async fn bootstrap_from_server( }; // handshake - let send_time_uncompensated = MassaTime::now(0)?; + let send_time_uncompensated = MassaTime::now()?; // client.handshake() is not cancel-safe but we drop the whole client object if cancelled => it's OK match tokio::time::timeout(cfg.write_timeout.into(), client.handshake(our_version)).await { Err(_) => { @@ -231,7 +231,7 @@ async fn bootstrap_from_server( } // compute ping - let ping = MassaTime::now(0)?.saturating_sub(send_time_uncompensated); + let ping = MassaTime::now()?.saturating_sub(send_time_uncompensated); if ping > cfg.max_ping { return 
Err(BootstrapError::GeneralError( "bootstrap ping too high".into(), @@ -267,39 +267,32 @@ async fn bootstrap_from_server( Ok(Ok(msg)) => return Err(BootstrapError::UnexpectedServerMessage(msg)), }; - let recv_time_uncompensated = MassaTime::now(0)?; + // get the time of reception + let recv_time = MassaTime::now()?; // compute ping - let ping = recv_time_uncompensated.saturating_sub(send_time_uncompensated); + let ping = recv_time.saturating_sub(send_time_uncompensated); if ping > cfg.max_ping { return Err(BootstrapError::GeneralError( "bootstrap ping too high".into(), )); } - // compute compensation - let compensation_millis = if cfg.enable_clock_synchronization { - let local_time_uncompensated = - recv_time_uncompensated.checked_sub(ping.checked_div_u64(2)?)?; - let compensation_millis = if server_time >= local_time_uncompensated { - server_time - .saturating_sub(local_time_uncompensated) - .to_millis() - } else { - local_time_uncompensated - .saturating_sub(server_time) - .to_millis() - }; - let compensation_millis: i64 = compensation_millis.try_into().map_err(|_| { - BootstrapError::GeneralError("Failed to convert compensation time into i64".into()) - })?; - debug!("Server clock compensation set to: {}", compensation_millis); - compensation_millis - } else { - 0 - }; + // compute client / server clock delta + // div 2 is an approximation of the time it took the message to do server -> client + // the complete ping value being client -> server -> client + let adjusted_server_time = server_time.checked_add(ping.checked_div_u64(2)?)?; + let clock_delta = adjusted_server_time.abs_diff(recv_time); - global_bootstrap_state.compensation_millis = compensation_millis; + // if clock delta is too high warn the user and restart bootstrap + if clock_delta > cfg.max_clock_delta { + warn!("client and server clocks differ too much, please check your clock"); + let message = format!( + "client = {}, server = {}, ping = {}, max_delta = {}", + recv_time, server_time, ping, 
cfg.max_clock_delta + ); + return Err(BootstrapError::ClockError(message)); + } let write_timeout: std::time::Duration = cfg.write_timeout.into(); // Loop to ask data to the server depending on the last message we sent @@ -425,7 +418,7 @@ pub async fn get_state( end_timestamp: Option, ) -> Result { massa_trace!("bootstrap.lib.get_state", {}); - let now = MassaTime::now(0)?; + let now = MassaTime::now()?; // if we are before genesis, do not bootstrap if now < genesis_timestamp { massa_trace!("bootstrap.lib.get_state.init_from_scratch", {}); @@ -467,7 +460,7 @@ pub async fn get_state( loop { for (addr, pub_key) in shuffled_list.iter() { if let Some(end) = end_timestamp { - if MassaTime::now(0).expect("could not get now time") > end { + if MassaTime::now().expect("could not get now time") > end { panic!("This episode has come to an end, please get the latest testnet node version to continue"); } } diff --git a/massa-bootstrap/src/error.rs b/massa-bootstrap/src/error.rs index 5783ccce87a..9dd7ea2d31b 100644 --- a/massa-bootstrap/src/error.rs +++ b/massa-bootstrap/src/error.rs @@ -50,4 +50,6 @@ pub enum BootstrapError { IncompatibleVersionError(String), /// Received error: {0} ReceivedError(String), + /// clock error: {0} + ClockError(String), } diff --git a/massa-bootstrap/src/lib.rs b/massa-bootstrap/src/lib.rs index 33889ae8546..c9f3c934a4d 100644 --- a/massa-bootstrap/src/lib.rs +++ b/massa-bootstrap/src/lib.rs @@ -51,9 +51,6 @@ pub struct GlobalBootstrapState { /// list of network peers pub peers: Option, - - /// timestamp correction in milliseconds - pub compensation_millis: i64, } impl GlobalBootstrapState { @@ -62,7 +59,6 @@ impl GlobalBootstrapState { final_state, graph: None, peers: None, - compensation_millis: Default::default(), } } } diff --git a/massa-bootstrap/src/messages.rs b/massa-bootstrap/src/messages.rs index bd563bc1aac..44800e8917c 100644 --- a/massa-bootstrap/src/messages.rs +++ b/massa-bootstrap/src/messages.rs @@ -351,6 +351,7 @@ impl 
BootstrapServerMessageDeserializer { thread_count, max_async_pool_length, max_async_message_data, + max_datastore_key_length as u32, ), opt_pos_cycle_deserializer: OptionDeserializer::new(CycleInfoDeserializer::new( max_rolls_length, diff --git a/massa-bootstrap/src/server.rs b/massa-bootstrap/src/server.rs index 20e6fdc9293..7fa3160738b 100644 --- a/massa-bootstrap/src/server.rs +++ b/massa-bootstrap/src/server.rs @@ -3,7 +3,7 @@ use futures::StreamExt; use humantime::format_duration; use massa_async_pool::AsyncMessageId; use massa_consensus_exports::{bootstrapable_graph::BootstrapableGraph, ConsensusController}; -use massa_final_state::FinalState; +use massa_final_state::{FinalState, FinalStateError}; use massa_logging::massa_trace; use massa_models::{ block::BlockId, prehash::PreHashSet, slot::Slot, streaming_step::StreamingStep, @@ -60,7 +60,6 @@ pub async fn start_bootstrap_server( bootstrap_config: BootstrapConfig, establisher: Establisher, keypair: KeyPair, - compensation_millis: i64, version: Version, ) -> Result, BootstrapError> { massa_trace!("bootstrap.lib.start_bootstrap_server", {}); @@ -68,7 +67,7 @@ pub async fn start_bootstrap_server( let (manager_tx, manager_rx) = mpsc::channel::<()>(1); let whitelist = if let Ok(whitelist) = - std::fs::read_to_string(&bootstrap_config.bootstrap_whitelist_file) + std::fs::read_to_string(&bootstrap_config.bootstrap_whitelist_path) { Some( serde_json::from_str::>(whitelist.as_str()) @@ -86,7 +85,7 @@ pub async fn start_bootstrap_server( }; let blacklist = if let Ok(blacklist) = - std::fs::read_to_string(&bootstrap_config.bootstrap_blacklist_file) + std::fs::read_to_string(&bootstrap_config.bootstrap_blacklist_path) { Some( serde_json::from_str::>(blacklist.as_str()) @@ -112,7 +111,6 @@ pub async fn start_bootstrap_server( manager_rx, bind, keypair, - compensation_millis, version, whitelist, blacklist, @@ -140,7 +138,6 @@ struct BootstrapServer { bind: SocketAddr, keypair: KeyPair, bootstrap_config: BootstrapConfig, - 
compensation_millis: i64, version: Version, blacklist: Option>, whitelist: Option>, @@ -220,7 +217,7 @@ impl BootstrapServer { let mut server = BootstrapServerBinder::new(dplx, self.keypair.clone(), self.bootstrap_config.max_bytes_read_write, self.bootstrap_config.max_bootstrap_message_size, self.bootstrap_config.thread_count, self.bootstrap_config.max_datastore_key_length, self.bootstrap_config.randomness_size_bytes, self.bootstrap_config.consensus_bootstrap_part_size); let _ = match tokio::time::timeout(self.bootstrap_config.write_error_timeout.into(), server.send(BootstrapServerMessage::BootstrapError { error: - format!("Your last bootstrap on this server was {} ago and you have to wait before retrying.", format_duration(per_ip_min_interval.saturating_sub(occ.get().elapsed()))) + format!("Your last bootstrap on this server was {} ago and you have to wait {} before retrying.", format_duration(occ.get().elapsed()), format_duration(per_ip_min_interval.saturating_sub(occ.get().elapsed()))) })).await { Err(_) => Err(std::io::Error::new(std::io::ErrorKind::TimedOut, "bootstrap error too early retry bootstrap send timed out").into()), Ok(Err(e)) => Err(e), @@ -254,7 +251,6 @@ impl BootstrapServer { // launch bootstrap - let compensation_millis = self.compensation_millis; let version = self.version; let data_execution = self.final_state.clone(); let consensus_command_sender = self.consensus_controller.clone(); @@ -264,7 +260,7 @@ impl BootstrapServer { bootstrap_sessions.push(async move { let mut server = BootstrapServerBinder::new(dplx, keypair, config.max_bytes_read_write, config.max_bootstrap_message_size, config.thread_count, config.max_datastore_key_length, config.randomness_size_bytes, config.consensus_bootstrap_part_size); - match manage_bootstrap(&config, &mut server, data_execution, compensation_millis, version, consensus_command_sender, network_command_sender).await { + match manage_bootstrap(&config, &mut server, data_execution, version, 
consensus_command_sender, network_command_sender).await { Ok(_) => { info!("bootstrapped peer {}", remote_addr) }, @@ -329,6 +325,8 @@ pub async fn stream_bootstrap_information( let exec_ops_part; let final_state_changes; + let mut slot_too_old = false; + // Scope of the final state read { let final_state_read = final_state.read(); @@ -362,14 +360,21 @@ pub async fn stream_bootstrap_information( "Bootstrap cursor set to future slot".to_string(), )); } - final_state_changes = final_state_read.get_state_changes_part( + final_state_changes = match final_state_read.get_state_changes_part( slot, new_ledger_step.clone(), new_pool_step, new_cycle_step, new_credits_step, new_ops_step, - )?; + ) { + Ok(data) => data, + Err(err) if matches!(err, FinalStateError::InvalidSlot(_)) => { + slot_too_old = true; + Vec::default() + } + Err(err) => return Err(BootstrapError::FinalStateError(err)), + }; } else { final_state_changes = Vec::new(); } @@ -384,6 +389,24 @@ pub async fn stream_bootstrap_information( current_slot = final_state_read.slot; } + if slot_too_old { + match tokio::time::timeout( + write_timeout, + server.send(BootstrapServerMessage::SlotTooOld), + ) + .await + { + Err(_) => Err(std::io::Error::new( + std::io::ErrorKind::TimedOut, + "SlotTooOld message send timed out", + ) + .into()), + Ok(Err(e)) => Err(e), + Ok(Ok(_)) => Ok(()), + }?; + return Ok(()); + } + // Setup final state global cursor let final_state_global_step = if last_ledger_step.finished() && last_pool_step.finished() @@ -486,7 +509,6 @@ async fn manage_bootstrap( bootstrap_config: &BootstrapConfig, server: &mut BootstrapServerBinder, final_state: Arc>, - compensation_millis: i64, version: Version, consensus_controller: Box, network_command_sender: NetworkCommandSender, @@ -523,7 +545,7 @@ async fn manage_bootstrap( let write_timeout: std::time::Duration = bootstrap_config.write_timeout.into(); // Sync clocks. 
- let server_time = MassaTime::now(compensation_millis)?; + let server_time = MassaTime::now()?; match tokio::time::timeout( write_timeout, diff --git a/massa-bootstrap/src/settings.rs b/massa-bootstrap/src/settings.rs index 4376e0f5cff..5024b5697d3 100644 --- a/massa-bootstrap/src/settings.rs +++ b/massa-bootstrap/src/settings.rs @@ -3,7 +3,7 @@ use massa_signature::PublicKey; use massa_time::MassaTime; use serde::Deserialize; -use std::net::SocketAddr; +use std::{net::SocketAddr, path::PathBuf}; /// Bootstrap configuration. #[derive(Debug, Deserialize, Clone)] @@ -11,9 +11,9 @@ pub struct BootstrapConfig { /// Ip address of our bootstrap nodes and their public key. pub bootstrap_list: Vec<(SocketAddr, PublicKey)>, /// Path to the bootstrap whitelist file. This whitelist define IPs that can bootstrap on your node. - pub bootstrap_whitelist_file: std::path::PathBuf, + pub bootstrap_whitelist_path: PathBuf, /// Path to the bootstrap blacklist file. This whitelist define IPs that will not be able to bootstrap on your node. This list is optional. - pub bootstrap_blacklist_file: std::path::PathBuf, + pub bootstrap_blacklist_path: PathBuf, /// Port to listen if we choose to allow other nodes to use us as bootstrap node. pub bind: Option, /// connection timeout @@ -30,8 +30,8 @@ pub struct BootstrapConfig { pub retry_delay: MassaTime, /// Max ping delay. 
pub max_ping: MassaTime, - /// Enable clock synchronization - pub enable_clock_synchronization: bool, + /// Maximum allowed time between server and client clocks + pub max_clock_delta: MassaTime, /// Cache duration pub cache_duration: MassaTime, /// Max simultaneous bootstraps diff --git a/massa-bootstrap/src/tests/scenarios.rs b/massa-bootstrap/src/tests/scenarios.rs index 1227ec6050e..69555aa173b 100644 --- a/massa-bootstrap/src/tests/scenarios.rs +++ b/massa-bootstrap/src/tests/scenarios.rs @@ -25,6 +25,7 @@ use massa_final_state::{ test_exports::{assert_eq_final_state, assert_eq_final_state_hash}, FinalState, FinalStateConfig, StateChanges, }; +use massa_hash::{Hash, HASH_SIZE_BYTES}; use massa_ledger_exports::LedgerConfig; use massa_models::{address::Address, slot::Slot, streaming_step::StreamingStep, version::Version}; use massa_models::{ @@ -122,6 +123,7 @@ async fn test_bootstrap_server() { "", &rolls_path, server_selector_controller.clone(), + Hash::from_bytes(&[0; HASH_SIZE_BYTES]), ) .unwrap(), final_state_local_config.clone(), @@ -132,6 +134,7 @@ async fn test_bootstrap_server() { "", &rolls_path, client_selector_controller.clone(), + Hash::from_bytes(&[0; HASH_SIZE_BYTES]), ) .unwrap(), final_state_local_config, @@ -148,7 +151,6 @@ async fn test_bootstrap_server() { bootstrap_config.clone(), bootstrap_establisher, keypair.clone(), - 0, Version::from_str("TEST.1.10").unwrap(), ) .await @@ -163,7 +165,7 @@ async fn test_bootstrap_server() { final_state_client_clone, remote_establisher, Version::from_str("TEST.1.10").unwrap(), - MassaTime::now(0).unwrap().saturating_sub(1000.into()), + MassaTime::now().unwrap().saturating_sub(1000.into()), None, ) .await @@ -205,7 +207,7 @@ async fn test_bootstrap_server() { let wait_peers = async move || { // wait for bootstrap to ask network for peers, send them let response = - match wait_network_command(&mut network_cmd_rx, 10_000.into(), |cmd| match cmd { + match wait_network_command(&mut network_cmd_rx, 
20_000.into(), |cmd| match cmd { NetworkCommand::GetBootstrapPeers(resp) => Some(resp), _ => None, }) @@ -223,7 +225,7 @@ async fn test_bootstrap_server() { let sent_graph = get_boot_state(); let sent_graph_clone = sent_graph.clone(); std::thread::spawn(move || loop { - consensus_event_receiver.wait_command(MassaTime::from_millis(10_000), |cmd| match &cmd { + consensus_event_receiver.wait_command(MassaTime::from_millis(20_000), |cmd| match &cmd { MockConsensusControllerMessage::GetBootstrapableGraph { execution_cursor, response_tx, diff --git a/massa-bootstrap/src/tests/tools.rs b/massa-bootstrap/src/tests/tools.rs index 51c9795fda7..08486ce29d5 100644 --- a/massa-bootstrap/src/tests/tools.rs +++ b/massa-bootstrap/src/tests/tools.rs @@ -52,6 +52,7 @@ use std::str::FromStr; use std::{ collections::BTreeMap, net::{IpAddr, Ipv4Addr, SocketAddr}, + path::PathBuf, }; use tokio::io::AsyncReadExt; use tokio::io::AsyncWriteExt; @@ -156,13 +157,9 @@ fn get_random_deferred_credits(r_limit: u64) -> DeferredCredits { fn get_random_pos_state(r_limit: u64, pos: PoSFinalState) -> PoSFinalState { let mut cycle_history = VecDeque::new(); let (roll_counts, production_stats, rng_seed) = get_random_pos_cycles_info(r_limit, true); - cycle_history.push_back(CycleInfo::new_with_hash( - 0, - false, - roll_counts, - rng_seed, - production_stats, - )); + let mut cycle = CycleInfo::new_with_hash(0, false, roll_counts, rng_seed, production_stats); + cycle.final_state_hash_snapshot = Some(Hash::from_bytes(&[0; 32])); + cycle_history.push_back(cycle); let mut deferred_credits = DeferredCredits::default(); deferred_credits.final_nested_extend(get_random_deferred_credits(r_limit)); PoSFinalState { @@ -286,13 +283,13 @@ pub fn get_bootstrap_config(bootstrap_public_key: PublicKey) -> BootstrapConfig read_error_timeout: 200.into(), write_error_timeout: 200.into(), bootstrap_list: vec![(SocketAddr::new(BASE_BOOTSTRAP_IP, 16), bootstrap_public_key)], - bootstrap_whitelist_file: 
std::path::PathBuf::from( + bootstrap_whitelist_path: PathBuf::from( "../massa-node/base_config/bootstrap_whitelist.json", ), - bootstrap_blacklist_file: std::path::PathBuf::from( + bootstrap_blacklist_path: PathBuf::from( "../massa-node/base_config/bootstrap_blacklist.json", ), - enable_clock_synchronization: true, + max_clock_delta: MassaTime::from_millis(1000), cache_duration: 10000.into(), max_simultaneous_bootstraps: 2, ip_list_max_size: 10, diff --git a/massa-client/Cargo.toml b/massa-client/Cargo.toml index b11dc99712b..7e389fc1317 100644 --- a/massa-client/Cargo.toml +++ b/massa-client/Cargo.toml @@ -29,5 +29,5 @@ massa_sdk = { path = "../massa-sdk" } massa_wallet = { path = "../massa-wallet" } [dev-dependencies] -toml_edit = "0.15" +toml_edit = "0.16" diff --git a/massa-client/base_config/config.toml b/massa-client/base_config/config.toml index 174acbbffcc..db17f5e366d 100644 --- a/massa-client/base_config/config.toml +++ b/massa-client/base_config/config.toml @@ -6,7 +6,7 @@ timeout = 1000 ip = "127.0.0.1" private_port = 33034 public_port = 33035 - +api_port = 33036 [http] # maximum size in bytes of a request max_request_body_size = 52428800 diff --git a/massa-client/src/cmds.rs b/massa-client/src/cmds.rs index 4376a3e8680..b05f3e6336b 100644 --- a/massa-client/src/cmds.rs +++ b/massa-client/src/cmds.rs @@ -1,7 +1,7 @@ // Copyright (c) 2022 MASSA LABS use crate::repl::Output; -use anyhow::{anyhow, bail, Result}; +use anyhow::{anyhow, bail, Error, Result}; use console::style; use massa_models::api::{ AddressInfo, CompactAddressInfo, DatastoreEntryInput, EventFilter, OperationInput, @@ -29,7 +29,7 @@ use std::fmt::{Debug, Display}; use std::net::IpAddr; use std::path::PathBuf; use strum::{EnumMessage, EnumProperty, IntoEnumIterator}; -use strum_macros::{Display, EnumIter, EnumMessage, EnumProperty, EnumString}; +use strum_macros::{Display, EnumIter, EnumString}; /// All the client commands /// the order they are defined is the order they are displayed in so 
be careful @@ -40,6 +40,9 @@ pub enum Command { #[strum(ascii_case_insensitive, message = "display this help")] help, + #[strum(ascii_case_insensitive, message = "exit the prompt")] + exit, + #[strum( ascii_case_insensitive, props(args = "IpAddr1 IpAddr2 ..."), @@ -97,17 +100,24 @@ pub enum Command { #[strum( ascii_case_insensitive, - props(args = "[IpAddr]"), - message = "whitelist given IP addresses" + props(args = "(add, remove or allow-all) [IpAddr]"), + message = "Manage boostrap whitelist IP address(es).No args returns the whitelist blacklist" + )] + node_bootsrap_whitelist, + + #[strum( + ascii_case_insensitive, + props(args = "(add or remove) [IpAddr]"), + message = "Manage boostrap blacklist IP address(es). No args returns the boostrap blacklist" )] - node_whitelist, + node_bootsrap_blacklist, #[strum( ascii_case_insensitive, - props(args = "[IpAddr]"), - message = "remove from whitelist given IP addresses" + props(args = "(add or remove) [IpAddr]"), + message = "Manage peers whitelist IP address(es). 
No args returns the peers whitelist" )] - node_remove_from_whitelist, + node_peers_whitelist, #[strum( ascii_case_insensitive, @@ -134,7 +144,7 @@ pub enum Command { props(args = "BlockId"), message = "show info about a block (content, finality ...)" )] - get_block, + get_blocks, #[strum( ascii_case_insensitive, @@ -153,7 +163,7 @@ pub enum Command { #[strum( ascii_case_insensitive, props( - args = "start=Slot end=Slot emitter_address=Address caller_address=Address operation_id=OperationId is_final=bool" + args = "start=Slot end=Slot emitter_address=Address caller_address=Address operation_id=OperationId is_final=bool is_error=bool" ), message = "show events emitted by smart contracts with various filters" )] @@ -218,7 +228,7 @@ pub enum Command { props(args = "SenderAddress PathToBytecode MaxGas Fee",), message = "create and send an operation containing byte code" )] - send_smart_contract, + execute_smart_contract, #[strum( ascii_case_insensitive, @@ -232,7 +242,7 @@ pub enum Command { props(args = "PathToBytecode MaxGas Address",), message = "execute byte code, address is optional. 
Nothing is really executed on chain" )] - read_only_smart_contract, + read_only_execute_smart_contract, #[strum( ascii_case_insensitive, @@ -251,6 +261,30 @@ pub enum Command { when_moon, } +#[derive(Debug, Display, EnumString, EnumIter)] +#[strum(serialize_all = "snake_case")] +pub enum ListOperation { + #[strum( + ascii_case_insensitive, + message = "add", + detailed_message = "add(s) the given value(s) to the target" + )] + Add, + #[strum( + ascii_case_insensitive, + serialize = "allow-all", + message = "allow-all", + detailed_message = "allow all in the target if exists" + )] + AllowAll, + #[strum( + ascii_case_insensitive, + message = "remove", + detailed_message = "remove(s) the given value(s) from the target if exists" + )] + Remove, +} + /// Display the help of all commands pub(crate) fn help() { println!("HELP of Massa client (list of available commands):"); @@ -548,13 +582,13 @@ impl Command { } } - Command::get_block => { + Command::get_blocks => { if parameters.len() != 1 { - bail!("wrong param numbers") + bail!("wrong param numbers, expecting at least one IP address") } - let block_id = parameters[0].parse::()?; - match client.public.get_block(block_id).await { - Ok(block_info) => Ok(Box::new(block_info)), + let block_ids = parse_vec::(parameters)?; + match client.public.get_blocks(block_ids).await { + Ok(blocks_info) => Ok(Box::new(blocks_info)), Err(e) => rpc_error!(e), } } @@ -576,13 +610,14 @@ impl Command { } Command::get_filtered_sc_output_event => { - let p_list: [&str; 6] = [ + let p_list: [&str; 7] = [ "start", "end", "emitter_address", "caller_address", "operation_id", "is_final", + "is_error", ]; let mut p: HashMap<&str, &str> = HashMap::new(); for v in parameters { @@ -600,6 +635,7 @@ impl Command { original_caller_address: parse_key_value(&p, p_list[3]), original_operation_id: parse_key_value(&p, p_list[4]), is_final: parse_key_value(&p, p_list[5]), + is_error: parse_key_value(&p, p_list[6]), }; match 
client.public.get_filtered_sc_output_event(filter).await { Ok(events) => Ok(Box::new(events)), @@ -700,7 +736,7 @@ impl Command { } } None => { - client_warning!("the total amount hit the limit overflow, operation will certainly be rejected"); + client_warning!("the total amount hit the limit overflow, operation will be rejected"); } } if let Ok(staked_keys) = client.private.get_staking_addresses().await { @@ -798,9 +834,8 @@ impl Command { }; let mut res = "".to_string(); if let Some(e) = end { - let (days, hours, mins, secs) = e - .saturating_sub(MassaTime::now(0)?) - .days_hours_mins_secs()?; // compensation milliseconds is zero + let (days, hours, mins, secs) = + e.saturating_sub(MassaTime::now()?).days_hours_mins_secs()?; // compensation milliseconds is zero let _ = write!(res, "{} days, {} hours, {} minutes, {} seconds remaining until the end of the current episode", days, hours, mins, secs); } else { @@ -818,7 +853,7 @@ impl Command { } Ok(Box::new(())) } - Command::send_smart_contract => { + Command::execute_smart_contract => { if parameters.len() != 4 { bail!("wrong number of parameters"); } @@ -846,8 +881,8 @@ impl Command { Ok(node_status) => node_status.config.max_block_size, Err(e) => bail!("RpcError: {}", e), }; - if data.len() > max_block_size as usize / 2 { - client_warning!("bytecode size exceeded half of the maximum size of a block, operation will certainly be rejected"); + if data.len() > max_block_size as usize { + client_warning!("bytecode size exceeded the maximum size of a block, operation will be rejected"); } } let datastore = BTreeMap::new(); @@ -899,7 +934,7 @@ impl Command { } } None => { - client_warning!("the total amount hit the limit overflow, operation will certainly be rejected"); + client_warning!("the total amount hit the limit overflow, operation will be rejected"); } } }; @@ -931,7 +966,7 @@ impl Command { bail!("Missing public key") } } - Command::read_only_smart_contract => { + Command::read_only_execute_smart_contract => { 
if parameters.len() != 2 && parameters.len() != 3 { bail!("wrong number of parameters"); } @@ -987,29 +1022,183 @@ impl Command { Err(e) => rpc_error!(e), } } - Command::node_whitelist => { - let ips = parse_vec::(parameters)?; - match client.private.node_whitelist(ips).await { - Ok(()) => { - if !json { - println!("Request of whitelisting successfully sent!") - } + Command::node_bootsrap_blacklist => { + if parameters.is_empty() { + match client.private.node_bootstrap_blacklist().await { + Ok(bootsraplist_ips) => Ok(Box::new(bootsraplist_ips)), + Err(e) => rpc_error!(e), } - Err(e) => rpc_error!(e), + } else { + let cli_op = match parameters[0].parse::() { + Ok(op) => op, + Err(_) => bail!( + "failed to parse operation, supported operations are: [add, remove]" + ), + }; + let args = ¶meters[1..]; + if args.is_empty() { + bail!("[IpAddr] parameter shouldn't be empty"); + } + let ips = parse_vec::(args)?; + let res: Result> = match cli_op { + ListOperation::Add => { + match client.private.node_add_to_bootstrap_blacklist(ips).await { + Ok(()) => { + if !json { + println!( + "Request of bootsrap blacklisting successfully sent!" 
+ ) + } + Ok(Box::new(())) + } + Err(e) => rpc_error!(e), + } + } + ListOperation::Remove => { + match client + .private + .node_remove_from_bootstrap_blacklist(ips) + .await + { + Ok(()) => { + if !json { + println!("Request of remove from bootsrap blacklist successfully sent!") + } + Ok(Box::new(())) + } + Err(e) => rpc_error!(e), + } + } + ListOperation::AllowAll => { + bail!("\"allow-all\" command is not implemented") + } + }; + res } - Ok(Box::new(())) } - Command::node_remove_from_whitelist => { - let ips = parse_vec::(parameters)?; - match client.private.node_remove_from_whitelist(ips).await { - Ok(()) => { - if !json { - println!("Request of removing from whitelist successfully sent!") + Command::node_bootsrap_whitelist => { + if parameters.is_empty() { + match client.private.node_bootstrap_whitelist().await { + Ok(bootsraplist_ips) => Ok(Box::new(bootsraplist_ips)), + Err(e) => { + client_warning!("if bootsrap whitelist configuration file does't exists, bootsrap is allowed for everyone !!!"); + rpc_error!(e) } } - Err(e) => rpc_error!(e), + } else { + let cli_op = match parameters[0].parse::() { + Ok(op) => op, + Err(_) => bail!( + "failed to parse operation, supported operations are: [add, remove, allow-all]" + ), + }; + let args = ¶meters[1..]; + let res: Result> = match cli_op { + ListOperation::Add => { + if args.is_empty() { + bail!("[IpAddr] parameter shouldn't be empty"); + } + match client + .private + .node_add_to_bootstrap_whitelist(parse_vec::(args)?) + .await + { + Ok(()) => { + if !json { + println!( + "Request of bootsrap whitelisting successfully sent!" + ) + } + Ok(Box::new(())) + } + Err(e) => rpc_error!(e), + } + } + ListOperation::Remove => { + if args.is_empty() { + bail!("[IpAddr] parameter shouldn't be empty"); + } + match client + .private + .node_remove_from_bootstrap_whitelist(parse_vec::(args)?) 
+ .await + { + Ok(()) => { + if !json { + println!("Request of remove from bootsrap whitelist successfully sent!") + } + Ok(Box::new(())) + } + Err(e) => rpc_error!(e), + } + } + ListOperation::AllowAll => { + match client.private.node_bootstrap_whitelist_allow_all().await { + Ok(()) => { + if !json { + println!( + "Request of bootsrap whitelisting everyone successfully sent!" + ) + } + Ok(Box::new(())) + } + Err(e) => rpc_error!(e), + } + } + }; + res } - Ok(Box::new(())) + } + Command::node_peers_whitelist => { + if parameters.is_empty() { + match client.private.node_peers_whitelist().await { + Ok(peerlist_ips) => Ok(Box::new(peerlist_ips)), + Err(e) => rpc_error!(e), + } + } else { + let cli_op = match parameters[0].parse::() { + Ok(op) => op, + Err(_) => bail!( + "failed to parse operation, supported operations are: [add, remove]" + ), + }; + let args = ¶meters[1..]; + if args.is_empty() { + bail!("[IpAddr] parameter shouldn't be empty"); + } + let ips = parse_vec::(args)?; + let res: Result> = match cli_op { + ListOperation::Add => { + match client.private.node_add_to_peers_whitelist(ips).await { + Ok(()) => { + if !json { + println!("Request of peers whitelisting successfully sent!") + } + Ok(Box::new(())) + } + Err(e) => rpc_error!(e), + } + } + ListOperation::Remove => { + match client.private.node_remove_from_peers_whitelist(ips).await { + Ok(()) => { + if !json { + println!("Request of remove from peers whitelist successfully sent!") + } + Ok(Box::new(())) + } + Err(e) => rpc_error!(e), + } + } + ListOperation::AllowAll => { + bail!("\"allow-all\" command is not implemented") + } + }; + res + } + } + Command::exit => { + std::process::exit(0); } } } @@ -1030,7 +1219,7 @@ async fn send_operation( } .config; - let slot = get_current_latest_block_slot(cfg.thread_count, cfg.t0, cfg.genesis_timestamp, 0)? // clock compensation is zero + let slot = get_current_latest_block_slot(cfg.thread_count, cfg.t0, cfg.genesis_timestamp)? 
.unwrap_or_else(|| Slot::new(0, 0)); let mut expire_period = slot.period + cfg.operation_validity_periods; if slot.thread >= addr.get_thread(cfg.thread_count) { @@ -1067,8 +1256,16 @@ async fn send_operation( /// TODO: ugly utilities functions /// takes a slice of string and makes it into a `Vec` -pub fn parse_vec(args: &[String]) -> anyhow::Result, T::Err> { - args.iter().map(|x| x.parse::()).collect() +pub fn parse_vec(args: &[String]) -> anyhow::Result, Error> +where + T::Err: Display, +{ + args.iter() + .map(|x| { + x.parse::() + .map_err(|e| anyhow!("failed to parse \"{}\" due to: {}", x, e)) + }) + .collect() } /// reads a file diff --git a/massa-client/src/repl.rs b/massa-client/src/repl.rs index 3abe8d0e48c..0e1cc9fdea5 100644 --- a/massa-client/src/repl.rs +++ b/massa-client/src/repl.rs @@ -20,6 +20,7 @@ use rustyline::error::ReadlineError; use rustyline::validate::MatchingBracketValidator; use rustyline::{CompletionType, Config, Editor}; use rustyline_derive::{Completer, Helper, Highlighter, Hinter, Validator}; +use std::net::IpAddr; use std::str; use strum::IntoEnumIterator; use strum::ParseError; @@ -97,7 +98,7 @@ struct MyHelper { pub(crate) async fn run(client: &Client, wallet: &mut Wallet) -> Result<()> { massa_fancy_ascii_art_logo!(); - println!("Use 'CTRL+D or CTRL+C' to quit the prompt"); + println!("Use 'exit' or 'CTRL+D or CTRL+C' to quit the prompt"); println!("Use the Up/Down arrows to scroll through history"); println!("Use the Right arrow or Tab to complete your command"); println!("Use the Enter key to execute your command"); @@ -280,6 +281,14 @@ impl Output for Vec { } } +impl Output for Vec { + fn pretty_print(&self) { + for ips in self { + println!("{}", ips); + } + } +} + impl Output for Vec { fn pretty_print(&self) { for operation_info in self { diff --git a/massa-client/src/tests/tools.rs b/massa-client/src/tests/tools.rs index 57d9089fe50..caf12e722eb 100644 --- a/massa-client/src/tests/tools.rs +++ b/massa-client/src/tests/tools.rs 
@@ -7,9 +7,7 @@ use toml_edit::{value, Document}; pub fn _update_genesis_timestamp(config_path: &str) { let toml = fs::read_to_string(config_path).expect("Unable to read file"); let mut doc = toml.parse::().unwrap(); - doc["consensus"]["genesis_timestamp"] = value(format!( - "{}", - MassaTime::now(10000 * 60 * 60).unwrap().to_millis() - )); + doc["consensus"]["genesis_timestamp"] = + value(format!("{}", MassaTime::now().unwrap().to_millis())); fs::write(config_path, doc.to_string()).expect("Unable to write file"); } diff --git a/massa-consensus-exports/Cargo.toml b/massa-consensus-exports/Cargo.toml index 7286e9bd634..d37d46383cd 100644 --- a/massa-consensus-exports/Cargo.toml +++ b/massa-consensus-exports/Cargo.toml @@ -13,6 +13,8 @@ nom = "7.1" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" thiserror = "1.0" +jsonrpsee = { version = "0.16.2", features = ["server"] } +tokio = { version = "1.21", features = ["sync"] } #custom modules massa_hash = { path = "../massa-hash"} massa_execution_exports = { path = "../massa-execution-exports" } diff --git a/massa-consensus-exports/src/channels.rs b/massa-consensus-exports/src/channels.rs index a895b7cfc97..2cc2a44bde6 100644 --- a/massa-consensus-exports/src/channels.rs +++ b/massa-consensus-exports/src/channels.rs @@ -1,5 +1,5 @@ -use crossbeam_channel::Sender; use massa_execution_exports::ExecutionController; +use massa_models::block::{Block, BlockHeader, FilledBlock}; use massa_pool_exports::PoolController; use massa_pos_exports::SelectorController; use massa_protocol_exports::ProtocolCommandSender; @@ -8,11 +8,15 @@ use crate::events::ConsensusEvent; /// Contains a reference to the pool, selector and execution controller /// Contains a channel to send info to protocol +/// Contains channels to send info to api #[derive(Clone)] pub struct ConsensusChannels { pub execution_controller: Box, pub selector_controller: Box, pub pool_command_sender: Box, - pub controller_event_tx: Sender, + pub 
controller_event_tx: crossbeam_channel::Sender, pub protocol_command_sender: ProtocolCommandSender, + pub block_sender: tokio::sync::broadcast::Sender, + pub block_header_sender: tokio::sync::broadcast::Sender, + pub filled_block_sender: tokio::sync::broadcast::Sender, } diff --git a/massa-consensus-exports/src/events.rs b/massa-consensus-exports/src/events.rs index e48b4803379..a20475b0834 100644 --- a/massa-consensus-exports/src/events.rs +++ b/massa-consensus-exports/src/events.rs @@ -3,4 +3,6 @@ pub enum ConsensusEvent { /// probable desynchronization detected, need re-synchronization NeedSync, + /// Network is ended should be send after `end_timestamp` + Stop, } diff --git a/massa-consensus-exports/src/settings.rs b/massa-consensus-exports/src/settings.rs index e479692b21e..f3fd18edb53 100644 --- a/massa-consensus-exports/src/settings.rs +++ b/massa-consensus-exports/src/settings.rs @@ -4,8 +4,6 @@ use serde::{Deserialize, Serialize}; #[derive(Debug, Clone, Deserialize, Serialize)] pub struct ConsensusConfig { - /// Clock compensation - pub clock_compensation_millis: i64, /// Genesis timestamp pub genesis_timestamp: MassaTime, /// Delta time between two period @@ -48,4 +46,12 @@ pub struct ConsensusConfig { pub channel_size: usize, /// size of a consensus bootstrap streaming part pub bootstrap_part_size: u64, + /// whether broadcast is enabled + pub broadcast_enabled: bool, + /// blocks headers sender(channel) capacity + pub broadcast_blocks_headers_capacity: usize, + /// blocks sender(channel) capacity + pub broadcast_blocks_capacity: usize, + /// filled blocks sender(channel) capacity + pub broadcast_filled_blocks_capacity: usize, } diff --git a/massa-consensus-exports/src/test_exports/config.rs b/massa-consensus-exports/src/test_exports/config.rs index 29ce87aba80..5f466adfa10 100644 --- a/massa-consensus-exports/src/test_exports/config.rs +++ b/massa-consensus-exports/src/test_exports/config.rs @@ -12,7 +12,6 @@ use crate::ConsensusConfig; impl Default for 
ConsensusConfig { fn default() -> Self { Self { - clock_compensation_millis: 0, genesis_timestamp: *GENESIS_TIMESTAMP, t0: T0, thread_count: THREAD_COUNT, @@ -34,6 +33,10 @@ impl Default for ConsensusConfig { stats_timespan: MassaTime::from_millis(60000), channel_size: CHANNEL_SIZE, bootstrap_part_size: CONSENSUS_BOOTSTRAP_PART_SIZE, + broadcast_enabled: true, + broadcast_blocks_headers_capacity: 128, + broadcast_blocks_capacity: 128, + broadcast_filled_blocks_capacity: 128, } } } diff --git a/massa-consensus-worker/src/controller.rs b/massa-consensus-worker/src/controller.rs index bbec2b64c8c..d0f1928fd40 100644 --- a/massa-consensus-worker/src/controller.rs +++ b/massa-consensus-worker/src/controller.rs @@ -1,12 +1,13 @@ use massa_consensus_exports::{ block_graph_export::BlockGraphExport, block_status::BlockStatus, bootstrapable_graph::BootstrapableGraph, error::ConsensusError, - export_active_block::ExportActiveBlock, ConsensusController, + export_active_block::ExportActiveBlock, ConsensusChannels, ConsensusController, }; use massa_models::{ api::BlockGraphStatus, - block::{BlockHeader, BlockId}, + block::{BlockHeader, BlockId, FilledBlock}, clique::Clique, + operation::{Operation, OperationId}, prehash::PreHashSet, slot::Slot, stats::ConsensusStats, @@ -30,20 +31,26 @@ use crate::{commands::ConsensusCommand, state::ConsensusState}; #[derive(Clone)] pub struct ConsensusControllerImpl { command_sender: SyncSender, + channels: ConsensusChannels, shared_state: Arc>, bootstrap_part_size: u64, + broadcast_enabled: bool, } impl ConsensusControllerImpl { pub fn new( command_sender: SyncSender, + channels: ConsensusChannels, shared_state: Arc>, bootstrap_part_size: u64, + broadcast_enabled: bool, ) -> Self { Self { command_sender, + channels, shared_state, bootstrap_part_size, + broadcast_enabled, } } } @@ -219,6 +226,38 @@ impl ConsensusController for ConsensusControllerImpl { } fn register_block(&self, block_id: BlockId, slot: Slot, block_storage: Storage, created: 
bool) { + if self.broadcast_enabled { + if let Some(wrapped_block) = block_storage.read_blocks().get(&block_id) { + let operations: Vec<(OperationId, Option>)> = + wrapped_block + .content + .operations + .iter() + .map(|operation_id| { + match block_storage.read_operations().get(operation_id).cloned() { + Some(wrapped_operation) => (*operation_id, Some(wrapped_operation)), + None => (*operation_id, None), + } + }) + .collect(); + + let _block_receivers_count = self + .channels + .block_sender + .send(wrapped_block.content.clone()); + let _filled_block_receivers_count = + self.channels.filled_block_sender.send(FilledBlock { + header: wrapped_block.content.header.clone(), + operations, + }); + } else { + warn!( + "error no ws event sent, block with id {} not found", + block_id + ); + }; + } + if let Err(err) = self .command_sender .try_send(ConsensusCommand::RegisterBlock( @@ -233,6 +272,12 @@ impl ConsensusController for ConsensusControllerImpl { } fn register_block_header(&self, block_id: BlockId, header: Wrapped) { + if self.broadcast_enabled { + let _ = self + .channels + .block_header_sender + .send(header.clone().content); + } if let Err(err) = self .command_sender .try_send(ConsensusCommand::RegisterBlockHeader(block_id, header)) diff --git a/massa-consensus-worker/src/state/process.rs b/massa-consensus-worker/src/state/process.rs index 9c38e6d9164..04ddeb1fb66 100644 --- a/massa-consensus-worker/src/state/process.rs +++ b/massa-consensus-worker/src/state/process.rs @@ -781,7 +781,7 @@ impl ConsensusState { } // manage finalized blocks - let timestamp = MassaTime::now(self.config.clock_compensation_millis)?; + let timestamp = MassaTime::now()?; let finalized_blocks = mem::take(&mut self.new_final_blocks); let mut final_block_slots = HashMap::with_capacity(finalized_blocks.len()); let mut final_block_stats = VecDeque::with_capacity(finalized_blocks.len()); @@ -810,7 +810,7 @@ impl ConsensusState { // add stale blocks to stats let 
new_stale_block_ids_creators_slots = mem::take(&mut self.new_stale_blocks); - let timestamp = MassaTime::now(self.config.clock_compensation_millis)?; + let timestamp = MassaTime::now()?; for (_b_id, (_b_creator, _b_slot)) in new_stale_block_ids_creators_slots.into_iter() { self.stale_block_stats.push_back(timestamp); } diff --git a/massa-consensus-worker/src/state/process_commands.rs b/massa-consensus-worker/src/state/process_commands.rs index fd923648f10..3677ed3aa1c 100644 --- a/massa-consensus-worker/src/state/process_commands.rs +++ b/massa-consensus-worker/src/state/process_commands.rs @@ -97,7 +97,7 @@ impl ConsensusState { // Block is coming from protocol mark it for desync calculation if !created { - let now = MassaTime::now(self.config.clock_compensation_millis)?; + let now = MassaTime::now()?; self.protocol_blocks.push_back((now, block_id)); } diff --git a/massa-consensus-worker/src/state/stats.rs b/massa-consensus-worker/src/state/stats.rs index 4ab766a4ca4..dc336c12ab2 100644 --- a/massa-consensus-worker/src/state/stats.rs +++ b/massa-consensus-worker/src/state/stats.rs @@ -13,10 +13,7 @@ use massa_consensus_exports::events::ConsensusEvent; impl ConsensusState { /// Calculate and return stats about consensus pub fn get_stats(&self) -> Result { - let timespan_end = max( - self.launch_time, - MassaTime::now(self.config.clock_compensation_millis)?, - ); + let timespan_end = max(self.launch_time, MassaTime::now()?); let timespan_start = max( timespan_end.saturating_sub(self.config.stats_timespan), self.launch_time, @@ -47,7 +44,7 @@ impl ConsensusState { // if none => we are probably desync #[cfg(not(feature = "sandbox"))] { - let now = MassaTime::now(self.config.clock_compensation_millis)?; + let now = MassaTime::now()?; if now > max(self.config.genesis_timestamp, self.launch_time) .saturating_add(self.stats_desync_detection_timespan) @@ -73,8 +70,7 @@ impl ConsensusState { /// Remove old stats from consensus storage pub fn prune_stats(&mut self) -> 
Result<(), ConsensusError> { - let start_time = MassaTime::now(self.config.clock_compensation_millis)? - .saturating_sub(self.stats_history_timespan); + let start_time = MassaTime::now()?.saturating_sub(self.stats_history_timespan); while let Some((t, _, _)) = self.final_block_stats.front() { if t < &start_time { self.final_block_stats.pop_front(); diff --git a/massa-consensus-worker/src/worker/init.rs b/massa-consensus-worker/src/worker/init.rs index 5af5969165c..f2adeee6d5d 100644 --- a/massa-consensus-worker/src/worker/init.rs +++ b/massa-consensus-worker/src/worker/init.rs @@ -79,8 +79,7 @@ impl ConsensusWorker { init_graph: Option, storage: Storage, ) -> Result { - let now = MassaTime::now(config.clock_compensation_millis) - .expect("Couldn't init timer consensus"); + let now = MassaTime::now().expect("Couldn't init timer consensus"); let previous_slot = get_latest_block_slot_at_timestamp( config.thread_count, config.t0, @@ -126,7 +125,7 @@ impl ConsensusWorker { config.genesis_timestamp, next_slot, )? 
- .estimate_instant(config.clock_compensation_millis)?; + .estimate_instant()?; info!( "Started node at time {}, cycle {}, period {}, thread {}", diff --git a/massa-consensus-worker/src/worker/main_loop.rs b/massa-consensus-worker/src/worker/main_loop.rs index 88489e1fafc..0cd6997c150 100644 --- a/massa-consensus-worker/src/worker/main_loop.rs +++ b/massa-consensus-worker/src/worker/main_loop.rs @@ -1,12 +1,12 @@ use std::{sync::mpsc, time::Instant}; -use massa_consensus_exports::error::ConsensusError; +use massa_consensus_exports::{error::ConsensusError, events::ConsensusEvent}; use massa_models::{ slot::Slot, timeslots::{get_block_slot_timestamp, get_closest_slot_to_timestamp}, }; use massa_time::MassaTime; -use tracing::{info, log::warn}; +use tracing::log::{info, warn}; use crate::commands::ConsensusCommand; @@ -77,8 +77,7 @@ impl ConsensusWorker { /// Extra safety against double-production caused by clock adjustments (this is the role of the `previous_slot` parameter). fn get_next_slot(&self, previous_slot: Option) -> (Slot, Instant) { // get current absolute time - let now = MassaTime::now(self.config.clock_compensation_millis) - .expect("could not get current time"); + let now = MassaTime::now().expect("could not get current time"); // get closest slot according to the current absolute time let mut next_slot = get_closest_slot_to_timestamp( @@ -105,7 +104,7 @@ impl ConsensusWorker { next_slot, ) .expect("could not get block slot timestamp") - .estimate_instant(self.config.clock_compensation_millis) + .estimate_instant() .expect("could not estimate block slot instant"); (next_slot, next_instant) @@ -118,6 +117,18 @@ impl ConsensusWorker { loop { match self.wait_slot_or_command(self.next_instant) { WaitingStatus::Ended => { + if let Some(end) = self.config.end_timestamp { + if self.next_instant > end.estimate_instant().unwrap() { + info!("This episode has come to an end, please get the latest testnet node version to continue"); + let _ = self + .shared_state + 
.read() + .channels + .controller_event_tx + .send(ConsensusEvent::Stop); + break; + } + } let previous_cycle = self .previous_slot .map(|s| s.get_cycle(self.config.periods_per_cycle)); diff --git a/massa-consensus-worker/src/worker/mod.rs b/massa-consensus-worker/src/worker/mod.rs index dc9f2e3e74b..298b2341d99 100644 --- a/massa-consensus-worker/src/worker/mod.rs +++ b/massa-consensus-worker/src/worker/mod.rs @@ -63,7 +63,7 @@ pub fn start_consensus_worker( let shared_state = Arc::new(RwLock::new(ConsensusState { storage: storage.clone(), config: config.clone(), - channels, + channels: channels.clone(), max_cliques: vec![Clique { block_ids: PreHashSet::::default(), fitness: 0, @@ -89,7 +89,7 @@ pub fn start_consensus_worker( stale_block_stats: Default::default(), protocol_blocks: Default::default(), wishlist: Default::default(), - launch_time: MassaTime::now(config.clock_compensation_millis).unwrap(), + launch_time: MassaTime::now().unwrap(), stats_desync_detection_timespan, stats_history_timespan: std::cmp::max( stats_desync_detection_timespan, @@ -99,20 +99,25 @@ pub fn start_consensus_worker( })); let shared_state_cloned = shared_state.clone(); + let mut consensus_worker = + ConsensusWorker::new(config.clone(), rx, shared_state_cloned, init_graph, storage).unwrap(); + let consensus_thread = thread::Builder::new() .name("consensus worker".into()) - .spawn(move || { - let mut consensus_worker = - ConsensusWorker::new(config, rx, shared_state_cloned, init_graph, storage).unwrap(); - consensus_worker.run() - }) + .spawn(move || consensus_worker.run()) .expect("Can't spawn consensus thread."); let manager = ConsensusManagerImpl { consensus_thread: Some((tx.clone(), consensus_thread)), }; - let controller = ConsensusControllerImpl::new(tx, shared_state, bootstrap_part_size); + let controller = ConsensusControllerImpl::new( + tx, + channels, + shared_state, + bootstrap_part_size, + config.broadcast_enabled, + ); (Box::new(controller), Box::new(manager)) } diff --git 
a/massa-execution-exports/Cargo.toml b/massa-execution-exports/Cargo.toml index 36787a3ed12..7086b5e7988 100644 --- a/massa-execution-exports/Cargo.toml +++ b/massa-execution-exports/Cargo.toml @@ -18,7 +18,9 @@ massa_storage = { path = "../massa-storage" } massa_final_state = { path = "../massa-final-state" } massa_ledger_exports = { path = "../massa-ledger-exports", optional = true } parking_lot = { version = "0.12", features = ["deadlock_detection"], optional = true } +massa-sc-runtime = { git = "https://github.com/massalabs/massa-sc-runtime" } # for more information on what are the following features used for, see the cargo.toml at workspace level [features] +gas_calibration = ["massa_ledger_exports/testing", "parking_lot"] testing = ["massa_models/testing", "massa_ledger_exports/testing", "parking_lot"] diff --git a/massa-execution-exports/src/error.rs b/massa-execution-exports/src/error.rs index 523c58fa505..4edf88b0246 100644 --- a/massa-execution-exports/src/error.rs +++ b/massa-execution-exports/src/error.rs @@ -39,6 +39,9 @@ pub enum ExecutionError { /// Not enough gas in the block: {0} NotEnoughGas(String), + /// Given gas is above the threshold: {0} + TooMuchGas(String), + /// Include operation error: {0} IncludeOperationError(String), } diff --git a/massa-execution-exports/src/event_store.rs b/massa-execution-exports/src/event_store.rs index ed45fc988b9..332ab49d631 100644 --- a/massa-execution-exports/src/event_store.rs +++ b/massa-execution-exports/src/event_store.rs @@ -9,7 +9,7 @@ use std::collections::VecDeque; /// Store for events emitted by smart contracts #[derive(Default, Debug, Clone)] -pub struct EventStore(VecDeque); +pub struct EventStore(pub VecDeque); impl EventStore { /// Push a new smart contract event to the store @@ -72,6 +72,11 @@ impl EventStore { return false; } } + if let Some(is_error) = filter.is_error { + if x.context.is_error != is_error { + return false; + } + } match (filter.emitter_address, x.context.call_stack.front()) { 
(Some(addr1), Some(addr2)) if addr1 != *addr2 => return false, (Some(_), None) => return false, @@ -110,6 +115,7 @@ fn test_prune() { call_stack: VecDeque::new(), origin_operation_id: None, is_final: false, + is_error: false, }, data: i.to_string(), }); diff --git a/massa-execution-exports/src/lib.rs b/massa-execution-exports/src/lib.rs index dc8aeaf8e7c..9aae9563e51 100644 --- a/massa-execution-exports/src/lib.rs +++ b/massa-execution-exports/src/lib.rs @@ -53,11 +53,12 @@ mod types; pub use controller_traits::{ExecutionController, ExecutionManager}; pub use error::ExecutionError; pub use event_store::EventStore; +pub use massa_sc_runtime::GasCosts; pub use settings::{ExecutionConfig, StorageCostsConstants}; pub use types::{ ExecutionAddressInfo, ExecutionOutput, ExecutionStackElement, ReadOnlyCallRequest, ReadOnlyExecutionOutput, ReadOnlyExecutionRequest, ReadOnlyExecutionTarget, }; -#[cfg(feature = "testing")] +#[cfg(any(feature = "testing", feature = "gas_calibration"))] pub mod test_exports; diff --git a/massa-execution-exports/src/settings.rs b/massa-execution-exports/src/settings.rs index d7b51137271..7a140dd39cd 100644 --- a/massa-execution-exports/src/settings.rs +++ b/massa-execution-exports/src/settings.rs @@ -3,6 +3,7 @@ //! 
This module provides the structures used to provide configuration parameters to the Execution system use massa_models::amount::Amount; +use massa_sc_runtime::GasCosts; use massa_time::MassaTime; use num::rational::Ratio; @@ -34,8 +35,6 @@ pub struct ExecutionConfig { pub roll_price: Amount, /// extra lag to add on the execution cursor to improve performance pub cursor_delay: MassaTime, - /// time compensation in milliseconds - pub clock_compensation: i64, /// genesis timestamp pub genesis_timestamp: MassaTime, /// period duration @@ -60,4 +59,8 @@ pub struct ExecutionConfig { pub max_datastore_value_size: u64, /// Storage cost constants pub storage_costs_constants: StorageCostsConstants, + /// Max gas for read only executions + pub max_read_only_gas: u64, + /// Gas costs + pub gas_costs: GasCosts, } diff --git a/massa-execution-exports/src/test_exports/config.rs b/massa-execution-exports/src/test_exports/config.rs index 0fef2147ce2..951a1fa60bd 100644 --- a/massa-execution-exports/src/test_exports/config.rs +++ b/massa-execution-exports/src/test_exports/config.rs @@ -4,6 +4,7 @@ use crate::{ExecutionConfig, StorageCostsConstants}; use massa_models::config::*; +use massa_sc_runtime::GasCosts; use massa_time::MassaTime; impl Default for ExecutionConfig { @@ -31,10 +32,8 @@ impl Default for ExecutionConfig { max_gas_per_block: MAX_GAS_PER_BLOCK, operation_validity_period: OPERATION_VALIDITY_PERIODS, periods_per_cycle: PERIODS_PER_CYCLE, - clock_compensation: Default::default(), // reset genesis timestamp because we are in test mode that can take a while to process - genesis_timestamp: MassaTime::now(0) - .expect("Impossible to reset the timestamp in test"), + genesis_timestamp: MassaTime::now().expect("Impossible to reset the timestamp in test"), t0: 64.into(), stats_time_window_duration: MassaTime::from_millis(30000), max_miss_ratio: *POS_MISS_RATE_DEACTIVATION_THRESHOLD, @@ -42,6 +41,20 @@ impl Default for ExecutionConfig { max_bytecode_size: MAX_BYTECODE_LENGTH, 
max_datastore_value_size: MAX_DATASTORE_VALUE_LENGTH, storage_costs_constants, + max_read_only_gas: 100_000_000, + gas_costs: GasCosts::new( + concat!( + env!("CARGO_MANIFEST_DIR"), + "/../massa-node/base_config/gas_costs/abi_gas_costs.json" + ) + .into(), + concat!( + env!("CARGO_MANIFEST_DIR"), + "/../massa-node/base_config/gas_costs/wasm_gas_costs.json" + ) + .into(), + ) + .unwrap(), } } } diff --git a/massa-execution-exports/src/test_exports/mock.rs b/massa-execution-exports/src/test_exports/mock.rs index 6de8b18fc5d..ac98810ed59 100644 --- a/massa-execution-exports/src/test_exports/mock.rs +++ b/massa-execution-exports/src/test_exports/mock.rs @@ -116,8 +116,8 @@ impl ExecutionController for MockExecutionController { /// Get execution statistics fn get_stats(&self) -> ExecutionStats { ExecutionStats { - time_window_start: MassaTime::now(0).unwrap(), - time_window_end: MassaTime::now(0).unwrap(), + time_window_start: MassaTime::now().unwrap(), + time_window_end: MassaTime::now().unwrap(), final_block_count: 0, final_executed_operations_count: 0, active_cursor: Slot::new(0, 0), diff --git a/massa-execution-exports/src/types.rs b/massa-execution-exports/src/types.rs index c1b2e075b18..978270df5d8 100644 --- a/massa-execution-exports/src/types.rs +++ b/massa-execution-exports/src/types.rs @@ -56,6 +56,8 @@ pub struct ReadOnlyExecutionOutput { pub out: ExecutionOutput, /// Gas cost for this execution pub gas_cost: u64, + /// Returned value from the module call + pub call_result: Vec, } /// structure describing different types of read-only execution request diff --git a/massa-execution-worker/Cargo.toml b/massa-execution-worker/Cargo.toml index 5f3ff06339c..6d403163f69 100644 --- a/massa-execution-worker/Cargo.toml +++ b/massa-execution-worker/Cargo.toml @@ -10,10 +10,12 @@ edition = "2021" anyhow = "1.0" rand = "0.8" rand_xoshiro = "0.6" +criterion = {version = "0.4", optional = true} parking_lot = { version = "0.12", features = ["deadlock_detection"] } tracing = 
"0.1" serde_json = "1.0" num = { version = "0.4", features = ["serde"] } +tempfile = { version = "3.3", optional = true } # use with gas_calibration feature # custom modules massa_async_pool = { path = "../massa-async-pool" } massa_executed_ops = { path = "../massa-executed-ops" } @@ -21,33 +23,39 @@ massa_execution_exports = { path = "../massa-execution-exports" } massa_models = { path = "../massa-models" } massa_storage = { path = "../massa-storage" } massa_hash = { path = "../massa-hash" } -massa-sc-runtime = { git = "https://github.com/massalabs/massa-sc-runtime", tag = "v0.9.0" } +massa-sc-runtime = { git = "https://github.com/massalabs/massa-sc-runtime" } massa_signature = { path = "../massa-signature" } massa_time = { path = "../massa-time" } +massa_ledger_worker = { path = "../massa-ledger-worker", optional = true } massa_ledger_exports = { path = "../massa-ledger-exports" } +massa_pos_worker = { path = "../massa-pos-worker", optional = true } massa_pos_exports = { path = "../massa-pos-exports" } massa_final_state = { path = "../massa-final-state" } [dev-dependencies] -serial_test = "0.9" +massa_pos_worker = { path = "../massa-pos-worker" } +serial_test = "0.10" tempfile = "3.2" +massa_ledger_worker = { path = "../massa-ledger-worker"} # custom modules with testing enabled -massa_pos_worker = { path = "../massa-pos-worker" } -massa_ledger_worker = { path = "../massa-ledger-worker", features = [ - "testing", -] } massa_execution_exports = { path = "../massa-execution-exports", features = [ "testing", ] } massa_final_state = { path = "../massa-final-state", features = ["testing"] } +[[bench]] +name = "basic" +harness = false + [features] sandbox = ["massa_async_pool/sandbox"] +gas_calibration = ["massa_execution_exports/gas_calibration", "massa_final_state/testing", "massa_pos_worker", "massa_ledger_worker", "tempfile"] testing = [ "massa_execution_exports/testing", "massa_ledger_exports/testing", "massa_pos_exports/testing", - "massa_pos_worker/testing", - 
"massa_ledger_worker/testing", "massa_final_state/testing", ] + +# This feature is useful as we want to have code that is compiled only when running benchmarks +benchmarking = ["criterion", "massa_pos_worker", "massa_ledger_worker", "tempfile"] \ No newline at end of file diff --git a/massa-execution-worker/benches/basic.rs b/massa-execution-worker/benches/basic.rs new file mode 100644 index 00000000000..613305e0ee3 --- /dev/null +++ b/massa-execution-worker/benches/basic.rs @@ -0,0 +1,83 @@ +#[cfg(feature = "benchmarking")] +use criterion::{black_box, criterion_group, criterion_main, Criterion}; + +#[cfg(feature = "benchmarking")] +fn criterion_benchmark(c: &mut Criterion) { + use massa_execution_worker::InterfaceImpl; + use massa_models::address::Address; + use massa_sc_runtime::{run_main, GasCosts}; + use rand::Rng; + use std::path::PathBuf; + use std::str::FromStr; + + /// This function is used to prepare the data for the benchmarks + /// It prepare the interface and the contracts to be executed. 
+ fn prepare_bench_function() -> (InterfaceImpl, Vec>, GasCosts) { + let interface = InterfaceImpl::new_default( + black_box( + Address::from_str("A12cMW9zRKFDS43Z2W88VCmdQFxmHjAo54XvuVV34UzJeXRLXW9M").unwrap(), + ), + black_box(None), + ); + let gas_costs = GasCosts::new( + concat!( + env!("CARGO_MANIFEST_DIR"), + "/../massa-node/base_config/gas_costs/abi_gas_costs.json" + ) + .into(), + concat!( + env!("CARGO_MANIFEST_DIR"), + "/../massa-node/base_config/gas_costs/wasm_gas_costs.json" + ) + .into(), + ) + .unwrap(); + let base_path = concat!(env!("CARGO_MANIFEST_DIR"), "/benches/wasm"); + let contracts_names = vec!["prints.wasm", "event_callstack.wasm"]; + let contracts = contracts_names + .iter() + .map(|name| std::fs::read::(format!("{}/{}", base_path, name).into()).unwrap()) + .collect::>>(); + (interface, contracts, gas_costs) + } + + c.bench_function("Same execution", |b| { + let (interface, contracts, gas_costs) = prepare_bench_function(); + b.iter(|| { + let contract_id = 0; + run_main( + contracts.get(contract_id).unwrap(), + 2_000_000_000, + &interface, + gas_costs.clone(), + ) + .unwrap() + }) + }); + + c.bench_function("2 different executions", |b| { + let mut rng = rand::thread_rng(); + let (interface, contracts, gas_costs) = prepare_bench_function(); + b.iter(|| { + let contract_id = rng.gen_range(0..2); + run_main( + contracts.get(contract_id).unwrap(), + 2_000_000_000, + &interface, + gas_costs.clone(), + ) + .unwrap() + }) + }); +} + +#[cfg(feature = "benchmarking")] +criterion_group!(benches, criterion_benchmark); + +#[cfg(feature = "benchmarking")] +criterion_main!(benches); + +#[cfg(not(feature = "benchmarking"))] +fn main() { + println!("Please use the `--features benchmarking` flag to run this benchmark."); +} diff --git a/massa-execution-worker/benches/wasm/event_callstack.wasm b/massa-execution-worker/benches/wasm/event_callstack.wasm new file mode 100644 index 00000000000..092bc5518e7 Binary files /dev/null and 
b/massa-execution-worker/benches/wasm/event_callstack.wasm differ diff --git a/massa-execution-worker/benches/wasm/prints.wasm b/massa-execution-worker/benches/wasm/prints.wasm new file mode 100644 index 00000000000..9f36ae5a04a Binary files /dev/null and b/massa-execution-worker/benches/wasm/prints.wasm differ diff --git a/massa-execution-worker/src/context.rs b/massa-execution-worker/src/context.rs index 381970d2c88..2a39831ff2c 100644 --- a/massa-execution-worker/src/context.rs +++ b/massa-execution-worker/src/context.rs @@ -37,7 +37,7 @@ use tracing::debug; /// A snapshot taken from an `ExecutionContext` and that represents its current state. /// The `ExecutionContext` state can then be restored later from this snapshot. -pub(crate) struct ExecutionContextSnapshot { +pub struct ExecutionContextSnapshot { /// speculative ledger changes caused so far in the context pub ledger_changes: LedgerChanges, @@ -69,13 +69,16 @@ pub(crate) struct ExecutionContextSnapshot { /// An execution context that needs to be initialized before executing bytecode, /// passed to the VM to interact with during bytecode execution (through ABIs), /// and read after execution to gather results. -pub(crate) struct ExecutionContext { +pub struct ExecutionContext { /// configuration config: ExecutionConfig, /// speculative ledger state, /// as seen after everything that happened so far in the context + #[cfg(all(not(feature = "gas_calibration"), not(feature = "benchmarking")))] speculative_ledger: SpeculativeLedger, + #[cfg(any(feature = "gas_calibration", feature = "benchmarking"))] + pub(crate) speculative_ledger: SpeculativeLedger, /// speculative asynchronous pool state, /// as seen after everything that happened so far in the context @@ -197,19 +200,8 @@ impl ExecutionContext { /// /// # Arguments /// * `snapshot`: a saved snapshot to be restored - /// * `with_error`: an optional execution error to emit as an event conserved after snapshot reset. 
- pub fn reset_to_snapshot( - &mut self, - snapshot: ExecutionContextSnapshot, - with_error: Option, - ) { - // Create error event, if any. - let err_event = with_error.map(|err| { - self.event_create( - serde_json::json!({ "massa_execution_error": format!("{}", err) }).to_string(), - ) - }); - + /// * `error`: an execution error to emit as an event conserved after snapshot reset. + pub fn reset_to_snapshot(&mut self, snapshot: ExecutionContextSnapshot, error: ExecutionError) { // Reset context to snapshot. self.speculative_ledger .reset_to_snapshot(snapshot.ledger_changes); @@ -222,14 +214,20 @@ impl ExecutionContext { self.created_addr_index = snapshot.created_addr_index; self.created_event_index = snapshot.created_event_index; self.stack = snapshot.stack; - self.events = snapshot.events; self.unsafe_rng = snapshot.unsafe_rng; - // If there was an error, emit the corresponding event now. - // Note that the context event counter is properly handled by event_emit (see doc). - if let Some(event) = err_event { - self.event_emit(event); + // For events, set snapshot delta to error events. + // Start iterating from snapshot events length because we are dealing with a VecDeque. + for event in self.events.0.range_mut(snapshot.events.0.len()..) { + event.context.is_error = true; } + + // Emit the error event. + // Note that the context event counter is properly handled by event_emit (see doc). 
+ self.event_emit(self.event_create( + serde_json::json!({ "massa_execution_error": format!("{}", error) }).to_string(), + true, + )); } /// Create a new `ExecutionContext` for read-only execution @@ -689,7 +687,10 @@ impl ExecutionContext { let slot = self.slot; // settle emitted async messages and reimburse the senders of deleted messages - let deleted_messages = self.speculative_async_pool.settle_slot(&slot); + let ledger_changes = self.speculative_ledger.take(); + let deleted_messages = self + .speculative_async_pool + .settle_slot(&slot, &ledger_changes); for (_msg_id, msg) in deleted_messages { self.cancel_async_message(&msg); } @@ -713,7 +714,7 @@ impl ExecutionContext { // generate the execution output let state_changes = StateChanges { - ledger_changes: self.speculative_ledger.take(), + ledger_changes, async_pool_changes: self.speculative_async_pool.take(), pos_changes: self.speculative_roll_state.take(), executed_ops_changes: self.speculative_executed_ops.take(), @@ -763,7 +764,7 @@ impl ExecutionContext { /// /// # Arguments: /// data: the string data that is the payload of the event - pub fn event_create(&self, data: String) -> SCOutputEvent { + pub fn event_create(&self, data: String, is_error: bool) -> SCOutputEvent { // Gather contextual information from the execution context let context = EventExecutionContext { slot: self.slot, @@ -773,6 +774,7 @@ impl ExecutionContext { index_in_slot: self.created_event_index, origin_operation_id: self.origin_operation_id, is_final: false, + is_error, }; // Return the event diff --git a/massa-execution-worker/src/execution.rs b/massa-execution-worker/src/execution.rs index c2bf27ad1f8..4e14a67b5fa 100644 --- a/massa-execution-worker/src/execution.rs +++ b/massa-execution-worker/src/execution.rs @@ -114,10 +114,7 @@ impl ExecutionState { // no active slots executed yet: set active_cursor to the last final block active_cursor: last_final_slot, final_cursor: last_final_slot, - stats_counter: 
ExecutionStatsCounter::new( - config.stats_time_window_duration, - config.clock_compensation, - ), + stats_counter: ExecutionStatsCounter::new(config.stats_time_window_duration), config, } } @@ -320,7 +317,7 @@ impl ExecutionState { operation_id, &err )); debug!("{}", &err); - context.reset_to_snapshot(context_snapshot, Some(err)); + context.reset_to_snapshot(context_snapshot, err); } } } @@ -506,8 +503,13 @@ impl ExecutionState { }; // run the VM on the bytecode contained in the operation - match massa_sc_runtime::run_main(bytecode, *max_gas, &*self.execution_interface) { - Ok(_reamining_gas) => {} + match massa_sc_runtime::run_main( + bytecode, + *max_gas, + &*self.execution_interface, + self.config.gas_costs.clone(), + ) { + Ok(_response) => {} Err(err) => { // there was an error during bytecode execution return Err(ExecutionError::RuntimeError(format!( @@ -601,8 +603,9 @@ impl ExecutionState { target_func, param, &*self.execution_interface, + self.config.gas_costs.clone(), ) { - Ok(_reamining_gas) => {} + Ok(_response) => {} Err(err) => { // there was an error during bytecode execution return Err(ExecutionError::RuntimeError(format!( @@ -660,7 +663,7 @@ impl ExecutionState { "message data does not convert to utf-8".into(), ) }; - context.reset_to_snapshot(context_snapshot, Some(err.clone())); + context.reset_to_snapshot(context_snapshot, err.clone()); context.cancel_async_message(&message); return Err(err); } @@ -675,7 +678,7 @@ impl ExecutionState { "could not credit coins to target of async execution: {}", err )); - context.reset_to_snapshot(context_snapshot, Some(err.clone())); + context.reset_to_snapshot(context_snapshot, err.clone()); context.cancel_async_message(&message); return Err(err); } @@ -690,6 +693,7 @@ impl ExecutionState { &message.handler, &message.data, &*self.execution_interface, + self.config.gas_costs.clone(), ) { // execution failed: reset context to snapshot and reimburse sender let err = ExecutionError::RuntimeError(format!( @@ -697,7 
+701,7 @@ impl ExecutionState { err )); let mut context = context_guard!(self); - context.reset_to_snapshot(context_snapshot, Some(err.clone())); + context.reset_to_snapshot(context_snapshot, err.clone()); context.cancel_async_message(&message); Err(err) } else { @@ -1011,6 +1015,14 @@ impl ExecutionState { // TODO ensure that speculative things are reset after every execution ends (incl. on error and readonly) // otherwise, on prod stats accumulation etc... from the API we might be counting the remainder of this speculative execution + // check if read only request max gas is above the threshold + if req.max_gas > self.config.max_read_only_gas { + return Err(ExecutionError::TooMuchGas(format!( + "execution gas for read-only call is {} which is above the maximum allowed {}", + req.max_gas, self.config.max_read_only_gas + ))); + } + // set the execution slot to be the one after the latest executed active slot let slot = self .active_cursor @@ -1027,15 +1039,20 @@ impl ExecutionState { self.active_history.clone(), ); - // run the intepreter according to the target type - let remaining_gas = match req.target { + // run the interpreter according to the target type + let exec_response = match req.target { ReadOnlyExecutionTarget::BytecodeExecution(bytecode) => { // set the execution context for execution *context_guard!(self) = execution_context; // run the bytecode's main function - massa_sc_runtime::run_main(&bytecode, req.max_gas, &*self.execution_interface) - .map_err(|err| ExecutionError::RuntimeError(err.to_string()))? + massa_sc_runtime::run_main( + &bytecode, + req.max_gas, + &*self.execution_interface, + self.config.gas_costs.clone(), + ) + .map_err(|err| ExecutionError::RuntimeError(err.to_string()))? } ReadOnlyExecutionTarget::FunctionCall { target_addr, @@ -1057,6 +1074,7 @@ impl ExecutionState { &target_func, ¶meter, &*self.execution_interface, + self.config.gas_costs.clone(), ) .map_err(|err| ExecutionError::RuntimeError(err.to_string()))? 
} @@ -1066,7 +1084,8 @@ impl ExecutionState { let execution_output = context_guard!(self).settle_slot(); Ok(ReadOnlyExecutionOutput { out: execution_output, - gas_cost: req.max_gas.saturating_sub(remaining_gas), + gas_cost: req.max_gas.saturating_sub(exec_response.remaining_gas), + call_result: exec_response.ret, }) } diff --git a/massa-execution-worker/src/interface_impl.rs b/massa-execution-worker/src/interface_impl.rs index 4bdd73be77d..e13c3b71f40 100644 --- a/massa-execution-worker/src/interface_impl.rs +++ b/massa-execution-worker/src/interface_impl.rs @@ -7,9 +7,10 @@ use crate::context::ExecutionContext; use anyhow::{anyhow, bail, Result}; -use massa_async_pool::AsyncMessage; +use massa_async_pool::{AsyncMessage, AsyncMessageTrigger}; use massa_execution_exports::ExecutionConfig; use massa_execution_exports::ExecutionStackElement; +use massa_models::config::MAX_DATASTORE_KEY_LENGTH; use massa_models::{ address::Address, amount::Amount, slot::Slot, timeslots::get_block_slot_timestamp, }; @@ -21,6 +22,9 @@ use std::str::FromStr; use std::sync::Arc; use tracing::debug; +#[cfg(any(feature = "gas_calibration", feature = "benchmarking"))] +use massa_models::datastore::Datastore; + /// helper for locking the context mutex macro_rules! context_guard { ($self:ident) => { @@ -30,7 +34,7 @@ macro_rules! 
context_guard { /// an implementation of the Interface trait (see massa-sc-runtime crate) #[derive(Clone)] -pub(crate) struct InterfaceImpl { +pub struct InterfaceImpl { /// execution configuration config: ExecutionConfig, /// thread-safe shared access to the execution context (see context.rs) @@ -46,6 +50,35 @@ impl InterfaceImpl { pub fn new(config: ExecutionConfig, context: Arc>) -> InterfaceImpl { InterfaceImpl { config, context } } + + #[cfg(any(feature = "gas_calibration", feature = "benchmarking"))] + /// Used to create an default interface to run SC in a test environment + pub fn new_default( + sender_addr: Address, + operation_datastore: Option, + ) -> InterfaceImpl { + use massa_ledger_exports::{LedgerEntry, SetUpdateOrDelete}; + + let config = ExecutionConfig::default(); + let (final_state, _tempfile, _tempdir) = crate::tests::get_sample_state().unwrap(); + let mut execution_context = + ExecutionContext::new(config.clone(), final_state, Default::default()); + execution_context.stack = vec![ExecutionStackElement { + address: sender_addr, + coins: Amount::zero(), + owned_addresses: vec![sender_addr], + operation_datastore, + }]; + execution_context.speculative_ledger.added_changes.0.insert( + sender_addr, + SetUpdateOrDelete::Set(LedgerEntry { + balance: Amount::from_mantissa_scale(1_000_000_000, 0), + ..Default::default() + }), + ); + let context = Arc::new(Mutex::new(execution_context)); + InterfaceImpl::new(config, context) + } } impl InterfaceClone for InterfaceImpl { @@ -62,7 +95,11 @@ impl InterfaceClone for InterfaceImpl { impl Interface for InterfaceImpl { /// prints a message in the node logs at log level 3 (debug) fn print(&self, message: &str) -> Result<()> { - debug!("SC print: {}", message); + if cfg!(test) { + println!("SC print: {}", message); + } else { + debug!("SC print: {}", message); + } Ok(()) } @@ -342,6 +379,46 @@ impl Interface for InterfaceImpl { Ok(context.has_data_entry(&addr, key)) } + /// Check whether or not the caller has 
write access in the current context + /// + /// # Returns + /// true if the caller has write access + fn caller_has_write_access(&self) -> Result { + let context = context_guard!(self); + let mut call_stack_iter = context.stack.iter().rev(); + let caller_owned_addresses = if let Some(last) = call_stack_iter.next() { + if let Some(prev_to_last) = call_stack_iter.next() { + prev_to_last.owned_addresses.clone() + } else { + last.owned_addresses.clone() + } + } else { + return Err(anyhow!("empty stack")); + }; + let current_address = context.get_current_address()?; + Ok(caller_owned_addresses.contains(¤t_address)) + } + + /// Returns bytecode of the current address + fn raw_get_bytecode(&self) -> Result> { + let context = context_guard!(self); + let address = context.get_current_address()?; + match context.get_bytecode(&address) { + Some(bytecode) => Ok(bytecode), + _ => bail!("bytecode not found"), + } + } + + /// Returns bytecode of the target address + fn raw_get_bytecode_for(&self, address: &str) -> Result> { + let context = context_guard!(self); + let address = Address::from_str(address)?; + match context.get_bytecode(&address) { + Some(bytecode) => Ok(bytecode), + _ => bail!("bytecode not found"), + } + } + /// Get the operation datastore keys (aka entries). /// Note that the datastore is only accessible to the initial caller level. 
/// @@ -423,7 +500,7 @@ impl Interface for InterfaceImpl { /// # Returns /// The string representation of the resulting address fn address_from_public_key(&self, public_key: &str) -> Result { - let public_key = massa_signature::PublicKey::from_bs58_check(public_key)?; + let public_key = massa_signature::PublicKey::from_str(public_key)?; let addr = massa_models::address::Address::from_public_key(&public_key); Ok(addr.to_string()) } @@ -442,7 +519,7 @@ impl Interface for InterfaceImpl { Ok(sig) => sig, Err(_) => return Ok(false), }; - let public_key = match massa_signature::PublicKey::from_bs58_check(public_key) { + let public_key = match massa_signature::PublicKey::from_str(public_key) { Ok(pubk) => pubk, Err(_) => return Ok(false), }; @@ -527,7 +604,7 @@ impl Interface for InterfaceImpl { /// data: the string data that is the payload of the event fn generate_event(&self, data: String) -> Result<()> { let mut context = context_guard!(self); - let event = context.event_create(data); + let event = context.event_create(data, false); context.event_emit(event); Ok(()) } @@ -586,6 +663,7 @@ impl Interface for InterfaceImpl { raw_fee: u64, raw_coins: u64, data: &[u8], + filter: Option<(&str, Option<&[u8]>)>, ) -> Result<()> { if validity_start.1 >= self.config.thread_count { bail!("validity start thread exceeds the configuration thread count") @@ -613,6 +691,20 @@ impl Interface for InterfaceImpl { Slot::new(validity_start.0, validity_start.1), Slot::new(validity_end.0, validity_end.1), data.to_vec(), + filter + .map(|(addr, key)| { + let datastore_key = key.map(|k| k.to_vec()); + if let Some(ref k) = datastore_key { + if k.len() > MAX_DATASTORE_KEY_LENGTH as usize { + bail!("datastore key is too long") + } + } + Ok::(AsyncMessageTrigger { + address: Address::from_str(addr)?, + datastore_key, + }) + }) + .transpose()?, )); execution_context.created_message_index += 1; Ok(()) diff --git a/massa-execution-worker/src/lib.rs b/massa-execution-worker/src/lib.rs index 
d48babb3149..2c89fa46d72 100644 --- a/massa-execution-worker/src/lib.rs +++ b/massa-execution-worker/src/lib.rs @@ -99,5 +99,8 @@ mod worker; pub use worker::start_execution_worker; -#[cfg(test)] +#[cfg(any(feature = "gas_calibration", feature = "benchmarking"))] +pub use interface_impl::InterfaceImpl; + +#[cfg(any(test, feature = "gas_calibration", feature = "benchmarking"))] mod tests; diff --git a/massa-execution-worker/src/slot_sequencer.rs b/massa-execution-worker/src/slot_sequencer.rs index 51dfd1b669f..9274857c099 100644 --- a/massa-execution-worker/src/slot_sequencer.rs +++ b/massa-execution-worker/src/slot_sequencer.rs @@ -187,7 +187,7 @@ impl SlotSequencer { /// Note that this time cursor is shifted by `self.config.cursor_delay` /// to avoid computing speculative slots that are too recent, and therefore subject to frequent re-writes. fn get_time_cursor(&self) -> Slot { - let shifted_now = MassaTime::now(self.config.clock_compensation) + let shifted_now = MassaTime::now() .expect("could not get current time") .saturating_sub(self.config.cursor_delay); get_latest_block_slot_at_timestamp( @@ -726,7 +726,7 @@ impl SlotSequencer { // This means that we are still waiting for `Self::update` to be called for the first time. // To avoid CPU-intensive loops upstream, just register a wake-up after a single slot delay (t0/T). 
if self.sequence.is_empty() { - return MassaTime::now(self.config.clock_compensation) + return MassaTime::now() .expect("could not get current time") .saturating_add( self.config diff --git a/massa-execution-worker/src/speculative_async_pool.rs b/massa-execution-worker/src/speculative_async_pool.rs index 8e166ae8f6d..918013ebe9e 100644 --- a/massa-execution-worker/src/speculative_async_pool.rs +++ b/massa-execution-worker/src/speculative_async_pool.rs @@ -6,6 +6,7 @@ use crate::active_history::ActiveHistory; use massa_async_pool::{AsyncMessage, AsyncMessageId, AsyncPool, AsyncPoolChanges}; use massa_final_state::FinalState; +use massa_ledger_exports::LedgerChanges; use massa_models::slot::Slot; use parking_lot::RwLock; use std::sync::Arc; @@ -21,7 +22,7 @@ pub(crate) struct SpeculativeAsyncPool { /// List of newly emitted asynchronous messages emitted: Vec<(AsyncMessageId, AsyncMessage)>, - /// List of changes (additions/deletions) to the pool after settling emitted messages + /// List of changes (additions/deletions/activation) to the pool after settling emitted messages settled_changes: AsyncPoolChanges, } @@ -98,17 +99,27 @@ impl SpeculativeAsyncPool { /// /// # Arguments /// * slot: slot that is being settled + /// * ledger_changes: ledger changes for that slot, used to see if we can activate some messages /// /// # Returns /// the list of deleted `(message_id, message)`, used for reimbursement - pub fn settle_slot(&mut self, slot: &Slot) -> Vec<(AsyncMessageId, AsyncMessage)> { - let deleted_messages = self.async_pool.settle_slot(slot, &mut self.emitted); + pub fn settle_slot( + &mut self, + slot: &Slot, + ledger_changes: &LedgerChanges, + ) -> Vec<(AsyncMessageId, AsyncMessage)> { + let (deleted_messages, triggered_messages) = + self.async_pool + .settle_slot(slot, &mut self.emitted, ledger_changes); for (msg_id, msg) in std::mem::take(&mut self.emitted) { self.settled_changes.push_add(msg_id, msg); } for (msg_id, _msg) in deleted_messages.iter() { 
self.settled_changes.push_delete(*msg_id); } + for (msg_id, _msg) in triggered_messages.iter() { + self.settled_changes.push_activate(*msg_id); + } deleted_messages } } diff --git a/massa-execution-worker/src/speculative_ledger.rs b/massa-execution-worker/src/speculative_ledger.rs index 8e5e30718b3..49fa3e3128f 100644 --- a/massa-execution-worker/src/speculative_ledger.rs +++ b/massa-execution-worker/src/speculative_ledger.rs @@ -31,7 +31,10 @@ pub(crate) struct SpeculativeLedger { active_history: Arc>, /// list of ledger changes that were applied to this `SpeculativeLedger` since its creation + #[cfg(all(not(feature = "gas_calibration"), not(feature = "benchmarking")))] added_changes: LedgerChanges, + #[cfg(any(feature = "gas_calibration", feature = "benchmarking"))] + pub added_changes: LedgerChanges, /// max datastore key length max_datastore_key_length: u8, diff --git a/massa-execution-worker/src/stats.rs b/massa-execution-worker/src/stats.rs index 0c9f56300d0..67f35a74b2f 100644 --- a/massa-execution-worker/src/stats.rs +++ b/massa-execution-worker/src/stats.rs @@ -9,8 +9,6 @@ use std::collections::VecDeque; pub struct ExecutionStatsCounter { /// duration of the time window time_window_duration: MassaTime, - /// time compensation (milliseconds) - compensation_millis: i64, /// final blocks in the time window (count, instant) final_blocks: VecDeque<(usize, MassaTime)>, /// final operations executed in the time window (count, instant) @@ -19,10 +17,9 @@ pub struct ExecutionStatsCounter { impl ExecutionStatsCounter { /// create a new `ExecutionStatsCounter` - pub fn new(time_window_duration: MassaTime, compensation_millis: i64) -> Self { + pub fn new(time_window_duration: MassaTime) -> Self { ExecutionStatsCounter { time_window_duration, - compensation_millis, final_blocks: Default::default(), final_executed_ops: Default::default(), } @@ -53,24 +50,21 @@ impl ExecutionStatsCounter { /// register final blocks pub fn register_final_blocks(&mut self, count: usize) { 
- let current_time = - MassaTime::now(self.compensation_millis).expect("could not get current time"); + let current_time = MassaTime::now().expect("could not get current time"); self.final_blocks.push_back((count, current_time)); self.refresh(current_time); } /// register final executed operations pub fn register_final_executed_operations(&mut self, count: usize) { - let current_time = - MassaTime::now(self.compensation_millis).expect("could not get current time"); + let current_time = MassaTime::now().expect("could not get current time"); self.final_executed_ops.push_back((count, current_time)); self.refresh(current_time); } /// get statistics pub fn get_stats(&self, active_cursor: Slot) -> ExecutionStats { - let current_time = - MassaTime::now(self.compensation_millis).expect("could not get current time"); + let current_time = MassaTime::now().expect("could not get current time"); let start_time = current_time.saturating_sub(self.time_window_duration); let map_func = |pair: &(usize, MassaTime)| -> usize { let (cnt, t) = pair; diff --git a/massa-execution-worker/src/tests/mock.rs b/massa-execution-worker/src/tests/mock.rs index 9a8c224b4bb..119ae0226fa 100644 --- a/massa-execution-worker/src/tests/mock.rs +++ b/massa-execution-worker/src/tests/mock.rs @@ -1,18 +1,8 @@ -use massa_execution_exports::ExecutionError; use massa_final_state::{FinalState, FinalStateConfig}; -use massa_hash::Hash; use massa_ledger_exports::LedgerEntry; use massa_ledger_exports::{LedgerConfig, LedgerController, LedgerError}; use massa_ledger_worker::FinalLedger; -use massa_models::{ - address::Address, - amount::Amount, - block::{Block, BlockHeader, BlockHeaderSerializer, BlockSerializer, WrappedBlock}, - config::THREAD_COUNT, - operation::WrappedOperation, - slot::Slot, - wrapped::WrappedContent, -}; +use massa_models::{address::Address, amount::Amount, config::THREAD_COUNT}; use massa_pos_exports::SelectorConfig; use massa_pos_worker::start_selector_worker; use massa_signature::KeyPair; 
@@ -27,6 +17,20 @@ use std::{ use tempfile::NamedTempFile; use tempfile::TempDir; +#[cfg(feature = "testing")] +use massa_models::{ + block::{Block, BlockHeader, BlockHeaderSerializer, BlockSerializer, WrappedBlock}, + operation::WrappedOperation, + slot::Slot, + wrapped::WrappedContent, +}; + +#[cfg(feature = "testing")] +use massa_execution_exports::ExecutionError; + +#[cfg(feature = "testing")] +use massa_hash::Hash; + fn get_initials() -> (NamedTempFile, HashMap) { let file = NamedTempFile::new().unwrap(); let mut rolls: BTreeMap = BTreeMap::new(); @@ -58,6 +62,32 @@ fn get_initials() -> (NamedTempFile, HashMap) { }, ); + // thread 2 / 31 + let keypair_2 = + KeyPair::from_str("S12APSAzMPsJjVGWzUJ61ZwwGFTNapA4YtArMKDyW4edLu6jHvCr").unwrap(); + let addr_2 = Address::from_public_key(&keypair_2.get_public_key()); + rolls.insert(addr_2, 100); + ledger.insert( + addr_2, + LedgerEntry { + balance: Amount::from_str("300_000").unwrap(), + ..Default::default() + }, + ); + + // thread 3 / 31 + let keypair_3 = + KeyPair::from_str("S12onbtxzgHcDSrVMp9bzP1cUjno8V5hZd4yYiqaMmC3nq4z7fSv").unwrap(); + let addr_3 = Address::from_public_key(&keypair_3.get_public_key()); + rolls.insert(addr_3, 100); + ledger.insert( + addr_3, + LedgerEntry { + balance: Amount::from_str("300_000").unwrap(), + ..Default::default() + }, + ); + // write file serde_json::to_writer_pretty::<&File, BTreeMap>(file.as_file(), &rolls) .expect("unable to write ledger file"); @@ -70,6 +100,7 @@ fn get_initials() -> (NamedTempFile, HashMap) { /// Same as `get_random_address()` and return `keypair` associated /// to the address. +#[cfg(feature = "testing")] pub fn get_random_address_full() -> (Address, KeyPair) { let keypair = KeyPair::generate(); (Address::from_public_key(&keypair.get_public_key()), keypair) @@ -105,6 +136,7 @@ pub fn get_sample_state() -> Result<(Arc>, NamedTempFile, Tem /// creator. /// /// Return a result that should be unwrapped in the root `#[test]` routine. 
+#[cfg(feature = "testing")] pub fn create_block( creator_keypair: KeyPair, operations: Vec, diff --git a/massa-execution-worker/src/tests/mod.rs b/massa-execution-worker/src/tests/mod.rs index 7ab4452af2d..f9ae64fcd33 100644 --- a/massa-execution-worker/src/tests/mod.rs +++ b/massa-execution-worker/src/tests/mod.rs @@ -1,5 +1,13 @@ // Copyright (c) 2022 MASSA LABS +#[cfg(any(test, feature = "gas_calibration", feature = "benchmarking"))] mod mock; + +#[cfg(all(not(feature = "gas_calibration"), not(feature = "benchmarking")))] mod scenarios_mandatories; + +#[cfg(all(not(feature = "gas_calibration"), not(feature = "benchmarking")))] mod tests_active_history; + +#[cfg(any(feature = "gas_calibration", feature = "benchmarking"))] +pub use mock::get_sample_state; diff --git a/massa-execution-worker/src/tests/scenarios_mandatories.rs b/massa-execution-worker/src/tests/scenarios_mandatories.rs index 531e0316770..2ca0e35490f 100644 --- a/massa-execution-worker/src/tests/scenarios_mandatories.rs +++ b/massa-execution-worker/src/tests/scenarios_mandatories.rs @@ -113,6 +113,7 @@ fn init_execution_worker( /// This test can fail if the gas is going up in the execution #[test] #[serial] +#[ignore] fn test_nested_call_gas_usage() { // setup the period duration let exec_cfg = ExecutionConfig { @@ -179,7 +180,7 @@ fn test_nested_call_gas_usage() { let balance_expected = Amount::from_str("300000") .unwrap() // Gas fee - .saturating_sub(Amount::from_str("100000").unwrap()) + .saturating_sub(Amount::from_str("10").unwrap()) // Storage cost base .saturating_sub(exec_cfg.storage_costs_constants.ledger_entry_base_cost) // Storage cost bytecode @@ -302,7 +303,7 @@ fn send_and_receive_async_message() { Default::default(), block_storage.clone(), ); - // sleep for 100ms to reach the message execution period + // sleep for 150ms to reach the message execution period std::thread::sleep(Duration::from_millis(150)); // retrieve events emitted by smart contracts @@ -321,6 +322,341 @@ fn 
send_and_receive_async_message() { manager.stop(); } +/// Context +/// +/// Functional test for local smart-contract execution +/// +/// 1. a block is created with 2 ExecuteSC operations +/// it contains 1 local execution and 1 local call +/// both operation datastores have the bytecode of local_function.wasm +/// 2. store and set the block as final +/// 3. wait for execution +/// 4. retrieve events emitted by the initial an sub functions +/// 5. match event and call stack to make sure that executions were local +#[test] +#[serial] +fn local_execution() { + // setup the period duration and cursor delay + let exec_cfg = ExecutionConfig { + t0: 100.into(), + cursor_delay: 0.into(), + ..ExecutionConfig::default() + }; + // get a sample final state + let (sample_state, _keep_file, _keep_dir) = get_sample_state().unwrap(); + + // init the storage + let mut storage = Storage::create_root(); + // start the execution worker + let (mut manager, controller) = start_execution_worker( + exec_cfg.clone(), + sample_state.clone(), + sample_state.read().pos_state.selector.clone(), + ); + // initialize the execution system with genesis blocks + init_execution_worker(&exec_cfg, &storage, controller.clone()); + // keypair associated to thread 0 + let keypair = KeyPair::from_str("S1JJeHiZv1C1zZN5GLFcbz6EXYiccmUPLkYuDFA3kayjxP39kFQ").unwrap(); + // load bytecodes + // you can check the source code of the following wasm files in massa-unit-tests-src + let exec_bytecode = include_bytes!("./wasm/local_execution.wasm"); + let call_bytecode = include_bytes!("./wasm/local_call.wasm"); + let datastore_bytecode = include_bytes!("./wasm/local_function.wasm").to_vec(); + let mut datastore = BTreeMap::new(); + datastore.insert(b"smart-contract".to_vec(), datastore_bytecode); + + // create the block contaning the operations + let local_exec_op = + create_execute_sc_operation(&keypair, exec_bytecode, datastore.clone()).unwrap(); + let local_call_op = create_execute_sc_operation(&keypair, 
call_bytecode, datastore).unwrap(); + storage.store_operations(vec![local_exec_op.clone(), local_call_op.clone()]); + let block = create_block( + KeyPair::generate(), + vec![local_exec_op.clone(), local_call_op.clone()], + Slot::new(1, 0), + ) + .unwrap(); + // store the block in storage + storage.store_block(block.clone()); + + // set our block as a final block so the message is sent + let mut finalized_blocks: HashMap = Default::default(); + finalized_blocks.insert(block.content.header.content.slot, block.id); + let mut block_storage: PreHashMap = Default::default(); + block_storage.insert(block.id, storage.clone()); + controller.update_blockclique_status( + finalized_blocks, + Default::default(), + block_storage.clone(), + ); + // sleep for 100ms to wait for execution + std::thread::sleep(Duration::from_millis(100)); + + // retrieve events emitted by smart contracts + let events = controller.get_filtered_sc_output_event(EventFilter { + ..Default::default() + }); + + // match the events, check balance and call stack to make sure the executions were local + assert!(events.len() == 8, "8 events were expected"); + assert_eq!( + Amount::from_raw(events[1].data.parse().unwrap()), + Amount::from_str("299990").unwrap() // start (300_000) - fee (1000) + ); + assert_eq!(events[1].context.call_stack.len(), 1); + assert_eq!( + events[1].context.call_stack.back().unwrap(), + &Address::from_str("A12eS5qggxuvqviD5eQ72oM2QhGwnmNbT1BaxVXU4hqQ8rAYXFe").unwrap() + ); + assert_eq!(events[2].data, "one local execution completed"); + assert_eq!( + Amount::from_raw(events[5].data.parse().unwrap()), + Amount::from_str("299_979.05275").unwrap() // start (299_000) - fee (1000) - storage cost + ); + assert_eq!(events[5].context.call_stack.len(), 1); + assert_eq!( + events[1].context.call_stack.back().unwrap(), + &Address::from_str("A12eS5qggxuvqviD5eQ72oM2QhGwnmNbT1BaxVXU4hqQ8rAYXFe").unwrap() + ); + assert_eq!(events[6].data, "one local call completed"); + + // stop the execution 
controller + manager.stop(); +} + +/// Context +/// +/// Functional test for sc deployment utility functions, `functionExists` and `callerHasWriteAccess` +/// +/// 1. a block is created with one ExecuteSC operation containing +/// a deployment sc as bytecode to execute and a deplyed sc as an op datatsore entry +/// 2. store and set the block as final +/// 3. wait for execution +/// 4. retrieve events emitted by the initial an sub functions +/// 5. match events to make sure that `functionExists` and `callerHasWriteAccess` had the expected behaviour +#[test] +#[serial] +fn sc_deployment() { + // setup the period duration and cursor delay + let exec_cfg = ExecutionConfig { + t0: 100.into(), + cursor_delay: 0.into(), + ..ExecutionConfig::default() + }; + // get a sample final state + let (sample_state, _keep_file, _keep_dir) = get_sample_state().unwrap(); + + // init the storage + let mut storage = Storage::create_root(); + // start the execution worker + let (mut manager, controller) = start_execution_worker( + exec_cfg.clone(), + sample_state.clone(), + sample_state.read().pos_state.selector.clone(), + ); + // initialize the execution system with genesis blocks + init_execution_worker(&exec_cfg, &storage, controller.clone()); + // keypair associated to thread 0 + let keypair = KeyPair::from_str("S1JJeHiZv1C1zZN5GLFcbz6EXYiccmUPLkYuDFA3kayjxP39kFQ").unwrap(); + // load bytecodes + // you can check the source code of the following wasm files in massa-unit-tests-src + let op_bytecode = include_bytes!("./wasm/deploy_sc.wasm"); + let datastore_bytecode = include_bytes!("./wasm/init_sc.wasm").to_vec(); + let mut datastore = BTreeMap::new(); + datastore.insert(b"smart-contract".to_vec(), datastore_bytecode); + + // create the block contaning the operation + let op = create_execute_sc_operation(&keypair, op_bytecode, datastore.clone()).unwrap(); + storage.store_operations(vec![op.clone()]); + let block = create_block(KeyPair::generate(), vec![op], Slot::new(1, 0)).unwrap(); 
+ // store the block in storage + storage.store_block(block.clone()); + + // set our block as a final block so the message is sent + let mut finalized_blocks: HashMap = Default::default(); + finalized_blocks.insert(block.content.header.content.slot, block.id); + let mut block_storage: PreHashMap = Default::default(); + block_storage.insert(block.id, storage.clone()); + controller.update_blockclique_status( + finalized_blocks, + Default::default(), + block_storage.clone(), + ); + // sleep for 100ms to wait for execution + std::thread::sleep(Duration::from_millis(100)); + + // retrieve events emitted by smart contracts + let events = controller.get_filtered_sc_output_event(EventFilter { + ..Default::default() + }); + + // match the events + assert!(events.len() == 3, "3 events were expected"); + assert_eq!(events[0].data, "sc created"); + assert_eq!(events[1].data, "constructor exists and will be called"); + assert_eq!(events[2].data, "constructor called by deployer"); + + // stop the execution controller + manager.stop(); +} + +/// # Context +/// +/// Functional test for asynchronous messages sending and handling with a filter +/// +/// 1. a block is created containing an `execute_sc` operation +/// 2. this operation deploy a smart contract and call his function `test` +/// 3. `test` generates an event and place a message to be triggered once again if `test2` datastore key of address `A12DDxjqtBVshdQ4nLqYg6GwRddY5LzEC7bnatVxB5SFtpbCFj8E` is created/modify +/// 4. we set the created block as finalized so the message is actually sent +/// 5. we execute the following slots for 300 milliseconds to reach the message execution period +/// 6. We send a new operation with a smart contract that modify `test` datastore key and so doesn't trigger the message. +/// 7. We send a new operation with a smart contract that create `test2` datastore key and so trigger the message. +/// 8. once the execution period is over we stop the execution controller +/// 9. 
we retrieve the events emitted by smart contract +/// 10. `test` handler function should have emitted a second event +/// 11. we check if they are events +/// 12. if they are some, we verify that the data has the correct value +#[test] +#[serial] +fn send_and_receive_async_message_with_trigger() { + // setup the period duration and the maximum gas for asynchronous messages execution + let exec_cfg = ExecutionConfig { + t0: 100.into(), + max_async_gas: 1_000_000_000, + cursor_delay: 0.into(), + ..ExecutionConfig::default() + }; + // get a sample final state + let (sample_state, _keep_file, _keep_dir) = get_sample_state().unwrap(); + + let mut blockclique_blocks: HashMap = HashMap::new(); + // init the storage + let mut storage = Storage::create_root(); + // start the execution worker + let (mut manager, controller) = start_execution_worker( + exec_cfg.clone(), + sample_state.clone(), + sample_state.read().pos_state.selector.clone(), + ); + // initialize the execution system with genesis blocks + init_execution_worker(&exec_cfg, &storage, controller.clone()); + // keypair associated to thread 0 + let keypair = KeyPair::from_str("S1JJeHiZv1C1zZN5GLFcbz6EXYiccmUPLkYuDFA3kayjxP39kFQ").unwrap(); + // load bytecode + // you can check the source code of the following wasm file in massa-unit-tests-src + let bytecode = include_bytes!("./wasm/send_message_deploy_condition.wasm"); + let datastore_bytecode = include_bytes!("./wasm/send_message_condition.wasm").to_vec(); + let mut datastore = BTreeMap::new(); + let key = unsafe { + String::from("smart-contract") + .encode_utf16() + .collect::>() + .align_to::() + .1 + .to_vec() + }; + datastore.insert(key, datastore_bytecode); + + // create the block containing the smart contract execution operation + let operation = create_execute_sc_operation(&keypair, bytecode, datastore).unwrap(); + storage.store_operations(vec![operation.clone()]); + let block = create_block(keypair, vec![operation], Slot::new(1, 0)).unwrap(); + // store 
the block in storage + storage.store_block(block.clone()); + + // set our block as a final block so the message is sent + let mut finalized_blocks: HashMap = Default::default(); + finalized_blocks.insert(block.content.header.content.slot, block.id); + let mut block_storage: PreHashMap = Default::default(); + block_storage.insert(block.id, storage.clone()); + blockclique_blocks.insert(block.content.header.content.slot, block.id); + controller.update_blockclique_status( + finalized_blocks.clone(), + Some(blockclique_blocks.clone()), + block_storage.clone(), + ); + // sleep for 10ms to reach the message execution period + std::thread::sleep(Duration::from_millis(10)); + + // retrieve events emitted by smart contracts + let events = controller.get_filtered_sc_output_event(EventFilter { + ..Default::default() + }); + + // match the events + assert!(events.len() == 2, "Two event was expected"); + assert_eq!(events[0].data, "Triggered"); + + // keypair associated to thread 1 + let keypair = KeyPair::from_str("S1kEBGgxHFBdsNC4HtRHhsZsB5irAtYHEmuAKATkfiomYmj58tm").unwrap(); + // load bytecode + // you can check the source code of the following wasm file in massa-unit-tests-src + let bytecode = include_bytes!("./wasm/send_message_wrong_trigger.wasm"); + let datastore = BTreeMap::new(); + + // create the block containing the smart contract execution operation + let operation = create_execute_sc_operation(&keypair, bytecode, datastore).unwrap(); + storage.store_operations(vec![operation.clone()]); + let block = create_block(keypair, vec![operation], Slot::new(1, 1)).unwrap(); + // store the block in storage + storage.store_block(block.clone()); + + // set our block as a final block so the message is sent + finalized_blocks.insert(block.content.header.content.slot, block.id); + let mut block_storage: PreHashMap = Default::default(); + block_storage.insert(block.id, storage.clone()); + blockclique_blocks.insert(block.content.header.content.slot, block.id); + 
controller.update_blockclique_status(finalized_blocks.clone(), None, block_storage.clone()); + // sleep for 10ms to reach the message execution period + std::thread::sleep(Duration::from_millis(10)); + + // retrieve events emitted by smart contracts + let events = controller.get_filtered_sc_output_event(EventFilter { + ..Default::default() + }); + + // match the events + assert!(events.len() == 3, "Three event was expected"); + assert_eq!(events[0].data, "Triggered"); + + // keypair associated to thread 2 + let keypair = + KeyPair::from_str("S12APSAzMPsJjVGWzUJ61ZwwGFTNapA4YtArMKDyW4edLu6jHvCr").unwrap(); + // load bytecode + // you can check the source code of the following wasm file in massa-unit-tests-src + // This line execute the smart contract that will modify the data entry and then trigger the SC. + let bytecode = include_bytes!("./wasm/send_message_trigger.wasm"); + let datastore = BTreeMap::new(); + + let operation = create_execute_sc_operation(&keypair, bytecode, datastore).unwrap(); + storage.store_operations(vec![operation.clone()]); + let block = create_block(keypair, vec![operation], Slot::new(1, 2)).unwrap(); + // store the block in storage + storage.store_block(block.clone()); + + // set our block as a final block so the message is sent + finalized_blocks.insert(block.content.header.content.slot, block.id); + let mut block_storage: PreHashMap = Default::default(); + block_storage.insert(block.id, storage.clone()); + blockclique_blocks.insert(block.content.header.content.slot, block.id); + controller.update_blockclique_status(finalized_blocks.clone(), None, block_storage.clone()); + // sleep for 1000ms to reach the message execution period + std::thread::sleep(Duration::from_millis(1000)); + + // retrieve events emitted by smart contracts + let events = controller.get_filtered_sc_output_event(EventFilter { + start: Some(Slot::new(1, 3)), + ..Default::default() + }); + + // match the events + assert!(events.len() == 1, "One event was expected"); + 
assert_eq!(events[0].data, "Triggered"); + assert_eq!(events[0].data, "Triggered"); + + manager.stop(); +} + #[test] #[serial] pub fn send_and_receive_transaction() { @@ -435,7 +771,7 @@ pub fn roll_buy() { &keypair, ) .unwrap(); - // create the block contaning the roll buy operation + // create the block containing the roll buy operation storage.store_operations(vec![operation.clone()]); let block = create_block(KeyPair::generate(), vec![operation], Slot::new(1, 0)).unwrap(); // store the block in storage @@ -498,9 +834,6 @@ pub fn roll_sell() { let keypair = KeyPair::from_str("S1JJeHiZv1C1zZN5GLFcbz6EXYiccmUPLkYuDFA3kayjxP39kFQ").unwrap(); let address = Address::from_public_key(&keypair.get_public_key()); - // get initial balance - let balance_initial = sample_state.read().ledger.get_balance(&address).unwrap(); - // get initial roll count let roll_count_initial = sample_state.read().pos_state.get_rolls_for(&address); let roll_sell_1 = 10; @@ -582,20 +915,6 @@ pub fn roll_sell() { credits ); - // Now check balance - let balances = controller.get_final_and_candidate_balance(&[address]); - let candidate_balance = balances.get(0).unwrap().1.unwrap(); - - assert_eq!( - candidate_balance, - exec_cfg - .roll_price - .checked_mul_u64(roll_sell_1 + roll_sell_2) - .unwrap() - .checked_add(balance_initial) - .unwrap() - ); - // stop the execution controller manager.stop(); } @@ -628,7 +947,7 @@ fn sc_execution_error() { // load bytecode // you can check the source code of the following wasm file in massa-unit-tests-src let bytecode = include_bytes!("./wasm/execution_error.wasm"); - // create the block contaning the erroneous smart contract execution operation + // create the block containing the erroneous smart contract execution operation let operation = create_execute_sc_operation(&keypair, bytecode, BTreeMap::default()).unwrap(); storage.store_operations(vec![operation.clone()]); let block = create_block(KeyPair::generate(), vec![operation], Slot::new(1, 0)).unwrap(); @@ 
-647,14 +966,18 @@ fn sc_execution_error() { std::thread::sleep(Duration::from_millis(10)); // retrieve the event emitted by the execution error - let events = controller.get_filtered_sc_output_event(EventFilter::default()); + let events = controller.get_filtered_sc_output_event(EventFilter { + is_error: Some(true), + ..Default::default() + }); // match the events - assert!(!events.is_empty(), "One event was expected"); - assert!(events[0].data.contains("massa_execution_error")); - assert!(events[0] + assert!(!events.is_empty(), "2 events were expected"); + assert_eq!(events[0].data, "event generated before the sc failure"); + assert!(events[1].data.contains("massa_execution_error")); + assert!(events[1] .data .contains("runtime error when executing operation")); - assert!(events[0].data.contains("address parsing error")); + assert!(events[1].data.contains("address parsing error")); // stop the execution controller manager.stop(); } @@ -689,7 +1012,7 @@ fn sc_datastore() { let bytecode = include_bytes!("./wasm/datastore.wasm"); let datastore = BTreeMap::from([(vec![65, 66], vec![255]), (vec![9], vec![10, 11])]); - // create the block contaning the erroneous smart contract execution operation + // create the block containing the erroneous smart contract execution operation let operation = create_execute_sc_operation(&keypair, bytecode, datastore).unwrap(); storage.store_operations(vec![operation.clone()]); let block = create_block(KeyPair::generate(), vec![operation], Slot::new(1, 0)).unwrap(); @@ -748,7 +1071,7 @@ fn set_bytecode_error() { let mut datastore = BTreeMap::new(); datastore.insert(b"smart-contract".to_vec(), datastore_bytecode); - // create the block contaning the erroneous smart contract execution operation + // create the block containing the erroneous smart contract execution operation let operation = create_execute_sc_operation(&keypair, bytecode, datastore).unwrap(); storage.store_operations(vec![operation.clone()]); let block = 
create_block(KeyPair::generate(), vec![operation], Slot::new(1, 0)).unwrap(); @@ -808,7 +1131,7 @@ fn datastore_manipulations() { // load bytecode // you can check the source code of the following wasm file in massa-unit-tests-src let bytecode = include_bytes!("./wasm/datastore_manipulations.wasm"); - // create the block contaning the erroneous smart contract execution operation + // create the block containing the erroneous smart contract execution operation let operation = create_execute_sc_operation(&keypair, bytecode, BTreeMap::default()).unwrap(); storage.store_operations(vec![operation.clone()]); let block = create_block(KeyPair::generate(), vec![operation], Slot::new(1, 0)).unwrap(); @@ -829,26 +1152,19 @@ fn datastore_manipulations() { let events = controller.get_filtered_sc_output_event(EventFilter::default()); // match the events assert!(!events.is_empty(), "2 events were expected"); - let key = "TEST".to_string(); - // in ASC, string are utf16 encoded - let s16 = key.encode_utf16(); - let s16_as_bytes: Vec = s16.map(|item| item.to_ne_bytes()).flatten().collect(); - // in SC, we use the builtin string formatting (using `keys: ${keys}`) & replicate it in Rust - let keys_str: String = s16_as_bytes + let key: Vec = [1, 0, 4, 255].iter().cloned().collect(); + let keys_str: String = key .iter() .map(|b| format!("{}", b)) .collect::>() .join(","); + assert!(events[0].data.contains(&format!("keys: {}", keys_str))); assert!(events[1].data.contains(&format!("keys2: {}", keys_str))); // Length of the value left in the datastore. See sources for more context. 
- let value_len = "TEST_VALUE" - .to_string() - .encode_utf16() - .size_hint() - .1 - .unwrap() as u64; + let value_len = [21, 0, 49].len() as u64; + assert_eq!( sample_state .read() @@ -858,7 +1174,7 @@ fn datastore_manipulations() { Amount::from_str("300000") .unwrap() // Gas fee - .saturating_sub(Amount::from_str("100000").unwrap()) + .saturating_sub(Amount::from_mantissa_scale(10, 0)) // Storage cost key .saturating_sub( exec_cfg @@ -970,12 +1286,12 @@ fn create_execute_sc_operation( ) -> Result { let op = OperationType::ExecuteSC { data: data.to_vec(), - max_gas: 100_000, + max_gas: 1_000_000, datastore, }; let op = Operation::new_wrapped( Operation { - fee: Amount::from_mantissa_scale(100000, 0), + fee: Amount::from_mantissa_scale(10, 0), expire_period: 10, op, }, @@ -1042,7 +1358,7 @@ fn sc_builtins() { // load bytecode // you can check the source code of the following wasm file in massa-unit-tests-src let bytecode = include_bytes!("./wasm/use_builtins.wasm"); - // create the block contaning the erroneous smart contract execution operation + // create the block containing the erroneous smart contract execution operation let operation = create_execute_sc_operation(&keypair, bytecode, BTreeMap::default()).unwrap(); storage.store_operations(vec![operation.clone()]); let block = create_block(KeyPair::generate(), vec![operation], Slot::new(1, 0)).unwrap(); @@ -1078,7 +1394,7 @@ fn sc_builtins() { .ledger .get_balance(&Address::from_public_key(&keypair.get_public_key())) .unwrap(), - Amount::from_str("200000").unwrap() + Amount::from_str("299990").unwrap() ); // stop the execution controller manager.stop(); diff --git a/massa-execution-worker/src/tests/wasm/datastore.wasm b/massa-execution-worker/src/tests/wasm/datastore.wasm index da8b62742ed..699ded6b422 100644 Binary files a/massa-execution-worker/src/tests/wasm/datastore.wasm and b/massa-execution-worker/src/tests/wasm/datastore.wasm differ diff --git 
a/massa-execution-worker/src/tests/wasm/datastore_manipulations.wasm b/massa-execution-worker/src/tests/wasm/datastore_manipulations.wasm index 6475257b617..54c72144045 100644 Binary files a/massa-execution-worker/src/tests/wasm/datastore_manipulations.wasm and b/massa-execution-worker/src/tests/wasm/datastore_manipulations.wasm differ diff --git a/massa-execution-worker/src/tests/wasm/deploy_sc.wasm b/massa-execution-worker/src/tests/wasm/deploy_sc.wasm new file mode 100644 index 00000000000..9c1d6b83aba Binary files /dev/null and b/massa-execution-worker/src/tests/wasm/deploy_sc.wasm differ diff --git a/massa-execution-worker/src/tests/wasm/execution_error.wasm b/massa-execution-worker/src/tests/wasm/execution_error.wasm index e9f83a2fe7c..12fd84d7d03 100644 Binary files a/massa-execution-worker/src/tests/wasm/execution_error.wasm and b/massa-execution-worker/src/tests/wasm/execution_error.wasm differ diff --git a/massa-execution-worker/src/tests/wasm/init_sc.wasm b/massa-execution-worker/src/tests/wasm/init_sc.wasm new file mode 100644 index 00000000000..15e22d7587d Binary files /dev/null and b/massa-execution-worker/src/tests/wasm/init_sc.wasm differ diff --git a/massa-execution-worker/src/tests/wasm/local_call.wasm b/massa-execution-worker/src/tests/wasm/local_call.wasm new file mode 100644 index 00000000000..e7cbd30347c Binary files /dev/null and b/massa-execution-worker/src/tests/wasm/local_call.wasm differ diff --git a/massa-execution-worker/src/tests/wasm/local_execution.wasm b/massa-execution-worker/src/tests/wasm/local_execution.wasm new file mode 100644 index 00000000000..d0bd976d3af Binary files /dev/null and b/massa-execution-worker/src/tests/wasm/local_execution.wasm differ diff --git a/massa-execution-worker/src/tests/wasm/local_function.wasm b/massa-execution-worker/src/tests/wasm/local_function.wasm new file mode 100644 index 00000000000..51ccf225167 Binary files /dev/null and b/massa-execution-worker/src/tests/wasm/local_function.wasm differ diff 
--git a/massa-execution-worker/src/tests/wasm/nested_call.wasm b/massa-execution-worker/src/tests/wasm/nested_call.wasm index 826e7e2f577..291d1f7b4bb 100644 Binary files a/massa-execution-worker/src/tests/wasm/nested_call.wasm and b/massa-execution-worker/src/tests/wasm/nested_call.wasm differ diff --git a/massa-execution-worker/src/tests/wasm/receive_message.wasm b/massa-execution-worker/src/tests/wasm/receive_message.wasm index 7adbe83fb28..48f1d4fcc6f 100644 Binary files a/massa-execution-worker/src/tests/wasm/receive_message.wasm and b/massa-execution-worker/src/tests/wasm/receive_message.wasm differ diff --git a/massa-execution-worker/src/tests/wasm/send_message.wasm b/massa-execution-worker/src/tests/wasm/send_message.wasm index e5ab00c79d8..2acdf4feff6 100644 Binary files a/massa-execution-worker/src/tests/wasm/send_message.wasm and b/massa-execution-worker/src/tests/wasm/send_message.wasm differ diff --git a/massa-execution-worker/src/tests/wasm/send_message_condition.wasm b/massa-execution-worker/src/tests/wasm/send_message_condition.wasm new file mode 100644 index 00000000000..0a463af986f Binary files /dev/null and b/massa-execution-worker/src/tests/wasm/send_message_condition.wasm differ diff --git a/massa-execution-worker/src/tests/wasm/send_message_condition_deployer.wasm b/massa-execution-worker/src/tests/wasm/send_message_condition_deployer.wasm new file mode 100644 index 00000000000..42e22ea9b50 Binary files /dev/null and b/massa-execution-worker/src/tests/wasm/send_message_condition_deployer.wasm differ diff --git a/massa-execution-worker/src/tests/wasm/send_message_deploy_condition.wasm b/massa-execution-worker/src/tests/wasm/send_message_deploy_condition.wasm new file mode 100644 index 00000000000..8a165c8c2ef Binary files /dev/null and b/massa-execution-worker/src/tests/wasm/send_message_deploy_condition.wasm differ diff --git a/massa-execution-worker/src/tests/wasm/send_message_trigger.wasm 
b/massa-execution-worker/src/tests/wasm/send_message_trigger.wasm new file mode 100644 index 00000000000..54b73674343 Binary files /dev/null and b/massa-execution-worker/src/tests/wasm/send_message_trigger.wasm differ diff --git a/massa-execution-worker/src/tests/wasm/send_message_wrong_trigger.wasm b/massa-execution-worker/src/tests/wasm/send_message_wrong_trigger.wasm new file mode 100644 index 00000000000..0eb06a893b9 Binary files /dev/null and b/massa-execution-worker/src/tests/wasm/send_message_wrong_trigger.wasm differ diff --git a/massa-execution-worker/src/tests/wasm/set_bytecode_fail.wasm b/massa-execution-worker/src/tests/wasm/set_bytecode_fail.wasm index a1299b0263c..f425ff63861 100644 Binary files a/massa-execution-worker/src/tests/wasm/set_bytecode_fail.wasm and b/massa-execution-worker/src/tests/wasm/set_bytecode_fail.wasm differ diff --git a/massa-execution-worker/src/tests/wasm/test.wasm b/massa-execution-worker/src/tests/wasm/test.wasm index b5bf9cede2b..b01f9f4d5b2 100644 Binary files a/massa-execution-worker/src/tests/wasm/test.wasm and b/massa-execution-worker/src/tests/wasm/test.wasm differ diff --git a/massa-execution-worker/src/worker.rs b/massa-execution-worker/src/worker.rs index afc12b6a1fc..667d2feaee8 100644 --- a/massa-execution-worker/src/worker.rs +++ b/massa-execution-worker/src/worker.rs @@ -26,8 +26,6 @@ use tracing::debug; /// Structure gathering all elements needed by the execution thread pub(crate) struct ExecutionThread { - // Execution config - config: ExecutionConfig, // A copy of the input data allowing access to incoming requests input_data: Arc<(Condvar, Mutex)>, // Total continuous slot sequence @@ -62,9 +60,8 @@ impl ExecutionThread { input_data, readonly_requests: RequestQueue::new(config.readonly_queue_length), execution_state, - slot_sequencer: SlotSequencer::new(config.clone(), final_cursor), + slot_sequencer: SlotSequencer::new(config, final_cursor), selector, - config, } } @@ -141,8 +138,7 @@ impl ExecutionThread { 
// Compute when the next slot will be // This is useful to wait for the next speculative miss to append to active slots. let wakeup_deadline = self.slot_sequencer.get_next_slot_deadline(); - let now = - MassaTime::now(self.config.clock_compensation).expect("could not get current time"); + let now = MassaTime::now().expect("could not get current time"); if wakeup_deadline <= now { // next slot is right now: the loop needs to iterate return (input_data, false); @@ -153,7 +149,7 @@ impl ExecutionThread { let _ = self.input_data.0.wait_until( &mut input_data_lock, wakeup_deadline - .estimate_instant(self.config.clock_compensation) + .estimate_instant() .expect("could not estimate instant"), ); } diff --git a/massa-factory-exports/src/config.rs b/massa-factory-exports/src/config.rs index 8d2dfbeb0cf..83e0db4b083 100644 --- a/massa-factory-exports/src/config.rs +++ b/massa-factory-exports/src/config.rs @@ -16,9 +16,6 @@ pub struct FactoryConfig { /// period duration pub t0: MassaTime, - /// clock compensation in relative milliseconds - pub clock_compensation_millis: i64, - /// initial delay before starting production, to avoid double-production on node restart pub initial_delay: MassaTime, diff --git a/massa-factory-exports/src/test_exports/config.rs b/massa-factory-exports/src/test_exports/config.rs index c4c0f8c2460..0902df7bf0d 100644 --- a/massa-factory-exports/src/test_exports/config.rs +++ b/massa-factory-exports/src/test_exports/config.rs @@ -8,9 +8,8 @@ impl Default for FactoryConfig { use massa_models::config::*; FactoryConfig { thread_count: THREAD_COUNT, - genesis_timestamp: MassaTime::now(0).expect("failed to get current time"), + genesis_timestamp: MassaTime::now().expect("failed to get current time"), t0: T0, - clock_compensation_millis: 0, initial_delay: MassaTime::from(0), max_block_size: MAX_BLOCK_SIZE as u64, max_block_gas: MAX_GAS_PER_BLOCK, diff --git a/massa-factory-worker/Cargo.toml b/massa-factory-worker/Cargo.toml index dc3b8b42138..4a945487a9c 
100644 --- a/massa-factory-worker/Cargo.toml +++ b/massa-factory-worker/Cargo.toml @@ -25,7 +25,7 @@ massa_serialization = { path = "../massa-serialization" } massa_pool_exports = { path = "../massa-pool-exports" } [dev-dependencies] -serial_test = "0.9" +serial_test = "0.10" massa_protocol_exports = { path = "../massa-protocol-exports", features=["testing"] } massa_consensus_exports = { path = "../massa-consensus-exports", features = ["testing"] } massa_factory_exports = { path = "../massa-factory-exports", features=["testing"] } diff --git a/massa-factory-worker/src/block_factory.rs b/massa-factory-worker/src/block_factory.rs index cc690b78be2..39cf3c9f9c6 100644 --- a/massa-factory-worker/src/block_factory.rs +++ b/massa-factory-worker/src/block_factory.rs @@ -56,8 +56,7 @@ impl BlockFactoryWorker { /// Extra safety against double-production caused by clock adjustments (this is the role of the `previous_slot` parameter). fn get_next_slot(&self, previous_slot: Option) -> (Slot, Instant) { // get current absolute time - let now = - MassaTime::now(self.cfg.clock_compensation_millis).expect("could not get current time"); + let now = MassaTime::now().expect("could not get current time"); // if it's the first computed slot, add a time shift to prevent double-production on node restart with clock skew let base_time = if previous_slot.is_none() { @@ -96,7 +95,7 @@ impl BlockFactoryWorker { next_slot, ) .expect("could not get block slot timestamp") - .estimate_instant(self.cfg.clock_compensation_millis) + .estimate_instant() .expect("could not estimate block slot instant"); (next_slot, next_instant) diff --git a/massa-factory-worker/src/endorsement_factory.rs b/massa-factory-worker/src/endorsement_factory.rs index 4c61cf0b062..797c10b797d 100644 --- a/massa-factory-worker/src/endorsement_factory.rs +++ b/massa-factory-worker/src/endorsement_factory.rs @@ -62,8 +62,7 @@ impl EndorsementFactoryWorker { /// Extra safety against double-production caused by clock adjustments 
(this is the role of the `previous_slot` parameter). fn get_next_slot(&self, previous_slot: Option) -> (Slot, Instant) { // get delayed time - let now = - MassaTime::now(self.cfg.clock_compensation_millis).expect("could not get current time"); + let now = MassaTime::now().expect("could not get current time"); // if it's the first computed slot, add a time shift to prevent double-production on node restart with clock skew let base_time = if previous_slot.is_none() { @@ -103,7 +102,7 @@ impl EndorsementFactoryWorker { ) .expect("could not get block slot timestamp") .saturating_sub(self.half_t0) - .estimate_instant(self.cfg.clock_compensation_millis) + .estimate_instant() .expect("could not estimate block slot instant"); (next_slot, next_instant) diff --git a/massa-factory-worker/src/tests/tools.rs b/massa-factory-worker/src/tests/tools.rs index ef2335f5874..c0f8acb0ca8 100644 --- a/massa-factory-worker/src/tests/tools.rs +++ b/massa-factory-worker/src/tests/tools.rs @@ -113,7 +113,7 @@ impl TestFactory { operations: Option>, endorsements: Option>, ) -> (BlockId, Storage) { - let now = MassaTime::now(0).expect("could not get current time"); + let now = MassaTime::now().expect("could not get current time"); let next_slot_instant = get_next_slot_instant( self.factory_config.genesis_timestamp, self.factory_config.thread_count, diff --git a/massa-final-state/src/config.rs b/massa-final-state/src/config.rs index a8e99dbdc1a..d5cc324a780 100644 --- a/massa-final-state/src/config.rs +++ b/massa-final-state/src/config.rs @@ -17,7 +17,7 @@ pub struct FinalStateConfig { pub async_pool_config: AsyncPoolConfig, /// proof-of-stake configuration pub pos_config: PoSConfig, - /// exectued operations configuration + /// executed operations configuration pub executed_ops_config: ExecutedOpsConfig, /// final changes history length pub final_history_length: usize, diff --git a/massa-final-state/src/error.rs b/massa-final-state/src/error.rs index 235d3831c2c..3fa99696d99 100644 --- 
a/massa-final-state/src/error.rs +++ b/massa-final-state/src/error.rs @@ -9,6 +9,8 @@ use thiserror::Error; #[non_exhaustive] #[derive(Display, Error, Debug)] pub enum FinalStateError { + /// invalid slot: {0} + InvalidSlot(String), /// ledger error: {0} LedgerError(String), /// PoS error: {0} diff --git a/massa-final-state/src/final_state.rs b/massa-final-state/src/final_state.rs index c22ad0dde05..ee56d3b4623 100644 --- a/massa-final-state/src/final_state.rs +++ b/massa-final-state/src/final_state.rs @@ -8,7 +8,7 @@ use crate::{config::FinalStateConfig, error::FinalStateError, state_changes::StateChanges}; use massa_async_pool::{AsyncMessageId, AsyncPool, AsyncPoolChanges, Change}; use massa_executed_ops::ExecutedOps; -use massa_hash::Hash; +use massa_hash::{Hash, HASH_SIZE_BYTES}; use massa_ledger_exports::{get_address_from_key, LedgerChanges, LedgerController}; use massa_models::{slot::Slot, streaming_step::StreamingStep}; use massa_pos_exports::{DeferredCredits, PoSFinalState, SelectorController}; @@ -32,8 +32,12 @@ pub struct FinalState { /// history of recent final state changes, useful for streaming bootstrap /// `front = oldest`, `back = newest` pub changes_history: VecDeque<(Slot, StateChanges)>, + /// hash of the final state, it is computed on finality + pub final_state_hash: Hash, } +const FINAL_STATE_HASH_INITIAL_BYTES: &[u8; 32] = &[0; HASH_SIZE_BYTES]; + impl FinalState { /// Initializes a new `FinalState` /// @@ -50,6 +54,7 @@ impl FinalState { &config.initial_seed_string, &config.initial_rolls_path, selector, + ledger.get_ledger_hash(), ) .map_err(|err| FinalStateError::PosError(format!("PoS final state init error: {}", err)))?; @@ -62,7 +67,7 @@ impl FinalState { // create a default executed ops let executed_ops = ExecutedOps::new(config.executed_ops_config.clone()); - // generate the final state + // create the final state Ok(FinalState { slot, ledger, @@ -71,9 +76,52 @@ impl FinalState { config, executed_ops, changes_history: Default::default(), 
// no changes in history + final_state_hash: Hash::from_bytes(FINAL_STATE_HASH_INITIAL_BYTES), }) } + /// Compute the current state hash. + /// + /// Used when finalizing a slot. + /// Slot information is only used for logging. + pub fn compute_state_hash_at_slot(&mut self, slot: Slot) { + // 1. init hash concatenation with the ledger hash + let ledger_hash = self.ledger.get_ledger_hash(); + let mut hash_concat: Vec = ledger_hash.to_bytes().to_vec(); + debug!("ledger hash at slot {}: {}", slot, ledger_hash); + // 2. async_pool hash + hash_concat.extend(self.async_pool.hash.to_bytes()); + debug!("async_pool hash at slot {}: {}", slot, self.async_pool.hash); + // 3. pos deferred_credit hash + hash_concat.extend(self.pos_state.deferred_credits.hash.to_bytes()); + debug!( + "deferred_credit hash at slot {}: {}", + slot, self.pos_state.deferred_credits.hash + ); + // 4. pos cycle history hashes, skip the bootstrap safety cycle if there is one + let n = (self.pos_state.cycle_history.len() == self.config.pos_config.cycle_history_length) + as usize; + for cycle_info in self.pos_state.cycle_history.iter().skip(n) { + hash_concat.extend(cycle_info.cycle_global_hash.to_bytes()); + debug!( + "cycle ({}) hash at slot {}: {}", + cycle_info.cycle, slot, cycle_info.cycle_global_hash + ); + } + // 5. executed operations hash + hash_concat.extend(self.executed_ops.hash.to_bytes()); + debug!( + "executed_ops hash at slot {}: {}", + slot, self.executed_ops.hash + ); + // 6. compute and save final state hash + self.final_state_hash = Hash::compute_from(&hash_concat); + info!( + "final_state hash at slot {}: {}", + slot, self.final_state_hash + ); + } + /// Performs the initial draws. 
pub fn compute_initial_draws(&mut self) -> Result<(), FinalStateError> { self.pos_state @@ -98,7 +146,8 @@ impl FinalState { // update current slot self.slot = slot; - // apply changes + // apply the state changes + // unwrap is justified because every error in PoS `apply_changes` is critical self.ledger .apply_changes(changes.ledger_changes.clone(), self.slot); self.async_pool @@ -106,7 +155,9 @@ impl FinalState { self.pos_state .apply_changes(changes.pos_changes.clone(), self.slot, true) .expect("could not settle slot in final state proof-of-stake"); - // TODO do not panic above: it might just mean that the lookback cycle is not available + // TODO: + // do not panic above, it might just mean that the lookback cycle is not available + // bootstrap again instead self.executed_ops .apply_changes(changes.executed_ops_changes.clone(), self.slot); @@ -118,39 +169,13 @@ impl FinalState { self.changes_history.push_back((slot, changes)); } - // final hash computing and sub hashes logging - // 1. init hash concatenation with the ledger hash - let ledger_hash = self.ledger.get_ledger_hash(); - let mut hash_concat: Vec = ledger_hash.to_bytes().to_vec(); - debug!("ledger hash at slot {}: {}", slot, ledger_hash); - // 2. async_pool hash - hash_concat.extend(self.async_pool.hash.to_bytes()); - debug!("async_pool hash at slot {}: {}", slot, self.async_pool.hash); - // 3. pos deferred_credit hash - hash_concat.extend(self.pos_state.deferred_credits.hash.to_bytes()); - debug!( - "deferred_credit hash at slot {}: {}", - slot, self.pos_state.deferred_credits.hash - ); - // 4. pos cycle history hashes - let n = (self.pos_state.cycle_history.len() == self.config.pos_config.cycle_history_length) - as usize; - for cycle_info in self.pos_state.cycle_history.iter().skip(n) { - hash_concat.extend(cycle_info.global_hash.to_bytes()); - debug!( - "cycle ({}) hash at slot {}: {}", - cycle_info.cycle, slot, cycle_info.global_hash - ); - } - // 5. 
executed operations hash - hash_concat.extend(self.executed_ops.hash.to_bytes()); - debug!( - "executed_ops hash at slot {}: {}", - slot, self.executed_ops.hash - ); - // 6. final state hash - let final_state_hash = Hash::compute_from(&hash_concat); - info!("final_state hash at slot {}: {}", slot, final_state_hash); + // compute the final state hash + self.compute_state_hash_at_slot(slot); + + // feed final_state_hash to the last cycle + let cycle = slot.get_cycle(self.config.periods_per_cycle); + self.pos_state + .feed_cycle_state_hash(cycle, self.final_state_hash); } /// Used for bootstrap. @@ -180,16 +205,16 @@ impl FinalState { let index = slot .slots_since(first_slot, self.config.thread_count) .map_err(|_| { - FinalStateError::LedgerError( - "get_state_changes_part given slot is overflowing history.".to_string(), + FinalStateError::InvalidSlot( + "get_state_changes_part given slot is overflowing history".to_string(), ) })? .saturating_add(1); // Check if the `slot` index isn't in the future if self.changes_history.len() as u64 <= index { - return Err(FinalStateError::LedgerError( - "slot index is overflowing history.".to_string(), + return Err(FinalStateError::InvalidSlot( + "slot index is overflowing history".to_string(), )); } index @@ -239,10 +264,12 @@ impl FinalState { .0 .iter() .filter_map(|change| match change { - Change::Add(id, _) if id <= &last_id => Some(change.clone()), - Change::Delete(id) if id <= &last_id => Some(change.clone()), - Change::Add(..) => None, - Change::Delete(..) 
=> None, + Change::Add(id, _) | Change::Activate(id) | Change::Delete(id) + if id <= &last_id => + { + Some(change.clone()) + } + _ => None, }) .collect(), ); diff --git a/massa-final-state/src/state_changes.rs b/massa-final-state/src/state_changes.rs index ed52113f435..69142cc17fc 100644 --- a/massa-final-state/src/state_changes.rs +++ b/massa-final-state/src/state_changes.rs @@ -80,6 +80,7 @@ impl Serializer for StateChangesSerializer { /// Slot::new(2, 0), /// Slot::new(3, 0), /// vec![1, 2, 3, 4], + /// None, /// ); /// let async_pool_changes: AsyncPoolChanges = AsyncPoolChanges(vec![Change::Add(message.compute_id(), message)]); /// state_changes.async_pool_changes = async_pool_changes; @@ -148,6 +149,7 @@ impl StateChangesDeserializer { thread_count, max_async_pool_changes, max_async_message_data, + max_datastore_key_length as u32, ), pos_changes_deserializer: PoSChangesDeserializer::new( thread_count, @@ -187,6 +189,7 @@ impl Deserializer for StateChangesDeserializer { /// Slot::new(2, 0), /// Slot::new(3, 0), /// vec![1, 2, 3, 4], + /// None, /// ); /// let async_pool_changes: AsyncPoolChanges = AsyncPoolChanges(vec![Change::Add(message.compute_id(), message)]); /// state_changes.async_pool_changes = async_pool_changes; diff --git a/massa-final-state/src/test_exports/bootstrap.rs b/massa-final-state/src/test_exports/bootstrap.rs index 05fd77a8733..4a391812f30 100644 --- a/massa-final-state/src/test_exports/bootstrap.rs +++ b/massa-final-state/src/test_exports/bootstrap.rs @@ -6,6 +6,7 @@ use std::collections::VecDeque; use massa_async_pool::AsyncPool; use massa_executed_ops::ExecutedOps; +use massa_hash::{Hash, HASH_SIZE_BYTES}; use massa_ledger_exports::LedgerController; use massa_models::slot::Slot; use massa_pos_exports::PoSFinalState; @@ -30,6 +31,7 @@ pub fn create_final_state( changes_history, pos_state, executed_ops, + final_state_hash: Hash::from_bytes(&[0; HASH_SIZE_BYTES]), } } @@ -92,7 +94,7 @@ pub fn assert_eq_final_state_hash(v1: &FinalState, 
v2: &FinalState) { cycle1.cycle ); assert_eq!( - cycle1.global_hash, cycle2.global_hash, + cycle1.cycle_global_hash, cycle2.cycle_global_hash, "cycle ({}) global_hash mismatch", cycle1.cycle ); diff --git a/massa-final-state/src/test_exports/config.rs b/massa-final-state/src/test_exports/config.rs index 09af73c47f0..9d1950b825f 100644 --- a/massa-final-state/src/test_exports/config.rs +++ b/massa-final-state/src/test_exports/config.rs @@ -7,6 +7,7 @@ use std::path::PathBuf; use crate::{FinalState, FinalStateConfig}; use massa_async_pool::{AsyncPool, AsyncPoolConfig}; use massa_executed_ops::{ExecutedOps, ExecutedOpsConfig}; +use massa_hash::{Hash, HASH_SIZE_BYTES}; use massa_ledger_exports::LedgerConfig; use massa_ledger_worker::FinalLedger; use massa_models::{ @@ -29,6 +30,7 @@ impl FinalState { executed_ops: ExecutedOps::new(config.executed_ops_config.clone()), changes_history: Default::default(), config, + final_state_hash: Hash::from_bytes(&[0; HASH_SIZE_BYTES]), } } } diff --git a/massa-hash/Cargo.toml b/massa-hash/Cargo.toml index 6f9e4fca651..badbf93ea10 100644 --- a/massa-hash/Cargo.toml +++ b/massa-hash/Cargo.toml @@ -19,4 +19,4 @@ massa_serialization = { path = "../massa-serialization" } [dev-dependencies] serde_json = "1.0" -serial_test = "0.9" +serial_test = "0.10" diff --git a/massa-ledger-exports/src/ledger_changes.rs b/massa-ledger-exports/src/ledger_changes.rs index 4c9c97e0cd8..3b74466db01 100644 --- a/massa-ledger-exports/src/ledger_changes.rs +++ b/massa-ledger-exports/src/ledger_changes.rs @@ -805,6 +805,36 @@ impl LedgerChanges { } } + /// Tries to return whether there is a change on a given address in the ledger changes + /// and optionally if a datastore key modification also exists in the address's datastore. 
+ /// + /// # Arguments + /// * `addr`: target address + /// * `key`: optional datastore key + /// + /// # Returns + /// * true if the address and, optionally the datastore key, exists in the ledger changes + pub fn has_changes(&self, addr: &Address, key: Option>) -> bool { + // Get the current changes being applied to the ledger entry associated to that address + match self.0.get(addr) { + // This ledger entry is being replaced by a new one: + // check if the new ledger entry has a datastore entry for the provided key + Some(SetUpdateOrDelete::Set(v)) => key.map_or(true, |k| v.datastore.contains_key(&k)), + + // This ledger entry is being updated + Some(SetUpdateOrDelete::Update(LedgerEntryUpdate { datastore, .. })) => { + // Check if the update being applied to that datastore entry + key.map_or(true, |k| datastore.contains_key(&k)) + } + + // This ledger entry is being deleted: return true + Some(SetUpdateOrDelete::Delete) => true, + + // This ledger entry is not being changed. + None => false, + } + } + /// Tries to return whether a datastore entry exists for a given address, /// or gets it from a function if the datastore entry's status is unknown. 
/// diff --git a/massa-ledger-worker/src/ledger_db.rs b/massa-ledger-worker/src/ledger_db.rs index c5b38dc01a7..4f01b8f4a61 100644 --- a/massa-ledger-worker/src/ledger_db.rs +++ b/massa-ledger-worker/src/ledger_db.rs @@ -538,7 +538,9 @@ impl LedgerDB { .map_err(|_| ModelsError::SerializeError("Error in deserialization".to_string()))?; // Every byte should have been read - if rest.is_empty() { + if last_key.is_empty() { + Ok(StreamingStep::Finished(None)) + } else if rest.is_empty() { self.write_batch(batch); Ok(StreamingStep::Ongoing((*last_key).clone())) } else { diff --git a/massa-logging/Cargo.toml b/massa-logging/Cargo.toml index 62d78fc5fc3..eb39e26eec3 100644 --- a/massa-logging/Cargo.toml +++ b/massa-logging/Cargo.toml @@ -12,4 +12,4 @@ tracing = "0.1" [dev-dependencies] pretty_assertions = "1.2" -serial_test = "0.9" +serial_test = "0.10" diff --git a/massa-models/Cargo.toml b/massa-models/Cargo.toml index c14f78f89b8..98f934f2b68 100644 --- a/massa-models/Cargo.toml +++ b/massa-models/Cargo.toml @@ -10,6 +10,8 @@ lazy_static = "1.4" num_enum = "0.5" rust_decimal = "1.26" serde = { version = "1.0", features = ["derive"] } +serde_with = "2.1.0" +strum = { version = "0.24", features = ["derive"] } thiserror = "1.0" num = { version = "0.4", features = ["serde"] } directories = "4.0" @@ -25,7 +27,7 @@ massa_signature = { path = "../massa-signature" } massa_time = { path = "../massa-time" } [dev-dependencies] -serial_test = "0.9" +serial_test = "0.10" # for more information on what are the following features used for, see the cargo.toml at workspace level [features] diff --git a/massa-models/src/api.rs b/massa-models/src/api.rs index d3284ade9b4..931ab7f48ab 100644 --- a/massa-models/src/api.rs +++ b/massa-models/src/api.rs @@ -15,6 +15,7 @@ use massa_time::MassaTime; use serde::{Deserialize, Serialize}; use std::collections::BTreeMap; use std::net::IpAddr; +use strum::Display; /// operation input #[derive(Serialize, Deserialize, Debug)] @@ -555,6 +556,12 @@ pub 
struct EventFilter { /// Some(false) means candidate /// None means final _and_ candidate pub is_final: Option, + /// optional execution status + /// + /// Some(true) means events coming from a failed sc execution + /// Some(false) means events coming from a succeeded sc execution + /// None means both + pub is_error: Option, } /// read only bytecode execution request @@ -584,3 +591,29 @@ pub struct ReadOnlyCall { /// caller's address, optional pub caller_address: Option
, } + +/// SCRUD operations +#[derive(Display)] +#[strum(serialize_all = "snake_case")] +pub enum ScrudOperation { + /// search operation + Search, + /// create operation + Create, + /// read operation + Read, + /// update operation + Update, + /// delete operation + Delete, +} + +/// Bootsrap lists types +#[derive(Display)] +#[strum(serialize_all = "snake_case")] +pub enum ListType { + /// contains banned entry + Blacklist, + /// contains allowed entry + Whitelist, +} diff --git a/massa-models/src/block.rs b/massa-models/src/block.rs index 19d6cb64cfd..f0e5bae6b1f 100644 --- a/massa-models/src/block.rs +++ b/massa-models/src/block.rs @@ -11,7 +11,8 @@ use crate::{ }; use massa_hash::{Hash, HashDeserializer}; use massa_serialization::{ - Deserializer, SerializeError, Serializer, U32VarIntDeserializer, U32VarIntSerializer, + DeserializeError, Deserializer, SerializeError, Serializer, U32VarIntDeserializer, + U32VarIntSerializer, U64VarIntDeserializer, U64VarIntSerializer, }; use massa_signature::{KeyPair, PublicKey, Signature}; use nom::branch::alt; @@ -25,6 +26,7 @@ use nom::{ IResult, }; use serde::{Deserialize, Serialize}; +use serde_with::{DeserializeFromStr, SerializeDisplay}; use std::convert::TryInto; use std::fmt::Formatter; use std::ops::Bound::{Excluded, Included}; @@ -34,7 +36,9 @@ use std::str::FromStr; const BLOCK_ID_SIZE_BYTES: usize = massa_hash::HASH_SIZE_BYTES; /// block id -#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] +#[derive( + Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash, SerializeDisplay, DeserializeFromStr, +)] pub struct BlockId(pub Hash); impl PreHashed for BlockId {} @@ -49,22 +53,66 @@ impl Id for BlockId { } } +const BLOCKID_PREFIX: char = 'B'; +const BLOCKID_VERSION: u64 = 0; + impl std::fmt::Display for BlockId { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{}", self.0.to_bs58_check()) + let u64_serializer = U64VarIntSerializer::new(); + // might want to 
allocate the vector with capacity in order to avoid re-allocation + let mut bytes: Vec = Vec::new(); + u64_serializer + .serialize(&BLOCKID_VERSION, &mut bytes) + .map_err(|_| std::fmt::Error)?; + bytes.extend(self.0.to_bytes()); + write!( + f, + "{}{}", + BLOCKID_PREFIX, + bs58::encode(bytes).with_check().into_string() + ) } } impl std::fmt::Debug for BlockId { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{}", self.0.to_bs58_check()) + write!(f, "{}", self) } } impl FromStr for BlockId { type Err = ModelsError; + /// ## Example + /// ```rust + /// # use massa_hash::Hash; + /// # use std::str::FromStr; + /// # use massa_models::block::BlockId; + /// # let hash = Hash::compute_from(b"test"); + /// # let block_id = BlockId(hash); + /// let ser = block_id.to_string(); + /// let res_block_id = BlockId::from_str(&ser).unwrap(); + /// assert_eq!(block_id, res_block_id); + /// ``` fn from_str(s: &str) -> Result { - Ok(BlockId(Hash::from_str(s)?)) + let mut chars = s.chars(); + match chars.next() { + Some(prefix) if prefix == BLOCKID_PREFIX => { + let data = chars.collect::(); + let decoded_bs58_check = bs58::decode(data) + .with_check(None) + .into_vec() + .map_err(|_| ModelsError::BlockIdParseError)?; + let u64_deserializer = U64VarIntDeserializer::new(Included(0), Included(u64::MAX)); + let (rest, _version) = u64_deserializer + .deserialize::(&decoded_bs58_check[..]) + .map_err(|_| ModelsError::BlockIdParseError)?; + Ok(BlockId(Hash::from_bytes( + rest.try_into() + .map_err(|_| ModelsError::BlockIdParseError)?, + ))) + } + _ => Err(ModelsError::BlockIdParseError), + } } } @@ -84,13 +132,6 @@ impl BlockId { BlockId(Hash::from_bytes(data)) } - /// block id fro `bs58` check - pub fn from_bs58_check(data: &str) -> Result { - Ok(BlockId( - Hash::from_bs58_check(data).map_err(|_| ModelsError::HashError)?, - )) - } - /// first bit of the hashed block id pub fn get_first_bit(&self) -> bool { self.to_bytes()[0] >> 7 == 1 @@ -147,10 +188,19 @@ 
impl Deserializer for BlockIdDeserializer { pub struct Block { /// signed header pub header: WrappedHeader, - /// operations + /// operations ids pub operations: Vec, } +/// filled block +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct FilledBlock { + /// signed header + pub header: WrappedHeader, + /// operations + pub operations: Vec<(OperationId, Option)>, +} + /// Wrapped Block pub type WrappedBlock = Wrapped; diff --git a/massa-models/src/config/constants.rs b/massa-models/src/config/constants.rs index d5cdbb5761c..bb974e5fc23 100644 --- a/massa-models/src/config/constants.rs +++ b/massa-models/src/config/constants.rs @@ -40,19 +40,19 @@ lazy_static::lazy_static! { /// Time in milliseconds when the blockclique started. pub static ref GENESIS_TIMESTAMP: MassaTime = if cfg!(feature = "sandbox") { std::env::var("GENESIS_TIMESTAMP").map(|timestamp| timestamp.parse::().unwrap().into()).unwrap_or_else(|_| - MassaTime::now(0) + MassaTime::now() .unwrap() .saturating_add(MassaTime::from_millis(1000 * 10)) ) } else { - 1669852801000.into() // Thursday, December 01, 2022 00:00:01 AM UTC + 1672790401000.into() // Wednesday, January 04, 2022 00:00:01 AM UTC }; /// TESTNET: time when the blockclique is ended. pub static ref END_TIMESTAMP: Option = if cfg!(feature = "sandbox") { None } else { - Some(1672466400000.into()) // Saturday, December 31, 2022 6:00:00 PM UTC + Some(1675105200000.into()) // Monday, January 30, 2022 19:00:00 PM UTC }; /// `KeyPair` to sign genesis blocks. pub static ref GENESIS_KEY: KeyPair = KeyPair::from_str("S1UxdCJv5ckDK8z87E5Jq5fEfSVLi2cTHgtpfZy7iURs3KpPns8") @@ -64,7 +64,7 @@ lazy_static::lazy_static! 
{ if cfg!(feature = "sandbox") { "SAND.0.0" } else { - "TEST.17.2" + "TEST.18.0" } .parse() .unwrap() @@ -195,7 +195,7 @@ pub const POOL_CONTROLLER_CHANNEL_SIZE: usize = 1024; // /// Maximum of GAS allowed for a block -pub const MAX_GAS_PER_BLOCK: u64 = 1_000_000_000; +pub const MAX_GAS_PER_BLOCK: u64 = u32::MAX as u64; /// Maximum of GAS allowed for asynchronous messages execution on one slot pub const MAX_ASYNC_GAS: u64 = 1_000_000_000; diff --git a/massa-models/src/endorsement.rs b/massa-models/src/endorsement.rs index 70f8cedced7..52692d067c2 100644 --- a/massa-models/src/endorsement.rs +++ b/massa-models/src/endorsement.rs @@ -6,7 +6,8 @@ use crate::wrapped::{Id, Wrapped, WrappedContent}; use crate::{block::BlockId, error::ModelsError}; use massa_hash::{Hash, HashDeserializer}; use massa_serialization::{ - Deserializer, SerializeError, Serializer, U32VarIntDeserializer, U32VarIntSerializer, + DeserializeError, Deserializer, SerializeError, Serializer, U32VarIntDeserializer, + U32VarIntSerializer, U64VarIntDeserializer, U64VarIntSerializer, }; use nom::error::context; use nom::sequence::tuple; @@ -16,6 +17,7 @@ use nom::{ IResult, }; use serde::{Deserialize, Serialize}; +use serde_with::{DeserializeFromStr, SerializeDisplay}; use std::ops::Bound::{Excluded, Included}; use std::{fmt::Display, str::FromStr}; @@ -23,9 +25,14 @@ use std::{fmt::Display, str::FromStr}; pub const ENDORSEMENT_ID_SIZE_BYTES: usize = massa_hash::HASH_SIZE_BYTES; /// endorsement id -#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] +#[derive( + Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash, SerializeDisplay, DeserializeFromStr, +)] pub struct EndorsementId(Hash); +const ENDORSEMENTID_PREFIX: char = 'E'; +const ENDORSEMENTID_VERSION: u64 = 0; + impl PreHashed for EndorsementId {} impl Id for EndorsementId { @@ -40,14 +47,60 @@ impl Id for EndorsementId { impl std::fmt::Display for EndorsementId { fn fmt(&self, f: &mut std::fmt::Formatter) -> 
std::fmt::Result { - write!(f, "{}", self.0.to_bs58_check()) + let u64_serializer = U64VarIntSerializer::new(); + // might want to allocate the vector with capacity in order to avoid re-allocation + let mut bytes: Vec = Vec::new(); + u64_serializer + .serialize(&ENDORSEMENTID_VERSION, &mut bytes) + .map_err(|_| std::fmt::Error)?; + bytes.extend(self.0.to_bytes()); + write!( + f, + "{}{}", + ENDORSEMENTID_PREFIX, + bs58::encode(bytes).with_check().into_string() + ) + } +} + +impl std::fmt::Debug for EndorsementId { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "{}", self) } } impl FromStr for EndorsementId { type Err = ModelsError; + /// ## Example + /// ```rust + /// # use massa_hash::Hash; + /// # use std::str::FromStr; + /// # use massa_models::endorsement::EndorsementId; + /// # let endo_id = EndorsementId::from_bytes(&[0; 32]); + /// let ser = endo_id.to_string(); + /// let res_endo_id = EndorsementId::from_str(&ser).unwrap(); + /// assert_eq!(endo_id, res_endo_id); + /// ``` fn from_str(s: &str) -> Result { - Ok(EndorsementId(Hash::from_str(s)?)) + let mut chars = s.chars(); + match chars.next() { + Some(prefix) if prefix == ENDORSEMENTID_PREFIX => { + let data = chars.collect::(); + let decoded_bs58_check = bs58::decode(data) + .with_check(None) + .into_vec() + .map_err(|_| ModelsError::EndorsementIdParseError)?; + let u64_deserializer = U64VarIntDeserializer::new(Included(0), Included(u64::MAX)); + let (rest, _version) = u64_deserializer + .deserialize::(&decoded_bs58_check[..]) + .map_err(|_| ModelsError::EndorsementIdParseError)?; + Ok(EndorsementId(Hash::from_bytes( + rest.try_into() + .map_err(|_| ModelsError::EndorsementIdParseError)?, + ))) + } + _ => Err(ModelsError::EndorsementIdParseError), + } } } @@ -66,13 +119,6 @@ impl EndorsementId { pub fn from_bytes(data: &[u8; ENDORSEMENT_ID_SIZE_BYTES]) -> EndorsementId { EndorsementId(Hash::from_bytes(data)) } - - /// endorsement id from `bs58` check - pub fn 
from_bs58_check(data: &str) -> Result { - Ok(EndorsementId( - Hash::from_bs58_check(data).map_err(|_| ModelsError::HashError)?, - )) - } } impl Display for Endorsement { diff --git a/massa-models/src/error.rs b/massa-models/src/error.rs index c02a4124417..b9f3e9621d3 100644 --- a/massa-models/src/error.rs +++ b/massa-models/src/error.rs @@ -32,6 +32,14 @@ pub enum ModelsError { AmountParseError(String), /// address parsing error AddressParseError, + /// node id parsing error + NodeIdParseError, + /// block id parsing error + BlockIdParseError, + /// operation id parsing error + OperationIdParseError, + /// endorsement id parsing error + EndorsementIdParseError, /// checked operation error CheckedOperationError(String), /// invalid version identifier: {0} diff --git a/massa-models/src/execution.rs b/massa-models/src/execution.rs index 90db22c1113..c49d87c3f49 100644 --- a/massa-models/src/execution.rs +++ b/massa-models/src/execution.rs @@ -9,8 +9,7 @@ pub enum ReadOnlyResult { /// An error occurred during execution. Error(String), /// The result of a successful execution. - /// TODO: specify result. - Ok, + Ok(Vec), } /// The response to a request for a read-only execution. 
@@ -35,7 +34,7 @@ impl Display for ExecuteReadOnlyResponse { match &self.result { ReadOnlyResult::Error(e) => format!("an error occurred during the execution: {}", e), - ReadOnlyResult::Ok => "ok".to_string(), + ReadOnlyResult::Ok(ret) => format!("success, returned value: {:?}", ret), } )?; if !self.output_events.is_empty() { diff --git a/massa-models/src/node.rs b/massa-models/src/node.rs index 4a3a1ed06fd..3935412ab9d 100644 --- a/massa-models/src/node.rs +++ b/massa-models/src/node.rs @@ -1,28 +1,91 @@ // Copyright (c) 2022 MASSA LABS use crate::error::ModelsError; +use massa_serialization::{ + DeserializeError, Deserializer, Serializer, U64VarIntDeserializer, U64VarIntSerializer, +}; use massa_signature::PublicKey; -use serde::{Deserialize, Serialize}; +use serde_with::{DeserializeFromStr, SerializeDisplay}; +use std::ops::Bound::Included; /// `NodeId` wraps a public key to uniquely identify a node. -#[derive(Clone, Copy, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)] -pub struct NodeId(pub PublicKey); +#[derive( + Clone, Copy, Eq, Hash, Ord, PartialEq, PartialOrd, SerializeDisplay, DeserializeFromStr, +)] +pub struct NodeId(PublicKey); + +const NODEID_PREFIX: char = 'N'; +const NODEID_VERSION: u64 = 0; + +impl NodeId { + /// Create a new `NodeId` from a public key. + pub fn new(public_key: PublicKey) -> Self { + Self(public_key) + } + + /// Get the public key of the `NodeId`. 
+ pub fn get_public_key(&self) -> PublicKey { + self.0 + } +} impl std::fmt::Display for NodeId { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{}", self.0) + let u64_serializer = U64VarIntSerializer::new(); + // might want to allocate the vector with capacity in order to avoid re-allocation + let mut bytes: Vec = Vec::new(); + u64_serializer + .serialize(&NODEID_VERSION, &mut bytes) + .map_err(|_| std::fmt::Error)?; + bytes.extend(self.0.to_bytes()); + write!( + f, + "{}{}", + NODEID_PREFIX, + bs58::encode(bytes).with_check().into_string() + ) } } impl std::fmt::Debug for NodeId { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{}", self.0) + write!(f, "{}", self) } } impl std::str::FromStr for NodeId { type Err = ModelsError; + /// ## Example + /// ```rust + /// # use massa_signature::{PublicKey, KeyPair, Signature}; + /// # use massa_hash::Hash; + /// # use serde::{Deserialize, Serialize}; + /// # use std::str::FromStr; + /// # use massa_models::node::NodeId; + /// # let keypair = KeyPair::generate(); + /// # let node_id = NodeId::new(keypair.get_public_key()); + /// let ser = node_id.to_string(); + /// let res_node_id = NodeId::from_str(&ser).unwrap(); + /// assert_eq!(node_id, res_node_id); + /// ``` fn from_str(s: &str) -> Result { - Ok(NodeId(PublicKey::from_str(s)?)) + let mut chars = s.chars(); + match chars.next() { + Some(prefix) if prefix == NODEID_PREFIX => { + let data = chars.collect::(); + let decoded_bs58_check = bs58::decode(data) + .with_check(None) + .into_vec() + .map_err(|_| ModelsError::NodeIdParseError)?; + let u64_deserializer = U64VarIntDeserializer::new(Included(0), Included(u64::MAX)); + let (rest, _version) = u64_deserializer + .deserialize::(&decoded_bs58_check[..]) + .map_err(|_| ModelsError::NodeIdParseError)?; + Ok(NodeId(PublicKey::from_bytes( + rest.try_into().map_err(|_| ModelsError::NodeIdParseError)?, + )?)) + } + _ => Err(ModelsError::NodeIdParseError), + } } } diff 
--git a/massa-models/src/operation.rs b/massa-models/src/operation.rs index b31ded2932c..5f960f51a5e 100644 --- a/massa-models/src/operation.rs +++ b/massa-models/src/operation.rs @@ -11,8 +11,9 @@ use crate::{ }; use massa_hash::{Hash, HashDeserializer}; use massa_serialization::{ - Deserializer, SerializeError, Serializer, U16VarIntDeserializer, U16VarIntSerializer, - U32VarIntDeserializer, U32VarIntSerializer, U64VarIntDeserializer, U64VarIntSerializer, + DeserializeError, Deserializer, SerializeError, Serializer, U16VarIntDeserializer, + U16VarIntSerializer, U32VarIntDeserializer, U32VarIntSerializer, U64VarIntDeserializer, + U64VarIntSerializer, }; use nom::error::context; use nom::multi::length_count; @@ -25,6 +26,7 @@ use nom::{ }; use num_enum::{IntoPrimitive, TryFromPrimitive}; use serde::{Deserialize, Serialize}; +use serde_with::{DeserializeFromStr, SerializeDisplay}; use std::convert::TryInto; use std::fmt::Formatter; use std::{ops::Bound::Included, ops::RangeInclusive, str::FromStr}; @@ -36,22 +38,39 @@ pub const OPERATION_ID_SIZE_BYTES: usize = massa_hash::HASH_SIZE_BYTES; pub const OPERATION_ID_PREFIX_SIZE_BYTES: usize = 17; /// operation id -#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] +#[derive( + Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash, SerializeDisplay, DeserializeFromStr, +)] pub struct OperationId(Hash); +const OPERATIONID_PREFIX: char = 'O'; +const OPERATIONID_VERSION: u64 = 0; + /// Left part of the operation id hash stored in a vector of size [`OPERATION_ID_PREFIX_SIZE_BYTES`] #[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] pub struct OperationPrefixId([u8; OPERATION_ID_PREFIX_SIZE_BYTES]); impl std::fmt::Display for OperationId { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{}", self.0.to_bs58_check()) + let u64_serializer = U64VarIntSerializer::new(); + // might want to allocate the vector with capacity in order to avoid 
re-allocation + let mut bytes: Vec = Vec::new(); + u64_serializer + .serialize(&OPERATIONID_VERSION, &mut bytes) + .map_err(|_| std::fmt::Error)?; + bytes.extend(self.0.to_bytes()); + write!( + f, + "{}{}", + OPERATIONID_PREFIX, + bs58::encode(bytes).with_check().into_string() + ) } } impl std::fmt::Debug for OperationId { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{}", self.0.to_bs58_check()) + write!(f, "{}", self) } } @@ -69,8 +88,36 @@ impl std::fmt::Debug for OperationPrefixId { impl FromStr for OperationId { type Err = ModelsError; + /// ## Example + /// ```rust + /// # use massa_hash::Hash; + /// # use std::str::FromStr; + /// # use massa_models::operation::OperationId; + /// # let op_id = OperationId::from_bytes(&[0; 32]); + /// let ser = op_id.to_string(); + /// let res_op_id = OperationId::from_str(&ser).unwrap(); + /// assert_eq!(op_id, res_op_id); + /// ``` fn from_str(s: &str) -> Result { - Ok(OperationId(Hash::from_str(s)?)) + let mut chars = s.chars(); + match chars.next() { + Some(prefix) if prefix == OPERATIONID_PREFIX => { + let data = chars.collect::(); + let decoded_bs58_check = bs58::decode(data) + .with_check(None) + .into_vec() + .map_err(|_| ModelsError::OperationIdParseError)?; + let u64_deserializer = U64VarIntDeserializer::new(Included(0), Included(u64::MAX)); + let (rest, _version) = u64_deserializer + .deserialize::(&decoded_bs58_check[..]) + .map_err(|_| ModelsError::OperationIdParseError)?; + Ok(OperationId(Hash::from_bytes( + rest.try_into() + .map_err(|_| ModelsError::OperationIdParseError)?, + ))) + } + _ => Err(ModelsError::OperationIdParseError), + } } } @@ -118,13 +165,6 @@ impl OperationId { OperationId(Hash::from_bytes(data)) } - /// op id from `bs58` check - pub fn from_bs58_check(data: &str) -> Result { - Ok(OperationId( - Hash::from_bs58_check(data).map_err(|_| ModelsError::HashError)?, - )) - } - /// convert the [`OperationId`] into a [`OperationPrefixId`] pub fn into_prefix(self) -> 
OperationPrefixId { OperationPrefixId( @@ -857,8 +897,8 @@ impl Serializer> for OperationIdsSerializer { /// use std::str::FromStr; /// /// let mut operations_ids = Vec::new(); - /// operations_ids.push(OperationId::from_str("2AGSu2kBG9FZ649h18F82CYfsymkhVH2epMafMN2sPZNBQXTrz").unwrap()); - /// operations_ids.push(OperationId::from_str("2AGSu2kBG9FZ649h18F82CYfsymkhVH2epMafMN2sPZNBQXTrz").unwrap()); + /// operations_ids.push(OperationId::from_str("O1xcVGtyWAyrehW1NDpnZ1wE5K95n8qVJCV9dEJSp1ypU8eJsQU").unwrap()); + /// operations_ids.push(OperationId::from_str("O1xcVGtyWAyrehW1NDpnZ1wE5K95n8qVJCV9dEJSp1ypU8eJsQU").unwrap()); /// let mut buffer = Vec::new(); /// OperationIdsSerializer::new().serialize(&operations_ids, &mut buffer).unwrap(); /// ``` @@ -907,8 +947,8 @@ impl Deserializer> for OperationIdsDeserializer { /// use std::str::FromStr; /// /// let mut operations_ids = Vec::new(); - /// operations_ids.push(OperationId::from_str("2AGSu2kBG9FZ649h18F82CYfsymkhVH2epMafMN2sPZNBQXTrz").unwrap()); - /// operations_ids.push(OperationId::from_str("2AGSu2kBG9FZ649h18F82CYfsymkhVH2epMafMN2sPZNBQXTrz").unwrap()); + /// operations_ids.push(OperationId::from_str("O1xcVGtyWAyrehW1NDpnZ1wE5K95n8qVJCV9dEJSp1ypU8eJsQU").unwrap()); + /// operations_ids.push(OperationId::from_str("O1xcVGtyWAyrehW1NDpnZ1wE5K95n8qVJCV9dEJSp1ypU8eJsQU").unwrap()); /// let mut buffer = Vec::new(); /// OperationIdsSerializer::new().serialize(&operations_ids, &mut buffer).unwrap(); /// let (rest, deserialized_operations_ids) = OperationIdsDeserializer::new(1000).deserialize::(&buffer).unwrap(); diff --git a/massa-models/src/output_event.rs b/massa-models/src/output_event.rs index 7d8a4eb0f7e..3a2dc851ee0 100644 --- a/massa-models/src/output_event.rs +++ b/massa-models/src/output_event.rs @@ -35,6 +35,8 @@ pub struct EventExecutionContext { pub origin_operation_id: Option, /// if the event is final pub is_final: bool, + /// if the sc that emitted this event failed + pub is_error: bool, } impl Display 
for EventExecutionContext { diff --git a/massa-models/src/test_exports/tools.rs b/massa-models/src/test_exports/tools.rs index db4b6a902e2..af4f13ede53 100644 --- a/massa-models/src/test_exports/tools.rs +++ b/massa-models/src/test_exports/tools.rs @@ -9,7 +9,7 @@ pub fn get_next_slot_instant( t0: MassaTime, ) -> MassaTime { // get current time - let now = MassaTime::now(0).expect("could not get current time"); + let now = MassaTime::now().expect("could not get current time"); // get closest slot according to the current absolute time let mut slot = get_closest_slot_to_timestamp(thread_count, t0, genesis_timestamp, now); diff --git a/massa-models/src/timeslots.rs b/massa-models/src/timeslots.rs index a68b4b996a3..0a9883f1c06 100644 --- a/massa-models/src/timeslots.rs +++ b/massa-models/src/timeslots.rs @@ -88,14 +88,8 @@ pub fn get_current_latest_block_slot( thread_count: u8, t0: MassaTime, genesis_timestamp: MassaTime, - clock_compensation: i64, ) -> Result, ModelsError> { - get_latest_block_slot_at_timestamp( - thread_count, - t0, - genesis_timestamp, - MassaTime::now(clock_compensation)?, - ) + get_latest_block_slot_at_timestamp(thread_count, t0, genesis_timestamp, MassaTime::now()?) 
} /// Turns an `MassaTime` range [start, end) with optional start/end to a `Slot` range [start, end) with optional start/end diff --git a/massa-network-exports/src/network_controller.rs b/massa-network-exports/src/network_controller.rs index 16a0e780b2f..d8dca55d79a 100644 --- a/massa-network-exports/src/network_controller.rs +++ b/massa-network-exports/src/network_controller.rs @@ -50,7 +50,7 @@ impl NetworkCommandSender { } /// add ip to whitelist - pub async fn whitelist(&self, ips: Vec) -> Result<(), NetworkError> { + pub async fn add_to_whitelist(&self, ips: Vec) -> Result<(), NetworkError> { self.0 .send(NetworkCommand::Whitelist(ips)) .await diff --git a/massa-network-worker/Cargo.toml b/massa-network-worker/Cargo.toml index 73169728238..617c744a724 100644 --- a/massa-network-worker/Cargo.toml +++ b/massa-network-worker/Cargo.toml @@ -27,7 +27,7 @@ massa_signature = { path = "../massa-signature" } massa_time = { path = "../massa-time" } [dev-dependencies] -serial_test = "0.9" +serial_test = "0.10" tempfile = "3.3" massa_models = { path = "../massa-models", features = ["testing"] } massa_network_exports = { path = "../massa-network-exports", features = [ diff --git a/massa-network-worker/src/handshake_worker.rs b/massa-network-worker/src/handshake_worker.rs index 58624c43ebf..431bfb98433 100644 --- a/massa-network-worker/src/handshake_worker.rs +++ b/massa-network-worker/src/handshake_worker.rs @@ -134,7 +134,7 @@ impl HandshakeWorker { let self_random_hash = Hash::compute_from(&self_random_bytes); // send handshake init future let msg = Message::HandshakeInitiation { - public_key: self.self_node_id.0, + public_key: self.self_node_id.get_public_key(), random_bytes: self_random_bytes, version: self.version, }; @@ -158,7 +158,7 @@ impl HandshakeWorker { public_key: pk, random_bytes: rb, version, - } => (NodeId(pk), rb, version), + } => (NodeId::new(pk), rb, version), Message::PeerList(list) => throw!(PeerListReceived, list), _ => throw!(HandshakeWrongMessage), 
}, @@ -205,7 +205,7 @@ impl HandshakeWorker { // check their signature other_node_id - .0 + .get_public_key() .verify_signature(&self_random_hash, &other_signature) .map_err(|_err| { NetworkError::HandshakeError(HandshakeErrorType::HandshakeInvalidSignature) diff --git a/massa-network-worker/src/lib.rs b/massa-network-worker/src/lib.rs index 04cc65b09de..35819ee4fbf 100644 --- a/massa-network-worker/src/lib.rs +++ b/massa-network-worker/src/lib.rs @@ -41,7 +41,6 @@ pub mod tests; pub async fn start_network_controller( network_settings: &NetworkConfig, mut establisher: Establisher, - clock_compensation: i64, initial_peers: Option, version: Version, ) -> Result< @@ -85,7 +84,7 @@ pub async fn start_network_controller( } keypair }; - let self_node_id = NodeId(keypair.get_public_key()); + let self_node_id = NodeId::new(keypair.get_public_key()); info!("The node_id of this node is: {}", self_node_id); massa_trace!("self_node_id", { "node_id": self_node_id }); @@ -95,7 +94,7 @@ pub async fn start_network_controller( debug!("Loading peer database"); // load peer info database - let mut peer_info_db = PeerInfoDatabase::new(network_settings, clock_compensation).await?; + let mut peer_info_db = PeerInfoDatabase::new(network_settings).await?; // add bootstrap peers if let Some(peers) = initial_peers { diff --git a/massa-network-worker/src/network_worker.rs b/massa-network-worker/src/network_worker.rs index 6fe20afd24a..cac028869ba 100644 --- a/massa-network-worker/src/network_worker.rs +++ b/massa-network-worker/src/network_worker.rs @@ -97,7 +97,7 @@ impl NetworkWorker { }: NetworkWorkerChannels, version: Version, ) -> NetworkWorker { - let self_node_id = NodeId(keypair.get_public_key()); + let self_node_id = NodeId::new(keypair.get_public_key()); let (node_event_tx, node_event_rx) = mpsc::channel::(cfg.node_event_channel_size); diff --git a/massa-network-worker/src/peer_info_database.rs b/massa-network-worker/src/peer_info_database.rs index 1eb561ca053..492d54aaad4 100644 
--- a/massa-network-worker/src/peer_info_database.rs +++ b/massa-network-worker/src/peer_info_database.rs @@ -34,8 +34,6 @@ pub struct PeerInfoDatabase { pub(crate) peer_types_connection_count: EnumMap, /// Every `wakeup_interval` we try to establish a connection with known inactive peers pub(crate) wakeup_interval: MassaTime, - /// Clock compensation. - pub(crate) clock_compensation: i64, } /// Saves advertised and non standard peers to a file. @@ -83,7 +81,6 @@ pub(crate) fn cleanup_peers( cfg: &NetworkConfig, peers: &mut HashMap, opt_new_peers: Option<&Vec>, - clock_compensation: i64, ban_timeout: MassaTime, ) -> Result<(), NetworkError> { // filter and map new peers, remove duplicates @@ -154,7 +151,7 @@ pub(crate) fn cleanup_peers( // sort and truncate inactive banned peers // forget about old banned peers - let ban_limit = MassaTime::now(clock_compensation)?.saturating_sub(ban_timeout); + let ban_limit = MassaTime::now()?.saturating_sub(ban_timeout); banned_peers.retain(|p| p.last_failure.map_or(false, |v| v >= ban_limit)); banned_peers.sort_unstable_by_key(|&p| (std::cmp::Reverse(p.last_failure), p.last_alive)); banned_peers.truncate(cfg.max_banned_peers); @@ -172,8 +169,7 @@ impl PeerInfoDatabase { /// /// # Argument /// * `cfg`: network configuration - /// * `clock_compensation`: sync with server - pub async fn new(cfg: &NetworkConfig, clock_compensation: i64) -> Result { + pub async fn new(cfg: &NetworkConfig) -> Result { // wakeup interval let wakeup_interval = cfg.wakeup_interval; @@ -202,7 +198,7 @@ impl PeerInfoDatabase { } // cleanup - cleanup_peers(cfg, &mut peers, None, clock_compensation, cfg.ban_timeout)?; + cleanup_peers(cfg, &mut peers, None, cfg.ban_timeout)?; // setup saver let peers_file = cfg.peers_file.clone(); @@ -242,7 +238,6 @@ impl PeerInfoDatabase { saver_join_handle, saver_watch_tx, wakeup_interval, - clock_compensation, peer_types_connection_count: EnumMap::default(), }) } @@ -297,7 +292,6 @@ impl PeerInfoDatabase { 
&self.network_settings, &mut self.peers, None, - self.clock_compensation, self.network_settings.ban_timeout, )?; Ok(()) @@ -327,7 +321,6 @@ impl PeerInfoDatabase { &self.network_settings, &mut self.peers, Some(&new_peers.to_vec()), - self.clock_compensation, self.network_settings.ban_timeout, )?; self.request_dump() @@ -474,7 +467,7 @@ impl PeerInfoDatabase { NetworkConnectionErrorType::PeerInfoNotFoundError(ip), ) })? - .last_alive = Some(MassaTime::now(self.clock_compensation)?); + .last_alive = Some(MassaTime::now()?); self.request_dump() } @@ -492,7 +485,7 @@ impl PeerInfoDatabase { NetworkConnectionErrorType::PeerInfoNotFoundError(ip), ) })? - .last_failure = Some(MassaTime::now(self.clock_compensation)?); + .last_failure = Some(MassaTime::now()?); self.request_dump() } @@ -508,7 +501,7 @@ impl PeerInfoDatabase { .peers .entry(ip) .or_insert_with(|| PeerInfo::new(ip, false)); - peer.last_failure = Some(MassaTime::now(self.clock_compensation)?); + peer.last_failure = Some(MassaTime::now()?); if !peer.banned { peer.banned = true; if !peer.is_active() { @@ -643,7 +636,7 @@ impl PeerInfoDatabase { peer.advertised = true; // we just connected to it. Assume advertised. 
if peer.banned { - peer.last_failure = Some(MassaTime::now(self.clock_compensation)?); + peer.last_failure = Some(MassaTime::now()?); if !peer.is_active() && peer.peer_type == Default::default() { self.update()?; } @@ -685,7 +678,7 @@ impl PeerInfoDatabase { ) })?; peer.active_out_connection_attempts -= 1; - peer.last_failure = Some(MassaTime::now(self.clock_compensation)?); + peer.last_failure = Some(MassaTime::now()?); let pt = peer.peer_type; if !peer.is_active() && peer.peer_type == PeerType::Standard { self.update()?; @@ -745,7 +738,7 @@ impl PeerInfoDatabase { // is there a attempt slot available if peer.banned { massa_trace!("in_connection_refused_peer_banned", {"ip": peer.ip}); - peer.last_failure = Some(MassaTime::now(self.clock_compensation)?); + peer.last_failure = Some(MassaTime::now()?); self.request_dump()?; return Err(NetworkError::PeerConnectionError( NetworkConnectionErrorType::BannedPeerTryingToConnect(ip), @@ -852,7 +845,7 @@ impl PeerInfoDatabase { cfg: &PeerTypeConnectionConfig, ) -> Result, NetworkError> { let available_slots = count.get_available_out_connection_attempts(cfg); - let now = MassaTime::now(self.clock_compensation)?; + let now = MassaTime::now()?; let f = move |p: &&PeerInfo| { if p.peer_type != peer_type || !p.advertised || p.is_active() || p.banned { return false; diff --git a/massa-network-worker/src/tests/scenarios.rs b/massa-network-worker/src/tests/scenarios.rs index f8dde673880..d0155abab7e 100644 --- a/massa-network-worker/src/tests/scenarios.rs +++ b/massa-network-worker/src/tests/scenarios.rs @@ -102,7 +102,7 @@ async fn test_node_worker_shutdown() { let (node_event_tx, _node_event_rx) = mpsc::channel::(1); let keypair = KeyPair::generate(); - let mock_node_id = NodeId(keypair.get_public_key()); + let mock_node_id = NodeId::new(keypair.get_public_key()); let node_worker_command_tx = node_command_tx.clone(); let node_fn_handle = tokio::spawn(async move { @@ -176,7 +176,7 @@ async fn test_node_worker_operations_message() { 
let (node_event_tx, _node_event_rx) = mpsc::channel::(1); let keypair = KeyPair::generate(); - let mock_node_id = NodeId(keypair.get_public_key()); + let mock_node_id = NodeId::new(keypair.get_public_key()); // Create transaction. let transaction = get_transaction(50, 10); diff --git a/massa-network-worker/src/tests/test_peer_info_database.rs b/massa-network-worker/src/tests/test_peer_info_database.rs index bfa8d4b0947..ee66ee59344 100644 --- a/massa-network-worker/src/tests/test_peer_info_database.rs +++ b/massa-network-worker/src/tests/test_peer_info_database.rs @@ -54,7 +54,6 @@ async fn test_try_new_in_connection_in_connection_closed() { saver_join_handle, saver_watch_tx, wakeup_interval, - clock_compensation: 0, peer_types_connection_count: Default::default(), }; @@ -158,7 +157,6 @@ async fn test_out_connection_attempt_failed() { saver_watch_tx, peer_types_connection_count: Default::default(), wakeup_interval, - clock_compensation: 0, }; // test with no connection attempt before @@ -269,7 +267,6 @@ async fn test_try_out_connection_attempt_success() { saver_watch_tx, peer_types_connection_count: Default::default(), wakeup_interval, - clock_compensation: 0, }; // test with no connection attempt before @@ -361,7 +358,6 @@ async fn test_new_out_connection_closed() { saver_watch_tx, peer_types_connection_count: Default::default(), wakeup_interval, - clock_compensation: 0, }; // @@ -442,7 +438,6 @@ async fn test_new_out_connection_attempt() { saver_watch_tx, peer_types_connection_count: Default::default(), wakeup_interval, - clock_compensation: 0, }; // test with no peers. 
@@ -490,7 +485,7 @@ async fn test_get_advertisable_peer_ips() { default_peer_info_not_connected(IpAddr::V4(std::net::Ipv4Addr::new(169, 202, 0, 23))); banned_host1.peer_type = PeerType::Bootstrap; banned_host1.banned = true; - banned_host1.last_alive = Some(MassaTime::now(0).unwrap().checked_sub(1000.into()).unwrap()); + banned_host1.last_alive = Some(MassaTime::now().unwrap().checked_sub(1000.into()).unwrap()); peers.insert(banned_host1.ip, banned_host1); // peer not advertised, not return let mut connected_peers1 = @@ -500,22 +495,22 @@ async fn test_get_advertisable_peer_ips() { // peer Ok, return let mut connected_peers2 = default_peer_info_not_connected(IpAddr::V4(std::net::Ipv4Addr::new(169, 202, 0, 13))); - connected_peers2.last_alive = Some(MassaTime::now(0).unwrap().checked_sub(800.into()).unwrap()); + connected_peers2.last_alive = Some(MassaTime::now().unwrap().checked_sub(800.into()).unwrap()); connected_peers2.last_failure = - Some(MassaTime::now(0).unwrap().checked_sub(1000.into()).unwrap()); + Some(MassaTime::now().unwrap().checked_sub(1000.into()).unwrap()); peers.insert(connected_peers2.ip, connected_peers2); // peer Ok, connected return let mut connected_peers1 = default_peer_info_not_connected(IpAddr::V4(std::net::Ipv4Addr::new(169, 202, 0, 17))); connected_peers1.active_out_connections = 1; - connected_peers1.last_alive = Some(MassaTime::now(0).unwrap().checked_sub(900.into()).unwrap()); + connected_peers1.last_alive = Some(MassaTime::now().unwrap().checked_sub(900.into()).unwrap()); peers.insert(connected_peers1.ip, connected_peers1); // peer failure before alive but to early. 
return let mut connected_peers2 = default_peer_info_not_connected(IpAddr::V4(std::net::Ipv4Addr::new(169, 202, 0, 14))); - connected_peers2.last_alive = Some(MassaTime::now(0).unwrap().checked_sub(800.into()).unwrap()); + connected_peers2.last_alive = Some(MassaTime::now().unwrap().checked_sub(800.into()).unwrap()); connected_peers2.last_failure = - Some(MassaTime::now(0).unwrap().checked_sub(2000.into()).unwrap()); + Some(MassaTime::now().unwrap().checked_sub(2000.into()).unwrap()); peers.insert(connected_peers2.ip, connected_peers2); let wakeup_interval = network_settings.wakeup_interval; @@ -529,7 +524,6 @@ async fn test_get_advertisable_peer_ips() { saver_watch_tx, peer_types_connection_count: Default::default(), wakeup_interval, - clock_compensation: 0, }; // test with no peers. @@ -576,22 +570,21 @@ async fn test_get_out_connection_candidate_ips() { let mut connected_peers2 = default_peer_info_not_connected(IpAddr::V4(std::net::Ipv4Addr::new(169, 202, 0, 12))); connected_peers2.last_failure = - Some(MassaTime::now(0).unwrap().checked_sub(900.into()).unwrap()); + Some(MassaTime::now().unwrap().checked_sub(900.into()).unwrap()); peers.insert(connected_peers2.ip, connected_peers2); // peer failure before alive but too early. return let mut connected_peers2 = default_peer_info_not_connected(IpAddr::V4(std::net::Ipv4Addr::new(169, 202, 0, 13))); - connected_peers2.last_alive = Some(MassaTime::now(0).unwrap().checked_sub(900.into()).unwrap()); + connected_peers2.last_alive = Some(MassaTime::now().unwrap().checked_sub(900.into()).unwrap()); connected_peers2.last_failure = - Some(MassaTime::now(0).unwrap().checked_sub(1000.into()).unwrap()); + Some(MassaTime::now().unwrap().checked_sub(1000.into()).unwrap()); peers.insert(connected_peers2.ip, connected_peers2); // peer alive no failure. 
return let mut connected_peers1 = default_peer_info_not_connected(IpAddr::V4(std::net::Ipv4Addr::new(169, 202, 0, 14))); - connected_peers1.last_alive = - Some(MassaTime::now(0).unwrap().checked_sub(1000.into()).unwrap()); + connected_peers1.last_alive = Some(MassaTime::now().unwrap().checked_sub(1000.into()).unwrap()); peers.insert(connected_peers1.ip, connected_peers1); // peer banned not return. @@ -599,33 +592,24 @@ async fn test_get_out_connection_candidate_ips() { default_peer_info_not_connected(IpAddr::V4(std::net::Ipv4Addr::new(169, 202, 0, 23))); banned_host1.peer_type = PeerType::Bootstrap; banned_host1.banned = true; - banned_host1.last_alive = Some(MassaTime::now(0).unwrap().checked_sub(1000.into()).unwrap()); + banned_host1.last_alive = Some(MassaTime::now().unwrap().checked_sub(1000.into()).unwrap()); peers.insert(banned_host1.ip, banned_host1); // peer failure after alive not too early. return let mut connected_peers2 = default_peer_info_not_connected(IpAddr::V4(std::net::Ipv4Addr::new(169, 202, 0, 15))); - connected_peers2.last_alive = Some( - MassaTime::now(0) - .unwrap() - .checked_sub(12000.into()) - .unwrap(), - ); - connected_peers2.last_failure = Some( - MassaTime::now(0) - .unwrap() - .checked_sub(11000.into()) - .unwrap(), - ); + connected_peers2.last_alive = + Some(MassaTime::now().unwrap().checked_sub(12000.into()).unwrap()); + connected_peers2.last_failure = + Some(MassaTime::now().unwrap().checked_sub(11000.into()).unwrap()); peers.insert(connected_peers2.ip, connected_peers2); // peer failure after alive too early. 
not return let mut connected_peers2 = default_peer_info_not_connected(IpAddr::V4(std::net::Ipv4Addr::new(169, 202, 0, 16))); - connected_peers2.last_alive = - Some(MassaTime::now(0).unwrap().checked_sub(2000.into()).unwrap()); + connected_peers2.last_alive = Some(MassaTime::now().unwrap().checked_sub(2000.into()).unwrap()); connected_peers2.last_failure = - Some(MassaTime::now(0).unwrap().checked_sub(1000.into()).unwrap()); + Some(MassaTime::now().unwrap().checked_sub(1000.into()).unwrap()); peers.insert(connected_peers2.ip, connected_peers2); // peer Ok, connected, not return @@ -651,7 +635,6 @@ async fn test_get_out_connection_candidate_ips() { saver_watch_tx, peer_types_connection_count: Default::default(), wakeup_interval, - clock_compensation: 0, }; // test with no peers. @@ -695,23 +678,21 @@ async fn test_cleanup_peers() { &network_settings, &mut peers, None, - 0, network_settings.ban_timeout, ) .unwrap(); assert!(peers.is_empty()); - let now = MassaTime::now(0).unwrap(); + let now = MassaTime::now().unwrap(); let mut connected_peers1 = default_peer_info_connected(IpAddr::V4(std::net::Ipv4Addr::new(169, 202, 0, 11))); - connected_peers1.last_alive = - Some(MassaTime::now(0).unwrap().checked_sub(1000.into()).unwrap()); + connected_peers1.last_alive = Some(MassaTime::now().unwrap().checked_sub(1000.into()).unwrap()); peers.insert(connected_peers1.ip, connected_peers1); let mut connected_peers2 = default_peer_info_connected(IpAddr::V4(std::net::Ipv4Addr::new(169, 202, 0, 12))); - connected_peers2.last_alive = Some(MassaTime::now(0).unwrap().checked_sub(900.into()).unwrap()); + connected_peers2.last_alive = Some(MassaTime::now().unwrap().checked_sub(900.into()).unwrap()); let same_connected_peer = connected_peers2; let non_global = @@ -744,8 +725,7 @@ async fn test_cleanup_peers() { advertised_host1.advertised = true; advertised_host1.active_out_connections = 0; - advertised_host1.last_alive = - Some(MassaTime::now(0).unwrap().checked_sub(1000.into()).unwrap()); 
+ advertised_host1.last_alive = Some(MassaTime::now().unwrap().checked_sub(1000.into()).unwrap()); let mut advertised_host2 = default_peer_info_connected(IpAddr::V4(std::net::Ipv4Addr::new(169, 202, 0, 36))); advertised_host2.peer_type = PeerType::Standard; @@ -768,7 +748,6 @@ async fn test_cleanup_peers() { &network_settings, &mut peers, None, - 0, network_settings.ban_timeout, ) .unwrap(); @@ -798,7 +777,6 @@ async fn test_cleanup_peers() { &network_settings, &mut peers, Some(&advertised), - 0, network_settings.ban_timeout, ) .unwrap(); @@ -858,21 +836,11 @@ impl From for PeerInfoDatabase { }, last_alive: match i % 4 { 0 => None, - _ => Some( - MassaTime::now(0) - .unwrap() - .checked_sub(50000.into()) - .unwrap(), - ), + _ => Some(MassaTime::now().unwrap().checked_sub(50000.into()).unwrap()), }, last_failure: match i % 5 { 0 => None, - _ => Some( - MassaTime::now(0) - .unwrap() - .checked_sub(60000.into()) - .unwrap(), - ), + _ => Some(MassaTime::now().unwrap().checked_sub(60000.into()).unwrap()), }, advertised: (ip[2] % 2) == 0, active_out_connection_attempts: 0, @@ -893,7 +861,6 @@ impl From for PeerInfoDatabase { saver_watch_tx, peer_types_connection_count: Default::default(), wakeup_interval, - clock_compensation: 0, } } } diff --git a/massa-network-worker/src/tests/tools.rs b/massa-network-worker/src/tests/tools.rs index 1518039102e..321e9bd4f97 100644 --- a/massa-network-worker/src/tests/tools.rs +++ b/massa-network-worker/src/tests/tools.rs @@ -83,7 +83,7 @@ pub async fn full_connection_to_controller( // perform handshake let keypair = KeyPair::generate(); - let mock_node_id = NodeId(keypair.get_public_key()); + let mock_node_id = NodeId::new(keypair.get_public_key()); let res = HandshakeWorker::spawn( mock_read_half, mock_write_half, @@ -142,7 +142,7 @@ pub async fn rejected_connection_to_controller( // perform handshake and ignore errors let keypair = KeyPair::generate(); - let mock_node_id = NodeId(keypair.get_public_key()); + let mock_node_id = 
NodeId::new(keypair.get_public_key()); let result = HandshakeWorker::spawn( mock_read_half, mock_write_half, @@ -227,7 +227,7 @@ pub async fn full_connection_from_controller( // perform handshake let keypair = KeyPair::generate(); - let mock_node_id = NodeId(keypair.get_public_key()); + let mock_node_id = NodeId::new(keypair.get_public_key()); let res = HandshakeWorker::spawn( mock_read_half, mock_write_half, @@ -370,7 +370,6 @@ pub async fn network_test( start_network_controller( &network_settings, establisher, - 0, None, Version::from_str("TEST.1.10").unwrap(), ) diff --git a/massa-node/base_config/config.toml b/massa-node/base_config/config.toml index c5379de63f5..a0225ed2ce6 100644 --- a/massa-node/base_config/config.toml +++ b/massa-node/base_config/config.toml @@ -11,6 +11,8 @@ bind_private = "127.0.0.1:33034" # port on which the node API listens for public requests. Can be exposed to the Internet bind_public = "0.0.0.0:33035" + # port on which the node API(V2) listens for HTTP requests and WebSockets subscriptions. 
Can be exposed to the Internet + bind_api = "0.0.0.0:33036" # max number of arguments per RPC call max_arguments = 128 # path to the openrpc specification file used in `rpc.discover` method @@ -46,6 +48,12 @@ cursor_delay = 2000 # duration of the statistics time window in milliseconds stats_time_window_duration = 60000 + # maximum allowed gas for read only executions + max_read_only_gas = 100_000_000 + # gas cost for ABIs + abi_gas_costs_file = "base_config/gas_costs/abi_gas_costs.json" + # gas cost for wasm operator + wasm_gas_costs_file = "base_config/gas_costs/wasm_gas_costs.json" [ledger] # path to the initial ledger @@ -77,6 +85,13 @@ # max number of item returned per query max_item_return_count = 100 + # blocks headers sender(channel) capacity + broadcast_blocks_headers_capacity = 128 + # blocks sender(channel) capacity + broadcast_blocks_capacity = 128 + # filled blocks sender(channel) capacity + broadcast_filled_blocks_capacity = 128 + [protocol] # timeout after which without answer a hanshake is ended message_timeout = 5000 @@ -117,6 +132,8 @@ max_operations_propagation_time = 32000 # time threshold after which endorsement are not propagated max_endorsements_propagation_time = 48000 + # operations sender(channel) capacity + broadcast_operations_capacity = 5000 [network] # port on which to listen for protocol communication @@ -190,9 +207,9 @@ ["51.75.60.228:31245", "P13Ykon8Zo73PTKMruLViMMtE2rEG646JQ4sCcee2DnopmVM3P5"] ] # path to the bootstrap whitelist file. This whitelist define IPs that can bootstrap on your node. - bootstrap_whitelist_file = "base_config/bootstrap_whitelist.json" + bootstrap_whitelist_path = "base_config/bootstrap_whitelist.json" # path to the bootstrap blacklist file. This whitelist define IPs that will not be able to bootstrap on your node. This list is optional. 
- bootstrap_blacklist_file = "base_config/bootstrap_blacklist.json" + bootstrap_blacklist_path = "base_config/bootstrap_blacklist.json" # [optionnal] port on which to listen for incoming bootstrap requests bind = "[::]:31245" # timeout to establish a bootstrap connection @@ -209,8 +226,8 @@ read_error_timeout = 200 # timeout for message error sending write_error_timeout = 200 - # when enabled, apply a correction to the local computer clock to match bootstrap server time - enable_clock_synchronization = false + # max allowed difference between client and servers clocks in ms + max_clock_delta = 5000 # [server] data is cached for cache duration milliseconds cache_duration = 15000 # max number of simulataneous bootstraps for server diff --git a/massa-node/base_config/gas_costs/abi_gas_costs.json b/massa-node/base_config/gas_costs/abi_gas_costs.json new file mode 100644 index 00000000000..6abe6319cad --- /dev/null +++ b/massa-node/base_config/gas_costs/abi_gas_costs.json @@ -0,0 +1,49 @@ +{ + "assembly_caller_has_write_access": 142, + "assembly_function_exists": 40653, + "assembly_script_abort": 0, + "assembly_script_address_from_public_key": 317, + "assembly_script_append_data": 314, + "assembly_script_append_data_for": 337, + "assembly_script_call": 32288, + "assembly_script_create_sc": 305, + "assembly_script_date_now": 93, + "assembly_script_delete_data": 217, + "assembly_script_delete_data_for": 214, + "assembly_script_generate_event": 161, + "assembly_script_get_balance": 143, + "assembly_script_get_balance_for": 173, + "assembly_script_get_bytecode": 156, + "assembly_script_get_bytecode_for": 181, + "assembly_script_get_call_coins": 141, + "assembly_script_get_call_stack": 280, + "assembly_script_get_current_period": 142, + "assembly_script_get_current_thread": 142, + "assembly_script_get_data": 218, + "assembly_script_get_data_for": 265, + "assembly_script_get_keys": 460, + "assembly_script_get_keys_for": 483, + "assembly_script_get_op_data": 109, + 
"assembly_script_get_op_keys": 266, + "assembly_script_get_owned_addresses": 272, + "assembly_script_get_remaining_gas": 150, + "assembly_script_get_time": 140, + "assembly_script_has_data": 189, + "assembly_script_has_data_for": 226, + "assembly_script_has_op_key": 234, + "assembly_script_hash": 238, + "assembly_script_local_call": 34482, + "assembly_script_local_execution": 40401, + "assembly_script_print": 176, + "assembly_script_seed": 67, + "assembly_script_send_message": 462, + "assembly_script_set_bytecode": 224, + "assembly_script_set_bytecode_for": 275, + "assembly_script_set_data": 280, + "assembly_script_set_data_for": 400, + "assembly_script_signature_verify": 204, + "assembly_script_transfer_coins": 196, + "assembly_script_transfer_coins_for": 226, + "assembly_script_unsafe_random": 144, + "launch": 40555 +} \ No newline at end of file diff --git a/massa-node/base_config/gas_costs/wasm_gas_costs.json b/massa-node/base_config/gas_costs/wasm_gas_costs.json new file mode 100644 index 00000000000..5aa482e5e65 --- /dev/null +++ b/massa-node/base_config/gas_costs/wasm_gas_costs.json @@ -0,0 +1,13 @@ +{ + "Wasm:Drop": 38, + "Wasm:GlobalGet": 8, + "Wasm:GlobalSet": 51, + "Wasm:I32Add": 0, + "Wasm:I32Const": 0, + "Wasm:I32DivS": 61, + "Wasm:I32Mul": 26, + "Wasm:I32Sub": 0, + "Wasm:If": 78, + "Wasm:LocalGet": 3, + "Wasm:LocalSet": 18 +} \ No newline at end of file diff --git a/massa-node/base_config/openrpc.json b/massa-node/base_config/openrpc.json index 80a280a8e13..c68fd0b7daf 100644 --- a/massa-node/base_config/openrpc.json +++ b/massa-node/base_config/openrpc.json @@ -2,7 +2,7 @@ "openrpc": "1.2.4", "info": { "title": "Massa OpenRPC Specification", - "version": "TEST.17.2", + "version": "TEST.18.0", "description": "Massa OpenRPC Specification document. 
Find more information on https://docs.massa.net/en/latest/technical-doc/api.html", "termsOfService": "https://open-rpc.org", "contact": { @@ -91,7 +91,7 @@ "params": [ { "name": "address", - "description": "The strings should be valid address(es).", + "description": "Need to provide at least one valid address", "schema": { "type": "array", "items": { @@ -124,9 +124,12 @@ "params": [ { "name": "blockId", - "description": "Block ID", + "description": "Need to provide at least one valid block id", "schema": { - "$ref": "#/components/schemas/BlockId" + "type": "array", + "items": { + "$ref": "#/components/schemas/BlockId" + } }, "summary": "string", "required": true @@ -138,9 +141,9 @@ }, "name": "BlockInfo" }, - "name": "get_block", - "summary": "Get block", - "description": "Get block." + "name": "get_blocks", + "summary": "Get block(s)", + "description": "Get block(s)." }, { "tags": [ @@ -267,7 +270,7 @@ "params": [ { "name": "endorsementId", - "description": "Must be an endorsement id", + "description": "Need to provide at least one valid endorsement id", "schema": { "type": "array", "items": { @@ -360,8 +363,8 @@ ], "params": [ { - "name": "name", - "description": "String must be an operation Id", + "name": "operationId", + "description": "Need to provide at least one valid operation id", "schema": { "type": "array", "items": { @@ -473,6 +476,96 @@ "summary": "Return hashset of staking addresses", "description": "Return hashset of staking addresses." 
}, + { + "tags": [ + { + "name": "private", + "description": "Massa private api" + } + ], + "params": [ + { + "name": "ip", + "description": "The strings must be IP address(es)", + "schema": { + "type": "array", + "items": { + "description": "Ip address", + "type": "string" + } + }, + "required": true + } + ], + "result": { + "name": "No return", + "description": "No return.", + "schema": false + }, + "name": "node_add_to_bootstrap_blacklist", + "summary": "Add to bootsrap blacklist given IP address(es)", + "description": "Add to bootsrap blacklist given IP address(es)." + }, + { + "tags": [ + { + "name": "private", + "description": "Massa private api" + } + ], + "params": [ + { + "name": "ip", + "description": "The strings must be IP address(es)", + "schema": { + "type": "array", + "items": { + "description": "Ip address", + "type": "string" + } + }, + "required": true + } + ], + "result": { + "name": "No return", + "description": "No return.", + "schema": false + }, + "name": "node_add_to_bootstrap_whitelist", + "summary": "Add to bootsrap whitelist given IP address(es)", + "description": "Add to bootsrap whitelist given IP address(es)." + }, + { + "tags": [ + { + "name": "private", + "description": "Massa private api" + } + ], + "params": [ + { + "name": "ip", + "description": "The strings must be IP address(es)", + "schema": { + "type": "array", + "items": { + "description": "Ip address", + "type": "string" + } + }, + "required": true + } + ], + "result": { + "name": "No return", + "description": "No return.", + "schema": false + }, + "name": "node_add_to_peers_whitelist", + "summary": "Add to peers whitelist given IP address(es)", + "description": "Add to peers whitelist given IP address(es)." + }, { "tags": [ { @@ -531,6 +624,182 @@ "summary": "Ban given IP address(es)", "description": "Ban given IP address(es)." 
}, + { + "tags": [ + { + "name": "private", + "description": "Massa private api" + } + ], + "params": [], + "result": { + "name": "ip", + "description": "The strings must be IP address(es)", + "schema": { + "type": "array", + "items": { + "description": "Ip address", + "$ref": "#/components/schemas/IpAddress" + } + } + }, + "name": "node_bootstrap_blacklist", + "summary": "Returns bootsrap blacklist IP address(es)", + "description": "Returns bootsrap blacklist IP address(es)." + }, + { + "tags": [ + { + "name": "private", + "description": "Massa private api" + } + ], + "params": [], + "result": { + "name": "ip", + "description": "The strings must be IP address(es)", + "schema": { + "type": "array", + "items": { + "description": "Ip address", + "$ref": "#/components/schemas/IpAddress" + } + } + }, + "name": "node_bootstrap_whitelist", + "summary": "Returns bootsrap whitelist IP address(es)", + "description": "Returns bootsrap whitelist IP address(es)." + }, + { + "tags": [ + { + "name": "private", + "description": "Massa private api" + } + ], + "params": [], + "result": { + "name": "No return", + "description": "No return.", + "schema": false + }, + "name": "node_bootstrap_whitelist_allow_all", + "summary": "Allow everyone to bootsrap from the node", + "description": "Allow everyone to bootsrap from the node. Remove bootsrap whitelist configuration file." + }, + { + "tags": [ + { + "name": "private", + "description": "Massa private api" + } + ], + "params": [], + "result": { + "name": "ip", + "description": "The strings must be IP address(es)", + "schema": { + "type": "array", + "items": { + "description": "Ip address", + "$ref": "#/components/schemas/IpAddress" + } + } + }, + "name": "node_peers_whitelist", + "summary": "Returns peers whitelist IP address(es)", + "description": "Returns peers whitelist IP address(es)." 
+ }, + { + "tags": [ + { + "name": "private", + "description": "Massa private api" + } + ], + "params": [ + { + "name": "ip", + "description": "The strings must be IP address(es)", + "schema": { + "type": "array", + "items": { + "description": "Ip address", + "type": "string" + } + }, + "required": true + } + ], + "result": { + "name": "No return", + "description": "No return.", + "schema": false + }, + "name": "node_remove_from_bootstrap_blacklist", + "summary": "Remove from bootsrap blacklist given IP address(es)", + "description": "Remove from bootsrap blacklist given IP address(es)." + }, + { + "tags": [ + { + "name": "private", + "description": "Massa private api" + } + ], + "params": [ + { + "name": "ip", + "description": "The strings must be IP address(es)", + "schema": { + "type": "array", + "items": { + "description": "Ip address", + "type": "string" + } + }, + "required": true + } + ], + "result": { + "name": "No return", + "description": "No return.", + "schema": false + }, + "name": "node_remove_from_bootstrap_whitelist", + "summary": "Remove from bootsrap whitelist given IP address(es)", + "description": "Remove from bootsrap whitelist given IP address(es)." + }, + { + "tags": [ + { + "name": "private", + "description": "Massa private api" + } + ], + "params": [ + { + "name": "ip", + "description": "The strings must be IP address(es)", + "schema": { + "type": "array", + "items": { + "description": "Ip address", + "type": "string" + } + }, + "required": true + } + ], + "result": { + "name": "No return", + "description": "No return.", + "schema": false + }, + "name": "node_remove_from_peers_whitelist", + "summary": "Remove from peers whitelist given IP address(es)", + "description": "Remove from peers whitelist given IP address(es)." + }, { "tags": [ { @@ -756,6 +1025,277 @@ "name": "send_operations", "summary": "Adds operations to pool", "description": "Adds operations to pool. Returns operations that were ok and sent to pool." 
+ }, + { + "tags": [ + { + "name": "api", + "description": "Massa api V2" + }, + { + "name": "experimental", + "description": "Experimental APIs. They might disappear, and they will change" + } + ], + "params": [], + "result": { + "schema": { + "$ref": "#/components/schemas/Version" + }, + "name": "Version", + "description": "Node version" + }, + "name": "get_version", + "summary": "Get Massa node version", + "description": "Get Massa node version." + }, + { + "tags": [ + { + "name": "api", + "description": "Massa api V2" + }, + { + "name": "experimental", + "description": "Experimental APIs. They might disappear, and they will change" + }, + { + "name": "websocket", + "description": "WebSocket subscription" + } + ], + "params": [], + "result": { + "schema": { + "$ref": "#/components/schemas/BlockInfo" + }, + "name": "BlockInfo" + }, + "name": "subscribe_new_blocks", + "summary": "New produced blocks", + "description": "New produced blocks." + }, + { + "tags": [ + { + "name": "api", + "description": "Massa api V2" + }, + { + "name": "experimental", + "description": "Experimental APIs. They might disappear, and they will change" + }, + { + "name": "websocket", + "description": "WebSocket subscription" + } + ], + "params": [], + "result": { + "schema": { + "$ref": "#/components/schemas/WrappedHeader" + }, + "name": "BlockHeader" + }, + "name": "subscribe_new_blocks_headers", + "summary": "New produced blocks headers", + "description": "New produced blocks headers." + }, + { + "tags": [ + { + "name": "api", + "description": "Massa api V2" + }, + { + "name": "experimental", + "description": "Experimental APIs. 
They might disappear, and they will change" + }, + { + "name": "websocket", + "description": "WebSocket subscription" + } + ], + "params": [], + "result": { + "schema": { + "$ref": "#/components/schemas/FilledBlockInfo" + }, + "name": "FilledBlockInfo" + }, + "name": "subscribe_new_filled_blocks", + "summary": "New produced block with operations content", + "description": "New produced block with operations content." + }, + { + "tags": [ + { + "name": "api", + "description": "Massa api V2" + }, + { + "name": "experimental", + "description": "Experimental APIs. They might disappear, and they will change" + }, + { + "name": "websocket", + "description": "WebSocket subscription" + } + ], + "params": [], + "result": { + "schema": { + "$ref": "#/components/schemas/Operation" + }, + "name": "Operation" + }, + "name": "subscribe_new_operations", + "summary": "Subscribe to new received operations", + "description": "Subscribe to new received operations." + }, + { + "tags": [ + { + "name": "api", + "description": "Massa api V2" + }, + { + "name": "experimental", + "description": "Experimental APIs. They might disappear, and they will change" + }, + { + "name": "websocket", + "description": "WebSocket subscription" + } + ], + "params": [ + { + "name": "subscriptionId", + "description": "Subscription id", + "schema": { + "type": "integer" + }, + "required": true + } + ], + "result": { + "schema": { + "type": "boolean" + }, + "name": "unsubscribe result", + "description": "unsubscribe success message" + }, + "name": "unsubscribe_new_blocks", + "summary": "Unsubscribe from new produced blocks", + "description": "Unsubscribe from new produced blocks." + }, + { + "tags": [ + { + "name": "api", + "description": "Massa api V2" + }, + { + "name": "experimental", + "description": "Experimental APIs. 
They might disappear, and they will change" + }, + { + "name": "websocket", + "description": "WebSocket subscription" + } + ], + "params": [ + { + "name": "subscriptionId", + "description": "Subscription id", + "schema": { + "type": "integer" + }, + "required": true + } + ], + "result": { + "schema": { + "type": "boolean" + }, + "name": "unsubscribe result", + "description": "unsubscribe success message" + }, + "name": "unsubscribe_new_blocks_headers", + "summary": "Unsubscribe from new produced blocks headers", + "description": "Unsubscribe from new produced blocks headers." + }, + { + "tags": [ + { + "name": "api", + "description": "Massa api V2" + }, + { + "name": "experimental", + "description": "Experimental APIs. They might disappear, and they will change" + }, + { + "name": "websocket", + "description": "WebSocket subscription" + } + ], + "params": [ + { + "name": "subscriptionId", + "description": "Subscription id", + "schema": { + "type": "integer" + }, + "required": true + } + ], + "result": { + "schema": { + "type": "boolean" + }, + "name": "unsubscribe result", + "description": "unsubscribe success message" + }, + "name": "unsubscribe_new_filled_blocks", + "summary": "Unsubscribe from new produced filled blocks", + "description": "Unsubscribe from new produced filled blocks." + }, + { + "tags": [ + { + "name": "api", + "description": "Massa api V2" + }, + { + "name": "experimental", + "description": "Experimental APIs. 
They might disappear, and they will change" + }, + { + "name": "websocket", + "description": "WebSocket subscription" + } + ], + "params": [ + { + "name": "subscriptionId", + "description": "Subscription id", + "schema": { + "type": "integer" + }, + "required": true + } + ], + "result": { + "schema": { + "type": "boolean" + }, + "name": "unsubscribe result", + "description": "unsubscribe success message" + }, + "name": "unsubscribe_new_operations", + "summary": "Unsubscribe from new received operations", + "description": "Unsubscribe from new received operations." } ], "components": { @@ -1381,6 +1921,21 @@ }, "additionalProperties": false }, + "ReadOnlyResult": { + "title": "ReadOnlyResult", + "type": "object", + "description": "The result of a read-only execution", + "properties": { + "Ok": { + "type": "array", + "description": "Included in case of success. The result of the execution" + }, + "Error": { + "type": "string", + "description": "Included in case of error. The error message" + } + } + }, "ExecuteReadOnlyResponse": { "title": "ExecuteReadOnlyResponse", "required": [ @@ -1395,8 +1950,7 @@ "$ref": "#/components/schemas/ExecutedAt" }, "result": { - "description": "\"ok\" or error message", - "type": "string" + "$ref": "#/components/schemas/ReadOnlyResult" }, "output_events": { "type": "array", @@ -1490,6 +2044,10 @@ "is_final": { "description": "Optional filter to filter only candidate or final events", "type": "boolean" + }, + "is_error": { + "description": "Optional filter to retrieve events generated in a failed execution", + "type": "boolean" } }, "additionalProperties": false @@ -1554,6 +2112,81 @@ "is_final": { "description": "Whether the event is final", "type": "boolean" + }, + "is_error": { + "description": "Whether the event was generated in a failed executed or not", + "type": "boolean" + } + }, + "additionalProperties": false + }, + "IpAddress": { + "description": "Ipv4 or Ipv6 address", + "type": "string" + }, + "FilledBlock": { + "title": 
"FilledBlock", + "required": [ + "header", + "operations" + ], + "type": "object", + "properties": { + "header": { + "$ref": "#/components/schemas/WrappedHeader", + "description": "signed header" + }, + "operations": { + "description": "Operations", + "type": "array", + "items": { + "$ref": "#/components/schemas/OperationInfo" + } + } + }, + "additionalProperties": false + }, + "FilledBlockInfo": { + "title": "FilledBlockInfo", + "required": [ + "id" + ], + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "content": { + "$ref": "#/components/schemas/FilledBlockInfoContent" + } + }, + "additionalProperties": false + }, + "FilledBlockInfoContent": { + "title": "FilledBlockInfoContent", + "required": [ + "block", + "is_final", + "is_in_blockclique", + "is_stale" + ], + "type": "object", + "properties": { + "is_final": { + "description": "true if final", + "type": "boolean" + }, + "is_stale": { + "description": "true if incompatible with a final block", + "type": "boolean" + }, + "is_in_blockclique": { + "description": "true if in the greatest clique", + "type": "boolean" + }, + "block": { + "$ref": "#/components/schemas/FilledBlock", + "description": "filled block" } }, "additionalProperties": false @@ -2168,26 +2801,7 @@ }, "Version": { "description": "Application version, checked during handshakes", - "required": [ - "instance", - "major", - "minor" - ], - "type": "object", - "properties": { - "instance": { - "description": "", - "type": "number" - }, - "major": { - "description": "", - "type": "number" - }, - "minor": { - "description": "", - "type": "number" - } - } + "type": "string" }, "WrappedHeader": { "description": "signed operation", @@ -2271,6 +2885,14 @@ "$ref": "#/components/schemas/BlockInfo" } }, + "BlockHeader": { + "name": "BlockHeader", + "summary": "BlockHeader", + "description": "A BlockHeader object", + "schema": { + "$ref": "#/components/schemas/WrappedHeader" + } + }, "Clique": { "name": "Clique", "summary": "Clique", @@ 
-2327,6 +2949,14 @@ "$ref": "#/components/schemas/EventFilter" } }, + "FilledBlockInfo": { + "name": "FilledBlockInfo", + "summary": "FilledBlockInfo", + "description": "A FilledBlockInfo object", + "schema": { + "$ref": "#/components/schemas/FilledBlockInfo" + } + }, "GraphInterval": { "name": "GraphInterval", "summary": "GraphInterval", @@ -2414,6 +3044,14 @@ "schema": { "$ref": "#/components/schemas/Staker" } + }, + "Version": { + "name": "Version", + "summary": "Version", + "description": "A Version object", + "schema": { + "$ref": "#/components/schemas/Version" + } } } } diff --git a/massa-node/src/main.rs b/massa-node/src/main.rs index 1b572ef0003..bcc6098d21b 100644 --- a/massa-node/src/main.rs +++ b/massa-node/src/main.rs @@ -8,14 +8,14 @@ use crate::settings::SETTINGS; use crossbeam_channel::{Receiver, TryRecvError}; use dialoguer::Password; -use massa_api::{APIConfig, Private, Public, RpcServer, StopHandle, API}; +use massa_api::{APIConfig, ApiServer, ApiV2, Private, Public, RpcServer, StopHandle, API}; use massa_async_pool::AsyncPoolConfig; use massa_bootstrap::{get_state, start_bootstrap_server, BootstrapConfig, BootstrapManager}; use massa_consensus_exports::events::ConsensusEvent; use massa_consensus_exports::{ConsensusChannels, ConsensusConfig, ConsensusManager}; use massa_consensus_worker::start_consensus_worker; use massa_executed_ops::ExecutedOpsConfig; -use massa_execution_exports::{ExecutionConfig, ExecutionManager, StorageCostsConstants}; +use massa_execution_exports::{ExecutionConfig, ExecutionManager, GasCosts, StorageCostsConstants}; use massa_execution_worker::start_execution_worker; use massa_factory_exports::{FactoryChannels, FactoryConfig, FactoryManager}; use massa_factory_worker::start_factory; @@ -52,7 +52,8 @@ use massa_pool_worker::start_pool_controller; use massa_pos_exports::{PoSConfig, SelectorConfig, SelectorManager}; use massa_pos_worker::start_selector_worker; use massa_protocol_exports::{ - ProtocolCommand, 
ProtocolCommandSender, ProtocolConfig, ProtocolManager, + ProtocolCommand, ProtocolCommandSender, ProtocolConfig, ProtocolManager, ProtocolReceivers, + ProtocolSenders, }; use massa_protocol_worker::start_protocol_controller; use massa_storage::Storage; @@ -66,7 +67,7 @@ use std::time::Duration; use std::{path::Path, process, sync::Arc}; use structopt::StructOpt; use tokio::signal; -use tokio::sync::mpsc; +use tokio::sync::{broadcast, mpsc}; use tracing::{error, info, warn}; use tracing_subscriber::filter::{filter_fn, LevelFilter}; mod settings; @@ -86,10 +87,11 @@ async fn launch( mpsc::Receiver<()>, StopHandle, StopHandle, + StopHandle, ) { info!("Node version : {}", *VERSION); if let Some(end) = *END_TIMESTAMP { - if MassaTime::now(0).expect("could not get now time") > end { + if MassaTime::now().expect("could not get now time") > end { panic!("This episode has come to an end, please get the latest testnet node version to continue"); } } @@ -170,8 +172,8 @@ async fn launch( let bootstrap_config: BootstrapConfig = BootstrapConfig { bootstrap_list: SETTINGS.bootstrap.bootstrap_list.clone(), - bootstrap_whitelist_file: SETTINGS.bootstrap.bootstrap_whitelist_file.clone(), - bootstrap_blacklist_file: SETTINGS.bootstrap.bootstrap_blacklist_file.clone(), + bootstrap_whitelist_path: SETTINGS.bootstrap.bootstrap_whitelist_path.clone(), + bootstrap_blacklist_path: SETTINGS.bootstrap.bootstrap_blacklist_path.clone(), bind: SETTINGS.bootstrap.bind, connect_timeout: SETTINGS.bootstrap.connect_timeout, read_timeout: SETTINGS.bootstrap.read_timeout, @@ -180,7 +182,7 @@ async fn launch( write_error_timeout: SETTINGS.bootstrap.write_error_timeout, retry_delay: SETTINGS.bootstrap.retry_delay, max_ping: SETTINGS.bootstrap.max_ping, - enable_clock_synchronization: SETTINGS.bootstrap.enable_clock_synchronization, + max_clock_delta: SETTINGS.bootstrap.max_clock_delta, cache_duration: SETTINGS.bootstrap.cache_duration, max_simultaneous_bootstraps: 
SETTINGS.bootstrap.max_simultaneous_bootstraps, per_ip_min_interval: SETTINGS.bootstrap.per_ip_min_interval, @@ -284,7 +286,6 @@ async fn launch( start_network_controller( &network_config, Establisher::new(), - bootstrap_state.compensation_millis, bootstrap_state.peers, *VERSION, ) @@ -312,7 +313,6 @@ async fn launch( max_final_events: SETTINGS.execution.max_final_events, readonly_queue_length: SETTINGS.execution.readonly_queue_length, cursor_delay: SETTINGS.execution.cursor_delay, - clock_compensation: bootstrap_state.compensation_millis, max_async_gas: MAX_ASYNC_GAS, max_gas_per_block: MAX_GAS_PER_BLOCK, roll_price: ROLL_PRICE, @@ -329,6 +329,12 @@ async fn launch( max_bytecode_size: MAX_BYTECODE_LENGTH, max_datastore_value_size: MAX_DATASTORE_VALUE_LENGTH, storage_costs_constants, + max_read_only_gas: SETTINGS.execution.max_read_only_gas, + gas_costs: GasCosts::new( + SETTINGS.execution.abi_gas_costs_file.clone(), + SETTINGS.execution.wasm_gas_costs_file.clone(), + ) + .expect("Failed to load gas costs"), }; let (execution_manager, execution_controller) = start_execution_worker( execution_config, @@ -375,8 +381,11 @@ async fn launch( max_item_return_count: SETTINGS.consensus.max_item_return_count, max_gas_per_block: MAX_GAS_PER_BLOCK, channel_size: CHANNEL_SIZE, - clock_compensation_millis: bootstrap_state.compensation_millis, bootstrap_part_size: CONSENSUS_BOOTSTRAP_PART_SIZE, + broadcast_enabled: SETTINGS.api.enable_ws, + broadcast_blocks_headers_capacity: SETTINGS.consensus.broadcast_blocks_headers_capacity, + broadcast_blocks_capacity: SETTINGS.consensus.broadcast_blocks_capacity, + broadcast_filled_blocks_capacity: SETTINGS.consensus.broadcast_filled_blocks_capacity, }; let (consensus_event_sender, consensus_event_receiver) = @@ -387,11 +396,16 @@ async fn launch( pool_command_sender: pool_controller.clone(), controller_event_tx: consensus_event_sender, protocol_command_sender: ProtocolCommandSender(protocol_command_sender.clone()), + block_header_sender: 
broadcast::channel(consensus_config.broadcast_blocks_headers_capacity) + .0, + block_sender: broadcast::channel(consensus_config.broadcast_blocks_capacity).0, + filled_block_sender: broadcast::channel(consensus_config.broadcast_filled_blocks_capacity) + .0, }; let (consensus_controller, consensus_manager) = start_consensus_worker( consensus_config, - consensus_channels, + consensus_channels.clone(), bootstrap_state.graph, shared_storage.clone(), ); @@ -426,13 +440,24 @@ async fn launch( t0: T0, max_operations_propagation_time: SETTINGS.protocol.max_operations_propagation_time, max_endorsements_propagation_time: SETTINGS.protocol.max_endorsements_propagation_time, + broadcast_enabled: SETTINGS.api.enable_ws, + broadcast_operations_capacity: SETTINGS.protocol.broadcast_operations_capacity, }; - let protocol_manager = start_protocol_controller( - protocol_config, - network_command_sender.clone(), + let protocol_senders = ProtocolSenders { + network_command_sender: network_command_sender.clone(), + operation_sender: broadcast::channel(protocol_config.broadcast_operations_capacity).0, + }; + + let protocol_receivers = ProtocolReceivers { network_event_receiver, protocol_command_receiver, + }; + + let protocol_manager = start_protocol_controller( + protocol_config, + protocol_receivers, + protocol_senders.clone(), consensus_controller.clone(), pool_controller.clone(), shared_storage.clone(), @@ -445,7 +470,6 @@ async fn launch( thread_count: THREAD_COUNT, genesis_timestamp: *GENESIS_TIMESTAMP, t0: T0, - clock_compensation_millis: bootstrap_state.compensation_millis, initial_delay: SETTINGS.factory.initial_delay, max_block_size: MAX_BLOCK_SIZE as u64, max_block_gas: MAX_GAS_PER_BLOCK, @@ -467,7 +491,6 @@ async fn launch( bootstrap_config, massa_bootstrap::Establisher::new(), private_key, - bootstrap_state.compensation_millis, *VERSION, ) .await @@ -476,9 +499,12 @@ async fn launch( let api_config: APIConfig = APIConfig { bind_private: SETTINGS.api.bind_private, 
bind_public: SETTINGS.api.bind_public, + bind_api: SETTINGS.api.bind_api, draw_lookahead_period_count: SETTINGS.api.draw_lookahead_period_count, max_arguments: SETTINGS.api.max_arguments, openrpc_spec_path: SETTINGS.api.openrpc_spec_path.clone(), + bootstrap_whitelist_path: SETTINGS.bootstrap.bootstrap_whitelist_path.clone(), + bootstrap_blacklist_path: SETTINGS.bootstrap.bootstrap_blacklist_path.clone(), max_request_body_size: SETTINGS.api.max_request_body_size, max_response_body_size: SETTINGS.api.max_response_body_size, max_connections: SETTINGS.api.max_connections, @@ -500,6 +526,23 @@ async fn launch( t0: T0, periods_per_cycle: PERIODS_PER_CYCLE, }; + + // spawn Massa API + let api = API::::new( + consensus_channels, + protocol_senders, + api_config.clone(), + *VERSION, + ); + let api_handle = api + .serve(&SETTINGS.api.bind_api, &api_config) + .await + .expect("failed to start MASSA API"); + + // Disable WebSockets for Private and Public API's + let mut api_config = api_config.clone(); + api_config.enable_ws = false; + // spawn private API let (api_private, api_private_stop_rx) = API::::new( network_command_sender.clone(), @@ -523,7 +566,6 @@ async fn launch( network_config, *VERSION, network_command_sender.clone(), - bootstrap_state.compensation_millis, node_id, shared_storage.clone(), ); @@ -574,6 +616,7 @@ async fn launch( api_private_stop_rx, api_private_handle, api_public_handle, + api_handle, ) } @@ -602,6 +645,7 @@ async fn stop( }: Managers, api_private_handle: StopHandle, api_public_handle: StopHandle, + api_handle: StopHandle, ) { // stop bootstrap if let Some(bootstrap_manager) = bootstrap_manager { @@ -617,6 +661,9 @@ async fn stop( // stop private API api_private_handle.stop(); + // stop Massa API + api_handle.stop(); + // stop factory factory_manager.stop(); @@ -744,6 +791,7 @@ async fn run(args: Args) -> anyhow::Result<()> { mut api_private_stop_rx, api_private_handle, api_public_handle, + api_handle, ) = launch(node_wallet.clone()).await; // 
interrupt signal listener @@ -762,6 +810,9 @@ async fn run(args: Args) -> anyhow::Result<()> { warn!("in response to a desynchronization, the node is going to bootstrap again"); break true; } + ConsensusEvent::Stop => { + break false; + } }, Err(TryRecvError::Disconnected) => { error!("consensus_event_receiver.wait_event disconnected"); @@ -808,6 +859,7 @@ async fn run(args: Args) -> anyhow::Result<()> { }, api_private_handle, api_public_handle, + api_handle, ) .await; diff --git a/massa-node/src/settings.rs b/massa-node/src/settings.rs index 625f539a066..3511d2c7bf6 100644 --- a/massa-node/src/settings.rs +++ b/massa-node/src/settings.rs @@ -27,6 +27,9 @@ pub struct ExecutionSettings { pub readonly_queue_length: usize, pub cursor_delay: MassaTime, pub stats_time_window_duration: MassaTime, + pub max_read_only_gas: u64, + pub abi_gas_costs_file: PathBuf, + pub wasm_gas_costs_file: PathBuf, } #[derive(Clone, Debug, Deserialize)] @@ -73,8 +76,8 @@ pub struct NetworkSettings { #[derive(Debug, Deserialize, Clone)] pub struct BootstrapSettings { pub bootstrap_list: Vec<(SocketAddr, PublicKey)>, - pub bootstrap_whitelist_file: std::path::PathBuf, - pub bootstrap_blacklist_file: std::path::PathBuf, + pub bootstrap_whitelist_path: PathBuf, + pub bootstrap_blacklist_path: PathBuf, pub bind: Option, pub connect_timeout: MassaTime, pub read_timeout: MassaTime, @@ -83,7 +86,7 @@ pub struct BootstrapSettings { pub write_error_timeout: MassaTime, pub retry_delay: MassaTime, pub max_ping: MassaTime, - pub enable_clock_synchronization: bool, + pub max_clock_delta: MassaTime, pub cache_duration: MassaTime, pub max_simultaneous_bootstraps: u32, pub per_ip_min_interval: MassaTime, @@ -115,6 +118,7 @@ pub struct APISettings { pub draw_lookahead_period_count: u64, pub bind_private: SocketAddr, pub bind_public: SocketAddr, + pub bind_api: SocketAddr, pub max_arguments: u64, pub openrpc_spec_path: PathBuf, pub max_request_body_size: u32, @@ -166,6 +170,12 @@ pub struct ConsensusSettings 
{ pub block_db_prune_interval: MassaTime, /// max number of items returned while querying pub max_item_return_count: usize, + /// blocks headers sender(channel) capacity + pub broadcast_blocks_headers_capacity: usize, + /// blocks sender(channel) capacity + pub broadcast_blocks_capacity: usize, + /// filled blocks sender(channel) capacity + pub broadcast_filled_blocks_capacity: usize, } /// Protocol Configuration, read from toml user configuration file @@ -209,6 +219,8 @@ pub struct ProtocolSettings { pub max_operations_propagation_time: MassaTime, /// Time threshold after which operation are not propagated pub max_endorsements_propagation_time: MassaTime, + /// operations sender sender(channel) capacity + pub broadcast_operations_capacity: usize, } #[cfg(test)] diff --git a/massa-node/src/tests/config.toml b/massa-node/src/tests/config.toml index b2a5c438045..9f3ebfee3c0 100644 --- a/massa-node/src/tests/config.toml +++ b/massa-node/src/tests/config.toml @@ -5,6 +5,7 @@ draw_lookahead_period_count = 10 bind_private = "127.0.0.1:33034" bind_public = "0.0.0.0:33035" + bind_api = "0.0.0.0:33036" max_arguments = 128 [execution] diff --git a/massa-pos-exports/src/cycle_info.rs b/massa-pos-exports/src/cycle_info.rs index 3bddf8f19c2..e911c566591 100644 --- a/massa-pos-exports/src/cycle_info.rs +++ b/massa-pos-exports/src/cycle_info.rs @@ -1,5 +1,5 @@ use bitvec::vec::BitVec; -use massa_hash::{Hash, HASH_SIZE_BYTES}; +use massa_hash::{Hash, HashDeserializer, HashSerializer, HASH_SIZE_BYTES}; use massa_models::{ address::{Address, AddressDeserializer, AddressSerializer}, prehash::PreHashMap, @@ -7,7 +7,8 @@ use massa_models::{ slot::Slot, }; use massa_serialization::{ - Deserializer, SerializeError, Serializer, U64VarIntDeserializer, U64VarIntSerializer, + Deserializer, OptionDeserializer, OptionSerializer, SerializeError, Serializer, + U64VarIntDeserializer, U64VarIntSerializer, }; use nom::{ branch::alt, @@ -104,7 +105,10 @@ pub struct CycleInfo { /// Hash of the 
production statistics pub production_stats_hash: Hash, /// Hash of the cycle state - pub global_hash: Hash, + pub cycle_global_hash: Hash, + /// Snapshot of the final state hash + /// Used for PoS selections + pub final_state_hash_snapshot: Option, } impl CycleInfo { @@ -135,7 +139,7 @@ impl CycleInfo { hash_concat.extend(production_stats_hash.to_bytes()); // compute the global hash - let global_hash = Hash::compute_from(&hash_concat); + let cycle_global_hash = Hash::compute_from(&hash_concat); // create the new cycle CycleInfo { @@ -146,7 +150,8 @@ impl CycleInfo { production_stats, roll_counts_hash, production_stats_hash, - global_hash, + cycle_global_hash, + final_state_hash_snapshot: None, } } @@ -216,7 +221,7 @@ impl CycleInfo { } // compute the global hash - self.global_hash = Hash::compute_from(&hash_concat); + self.cycle_global_hash = Hash::compute_from(&hash_concat); // return the completion status self.complete @@ -312,7 +317,7 @@ fn test_cycle_info_hash_computation() { "production_stats_hash mismatch" ); assert_eq!( - cycle_a.global_hash, cycle_b.global_hash, + cycle_a.cycle_global_hash, cycle_b.cycle_global_hash, "global_hash mismatch" ); } @@ -322,6 +327,7 @@ pub struct CycleInfoSerializer { u64_ser: U64VarIntSerializer, bitvec_ser: BitVecSerializer, production_stats_ser: ProductionStatsSerializer, + opt_hash_ser: OptionSerializer, } impl Default for CycleInfoSerializer { @@ -337,6 +343,7 @@ impl CycleInfoSerializer { u64_ser: U64VarIntSerializer::new(), bitvec_ser: BitVecSerializer::new(), production_stats_ser: ProductionStatsSerializer::new(), + opt_hash_ser: OptionSerializer::new(HashSerializer::new()), } } } @@ -364,6 +371,10 @@ impl Serializer for CycleInfoSerializer { self.production_stats_ser .serialize(&value.production_stats, buffer)?; + // cycle_info.final_state_hash_snapshot + self.opt_hash_ser + .serialize(&value.final_state_hash_snapshot, buffer)?; + Ok(()) } } @@ -374,6 +385,7 @@ pub struct CycleInfoDeserializer { rolls_deser: 
RollsDeserializer, bitvec_deser: BitVecDeserializer, production_stats_deser: ProductionStatsDeserializer, + opt_hash_deser: OptionDeserializer, } impl CycleInfoDeserializer { @@ -384,6 +396,7 @@ impl CycleInfoDeserializer { rolls_deser: RollsDeserializer::new(max_rolls_length), bitvec_deser: BitVecDeserializer::new(), production_stats_deser: ProductionStatsDeserializer::new(max_production_stats_length), + opt_hash_deser: OptionDeserializer::new(HashDeserializer::new()), } } } @@ -406,24 +419,30 @@ impl Deserializer for CycleInfoDeserializer { context("production_stats", |input| { self.production_stats_deser.deserialize(input) }), + context("final_state_hash_snapshot", |input| { + self.opt_hash_deser.deserialize(input) + }), )), ) .map( #[allow(clippy::type_complexity)] - |(cycle, complete, roll_counts, rng_seed, production_stats): ( + |(cycle, complete, roll_counts, rng_seed, production_stats, opt_hash): ( u64, // cycle bool, // complete Vec<(Address, u64)>, // roll_counts BitVec, // rng_seed PreHashMap, // production_stats (address, n_success, n_fail) + Option, // final_state_hash_snapshot )| { - CycleInfo::new_with_hash( + let mut cycle = CycleInfo::new_with_hash( cycle, complete, roll_counts.into_iter().collect(), rng_seed, production_stats, - ) + ); + cycle.final_state_hash_snapshot = opt_hash; + cycle }, ) .parse(buffer) diff --git a/massa-pos-exports/src/pos_final_state.rs b/massa-pos-exports/src/pos_final_state.rs index d62194f742d..0394e361ad4 100644 --- a/massa-pos-exports/src/pos_final_state.rs +++ b/massa-pos-exports/src/pos_final_state.rs @@ -5,6 +5,7 @@ use massa_hash::Hash; use massa_models::error::ModelsError; use massa_models::streaming_step::StreamingStep; use massa_models::{address::Address, amount::Amount, prehash::PreHashMap, slot::Slot}; +use massa_serialization::{Serializer, U64VarIntSerializer}; use std::collections::VecDeque; use std::{ collections::BTreeMap, @@ -27,6 +28,8 @@ pub struct PoSFinalState { pub initial_rolls: BTreeMap, /// 
initial seeds, used for negative cycle look back (cycles -2, -1 in that order) pub initial_seeds: Vec, + /// initial state hash + pub initial_ledger_hash: Hash, } impl PoSFinalState { @@ -36,6 +39,7 @@ impl PoSFinalState { initial_seed_string: &str, initial_rolls_path: &PathBuf, selector: Box, + initial_ledger_hash: Hash, ) -> Result { // load get initial rolls from file let initial_rolls = serde_json::from_str::>( @@ -56,6 +60,7 @@ impl PoSFinalState { selector, initial_rolls, initial_seeds, + initial_ledger_hash, }) } @@ -186,7 +191,9 @@ impl PoSFinalState { )); } } else { - panic!("PoS History shouldn't be empty here."); + return Err(PosError::ContainerInconsistency( + "PoS history should never be empty here".into(), + )); } // get the last history cycle, should always be present because it was filled above @@ -232,7 +239,7 @@ impl PoSFinalState { /// Feeds the selector targeting a given draw cycle fn feed_selector(&self, draw_cycle: u64) -> PosResult<()> { // get roll lookback - let lookback_rolls = match draw_cycle.checked_sub(3) { + let (lookback_rolls, lookback_state_hash) = match draw_cycle.checked_sub(3) { // looking back in history Some(c) => { let index = self @@ -242,10 +249,16 @@ impl PoSFinalState { if !cycle_info.complete { return Err(PosError::CycleUnfinished(c)); } - cycle_info.roll_counts.clone() + // take the final_state_hash_snapshot at cycle - 3 + // it will later be combined with rng_seed from cycle - 2 to determine the selection seed + // do this here to avoid a potential attacker manipulating the selections + let state_hash = cycle_info + .final_state_hash_snapshot + .expect("critical: a complete cycle must contain a final state hash snapshot"); + (cycle_info.roll_counts.clone(), state_hash) } // looking back to negative cycles - None => self.initial_rolls.clone(), + None => (self.initial_rolls.clone(), self.initial_ledger_hash), }; // get seed lookback @@ -259,7 +272,12 @@ impl PoSFinalState { if !cycle_info.complete { return 
Err(PosError::CycleUnfinished(c)); } - Hash::compute_from(&cycle_info.rng_seed.clone().into_vec()) + let u64_ser = U64VarIntSerializer::new(); + let mut seed = Vec::new(); + u64_ser.serialize(&c, &mut seed).unwrap(); + seed.extend(cycle_info.rng_seed.clone().into_vec()); + seed.extend(lookback_state_hash.to_bytes()); + Hash::compute_from(&seed) } // looking back to negative cycles None => self.initial_seeds[draw_cycle as usize], @@ -271,6 +289,16 @@ impl PoSFinalState { .feed_cycle(draw_cycle, lookback_rolls, lookback_seed) } + /// Feeds the selector targeting a given draw cycle + pub fn feed_cycle_state_hash(&mut self, cycle: u64, final_state_hash: Hash) { + if let Some(index) = self.get_cycle_index(cycle) { + let cycle = self.cycle_history.get_mut(index).unwrap(); + cycle.final_state_hash_snapshot = Some(final_state_hash); + } else { + panic!("cycle {} should be contained here", cycle); + } + } + /// Retrieves the amount of rolls a given address has at the latest cycle pub fn get_rolls_for(&self, addr: &Address) -> u64 { self.cycle_history diff --git a/massa-protocol-exports/src/channels.rs b/massa-protocol-exports/src/channels.rs new file mode 100644 index 00000000000..57b29d61f16 --- /dev/null +++ b/massa-protocol-exports/src/channels.rs @@ -0,0 +1,23 @@ +use massa_models::operation::Operation; +use massa_network_exports::{NetworkCommandSender, NetworkEventReceiver}; +use tokio::sync::mpsc; + +use crate::ProtocolCommand; + +/// Contains channels (senders) used by the protocol worker +/// Contains (a) channel(s) to send info to api +#[derive(Clone)] +pub struct ProtocolSenders { + /// network command sender + pub network_command_sender: NetworkCommandSender, + /// Broadcast sender(channel) for new operations + pub operation_sender: tokio::sync::broadcast::Sender, +} + +/// Contains channels(receivers) used by the protocol worker +pub struct ProtocolReceivers { + /// network event receiver + pub network_event_receiver: NetworkEventReceiver, + /// protocol command 
receiver + pub protocol_command_receiver: mpsc::Receiver, +} diff --git a/massa-protocol-exports/src/lib.rs b/massa-protocol-exports/src/lib.rs index ab550c45e2e..473ed03fcde 100644 --- a/massa-protocol-exports/src/lib.rs +++ b/massa-protocol-exports/src/lib.rs @@ -5,10 +5,12 @@ #![feature(ip)] #![warn(missing_docs)] #![warn(unused_crate_dependencies)] +mod channels; mod error; mod protocol_controller; mod settings; +pub use channels::{ProtocolReceivers, ProtocolSenders}; pub use error::ProtocolError; pub use protocol_controller::{ BlocksResults, ProtocolCommand, ProtocolCommandSender, ProtocolManagementCommand, diff --git a/massa-protocol-exports/src/settings.rs b/massa-protocol-exports/src/settings.rs index 6c2ad08291a..a86156ae7ee 100644 --- a/massa-protocol-exports/src/settings.rs +++ b/massa-protocol-exports/src/settings.rs @@ -55,4 +55,8 @@ pub struct ProtocolConfig { pub max_operations_propagation_time: MassaTime, /// max time we propagate endorsements pub max_endorsements_propagation_time: MassaTime, + /// Whether WebSockets are enabled + pub broadcast_enabled: bool, + /// operation sender sender(channel) capacity + pub broadcast_operations_capacity: usize, } diff --git a/massa-protocol-exports/src/tests/tools.rs b/massa-protocol-exports/src/tests/tools.rs index 3544c84c7fa..2928ad7a206 100644 --- a/massa-protocol-exports/src/tests/tools.rs +++ b/massa-protocol-exports/src/tests/tools.rs @@ -33,7 +33,7 @@ pub struct NodeInfo { /// create node info pub fn create_node() -> NodeInfo { let keypair = KeyPair::generate(); - let id = NodeId(keypair.get_public_key()); + let id = NodeId::new(keypair.get_public_key()); NodeInfo { keypair, id } } @@ -220,10 +220,12 @@ pub fn create_protocol_config() -> ProtocolConfig { max_serialized_operations_size_per_block: 1024, controller_channel_size: 1024, event_channel_size: 1024, - genesis_timestamp: MassaTime::now(0).unwrap(), + genesis_timestamp: MassaTime::now().unwrap(), t0: MassaTime::from_millis(16000), 
max_operations_propagation_time: MassaTime::from_millis(30000), max_endorsements_propagation_time: MassaTime::from_millis(60000), + broadcast_enabled: false, + broadcast_operations_capacity: 128, } } diff --git a/massa-protocol-worker/Cargo.toml b/massa-protocol-worker/Cargo.toml index 2817e32c9fb..7d600ba7791 100644 --- a/massa-protocol-worker/Cargo.toml +++ b/massa-protocol-worker/Cargo.toml @@ -26,7 +26,7 @@ massa_signature = { path = "../massa-signature" } [dev-dependencies] lazy_static = "1.4" -serial_test = "0.9" +serial_test = "0.10" futures = "0.3" massa_signature = { path = "../massa-signature" } diff --git a/massa-protocol-worker/src/protocol_worker.rs b/massa-protocol-worker/src/protocol_worker.rs index d99be1b3b70..25b4875fd43 100644 --- a/massa-protocol-worker/src/protocol_worker.rs +++ b/massa-protocol-worker/src/protocol_worker.rs @@ -8,8 +8,10 @@ use crate::{node_info::NodeInfo, worker_operations_impl::OperationBatchBuffer}; use massa_consensus_exports::ConsensusController; use massa_logging::massa_trace; +use massa_models::operation::Operation; use massa_models::slot::Slot; use massa_models::timeslots::get_block_slot_timestamp; +use massa_models::wrapped::Id; use massa_models::{ block::{BlockId, WrappedHeader}, endorsement::{EndorsementId, WrappedEndorsement}, @@ -22,9 +24,8 @@ use massa_network_exports::{AskForBlocksInfo, NetworkCommandSender, NetworkEvent use massa_pool_exports::PoolController; use massa_protocol_exports::{ ProtocolCommand, ProtocolConfig, ProtocolError, ProtocolManagementCommand, ProtocolManager, + ProtocolReceivers, ProtocolSenders, }; - -use massa_models::wrapped::Id; use massa_storage::Storage; use massa_time::{MassaTime, TimeError}; use std::collections::{HashMap, HashSet}; @@ -43,14 +44,14 @@ use tracing::{debug, error, info, warn}; /// /// # Arguments /// * `config`: protocol settings -/// * `network_command_sender`: the `NetworkCommandSender` we interact with -/// * `network_event_receiver`: the `NetworkEventReceiver` we 
interact with +/// * `senders`: sender(s) channel(s) to communicate with other modules +/// * `receivers`: receiver(s) channel(s) to communicate with other modules +/// * `consensus_controller`: interact with consensus module /// * `storage`: Shared storage to fetch data that are fetch across all modules pub async fn start_protocol_controller( config: ProtocolConfig, - network_command_sender: NetworkCommandSender, - network_event_receiver: NetworkEventReceiver, - protocol_command_receiver: mpsc::Receiver, + receivers: ProtocolReceivers, + senders: ProtocolSenders, consensus_controller: Box, pool_controller: Box, storage: Storage, @@ -64,10 +65,11 @@ pub async fn start_protocol_controller( let res = ProtocolWorker::new( config, ProtocolWorkerChannels { - network_command_sender, - network_event_receiver, - controller_command_rx: protocol_command_receiver, + network_command_sender: senders.network_command_sender, + network_event_receiver: receivers.network_event_receiver, + controller_command_rx: receivers.protocol_command_receiver, controller_manager_rx, + operation_sender: senders.operation_sender, }, consensus_controller, pool_controller, @@ -131,6 +133,8 @@ pub struct ProtocolWorker { controller_command_rx: mpsc::Receiver, /// Channel to send management commands to the controller. controller_manager_rx: mpsc::Receiver, + /// Broadcast sender(channel) for new operations + operation_sender: tokio::sync::broadcast::Sender, /// Ids of active nodes mapped to node info. 
pub(crate) active_nodes: HashMap, /// List of wanted blocks, @@ -162,6 +166,8 @@ pub struct ProtocolWorkerChannels { pub controller_command_rx: mpsc::Receiver, /// protocol management command receiver pub controller_manager_rx: mpsc::Receiver, + /// Broadcast sender(channel) for new operations + pub operation_sender: tokio::sync::broadcast::Sender, } impl ProtocolWorker { @@ -180,6 +186,7 @@ impl ProtocolWorker { network_event_receiver, controller_command_rx, controller_manager_rx, + operation_sender, }: ProtocolWorkerChannels, consensus_controller: Box, pool_controller: Box, @@ -193,6 +200,7 @@ impl ProtocolWorker { pool_controller, controller_command_rx, controller_manager_rx, + operation_sender, active_nodes: Default::default(), block_wishlist: Default::default(), checked_endorsements: LinearHashCacheSet::new(config.max_known_endorsements_size), @@ -931,6 +939,11 @@ impl ProtocolWorker { } if !new_operations.is_empty() { + if self.config.broadcast_enabled { + for op in new_operations.clone() { + let _ = self.operation_sender.send(op.1.content); + } + } // Store operation, claim locally let mut ops = self.storage.clone_without_refs(); ops.store_operations(new_operations.into_values().collect()); @@ -938,7 +951,7 @@ impl ProtocolWorker { // Propagate operations when their expire period isn't `max_operations_propagation_time` old. let mut ops_to_propagate = ops.clone(); let operations_to_not_propagate = { - let now = MassaTime::now(0)?; + let now = MassaTime::now()?; let read_operations = ops_to_propagate.read_operations(); ops_to_propagate .get_op_refs() @@ -1039,7 +1052,7 @@ impl ProtocolWorker { // Propagate endorsements when the slot of the block they endorse isn't `max_endorsements_propagation_time` old. 
let mut endorsements_to_propagate = endorsements.clone(); let endorsements_to_not_propagate = { - let now = MassaTime::now(0)?; + let now = MassaTime::now()?; let read_endorsements = endorsements_to_propagate.read_endorsements(); endorsements_to_propagate .get_endorsement_refs() diff --git a/massa-protocol-worker/src/tests/cache_scenarios.rs b/massa-protocol-worker/src/tests/cache_scenarios.rs index f9cd92be300..8fc6718ea4b 100644 --- a/massa-protocol-worker/src/tests/cache_scenarios.rs +++ b/massa-protocol-worker/src/tests/cache_scenarios.rs @@ -36,7 +36,7 @@ async fn test_noting_block_does_not_panic_with_zero_max_node_known_blocks_size() // Create 2 node. let nodes = tools::create_and_connect_nodes(2, &mut network_controller).await; - let address = Address::from_public_key(&nodes[0].id.0); + let address = Address::from_public_key(&nodes[0].id.get_public_key()); let thread = address.get_thread(2); let operation = tools::create_operation_with_expire_period(&nodes[0].keypair, 1); diff --git a/massa-protocol-worker/src/tests/endorsements_scenarios.rs b/massa-protocol-worker/src/tests/endorsements_scenarios.rs index 588b329efd0..ff29dbca693 100644 --- a/massa-protocol-worker/src/tests/endorsements_scenarios.rs +++ b/massa-protocol-worker/src/tests/endorsements_scenarios.rs @@ -272,7 +272,7 @@ async fn test_protocol_propagates_endorsements_only_to_nodes_that_dont_know_abou // Create 1 node. let nodes = tools::create_and_connect_nodes(1, &mut network_controller).await; - let address = Address::from_public_key(&nodes[0].id.0); + let address = Address::from_public_key(&nodes[0].id.get_public_key()); let thread = address.get_thread(2); let endorsement = tools::create_endorsement(); @@ -295,6 +295,7 @@ async fn test_protocol_propagates_endorsements_only_to_nodes_that_dont_know_abou // because of the previously received header. 
let mut sender = protocol_command_sender.clone(); thread::spawn(move || { + std::thread::sleep(Duration::from_millis(300)); let mut storage = Storage::create_root(); storage.store_endorsements(vec![endorsement]); sender.propagate_endorsements(storage).unwrap(); @@ -345,7 +346,7 @@ async fn test_protocol_propagates_endorsements_only_to_nodes_that_dont_know_abou // Create 1 node. let nodes = tools::create_and_connect_nodes(1, &mut network_controller).await; - let address = Address::from_public_key(&nodes[0].id.0); + let address = Address::from_public_key(&nodes[0].id.get_public_key()); let thread = address.get_thread(2); let endorsement = tools::create_endorsement(); @@ -418,7 +419,7 @@ async fn test_protocol_propagates_endorsements_only_to_nodes_that_dont_know_abou // Create 2 nodes. let nodes = tools::create_and_connect_nodes(2, &mut network_controller).await; - let address = Address::from_public_key(&nodes[0].id.0); + let address = Address::from_public_key(&nodes[0].id.get_public_key()); let thread = address.get_thread(2); let endorsement = tools::create_endorsement(); diff --git a/massa-protocol-worker/src/tests/operations_scenarios.rs b/massa-protocol-worker/src/tests/operations_scenarios.rs index b0ee9cc8fa1..3b7f73fa0a7 100644 --- a/massa-protocol-worker/src/tests/operations_scenarios.rs +++ b/massa-protocol-worker/src/tests/operations_scenarios.rs @@ -440,7 +440,7 @@ async fn test_protocol_propagates_operations_only_to_nodes_that_dont_know_about_ // Create 1 node. let nodes = tools::create_and_connect_nodes(1, &mut network_controller).await; - let address = Address::from_public_key(&nodes[0].id.0); + let address = Address::from_public_key(&nodes[0].id.get_public_key()); let thread = address.get_thread(2); let operation = tools::create_operation_with_expire_period(&nodes[0].keypair, 1); @@ -691,7 +691,7 @@ async fn test_protocol_does_not_propagates_operations_when_receiving_those_insid // 1. 
Create an operation let operation = tools::create_operation_with_expire_period(&creator_node.keypair, 1); - let address = Address::from_public_key(&creator_node.id.0); + let address = Address::from_public_key(&creator_node.id.get_public_key()); let thread = address.get_thread(2); // 2. Create a block coming from node creator_node, and including the operation. diff --git a/massa-protocol-worker/src/tests/tools.rs b/massa-protocol-worker/src/tests/tools.rs index 7917116c4a1..3a40123a544 100644 --- a/massa-protocol-worker/src/tests/tools.rs +++ b/massa-protocol-worker/src/tests/tools.rs @@ -11,10 +11,10 @@ use massa_network_exports::BlockInfoReply; use massa_pool_exports::test_exports::{MockPoolController, PoolEventReceiver}; use massa_protocol_exports::{ tests::mock_network_controller::MockNetworkController, ProtocolCommandSender, ProtocolConfig, - ProtocolManager, + ProtocolManager, ProtocolReceivers, ProtocolSenders, }; use massa_storage::Storage; -use tokio::sync::mpsc; +use tokio::sync::{broadcast, mpsc}; pub async fn protocol_test(protocol_config: &ProtocolConfig, test: F) where @@ -44,12 +44,20 @@ where // start protocol controller let (protocol_command_sender, protocol_command_receiver) = mpsc::channel(protocol_config.controller_channel_size); + let operation_sender = broadcast::channel(protocol_config.broadcast_operations_capacity).0; + let protocol_receivers = ProtocolReceivers { + network_event_receiver, + protocol_command_receiver, + }; + let protocol_senders = ProtocolSenders { + network_command_sender, + operation_sender, + }; // start protocol controller let protocol_manager: ProtocolManager = start_protocol_controller( *protocol_config, - network_command_sender, - network_event_receiver, - protocol_command_receiver, + protocol_receivers, + protocol_senders, consensus_controller, pool_controller, Storage::create_root(), @@ -108,11 +116,21 @@ where // start protocol controller let (protocol_command_sender, protocol_command_receiver) = 
mpsc::channel(protocol_config.controller_channel_size); - let protocol_manager = start_protocol_controller( - *protocol_config, - network_command_sender, + + let protocol_senders = ProtocolSenders { + network_command_sender: network_command_sender.clone(), + operation_sender: broadcast::channel(protocol_config.broadcast_operations_capacity).0, + }; + + let protocol_receivers = ProtocolReceivers { network_event_receiver, protocol_command_receiver, + }; + + let protocol_manager = start_protocol_controller( + *protocol_config, + protocol_receivers, + protocol_senders, consensus_controller, pool_controller, storage.clone(), diff --git a/massa-sdk/Cargo.toml b/massa-sdk/Cargo.toml index d9c6f16a7a1..ce8e1434561 100644 --- a/massa-sdk/Cargo.toml +++ b/massa-sdk/Cargo.toml @@ -4,7 +4,7 @@ version = "0.1.0" edition = "2021" [dependencies] -jsonrpsee = { version = "0.16.1", features = ["client"] } +jsonrpsee = { version = "0.16.2", features = ["client"] } http = "0.2.8" massa_models = { path = "../massa-models" } massa_time = { path = "../massa-time" } diff --git a/massa-sdk/src/lib.rs b/massa-sdk/src/lib.rs index 0a282d0a355..60278ecf266 100644 --- a/massa-sdk/src/lib.rs +++ b/massa-sdk/src/lib.rs @@ -172,18 +172,74 @@ impl RpcClient { .await } - /// add ips to whitelist - /// create peer if it was unknown - pub async fn node_whitelist(&self, ips: Vec) -> RpcResult<()> { + /// Returns node peers whitelist IP address(es). + pub async fn node_peers_whitelist(&self) -> RpcResult> { self.http_client - .request("node_whitelist", rpc_params![ips]) + .request("node_peers_whitelist", rpc_params![]) .await } - /// remove IPs from whitelist - pub async fn node_remove_from_whitelist(&self, ips: Vec) -> RpcResult<()> { + /// Add IP address(es) to node peers whitelist. 
+ pub async fn node_add_to_peers_whitelist(&self, ips: Vec) -> RpcResult<()> { self.http_client - .request("node_remove_from_whitelist", rpc_params![ips]) + .request("node_add_to_peers_whitelist", rpc_params![ips]) + .await + } + + /// Remove IP address(es) from node peers whitelist. + pub async fn node_remove_from_peers_whitelist(&self, ips: Vec) -> RpcResult<()> { + self.http_client + .request("node_remove_from_peers_whitelist", rpc_params![ips]) + .await + } + + /// Returns node bootstrap whitelist IP address(es). + pub async fn node_bootstrap_whitelist(&self) -> RpcResult> { + self.http_client + .request("node_bootstrap_whitelist", rpc_params![]) + .await + } + + /// Allow everyone to bootstrap from the node. + /// remove bootstrap whitelist configuration file. + pub async fn node_bootstrap_whitelist_allow_all(&self) -> RpcResult<()> { + self.http_client + .request("node_bootstrap_whitelist_allow_all", rpc_params![]) + .await + } + + /// Add IP address(es) to node bootstrap whitelist. + pub async fn node_add_to_bootstrap_whitelist(&self, ips: Vec) -> RpcResult<()> { + self.http_client + .request("node_add_to_bootstrap_whitelist", rpc_params![ips]) + .await + } + + /// Remove IP address(es) from bootstrap whitelist. + pub async fn node_remove_from_bootstrap_whitelist(&self, ips: Vec) -> RpcResult<()> { + self.http_client + .request("node_remove_from_bootstrap_whitelist", rpc_params![ips]) + .await + } + + /// Returns node bootstrap blacklist IP address(es). + pub async fn node_bootstrap_blacklist(&self) -> RpcResult> { + self.http_client + .request("node_bootstrap_blacklist", rpc_params![]) + .await + } + + /// Add IP address(es) to node bootstrap blacklist. + pub async fn node_add_to_bootstrap_blacklist(&self, ips: Vec) -> RpcResult<()> { + self.http_client + .request("node_add_to_bootstrap_blacklist", rpc_params![ips]) + .await + } + + /// Remove IP address(es) from bootstrap blacklist. 
+ pub async fn node_remove_from_bootstrap_blacklist(&self, ips: Vec) -> RpcResult<()> { + self.http_client + .request("node_remove_from_bootstrap_blacklist", rpc_params![ips]) .await } @@ -209,7 +265,7 @@ impl RpcClient { self.http_client.request("get_stakers", rpc_params![]).await } - /// Returns operations information associated to a given list of operations' IDs. + /// Returns operation(s) information associated to a given list of operation(s) ID(s). pub async fn get_operations( &self, operation_ids: Vec, @@ -219,7 +275,7 @@ impl RpcClient { .await } - /// get info on endorsements by ids + /// Returns endorsement(s) information associated to a given list of endorsement(s) ID(s) pub async fn get_endorsements( &self, endorsement_ids: Vec, @@ -229,10 +285,10 @@ impl RpcClient { .await } - /// Get information on a block given its `BlockId` - pub async fn get_block(&self, block_id: BlockId) -> RpcResult { + /// Returns block(s) information associated to a given list of block(s) ID(s) + pub async fn get_blocks(&self, block_ids: Vec) -> RpcResult { self.http_client - .request("get_block", rpc_params![block_id]) + .request("get_blocks", rpc_params![block_ids]) .await } diff --git a/massa-signature/Cargo.toml b/massa-signature/Cargo.toml index 319f660c449..896248d0374 100644 --- a/massa-signature/Cargo.toml +++ b/massa-signature/Cargo.toml @@ -19,5 +19,5 @@ massa_hash = { path = "../massa-hash" } massa_serialization = { path = "../massa-serialization" } [dev-dependencies] -serial_test = "0.9" +serial_test = "0.10" serde_json = "1.0" diff --git a/massa-signature/src/signature_impl.rs b/massa-signature/src/signature_impl.rs index 832c547418e..2311e467a26 100644 --- a/massa-signature/src/signature_impl.rs +++ b/massa-signature/src/signature_impl.rs @@ -184,44 +184,6 @@ impl KeyPair { pub fn get_public_key(&self) -> PublicKey { PublicKey(self.0.public) } - - /// Encode a keypair into his `base58` form - /// - /// # Example - /// ``` - /// # use massa_signature::KeyPair; - 
/// let keypair = KeyPair::generate(); - /// let bs58 = keypair.to_bs58_check(); - /// ``` - pub fn to_bs58_check(&self) -> String { - bs58::encode(self.to_bytes()).with_check().into_string() - } - - /// Decode a `base58` encoded keypair - /// - /// # Example - /// ``` - /// # use massa_signature::KeyPair; - /// let keypair = KeyPair::generate(); - /// let bs58 = keypair.to_bs58_check(); - /// let keypair2 = KeyPair::from_bs58_check(&bs58).unwrap(); - /// ``` - pub fn from_bs58_check(data: &str) -> Result { - bs58::decode(data) - .with_check(None) - .into_vec() - .map_err(|err| { - MassaSignatureError::ParsingError(format!( - "keypair bs58_check parsing error: {}", - err - )) - }) - .and_then(|key| { - KeyPair::from_bytes(&key.try_into().map_err(|_| { - MassaSignatureError::ParsingError("Bad keypair format".to_string()) - })?) - }) - } } impl ::serde::Serialize for KeyPair { @@ -451,20 +413,6 @@ impl PublicKey { }) } - /// Serialize a `PublicKey` using `bs58` encoding with checksum. - /// - /// # Example - /// ``` - /// # use massa_signature::{PublicKey, KeyPair}; - /// # use serde::{Deserialize, Serialize}; - /// let keypair = KeyPair::generate(); - /// - /// let serialized: String = keypair.get_public_key().to_bs58_check(); - /// ``` - pub fn to_bs58_check(&self) -> String { - bs58::encode(self.to_bytes()).with_check().into_string() - } - /// Serialize a `PublicKey` as bytes. /// /// # Example @@ -493,37 +441,6 @@ impl PublicKey { self.0.to_bytes() } - /// Deserialize a `PublicKey` using `bs58` encoding with checksum. 
- /// - /// # Example - /// ``` - /// # use massa_signature::{PublicKey, KeyPair}; - /// # use serde::{Deserialize, Serialize}; - /// let keypair = KeyPair::generate(); - /// - /// let serialized: String = keypair.get_public_key().to_bs58_check(); - /// let deserialized: PublicKey = PublicKey::from_bs58_check(&serialized).unwrap(); - /// ``` - pub fn from_bs58_check(data: &str) -> Result { - bs58::decode(data) - .with_check(None) - .into_vec() - .map_err(|err| { - MassaSignatureError::ParsingError(format!( - "public key bs58_check parsing error: {}", - err - )) - }) - .and_then(|key| { - PublicKey::from_bytes(&key.try_into().map_err(|err| { - MassaSignatureError::ParsingError(format!( - "public key bs58_check parsing error: {:?}", - err - )) - })?) - }) - } - /// Deserialize a `PublicKey` from bytes. /// /// # Example diff --git a/massa-time/src/lib.rs b/massa-time/src/lib.rs index a8a3771a334..280c424fb51 100644 --- a/massa-time/src/lib.rs +++ b/massa-time/src/lib.rs @@ -200,10 +200,7 @@ impl MassaTime { /// Smallest time interval pub const EPSILON: MassaTime = MassaTime(1); - /// Gets current compensated UNIX timestamp (resolution: milliseconds). - /// - /// # Parameters - /// * `compensation_millis`: when the system clock is slightly off, this parameter allows correcting it by adding this signed number of milliseconds to the locally measured timestamp + /// Gets current UNIX timestamp (resolution: milliseconds). 
/// /// ``` /// # use std::time::{Duration, SystemTime, UNIX_EPOCH}; @@ -211,23 +208,18 @@ impl MassaTime { /// # use std::convert::TryFrom; /// # use std::cmp::max; /// let now_duration : Duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); - /// let now_massa_time : MassaTime = MassaTime::now(0).unwrap(); + /// let now_massa_time : MassaTime = MassaTime::now().unwrap(); /// let converted :MassaTime = MassaTime::try_from(now_duration).unwrap(); /// assert!(max(now_massa_time.saturating_sub(converted), converted.saturating_sub(now_massa_time)) < 100.into()) /// ``` - pub fn now(compensation_millis: i64) -> Result { - let now: i64 = SystemTime::now() + pub fn now() -> Result { + let now: u64 = SystemTime::now() .duration_since(UNIX_EPOCH) .map_err(|_| TimeError::TimeOverflowError)? .as_millis() .try_into() .map_err(|_| TimeError::TimeOverflowError)?; - let compensated = now - .checked_add(compensation_millis) - .ok_or(TimeError::TimeOverflowError)? - .try_into() - .map_err(|_| TimeError::TimeOverflowError)?; - Ok(MassaTime(compensated)) + Ok(MassaTime(now)) } /// Conversion to `std::time::Duration`. 
@@ -260,16 +252,16 @@ impl MassaTime { /// # use std::convert::TryFrom; /// # use std::cmp::max; /// # use std::time::Instant; - /// let (cur_timestamp, cur_instant): (MassaTime, Instant) = (MassaTime::now(0).unwrap(), Instant::now()); - /// let massa_time_instant: Instant = cur_timestamp.estimate_instant(0).unwrap(); + /// let (cur_timestamp, cur_instant): (MassaTime, Instant) = (MassaTime::now().unwrap(), Instant::now()); + /// let massa_time_instant: Instant = cur_timestamp.estimate_instant().unwrap(); /// assert!(max( /// massa_time_instant.saturating_duration_since(cur_instant), /// cur_instant.saturating_duration_since(massa_time_instant) /// ) < std::time::Duration::from_millis(10)) /// ``` - pub fn estimate_instant(self, compensation_millis: i64) -> Result { + pub fn estimate_instant(self) -> Result { let (cur_timestamp, cur_instant): (MassaTime, Instant) = - (MassaTime::now(compensation_millis)?, Instant::now()); + (MassaTime::now()?, Instant::now()); cur_instant .checked_add(self.to_duration()) .ok_or(TimeError::TimeOverflowError)? 
@@ -406,6 +398,19 @@ impl MassaTime { .map(MassaTime) } + /// ``` + /// # use massa_time::*; + /// + /// let time1 = MassaTime::from(42); + /// let time2 = MassaTime::from(84); + /// + /// assert_eq!(time1.abs_diff(time2), MassaTime::from(42)); + /// assert_eq!(time2.abs_diff(time1), MassaTime::from(42)); + /// ``` + pub fn abs_diff(&self, t: MassaTime) -> MassaTime { + MassaTime(self.0.abs_diff(t.0)) + } + /// ``` /// # use massa_time::*; /// let massa_time : MassaTime = MassaTime::from(1_640_995_200_000); diff --git a/rust-toolchain.toml b/rust-toolchain.toml index d9bd3741cbe..e1431a3b8ba 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,2 +1,2 @@ [toolchain] -channel = "nightly-2022-11-14" \ No newline at end of file +channel = "nightly-2022-12-24" \ No newline at end of file diff --git a/tools/Readme.md b/tools/Readme.md index fc8a8313862..9660063444b 100644 --- a/tools/Readme.md +++ b/tools/Readme.md @@ -26,7 +26,7 @@ If required, please update the Git tag in setup_test.rs (line 25) ### Run with local sources -* cargo script setup_test.rs -- --local "../../massa-unit-tests-src/build/massa/*.wasm" +* cargo script setup_test.rs -- --local "../massa-unit-tests-src/build/massa/*.wasm" ### Howto: add a new SC unit tests diff --git a/tools/setup_test.rs b/tools/setup_test.rs index 88e448c5845..790a6e86e65 100644 --- a/tools/setup_test.rs +++ b/tools/setup_test.rs @@ -26,7 +26,7 @@ use glob::glob; use tar::Archive; // git tag -const TAG: &str = "TEST.16.3"; +const TAG: &str = "TEST.18.2"; // Maximum archive file size to download in bytes (here: 1Mb) // const ARCHIVE_MAX_SIZE: u64 = 2; // Maximum archive file size to download in bytes (DEBUG) @@ -34,7 +34,7 @@ const TAG: &str = "TEST.16.3"; const ARCHIVE_MAX_SIZE: u64 = 1048576; // destination path -const PATH_DST_BASE_1: &str = "../massa-execution-worker/src/tests/wasm/"; +const PATH_DST_BASE_1: &str = "massa-execution-worker/src/tests/wasm/"; #[derive(Debug, thiserror::Error)] enum DlFileError {