diff --git a/.github/workflows/cd.yml b/.github/workflows/cd.yml index c5feb45c8cc..d08df142033 100644 --- a/.github/workflows/cd.yml +++ b/.github/workflows/cd.yml @@ -47,7 +47,7 @@ jobs: - uses: actions-rs/toolchain@v1 with: profile: minimal - toolchain: nightly + toolchain: nightly-2022-11-14 target: ${{ matrix.target }} override: true - uses: Swatinem/rust-cache@v1 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index a6a2b092f2c..1e25304dc1b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -27,7 +27,7 @@ jobs: - uses: actions-rs/toolchain@v1 with: profile: minimal - toolchain: nightly + toolchain: nightly-2022-11-14 components: rustfmt override: true - uses: Swatinem/rust-cache@v1 @@ -47,7 +47,7 @@ jobs: - uses: actions-rs/toolchain@v1 with: profile: minimal - toolchain: nightly + toolchain: nightly-2022-11-14 override: true - uses: Swatinem/rust-cache@v1 - uses: actions-rs/cargo@v1 @@ -65,7 +65,7 @@ jobs: - uses: actions-rs/toolchain@v1 with: profile: minimal - toolchain: nightly + toolchain: nightly-2022-11-14 override: true - uses: Swatinem/rust-cache@v1 - uses: actions-rs/cargo@v1 @@ -84,7 +84,7 @@ jobs: - uses: actions-rs/toolchain@v1 with: profile: minimal - toolchain: nightly + toolchain: nightly-2022-11-14 override: true - uses: Swatinem/rust-cache@v1 - uses: actions-rs/cargo@v1 @@ -104,7 +104,7 @@ jobs: - uses: actions-rs/toolchain@v1 with: profile: minimal - toolchain: nightly + toolchain: nightly-2022-11-14 components: clippy override: true - uses: Swatinem/rust-cache@v1 @@ -133,7 +133,7 @@ jobs: - uses: actions-rs/toolchain@v1 with: profile: minimal - toolchain: nightly + toolchain: nightly-2022-11-14 override: true - uses: Swatinem/rust-cache@v1 - uses: actions-rs/cargo@v1 @@ -162,7 +162,7 @@ jobs: - uses: actions-rs/toolchain@v1 with: profile: minimal - toolchain: nightly + toolchain: nightly-2022-11-14 components: rustfmt override: true - uses: actions-rs/cargo@v1 diff --git a/Cargo.lock b/Cargo.lock index 
84c817d2008..715f5133923 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -33,7 +33,7 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "433cfd6710c9986c576a25ca913c39d66a6474107b406f34f91d4a8923395241" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cipher", "cpufeatures", ] @@ -65,9 +65,9 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "0.7.19" +version = "0.7.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4f55bd91a0978cbfd91c457a164bab8b4001c833b7f323132c0a4e1922dd44e" +checksum = "cc936419f96fa211c1b9166887b38e5e40b19958e5b895be7c1f93adec7071ac" dependencies = [ "memchr", ] @@ -93,12 +93,6 @@ version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" -[[package]] -name = "arrayvec" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" - [[package]] name = "arrayvec" version = "0.7.2" @@ -115,6 +109,16 @@ dependencies = [ "wasmer", ] +[[package]] +name = "async-lock" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8101efe8695a6c17e02911402145357e718ac92d3ff88ae8419e84b1707b685" +dependencies = [ + "event-listener", + "futures-lite", +] + [[package]] name = "async-speed-limit" version = "0.4.0" @@ -127,6 +131,17 @@ dependencies = [ "tokio", ] +[[package]] +name = "async-trait" +version = "0.1.58" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e805d94e6b5001b651426cf4cd446b1ab5f319d27bab5c644f61de0a804360c" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "atty" version = "0.2.14" @@ -152,7 +167,7 @@ checksum = "cab84319d616cfb654d03394f38ab7e6f0919e181b1b57e1fd15e7fb4077d9a7" dependencies = [ "addr2line", "cc", - "cfg-if 1.0.0", + "cfg-if", "libc", 
"miniz_oxide", "object 0.29.0", @@ -171,6 +186,15 @@ version = "1.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b645a089122eccb6111b4f81cbc1a49f5900ac4666bb93ac027feaecf15607bf" +[[package]] +name = "beef" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a8241f3ebb85c056b509d4327ad0358fbbba6ffb340bf388f26350aeda225b1" +dependencies = [ + "serde", +] + [[package]] name = "bindgen" version = "0.60.1" @@ -204,21 +228,21 @@ checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" dependencies = [ "funty", "radium", - "serde 1.0.147", + "serde", "tap", "wyz", ] [[package]] name = "blake3" -version = "1.3.1" +version = "1.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a08e53fc5a564bb15bfe6fae56bd71522205f1f91893f9c0116edad6496c183f" +checksum = "42ae2468a89544a466886840aa467a25b766499f4f04bf7d9fcd10ecee9fccef" dependencies = [ "arrayref", - "arrayvec 0.7.2", + "arrayvec", "cc", - "cfg-if 1.0.0", + "cfg-if", "constant_time_eq", "digest 0.10.6", ] @@ -241,6 +265,51 @@ dependencies = [ "generic-array", ] +[[package]] +name = "borsh" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "15bf3650200d8bffa99015595e10f1fbd17de07abbc25bb067da79e769939bfa" +dependencies = [ + "borsh-derive", + "hashbrown 0.11.2", +] + +[[package]] +name = "borsh-derive" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6441c552f230375d18e3cc377677914d2ca2b0d36e52129fe15450a2dce46775" +dependencies = [ + "borsh-derive-internal", + "borsh-schema-derive-internal", + "proc-macro-crate 0.1.5", + "proc-macro2", + "syn", +] + +[[package]] +name = "borsh-derive-internal" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5449c28a7b352f2d1e592a8a28bf139bc71afb0764a14f3c02500935d8c44065" +dependencies = [ + "proc-macro2", + 
"quote", + "syn", +] + +[[package]] +name = "borsh-schema-derive-internal" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cdbd5696d8bfa21d53d9fe39a714a18538bad11492a42d066dbbc395fb1951c0" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "bs58" version = "0.4.0" @@ -294,9 +363,9 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec8a7b6a70fde80372154c65702f00a0f56f3e1c36abbc6c440484be248856db" +checksum = "dfb24e866b15a1af2a1b663f10c6b6b8f397a84aadb828f12e5b289ec23a3a3c" [[package]] name = "bzip2-sys" @@ -311,9 +380,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.76" +version = "1.0.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76a284da2e6fe2092f2353e51713435363112dfd60030e22add80be333fb928f" +checksum = "e9f73505338f7d905b19d18738976aae232eb46b8efc15554ffc56deb5d9ebe4" dependencies = [ "jobserver", ] @@ -324,15 +393,9 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" dependencies = [ - "nom 7.1.1", + "nom", ] -[[package]] -name = "cfg-if" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" - [[package]] name = "cfg-if" version = "1.0.0" @@ -375,6 +438,17 @@ dependencies = [ "vec_map", ] +[[package]] +name = "clipboard-win" +version = "4.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4ab1b92798304eedc095b53942963240037c0516452cb11aeba709d420b2219" +dependencies = [ + "error-code", + "str-buf", + "winapi", +] + [[package]] name = "combine" version = "4.6.6" @@ -387,15 +461,18 @@ dependencies = [ [[package]] 
name = "config" -version = "0.11.0" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b1b9d958c2b1368a663f05538fc1b5975adce1e19f435acceae987aceeeb369" +checksum = "11f1667b8320afa80d69d8bbe40830df2c8a06003d86f73d8e003b2c48df416d" dependencies = [ + "async-trait", + "json5", "lazy_static", - "nom 5.1.2", + "nom", + "pathdiff", + "ron", "rust-ini", - "serde 1.0.147", - "serde-hjson", + "serde", "serde_json", "toml", "yaml-rust", @@ -417,15 +494,9 @@ dependencies = [ [[package]] name = "constant_time_eq" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" - -[[package]] -name = "convert_case" -version = "0.4.0" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" +checksum = "f3ad85c1f65dc7b37604eb0e89748faf0b9653065f2a8ef69f96a687ec1e9279" [[package]] name = "core-foundation" @@ -463,7 +534,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9847f90f32a50b0dcbd68bc23ff242798b13080b97b0569f6ed96a45ce4cf2cd" dependencies = [ "autocfg", - "cfg-if 1.0.0", + "cfg-if", "libc", "scopeguard", "windows-sys 0.33.0", @@ -543,7 +614,7 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -552,7 +623,7 @@ version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "crossbeam-utils", ] @@ -562,31 +633,31 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc" 
dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "crossbeam-epoch", "crossbeam-utils", ] [[package]] name = "crossbeam-epoch" -version = "0.9.11" +version = "0.9.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f916dfc5d356b0ed9dae65f1db9fc9770aa2851d2662b988ccf4fe3516e86348" +checksum = "01a9af1f4c2ef74bb8aa1f7e19706bc72d03598c8a570bb5de72243c7a9d9d5a" dependencies = [ "autocfg", - "cfg-if 1.0.0", + "cfg-if", "crossbeam-utils", - "memoffset", + "memoffset 0.7.1", "scopeguard", ] [[package]] name = "crossbeam-utils" -version = "0.8.12" +version = "0.8.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edbafec5fa1f196ca66527c1b12c2ec4745ca14b50f1ad8f9f6f720b55d11fac" +checksum = "4fb766fa798726286dbbb842f174001dab8abc7b627a1dd86e0b7222a95d929f" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -672,26 +743,13 @@ version = "5.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "907076dfda823b0b36d2a1bb5f90c96660a5bbcd7729e10727f07858f22c4edc" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "hashbrown 0.12.3", "lock_api", "once_cell", "parking_lot_core", ] -[[package]] -name = "derive_more" -version = "0.99.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" -dependencies = [ - "convert_case", - "proc-macro2", - "quote", - "rustc_version", - "syn", -] - [[package]] name = "dialoguer" version = "0.10.2" @@ -703,16 +761,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "dialoguer" -version = "0.10.2" -source = "git+https://github.com/mitsuhiko/dialoguer#4c1361fda198ec99a9a4a643b6c839ee76987c4c" -dependencies = [ - "console", - "tempfile", - "zeroize", -] - [[package]] name = "diff" version = "0.1.13" @@ -748,6 +796,16 @@ dependencies = [ "dirs-sys", ] +[[package]] +name = "dirs-next" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "b98cf8ebf19c3d1b223e151f99a4f9f0690dca41414773390fc824184ac833e1" +dependencies = [ + "cfg-if", + "dirs-sys-next", +] + [[package]] name = "dirs-sys" version = "0.3.7" @@ -759,6 +817,17 @@ dependencies = [ "winapi", ] +[[package]] +name = "dirs-sys-next" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d" +dependencies = [ + "libc", + "redox_users", + "winapi", +] + [[package]] name = "displaydoc" version = "0.2.3" @@ -770,6 +839,12 @@ dependencies = [ "syn", ] +[[package]] +name = "dlv-list" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0688c2a7f92e427f44895cd63841bff7b29f8d7a1648b9e7e07a4a365b2e1257" + [[package]] name = "dynasm" version = "1.2.3" @@ -815,7 +890,7 @@ dependencies = [ "ed25519", "merlin", "rand 0.7.3", - "serde 1.0.147", + "serde", "sha2 0.9.9", "zeroize", ] @@ -832,6 +907,12 @@ version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" +[[package]] +name = "endian-type" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c34f04666d835ff5d62e058c3995147c06f42fe86ff053337632bca83e42702d" + [[package]] name = "enum-iterator" version = "0.7.0" @@ -859,7 +940,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f5a56d54c8dd9b3ad34752ed197a4eb2a6601bc010808eb097a04a58ae4c43e1" dependencies = [ "enum-map-derive", - "serde 1.0.147", + "serde", ] [[package]] @@ -900,9 +981,46 @@ version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "54558e0ba96fbe24280072642eceb9d7d442e32c7ec0ea9e7ecd7b4ea2cf4e11" dependencies = [ - "serde 1.0.147", + "serde", +] + +[[package]] +name = "errno" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "f639046355ee4f37944e44f60642c6f3a7efa3cf6b78c78a0d989a8ce6c396a1" +dependencies = [ + "errno-dragonfly", + "libc", + "winapi", +] + +[[package]] +name = "errno-dragonfly" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" +dependencies = [ + "cc", + "libc", +] + +[[package]] +name = "error-code" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64f18991e7bf11e7ffee451b5318b5c1a73c52d0d0ada6e5a3017c8c1ced6a21" +dependencies = [ + "libc", + "str-buf", ] +[[package]] +name = "event-listener" +version = "2.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" + [[package]] name = "fallible-iterator" version = "0.2.0" @@ -918,6 +1036,17 @@ dependencies = [ "instant", ] +[[package]] +name = "fd-lock" +version = "3.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb21c69b9fea5e15dbc1049e4b77145dd0ba1c84019c488102de0dc4ea4b0a27" +dependencies = [ + "cfg-if", + "rustix", + "windows-sys 0.42.0", +] + [[package]] name = "fixedbitset" version = "0.4.2" @@ -930,33 +1059,12 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" -[[package]] -name = "foreign-types" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" -dependencies = [ - "foreign-types-shared", -] - -[[package]] -name = "foreign-types-shared" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" - [[package]] name = "funty" version = "2.0.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" -[[package]] -name = "futures" -version = "0.1.31" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a471a38ef8ed83cd6e40aa59c1ffe17db6855c18e3604d9c4ed8c08ebc28678" - [[package]] name = "futures" version = "0.3.25" @@ -997,7 +1105,6 @@ dependencies = [ "futures-core", "futures-task", "futures-util", - "num_cpus", ] [[package]] @@ -1006,6 +1113,21 @@ version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "00f5fb52a06bdcadeb54e8d3671f8888a39697dcb0b81b23b55174030427f4eb" +[[package]] +name = "futures-lite" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7694489acd39452c77daa48516b894c153f192c3578d5a839b62c58099fcbf48" +dependencies = [ + "fastrand", + "futures-core", + "futures-io", + "memchr", + "parking", + "pin-project-lite", + "waker-fn", +] + [[package]] name = "futures-macro" version = "0.3.25" @@ -1034,6 +1156,10 @@ name = "futures-timer" version = "3.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" +dependencies = [ + "gloo-timers", + "send_wrapper", +] [[package]] name = "futures-util" @@ -1041,7 +1167,6 @@ version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "197676987abd2f9cadff84926f410af1c183608d36641465df73ae8211dc65d6" dependencies = [ - "futures 0.1.31", "futures-channel", "futures-core", "futures-io", @@ -1070,7 +1195,7 @@ version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "wasi 0.9.0+wasi-snapshot-preview1", ] @@ -1081,7 +1206,7 @@ version = "0.2.8" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "wasi 0.11.0+wasi-snapshot-preview1", ] @@ -1126,6 +1251,70 @@ dependencies = [ "regex", ] +[[package]] +name = "gloo-net" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec897194fb9ac576c708f63d35604bc58f2a262b8cec0fabfed26f3991255f21" +dependencies = [ + "futures-channel", + "futures-core", + "futures-sink", + "gloo-utils", + "js-sys", + "pin-project", + "serde", + "serde_json", + "thiserror", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + +[[package]] +name = "gloo-timers" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5fb7d06c1c8cc2a29bee7ec961009a0b2caa0793ee4900c2ffb348734ba1c8f9" +dependencies = [ + "futures-channel", + "futures-core", + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "gloo-utils" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "40913a05c8297adca04392f707b1e73b12ba7b8eab7244a4961580b1fd34063c" +dependencies = [ + "js-sys", + "serde", + "serde_json", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "h2" +version = "0.3.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f9f29bc9dda355256b2916cf526ab02ce0aeaaaf2bad60d65ef3f12f11dd0f4" +dependencies = [ + "bytes", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http", + "indexmap", + "slab", + "tokio", + "tokio-util", + "tracing", +] + [[package]] name = "hashbrown" version = "0.11.2" @@ -1221,6 +1410,7 @@ dependencies = [ "futures-channel", "futures-core", "futures-util", + "h2", "http", "http-body", "httparse", @@ -1235,16 +1425,19 @@ dependencies = [ ] [[package]] -name = "hyper-tls" -version = "0.5.0" +name = "hyper-rustls" +version = "0.23.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" +checksum = "59df7c4e19c950e6e0e868dcc0a300b09a9b88e9ec55bd879ca819087a77355d" dependencies = [ - "bytes", + "http", "hyper", - "native-tls", + "log", + "rustls", + "rustls-native-certs", "tokio", - "tokio-native-tls", + "tokio-rustls", + "webpki-roots", ] [[package]] @@ -1261,7 +1454,7 @@ checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399" dependencies = [ "autocfg", "hashbrown 0.12.3", - "serde 1.0.147", + "serde", ] [[package]] @@ -1279,7 +1472,17 @@ version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", +] + +[[package]] +name = "io-lifetimes" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46112a93252b123d31a119a8d1a1ac19deac4fac6e0e8b0df58f0d4e5870e63c" +dependencies = [ + "libc", + "windows-sys 0.42.0", ] [[package]] @@ -1316,100 +1519,176 @@ dependencies = [ ] [[package]] -name = "jsonrpc-client-transports" -version = "18.0.0" -source = "git+https://github.com/massalabs/jsonrpc#99733977df27069d64b6e1bcda7192281648fc22" +name = "json5" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96b0db21af676c1ce64250b5f40f3ce2cf27e4e47cb91ed91eb6fe9350b430c1" dependencies = [ - "derive_more", - "futures 0.3.25", - "hyper", - "hyper-tls", - "jsonrpc-core", - "jsonrpc-pubsub", - "log", - "serde 1.0.147", - "serde_json", + "pest", + "pest_derive", + "serde", +] + +[[package]] +name = "jsonrpsee" +version = "0.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5af9646e616e37c61093ef85e25bd883ae0c22e2fa1e6eedfe590048247116e3" +dependencies = [ + "jsonrpsee-client-transport", + "jsonrpsee-core", + "jsonrpsee-http-client", + 
"jsonrpsee-proc-macros", + "jsonrpsee-server", + "jsonrpsee-types", + "jsonrpsee-wasm-client", + "jsonrpsee-ws-client", + "tracing", +] + +[[package]] +name = "jsonrpsee-client-transport" +version = "0.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e85cfc9c2f17eab237fdfa2efe5c1608fd06a90e1e0d7fd7b10f2d0e153f375" +dependencies = [ + "anyhow", + "futures-channel", + "futures-timer", + "futures-util", + "gloo-net", + "http", + "jsonrpsee-core", + "jsonrpsee-types", + "pin-project", + "rustls-native-certs", + "soketto", + "thiserror", "tokio", + "tokio-rustls", + "tokio-util", + "tracing", + "webpki-roots", ] [[package]] -name = "jsonrpc-core" -version = "18.0.0" -source = "git+https://github.com/massalabs/jsonrpc#99733977df27069d64b6e1bcda7192281648fc22" +name = "jsonrpsee-core" +version = "0.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "673d68136e2f0f67323bab95b3a7177df26ac21ddbf395fc32d60f30fe5a1364" dependencies = [ - "futures 0.3.25", - "futures-executor", + "anyhow", + "arrayvec", + "async-lock", + "async-trait", + "beef", + "futures-channel", + "futures-timer", "futures-util", - "log", - "serde 1.0.147", - "serde_derive", + "globset", + "hyper", + "jsonrpsee-types", + "parking_lot", + "rand 0.8.5", + "rustc-hash", + "serde", "serde_json", + "soketto", + "thiserror", + "tokio", + "tracing", + "wasm-bindgen-futures", ] [[package]] -name = "jsonrpc-core-client" -version = "18.0.0" -source = "git+https://github.com/massalabs/jsonrpc#99733977df27069d64b6e1bcda7192281648fc22" +name = "jsonrpsee-http-client" +version = "0.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42007820863ab29f3adeacf43886ef54abaedb35bc33dada25771db4e1f94de4" dependencies = [ - "futures 0.3.25", - "jsonrpc-client-transports", + "async-trait", + "hyper", + "hyper-rustls", + "jsonrpsee-core", + "jsonrpsee-types", + "rustc-hash", + "serde", + "serde_json", + "thiserror", + "tokio", + 
"tracing", ] [[package]] -name = "jsonrpc-derive" -version = "18.0.0" -source = "git+https://github.com/massalabs/jsonrpc#99733977df27069d64b6e1bcda7192281648fc22" +name = "jsonrpsee-proc-macros" +version = "0.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ed8b96f9d2d6a984fd75784ac8bfed994ee40980626b85791782dcd13ffb7ac" dependencies = [ - "proc-macro-crate 0.1.5", + "heck 0.4.0", + "proc-macro-crate 1.2.1", "proc-macro2", "quote", "syn", ] [[package]] -name = "jsonrpc-http-server" -version = "18.0.0" -source = "git+https://github.com/massalabs/jsonrpc#99733977df27069d64b6e1bcda7192281648fc22" +name = "jsonrpsee-server" +version = "0.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a78f34520019321bd466d00620606db2f40827362d0185b3b95040328eb502f6" dependencies = [ - "futures 0.3.25", + "futures-channel", + "futures-util", + "http", "hyper", - "jsonrpc-core", - "jsonrpc-server-utils", - "log", - "net2", - "parking_lot", - "unicase", + "jsonrpsee-core", + "jsonrpsee-types", + "serde", + "serde_json", + "soketto", + "tokio", + "tokio-stream", + "tokio-util", + "tower", + "tracing", ] [[package]] -name = "jsonrpc-pubsub" -version = "18.0.0" -source = "git+https://github.com/massalabs/jsonrpc#99733977df27069d64b6e1bcda7192281648fc22" +name = "jsonrpsee-types" +version = "0.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7985a27ee315c7c8c5c5033ac133e9472aec881edfd947780f5a9970efb7cbbf" dependencies = [ - "futures 0.3.25", - "jsonrpc-core", - "lazy_static", - "log", - "parking_lot", - "rand 0.7.3", - "serde 1.0.147", + "anyhow", + "beef", + "serde", + "serde_json", + "thiserror", + "tracing", ] [[package]] -name = "jsonrpc-server-utils" -version = "18.0.0" -source = "git+https://github.com/massalabs/jsonrpc#99733977df27069d64b6e1bcda7192281648fc22" +name = "jsonrpsee-wasm-client" +version = "0.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "46811fcec615d8e58228e7e281b3238693b26da1eb2469ac208af40a217bc8d9" dependencies = [ - "bytes", - "futures 0.3.25", - "globset", - "jsonrpc-core", - "lazy_static", - "log", - "tokio", - "tokio-stream", - "tokio-util", - "unicase", + "jsonrpsee-client-transport", + "jsonrpsee-core", + "jsonrpsee-types", +] + +[[package]] +name = "jsonrpsee-ws-client" +version = "0.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "480fc9922f10b8fca3f07c07c51e137ddcf13fd60a304f117cfaa9e9bf41c60b" +dependencies = [ + "http", + "jsonrpsee-client-transport", + "jsonrpsee-core", + "jsonrpsee-types", ] [[package]] @@ -1439,19 +1718,6 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67" -[[package]] -name = "lexical-core" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6607c62aa161d23d17a9072cc5da0be67cdfc89d3afb1e8d9c842bebc2525ffe" -dependencies = [ - "arrayvec 0.5.2", - "bitflags", - "cfg-if 1.0.0", - "ryu", - "static_assertions", -] - [[package]] name = "libc" version = "0.2.137" @@ -1464,7 +1730,7 @@ version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "winapi", ] @@ -1506,6 +1772,12 @@ version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" +[[package]] +name = "linux-raw-sys" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f9f08d8963a6c613f4b1a78f4f4a4dbfadf8e6545b2d72861731e4858b8b47f" + [[package]] name = "lock_api" version = "0.4.9" @@ -1522,7 +1794,7 @@ version = "0.4.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -1562,9 +1834,8 @@ dependencies = [ "anyhow", "atty", "console", - "dialoguer 0.10.2 (git+https://github.com/mitsuhiko/dialoguer)", + "dialoguer", "erased-serde", - "glob", "lazy_static", "massa_models", "massa_sdk", @@ -1572,13 +1843,13 @@ dependencies = [ "massa_time", "massa_wallet", "paw", - "rev_lines", - "serde 1.0.147", + "rustyline", + "rustyline-derive", + "serde", "serde_json", "structopt", "strum", "strum_macros", - "tilde-expand", "tokio", "toml_edit", ] @@ -1588,7 +1859,8 @@ name = "massa-node" version = "0.1.0" dependencies = [ "anyhow", - "dialoguer 0.10.2 (registry+https://github.com/rust-lang/crates.io-index)", + "crossbeam-channel", + "dialoguer", "enum-map", "lazy_static", "massa_api", @@ -1620,7 +1892,7 @@ dependencies = [ "massa_wallet", "parking_lot", "paw", - "serde 1.0.147", + "serde", "serde_json", "structopt", "tokio", @@ -1630,8 +1902,8 @@ dependencies = [ [[package]] name = "massa-sc-runtime" -version = "0.6.9" -source = "git+https://github.com/massalabs/massa-sc-runtime?tag=v0.8.2#46ca849403abe0f0e41da1b10c65a4d652f8b3a0" +version = "0.9.0" +source = "git+https://github.com/massalabs/massa-sc-runtime?tag=v0.9.0#d082983e73f19d236aed24ac2e4607414d43368b" dependencies = [ "anyhow", "as-ffi-bindings", @@ -1639,29 +1911,30 @@ dependencies = [ "cornetto", "lazy_static", "loupe", + "more-asserts 0.3.1", "parking_lot", "rand 0.8.5", - "serde 1.0.147", + "regex", + "serde", "serde_json", "serial_test 0.8.0", "wasmer", "wasmer-compiler-singlepass", "wasmer-engine-universal", "wasmer-middlewares", + "wasmer-types", ] [[package]] name = "massa_api" version = "0.1.0" dependencies = [ + "async-trait", "displaydoc", "itertools", - "jsonrpc-core", - "jsonrpc-derive", - "jsonrpc-http-server", + "jsonrpsee", "massa_consensus_exports", "massa_execution_exports", - "massa_graph", "massa_hash", "massa_models", 
"massa_network_exports", @@ -1674,7 +1947,8 @@ dependencies = [ "massa_time", "massa_wallet", "parking_lot", - "serde 1.0.147", + "serde", + "serde_json", "thiserror", "tokio", "tracing", @@ -1685,7 +1959,7 @@ name = "massa_async_pool" version = "0.1.0" dependencies = [ "displaydoc", - "futures 0.3.25", + "futures", "lazy_static", "massa_hash", "massa_logging", @@ -1693,11 +1967,11 @@ dependencies = [ "massa_serialization", "massa_signature", "massa_time", - "nom 7.1.1", + "nom", "num", "pretty_assertions", "rand 0.8.5", - "serde 1.0.147", + "serde", "serde_json", "serial_test 0.9.0", "thiserror", @@ -1711,13 +1985,12 @@ dependencies = [ "async-speed-limit", "bitvec", "displaydoc", - "futures 0.3.25", + "futures", "lazy_static", "massa_async_pool", "massa_consensus_exports", "massa_executed_ops", "massa_final_state", - "massa_graph", "massa_hash", "massa_ledger_exports", "massa_ledger_worker", @@ -1729,11 +2002,11 @@ dependencies = [ "massa_serialization", "massa_signature", "massa_time", - "nom 7.1.1", + "nom", "num_enum", "parking_lot", "rand 0.8.5", - "serde 1.0.147", + "serde", "serde_json", "serial_test 0.9.0", "tempfile", @@ -1752,7 +2025,7 @@ dependencies = [ "pbkdf2", "rand 0.8.5", "rand_core 0.6.4", - "serde 1.0.147", + "serde", "serde_json", "serde_qs", "thiserror", @@ -1762,46 +2035,40 @@ dependencies = [ name = "massa_consensus_exports" version = "0.1.0" dependencies = [ + "crossbeam-channel", "displaydoc", - "massa_cipher", "massa_execution_exports", - "massa_graph", + "massa_hash", "massa_models", "massa_pool_exports", "massa_pos_exports", "massa_protocol_exports", + "massa_serialization", "massa_signature", "massa_storage", "massa_time", + "nom", + "serde", "serde_json", - "tempfile", "thiserror", - "tokio", ] [[package]] name = "massa_consensus_worker" version = "0.1.0" dependencies = [ - "massa_cipher", + "displaydoc", "massa_consensus_exports", - "massa_execution_exports", - "massa_graph", "massa_hash", "massa_logging", "massa_models", - 
"massa_pool_exports", - "massa_pos_exports", - "massa_pos_worker", - "massa_protocol_exports", - "massa_serialization", "massa_signature", "massa_storage", "massa_time", + "num", "parking_lot", + "serde", "serde_json", - "serial_test 0.9.0", - "tokio", "tracing", ] @@ -1812,7 +2079,7 @@ dependencies = [ "massa_hash", "massa_models", "massa_serialization", - "nom 7.1.1", + "nom", ] [[package]] @@ -1878,9 +2145,9 @@ dependencies = [ "massa_signature", "massa_storage", "massa_time", - "nom 7.1.1", + "nom", "num", - "serde 1.0.147", + "serde", "serde_json", "thiserror", "tracing", @@ -1904,7 +2171,7 @@ dependencies = [ "massa_time", "massa_wallet", "parking_lot", - "serde 1.0.147", + "serde", "serde_json", "serial_test 0.9.0", "tracing", @@ -1917,34 +2184,14 @@ dependencies = [ "displaydoc", "massa_async_pool", "massa_executed_ops", + "massa_hash", "massa_ledger_exports", "massa_ledger_worker", "massa_models", "massa_pos_exports", "massa_serialization", "massa_signature", - "nom 7.1.1", - "thiserror", - "tracing", -] - -[[package]] -name = "massa_graph" -version = "0.1.0" -dependencies = [ - "displaydoc", - "massa_execution_exports", - "massa_hash", - "massa_logging", - "massa_models", - "massa_pos_exports", - "massa_serialization", - "massa_signature", - "massa_storage", - "nom 7.1.1", - "num", - "serde 1.0.147", - "serde_json", + "nom", "thiserror", "tracing", ] @@ -1957,8 +2204,8 @@ dependencies = [ "bs58", "displaydoc", "massa_serialization", - "nom 7.1.1", - "serde 1.0.147", + "nom", + "serde", "serde_json", "serial_test 0.9.0", "thiserror", @@ -1973,8 +2220,8 @@ dependencies = [ "massa_models", "massa_serialization", "massa_signature", - "nom 7.1.1", - "serde 1.0.147", + "nom", + "serde", "serde_json", "tempfile", "thiserror", @@ -1989,7 +2236,7 @@ dependencies = [ "massa_models", "massa_serialization", "massa_signature", - "nom 7.1.1", + "nom", "rocksdb", "serde_json", "tempfile", @@ -2019,11 +2266,11 @@ dependencies = [ "massa_serialization", "massa_signature", 
"massa_time", - "nom 7.1.1", + "nom", "num", "num_enum", "rust_decimal", - "serde 1.0.147", + "serde", "serial_test 0.9.0", "thiserror", ] @@ -2039,8 +2286,8 @@ dependencies = [ "massa_serialization", "massa_signature", "massa_time", - "nom 7.1.1", - "serde 1.0.147", + "nom", + "serde", "serde_json", "tempfile", "thiserror", @@ -2053,7 +2300,7 @@ name = "massa_network_worker" version = "0.1.0" dependencies = [ "enum-map", - "futures 0.3.25", + "futures", "itertools", "massa_hash", "massa_logging", @@ -2062,10 +2309,10 @@ dependencies = [ "massa_serialization", "massa_signature", "massa_time", - "nom 7.1.1", + "nom", "num_enum", "rand 0.8.5", - "serde 1.0.147", + "serde", "serde_json", "serial_test 0.9.0", "tempfile", @@ -2080,7 +2327,7 @@ dependencies = [ "massa_models", "massa_storage", "massa_time", - "serde 1.0.147", + "serde", ] [[package]] @@ -2111,10 +2358,10 @@ dependencies = [ "massa_serialization", "massa_signature", "massa_time", - "nom 7.1.1", + "nom", "num", "parking_lot", - "serde 1.0.147", + "serde", "serde_json", "thiserror", "tokio", @@ -2136,7 +2383,7 @@ dependencies = [ "rand 0.8.5", "rand_distr", "rand_xoshiro", - "serde 1.0.147", + "serde", "serde_json", "tracing", ] @@ -2146,7 +2393,7 @@ name = "massa_protocol_exports" version = "0.1.0" dependencies = [ "displaydoc", - "futures 0.3.25", + "futures", "lazy_static", "massa_hash", "massa_logging", @@ -2155,7 +2402,7 @@ dependencies = [ "massa_signature", "massa_storage", "massa_time", - "serde 1.0.147", + "serde", "serde_json", "thiserror", "tokio", @@ -2166,8 +2413,9 @@ dependencies = [ name = "massa_protocol_worker" version = "0.1.0" dependencies = [ - "futures 0.3.25", + "futures", "lazy_static", + "massa_consensus_exports", "massa_hash", "massa_logging", "massa_models", @@ -2189,10 +2437,10 @@ dependencies = [ name = "massa_sdk" version = "0.1.0" dependencies = [ - "jsonrpc-core-client", + "http", + "jsonrpsee", "massa_models", - "serde 1.0.147", - "tokio", + "massa_time", ] [[package]] @@ 
-2200,7 +2448,7 @@ name = "massa_serialization" version = "0.1.0" dependencies = [ "displaydoc", - "nom 7.1.1", + "nom", "thiserror", "unsigned-varint", ] @@ -2214,9 +2462,9 @@ dependencies = [ "ed25519-dalek", "massa_hash", "massa_serialization", - "nom 7.1.1", + "nom", "rand 0.7.3", - "serde 1.0.147", + "serde", "serde_json", "serial_test 0.9.0", "thiserror", @@ -2241,8 +2489,8 @@ version = "0.1.0" dependencies = [ "displaydoc", "massa_serialization", - "nom 7.1.1", - "serde 1.0.147", + "nom", + "serde", "thiserror", "time", ] @@ -2256,7 +2504,7 @@ dependencies = [ "massa_hash", "massa_models", "massa_signature", - "serde 1.0.147", + "serde", "serde_json", "serde_qs", "tempfile", @@ -2287,6 +2535,15 @@ dependencies = [ "autocfg", ] +[[package]] +name = "memoffset" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5de893c32cde5f383baa4c04c5d6dbdd735cfd4a794b0debdb2bb1b421da5ff4" +dependencies = [ + "autocfg", +] + [[package]] name = "merlin" version = "2.0.1" @@ -2333,43 +2590,29 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7843ec2de400bcbc6a6328c958dc38e5359da6e93e72e37bc5246bf1ae776389" [[package]] -name = "native-tls" -version = "0.2.11" +name = "more-asserts" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" -dependencies = [ - "lazy_static", - "libc", - "log", - "openssl", - "openssl-probe", - "openssl-sys", - "schannel", - "security-framework", - "security-framework-sys", - "tempfile", -] +checksum = "1fafa6961cabd9c63bcd77a45d7e3b7f3b552b70417831fb0f56db717e72407e" [[package]] -name = "net2" -version = "0.2.38" +name = "nibble_vec" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74d0df99cfcd2530b2e694f6e17e7f37b8e26bb23983ac530c0c97408837c631" +checksum = 
"77a5d83df9f36fe23f0c3648c6bbb8b0298bb5f1939c8f2704431371f4b84d43" dependencies = [ - "cfg-if 0.1.10", - "libc", - "winapi", + "smallvec", ] [[package]] -name = "nom" -version = "5.1.2" +name = "nix" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffb4262d26ed83a1c0a33a38fe2bb15797329c85770da05e6b828ddb782627af" +checksum = "195cdbc1741b8134346d515b3a56a1c94b0912758009cfd53f99ea0f57b065fc" dependencies = [ - "lexical-core", - "memchr", - "version_check", + "bitflags", + "cfg-if", + "libc", ] [[package]] @@ -2403,7 +2646,7 @@ dependencies = [ "num-integer", "num-iter", "num-rational", - "num-traits 0.2.15", + "num-traits", ] [[package]] @@ -2414,8 +2657,8 @@ checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f" dependencies = [ "autocfg", "num-integer", - "num-traits 0.2.15", - "serde 1.0.147", + "num-traits", + "serde", ] [[package]] @@ -2424,8 +2667,8 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ae39348c8bc5fbd7f40c727a9925f03517afd2ab27d46702108b6a7e5414c19" dependencies = [ - "num-traits 0.2.15", - "serde 1.0.147", + "num-traits", + "serde", ] [[package]] @@ -2435,7 +2678,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" dependencies = [ "autocfg", - "num-traits 0.2.15", + "num-traits", ] [[package]] @@ -2446,7 +2689,7 @@ checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252" dependencies = [ "autocfg", "num-integer", - "num-traits 0.2.15", + "num-traits", ] [[package]] @@ -2458,17 +2701,8 @@ dependencies = [ "autocfg", "num-bigint", "num-integer", - "num-traits 0.2.15", - "serde 1.0.147", -] - -[[package]] -name = "num-traits" -version = "0.1.43" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92e5113e9fd4cc14ded8e499429f396a20f98c772a47cc8622a736e1ec843c31" -dependencies = [ - 
"num-traits 0.2.15", + "num-traits", + "serde", ] [[package]] @@ -2545,32 +2779,6 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" -[[package]] -name = "openssl" -version = "0.10.42" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12fc0523e3bd51a692c8850d075d74dc062ccf251c0110668cbd921917118a13" -dependencies = [ - "bitflags", - "cfg-if 1.0.0", - "foreign-types", - "libc", - "once_cell", - "openssl-macros", - "openssl-sys", -] - -[[package]] -name = "openssl-macros" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b501e44f11665960c7e7fcf062c7d96a14ade4aa98116c004b2e37b5be7d736c" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "openssl-probe" version = "0.1.5" @@ -2578,16 +2786,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] -name = "openssl-sys" -version = "0.9.77" +name = "ordered-multimap" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b03b84c3b2d099b81f0953422b4d4ad58761589d0229b5506356afca05a3670a" +checksum = "ccd746e37177e1711c20dd619a1620f34f5c8b569c53590a72dedd5344d8924a" dependencies = [ - "autocfg", - "cc", - "libc", - "pkg-config", - "vcpkg", + "dlv-list", + "hashbrown 0.12.3", ] [[package]] @@ -2605,6 +2810,12 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" +[[package]] +name = "parking" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "427c3892f9e783d91cc128285287e70a59e206ca452770ece88a76f7a3eddd72" + [[package]] name = "parking_lot" version = "0.12.1" @@ -2622,7 +2833,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "4dc9e0dc2adc1c69d09143aff38d3d30c5c3f0df0dad82e6d25547af174ebec0" dependencies = [ "backtrace", - "cfg-if 1.0.0", + "cfg-if", "libc", "petgraph", "redox_syscall", @@ -2642,6 +2853,12 @@ dependencies = [ "subtle", ] +[[package]] +name = "pathdiff" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8835116a5c179084a830efb3adc117ab007512b535bc1a21c991d3b32a6b44dd" + [[package]] name = "paw" version = "1.0.0" @@ -2693,6 +2910,50 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" +[[package]] +name = "pest" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f400b0f7905bf702f9f3dc3df5a121b16c54e9e8012c082905fdf09a931861a" +dependencies = [ + "thiserror", + "ucd-trie", +] + +[[package]] +name = "pest_derive" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "423c2ba011d6e27b02b482a3707c773d19aec65cc024637aec44e19652e66f63" +dependencies = [ + "pest", + "pest_generator", +] + +[[package]] +name = "pest_generator" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e64e6c2c85031c02fdbd9e5c72845445ca0a724d419aa0bc068ac620c9935c1" +dependencies = [ + "pest", + "pest_meta", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "pest_meta" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57959b91f0a133f89a68be874a5c88ed689c19cd729ecdb5d762ebf16c64d662" +dependencies = [ + "once_cell", + "pest", + "sha1", +] + [[package]] name = "petgraph" version = "0.6.2" @@ -2703,6 +2964,26 @@ dependencies = [ "indexmap", ] +[[package]] +name = "pin-project" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "pin-project-lite" version = "0.2.9" @@ -2727,7 +3008,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ef234e08c11dfcb2e56f79fd70f6f2eb7f025c0ce2333e82f4f0518ecad30c6" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "opaque-debug", "universal-hash", @@ -2839,6 +3120,16 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" +[[package]] +name = "radix_trie" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c069c179fcdc6a2fe24d8d18305cf085fdbd4f922c041943e203685d6a1c58fd" +dependencies = [ + "endian-type", + "nibble_vec", +] + [[package]] name = "rand" version = "0.7.3" @@ -2907,7 +3198,7 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32cb0b9bc82b0a0876c2dd994a7e7a2683d3e7390ca40e6886785ef0c7e3ee31" dependencies = [ - "num-traits 0.2.15", + "num-traits", "rand 0.8.5", ] @@ -2931,11 +3222,10 @@ dependencies = [ [[package]] name = "rayon" -version = "1.5.3" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd99e5772ead8baa5215278c9b15bf92087709e9c1b2d1f97cdb5a183c933a7d" +checksum = "1e060280438193c554f654141c9ea9417886713b7acd75974c85b18a69a88e0b" dependencies = [ - "autocfg", "crossbeam-deque", "either", "rayon-core", @@ -2943,9 +3233,9 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.9.3" +version = "1.10.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "258bcdb5ac6dad48491bb2992db6b7cf74878b0384908af124823d118c99683f" +checksum = "cac410af5d00ab6884528b4ab69d1e8e146e8d471201800fa1b4524126de6ad3" dependencies = [ "crossbeam-channel", "crossbeam-deque", @@ -3032,10 +3322,19 @@ dependencies = [ ] [[package]] -name = "rev_lines" -version = "0.2.1" +name = "ring" +version = "0.16.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18eb52b6664d331053136fcac7e4883bdc6f5fc04a6aab3b0f75eafb80ab88b3" +checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" +dependencies = [ + "cc", + "libc", + "once_cell", + "spin", + "untrusted", + "web-sys", + "winapi", +] [[package]] name = "rkyv" @@ -3072,21 +3371,43 @@ dependencies = [ "librocksdb-sys", ] +[[package]] +name = "ron" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88073939a61e5b7680558e6be56b419e208420c2adb92be54921fa6b72283f1a" +dependencies = [ + "base64", + "bitflags", + "serde", +] + [[package]] name = "rust-ini" -version = "0.13.0" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e52c148ef37f8c375d49d5a73aa70713125b7f19095948a923f80afdeb22ec2" +checksum = "f6d5f2436026b4f6e79dc829837d467cc7e9a55ee40e750d716713540715a2df" +dependencies = [ + "cfg-if", + "ordered-multimap", +] [[package]] name = "rust_decimal" -version = "1.26.1" +version = "1.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee9164faf726e4f3ece4978b25ca877ddc6802fa77f38cdccb32c7f805ecd70c" +checksum = "33c321ee4e17d2b7abe12b5d20c1231db708dd36185c8a21e9de5fed6da4dbe9" dependencies = [ - "arrayvec 0.7.2", - "num-traits 0.2.15", - "serde 1.0.147", + "arrayvec", + "borsh", + "bytecheck", + "byteorder", + "bytes", + "num-traits", + "rand 0.8.5", + "rkyv", + "serde", + "serde_json", ] [[package]] @@ -3102,12 +3423,50 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] -name = "rustc_version" -version = "0.4.0" +name = "rustix" +version = "0.36.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b1fbb4dfc4eb1d390c02df47760bb19a84bb80b301ecc947ab5406394d8223e" +dependencies = [ + "bitflags", + "errno", + "io-lifetimes", + "libc", + "linux-raw-sys", + "windows-sys 0.42.0", +] + +[[package]] +name = "rustls" +version = "0.20.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "539a2bfe908f471bfa933876bd1eb6a19cf2176d375f82ef7f99530a40e48c2c" +dependencies = [ + "log", + "ring", + "sct", + "webpki", +] + +[[package]] +name = "rustls-native-certs" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0167bac7a9f490495f3c33013e7722b53cb087ecbe082fb0c6387c96f634ea50" +dependencies = [ + "openssl-probe", + "rustls-pemfile", + "schannel", + "security-framework", +] + +[[package]] +name = "rustls-pemfile" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +checksum = "0864aeff53f8c05aa08d86e5ef839d3dfcf07aeba2db32f12db0ef716e87bd55" dependencies = [ - "semver", + "base64", ] [[package]] @@ -3116,6 +3475,40 @@ version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97477e48b4cf8603ad5f7aaf897467cf42ab4218a38ef76fb14c2d6773a6d6a8" +[[package]] +name = "rustyline" +version = "10.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d1cd5ae51d3f7bf65d7969d579d502168ef578f289452bd8ccc91de28fda20e" +dependencies = [ + "bitflags", + "cfg-if", + "clipboard-win", + "dirs-next", + "fd-lock", + "libc", + "log", + "memchr", + "nix", + "radix_trie", + "scopeguard", + "unicode-segmentation", + "unicode-width", + "utf8parse", + 
"winapi", +] + +[[package]] +name = "rustyline-derive" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "107c3d5d7f370ac09efa62a78375f94d94b8a33c61d8c278b96683fb4dbf2d8d" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "ryu" version = "1.0.11" @@ -3138,6 +3531,16 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" +[[package]] +name = "sct" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" +dependencies = [ + "ring", + "untrusted", +] + [[package]] name = "seahash" version = "4.1.0" @@ -3168,52 +3571,34 @@ dependencies = [ ] [[package]] -name = "semver" -version = "1.0.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e25dfac463d778e353db5be2449d1cce89bd6fd23c9f1ea21310ce6e5a1b29c4" - -[[package]] -name = "serde" -version = "0.8.23" +name = "send_wrapper" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dad3f759919b92c3068c696c15c3d17238234498bbdcc80f2c469606f948ac8" +checksum = "f638d531eccd6e23b980caf34876660d38e265409d8e99b397ab71eb3612fad0" [[package]] name = "serde" -version = "1.0.147" +version = "1.0.148" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d193d69bae983fc11a79df82342761dfbf28a99fc8d203dca4c3c1b590948965" +checksum = "e53f64bb4ba0191d6d0676e1b141ca55047d83b74f5607e6d8eb88126c52c2dc" dependencies = [ "serde_derive", ] -[[package]] -name = "serde-hjson" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a3a4e0ea8a88553209f6cc6cfe8724ecad22e1acf372793c27d995290fe74f8" -dependencies = [ - "lazy_static", - "num-traits 0.1.43", - "regex", - "serde 0.8.23", -] - [[package]] name = "serde_bytes" 
version = "0.11.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfc50e8183eeeb6178dcb167ae34a8051d63535023ae38b5d8d12beae193d37b" dependencies = [ - "serde 1.0.147", + "serde", ] [[package]] name = "serde_derive" -version = "1.0.147" +version = "1.0.148" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f1d362ca8fc9c3e3a7484440752472d68a6caa98f1ab81d99b5dfe517cec852" +checksum = "a55492425aa53521babf6137309e7d34c20bbfbbfcfe2c7f3a047fd1f6b92c0c" dependencies = [ "proc-macro2", "quote", @@ -3222,13 +3607,13 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.88" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e8b3801309262e8184d9687fb697586833e939767aea0dda89f5a8e650e8bd7" +checksum = "020ff22c755c2ed3f8cf162dbb41a7268d934702f3ed3631656ea597e08fc3db" dependencies = [ "itoa", "ryu", - "serde 1.0.147", + "serde", ] [[package]] @@ -3238,7 +3623,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8cac3f1e2ca2fe333923a1ae72caca910b98ed0630bb35ef6f8c8517d6e81afa" dependencies = [ "percent-encoding", - "serde 1.0.147", + "serde", "thiserror", ] @@ -3248,7 +3633,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7eec42e7232e5ca56aa59d63af3c7f991fe71ee6a3ddd2d3480834cf3902b007" dependencies = [ - "futures 0.3.25", + "futures", "lazy_static", "log", "parking_lot", @@ -3262,7 +3647,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "92761393ee4dc3ff8f4af487bd58f4307c9329bbedea02cac0089ad9c411e153" dependencies = [ "dashmap", - "futures 0.3.25", + "futures", "lazy_static", "log", "parking_lot", @@ -3294,6 +3679,30 @@ dependencies = [ "syn", ] +[[package]] +name = "sha-1" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6" +dependencies 
= [ + "block-buffer 0.9.0", + "cfg-if", + "cpufeatures", + "digest 0.9.0", + "opaque-debug", +] + +[[package]] +name = "sha1" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest 0.10.6", +] + [[package]] name = "sha2" version = "0.9.9" @@ -3301,7 +3710,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" dependencies = [ "block-buffer 0.9.0", - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "digest 0.9.0", "opaque-debug", @@ -3313,7 +3722,7 @@ version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "digest 0.10.6", ] @@ -3373,6 +3782,28 @@ dependencies = [ "winapi", ] +[[package]] +name = "soketto" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d1c5305e39e09653383c2c7244f2f78b3bcae37cf50c64cb4789c9f5096ec2" +dependencies = [ + "base64", + "bytes", + "futures", + "http", + "httparse", + "log", + "rand 0.8.5", + "sha-1", +] + +[[package]] +name = "spin" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" + [[package]] name = "stable_deref_trait" version = "1.2.0" @@ -3380,10 +3811,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" [[package]] -name = "static_assertions" -version = "1.1.0" +name = "str-buf" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" +checksum = 
"9e08d8363704e6c71fc928674353e6b7c23dcea9d82d7012c8faf2a3a025f8d0" [[package]] name = "strsim" @@ -3443,9 +3874,9 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "syn" -version = "1.0.103" +version = "1.0.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a864042229133ada95abf3b54fdc62ef5ccabe9515b64717bcb9a1919e59445d" +checksum = "4ae548ec36cf198c0ef7710d3c230987c2d6d7bd98ad6edc0274462724c585ce" dependencies = [ "proc-macro2", "quote", @@ -3482,7 +3913,7 @@ version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "fastrand", "libc", "redox_syscall", @@ -3549,15 +3980,6 @@ dependencies = [ "once_cell", ] -[[package]] -name = "tilde-expand" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ab1f382b10dd7ff9926b5c33374bc4011b27c82ee890c741aef2bd3fa0d10ba" -dependencies = [ - "libc", -] - [[package]] name = "time" version = "0.3.17" @@ -3565,7 +3987,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a561bf4617eebd33bca6434b988f39ed798e527f51a1e797d0ee4f61c0a38376" dependencies = [ "itoa", - "serde 1.0.147", + "serde", "time-core", "time-macros", ] @@ -3587,9 +4009,9 @@ dependencies = [ [[package]] name = "tokio" -version = "1.21.2" +version = "1.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9e03c497dc955702ba729190dc4aac6f2a0ce97f913e5b1b5912fc5039d9099" +checksum = "d76ce4a75fb488c605c54bf610f221cea8b0dafb53333c1a67e8ee199dcd2ae3" dependencies = [ "autocfg", "bytes", @@ -3617,13 +4039,14 @@ dependencies = [ ] [[package]] -name = "tokio-native-tls" -version = "0.3.0" +name = "tokio-rustls" +version = "0.23.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f7d995660bd2b7f8c1568414c1126076c13fbb725c40112dc0120b78eb9b717b" +checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" dependencies = [ - "native-tls", + "rustls", "tokio", + "webpki", ] [[package]] @@ -3639,16 +4062,17 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.6.10" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36943ee01a6d67977dd3f84a5a1d2efeb4ada3a1ae771cadfaa535d9d9fc6507" +checksum = "0bb2e075f03b3d66d8d8785356224ba688d2906a371015e225beeb65ca92c740" dependencies = [ "bytes", "futures-core", + "futures-io", "futures-sink", - "log", "pin-project-lite", "tokio", + "tracing", ] [[package]] @@ -3657,7 +4081,7 @@ version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d82e1a7758622a465f8cee077614c73484dac5b836c02ff6a40d5d1010324d7" dependencies = [ - "serde 1.0.147", + "serde", ] [[package]] @@ -3678,6 +4102,23 @@ dependencies = [ "toml_datetime", ] +[[package]] +name = "tower" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +dependencies = [ + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-layer" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" + [[package]] name = "tower-service" version = "0.3.2" @@ -3690,7 +4131,7 @@ version = "0.1.37" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "log", "pin-project-lite", "tracing-attributes", @@ -3756,13 +4197,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" [[package]] -name = "unicase" 
-version = "2.6.0" +name = "ucd-trie" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" -dependencies = [ - "version_check", -] +checksum = "9e79c4d996edb816c91e4308506774452e55e95c3c9de07b6729e17e15a5ef81" [[package]] name = "unicode-ident" @@ -3803,9 +4241,21 @@ name = "unsigned-varint" version = "0.7.1" source = "git+https://github.com/cyphar/unsigned-varint.git?branch=nom6-errors#ad577035fc09d2b8351efa14f5812920b9216a9d" dependencies = [ - "nom 7.1.1", + "nom", ] +[[package]] +name = "untrusted" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" + +[[package]] +name = "utf8parse" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "936e4b492acfd135421d8dca4b1aa80a7bfc26e702ef3af710e0752684df5372" + [[package]] name = "valuable" version = "0.1.0" @@ -3830,6 +4280,12 @@ version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +[[package]] +name = "waker-fn" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d5b2c62b4012a3e1eca5a7e077d13b3bf498c4073e33ccd58626607748ceeca" + [[package]] name = "want" version = "0.3.0" @@ -3858,7 +4314,9 @@ version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eaf9f5aceeec8be17c128b2e93e031fb8a4d469bb9c4ae2d7dc1888b26887268" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", + "serde", + "serde_json", "wasm-bindgen-macro", ] @@ -3877,6 +4335,18 @@ dependencies = [ "wasm-bindgen-shared", ] +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"23639446165ca5a5de86ae1d8896b737ae80319560fbaa4c2887b7da6e7ebd7d" +dependencies = [ + "cfg-if", + "js-sys", + "wasm-bindgen", + "web-sys", +] + [[package]] name = "wasm-bindgen-macro" version = "0.2.83" @@ -3908,9 +4378,9 @@ checksum = "1c38c045535d93ec4f0b4defec448e4291638ee608530863b1e2ba115d4fff7f" [[package]] name = "wasm-encoder" -version = "0.19.1" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9424cdab516a16d4ea03c8f4a01b14e7b2d04a129dcc2bcdde5bcc5f68f06c41" +checksum = "05632e0a66a6ed8cca593c24223aabd6262f256c3693ad9822c315285f010614" dependencies = [ "leb128", ] @@ -3921,11 +4391,11 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ea8d8361c9d006ea3d7797de7bd6b1492ffd0f91a22430cfda6c1658ad57bedf" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "indexmap", "js-sys", "loupe", - "more-asserts", + "more-asserts 0.2.2", "target-lexicon", "thiserror", "wasm-bindgen", @@ -3964,7 +4434,7 @@ dependencies = [ "enumset", "loupe", "rkyv", - "serde 1.0.147", + "serde", "serde_bytes", "smallvec", "target-lexicon", @@ -3984,7 +4454,7 @@ dependencies = [ "cranelift-frontend", "gimli", "loupe", - "more-asserts", + "more-asserts 0.2.2", "rayon", "smallvec", "target-lexicon", @@ -4005,7 +4475,7 @@ dependencies = [ "gimli", "lazy_static", "loupe", - "more-asserts", + "more-asserts 0.2.2", "rayon", "smallvec", "wasmer-compiler", @@ -4035,9 +4505,9 @@ dependencies = [ "lazy_static", "loupe", "memmap2", - "more-asserts", + "more-asserts 0.2.2", "rustc-demangle", - "serde 1.0.147", + "serde", "serde_bytes", "target-lexicon", "thiserror", @@ -4053,7 +4523,7 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ad0358af9c154724587731175553805648d9acb8f6657880d165e378672b7e53" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "enum-iterator", "enumset", "leb128", @@ -4061,7 +4531,7 @@ dependencies = [ "loupe", "object 0.28.4", "rkyv", - "serde 
1.0.147", + "serde", "tempfile", "tracing", "wasmer-artifact", @@ -4079,7 +4549,7 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "440dc3d93c9ca47865a4f4edd037ea81bf983b5796b59b3d712d844b32dbef15" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "enumset", "leb128", "loupe", @@ -4143,9 +4613,9 @@ dependencies = [ "enum-iterator", "indexmap", "loupe", - "more-asserts", + "more-asserts 0.2.2", "rkyv", - "serde 1.0.147", + "serde", "thiserror", ] @@ -4157,7 +4627,7 @@ checksum = "30d965fa61f4dc4cdb35a54daaf7ecec3563fbb94154a6c35433f879466247dd" dependencies = [ "backtrace", "cc", - "cfg-if 1.0.0", + "cfg-if", "corosensei", "enum-iterator", "indexmap", @@ -4165,12 +4635,12 @@ dependencies = [ "libc", "loupe", "mach", - "memoffset", - "more-asserts", + "memoffset 0.6.5", + "more-asserts 0.2.2", "region", "rkyv", "scopeguard", - "serde 1.0.147", + "serde", "thiserror", "wasmer-artifact", "wasmer-types", @@ -4185,9 +4655,9 @@ checksum = "718ed7c55c2add6548cca3ddd6383d738cd73b892df400e96b9aa876f0141d7a" [[package]] name = "wast" -version = "49.0.0" +version = "50.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05ef81fcd60d244cafffeafac3d17615fdb2fddda6aca18f34a8ae233353587c" +checksum = "a2cbb59d4ac799842791fe7e806fa5dbbf6b5554d538e51cc8e176db6ff0ae34" dependencies = [ "leb128", "memchr", @@ -4197,13 +4667,42 @@ dependencies = [ [[package]] name = "wat" -version = "1.0.51" +version = "1.0.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c347c4460ffb311e95aafccd8c29e4888f241b9e4b3bb0e0ccbd998de2c8c0d" +checksum = "584aaf7a1ecf4d383bbe1a25eeab0cbb8ff96acc6796707ff65cde48f4632f15" dependencies = [ "wast", ] +[[package]] +name = "web-sys" +version = "0.3.60" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bcda906d8be16e728fd5adc5b729afad4e444e106ab28cd1c7256e54fa61510f" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + 
+[[package]] +name = "webpki" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f095d78192e208183081cc07bc5515ef55216397af48b873e5edcd72637fa1bd" +dependencies = [ + "ring", + "untrusted", +] + +[[package]] +name = "webpki-roots" +version = "0.22.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "368bfe657969fb01238bb756d351dcade285e0f6fcbd36dcb23359a5169975be" +dependencies = [ + "webpki", +] + [[package]] name = "which" version = "4.3.0" @@ -4427,9 +4926,9 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "2.0.1+zstd.1.5.2" +version = "2.0.3+zstd.1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fd07cbbc53846d9145dbffdf6dd09a7a0aa52be46741825f5c97bdd4f73f12b" +checksum = "44ccf97612ac95f3ccb89b2d7346b345e52f1c3019be4984f0455fb4ba991f8a" dependencies = [ "cc", "libc", diff --git a/Cargo.toml b/Cargo.toml index a6a0a6344cf..4c01e663b8e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -12,7 +12,8 @@ members = [ "massa-execution-worker", "massa-factory-exports", "massa-factory-worker", - "massa-graph", + "massa-consensus-exports", + "massa-consensus-worker", "massa-hash", "massa-logging", "massa-models", diff --git a/massa-api/Cargo.toml b/massa-api/Cargo.toml index faca8e0bef0..e3ebdac2d3e 100644 --- a/massa-api/Cargo.toml +++ b/massa-api/Cargo.toml @@ -7,10 +7,10 @@ edition = "2021" [dependencies] displaydoc = "0.2" -jsonrpc-core = { git = "https://github.com/massalabs/jsonrpc" } -jsonrpc-derive = { git = "https://github.com/massalabs/jsonrpc" } -jsonrpc-http-server = { git = "https://github.com/massalabs/jsonrpc" } +jsonrpsee = { version = "0.16.1", features = ["server", "macros"] } +async-trait = "0.1.58" serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0.87" thiserror = "1.0" tokio = { version = "1.21", features = ["full"] } tracing = "0.1" @@ -18,7 +18,6 @@ itertools = "0.10" parking_lot = { version = "0.12", features = 
["deadlock_detection"] } # custom modules massa_consensus_exports = { path = "../massa-consensus-exports" } -massa_graph = { path = "../massa-graph" } massa_hash = { path = "../massa-hash" } massa_models = { path = "../massa-models" } massa_network_exports = { path = "../massa-network-exports" } diff --git a/massa-api/src/config.rs b/massa-api/src/config.rs index 796dc3bfe43..086dcc1909d 100644 --- a/massa-api/src/config.rs +++ b/massa-api/src/config.rs @@ -1,9 +1,11 @@ // Copyright (c) 2022 MASSA LABS -use jsonrpc_core::serde::Deserialize; +use massa_time::MassaTime; use std::net::SocketAddr; use std::path::PathBuf; +use serde::Deserialize; + /// API settings. /// the API settings #[derive(Debug, Deserialize, Clone)] @@ -18,6 +20,26 @@ pub struct APIConfig { pub max_arguments: u64, /// openrpc specification path pub openrpc_spec_path: PathBuf, + /// maximum size in bytes of a request. + pub max_request_body_size: u32, + /// maximum size in bytes of a response. + pub max_response_body_size: u32, + /// maximum number of incoming connections allowed. + pub max_connections: u32, + /// maximum number of subscriptions per connection. + pub max_subscriptions_per_connection: u32, + /// max length for logging for requests and responses. Logs bigger than this limit will be truncated. + pub max_log_length: u32, + /// host filtering. + pub allow_hosts: Vec, + /// whether batch requests are supported by this server or not. + pub batch_requests_supported: bool, + /// the interval at which `Ping` frames are submitted. + pub ping_interval: MassaTime, + /// whether to enable HTTP. + pub enable_http: bool, + /// whether to enable WS. 
+ pub enable_ws: bool, /// max datastore value length pub max_datastore_value_length: u64, /// max op datastore entry @@ -30,4 +52,12 @@ pub struct APIConfig { pub max_function_name_length: u16, /// max parameter size pub max_parameter_size: u32, + /// thread count + pub thread_count: u8, + /// `genesis_timestamp` + pub genesis_timestamp: MassaTime, + /// t0 + pub t0: MassaTime, + /// periods per cycle + pub periods_per_cycle: u64, } diff --git a/massa-api/src/error.rs b/massa-api/src/error.rs index df948f67404..958b4def26f 100644 --- a/massa-api/src/error.rs +++ b/massa-api/src/error.rs @@ -1,6 +1,10 @@ // Copyright (c) 2022 MASSA LABS use displaydoc::Display; +use jsonrpsee::{ + core::Error as JsonRpseeError, + types::error::{CallError, ErrorObject}, +}; use massa_consensus_exports::error::ConsensusError; use massa_execution_exports::ExecutionError; use massa_hash::MassaHashError; @@ -20,9 +24,9 @@ pub enum ApiError { ReceiveChannelError(String), /// `massa_hash` error: {0} MassaHashError(#[from] MassaHashError), - /// Consensus error: {0} - ConsensusError(#[from] Box), - /// Execution error: {0} + /// consensus error: {0} + ConsensusError(#[from] ConsensusError), + /// execution error: {0} ExecutionError(#[from] ExecutionError), /// Network error: {0} NetworkError(#[from] NetworkError), @@ -50,7 +54,7 @@ pub enum ApiError { InternalServerError(String), } -impl From for jsonrpc_core::Error { +impl From for JsonRpseeError { fn from(err: ApiError) -> Self { // JSON-RPC Server errors codes must be between -32099 to -32000 let code = match err { @@ -72,16 +76,7 @@ impl From for jsonrpc_core::Error { ApiError::MissingConfig(_) => -32018, ApiError::WrongAPI => -32019, }; - jsonrpc_core::Error { - code: jsonrpc_core::ErrorCode::ServerError(code), - message: err.to_string(), - data: None, - } - } -} -impl std::convert::From for ApiError { - fn from(err: ConsensusError) -> Self { - ApiError::ConsensusError(Box::new(err)) + CallError::Custom(ErrorObject::owned(code, 
err.to_string(), None::<()>)).into() } } diff --git a/massa-api/src/lib.rs b/massa-api/src/lib.rs index 8179b133ff1..32afa893fd2 100644 --- a/massa-api/src/lib.rs +++ b/massa-api/src/lib.rs @@ -4,11 +4,10 @@ #![warn(missing_docs)] #![warn(unused_crate_dependencies)] use crate::error::ApiError::WrongAPI; -use error::ApiError; -use jsonrpc_core::{serde_json, BoxFuture, IoHandler, Value}; -use jsonrpc_derive::rpc; -use jsonrpc_http_server::{CloseHandle, ServerBuilder}; -use massa_consensus_exports::{ConsensusCommandSender, ConsensusConfig}; +use jsonrpsee::core::{Error as JsonRpseeError, RpcResult}; +use jsonrpsee::proc_macros::rpc; +use jsonrpsee::server::{AllowHosts, ServerBuilder, ServerHandle}; +use massa_consensus_exports::ConsensusController; use massa_execution_exports::ExecutionController; use massa_models::api::{ AddressInfo, BlockInfo, BlockSummary, DatastoreEntryInput, DatastoreEntryOutput, @@ -36,10 +35,10 @@ use massa_protocol_exports::ProtocolCommandSender; use massa_storage::Storage; use massa_wallet::Wallet; use parking_lot::RwLock; +use serde_json::Value; use std::net::{IpAddr, SocketAddr}; use std::sync::Arc; -use std::thread; -use std::thread::JoinHandle; + use tokio::sync::mpsc; use tracing::{info, warn}; @@ -52,7 +51,7 @@ pub use config::APIConfig; /// Public API component pub struct Public { /// link to the consensus component - pub consensus_command_sender: ConsensusCommandSender, + pub consensus_controller: Box, /// link to the execution component pub execution_controller: Box, /// link to the selector component @@ -63,8 +62,6 @@ pub struct Public { pub protocol_command_sender: ProtocolCommandSender, /// Massa storage pub storage: Storage, - /// consensus configuration (TODO: remove it, can be retrieved via an endpoint) - pub consensus_config: ConsensusConfig, /// API settings pub api_settings: APIConfig, /// network setting @@ -81,14 +78,10 @@ pub struct Public { /// Private API content pub struct Private { - /// link to the consensus 
component - pub consensus_command_sender: ConsensusCommandSender, /// link to the network component pub network_command_sender: NetworkCommandSender, /// link to the execution component pub execution_controller: Box, - /// consensus configuration (TODO: remove it, can be retrieved via an endpoint) - pub consensus_config: ConsensusConfig, /// API settings pub api_settings: APIConfig, /// stop channel @@ -101,181 +94,196 @@ pub struct Private { pub struct API(T); /// Used to manage the API -pub trait RpcServer: Endpoints { +#[async_trait::async_trait] +pub trait RpcServer: MassaRpcServer { /// Start the API - fn serve(self, _: &SocketAddr) -> StopHandle; + async fn serve( + self, + url: &SocketAddr, + api_config: &APIConfig, + ) -> Result; } -fn serve(api: impl Endpoints, url: &SocketAddr) -> StopHandle { - let mut io = IoHandler::new(); - io.extend_with(api.to_delegate()); - - let server = ServerBuilder::new(io) - .event_loop_executor(tokio::runtime::Handle::current()) - .max_request_body_size(50 * 1024 * 1024) - .start_http(url) - .expect("Unable to start RPC server"); - - let close_handle = server.close_handle(); - let thread_builder = thread::Builder::new().name("rpc-server".into()); - let join_handle = thread_builder - .spawn(|| server.wait()) - .expect("failed to spawn thread : rpc-server"); - - StopHandle { - close_handle, - join_handle, +async fn serve( + api: impl MassaRpcServer, + url: &SocketAddr, + api_config: &APIConfig, +) -> Result { + let allowed_hosts = if api_config.allow_hosts.is_empty() { + AllowHosts::Any + } else { + let hosts = api_config + .allow_hosts + .iter() + .map(|hostname| hostname.into()) + .collect(); + AllowHosts::Only(hosts) + }; + + let mut server_builder = ServerBuilder::new() + .max_request_body_size(api_config.max_request_body_size) + .max_response_body_size(api_config.max_response_body_size) + .max_connections(api_config.max_connections) + .set_host_filtering(allowed_hosts) + 
.batch_requests_supported(api_config.batch_requests_supported) + .ping_interval(api_config.ping_interval.to_duration()); + + if api_config.enable_http && !api_config.enable_ws { + server_builder = server_builder.http_only(); + } else if api_config.enable_ws && !api_config.enable_http { + server_builder = server_builder.ws_only() + } else { + panic!("wrong server configuration, you can't disable both http and ws") } + + let server = server_builder + .build(url) + .await + .expect("failed to build server"); + + let server_handler = server.start(api.into_rpc()).expect("server start failed"); + let stop_handler = StopHandle { server_handler }; + + Ok(stop_handler) } /// Used to be able to stop the API pub struct StopHandle { - close_handle: CloseHandle, - join_handle: JoinHandle<()>, + server_handler: ServerHandle, } impl StopHandle { /// stop the API gracefully pub fn stop(self) { - self.close_handle.close(); - if let Err(err) = self.join_handle.join() { - warn!("API thread panicked: {:?}", err); - } else { - info!("API finished cleanly"); + match self.server_handler.stop() { + Ok(_) => { + info!("API finished cleanly"); + } + Err(err) => warn!("API thread panicked: {:?}", err), } } } -/// Exposed API endpoints +/// Exposed API methods #[rpc(server)] -pub trait Endpoints { +pub trait MassaRpc { /// Gracefully stop the node. - #[rpc(name = "stop_node")] - fn stop_node(&self) -> BoxFuture>; + #[method(name = "stop_node")] + async fn stop_node(&self) -> RpcResult<()>; /// Sign message with node's key. /// Returns the public key that signed the message and the signature. - #[rpc(name = "node_sign_message")] - fn node_sign_message(&self, _: Vec) -> BoxFuture>; + #[method(name = "node_sign_message")] + async fn node_sign_message(&self, arg: Vec) -> RpcResult; /// Add a vector of new secret(private) keys for the node to use to stake. /// No confirmation to expect. 
- #[rpc(name = "add_staking_secret_keys")] - fn add_staking_secret_keys(&self, _: Vec) -> BoxFuture>; + #[method(name = "add_staking_secret_keys")] + async fn add_staking_secret_keys(&self, arg: Vec) -> RpcResult<()>; /// Execute bytecode in read-only mode. - #[rpc(name = "execute_read_only_bytecode")] - fn execute_read_only_bytecode( + #[method(name = "execute_read_only_bytecode")] + async fn execute_read_only_bytecode( &self, - _: Vec, - ) -> BoxFuture, ApiError>>; + arg: Vec, + ) -> RpcResult>; /// Execute an SC function in read-only mode. - #[rpc(name = "execute_read_only_call")] - fn execute_read_only_call( + #[method(name = "execute_read_only_call")] + async fn execute_read_only_call( &self, - _: Vec, - ) -> BoxFuture, ApiError>>; + arg: Vec, + ) -> RpcResult>; /// Remove a vector of addresses used to stake. /// No confirmation to expect. - #[rpc(name = "remove_staking_addresses")] - fn remove_staking_addresses(&self, _: Vec
) -> BoxFuture>; + #[method(name = "remove_staking_addresses")] + async fn remove_staking_addresses(&self, arg: Vec
) -> RpcResult<()>; /// Return hash set of staking addresses. - #[rpc(name = "get_staking_addresses")] - fn get_staking_addresses(&self) -> BoxFuture, ApiError>>; + #[method(name = "get_staking_addresses")] + async fn get_staking_addresses(&self) -> RpcResult>; /// Bans given IP address(es). /// No confirmation to expect. - #[rpc(name = "node_ban_by_ip")] - fn node_ban_by_ip(&self, _: Vec) -> BoxFuture>; + #[method(name = "node_ban_by_ip")] + async fn node_ban_by_ip(&self, arg: Vec) -> RpcResult<()>; /// Bans given node id. /// No confirmation to expect. - #[rpc(name = "node_ban_by_id")] - fn node_ban_by_id(&self, _: Vec) -> BoxFuture>; + #[method(name = "node_ban_by_id")] + async fn node_ban_by_id(&self, arg: Vec) -> RpcResult<()>; /// whitelist given IP address. /// No confirmation to expect. /// Note: If the ip was unknown it adds it to the known peers, otherwise it updates the peer type - #[rpc(name = "node_whitelist")] - fn node_whitelist(&self, _: Vec) -> BoxFuture>; + #[method(name = "node_whitelist")] + async fn node_whitelist(&self, arg: Vec) -> RpcResult<()>; /// remove from whitelist given IP address. /// keep it as standard /// No confirmation to expect. - #[rpc(name = "node_remove_from_whitelist")] - fn node_remove_from_whitelist(&self, _: Vec) -> BoxFuture>; + #[method(name = "node_remove_from_whitelist")] + async fn node_remove_from_whitelist(&self, arg: Vec) -> RpcResult<()>; /// Unban given IP address(es). /// No confirmation to expect. - #[rpc(name = "node_unban_by_ip")] - fn node_unban_by_ip(&self, _: Vec) -> BoxFuture>; + #[method(name = "node_unban_by_ip")] + async fn node_unban_by_ip(&self, arg: Vec) -> RpcResult<()>; /// Unban given node id. /// No confirmation to expect. 
- #[rpc(name = "node_unban_by_id")] - fn node_unban_by_id(&self, _: Vec) -> BoxFuture>; + #[method(name = "node_unban_by_id")] + async fn node_unban_by_id(&self, arg: Vec) -> RpcResult<()>; /// Summary of the current state: time, last final blocks (hash, thread, slot, timestamp), clique count, connected nodes count. - #[rpc(name = "get_status")] - fn get_status(&self) -> BoxFuture>; + #[method(name = "get_status")] + async fn get_status(&self) -> RpcResult; /// Get cliques. - #[rpc(name = "get_cliques")] - fn get_cliques(&self) -> BoxFuture, ApiError>>; + #[method(name = "get_cliques")] + async fn get_cliques(&self) -> RpcResult>; /// Returns the active stakers and their active roll counts for the current cycle. - #[rpc(name = "get_stakers")] - fn get_stakers(&self) -> BoxFuture, ApiError>>; + #[method(name = "get_stakers")] + async fn get_stakers(&self) -> RpcResult>; /// Returns operations information associated to a given list of operations' IDs. - #[rpc(name = "get_operations")] - fn get_operations( - &self, - _: Vec, - ) -> BoxFuture, ApiError>>; + #[method(name = "get_operations")] + async fn get_operations(&self, arg: Vec) -> RpcResult>; /// Get endorsements (not yet implemented). - #[rpc(name = "get_endorsements")] - fn get_endorsements( - &self, - _: Vec, - ) -> BoxFuture, ApiError>>; + #[method(name = "get_endorsements")] + async fn get_endorsements(&self, arg: Vec) -> RpcResult>; /// Get information on a block given its hash. - #[rpc(name = "get_block")] - fn get_block(&self, _: BlockId) -> BoxFuture>; + #[method(name = "get_block")] + async fn get_block(&self, arg: BlockId) -> RpcResult; /// Get information on the block at a slot in the blockclique. /// If there is no block at this slot a `None` is returned. 
- #[rpc(name = "get_blockclique_block_by_slot")] - fn get_blockclique_block_by_slot(&self, _: Slot) -> BoxFuture, ApiError>>; + #[method(name = "get_blockclique_block_by_slot")] + async fn get_blockclique_block_by_slot(&self, arg: Slot) -> RpcResult>; /// Get the block graph within the specified time interval. /// Optional parameters: from `` (included) and to `` (excluded) millisecond timestamp - #[rpc(name = "get_graph_interval")] - fn get_graph_interval(&self, _: TimeInterval) - -> BoxFuture, ApiError>>; + #[method(name = "get_graph_interval")] + async fn get_graph_interval(&self, arg: TimeInterval) -> RpcResult>; /// Get multiple datastore entries. - #[rpc(name = "get_datastore_entries")] - fn get_datastore_entries( + #[method(name = "get_datastore_entries")] + async fn get_datastore_entries( &self, - _: Vec, - ) -> BoxFuture, ApiError>>; + arg: Vec, + ) -> RpcResult>; /// Get addresses. - #[rpc(name = "get_addresses")] - fn get_addresses(&self, _: Vec
) -> BoxFuture, ApiError>>; + #[method(name = "get_addresses")] + async fn get_addresses(&self, arg: Vec
) -> RpcResult>; /// Adds operations to pool. Returns operations that were ok and sent to pool. - #[rpc(name = "send_operations")] - fn send_operations( - &self, - _: Vec, - ) -> BoxFuture, ApiError>>; + #[method(name = "send_operations")] + async fn send_operations(&self, arg: Vec) -> RpcResult>; /// Get events optionally filtered by: /// * start slot @@ -283,22 +291,19 @@ pub trait Endpoints { /// * emitter address /// * original caller address /// * operation id - #[rpc(name = "get_filtered_sc_output_event")] - fn get_filtered_sc_output_event( - &self, - _: EventFilter, - ) -> BoxFuture, ApiError>>; + #[method(name = "get_filtered_sc_output_event")] + async fn get_filtered_sc_output_event(&self, arg: EventFilter) + -> RpcResult>; /// Get OpenRPC specification. - #[rpc(name = "rpc.discover")] - fn get_openrpc_spec(&self) -> BoxFuture>; + #[method(name = "rpc.discover")] + async fn get_openrpc_spec(&self) -> RpcResult; } -fn wrong_api() -> BoxFuture> { - let closure = async move || Err(WrongAPI); - Box::pin(closure()) +fn wrong_api() -> RpcResult { + Err((WrongAPI).into()) } -fn _jsonrpc_assert(_method: &str, _request: Value, _response: Value) { - // TODO: jsonrpc_client_transports::RawClient::call_method ... see #1182 +fn _jsonrpsee_assert(_method: &str, _request: Value, _response: Value) { + // TODO: jsonrpsee_client_transports::RawClient::call_method ... 
see #1182 } diff --git a/massa-api/src/private.rs b/massa-api/src/private.rs index 07fab1453a0..161b2610053 100644 --- a/massa-api/src/private.rs +++ b/massa-api/src/private.rs @@ -2,12 +2,10 @@ use crate::config::APIConfig; use crate::error::ApiError; -use crate::{Endpoints, Private, RpcServer, StopHandle, Value, API}; +use crate::{MassaRpcServer, Private, RpcServer, StopHandle, Value, API}; -use jsonrpc_core::BoxFuture; -use jsonrpc_http_server::tokio::sync::mpsc; - -use massa_consensus_exports::{ConsensusCommandSender, ConsensusConfig}; +use async_trait::async_trait; +use jsonrpsee::core::{Error as JsonRpseeError, RpcResult}; use massa_execution_exports::ExecutionController; use massa_models::api::{ AddressInfo, BlockInfo, BlockSummary, DatastoreEntryInput, DatastoreEntryOutput, @@ -35,24 +33,21 @@ use parking_lot::RwLock; use std::net::{IpAddr, SocketAddr}; use std::str::FromStr; use std::sync::Arc; +use tokio::sync::mpsc; impl API { /// generate a new private API pub fn new( - consensus_command_sender: ConsensusCommandSender, network_command_sender: NetworkCommandSender, execution_controller: Box, api_settings: APIConfig, - consensus_settings: ConsensusConfig, node_wallet: Arc>, ) -> (Self, mpsc::Receiver<()>) { let (stop_node_channel, rx) = mpsc::channel(1); ( API(Private { - consensus_command_sender, network_command_sender, execution_controller, - consensus_config: consensus_settings, api_settings, stop_node_channel, node_wallet, @@ -62,181 +57,179 @@ impl API { } } +#[async_trait] impl RpcServer for API { - fn serve(self, url: &SocketAddr) -> StopHandle { - crate::serve(self, url) + async fn serve( + self, + url: &SocketAddr, + settings: &APIConfig, + ) -> Result { + crate::serve(self, url, settings).await } } #[doc(hidden)] -impl Endpoints for API { - fn stop_node(&self) -> BoxFuture> { +#[async_trait] +impl MassaRpcServer for API { + async fn stop_node(&self) -> RpcResult<()> { let stop = self.0.stop_node_channel.clone(); - let closure = async move || { - 
stop.send(()).await.map_err(|e| { - ApiError::SendChannelError(format!("error sending stop signal {}", e)) - })?; - Ok(()) - }; - Box::pin(closure()) + stop.send(()) + .await + .map_err(|e| ApiError::SendChannelError(format!("error sending stop signal {}", e)))?; + Ok(()) } - fn node_sign_message(&self, message: Vec) -> BoxFuture> { + async fn node_sign_message(&self, message: Vec) -> RpcResult { let network_command_sender = self.0.network_command_sender.clone(); - let closure = async move || Ok(network_command_sender.node_sign_message(message).await?); - Box::pin(closure()) + match network_command_sender.node_sign_message(message).await { + Ok(public_key_signature) => return Ok(public_key_signature), + Err(e) => return Err(ApiError::from(e).into()), + }; } - fn add_staking_secret_keys(&self, secret_keys: Vec) -> BoxFuture> { + async fn add_staking_secret_keys(&self, secret_keys: Vec) -> RpcResult<()> { let keypairs = match secret_keys.iter().map(|x| KeyPair::from_str(x)).collect() { Ok(keypairs) => keypairs, - Err(e) => { - let closure = async move || Err(ApiError::BadRequest(e.to_string())); - return Box::pin(closure()); - } + Err(e) => return Err(ApiError::BadRequest(e.to_string()).into()), }; + let node_wallet = self.0.node_wallet.clone(); - let closure = async move || { - let mut w_wallet = node_wallet.write(); - w_wallet.add_keypairs(keypairs)?; - Ok(()) + let mut w_wallet = node_wallet.write(); + match w_wallet.add_keypairs(keypairs) { + Ok(_) => return Ok(()), + Err(e) => return Err(ApiError::from(e).into()), }; - Box::pin(closure()) } - fn execute_read_only_bytecode( + async fn execute_read_only_bytecode( &self, _reqs: Vec, - ) -> BoxFuture, ApiError>> { + ) -> RpcResult> { crate::wrong_api::<_>() } - fn execute_read_only_call( + async fn execute_read_only_call( &self, _reqs: Vec, - ) -> BoxFuture, ApiError>> { + ) -> RpcResult> { crate::wrong_api::<_>() } - fn remove_staking_addresses(&self, addresses: Vec
) -> BoxFuture> { + async fn remove_staking_addresses(&self, addresses: Vec
) -> RpcResult<()> { let node_wallet = self.0.node_wallet.clone(); - let closure = async move || { - let mut w_wallet = node_wallet.write(); - w_wallet.remove_addresses(&addresses)?; - Ok(()) + let mut w_wallet = node_wallet.write(); + match w_wallet.remove_addresses(&addresses) { + Ok(()) => return Ok(()), + Err(e) => return Err(ApiError::from(e).into()), }; - Box::pin(closure()) } - fn get_staking_addresses(&self) -> BoxFuture, ApiError>> { + async fn get_staking_addresses(&self) -> RpcResult> { let node_wallet = self.0.node_wallet.clone(); - let closure = async move || Ok(node_wallet.write().get_wallet_address_list()); - Box::pin(closure()) + let addresses_set = node_wallet.write().get_wallet_address_list(); + Ok(addresses_set) } - fn node_ban_by_ip(&self, ips: Vec) -> BoxFuture> { + async fn node_ban_by_ip(&self, ips: Vec) -> RpcResult<()> { let network_command_sender = self.0.network_command_sender.clone(); - let closure = async move || Ok(network_command_sender.node_ban_by_ips(ips).await?); - Box::pin(closure()) + match network_command_sender.node_ban_by_ips(ips).await { + Ok(()) => return Ok(()), + Err(e) => return Err(ApiError::from(e).into()), + }; } - fn node_ban_by_id(&self, ids: Vec) -> BoxFuture> { + async fn node_ban_by_id(&self, ids: Vec) -> RpcResult<()> { let network_command_sender = self.0.network_command_sender.clone(); - let closure = async move || Ok(network_command_sender.node_ban_by_ids(ids).await?); - Box::pin(closure()) + match network_command_sender.node_ban_by_ids(ids).await { + Ok(()) => return Ok(()), + Err(e) => return Err(ApiError::from(e).into()), + }; } - fn node_unban_by_id(&self, ids: Vec) -> BoxFuture> { + async fn node_unban_by_id(&self, ids: Vec) -> RpcResult<()> { let network_command_sender = self.0.network_command_sender.clone(); - let closure = async move || Ok(network_command_sender.node_unban_by_ids(ids).await?); - Box::pin(closure()) + match network_command_sender.node_unban_by_ids(ids).await { + Ok(()) => return Ok(()), 
+ Err(e) => return Err(ApiError::from(e).into()), + }; } - fn node_unban_by_ip(&self, ips: Vec) -> BoxFuture> { + async fn node_unban_by_ip(&self, ips: Vec) -> RpcResult<()> { let network_command_sender = self.0.network_command_sender.clone(); - let closure = async move || Ok(network_command_sender.node_unban_ips(ips).await?); - Box::pin(closure()) + match network_command_sender.node_unban_ips(ips).await { + Ok(()) => return Ok(()), + Err(e) => return Err(ApiError::from(e).into()), + }; } - fn get_status(&self) -> BoxFuture> { + async fn get_status(&self) -> RpcResult { crate::wrong_api::() } - fn get_cliques(&self) -> BoxFuture, ApiError>> { + async fn get_cliques(&self) -> RpcResult> { crate::wrong_api::>() } - fn get_stakers(&self) -> BoxFuture, ApiError>> { + async fn get_stakers(&self) -> RpcResult> { crate::wrong_api::>() } - fn get_operations( - &self, - _: Vec, - ) -> BoxFuture, ApiError>> { + async fn get_operations(&self, _: Vec) -> RpcResult> { crate::wrong_api::>() } - fn get_endorsements( - &self, - _: Vec, - ) -> BoxFuture, ApiError>> { + async fn get_endorsements(&self, _: Vec) -> RpcResult> { crate::wrong_api::>() } - fn get_block(&self, _: BlockId) -> BoxFuture> { + async fn get_block(&self, _: BlockId) -> RpcResult { crate::wrong_api::() } - fn get_blockclique_block_by_slot(&self, _: Slot) -> BoxFuture, ApiError>> { + async fn get_blockclique_block_by_slot(&self, _: Slot) -> RpcResult> { crate::wrong_api::>() } - fn get_graph_interval( - &self, - _: TimeInterval, - ) -> BoxFuture, ApiError>> { + async fn get_graph_interval(&self, _: TimeInterval) -> RpcResult> { crate::wrong_api::>() } - fn get_datastore_entries( + async fn get_datastore_entries( &self, _: Vec, - ) -> BoxFuture, ApiError>> { + ) -> RpcResult> { crate::wrong_api() } - fn get_addresses(&self, _: Vec
) -> BoxFuture, ApiError>> { + async fn get_addresses(&self, _: Vec
) -> RpcResult> { crate::wrong_api::>() } - fn send_operations( - &self, - _: Vec, - ) -> BoxFuture, ApiError>> { + async fn send_operations(&self, _: Vec) -> RpcResult> { crate::wrong_api::>() } - fn get_filtered_sc_output_event( - &self, - _: EventFilter, - ) -> BoxFuture, ApiError>> { + async fn get_filtered_sc_output_event(&self, _: EventFilter) -> RpcResult> { crate::wrong_api::>() } - fn node_whitelist(&self, ips: Vec) -> BoxFuture> { + async fn node_whitelist(&self, ips: Vec) -> RpcResult<()> { let network_command_sender = self.0.network_command_sender.clone(); - let closure = async move || Ok(network_command_sender.whitelist(ips).await?); - Box::pin(closure()) + match network_command_sender.whitelist(ips).await { + Ok(()) => return Ok(()), + Err(e) => return Err(ApiError::from(e).into()), + }; } - fn node_remove_from_whitelist(&self, ips: Vec) -> BoxFuture> { + async fn node_remove_from_whitelist(&self, ips: Vec) -> RpcResult<()> { let network_command_sender = self.0.network_command_sender.clone(); - let closure = async move || Ok(network_command_sender.remove_from_whitelist(ips).await?); - Box::pin(closure()) + match network_command_sender.remove_from_whitelist(ips).await { + Ok(()) => return Ok(()), + Err(e) => return Err(ApiError::from(e).into()), + }; } - fn get_openrpc_spec(&self) -> BoxFuture> { + async fn get_openrpc_spec(&self) -> RpcResult { crate::wrong_api::() } } diff --git a/massa-api/src/public.rs b/massa-api/src/public.rs index 11bad9f707d..0774f3955ff 100644 --- a/massa-api/src/public.rs +++ b/massa-api/src/public.rs @@ -3,13 +3,14 @@ use crate::config::APIConfig; use crate::error::ApiError; -use crate::{serde_json, Endpoints, Public, RpcServer, StopHandle, Value, API}; -use jsonrpc_core::BoxFuture; -use massa_consensus_exports::{ConsensusCommandSender, ConsensusConfig}; +use crate::{MassaRpcServer, Public, RpcServer, StopHandle, Value, API}; +use async_trait::async_trait; +use jsonrpsee::core::{Error as JsonRpseeError, RpcResult}; +use 
massa_consensus_exports::block_status::DiscardReason; +use massa_consensus_exports::ConsensusController; use massa_execution_exports::{ ExecutionController, ExecutionStackElement, ReadOnlyExecutionRequest, ReadOnlyExecutionTarget, }; -use massa_graph::DiscardReason; use massa_models::api::{ BlockGraphStatus, DatastoreEntryInput, DatastoreEntryOutput, OperationInput, ReadOnlyBytecodeExecution, ReadOnlyCall, SlotAmount, @@ -57,11 +58,10 @@ use std::net::{IpAddr, SocketAddr}; impl API { /// generate a new public API pub fn new( - consensus_command_sender: ConsensusCommandSender, + consensus_controller: Box, execution_controller: Box, api_settings: APIConfig, selector_controller: Box, - consensus_settings: ConsensusConfig, pool_command_sender: Box, protocol_command_sender: ProtocolCommandSender, network_settings: NetworkConfig, @@ -72,8 +72,7 @@ impl API { storage: Storage, ) -> Self { API(Public { - consensus_command_sender, - consensus_config: consensus_settings, + consensus_controller, api_settings, pool_command_sender, network_settings, @@ -89,40 +88,44 @@ impl API { } } +#[async_trait] impl RpcServer for API { - fn serve(self, url: &SocketAddr) -> StopHandle { - crate::serve(self, url) + async fn serve( + self, + url: &SocketAddr, + api_config: &APIConfig, + ) -> Result { + crate::serve(self, url, api_config).await } } #[doc(hidden)] -impl Endpoints for API { - fn stop_node(&self) -> BoxFuture> { +#[async_trait] +impl MassaRpcServer for API { + async fn stop_node(&self) -> RpcResult<()> { crate::wrong_api::<()>() } - fn node_sign_message(&self, _: Vec) -> BoxFuture> { + async fn node_sign_message(&self, _: Vec) -> RpcResult { crate::wrong_api::() } - fn add_staking_secret_keys(&self, _: Vec) -> BoxFuture> { + async fn add_staking_secret_keys(&self, _: Vec) -> RpcResult<()> { crate::wrong_api::<()>() } - fn execute_read_only_bytecode( + async fn execute_read_only_bytecode( &self, reqs: Vec, - ) -> BoxFuture, ApiError>> { + ) -> RpcResult> { if reqs.len() as u64 > 
self.0.api_settings.max_arguments { - let closure = async move || Err(ApiError::BadRequest("too many arguments".into())); - return Box::pin(closure()); + return Err(ApiError::BadRequest("too many arguments".into()).into()); } let mut res: Vec = Vec::with_capacity(reqs.len()); for ReadOnlyBytecodeExecution { max_gas, address, - simulated_gas_price, bytecode, operation_datastore, } in reqs @@ -142,9 +145,11 @@ impl Endpoints for API { match deserializer.deserialize::(&v) { Ok((_, deserialized)) => Some(deserialized), Err(e) => { - let err_str = format!("Operation datastore error: {}", e); - let closure = async move || Err(ApiError::InconsistencyError(err_str)); - return Box::pin(closure()); + return Err(ApiError::InconsistencyError(format!( + "Operation datastore error: {}", + e + )) + .into()) } } } @@ -159,7 +164,6 @@ impl Endpoints for API { // translate request let req = ReadOnlyExecutionRequest { max_gas, - simulated_gas_price, target: ReadOnlyExecutionTarget::BytecodeExecution(bytecode), call_stack: vec![ExecutionStackElement { address, @@ -190,23 +194,20 @@ impl Endpoints for API { } // return result - let closure = async move || Ok(res); - Box::pin(closure()) + Ok(res) } - fn execute_read_only_call( + async fn execute_read_only_call( &self, reqs: Vec, - ) -> BoxFuture, ApiError>> { + ) -> RpcResult> { if reqs.len() as u64 > self.0.api_settings.max_arguments { - let closure = async move || Err(ApiError::BadRequest("too many arguments".into())); - return Box::pin(closure()); + return Err(ApiError::BadRequest("too many arguments".into()).into()); } let mut res: Vec = Vec::with_capacity(reqs.len()); for ReadOnlyCall { max_gas, - simulated_gas_price, target_address, target_function, parameter, @@ -226,7 +227,6 @@ impl Endpoints for API { // translate request let req = ReadOnlyExecutionRequest { max_gas, - simulated_gas_price, target: ReadOnlyExecutionTarget::FunctionCall { target_func: target_function, target_addr: target_address, @@ -269,134 +269,162 @@ impl 
Endpoints for API { } // return result - let closure = async move || Ok(res); - Box::pin(closure()) + Ok(res) } - fn remove_staking_addresses(&self, _: Vec
) -> BoxFuture> { + async fn remove_staking_addresses(&self, _: Vec
) -> RpcResult<()> { crate::wrong_api::<()>() } - fn get_staking_addresses(&self) -> BoxFuture, ApiError>> { + async fn get_staking_addresses(&self) -> RpcResult> { crate::wrong_api::>() } - fn node_ban_by_ip(&self, _: Vec) -> BoxFuture> { + async fn node_ban_by_ip(&self, _: Vec) -> RpcResult<()> { crate::wrong_api::<()>() } - fn node_ban_by_id(&self, _: Vec) -> BoxFuture> { + async fn node_ban_by_id(&self, _: Vec) -> RpcResult<()> { crate::wrong_api::<()>() } - fn node_unban_by_ip(&self, _: Vec) -> BoxFuture> { + async fn node_unban_by_ip(&self, _: Vec) -> RpcResult<()> { crate::wrong_api::<()>() } - fn node_unban_by_id(&self, _: Vec) -> BoxFuture> { + async fn node_unban_by_id(&self, _: Vec) -> RpcResult<()> { crate::wrong_api::<()>() } - fn get_status(&self) -> BoxFuture> { + async fn get_status(&self) -> RpcResult { let execution_controller = self.0.execution_controller.clone(); - let consensus_command_sender = self.0.consensus_command_sender.clone(); + let consensus_controller = self.0.consensus_controller.clone(); let network_command_sender = self.0.network_command_sender.clone(); let network_config = self.0.network_settings.clone(); let version = self.0.version; - let consensus_settings = self.0.consensus_config.clone(); + let api_settings = self.0.api_settings.clone(); let compensation_millis = self.0.compensation_millis; let pool_command_sender = self.0.pool_command_sender.clone(); let node_id = self.0.node_id; let config = CompactConfig::default(); - let closure = async move || { - let now = MassaTime::now(compensation_millis)?; - let last_slot = get_latest_block_slot_at_timestamp( - consensus_settings.thread_count, - consensus_settings.t0, - consensus_settings.genesis_timestamp, - now, - )?; - - let execution_stats = execution_controller.get_stats(); - - let (consensus_stats, network_stats, peers) = tokio::join!( - consensus_command_sender.get_stats(), - network_command_sender.get_network_stats(), - network_command_sender.get_peers() - ); + let now = 
match MassaTime::now(compensation_millis) { + Ok(now) => now, + Err(e) => return Err(ApiError::from(e).into()), + }; - let pool_stats = ( - pool_command_sender.get_operation_count(), - pool_command_sender.get_endorsement_count(), - ); + let last_slot_result = get_latest_block_slot_at_timestamp( + api_settings.thread_count, + api_settings.t0, + api_settings.genesis_timestamp, + now, + ); + let last_slot = match last_slot_result { + Ok(last_slot) => last_slot, + Err(e) => return Err(ApiError::from(e).into()), + }; - Ok(NodeStatus { - node_id, - node_ip: network_config.routable_ip, - version, - current_time: now, - connected_nodes: peers? - .peers - .iter() - .flat_map(|(ip, peer)| { - peer.active_nodes - .iter() - .map(move |(id, is_outgoing)| (*id, (*ip, *is_outgoing))) - }) - .collect(), - last_slot, - next_slot: last_slot - .unwrap_or_else(|| Slot::new(0, 0)) - .get_next_slot(consensus_settings.thread_count)?, - execution_stats, - consensus_stats: consensus_stats?, - network_stats: network_stats?, - pool_stats, - config, - current_cycle: last_slot - .unwrap_or_else(|| Slot::new(0, 0)) - .get_cycle(consensus_settings.periods_per_cycle), - }) + let execution_stats = execution_controller.get_stats(); + let consensus_stats_result = consensus_controller.get_stats(); + let consensus_stats = match consensus_stats_result { + Ok(consensus_stats) => consensus_stats, + Err(e) => return Err(ApiError::from(e).into()), + }; + + let (network_stats_result, peers_result) = tokio::join!( + network_command_sender.get_network_stats(), + network_command_sender.get_peers() + ); + + let network_stats = match network_stats_result { + Ok(network_stats) => network_stats, + Err(e) => return Err(ApiError::from(e).into()), + }; + + let peers = match peers_result { + Ok(peers) => peers, + Err(e) => return Err(ApiError::from(e).into()), + }; + + let pool_stats = ( + pool_command_sender.get_operation_count(), + pool_command_sender.get_endorsement_count(), + ); + + let next_slot_result = 
last_slot + .unwrap_or_else(|| Slot::new(0, 0)) + .get_next_slot(api_settings.thread_count); + + let next_slot = match next_slot_result { + Ok(next_slot) => next_slot, + Err(e) => return Err(ApiError::from(e).into()), }; - Box::pin(closure()) + + Ok(NodeStatus { + node_id, + node_ip: network_config.routable_ip, + version, + current_time: now, + connected_nodes: peers + .peers + .iter() + .flat_map(|(ip, peer)| { + peer.active_nodes + .iter() + .map(move |(id, is_outgoing)| (*id, (*ip, *is_outgoing))) + }) + .collect(), + last_slot, + next_slot, + execution_stats, + consensus_stats, + network_stats, + pool_stats, + config, + current_cycle: last_slot + .unwrap_or_else(|| Slot::new(0, 0)) + .get_cycle(api_settings.periods_per_cycle), + }) } - fn get_cliques(&self) -> BoxFuture, ApiError>> { - let consensus_command_sender = self.0.consensus_command_sender.clone(); - let closure = async move || Ok(consensus_command_sender.get_cliques().await?); - Box::pin(closure()) + async fn get_cliques(&self) -> RpcResult> { + let consensus_controller = self.0.consensus_controller.clone(); + Ok(consensus_controller.get_cliques()) } - fn get_stakers(&self) -> BoxFuture, ApiError>> { + async fn get_stakers(&self) -> RpcResult> { let execution_controller = self.0.execution_controller.clone(); - let cfg = self.0.consensus_config.clone(); + let cfg = self.0.api_settings.clone(); let compensation_millis = self.0.compensation_millis; - let closure = async move || { - let curr_cycle = get_latest_block_slot_at_timestamp( - cfg.thread_count, - cfg.t0, - cfg.genesis_timestamp, - MassaTime::now(compensation_millis)?, - )? 
- .unwrap_or_else(|| Slot::new(0, 0)) - .get_cycle(cfg.periods_per_cycle); - let mut staker_vec = execution_controller - .get_cycle_active_rolls(curr_cycle) - .into_iter() - .collect::>(); - staker_vec.sort_by(|&(_, roll_counts_a), &(_, roll_counts_b)| { - roll_counts_b.cmp(&roll_counts_a) - }); - Ok(staker_vec) + let now = match MassaTime::now(compensation_millis) { + Ok(now) => now, + Err(e) => return Err(ApiError::from(e).into()), + }; + + let latest_block_slot_at_timestamp_result = get_latest_block_slot_at_timestamp( + cfg.thread_count, + cfg.t0, + cfg.genesis_timestamp, + now, + ); + + let curr_cycle = match latest_block_slot_at_timestamp_result { + Ok(curr_cycle) => curr_cycle + .unwrap_or_else(|| Slot::new(0, 0)) + .get_cycle(cfg.periods_per_cycle), + Err(e) => return Err(ApiError::from(e).into()), }; - Box::pin(closure()) + + let mut staker_vec = execution_controller + .get_cycle_active_rolls(curr_cycle) + .into_iter() + .collect::>(); + staker_vec + .sort_by(|&(_, roll_counts_a), &(_, roll_counts_b)| roll_counts_b.cmp(&roll_counts_a)); + Ok(staker_vec) } - fn get_operations( - &self, - ops: Vec, - ) -> BoxFuture, ApiError>> { + async fn get_operations(&self, ops: Vec) -> RpcResult> { // get the operations and the list of blocks that contain them from storage let storage_info: Vec<(WrappedOperation, PreHashSet)> = { let read_blocks = self.0.storage.read_blocks(); @@ -423,64 +451,58 @@ impl Endpoints for API { let in_pool = self.0.pool_command_sender.contains_operations(&ops); let api_cfg = self.0.api_settings.clone(); - let consensus_command_sender = self.0.consensus_command_sender.clone(); - let closure = async move || { - if ops.len() as u64 > api_cfg.max_arguments { - return Err(ApiError::BadRequest("too many arguments".into())); - } + let consensus_controller = self.0.consensus_controller.clone(); + if ops.len() as u64 > api_cfg.max_arguments { + return Err(ApiError::BadRequest("too many arguments".into()).into()); + } - // check finality by 
cross-referencing Consensus and looking for final blocks that contain the op - let is_final: Vec = { - let involved_blocks: Vec = storage_info - .iter() - .flat_map(|(_op, bs)| bs.iter()) - .unique() - .cloned() - .collect(); - let involved_block_statuses = consensus_command_sender - .get_block_statuses(&involved_blocks) - .await?; - let block_statuses: PreHashMap = involved_blocks - .into_iter() - .zip(involved_block_statuses.into_iter()) - .collect(); - storage_info - .iter() - .map(|(_op, bs)| { - bs.iter() - .any(|b| block_statuses.get(b) == Some(&BlockGraphStatus::Final)) - }) - .collect() - }; + // check finality by cross-referencing Consensus and looking for final blocks that contain the op + let is_final: Vec = { + let involved_blocks: Vec = storage_info + .iter() + .flat_map(|(_op, bs)| bs.iter()) + .unique() + .cloned() + .collect(); - // gather all values into a vector of OperationInfo instances - let mut res: Vec = Vec::with_capacity(ops.len()); - let zipped_iterator = izip!( - ops.into_iter(), - storage_info.into_iter(), - in_pool.into_iter(), - is_final.into_iter() - ); - for (id, (operation, in_blocks), in_pool, is_final) in zipped_iterator { - res.push(OperationInfo { - id, - operation, - in_pool, - is_final, - in_blocks: in_blocks.into_iter().collect(), - }); - } + let involved_block_statuses = consensus_controller.get_block_statuses(&involved_blocks); - // return values in the right order - Ok(res) + let block_statuses: PreHashMap = involved_blocks + .into_iter() + .zip(involved_block_statuses.into_iter()) + .collect(); + storage_info + .iter() + .map(|(_op, bs)| { + bs.iter() + .any(|b| block_statuses.get(b) == Some(&BlockGraphStatus::Final)) + }) + .collect() }; - Box::pin(closure()) + + // gather all values into a vector of OperationInfo instances + let mut res: Vec = Vec::with_capacity(ops.len()); + let zipped_iterator = izip!( + ops.into_iter(), + storage_info.into_iter(), + in_pool.into_iter(), + is_final.into_iter() + ); + for (id, 
(operation, in_blocks), in_pool, is_final) in zipped_iterator { + res.push(OperationInfo { + id, + operation, + in_pool, + is_final, + in_blocks: in_blocks.into_iter().collect(), + }); + } + + // return values in the right order + Ok(res) } - fn get_endorsements( - &self, - eds: Vec, - ) -> BoxFuture, ApiError>> { + async fn get_endorsements(&self, eds: Vec) -> RpcResult> { // get the endorsements and the list of blocks that contain them from storage let storage_info: Vec<(WrappedEndorsement, PreHashSet)> = { let read_blocks = self.0.storage.read_blocks(); @@ -506,206 +528,192 @@ impl Endpoints for API { // ask pool whether it carries the operations let in_pool = self.0.pool_command_sender.contains_endorsements(&eds); - let consensus_command_sender = self.0.consensus_command_sender.clone(); + let consensus_controller = self.0.consensus_controller.clone(); let api_cfg = self.0.api_settings.clone(); - let closure = async move || { - if eds.len() as u64 > api_cfg.max_arguments { - return Err(ApiError::BadRequest("too many arguments".into())); - } - // check finality by cross-referencing Consensus and looking for final blocks that contain the endorsement - let is_final: Vec = { - let involved_blocks: Vec = storage_info - .iter() - .flat_map(|(_ed, bs)| bs.iter()) - .unique() - .cloned() - .collect(); - let involved_block_statuses = consensus_command_sender - .get_block_statuses(&involved_blocks) - .await?; - let block_statuses: PreHashMap = involved_blocks - .into_iter() - .zip(involved_block_statuses.into_iter()) - .collect(); - storage_info - .iter() - .map(|(_ed, bs)| { - bs.iter() - .any(|b| block_statuses.get(b) == Some(&BlockGraphStatus::Final)) - }) - .collect() - }; + if eds.len() as u64 > api_cfg.max_arguments { + return Err(ApiError::BadRequest("too many arguments".into()).into()); + } - // gather all values into a vector of EndorsementInfo instances - let mut res: Vec = Vec::with_capacity(eds.len()); - let zipped_iterator = izip!( - eds.into_iter(), - 
storage_info.into_iter(), - in_pool.into_iter(), - is_final.into_iter() - ); - for (id, (endorsement, in_blocks), in_pool, is_final) in zipped_iterator { - res.push(EndorsementInfo { - id, - endorsement, - in_pool, - is_final, - in_blocks: in_blocks.into_iter().collect(), - }); - } + // check finality by cross-referencing Consensus and looking for final blocks that contain the endorsement + let is_final: Vec = { + let involved_blocks: Vec = storage_info + .iter() + .flat_map(|(_ed, bs)| bs.iter()) + .unique() + .cloned() + .collect(); + + let involved_block_statuses = consensus_controller.get_block_statuses(&involved_blocks); - // return values in the right order - Ok(res) + let block_statuses: PreHashMap = involved_blocks + .into_iter() + .zip(involved_block_statuses.into_iter()) + .collect(); + storage_info + .iter() + .map(|(_ed, bs)| { + bs.iter() + .any(|b| block_statuses.get(b) == Some(&BlockGraphStatus::Final)) + }) + .collect() }; - Box::pin(closure()) + + // gather all values into a vector of EndorsementInfo instances + let mut res: Vec = Vec::with_capacity(eds.len()); + let zipped_iterator = izip!( + eds.into_iter(), + storage_info.into_iter(), + in_pool.into_iter(), + is_final.into_iter() + ); + for (id, (endorsement, in_blocks), in_pool, is_final) in zipped_iterator { + res.push(EndorsementInfo { + id, + endorsement, + in_pool, + is_final, + in_blocks: in_blocks.into_iter().collect(), + }); + } + + // return values in the right order + Ok(res) } /// gets a block. 
Returns None if not found /// only active blocks are returned - fn get_block(&self, id: BlockId) -> BoxFuture> { - let consensus_command_sender = self.0.consensus_command_sender.clone(); + async fn get_block(&self, id: BlockId) -> RpcResult { + let consensus_controller = self.0.consensus_controller.clone(); let storage = self.0.storage.clone_without_refs(); - let closure = async move || { - let block = match storage.read_blocks().get(&id).cloned() { - Some(b) => b.content, - None => { - return Ok(BlockInfo { id, content: None }); - } - }; - - let graph_status = consensus_command_sender - .get_block_statuses(&[id]) - .await? - .into_iter() - .next() - .expect("expected get_block_statuses to return one element"); - - let is_final = graph_status == BlockGraphStatus::Final; - let is_in_blockclique = graph_status == BlockGraphStatus::ActiveInBlockclique; - let is_candidate = graph_status == BlockGraphStatus::ActiveInBlockclique - || graph_status == BlockGraphStatus::ActiveInAlternativeCliques; - let is_discarded = graph_status == BlockGraphStatus::Discarded; - - Ok(BlockInfo { - id, - content: Some(BlockInfoContent { - is_final, - is_in_blockclique, - is_candidate, - is_discarded, - block, - }), - }) + let block = match storage.read_blocks().get(&id).cloned() { + Some(b) => b.content, + None => { + return Ok(BlockInfo { id, content: None }); + } }; - Box::pin(closure()) + + let graph_status = consensus_controller + .get_block_statuses(&[id]) + .into_iter() + .next() + .expect("expected get_block_statuses to return one element"); + + let is_final = graph_status == BlockGraphStatus::Final; + let is_in_blockclique = graph_status == BlockGraphStatus::ActiveInBlockclique; + let is_candidate = graph_status == BlockGraphStatus::ActiveInBlockclique + || graph_status == BlockGraphStatus::ActiveInAlternativeCliques; + let is_discarded = graph_status == BlockGraphStatus::Discarded; + + Ok(BlockInfo { + id, + content: Some(BlockInfoContent { + is_final, + is_in_blockclique, + 
is_candidate, + is_discarded, + block, + }), + }) } - fn get_blockclique_block_by_slot( - &self, - slot: Slot, - ) -> BoxFuture, ApiError>> { - let consensus_command_sender = self.0.consensus_command_sender.clone(); + async fn get_blockclique_block_by_slot(&self, slot: Slot) -> RpcResult> { + let consensus_controller = self.0.consensus_controller.clone(); let storage = self.0.storage.clone_without_refs(); - let closure = async move || { - let block_id_result = consensus_command_sender - .get_blockclique_block_at_slot(slot) - .await; - let block_id = match block_id_result? { - Some(id) => id, - None => return Ok(None), - }; - let res = storage - .read_blocks() - .get(&block_id) - .map(|b| b.content.clone()); - Ok(res) + + let block_id_option = consensus_controller.get_blockclique_block_at_slot(slot); + + let block_id = match block_id_option { + Some(id) => id, + None => return Ok(None), }; - Box::pin(closure()) + + let res = storage + .read_blocks() + .get(&block_id) + .map(|b| b.content.clone()); + Ok(res) } /// gets an interval of the block graph from consensus, with time filtering /// time filtering is done consensus-side to prevent communication overhead - fn get_graph_interval( - &self, - time: TimeInterval, - ) -> BoxFuture, ApiError>> { - let consensus_command_sender = self.0.consensus_command_sender.clone(); - let consensus_settings = self.0.consensus_config.clone(); - let closure = async move || { - // filter blocks from graph_export - let (start_slot, end_slot) = time_range_to_slot_range( - consensus_settings.thread_count, - consensus_settings.t0, - consensus_settings.genesis_timestamp, - time.start, - time.end, - )?; - let graph = consensus_command_sender - .get_block_graph_status(start_slot, end_slot) - .await?; - let mut res = Vec::with_capacity(graph.active_blocks.len()); - let blockclique = graph - .max_cliques - .iter() - .find(|clique| clique.is_blockclique) - .ok_or_else(|| ApiError::InconsistencyError("missing blockclique".to_string()))?; - for 
(id, exported_block) in graph.active_blocks.into_iter() { + async fn get_graph_interval(&self, time: TimeInterval) -> RpcResult> { + let consensus_controller = self.0.consensus_controller.clone(); + let api_settings = self.0.api_settings.clone(); + + // filter blocks from graph_export + let time_range_to_slot_range_result = time_range_to_slot_range( + api_settings.thread_count, + api_settings.t0, + api_settings.genesis_timestamp, + time.start, + time.end, + ); + + let (start_slot, end_slot) = match time_range_to_slot_range_result { + Ok(time_range_to_slot_range) => time_range_to_slot_range, + Err(e) => return Err(ApiError::from(e).into()), + }; + + let graph = match consensus_controller.get_block_graph_status(start_slot, end_slot) { + Ok(graph) => graph, + Err(e) => return Err(ApiError::from(e).into()), + }; + + let mut res = Vec::with_capacity(graph.active_blocks.len()); + let blockclique = graph + .max_cliques + .iter() + .find(|clique| clique.is_blockclique) + .ok_or_else(|| ApiError::InconsistencyError("missing blockclique".to_string()))?; + for (id, exported_block) in graph.active_blocks.into_iter() { + res.push(BlockSummary { + id, + is_final: exported_block.is_final, + is_stale: false, + is_in_blockclique: blockclique.block_ids.contains(&id), + slot: exported_block.header.content.slot, + creator: exported_block.header.creator_address, + parents: exported_block.header.content.parents, + }); + } + for (id, (reason, (slot, creator, parents))) in graph.discarded_blocks.into_iter() { + if reason == DiscardReason::Stale { res.push(BlockSummary { id, - is_final: exported_block.is_final, - is_stale: false, - is_in_blockclique: blockclique.block_ids.contains(&id), - slot: exported_block.header.content.slot, - creator: exported_block.header.creator_address, - parents: exported_block.header.content.parents, + is_final: false, + is_stale: true, + is_in_blockclique: false, + slot, + creator, + parents, }); } - for (id, (reason, (slot, creator, parents))) in 
graph.discarded_blocks.into_iter() { - if reason == DiscardReason::Stale { - res.push(BlockSummary { - id, - is_final: false, - is_stale: true, - is_in_blockclique: false, - slot, - creator, - parents, - }); - } - } - Ok(res) - }; - Box::pin(closure()) + } + Ok(res) } - fn get_datastore_entries( + async fn get_datastore_entries( &self, entries: Vec, - ) -> BoxFuture, ApiError>> { + ) -> RpcResult> { let execution_controller = self.0.execution_controller.clone(); - let closure = async move || { - Ok(execution_controller - .get_final_and_active_data_entry( - entries - .into_iter() - .map(|input| (input.address, input.key)) - .collect::>(), - ) - .into_iter() - .map(|output| DatastoreEntryOutput { - final_value: output.0, - candidate_value: output.1, - }) - .collect()) - }; - Box::pin(closure()) + Ok(execution_controller + .get_final_and_active_data_entry( + entries + .into_iter() + .map(|input| (input.address, input.key)) + .collect::>(), + ) + .into_iter() + .map(|output| DatastoreEntryOutput { + final_value: output.0, + candidate_value: output.1, + }) + .collect()) } - fn get_addresses( - &self, - addresses: Vec
, - ) -> BoxFuture, ApiError>> { + async fn get_addresses(&self, addresses: Vec
) -> RpcResult> { // get info from storage about which blocks the addresses have created let created_blocks: Vec> = { let lck = self.0.storage.read_blocks(); @@ -751,9 +759,9 @@ impl Endpoints for API { // get future draws from selector let selection_draws = { let cur_slot = timeslots::get_current_latest_block_slot( - self.0.consensus_config.thread_count, - self.0.consensus_config.t0, - self.0.consensus_config.genesis_timestamp, + self.0.api_settings.thread_count, + self.0.api_settings.t0, + self.0.api_settings.genesis_timestamp, self.0.compensation_millis, ) .expect("could not get latest current slot") @@ -797,7 +805,7 @@ impl Endpoints for API { res.push(AddressInfo { // general address info address, - thread: address.get_thread(self.0.consensus_config.thread_count), + thread: address.get_thread(self.0.api_settings.thread_count), // final execution info final_balance: execution_infos.final_balance, @@ -836,65 +844,69 @@ impl Endpoints for API { }); } - let closure = async move || Ok(res); - Box::pin(closure()) + Ok(res) } - fn send_operations( - &self, - ops: Vec, - ) -> BoxFuture, ApiError>> { + async fn send_operations(&self, ops: Vec) -> RpcResult> { let mut cmd_sender = self.0.pool_command_sender.clone(); let mut protocol_sender = self.0.protocol_command_sender.clone(); let api_cfg = self.0.api_settings.clone(); let mut to_send = self.0.storage.clone_without_refs(); - let closure = async move || { - if ops.len() as u64 > api_cfg.max_arguments { - return Err(ApiError::BadRequest("too many arguments".into())); - } - let operation_deserializer = WrappedDeserializer::new(OperationDeserializer::new( - api_cfg.max_datastore_value_length, - api_cfg.max_function_name_length, - api_cfg.max_parameter_size, - api_cfg.max_op_datastore_entry_count, - api_cfg.max_op_datastore_key_length, - api_cfg.max_op_datastore_value_length, - )); - let verified_ops = ops - .into_iter() - .map(|op_input| { - let mut op_serialized = Vec::new(); - 
op_serialized.extend(op_input.signature.to_bytes()); - op_serialized.extend(op_input.creator_public_key.to_bytes()); - op_serialized.extend(op_input.serialized_content); - let (rest, op): (&[u8], WrappedOperation) = operation_deserializer - .deserialize::(&op_serialized) - .map_err(|err| { - ApiError::ModelsError(ModelsError::DeserializeError(err.to_string())) - })?; - if rest.is_empty() { - Ok(op) - } else { - Err(ApiError::ModelsError(ModelsError::DeserializeError( - "There is data left after operation deserialization".to_owned(), - ))) - } - }) - .map(|op| match op { - Ok(operation) => { - operation.verify_signature()?; - Ok(operation) - } - Err(e) => Err(e), - }) - .collect::, ApiError>>()?; - to_send.store_operations(verified_ops.clone()); - let ids: Vec = verified_ops.iter().map(|op| op.id).collect(); - cmd_sender.add_operations(to_send.clone()); - protocol_sender.propagate_operations(to_send).await?; - Ok(ids) - }; - Box::pin(closure()) + + if ops.len() as u64 > api_cfg.max_arguments { + return Err(ApiError::BadRequest("too many arguments".into()).into()); + } + let operation_deserializer = WrappedDeserializer::new(OperationDeserializer::new( + api_cfg.max_datastore_value_length, + api_cfg.max_function_name_length, + api_cfg.max_parameter_size, + api_cfg.max_op_datastore_entry_count, + api_cfg.max_op_datastore_key_length, + api_cfg.max_op_datastore_value_length, + )); + let verified_ops = ops + .into_iter() + .map(|op_input| { + let mut op_serialized = Vec::new(); + op_serialized.extend(op_input.signature.to_bytes()); + op_serialized.extend(op_input.creator_public_key.to_bytes()); + op_serialized.extend(op_input.serialized_content); + let (rest, op): (&[u8], WrappedOperation) = operation_deserializer + .deserialize::(&op_serialized) + .map_err(|err| { + ApiError::ModelsError(ModelsError::DeserializeError(err.to_string())) + })?; + if rest.is_empty() { + Ok(op) + } else { + Err(ApiError::ModelsError(ModelsError::DeserializeError( + "There is data left after 
operation deserialization".to_owned(), + )) + .into()) + } + }) + .map(|op| match op { + Ok(operation) => { + let _verify_signature = match operation.verify_signature() { + Ok(()) => (), + Err(e) => return Err(ApiError::from(e).into()), + }; + Ok(operation) + } + Err(e) => Err(e), + }) + .collect::>>()?; + to_send.store_operations(verified_ops.clone()); + let ids: Vec = verified_ops.iter().map(|op| op.id).collect(); + cmd_sender.add_operations(to_send.clone()); + + tokio::task::spawn_blocking(move || protocol_sender.propagate_operations(to_send)) + .await + .map_err(|err| ApiError::InternalServerError(err.to_string()))? + .map_err(|err| { + ApiError::InternalServerError(format!("Failed to propagate operations: {}", err)) + })?; + Ok(ids) } /// Get events optionally filtered by: @@ -903,48 +915,47 @@ impl Endpoints for API { /// * emitter address /// * original caller address /// * operation id - fn get_filtered_sc_output_event( + async fn get_filtered_sc_output_event( &self, filter: EventFilter, - ) -> BoxFuture, ApiError>> { + ) -> RpcResult> { let events = self .0 .execution_controller .get_filtered_sc_output_event(filter); // TODO: get rid of the async part - let closure = async move || Ok(events); - Box::pin(closure()) + Ok(events) } - fn node_whitelist(&self, _: Vec) -> BoxFuture> { + async fn node_whitelist(&self, _: Vec) -> RpcResult<()> { crate::wrong_api::<()>() } - fn node_remove_from_whitelist(&self, _: Vec) -> BoxFuture> { + async fn node_remove_from_whitelist(&self, _: Vec) -> RpcResult<()> { crate::wrong_api::<()>() } - fn get_openrpc_spec(&self) -> BoxFuture> { + async fn get_openrpc_spec(&self) -> RpcResult { let openrpc_spec_path = self.0.api_settings.openrpc_spec_path.clone(); - let closure = async move || { - std::fs::read_to_string(openrpc_spec_path) - .map_err(|e| { + let openrpc: RpcResult = std::fs::read_to_string(openrpc_spec_path) + .map_err(|e| { + ApiError::InternalServerError(format!( + "failed to read OpenRPC specification: {}", + e + 
)) + .into() + }) + .and_then(|openrpc_str| { + serde_json::from_str(&openrpc_str).map_err(|e| { ApiError::InternalServerError(format!( - "failed to read OpenRPC specification: {}", + "failed to parse OpenRPC specification: {}", e )) + .into() }) - .and_then(|openrpc_str| { - serde_json::from_str(&openrpc_str).map_err(|e| { - ApiError::InternalServerError(format!( - "failed to parse OpenRPC specification: {}", - e - )) - }) - }) - }; + }); - Box::pin(closure()) + openrpc } } diff --git a/massa-async-pool/src/changes.rs b/massa-async-pool/src/changes.rs index 8bc2d92739d..bdbb79719cd 100644 --- a/massa-async-pool/src/changes.rs +++ b/massa-async-pool/src/changes.rs @@ -64,19 +64,19 @@ impl Serializer for AsyncPoolChangesSerializer { /// use std::str::FromStr; /// use massa_async_pool::{AsyncMessage, Change, AsyncPoolChanges, AsyncPoolChangesSerializer}; /// - /// let message = AsyncMessage { - /// emission_slot: Slot::new(1, 0), - /// emission_index: 0, - /// sender: Address::from_str("A12dG5xP1RDEB5ocdHkymNVvvSJmUL9BgHwCksDowqmGWxfpm93x").unwrap(), - /// destination: Address::from_str("A12htxRWiEm8jDJpJptr6cwEhWNcCSFWstN1MLSa96DDkVM9Y42G").unwrap(), - /// handler: String::from("test"), - /// max_gas: 10000000, - /// gas_price: Amount::from_str("1").unwrap(), - /// coins: Amount::from_str("1").unwrap(), - /// validity_start: Slot::new(2, 0), - /// validity_end: Slot::new(3, 0), - /// data: vec![1, 2, 3, 4] - /// }; + /// let message = AsyncMessage::new_with_hash( + /// Slot::new(1, 0), + /// 0, + /// Address::from_str("A12dG5xP1RDEB5ocdHkymNVvvSJmUL9BgHwCksDowqmGWxfpm93x").unwrap(), + /// Address::from_str("A12htxRWiEm8jDJpJptr6cwEhWNcCSFWstN1MLSa96DDkVM9Y42G").unwrap(), + /// String::from("test"), + /// 10000000, + /// Amount::from_str("1").unwrap(), + /// Amount::from_str("1").unwrap(), + /// Slot::new(2, 0), + /// Slot::new(3, 0), + /// vec![1, 2, 3, 4], + /// ); /// let changes: AsyncPoolChanges = AsyncPoolChanges(vec![Change::Add(message.compute_id(), 
message)]); /// let mut serialized = Vec::new(); /// let serializer = AsyncPoolChangesSerializer::new(); @@ -141,19 +141,19 @@ impl Deserializer for AsyncPoolChangesDeserializer { /// use std::str::FromStr; /// use massa_async_pool::{AsyncMessage, Change, AsyncPoolChanges, AsyncPoolChangesSerializer, AsyncPoolChangesDeserializer}; /// - /// let message = AsyncMessage { - /// emission_slot: Slot::new(1, 0), - /// emission_index: 0, - /// sender: Address::from_str("A12dG5xP1RDEB5ocdHkymNVvvSJmUL9BgHwCksDowqmGWxfpm93x").unwrap(), - /// destination: Address::from_str("A12htxRWiEm8jDJpJptr6cwEhWNcCSFWstN1MLSa96DDkVM9Y42G").unwrap(), - /// handler: String::from("test"), - /// max_gas: 10000000, - /// gas_price: Amount::from_str("1").unwrap(), - /// coins: Amount::from_str("1").unwrap(), - /// validity_start: Slot::new(2, 0), - /// validity_end: Slot::new(3, 0), - /// data: vec![1, 2, 3, 4] - /// }; + /// let message = AsyncMessage::new_with_hash( + /// Slot::new(1, 0), + /// 0, + /// Address::from_str("A12dG5xP1RDEB5ocdHkymNVvvSJmUL9BgHwCksDowqmGWxfpm93x").unwrap(), + /// Address::from_str("A12htxRWiEm8jDJpJptr6cwEhWNcCSFWstN1MLSa96DDkVM9Y42G").unwrap(), + /// String::from("test"), + /// 10000000, + /// Amount::from_str("1").unwrap(), + /// Amount::from_str("1").unwrap(), + /// Slot::new(2, 0), + /// Slot::new(3, 0), + /// vec![1, 2, 3, 4], + /// ); /// let changes: AsyncPoolChanges = AsyncPoolChanges(vec![Change::Add(message.compute_id(), message)]); /// let mut serialized = Vec::new(); /// let serializer = AsyncPoolChangesSerializer::new(); diff --git a/massa-async-pool/src/lib.rs b/massa-async-pool/src/lib.rs index e278d78bbd5..8a2d8b222a4 100644 --- a/massa-async-pool/src/lib.rs +++ b/massa-async-pool/src/lib.rs @@ -16,7 +16,7 @@ //! //! ```json //! { -//! "sender": "xxxx", // address that sent the message and spent max_gas*gas_price+coins on emission +//! "sender": "xxxx", // address that sent the message and spent fee + coins on emission //! 
"slot": {"period": 123455, "thread": 11}, // slot at which the message was emitted //! "emission_index": 212, // index of the message emitted in this slot //! "destination": "xxxx", // target address @@ -24,7 +24,6 @@ //! "validity_start": {"period": 123456, "thread": 12}, // the message can be handled starting from the validity_start slot (included) //! "validity_end": {"period": 123457, "thread": 16}, // the message can be handled until the validity_end slot (excluded) //! "max_gas": 12334, // max gas available when the handler is called -//! "gas_price": "124.23", // gas price for the handler call //! "coins": "1111.11", // amount of coins to transfer to the destination address when calling its handler //! "data": { ... any object ... } // data payload of the message, passed as the sole parameter of the destination handler when called //! } @@ -32,17 +31,17 @@ //! //! ## How to send a message during bytecode execution //! -//! * messages are sent using an ABI: `send_message(target_address, target_handler, validity_start, validity_end, max_gas, gas_price, coins, data: JSON string) -> Result<(), ABIReturnError>`. Note that data has a configuration defined `max_async_message_data_size`. +//! * messages are sent using an ABI: `send_message(target_address, target_handler, validity_start, validity_end, max_gas, fee, coins, data: JSON string) -> Result<(), ABIReturnError>`. Note that data has a configuration defined `max_async_message_data_size`. //! * when called, this ABI does this: //! * it consumes `compute_gas_cost_of_message_storage(context.current_slot, validity_end_slot)` of gas in the current execution. This allows making the message emission more gas-consuming when it requires storing the message in queue for longer -//! * it consumes `max_gas * gas_price + coins` coins from the sender +//! * it consumes `fee + coins` coins from the sender //! * it generates an `AsyncMessage` and stores it in an asynchronous pool //! -//! 
Note that `max_gas*gas_price` coins are burned when sending the message. +//! Note that `fee + coins` coins are burned when sending the message. //! //! ## How is the `AsyncPool` handled //! ```md -//! * In the AsyncPool, Messages are kept sorted by `priority = AsyncMessageId(msg.max_gas * msg.gas_price, rev(msg.slot), rev(msg.emission_index))` +//! * In the AsyncPool, Messages are kept sorted by `priority = AsyncMessageId(rev(Ratio(msg.fee, max(msg.max_gas,1))), rev(msg.slot), rev(msg.emission_index))` //! //! * when an AsyncMessage is added to the AsyncPool: //! * if the AsyncPool length has exceeded config.max_async_pool_length: @@ -55,7 +54,7 @@ //! * credit target_address with M.coins //! * run the target handler function with M.payload as parameter and the context: //! * max_gas = M.max_gas -//! * gas_price = M.gas_price +//! * fee = M.fee //! * slot = S //! * call_stack = [M.target_address, M.sender_address] //! * on any failure, cancel all the effects of execution and credit M.coins back to the sender diff --git a/massa-async-pool/src/message.rs b/massa-async-pool/src/message.rs index 67f4fc31ed9..21e967efd28 100644 --- a/massa-async-pool/src/message.rs +++ b/massa-async-pool/src/message.rs @@ -2,6 +2,7 @@ //! This file defines the structure representing an asynchronous message +use massa_hash::Hash; use massa_models::address::AddressDeserializer; use massa_models::amount::{AmountDeserializer, AmountSerializer}; use massa_models::slot::{SlotDeserializer, SlotSerializer}; @@ -18,17 +19,17 @@ use nom::error::{context, ContextError, ParseError}; use nom::multi::length_data; use nom::sequence::tuple; use nom::{IResult, Parser}; +use num::rational::Ratio; use serde::{Deserialize, Serialize}; use std::ops::Bound::{Excluded, Included}; /// Unique identifier of a message. 
/// Also has the property of ordering by priority (highest first) following the triplet: -/// `(rev(max_gas*gas_price), emission_slot, emission_index)` -pub type AsyncMessageId = (std::cmp::Reverse, Slot, u64); +/// `(rev(Ratio(msg.fee, max(msg.max_gas,1))), emission_slot, emission_index)` +pub type AsyncMessageId = (std::cmp::Reverse>, Slot, u64); #[derive(Clone)] pub struct AsyncMessageIdSerializer { - amount_serializer: AmountSerializer, slot_serializer: SlotSerializer, u64_serializer: U64VarIntSerializer, } @@ -36,7 +37,6 @@ pub struct AsyncMessageIdSerializer { impl AsyncMessageIdSerializer { pub fn new() -> Self { Self { - amount_serializer: AmountSerializer::new(), slot_serializer: SlotSerializer::new(), u64_serializer: U64VarIntSerializer::new(), } @@ -58,19 +58,19 @@ impl Serializer for AsyncMessageIdSerializer { /// use std::str::FromStr; /// use massa_async_pool::{AsyncMessage, AsyncMessageId, AsyncMessageIdSerializer}; /// - /// let message = AsyncMessage { - /// emission_slot: Slot::new(1, 0), - /// emission_index: 0, - /// sender: Address::from_str("A12dG5xP1RDEB5ocdHkymNVvvSJmUL9BgHwCksDowqmGWxfpm93x").unwrap(), - /// destination: Address::from_str("A12htxRWiEm8jDJpJptr6cwEhWNcCSFWstN1MLSa96DDkVM9Y42G").unwrap(), - /// handler: String::from("test"), - /// max_gas: 10000000, - /// gas_price: Amount::from_str("1").unwrap(), - /// coins: Amount::from_str("1").unwrap(), - /// validity_start: Slot::new(2, 0), - /// validity_end: Slot::new(3, 0), - /// data: vec![1, 2, 3, 4] - /// }; + /// let message = AsyncMessage::new_with_hash( + /// Slot::new(1, 0), + /// 0, + /// Address::from_str("A12dG5xP1RDEB5ocdHkymNVvvSJmUL9BgHwCksDowqmGWxfpm93x").unwrap(), + /// Address::from_str("A12htxRWiEm8jDJpJptr6cwEhWNcCSFWstN1MLSa96DDkVM9Y42G").unwrap(), + /// String::from("test"), + /// 10000000, + /// Amount::from_str("1").unwrap(), + /// Amount::from_str("1").unwrap(), + /// Slot::new(2, 0), + /// Slot::new(3, 0), + /// vec![1, 2, 3, 4], + /// ); /// let id: 
AsyncMessageId = message.compute_id(); /// let mut serialized = Vec::new(); /// let serializer = AsyncMessageIdSerializer::new(); @@ -81,7 +81,8 @@ impl Serializer for AsyncMessageIdSerializer { value: &AsyncMessageId, buffer: &mut Vec, ) -> Result<(), massa_serialization::SerializeError> { - self.amount_serializer.serialize(&value.0 .0, buffer)?; + self.u64_serializer.serialize(value.0 .0.numer(), buffer)?; + self.u64_serializer.serialize(value.0 .0.denom(), buffer)?; self.slot_serializer.serialize(&value.1, buffer)?; self.u64_serializer.serialize(&value.2, buffer)?; Ok(()) @@ -90,26 +91,18 @@ impl Serializer for AsyncMessageIdSerializer { #[derive(Clone)] pub struct AsyncMessageIdDeserializer { - amount_deserializer: AmountDeserializer, slot_deserializer: SlotDeserializer, - emission_index_deserializer: U64VarIntDeserializer, + u64_deserializer: U64VarIntDeserializer, } impl AsyncMessageIdDeserializer { pub fn new(thread_count: u8) -> Self { Self { - amount_deserializer: AmountDeserializer::new( - Included(Amount::MIN), - Included(Amount::MAX), - ), slot_deserializer: SlotDeserializer::new( (Included(u64::MIN), Included(u64::MAX)), (Included(0), Excluded(thread_count)), ), - emission_index_deserializer: U64VarIntDeserializer::new( - Included(u64::MIN), - Included(u64::MAX), - ), + u64_deserializer: U64VarIntDeserializer::new(Included(u64::MIN), Included(u64::MAX)), } } } @@ -123,19 +116,19 @@ impl Deserializer for AsyncMessageIdDeserializer { /// use std::str::FromStr; /// use massa_async_pool::{AsyncMessage, AsyncMessageId, AsyncMessageIdSerializer, AsyncMessageIdDeserializer}; /// - /// let message = AsyncMessage { - /// emission_slot: Slot::new(1, 0), - /// emission_index: 0, - /// sender: Address::from_str("A12dG5xP1RDEB5ocdHkymNVvvSJmUL9BgHwCksDowqmGWxfpm93x").unwrap(), - /// destination: Address::from_str("A12htxRWiEm8jDJpJptr6cwEhWNcCSFWstN1MLSa96DDkVM9Y42G").unwrap(), - /// handler: String::from("test"), - /// max_gas: 10000000, - /// gas_price: 
Amount::from_str("1").unwrap(), - /// coins: Amount::from_str("1").unwrap(), - /// validity_start: Slot::new(2, 0), - /// validity_end: Slot::new(3, 0), - /// data: vec![1, 2, 3, 4] - /// }; + /// let message = AsyncMessage::new_with_hash( + /// Slot::new(1, 0), + /// 0, + /// Address::from_str("A12dG5xP1RDEB5ocdHkymNVvvSJmUL9BgHwCksDowqmGWxfpm93x").unwrap(), + /// Address::from_str("A12htxRWiEm8jDJpJptr6cwEhWNcCSFWstN1MLSa96DDkVM9Y42G").unwrap(), + /// String::from("test"), + /// 10000000, + /// Amount::from_str("1").unwrap(), + /// Amount::from_str("1").unwrap(), + /// Slot::new(2, 0), + /// Slot::new(3, 0), + /// vec![1, 2, 3, 4], + /// ); /// let id: AsyncMessageId = message.compute_id(); /// let mut serialized = Vec::new(); /// let serializer = AsyncMessageIdSerializer::new(); @@ -152,18 +145,21 @@ impl Deserializer for AsyncMessageIdDeserializer { context( "Failed AsyncMessageId deserialization", tuple(( - context("Failed gas_price deserialization", |input| { - self.amount_deserializer.deserialize(input) + context("Failed fee deserialization", |input| { + self.u64_deserializer.deserialize(input) + }), + context("Failed denum deserialization", |input| { + self.u64_deserializer.deserialize(input) }), context("Failed emission_slot deserialization", |input| { self.slot_deserializer.deserialize(input) }), context("Failed emission_index deserialization", |input| { - self.emission_index_deserializer.deserialize(input) + self.u64_deserializer.deserialize(input) }), )), ) - .map(|(amount, slot, index)| (std::cmp::Reverse(amount), slot, index)) + .map(|(fee, denom, slot, index)| (std::cmp::Reverse(Ratio::new(fee, denom)), slot, index)) .parse(buffer) } } @@ -190,9 +186,8 @@ pub struct AsyncMessage { /// Maximum gas to use when processing the message pub max_gas: u64, - /// Gas price to take into account when executing the message. - /// `max_gas * gas_price` are burned by the sender when the message is sent. 
- pub gas_price: Amount, + /// Fee paid by the sender when the message is processed. + pub fee: Amount, /// Coins sent from the sender to the target address of the message. /// Those coins are spent by the sender address when the message is sent, @@ -208,14 +203,56 @@ pub struct AsyncMessage { /// Raw payload data of the message pub data: Vec, + + /// Hash of the message + pub hash: Hash, } impl AsyncMessage { + #[allow(clippy::too_many_arguments)] + /// Take an `AsyncMessage` and return it with its hash computed + pub fn new_with_hash( + emission_slot: Slot, + emission_index: u64, + sender: Address, + destination: Address, + handler: String, + max_gas: u64, + fee: Amount, + coins: Amount, + validity_start: Slot, + validity_end: Slot, + data: Vec, + ) -> Self { + let async_message_ser = AsyncMessageSerializer::new(); + let mut buffer = Vec::new(); + let mut message = AsyncMessage { + emission_slot, + emission_index, + sender, + destination, + handler, + max_gas, + fee, + coins, + validity_start, + validity_end, + data, + // placeholder hash to serialize the message, replaced below + hash: Hash::from_bytes(&[0; 32]), + }; + async_message_ser + .serialize(&message, &mut buffer) + .expect("critical: asynchronous message serialization should never fail here"); + message.hash = Hash::compute_from(&buffer); + message + } + /// Compute the ID of the message for use when choosing which operations to keep in priority (highest score) on pool overflow. 
- /// For now, the formula is simply `score = (gas_price * max_gas, rev(emission_slot), rev(emission_index))` pub fn compute_id(&self) -> AsyncMessageId { + let denom = if self.max_gas > 0 { self.max_gas } else { 1 }; ( - std::cmp::Reverse(self.gas_price.saturating_mul_u64(self.max_gas)), + std::cmp::Reverse(Ratio::new(self.fee.to_raw(), denom)), self.emission_slot, self.emission_index, ) @@ -253,19 +290,20 @@ impl Serializer for AsyncMessageSerializer { /// use massa_models::{address::Address, amount::Amount, slot::Slot}; /// use massa_serialization::Serializer; /// use std::str::FromStr; - /// let message = AsyncMessage { - /// emission_slot: Slot::new(1, 0), - /// emission_index: 0, - /// sender: Address::from_str("A12dG5xP1RDEB5ocdHkymNVvvSJmUL9BgHwCksDowqmGWxfpm93x").unwrap(), - /// destination: Address::from_str("A12htxRWiEm8jDJpJptr6cwEhWNcCSFWstN1MLSa96DDkVM9Y42G").unwrap(), - /// handler: String::from("test"), - /// max_gas: 10000000, - /// gas_price: Amount::from_str("1").unwrap(), - /// coins: Amount::from_str("1").unwrap(), - /// validity_start: Slot::new(2, 0), - /// validity_end: Slot::new(3, 0), - /// data: vec![1, 2, 3, 4] - /// }; + /// + /// let message = AsyncMessage::new_with_hash( + /// Slot::new(1, 0), + /// 0, + /// Address::from_str("A12dG5xP1RDEB5ocdHkymNVvvSJmUL9BgHwCksDowqmGWxfpm93x").unwrap(), + /// Address::from_str("A12htxRWiEm8jDJpJptr6cwEhWNcCSFWstN1MLSa96DDkVM9Y42G").unwrap(), + /// String::from("test"), + /// 10000000, + /// Amount::from_str("1").unwrap(), + /// Amount::from_str("1").unwrap(), + /// Slot::new(2, 0), + /// Slot::new(3, 0), + /// vec![1, 2, 3, 4], + /// ); /// let mut buffer = Vec::new(); /// let message_serializer = AsyncMessageSerializer::new(); /// message_serializer.serialize(&message, &mut buffer).unwrap(); @@ -290,7 +328,7 @@ impl Serializer for AsyncMessageSerializer { buffer.extend(handler_bytes); self.u64_serializer.serialize(&value.max_gas, buffer)?; - self.amount_serializer.serialize(&value.gas_price, 
buffer)?; + self.amount_serializer.serialize(&value.fee, buffer)?; self.amount_serializer.serialize(&value.coins, buffer)?; self.slot_serializer .serialize(&value.validity_start, buffer)?; @@ -342,19 +380,20 @@ impl Deserializer for AsyncMessageDeserializer { /// use massa_models::{address::Address, amount::Amount, slot::Slot}; /// use massa_serialization::{Serializer, Deserializer, DeserializeError}; /// use std::str::FromStr; - /// let message = AsyncMessage { - /// emission_slot: Slot::new(1, 0), - /// emission_index: 0, - /// sender: Address::from_str("A12dG5xP1RDEB5ocdHkymNVvvSJmUL9BgHwCksDowqmGWxfpm93x").unwrap(), - /// destination: Address::from_str("A12htxRWiEm8jDJpJptr6cwEhWNcCSFWstN1MLSa96DDkVM9Y42G").unwrap(), - /// handler: String::from("test"), - /// max_gas: 10000000, - /// gas_price: Amount::from_str("1").unwrap(), - /// coins: Amount::from_str("1").unwrap(), - /// validity_start: Slot::new(2, 0), - /// validity_end: Slot::new(3, 0), - /// data: vec![1, 2, 3, 4] - /// }; + /// + /// let message = AsyncMessage::new_with_hash( + /// Slot::new(1, 0), + /// 0, + /// Address::from_str("A12dG5xP1RDEB5ocdHkymNVvvSJmUL9BgHwCksDowqmGWxfpm93x").unwrap(), + /// Address::from_str("A12htxRWiEm8jDJpJptr6cwEhWNcCSFWstN1MLSa96DDkVM9Y42G").unwrap(), + /// String::from("test"), + /// 10000000, + /// Amount::from_str("1").unwrap(), + /// Amount::from_str("1").unwrap(), + /// Slot::new(2, 0), + /// Slot::new(3, 0), + /// vec![1, 2, 3, 4], + /// ); /// let message_serializer = AsyncMessageSerializer::new(); /// let mut serialized = Vec::new(); /// message_serializer.serialize(&message, &mut serialized).unwrap(); @@ -362,6 +401,7 @@ impl Deserializer for AsyncMessageDeserializer { /// let (rest, message_deserialized) = message_deserializer.deserialize::(&serialized).unwrap(); /// assert!(rest.is_empty()); /// assert_eq!(message, message_deserialized); + /// assert_eq!(message.hash, message_deserialized.hash); /// ``` fn deserialize<'a, E: ParseError<&'a [u8]> + 
ContextError<&'a [u8]>>( &self, @@ -403,7 +443,7 @@ impl Deserializer for AsyncMessageDeserializer { context("Failed max_gas deserialization", |input| { self.max_gas_deserializer.deserialize(input) }), - context("Failed gas_price deserialization", |input| { + context("Failed fee deserialization", |input| { self.amount_deserializer.deserialize(input) }), context("Failed coins deserialization", |input| { @@ -428,23 +468,25 @@ impl Deserializer for AsyncMessageDeserializer { destination, handler, max_gas, - gas_price, - coins, - validity_start, - validity_end, - data, - )| AsyncMessage { - emission_slot, - emission_index, - sender, - destination, - handler, - max_gas, - gas_price, + fee, coins, validity_start, validity_end, data, + )| { + AsyncMessage::new_with_hash( + emission_slot, + emission_index, + sender, + destination, + handler, + max_gas, + fee, + coins, + validity_start, + validity_end, + data, + ) }, ) .parse(buffer) @@ -466,21 +508,19 @@ mod tests { #[test] fn bad_serialization_version() { - let message = AsyncMessage { - emission_slot: Slot::new(1, 2), - emission_index: 0, - sender: Address::from_str("A12dG5xP1RDEB5ocdHkymNVvvSJmUL9BgHwCksDowqmGWxfpm93x") - .unwrap(), - destination: Address::from_str("A12htxRWiEm8jDJpJptr6cwEhWNcCSFWstN1MLSa96DDkVM9Y42G") - .unwrap(), - handler: String::from("test"), - max_gas: 10000000, - gas_price: Amount::from_str("1").unwrap(), - coins: Amount::from_str("1").unwrap(), - validity_start: Slot::new(2, 0), - validity_end: Slot::new(3, 0), - data: vec![1, 2, 3, 4], - }; + let message = AsyncMessage::new_with_hash( + Slot::new(1, 2), + 0, + Address::from_str("A12dG5xP1RDEB5ocdHkymNVvvSJmUL9BgHwCksDowqmGWxfpm93x").unwrap(), + Address::from_str("A12htxRWiEm8jDJpJptr6cwEhWNcCSFWstN1MLSa96DDkVM9Y42G").unwrap(), + String::from("test"), + 10000000, + Amount::from_str("1").unwrap(), + Amount::from_str("1").unwrap(), + Slot::new(2, 0), + Slot::new(3, 0), + vec![1, 2, 3, 4], + ); let message_serializer = 
AsyncMessageSerializer::new(); let mut serialized = Vec::new(); message_serializer diff --git a/massa-async-pool/src/pool.rs b/massa-async-pool/src/pool.rs index 577ffc5651b..eb35656248f 100644 --- a/massa-async-pool/src/pool.rs +++ b/massa-async-pool/src/pool.rs @@ -9,6 +9,7 @@ use crate::{ AsyncMessageDeserializer, AsyncMessageIdDeserializer, AsyncMessageIdSerializer, AsyncMessageSerializer, }; +use massa_hash::{Hash, HASH_SIZE_BYTES}; use massa_models::{slot::Slot, streaming_step::StreamingStep}; use massa_serialization::{ Deserializer, SerializeError, Serializer, U64VarIntDeserializer, U64VarIntSerializer, @@ -22,16 +23,21 @@ use nom::{ use std::collections::BTreeMap; use std::ops::Bound::{Excluded, Included, Unbounded}; +const ASYNC_POOL_HASH_INITIAL_BYTES: &[u8; 32] = &[0; HASH_SIZE_BYTES]; + /// Represents a pool of sorted messages in a deterministic way. /// The final asynchronous pool is attached to the output of the latest final slot within the context of massa-final-state. /// Nodes must bootstrap the final message pool when they join the network. 
-#[derive(Debug, Clone)] +#[derive(Clone)] pub struct AsyncPool { /// Asynchronous pool configuration config: AsyncPoolConfig, /// Messages sorted by decreasing ID (decreasing priority) pub(crate) messages: BTreeMap, + + /// Hash of the asynchronous pool + pub hash: Hash, } impl AsyncPool { @@ -40,6 +46,7 @@ impl AsyncPool { AsyncPool { config, messages: Default::default(), + hash: Hash::from_bytes(ASYNC_POOL_HASH_INITIAL_BYTES), } } @@ -52,13 +59,17 @@ impl AsyncPool { for change in changes.0.iter() { match change { // add a new message to the pool - Change::Add(msg_id, msg) => { - self.messages.insert(*msg_id, msg.clone()); + Change::Add(message_id, message) => { + if self.messages.insert(*message_id, message.clone()).is_none() { + self.hash ^= message.hash; + } } // delete a message from the pool - Change::Delete(msg_id) => { - self.messages.remove(msg_id); + Change::Delete(message_id) => { + if let Some(removed_message) = self.messages.remove(message_id) { + self.hash ^= removed_message.hash; + } } } } @@ -123,13 +134,13 @@ impl AsyncPool { // gather all selected items and remove them from self.messages // iterate in decreasing priority order self.messages - .drain_filter(|_, msg| { + .drain_filter(|_, message| { // check available gas and validity period - if available_gas >= msg.max_gas - && slot >= msg.validity_start - && slot < msg.validity_end + if available_gas >= message.max_gas + && slot >= message.validity_start + && slot < message.validity_end { - available_gas -= msg.max_gas; + available_gas -= message.max_gas; true } else { false @@ -157,7 +168,7 @@ impl AsyncPool { let left_bound = match cursor { StreamingStep::Started => Unbounded, StreamingStep::Ongoing(last_id) => Excluded(last_id), - StreamingStep::Finished => return (pool_part, cursor), + StreamingStep::Finished(_) => return (pool_part, cursor), }; let mut pool_part_last_id: Option = None; for (id, message) in self.messages.range((left_bound, Unbounded)) { @@ -171,7 +182,7 @@ impl AsyncPool { 
if let Some(last_id) = pool_part_last_id { (pool_part, StreamingStep::Ongoing(last_id)) } else { - (pool_part, StreamingStep::Finished) + (pool_part, StreamingStep::Finished(None)) } } @@ -187,11 +198,15 @@ impl AsyncPool { &mut self, part: BTreeMap, ) -> StreamingStep { - self.messages.extend(part); + for (message_id, message) in part { + if self.messages.insert(message_id, message.clone()).is_none() { + self.hash ^= message.hash; + } + } if let Some(message_id) = self.messages.last_key_value().map(|(&id, _)| id) { StreamingStep::Ongoing(message_id) } else { - StreamingStep::Finished + StreamingStep::Finished(None) } } } @@ -308,28 +323,22 @@ fn test_take_batch() { let mut pool = AsyncPool::new(config); let address = Address(Hash::compute_from(b"abc")); for i in 1..10 { - pool.messages.insert( - ( - std::cmp::Reverse(Amount::from_mantissa_scale(i, 0)), - Slot::new(0, 0), - 0, - ), - AsyncMessage { - emission_slot: Slot::new(0, 0), - emission_index: 0, - sender: address, - destination: address, - handler: "function".to_string(), - validity_start: Slot::new(1, 0), - validity_end: Slot::new(3, 0), - max_gas: i, - gas_price: Amount::from_str("0.1").unwrap(), - coins: Amount::from_str("0.3").unwrap(), - data: Vec::new(), - }, + let message = AsyncMessage::new_with_hash( + Slot::new(0, 0), + 0, + address, + address, + "function".to_string(), + i, + Amount::from_str("0.1").unwrap(), + Amount::from_str("0.3").unwrap(), + Slot::new(1, 0), + Slot::new(3, 0), + Vec::new(), ); + pool.messages.insert(message.compute_id(), message); } assert_eq!(pool.messages.len(), 9); pool.take_batch_to_execute(Slot::new(2, 0), 19); - assert_eq!(pool.messages.len(), 6); + assert_eq!(pool.messages.len(), 4); } diff --git a/massa-async-pool/src/test_exports/bootstrap.rs b/massa-async-pool/src/test_exports/bootstrap.rs index 36a44a2f321..a0d4175d32c 100644 --- a/massa-async-pool/src/test_exports/bootstrap.rs +++ b/massa-async-pool/src/test_exports/bootstrap.rs @@ -1,8 +1,8 @@ // Copyright (c) 
2022 MASSA LABS -use std::{cmp::Reverse, collections::BTreeMap, str::FromStr}; +use std::{collections::BTreeMap, str::FromStr}; -use crate::{AsyncMessage, AsyncPool, AsyncPoolConfig}; +use crate::{AsyncMessage, AsyncMessageId, AsyncPool, AsyncPoolConfig}; use massa_models::{address::Address, amount::Amount, config::THREAD_COUNT, slot::Slot}; use massa_signature::KeyPair; use rand::Rng; @@ -12,7 +12,7 @@ use rand::Rng; /// Creates a `AsyncPool` from pre-set values pub fn create_async_pool( config: AsyncPoolConfig, - messages: BTreeMap<(Reverse, Slot, u64), AsyncMessage>, + messages: BTreeMap, ) -> AsyncPool { let mut async_pool = AsyncPool::new(config); async_pool.messages = messages; @@ -24,21 +24,21 @@ fn get_random_address() -> Address { Address::from_public_key(&keypair.get_public_key()) } -pub fn get_random_message() -> AsyncMessage { +pub fn get_random_message(fee: Option) -> AsyncMessage { let mut rng = rand::thread_rng(); - AsyncMessage { - emission_slot: Slot::new(rng.gen_range(0..100_000), rng.gen_range(0..THREAD_COUNT)), - emission_index: 0, - sender: get_random_address(), - destination: get_random_address(), - handler: String::from("test"), - max_gas: 10_000, - gas_price: Amount::from_str("100").unwrap(), - coins: Amount::from_str("100").unwrap(), - validity_start: Slot::new(2, 0), - validity_end: Slot::new(4, 0), - data: vec![1, 2, 3], - } + AsyncMessage::new_with_hash( + Slot::new(rng.gen_range(0..100_000), rng.gen_range(0..THREAD_COUNT)), + 0, + get_random_address(), + get_random_address(), + String::from("test"), + 10_000, + fee.unwrap_or_default(), + Amount::from_str("100").unwrap(), + Slot::new(2, 0), + Slot::new(4, 0), + vec![1, 2, 3], + ) } /// Asserts that two instances of `AsyncMessage` are the same @@ -52,7 +52,7 @@ pub fn assert_eq_async_message(v1: &AsyncMessage, v2: &AsyncMessage) { assert_eq!(v1.destination, v2.destination, "destination mismatch"); assert_eq!(v1.handler, v2.handler, "handler mismatch"); assert_eq!(v1.max_gas, v2.max_gas, 
"max_gas mismatch"); - assert_eq!(v1.gas_price, v2.gas_price, "gas_price mismatch"); + assert_eq!(v1.fee, v2.fee, "fee mismatch"); assert_eq!(v1.coins, v2.coins, "coins mismatch"); assert_eq!( v1.validity_start, v2.validity_start, diff --git a/massa-bootstrap/Cargo.toml b/massa-bootstrap/Cargo.toml index 2cdcea04c2d..e5a89d4abdd 100644 --- a/massa-bootstrap/Cargo.toml +++ b/massa-bootstrap/Cargo.toml @@ -28,7 +28,6 @@ massa_async_pool = { path = "../massa-async-pool" } massa_consensus_exports = { path = "../massa-consensus-exports" } massa_executed_ops = { path = "../massa-executed-ops" } massa_final_state = { path = "../massa-final-state" } -massa_graph = { path = "../massa-graph" } massa_hash = { path = "../massa-hash" } massa_ledger_exports = { path = "../massa-ledger-exports" } massa_logging = { path = "../massa-logging" } @@ -49,6 +48,9 @@ massa_ledger_worker = { path = "../massa-ledger-worker", features = [ ] } massa_pos_worker = { path = "../massa-pos-worker", features = ["testing"] } massa_pos_exports = { path = "../massa-pos-exports", features = ["testing"] } +massa_consensus_exports = { path = "../massa-consensus-exports", features = [ + "testing", +] } lazy_static = "1.4" tempfile = "3.3" @@ -58,11 +60,11 @@ tempfile = "3.3" testing = [ "massa_final_state/testing", "massa_ledger_worker/testing", + "massa_consensus_exports/testing", "massa_async_pool/testing", ] sandbox = [ "massa_async_pool/sandbox", - "massa_consensus_exports/sandbox", "massa_final_state/sandbox", "massa_models/sandbox", ] diff --git a/massa-bootstrap/src/client.rs b/massa-bootstrap/src/client.rs index fb469ee2dfc..e0a1b36733e 100644 --- a/massa-bootstrap/src/client.rs +++ b/massa-bootstrap/src/client.rs @@ -23,13 +23,13 @@ use crate::{ /// This function will send the starting point to receive a stream of the ledger and will receive and process each part until receive a `BootstrapServerMessage::FinalStateFinished` message from the server. 
/// `next_bootstrap_message` passed as parameter must be `BootstrapClientMessage::AskFinalStatePart` enum variant. /// `next_bootstrap_message` will be updated after receiving each part so that in case of connection lost we can restart from the last message we processed. -async fn stream_final_state( +async fn stream_final_state_and_consensus( cfg: &BootstrapConfig, client: &mut BootstrapClientBinder, next_bootstrap_message: &mut BootstrapClientMessage, global_bootstrap_state: &mut GlobalBootstrapState, ) -> Result<(), BootstrapError> { - if let BootstrapClientMessage::AskFinalStatePart { .. } = &next_bootstrap_message { + if let BootstrapClientMessage::AskBootstrapPart { .. } = &next_bootstrap_message { match tokio::time::timeout( cfg.write_timeout.into(), client.send(next_bootstrap_message), @@ -57,7 +57,7 @@ async fn stream_final_state( Ok(Ok(msg)) => msg, }; match msg { - BootstrapServerMessage::FinalStatePart { + BootstrapServerMessage::BootstrapPart { slot, ledger_part, async_pool_part, @@ -65,7 +65,10 @@ async fn stream_final_state( pos_credits_part, exec_ops_part, final_state_changes, + consensus_part, + consensus_outdated_ids, } => { + // Set final state let mut write_final_state = global_bootstrap_state.final_state.write(); let last_ledger_step = write_final_state.ledger.set_ledger_part(ledger_part)?; let last_pool_step = @@ -100,15 +103,42 @@ async fn stream_final_state( } } write_final_state.slot = slot; + + // Set consensus blocks + if let Some(graph) = global_bootstrap_state.graph.as_mut() { + // Extend the final blocks with the received part + graph.final_blocks.extend(consensus_part.final_blocks); + // Remove every outdated block + graph.final_blocks.retain(|block_export| { + !consensus_outdated_ids.contains(&block_export.block.id) + }); + } else { + global_bootstrap_state.graph = Some(consensus_part); + } + let last_consensus_step = StreamingStep::Ongoing( + // Note that this unwrap call is safe because of the above conditional statement + 
global_bootstrap_state + .graph + .as_ref() + .unwrap() + .final_blocks + .iter() + .map(|b_export| b_export.block.id) + .collect(), + ); + // Set new message in case of disconnection - *next_bootstrap_message = BootstrapClientMessage::AskFinalStatePart { + *next_bootstrap_message = BootstrapClientMessage::AskBootstrapPart { last_slot: Some(slot), last_ledger_step, last_pool_step, last_cycle_step, last_credits_step, last_ops_step, + last_consensus_step, }; + + // Logs for an easier diagnostic if needed debug!( "client final state bootstrap cursors: {:?}", next_bootstrap_message @@ -118,7 +148,7 @@ async fn stream_final_state( final_state_changes.len() ); } - BootstrapServerMessage::FinalStateFinished => { + BootstrapServerMessage::BootstrapFinished => { info!("State bootstrap complete"); // Set next bootstrap message *next_bootstrap_message = BootstrapClientMessage::AskBootstrapPeers; @@ -126,13 +156,14 @@ async fn stream_final_state( } BootstrapServerMessage::SlotTooOld => { info!("Slot is too old retry bootstrap from scratch"); - *next_bootstrap_message = BootstrapClientMessage::AskFinalStatePart { + *next_bootstrap_message = BootstrapClientMessage::AskBootstrapPart { last_slot: None, last_ledger_step: StreamingStep::Started, last_pool_step: StreamingStep::Started, last_cycle_step: StreamingStep::Started, last_credits_step: StreamingStep::Started, last_ops_step: StreamingStep::Started, + last_consensus_step: StreamingStep::Started, }; panic!("Bootstrap failed, try to bootstrap again."); } @@ -268,9 +299,14 @@ async fn bootstrap_from_server( // Loop to ask data to the server depending on the last message we sent loop { match next_bootstrap_message { - BootstrapClientMessage::AskFinalStatePart { .. } => { - stream_final_state(cfg, client, next_bootstrap_message, global_bootstrap_state) - .await?; + BootstrapClientMessage::AskBootstrapPart { .. 
} => { + stream_final_state_and_consensus( + cfg, + client, + next_bootstrap_message, + global_bootstrap_state, + ) + .await?; } BootstrapClientMessage::AskBootstrapPeers => { let peers = match send_client_message( @@ -289,36 +325,9 @@ async fn bootstrap_from_server( other => return Err(BootstrapError::UnexpectedServerMessage(other)), }; global_bootstrap_state.peers = Some(peers); - *next_bootstrap_message = BootstrapClientMessage::AskConsensusState; - } - BootstrapClientMessage::AskConsensusState => { - let state = match send_client_message( - next_bootstrap_message, - client, - write_timeout, - cfg.read_timeout.into(), - "ask consensus state timed out", - ) - .await? - { - BootstrapServerMessage::ConsensusState { graph } => graph, - BootstrapServerMessage::BootstrapError { error } => { - return Err(BootstrapError::ReceivedError(error)) - } - other => return Err(BootstrapError::UnexpectedServerMessage(other)), - }; - global_bootstrap_state.graph = Some(state); *next_bootstrap_message = BootstrapClientMessage::BootstrapSuccess; } BootstrapClientMessage::BootstrapSuccess => { - if global_bootstrap_state.graph.is_none() { - *next_bootstrap_message = BootstrapClientMessage::AskConsensusState; - continue; - } - if global_bootstrap_state.peers.is_none() { - *next_bootstrap_message = BootstrapClientMessage::AskBootstrapPeers; - continue; - } match tokio::time::timeout(write_timeout, client.send(next_bootstrap_message)).await { Err(_) => Err(std::io::Error::new( @@ -389,12 +398,7 @@ async fn connect_to_server( bootstrap_config.max_async_pool_changes, bootstrap_config.max_async_pool_length, bootstrap_config.max_async_message_data, - bootstrap_config.max_function_name_length, - bootstrap_config.max_parameters_size, bootstrap_config.max_ledger_changes_count, - bootstrap_config.max_op_datastore_entry_count, - bootstrap_config.max_op_datastore_key_length, - bootstrap_config.max_op_datastore_value_length, bootstrap_config.max_changes_slot_count, 
bootstrap_config.max_rolls_length, bootstrap_config.max_production_stats_length, @@ -444,13 +448,14 @@ pub async fn get_state( let mut shuffled_list = bootstrap_config.bootstrap_list.clone(); shuffled_list.shuffle(&mut StdRng::from_entropy()); let mut next_bootstrap_message: BootstrapClientMessage = - BootstrapClientMessage::AskFinalStatePart { + BootstrapClientMessage::AskBootstrapPart { last_slot: None, last_ledger_step: StreamingStep::Started, last_pool_step: StreamingStep::Started, last_cycle_step: StreamingStep::Started, last_credits_step: StreamingStep::Started, last_ops_step: StreamingStep::Started, + last_consensus_step: StreamingStep::Started, }; let mut global_bootstrap_state = GlobalBootstrapState::new(final_state.clone()); loop { diff --git a/massa-bootstrap/src/client_binder.rs b/massa-bootstrap/src/client_binder.rs index 59252eb9d75..5dfb18454a0 100644 --- a/massa-bootstrap/src/client_binder.rs +++ b/massa-bootstrap/src/client_binder.rs @@ -39,12 +39,7 @@ pub struct BootstrapClientBinder { max_async_pool_changes: u64, max_async_pool_length: u64, max_async_message_data: u64, - max_function_name_length: u16, - max_parameters_size: u32, max_ledger_changes_count: u64, - max_op_datastore_entry_count: u64, - max_op_datastore_key_length: u8, - max_op_datastore_value_length: u64, max_changes_slot_count: u64, max_rolls_length: u64, max_production_stats_length: u64, @@ -79,12 +74,7 @@ impl BootstrapClientBinder { max_async_pool_changes: u64, max_async_pool_length: u64, max_async_message_data: u64, - max_function_name_length: u16, - max_parameters_size: u32, max_ledger_changes_count: u64, - max_op_datastore_entry_count: u64, - max_op_datastore_key_length: u8, - max_op_datastore_value_length: u64, max_changes_slot_count: u64, max_rolls_length: u64, max_production_stats_length: u64, @@ -114,12 +104,7 @@ impl BootstrapClientBinder { max_async_pool_changes, max_async_pool_length, max_async_message_data, - max_function_name_length, - max_parameters_size, 
max_ledger_changes_count, - max_op_datastore_entry_count, - max_op_datastore_key_length, - max_op_datastore_value_length, max_changes_slot_count, max_rolls_length, max_production_stats_length, @@ -183,12 +168,7 @@ impl BootstrapClientBinder { self.max_datastore_key_length, self.max_datastore_value_length, self.max_datastore_entry_count, - self.max_function_name_length, - self.max_parameters_size, self.max_bootstrap_error_length, - self.max_op_datastore_entry_count, - self.max_op_datastore_key_length, - self.max_op_datastore_value_length, self.max_changes_slot_count, self.max_rolls_length, self.max_production_stats_length, diff --git a/massa-bootstrap/src/error.rs b/massa-bootstrap/src/error.rs index d072dccb0f6..5783ccce87a 100644 --- a/massa-bootstrap/src/error.rs +++ b/massa-bootstrap/src/error.rs @@ -30,12 +30,12 @@ pub enum BootstrapError { UnexpectedConnectionDrop, /// `massa_hash` error: {0} MassaHashError(#[from] MassaHashError), + /// `massa_consensus` error: {0} + MassaConsensusError(#[from] ConsensusError), /// `massa_signature` error {0} MassaSignatureError(#[from] massa_signature::MassaSignatureError), /// time error: {0} TimeError(#[from] TimeError), - /// consensus error: {0} - ConsensusError(#[from] ConsensusError), /// network error: {0} NetworkError(#[from] NetworkError), /// final state error: {0} diff --git a/massa-bootstrap/src/lib.rs b/massa-bootstrap/src/lib.rs index 248d23fadbe..33889ae8546 100644 --- a/massa-bootstrap/src/lib.rs +++ b/massa-bootstrap/src/lib.rs @@ -14,8 +14,8 @@ #![feature(let_chains)] pub use establisher::types::Establisher; +use massa_consensus_exports::bootstrapable_graph::BootstrapableGraph; use massa_final_state::FinalState; -use massa_graph::BootstrapableGraph; use massa_network_exports::BootstrapPeers; use parking_lot::RwLock; use std::sync::Arc; @@ -43,26 +43,26 @@ pub mod tests; /// a collection of the bootstrap state snapshots of all relevant modules pub struct GlobalBootstrapState { + /// state of the final state 
+ pub final_state: Arc>, + /// state of the consensus graph pub graph: Option, - /// timestamp correction in milliseconds - pub compensation_millis: i64, - /// list of network peers pub peers: Option, - /// state of the final state - pub final_state: Arc>, + /// timestamp correction in milliseconds + pub compensation_millis: i64, } impl GlobalBootstrapState { fn new(final_state: Arc>) -> Self { Self { + final_state, graph: None, - compensation_millis: Default::default(), peers: None, - final_state, + compensation_millis: Default::default(), } } } diff --git a/massa-bootstrap/src/messages.rs b/massa-bootstrap/src/messages.rs index a169e9d0571..bd563bc1aac 100644 --- a/massa-bootstrap/src/messages.rs +++ b/massa-bootstrap/src/messages.rs @@ -4,15 +4,18 @@ use massa_async_pool::{ AsyncMessage, AsyncMessageId, AsyncMessageIdDeserializer, AsyncMessageIdSerializer, AsyncPoolDeserializer, AsyncPoolSerializer, }; -use massa_executed_ops::{ExecutedOpsDeserializer, ExecutedOpsSerializer}; -use massa_final_state::{StateChanges, StateChangesDeserializer, StateChangesSerializer}; -use massa_graph::{ +use massa_consensus_exports::bootstrapable_graph::{ BootstrapableGraph, BootstrapableGraphDeserializer, BootstrapableGraphSerializer, }; +use massa_executed_ops::{ExecutedOpsDeserializer, ExecutedOpsSerializer}; +use massa_final_state::{StateChanges, StateChangesDeserializer, StateChangesSerializer}; use massa_ledger_exports::{KeyDeserializer, KeySerializer}; +use massa_models::block::{BlockId, BlockIdDeserializer, BlockIdSerializer}; use massa_models::operation::OperationId; use massa_models::prehash::PreHashSet; -use massa_models::serialization::{VecU8Deserializer, VecU8Serializer}; +use massa_models::serialization::{ + PreHashSetDeserializer, PreHashSetSerializer, VecU8Deserializer, VecU8Serializer, +}; use massa_models::slot::{Slot, SlotDeserializer, SlotSerializer}; use massa_models::streaming_step::{ StreamingStep, StreamingStepDeserializer, StreamingStepSerializer, @@ -57,13 
+60,8 @@ pub enum BootstrapServerMessage { /// Server peers peers: BootstrapPeers, }, - /// Consensus state - ConsensusState { - /// block graph - graph: BootstrapableGraph, - }, - /// Part of the final state - FinalStatePart { + /// Part of final state and consensus + BootstrapPart { /// Slot the state changes are attached to slot: Slot, /// Part of the execution ledger sent in a serialized way @@ -78,9 +76,13 @@ pub enum BootstrapServerMessage { exec_ops_part: BTreeMap>, /// Ledger change for addresses inferior to `address` of the client message until the actual slot. final_state_changes: Vec<(Slot, StateChanges)>, + /// Part of the consensus graph + consensus_part: BootstrapableGraph, + /// Outdated block ids in the current consensus graph bootstrap + consensus_outdated_ids: PreHashSet, }, - /// Message sent when there is no state part left - FinalStateFinished, + /// Message sent when the final state and consensus bootstrap are finished + BootstrapFinished, /// Slot sent to get state changes is too old SlotTooOld, /// Bootstrap error @@ -95,11 +97,10 @@ pub enum BootstrapServerMessage { enum MessageServerTypeId { BootstrapTime = 0u32, Peers = 1u32, - ConsensusState = 2u32, - FinalStatePart = 3u32, - FinalStateFinished = 4u32, - SlotTooOld = 5u32, - BootstrapError = 6u32, + FinalStatePart = 2u32, + FinalStateFinished = 3u32, + SlotTooOld = 4u32, + BootstrapError = 5u32, } /// Serializer for `BootstrapServerMessage` @@ -111,6 +112,7 @@ pub struct BootstrapServerMessageSerializer { peers_serializer: BootstrapPeersSerializer, state_changes_serializer: StateChangesSerializer, bootstrapable_graph_serializer: BootstrapableGraphSerializer, + block_id_set_serializer: PreHashSetSerializer, vec_u8_serializer: VecU8Serializer, slot_serializer: SlotSerializer, async_pool_serializer: AsyncPoolSerializer, @@ -136,6 +138,7 @@ impl BootstrapServerMessageSerializer { peers_serializer: BootstrapPeersSerializer::new(), state_changes_serializer: StateChangesSerializer::new(), 
bootstrapable_graph_serializer: BootstrapableGraphSerializer::new(), + block_id_set_serializer: PreHashSetSerializer::new(BlockIdSerializer::new()), vec_u8_serializer: VecU8Serializer::new(), slot_serializer: SlotSerializer::new(), async_pool_serializer: AsyncPoolSerializer::new(), @@ -183,13 +186,7 @@ impl Serializer for BootstrapServerMessageSerializer { .serialize(&u32::from(MessageServerTypeId::Peers), buffer)?; self.peers_serializer.serialize(peers, buffer)?; } - BootstrapServerMessage::ConsensusState { graph } => { - self.u32_serializer - .serialize(&u32::from(MessageServerTypeId::ConsensusState), buffer)?; - self.bootstrapable_graph_serializer - .serialize(graph, buffer)?; - } - BootstrapServerMessage::FinalStatePart { + BootstrapServerMessage::BootstrapPart { slot, ledger_part, async_pool_part, @@ -197,6 +194,8 @@ impl Serializer for BootstrapServerMessageSerializer { pos_credits_part, exec_ops_part, final_state_changes, + consensus_part, + consensus_outdated_ids, } => { // message type self.u32_serializer @@ -225,8 +224,14 @@ impl Serializer for BootstrapServerMessageSerializer { self.state_changes_serializer .serialize(state_changes, buffer)?; } + // consensus graph + self.bootstrapable_graph_serializer + .serialize(consensus_part, buffer)?; + // consensus outdated ids + self.block_id_set_serializer + .serialize(consensus_outdated_ids, buffer)?; } - BootstrapServerMessage::FinalStateFinished => { + BootstrapServerMessage::BootstrapFinished => { self.u32_serializer .serialize(&u32::from(MessageServerTypeId::FinalStateFinished), buffer)?; } @@ -259,6 +264,7 @@ pub struct BootstrapServerMessageDeserializer { length_state_changes: U64VarIntDeserializer, state_changes_deserializer: StateChangesDeserializer, bootstrapable_graph_deserializer: BootstrapableGraphDeserializer, + block_id_set_deserializer: PreHashSetDeserializer, ledger_bytes_deserializer: VecU8Deserializer, length_bootstrap_error: U64VarIntDeserializer, slot_deserializer: SlotDeserializer, @@ 
-285,12 +291,7 @@ impl BootstrapServerMessageDeserializer { max_datastore_key_length: u8, max_datastore_value_length: u64, max_datastore_entry_count: u64, - max_function_name_length: u16, - max_parameters_size: u32, max_bootstrap_error_length: u64, - max_op_datastore_entry_count: u64, - max_op_datastore_key_length: u8, - max_op_datastore_value_length: u64, max_changes_slot_count: u64, max_rolls_length: u64, max_production_stats_length: u64, @@ -327,13 +328,12 @@ impl BootstrapServerMessageDeserializer { thread_count, endorsement_count, max_bootstrap_blocks, - max_datastore_value_length, - max_function_name_length, - max_parameters_size, max_operations_per_block, - max_op_datastore_entry_count, - max_op_datastore_key_length, - max_op_datastore_value_length, + ), + block_id_set_deserializer: PreHashSetDeserializer::new( + BlockIdDeserializer::new(), + Included(0), + Included(max_bootstrap_blocks as u64), ), ledger_bytes_deserializer: VecU8Deserializer::new( Included(0), @@ -379,7 +379,7 @@ impl Deserializer for BootstrapServerMessageDeserializer /// use std::str::FromStr; /// /// let message_serializer = BootstrapServerMessageSerializer::new(); - /// let message_deserializer = BootstrapServerMessageDeserializer::new(32, 16, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 255, 1000, 1000, 1000, 1000, 1000, 10, 255, 1000, 1000, 10_000, 10_000, 10_000, 10, 10_000); + /// let message_deserializer = BootstrapServerMessageDeserializer::new(32, 16, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 255, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000, 1000); /// let bootstrap_server_message = BootstrapServerMessage::BootstrapTime { /// server_time: MassaTime::from(0), /// version: Version::from_str("TEST.1.10").unwrap(), @@ -437,13 +437,6 @@ impl Deserializer for BootstrapServerMessageDeserializer }) .map(|peers| BootstrapServerMessage::BootstrapPeers { peers }) .parse(input), - MessageServerTypeId::ConsensusState => { - context("Failed graph deserialization", |input| { - 
self.bootstrapable_graph_deserializer.deserialize(input) - }) - .map(|graph| BootstrapServerMessage::ConsensusState { graph }) - .parse(input) - } MessageServerTypeId::FinalStatePart => tuple(( context("Failed slot deserialization", |input| { self.slot_deserializer.deserialize(input) @@ -475,6 +468,12 @@ impl Deserializer for BootstrapServerMessageDeserializer )), ), ), + context("Failed consensus_part deserialization", |input| { + self.bootstrapable_graph_deserializer.deserialize(input) + }), + context("Failed consensus_outdated_ids deserialization", |input| { + self.block_id_set_deserializer.deserialize(input) + }), )) .map( |( @@ -485,8 +484,10 @@ impl Deserializer for BootstrapServerMessageDeserializer pos_credits_part, exec_ops_part, final_state_changes, + consensus_part, + consensus_outdated_ids, )| { - BootstrapServerMessage::FinalStatePart { + BootstrapServerMessage::BootstrapPart { slot, ledger_part, async_pool_part, @@ -494,12 +495,14 @@ impl Deserializer for BootstrapServerMessageDeserializer pos_credits_part, exec_ops_part, final_state_changes, + consensus_part, + consensus_outdated_ids, } }, ) .parse(input), MessageServerTypeId::FinalStateFinished => { - Ok((input, BootstrapServerMessage::FinalStateFinished)) + Ok((input, BootstrapServerMessage::BootstrapFinished)) } MessageServerTypeId::SlotTooOld => Ok((input, BootstrapServerMessage::SlotTooOld)), MessageServerTypeId::BootstrapError => context( @@ -523,10 +526,8 @@ impl Deserializer for BootstrapServerMessageDeserializer pub enum BootstrapClientMessage { /// Ask for bootstrap peers AskBootstrapPeers, - /// Ask for consensus state - AskConsensusState, - /// Ask for a part of the final state - AskFinalStatePart { + /// Ask for a final state and consensus part + AskBootstrapPart { /// Slot we are attached to for changes last_slot: Option, /// Last received ledger key @@ -539,6 +540,8 @@ pub enum BootstrapClientMessage { last_credits_step: StreamingStep, /// Last received executed operation associated 
slot last_ops_step: StreamingStep, + /// Last received consensus block slot + last_consensus_step: StreamingStep>, }, /// Bootstrap error BootstrapError { @@ -553,10 +556,9 @@ pub enum BootstrapClientMessage { #[repr(u32)] enum MessageClientTypeId { AskBootstrapPeers = 0u32, - AskConsensusState = 1u32, - AskFinalStatePart = 2u32, - BootstrapError = 3u32, - BootstrapSuccess = 4u32, + AskFinalStatePart = 1u32, + BootstrapError = 2u32, + BootstrapSuccess = 3u32, } /// Serializer for `BootstrapClientMessage` @@ -567,6 +569,10 @@ pub struct BootstrapClientMessageSerializer { pool_step_serializer: StreamingStepSerializer, cycle_step_serializer: StreamingStepSerializer, slot_step_serializer: StreamingStepSerializer, + block_ids_step_serializer: StreamingStepSerializer< + PreHashSet, + PreHashSetSerializer, + >, } impl BootstrapClientMessageSerializer { @@ -579,6 +585,9 @@ impl BootstrapClientMessageSerializer { pool_step_serializer: StreamingStepSerializer::new(AsyncMessageIdSerializer::new()), cycle_step_serializer: StreamingStepSerializer::new(U64VarIntSerializer::new()), slot_step_serializer: StreamingStepSerializer::new(SlotSerializer::new()), + block_ids_step_serializer: StreamingStepSerializer::new(PreHashSetSerializer::new( + BlockIdSerializer::new(), + )), } } } @@ -613,17 +622,14 @@ impl Serializer for BootstrapClientMessageSerializer { self.u32_serializer .serialize(&u32::from(MessageClientTypeId::AskBootstrapPeers), buffer)?; } - BootstrapClientMessage::AskConsensusState => { - self.u32_serializer - .serialize(&u32::from(MessageClientTypeId::AskConsensusState), buffer)?; - } - BootstrapClientMessage::AskFinalStatePart { + BootstrapClientMessage::AskBootstrapPart { last_slot, last_ledger_step, last_pool_step, last_cycle_step, last_credits_step, last_ops_step, + last_consensus_step, } => { self.u32_serializer .serialize(&u32::from(MessageClientTypeId::AskFinalStatePart), buffer)?; @@ -638,6 +644,8 @@ impl Serializer for BootstrapClientMessageSerializer { 
self.slot_step_serializer .serialize(last_credits_step, buffer)?; self.slot_step_serializer.serialize(last_ops_step, buffer)?; + self.block_ids_step_serializer + .serialize(last_consensus_step, buffer)?; } } BootstrapClientMessage::BootstrapError { error } => { @@ -669,11 +677,19 @@ pub struct BootstrapClientMessageDeserializer { pool_step_deserializer: StreamingStepDeserializer, cycle_step_deserializer: StreamingStepDeserializer, slot_step_deserializer: StreamingStepDeserializer, + block_ids_step_deserializer: StreamingStepDeserializer< + PreHashSet, + PreHashSetDeserializer, + >, } impl BootstrapClientMessageDeserializer { /// Creates a new `BootstrapClientMessageDeserializer` - pub fn new(thread_count: u8, max_datastore_key_length: u8) -> Self { + pub fn new( + thread_count: u8, + max_datastore_key_length: u8, + consensus_bootstrap_part_size: u64, + ) -> Self { Self { id_deserializer: U32VarIntDeserializer::new(Included(0), Included(u32::MAX)), length_error_deserializer: U32VarIntDeserializer::new(Included(0), Included(100000)), @@ -695,6 +711,13 @@ impl BootstrapClientMessageDeserializer { (Included(0), Included(u64::MAX)), (Included(0), Excluded(thread_count)), )), + block_ids_step_deserializer: StreamingStepDeserializer::new( + PreHashSetDeserializer::new( + BlockIdDeserializer::new(), + Included(0), + Included(consensus_bootstrap_part_size), + ), + ), } } } @@ -709,7 +732,7 @@ impl Deserializer for BootstrapClientMessageDeserializer /// use std::str::FromStr; /// /// let message_serializer = BootstrapClientMessageSerializer::new(); - /// let message_deserializer = BootstrapClientMessageDeserializer::new(32, 255); + /// let message_deserializer = BootstrapClientMessageDeserializer::new(32, 255, 50); /// let bootstrap_server_message = BootstrapClientMessage::AskBootstrapPeers; /// let mut message_serialized = Vec::new(); /// message_serializer.serialize(&bootstrap_server_message, &mut message_serialized).unwrap(); @@ -741,20 +764,18 @@ impl Deserializer for 
BootstrapClientMessageDeserializer MessageClientTypeId::AskBootstrapPeers => { Ok((input, BootstrapClientMessage::AskBootstrapPeers)) } - MessageClientTypeId::AskConsensusState => { - Ok((input, BootstrapClientMessage::AskConsensusState)) - } MessageClientTypeId::AskFinalStatePart => { if input.is_empty() { Ok(( input, - BootstrapClientMessage::AskFinalStatePart { + BootstrapClientMessage::AskBootstrapPart { last_slot: None, last_ledger_step: StreamingStep::Started, last_pool_step: StreamingStep::Started, last_cycle_step: StreamingStep::Started, last_credits_step: StreamingStep::Started, last_ops_step: StreamingStep::Started, + last_consensus_step: StreamingStep::Started, }, )) } else { @@ -777,6 +798,9 @@ impl Deserializer for BootstrapClientMessageDeserializer context("Failed last_ops_step deserialization", |input| { self.slot_step_deserializer.deserialize(input) }), + context("Failed last_consensus_step deserialization", |input| { + self.block_ids_step_deserializer.deserialize(input) + }), )) .map( |( @@ -786,14 +810,16 @@ impl Deserializer for BootstrapClientMessageDeserializer last_cycle_step, last_credits_step, last_ops_step, + last_consensus_step, )| { - BootstrapClientMessage::AskFinalStatePart { + BootstrapClientMessage::AskBootstrapPart { last_slot: Some(last_slot), last_ledger_step, last_pool_step, last_cycle_step, last_credits_step, last_ops_step, + last_consensus_step, } }, ) diff --git a/massa-bootstrap/src/server.rs b/massa-bootstrap/src/server.rs index 18f2b86f0f2..74e7cab0759 100644 --- a/massa-bootstrap/src/server.rs +++ b/massa-bootstrap/src/server.rs @@ -1,10 +1,13 @@ use futures::stream::FuturesUnordered; use futures::StreamExt; use massa_async_pool::AsyncMessageId; -use massa_consensus_exports::ConsensusCommandSender; +use massa_consensus_exports::{bootstrapable_graph::BootstrapableGraph, ConsensusController}; use massa_final_state::FinalState; use massa_logging::massa_trace; -use massa_models::{slot::Slot, streaming_step::StreamingStep, 
version::Version}; +use massa_models::{ + block::BlockId, prehash::PreHashSet, slot::Slot, streaming_step::StreamingStep, + version::Version, +}; use massa_network_exports::NetworkCommandSender; use massa_signature::KeyPair; use massa_time::MassaTime; @@ -50,7 +53,7 @@ impl BootstrapManager { /// start a bootstrap server. /// Once your node will be ready, you may want other to bootstrap from you. pub async fn start_bootstrap_server( - consensus_command_sender: ConsensusCommandSender, + consensus_controller: Box, network_command_sender: NetworkCommandSender, final_state: Arc>, bootstrap_config: BootstrapConfig, @@ -101,7 +104,7 @@ pub async fn start_bootstrap_server( let join_handle = tokio::spawn(async move { BootstrapServer { - consensus_command_sender, + consensus_controller, network_command_sender, final_state, establisher, @@ -128,7 +131,7 @@ pub async fn start_bootstrap_server( } struct BootstrapServer { - consensus_command_sender: ConsensusCommandSender, + consensus_controller: Box, network_command_sender: NetworkCommandSender, final_state: Arc>, establisher: Establisher, @@ -213,10 +216,10 @@ impl BootstrapServer { match self.ip_hist_map.entry(remote_addr.ip()) { hash_map::Entry::Occupied(mut occ) => { if now.duration_since(*occ.get()) <= per_ip_min_interval { - let mut server = BootstrapServerBinder::new(dplx, self.keypair.clone(), self.bootstrap_config.max_bytes_read_write, self.bootstrap_config.max_bootstrap_message_size, self.bootstrap_config.thread_count, self.bootstrap_config.max_datastore_key_length, self.bootstrap_config.randomness_size_bytes); + let mut server = BootstrapServerBinder::new(dplx, self.keypair.clone(), self.bootstrap_config.max_bytes_read_write, self.bootstrap_config.max_bootstrap_message_size, self.bootstrap_config.thread_count, self.bootstrap_config.max_datastore_key_length, self.bootstrap_config.randomness_size_bytes, self.bootstrap_config.consensus_bootstrap_part_size); let _ = match 
tokio::time::timeout(self.bootstrap_config.write_error_timeout.into(), server.send(BootstrapServerMessage::BootstrapError { error: - format!("Your last bootstrap on this server was {:#?} ago and you have to wait {:#?} before retrying.", occ.get().elapsed(), per_ip_min_interval.saturating_sub(occ.get().elapsed())) + format!("Your last bootstrap on this server was {:#?} ago and you have to wait {:#?} before retrying.", occ.get().elapsed().as_secs(), per_ip_min_interval.saturating_sub(occ.get().elapsed()).as_secs()) })).await { Err(_) => Err(std::io::Error::new(std::io::ErrorKind::TimedOut, "bootstrap error too early retry bootstrap send timed out").into()), Ok(Err(e)) => Err(e), @@ -253,13 +256,13 @@ impl BootstrapServer { let compensation_millis = self.compensation_millis; let version = self.version; let data_execution = self.final_state.clone(); - let consensus_command_sender = self.consensus_command_sender.clone(); + let consensus_command_sender = self.consensus_controller.clone(); let network_command_sender = self.network_command_sender.clone(); let keypair = self.keypair.clone(); let config = self.bootstrap_config.clone(); bootstrap_sessions.push(async move { - let mut server = BootstrapServerBinder::new(dplx, keypair, config.max_bytes_read_write, config.max_bootstrap_message_size, config.thread_count, config.max_datastore_key_length, config.randomness_size_bytes); + let mut server = BootstrapServerBinder::new(dplx, keypair, config.max_bytes_read_write, config.max_bootstrap_message_size, config.thread_count, config.max_datastore_key_length, config.randomness_size_bytes, config.consensus_bootstrap_part_size); match manage_bootstrap(&config, &mut server, data_execution, compensation_millis, version, consensus_command_sender, network_command_sender).await { Ok(_) => { info!("bootstrapped peer {}", remote_addr) @@ -276,7 +279,7 @@ impl BootstrapServer { massa_trace!("bootstrap.session.started", {"active_count": bootstrap_sessions.len()}); } else { let config = 
self.bootstrap_config.clone(); - let mut server = BootstrapServerBinder::new(dplx, self.keypair.clone(), config.max_bytes_read_write, config.max_bootstrap_message_size, config.thread_count, config.max_datastore_key_length, config.randomness_size_bytes); + let mut server = BootstrapServerBinder::new(dplx, self.keypair.clone(), config.max_bytes_read_write, config.max_bootstrap_message_size, config.thread_count, config.max_datastore_key_length, config.randomness_size_bytes, config.consensus_bootstrap_part_size); let _ = match tokio::time::timeout(config.clone().write_error_timeout.into(), server.send(BootstrapServerMessage::BootstrapError { error: "Bootstrap failed because the bootstrap server currently has no slots available.".to_string() })).await { @@ -297,15 +300,17 @@ impl BootstrapServer { } #[allow(clippy::too_many_arguments)] -pub async fn send_final_state_stream( +pub async fn stream_bootstrap_information( server: &mut BootstrapServerBinder, final_state: Arc>, + consensus_controller: Box, mut last_slot: Option, mut last_ledger_step: StreamingStep>, mut last_pool_step: StreamingStep, mut last_cycle_step: StreamingStep, mut last_credits_step: StreamingStep, mut last_ops_step: StreamingStep, + mut last_consensus_step: StreamingStep>, write_timeout: Duration, ) -> Result<(), BootstrapError> { loop { @@ -378,40 +383,59 @@ pub async fn send_final_state_stream( current_slot = final_state_read.slot; } - if !last_ledger_step.finished() - || !last_pool_step.finished() - || !last_cycle_step.finished() - || !last_credits_step.finished() - || !last_ops_step.finished() - || !final_state_changes.is_empty() + // Setup final state global cursor + let final_state_global_step = if last_ledger_step.finished() + && last_pool_step.finished() + && last_cycle_step.finished() + && last_credits_step.finished() + && last_ops_step.finished() { - match tokio::time::timeout( - write_timeout, - server.send(BootstrapServerMessage::FinalStatePart { - slot: current_slot, - ledger_part, - 
async_pool_part, - pos_cycle_part, - pos_credits_part, - exec_ops_part, - final_state_changes, - }), - ) - .await - { - Err(_) => Err(std::io::Error::new( - std::io::ErrorKind::TimedOut, - "bootstrap ask ledger part send timed out", - ) - .into()), - Ok(Err(e)) => Err(e), - Ok(Ok(_)) => Ok(()), - }?; + StreamingStep::Finished(Some(current_slot)) + } else { + StreamingStep::Ongoing(current_slot) + }; + + // Setup final state changes cursor + let final_state_changes_step = if final_state_changes.is_empty() { + StreamingStep::Finished(Some(current_slot)) } else { - // There is no ledger data nor async pool data. + StreamingStep::Ongoing(current_slot) + }; + + // Stream consensus blocks if final state base bootstrap is finished + let mut consensus_part = BootstrapableGraph { + final_blocks: Default::default(), + }; + let mut consensus_outdated_ids: PreHashSet = PreHashSet::default(); + if final_state_global_step.finished() { + let (part, outdated_ids, new_consensus_step) = consensus_controller + .get_bootstrap_part(last_consensus_step, final_state_changes_step)?; + consensus_part = part; + consensus_outdated_ids = outdated_ids; + last_consensus_step = new_consensus_step; + } + + // Logs for an easier diagnostic if needed + debug!( + "Final state bootstrap cursor: {:?}", + final_state_global_step + ); + debug!( + "Consensus blocks bootstrap cursor: {:?}", + last_consensus_step + ); + if let StreamingStep::Ongoing(ids) = &last_consensus_step { + debug!("Consensus bootstrap cursor length: {}", ids.len()); + } + + // If the consensus streaming is finished (also meaning that consensus slot == final state slot) exit + if final_state_global_step.finished() + && final_state_changes_step.finished() + && last_consensus_step.finished() + { match tokio::time::timeout( write_timeout, - server.send(BootstrapServerMessage::FinalStateFinished), + server.send(BootstrapServerMessage::BootstrapFinished), ) .await { @@ -425,10 +449,37 @@ pub async fn send_final_state_stream( }?; break; } 
+ + // At this point we know that consensus, final state or both are not finished + match tokio::time::timeout( + write_timeout, + server.send(BootstrapServerMessage::BootstrapPart { + slot: current_slot, + ledger_part, + async_pool_part, + pos_cycle_part, + pos_credits_part, + exec_ops_part, + final_state_changes, + consensus_part, + consensus_outdated_ids, + }), + ) + .await + { + Err(_) => Err(std::io::Error::new( + std::io::ErrorKind::TimedOut, + "bootstrap ask ledger part send timed out", + ) + .into()), + Ok(Err(e)) => Err(e), + Ok(Ok(_)) => Ok(()), + }?; } Ok(()) } +#[allow(clippy::manual_async_fn)] #[allow(clippy::too_many_arguments)] async fn manage_bootstrap( bootstrap_config: &BootstrapConfig, @@ -436,7 +487,7 @@ async fn manage_bootstrap( final_state: Arc>, compensation_millis: i64, version: Version, - consensus_command_sender: ConsensusCommandSender, + consensus_controller: Box, network_command_sender: NetworkCommandSender, ) -> Result<(), BootstrapError> { massa_trace!("bootstrap.lib.manage_bootstrap", {}); @@ -514,45 +565,30 @@ async fn manage_bootstrap( Ok(Ok(_)) => Ok(()), }?; } - BootstrapClientMessage::AskFinalStatePart { + BootstrapClientMessage::AskBootstrapPart { last_slot, last_ledger_step, last_pool_step, last_cycle_step, last_credits_step, last_ops_step, + last_consensus_step, } => { - send_final_state_stream( + stream_bootstrap_information( server, final_state.clone(), + consensus_controller.clone(), last_slot, last_ledger_step, last_pool_step, last_cycle_step, last_credits_step, last_ops_step, + last_consensus_step, write_timeout, ) .await?; } - BootstrapClientMessage::AskConsensusState => { - match tokio::time::timeout( - write_timeout, - server.send(BootstrapServerMessage::ConsensusState { - graph: consensus_command_sender.get_bootstrap_state().await?, - }), - ) - .await - { - Err(_) => Err(std::io::Error::new( - std::io::ErrorKind::TimedOut, - "bootstrap consensus state send timed out", - ) - .into()), - Ok(Err(e)) => Err(e), - 
Ok(Ok(_)) => Ok(()), - }?; - } BootstrapClientMessage::BootstrapSuccess => break Ok(()), BootstrapClientMessage::BootstrapError { error } => { break Err(BootstrapError::ReceivedError(error)); diff --git a/massa-bootstrap/src/server_binder.rs b/massa-bootstrap/src/server_binder.rs index 0749c307e4b..802c44b9b00 100644 --- a/massa-bootstrap/src/server_binder.rs +++ b/massa-bootstrap/src/server_binder.rs @@ -20,6 +20,7 @@ use tokio::io::{AsyncReadExt, AsyncWriteExt}; /// Bootstrap server binder pub struct BootstrapServerBinder { max_bootstrap_message_size: u32, + consensus_bootstrap_part_size: u64, thread_count: u8, max_datastore_key_length: u8, randomness_size_bytes: usize, @@ -38,6 +39,7 @@ impl BootstrapServerBinder { /// * `duplex`: duplex stream. /// * `local_keypair`: local node user keypair /// * `limit`: limit max bytes per second (up and down) + #[allow(clippy::too_many_arguments)] pub fn new( duplex: Duplex, local_keypair: KeyPair, @@ -46,10 +48,12 @@ impl BootstrapServerBinder { thread_count: u8, max_datastore_key_length: u8, randomness_size_bytes: usize, + consensus_bootstrap_part_size: u64, ) -> Self { let size_field_len = u32::be_bytes_min_length(max_bootstrap_message_size); BootstrapServerBinder { max_bootstrap_message_size, + consensus_bootstrap_part_size, size_field_len, local_keypair, duplex: ::new(limit).limit(duplex), @@ -182,6 +186,7 @@ impl BootstrapServerBinder { let (_, msg) = BootstrapClientMessageDeserializer::new( self.thread_count, self.max_datastore_key_length, + self.consensus_bootstrap_part_size, ) .deserialize::(&msg_bytes) .map_err(|err| BootstrapError::GeneralError(format!("{}", err)))?; diff --git a/massa-bootstrap/src/settings.rs b/massa-bootstrap/src/settings.rs index 0dc903a619a..4376e0f5cff 100644 --- a/massa-bootstrap/src/settings.rs +++ b/massa-bootstrap/src/settings.rs @@ -98,4 +98,6 @@ pub struct BootstrapConfig { pub max_executed_ops_length: u64, /// max executed ops changes pub max_ops_changes_length: u64, + /// consensus 
bootstrap part size + pub consensus_bootstrap_part_size: u64, } diff --git a/massa-bootstrap/src/tests/binders.rs b/massa-bootstrap/src/tests/binders.rs index d96dda4ea0d..4e9eb610f1e 100644 --- a/massa-bootstrap/src/tests/binders.rs +++ b/massa-bootstrap/src/tests/binders.rs @@ -8,15 +8,13 @@ use crate::{ tests::tools::get_bootstrap_config, BootstrapPeers, }; use massa_models::config::{ - BOOTSTRAP_RANDOMNESS_SIZE_BYTES, ENDORSEMENT_COUNT, MAX_ADVERTISE_LENGTH, - MAX_ASYNC_MESSAGE_DATA, MAX_ASYNC_POOL_LENGTH, MAX_BOOTSTRAP_ASYNC_POOL_CHANGES, - MAX_BOOTSTRAP_BLOCKS, MAX_BOOTSTRAP_ERROR_LENGTH, MAX_BOOTSTRAP_FINAL_STATE_PARTS_SIZE, - MAX_BOOTSTRAP_MESSAGE_SIZE, MAX_DATASTORE_ENTRY_COUNT, MAX_DATASTORE_KEY_LENGTH, - MAX_DATASTORE_VALUE_LENGTH, MAX_DEFERRED_CREDITS_LENGTH, MAX_EXECUTED_OPS_CHANGES_LENGTH, - MAX_EXECUTED_OPS_LENGTH, MAX_FUNCTION_NAME_LENGTH, MAX_LEDGER_CHANGES_COUNT, - MAX_OPERATIONS_PER_BLOCK, MAX_OPERATION_DATASTORE_ENTRY_COUNT, - MAX_OPERATION_DATASTORE_KEY_LENGTH, MAX_OPERATION_DATASTORE_VALUE_LENGTH, MAX_PARAMETERS_SIZE, - MAX_PRODUCTION_STATS_LENGTH, MAX_ROLLS_COUNT_LENGTH, THREAD_COUNT, + BOOTSTRAP_RANDOMNESS_SIZE_BYTES, CONSENSUS_BOOTSTRAP_PART_SIZE, ENDORSEMENT_COUNT, + MAX_ADVERTISE_LENGTH, MAX_ASYNC_MESSAGE_DATA, MAX_ASYNC_POOL_LENGTH, + MAX_BOOTSTRAP_ASYNC_POOL_CHANGES, MAX_BOOTSTRAP_BLOCKS, MAX_BOOTSTRAP_ERROR_LENGTH, + MAX_BOOTSTRAP_FINAL_STATE_PARTS_SIZE, MAX_BOOTSTRAP_MESSAGE_SIZE, MAX_DATASTORE_ENTRY_COUNT, + MAX_DATASTORE_KEY_LENGTH, MAX_DATASTORE_VALUE_LENGTH, MAX_DEFERRED_CREDITS_LENGTH, + MAX_EXECUTED_OPS_CHANGES_LENGTH, MAX_EXECUTED_OPS_LENGTH, MAX_LEDGER_CHANGES_COUNT, + MAX_OPERATIONS_PER_BLOCK, MAX_PRODUCTION_STATS_LENGTH, MAX_ROLLS_COUNT_LENGTH, THREAD_COUNT, }; use massa_models::version::Version; use massa_signature::{KeyPair, PublicKey}; @@ -51,12 +49,7 @@ impl BootstrapClientBinder { MAX_BOOTSTRAP_ASYNC_POOL_CHANGES, MAX_ASYNC_POOL_LENGTH, MAX_ASYNC_MESSAGE_DATA, - MAX_FUNCTION_NAME_LENGTH, - MAX_PARAMETERS_SIZE, 
MAX_LEDGER_CHANGES_COUNT, - MAX_OPERATION_DATASTORE_ENTRY_COUNT, - MAX_OPERATION_DATASTORE_KEY_LENGTH, - MAX_OPERATION_DATASTORE_VALUE_LENGTH, 1000, MAX_ROLLS_COUNT_LENGTH, MAX_PRODUCTION_STATS_LENGTH, @@ -81,6 +74,7 @@ async fn test_binders() { THREAD_COUNT, MAX_DATASTORE_KEY_LENGTH, BOOTSTRAP_RANDOMNESS_SIZE_BYTES, + CONSENSUS_BOOTSTRAP_PART_SIZE, ); let mut client = BootstrapClientBinder::test_default(client, bootstrap_config.bootstrap_list[0].1); @@ -174,6 +168,7 @@ async fn test_binders_double_send_server_works() { THREAD_COUNT, MAX_DATASTORE_KEY_LENGTH, BOOTSTRAP_RANDOMNESS_SIZE_BYTES, + CONSENSUS_BOOTSTRAP_PART_SIZE, ); let mut client = BootstrapClientBinder::test_default(client, bootstrap_config.bootstrap_list[0].1); @@ -252,6 +247,7 @@ async fn test_binders_try_double_send_client_works() { THREAD_COUNT, MAX_DATASTORE_KEY_LENGTH, BOOTSTRAP_RANDOMNESS_SIZE_BYTES, + CONSENSUS_BOOTSTRAP_PART_SIZE, ); let mut client = BootstrapClientBinder::test_default(client, bootstrap_config.bootstrap_list[0].1); diff --git a/massa-bootstrap/src/tests/scenarios.rs b/massa-bootstrap/src/tests/scenarios.rs index 6fa8c695dc0..d2bdfa20969 100644 --- a/massa-bootstrap/src/tests/scenarios.rs +++ b/massa-bootstrap/src/tests/scenarios.rs @@ -4,7 +4,7 @@ use super::{ mock_establisher, tools::{ bridge_mock_streams, get_boot_state, get_peers, get_random_final_state_bootstrap, - get_random_ledger_changes, wait_consensus_command, wait_network_command, + get_random_ledger_changes, wait_network_command, }, }; use crate::tests::tools::{ @@ -16,19 +16,22 @@ use crate::{ tests::tools::{assert_eq_bootstrap_graph, get_bootstrap_config}, }; use massa_async_pool::AsyncPoolConfig; -use massa_consensus_exports::{commands::ConsensusCommand, ConsensusCommandSender}; +use massa_consensus_exports::{ + bootstrapable_graph::BootstrapableGraph, + test_exports::{MockConsensusController, MockConsensusControllerMessage}, +}; use massa_executed_ops::ExecutedOpsConfig; use massa_final_state::{ - 
test_exports::assert_eq_final_state, FinalState, FinalStateConfig, StateChanges, + test_exports::{assert_eq_final_state, assert_eq_final_state_hash}, + FinalState, FinalStateConfig, StateChanges, }; use massa_ledger_exports::LedgerConfig; +use massa_models::{address::Address, slot::Slot, streaming_step::StreamingStep, version::Version}; use massa_models::{ - address::Address, config::{ MAX_ASYNC_MESSAGE_DATA, MAX_ASYNC_POOL_LENGTH, MAX_DATASTORE_KEY_LENGTH, POS_SAVED_CYCLES, }, - slot::Slot, - version::Version, + prehash::PreHashSet, }; use massa_network_exports::{NetworkCommand, NetworkCommandSender}; use massa_pos_exports::{ @@ -59,8 +62,8 @@ async fn test_bootstrap_server() { let rolls_path = PathBuf::from_str("../massa-node/base_config/initial_rolls.json").unwrap(); let genesis_address = Address::from_public_key(&KeyPair::generate().get_public_key()); - // init the communication channels - let (consensus_cmd_tx, mut consensus_cmd_rx) = mpsc::channel::(5); + let (consensus_controller, mut consensus_event_receiver) = + MockConsensusController::new_with_receiver(); let (network_cmd_tx, mut network_cmd_rx) = mpsc::channel::(5); // setup final state local config @@ -139,7 +142,7 @@ async fn test_bootstrap_server() { // start bootstrap server let (bootstrap_establisher, bootstrap_interface) = mock_establisher::new(); let bootstrap_manager = start_bootstrap_server( - ConsensusCommandSender(consensus_cmd_tx), + consensus_controller, NetworkCommandSender(network_cmd_tx), final_state_server.clone(), bootstrap_config.clone(), @@ -216,22 +219,47 @@ async fn test_bootstrap_server() { sent_peers }; - // wait for bootstrap to ask consensus for bootstrap graph, send it - let wait_graph = async move || { - let response = - match wait_consensus_command(&mut consensus_cmd_rx, 1000.into(), |cmd| match cmd { - ConsensusCommand::GetBootstrapState(resp) => Some(resp), - _ => None, - }) - .await - { - Some(resp) => resp, - None => panic!("timeout waiting for get boot graph consensus 
command"), - }; - let sent_graph = get_boot_state(); - response.send(Box::new(sent_graph.clone())).await.unwrap(); - sent_graph - }; + // intercept consensus parts being asked + let sent_graph = get_boot_state(); + let sent_graph_clone = sent_graph.clone(); + std::thread::spawn(move || loop { + consensus_event_receiver.wait_command(MassaTime::from_millis(10_000), |cmd| match &cmd { + MockConsensusControllerMessage::GetBootstrapableGraph { + execution_cursor, + response_tx, + .. + } => { + // send the consensus blocks only on the first call + // give an empty answer for the following ones + if execution_cursor + == &StreamingStep::Ongoing(Slot { + period: 1, + thread: 0, + }) + { + response_tx + .send(Ok(( + sent_graph_clone.clone(), + PreHashSet::default(), + StreamingStep::Started, + ))) + .unwrap(); + } else { + response_tx + .send(Ok(( + BootstrapableGraph { + final_blocks: Vec::new(), + }, + PreHashSet::default(), + StreamingStep::Finished(None), + ))) + .unwrap(); + } + Some(()) + } + _ => None, + }); + }); // launch the modifier thread let list_changes: Arc>> = Arc::new(RwLock::new(Vec::new())); @@ -258,7 +286,6 @@ async fn test_bootstrap_server() { // wait for peers and graph let sent_peers = wait_peers().await; - let sent_graph = wait_graph().await; // wait for get_state let bootstrap_res = get_state_h @@ -292,6 +319,7 @@ async fn test_bootstrap_server() { // check final states assert_eq_final_state(&final_state_server.read(), &final_state_client.read()); + assert_eq_final_state_hash(&final_state_server.read(), &final_state_client.read()); // compute initial draws final_state_server.write().compute_initial_draws().unwrap(); @@ -309,7 +337,7 @@ async fn test_bootstrap_server() { "mismatch between sent and received peers" ); - // check states + // check graphs assert_eq_bootstrap_graph(&sent_graph, &bootstrap_res.graph.unwrap()); // stop bootstrap server diff --git a/massa-bootstrap/src/tests/tools.rs b/massa-bootstrap/src/tests/tools.rs index 
00e505acae0..51c9795fda7 100644 --- a/massa-bootstrap/src/tests/tools.rs +++ b/massa-bootstrap/src/tests/tools.rs @@ -5,24 +5,26 @@ use crate::settings::BootstrapConfig; use bitvec::vec::BitVec; use massa_async_pool::test_exports::{create_async_pool, get_random_message}; use massa_async_pool::{AsyncPoolChanges, Change}; -use massa_consensus_exports::commands::ConsensusCommand; +use massa_consensus_exports::{ + bootstrapable_graph::{ + BootstrapableGraph, BootstrapableGraphDeserializer, BootstrapableGraphSerializer, + }, + export_active_block::{ExportActiveBlock, ExportActiveBlockSerializer}, +}; use massa_executed_ops::{ExecutedOps, ExecutedOpsConfig}; use massa_final_state::test_exports::create_final_state; use massa_final_state::{FinalState, FinalStateConfig}; -use massa_graph::export_active_block::ExportActiveBlockSerializer; -use massa_graph::{export_active_block::ExportActiveBlock, BootstrapableGraph}; -use massa_graph::{BootstrapableGraphDeserializer, BootstrapableGraphSerializer}; use massa_hash::Hash; use massa_ledger_exports::{LedgerChanges, LedgerEntry, SetUpdateOrDelete}; use massa_ledger_worker::test_exports::create_final_ledger; use massa_models::config::{ - BOOTSTRAP_RANDOMNESS_SIZE_BYTES, ENDORSEMENT_COUNT, MAX_ADVERTISE_LENGTH, - MAX_ASYNC_MESSAGE_DATA, MAX_ASYNC_POOL_LENGTH, MAX_BOOTSTRAP_ASYNC_POOL_CHANGES, - MAX_BOOTSTRAP_BLOCKS, MAX_BOOTSTRAP_ERROR_LENGTH, MAX_BOOTSTRAP_FINAL_STATE_PARTS_SIZE, - MAX_BOOTSTRAP_MESSAGE_SIZE, MAX_DATASTORE_ENTRY_COUNT, MAX_DATASTORE_KEY_LENGTH, - MAX_DATASTORE_VALUE_LENGTH, MAX_DEFERRED_CREDITS_LENGTH, MAX_EXECUTED_OPS_CHANGES_LENGTH, - MAX_EXECUTED_OPS_LENGTH, MAX_FUNCTION_NAME_LENGTH, MAX_LEDGER_CHANGES_COUNT, - MAX_OPERATIONS_PER_BLOCK, MAX_OPERATION_DATASTORE_ENTRY_COUNT, + BOOTSTRAP_RANDOMNESS_SIZE_BYTES, CONSENSUS_BOOTSTRAP_PART_SIZE, ENDORSEMENT_COUNT, + MAX_ADVERTISE_LENGTH, MAX_ASYNC_MESSAGE_DATA, MAX_ASYNC_POOL_LENGTH, + MAX_BOOTSTRAP_ASYNC_POOL_CHANGES, MAX_BOOTSTRAP_BLOCKS, MAX_BOOTSTRAP_ERROR_LENGTH, + 
MAX_BOOTSTRAP_FINAL_STATE_PARTS_SIZE, MAX_BOOTSTRAP_MESSAGE_SIZE, MAX_DATASTORE_ENTRY_COUNT, + MAX_DATASTORE_KEY_LENGTH, MAX_DATASTORE_VALUE_LENGTH, MAX_DEFERRED_CREDITS_LENGTH, + MAX_EXECUTED_OPS_CHANGES_LENGTH, MAX_EXECUTED_OPS_LENGTH, MAX_FUNCTION_NAME_LENGTH, + MAX_LEDGER_CHANGES_COUNT, MAX_OPERATIONS_PER_BLOCK, MAX_OPERATION_DATASTORE_ENTRY_COUNT, MAX_OPERATION_DATASTORE_KEY_LENGTH, MAX_OPERATION_DATASTORE_VALUE_LENGTH, MAX_PARAMETERS_SIZE, MAX_PRODUCTION_STATS_LENGTH, MAX_ROLLS_COUNT_LENGTH, PERIODS_PER_CYCLE, THREAD_COUNT, }; @@ -139,7 +141,7 @@ fn get_random_deferred_credits(r_limit: u64) -> DeferredCredits { for j in 0u64..r_limit { credits.insert(get_random_address(), Amount::from_raw(j)); } - deferred_credits.0.insert( + deferred_credits.credits.insert( Slot { period: i, thread: 0, @@ -154,14 +156,15 @@ fn get_random_deferred_credits(r_limit: u64) -> DeferredCredits { fn get_random_pos_state(r_limit: u64, pos: PoSFinalState) -> PoSFinalState { let mut cycle_history = VecDeque::new(); let (roll_counts, production_stats, rng_seed) = get_random_pos_cycles_info(r_limit, true); - cycle_history.push_back(CycleInfo { - cycle: 0, + cycle_history.push_back(CycleInfo::new_with_hash( + 0, + false, roll_counts, - complete: false, rng_seed, production_stats, - }); - let deferred_credits = get_random_deferred_credits(r_limit); + )); + let mut deferred_credits = DeferredCredits::default(); + deferred_credits.final_nested_extend(get_random_deferred_credits(r_limit)); PoSFinalState { cycle_history, deferred_credits, @@ -184,13 +187,11 @@ pub fn get_random_pos_changes(r_limit: u64) -> PoSChanges { pub fn get_random_async_pool_changes(r_limit: u64) -> AsyncPoolChanges { let mut changes = AsyncPoolChanges::default(); for _ in 0..(r_limit / 2) { - let mut message = get_random_message(); - message.gas_price = Amount::from_str("10").unwrap(); + let message = get_random_message(Some(Amount::from_str("10").unwrap())); changes.0.push(Change::Add(message.compute_id(), message)); } 
for _ in (r_limit / 2)..r_limit { - let mut message = get_random_message(); - message.gas_price = Amount::from_str("1000").unwrap(); + let message = get_random_message(Some(Amount::from_str("1_000_000").unwrap())); changes.0.push(Change::Add(message.compute_id(), message)); } changes @@ -228,10 +229,10 @@ pub fn get_random_final_state_bootstrap( let r_limit: u64 = 50; let mut sorted_ledger = HashMap::new(); - let mut messages = BTreeMap::new(); + let mut messages = AsyncPoolChanges::default(); for _ in 0..r_limit { - let message = get_random_message(); - messages.insert(message.compute_id(), message); + let message = get_random_message(None); + messages.0.push(Change::Add(message.compute_id(), message)); } for _ in 0..r_limit { sorted_ledger.insert(get_random_address(), get_random_ledger_entry()); @@ -241,7 +242,8 @@ pub fn get_random_final_state_bootstrap( let slot = Slot::new(0, 0); let final_ledger = create_final_ledger(config.ledger_config.clone(), sorted_ledger); - let async_pool = create_async_pool(config.async_pool_config.clone(), messages); + let mut async_pool = create_async_pool(config.async_pool_config.clone(), BTreeMap::new()); + async_pool.apply_changes_unchecked(&messages); create_final_state( config.clone(), @@ -324,27 +326,7 @@ pub fn get_bootstrap_config(bootstrap_public_key: PublicKey) -> BootstrapConfig max_credits_length: MAX_DEFERRED_CREDITS_LENGTH, max_executed_ops_length: MAX_EXECUTED_OPS_LENGTH, max_ops_changes_length: MAX_EXECUTED_OPS_CHANGES_LENGTH, - } -} - -pub async fn wait_consensus_command( - consensus_command_receiver: &mut Receiver, - timeout: MassaTime, - filter_map: F, -) -> Option -where - F: Fn(ConsensusCommand) -> Option, -{ - let timer = sleep(timeout.into()); - tokio::pin!(timer); - loop { - tokio::select! 
{ - cmd = consensus_command_receiver.recv() => match cmd { - Some(orig_evt) => if let Some(res_evt) = filter_map(orig_evt) { return Some(res_evt); }, - _ => panic!("network event channel died") - }, - _ = &mut timer => return None - } + consensus_bootstrap_part_size: CONSENSUS_BOOTSTRAP_PART_SIZE, } } @@ -435,7 +417,6 @@ pub fn get_boot_state() -> BootstrapableGraph { block, parents: vec![(get_dummy_block_id("b1"), 4777); THREAD_COUNT as usize], is_final: true, - operations: Default::default(), }; let boot_graph = BootstrapableGraph { @@ -447,13 +428,7 @@ pub fn get_boot_state() -> BootstrapableGraph { THREAD_COUNT, ENDORSEMENT_COUNT, MAX_BOOTSTRAP_BLOCKS, - MAX_DATASTORE_VALUE_LENGTH, - MAX_FUNCTION_NAME_LENGTH, - MAX_PARAMETERS_SIZE, MAX_OPERATIONS_PER_BLOCK, - MAX_OPERATION_DATASTORE_ENTRY_COUNT, - MAX_OPERATION_DATASTORE_KEY_LENGTH, - MAX_OPERATION_DATASTORE_VALUE_LENGTH, ); let mut bootstrapable_graph_serialized = Vec::new(); diff --git a/massa-client/Cargo.toml b/massa-client/Cargo.toml index 2d7ccbfb65d..b11dc99712b 100644 --- a/massa-client/Cargo.toml +++ b/massa-client/Cargo.toml @@ -9,12 +9,12 @@ edition = "2021" anyhow = "1.0" atty = "0.2" console = "0.15" -dialoguer = { git = "https://github.com/mitsuhiko/dialoguer", features = ["history", "completion"] } +dialoguer = "0.10" +rustyline = "10.0.0" +rustyline-derive = "0.7.0" erased-serde = "0.3" -glob = "0.3" lazy_static = "1.4" paw = "1.0" -rev_lines = "0.2" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" structopt = { version = "0.3", features = ["paw"] } @@ -28,9 +28,6 @@ massa_time = { path = "../massa-time" } massa_sdk = { path = "../massa-sdk" } massa_wallet = { path = "../massa-wallet" } -[target.'cfg(not(windows))'.dependencies] -tilde-expand = "0.1" - [dev-dependencies] toml_edit = "0.15" diff --git a/massa-client/base_config/config.toml b/massa-client/base_config/config.toml index 089474105d7..174acbbffcc 100644 --- a/massa-client/base_config/config.toml +++ 
b/massa-client/base_config/config.toml @@ -3,7 +3,22 @@ history_file_path = "config/.massa_history" timeout = 1000 [default_node] -#ip = "145.239.66.206" ip = "127.0.0.1" private_port = 33034 -public_port = 33035 \ No newline at end of file +public_port = 33035 + +[http] + # maximum size in bytes of a request + max_request_body_size = 52428800 + # request timeout + request_timeout = 60000 + # maximum number of outcoming connections allowed + max_concurrent_requests = 100 + # certificate_store, `Native` or `WebPki` + certificate_store = "Native" + # JSON-RPC request object id data type, `String` or `Number` + id_kind = "Number" + # max length for logging for requests and responses. Logs bigger than this limit will be truncated + max_log_length = 4096 + # custom headers passed to the server with every request (default is empty). + headers = [] diff --git a/massa-client/src/cmds.rs b/massa-client/src/cmds.rs index 13fbebc6127..4376a3e8680 100644 --- a/massa-client/src/cmds.rs +++ b/massa-client/src/cmds.rs @@ -28,7 +28,6 @@ use std::fmt::Write as _; use std::fmt::{Debug, Display}; use std::net::IpAddr; use std::path::PathBuf; -use std::process; use strum::{EnumMessage, EnumProperty, IntoEnumIterator}; use strum_macros::{Display, EnumIter, EnumMessage, EnumProperty, EnumString}; @@ -38,9 +37,6 @@ use strum_macros::{Display, EnumIter, EnumMessage, EnumProperty, EnumString}; #[allow(non_camel_case_types)] #[derive(Debug, PartialEq, Eq, EnumIter, EnumMessage, EnumString, EnumProperty, Display)] pub enum Command { - #[strum(ascii_case_insensitive, message = "exit the client gracefully")] - exit, - #[strum(ascii_case_insensitive, message = "display this help")] help, @@ -219,30 +215,28 @@ pub enum Command { #[strum( ascii_case_insensitive, - props(args = "SenderAddress PathToBytecode MaxGas GasPrice Fee",), + props(args = "SenderAddress PathToBytecode MaxGas Fee",), message = "create and send an operation containing byte code" )] send_smart_contract, #[strum( 
ascii_case_insensitive, - props( - args = "SenderAddress TargetAddress FunctionName Parameter MaxGas GasPrice Coins Fee", - ), + props(args = "SenderAddress TargetAddress FunctionName Parameter MaxGas Coins Fee",), message = "create and send an operation to call a function of a smart contract" )] call_smart_contract, #[strum( ascii_case_insensitive, - props(args = "PathToBytecode MaxGas GasPrice Address",), + props(args = "PathToBytecode MaxGas Address",), message = "execute byte code, address is optional. Nothing is really executed on chain" )] read_only_smart_contract, #[strum( ascii_case_insensitive, - props(args = "TargetAddress TargetFunction Parameter MaxGas GasPrice SenderAddress",), + props(args = "TargetAddress TargetFunction Parameter MaxGas SenderAddress",), message = "call a smart contract function, sender address is optional. Nothing is really executed on chain" )] read_only_call, @@ -376,8 +370,6 @@ impl Command { json: bool, ) -> Result> { match self { - Command::exit => process::exit(0), - Command::help => { if !json { if !parameters.is_empty() { @@ -827,38 +819,24 @@ impl Command { Ok(Box::new(())) } Command::send_smart_contract => { - if parameters.len() != 5 { + if parameters.len() != 4 { bail!("wrong number of parameters"); } let addr = parameters[0].parse::
()?; let path = parameters[1].parse::()?; let max_gas = parameters[2].parse::()?; - let gas_price = parameters[3].parse::()?; - let fee = parameters[4].parse::()?; - + let fee = parameters[3].parse::()?; if !json { - match gas_price - .checked_mul_u64(max_gas) - .and_then(|x| x.checked_add(fee)) - { - Some(total) => { - if let Ok(addresses_info) = - client.public.get_addresses(vec![addr]).await - { - match addresses_info.get(0) { - Some(info) => { - if info.candidate_balance < total { - client_warning!("this operation may be rejected due to insufficient balance"); - } - } - None => { - client_warning!(format!("address {} not found", addr)); - } + if let Ok(addresses_info) = client.public.get_addresses(vec![addr]).await { + match addresses_info.get(0) { + Some(info) => { + if info.candidate_balance < fee { + client_warning!("this operation may be rejected due to insufficient balance"); } } - } - None => { - client_warning!("the total amount hit the limit overflow, operation will certainly be rejected"); + None => { + client_warning!(format!("address {} not found", addr)); + } } } }; @@ -880,7 +858,6 @@ impl Command { OperationType::ExecuteSC { data, max_gas, - gas_price, datastore, }, fee, @@ -890,22 +867,18 @@ impl Command { .await } Command::call_smart_contract => { - if parameters.len() != 8 { + if parameters.len() != 7 { bail!("wrong number of parameters"); } let addr = parameters[0].parse::
()?; let target_addr = parameters[1].parse::
()?; let target_func = parameters[2].clone(); - let param = parameters[3].clone(); + let param = parameters[3].clone().into_bytes(); let max_gas = parameters[4].parse::()?; - let gas_price = parameters[5].parse::()?; - let coins = parameters[6].parse::()?; - let fee = parameters[7].parse::()?; + let coins = parameters[5].parse::()?; + let fee = parameters[6].parse::()?; if !json { - match gas_price - .checked_mul_u64(max_gas) - .and_then(|x| x.checked_add(fee)) - { + match coins.checked_add(fee) { Some(total) => { if let Ok(addresses_info) = client.public.get_addresses(vec![target_addr]).await @@ -939,7 +912,6 @@ impl Command { param, max_gas, coins, - gas_price, }, fee, addr, @@ -960,14 +932,13 @@ impl Command { } } Command::read_only_smart_contract => { - if parameters.len() != 3 && parameters.len() != 4 { + if parameters.len() != 2 && parameters.len() != 3 { bail!("wrong number of parameters"); } let path = parameters[0].parse::()?; let max_gas = parameters[1].parse::()?; - let simulated_gas_price = parameters[2].parse::()?; - let address = if let Some(adr) = parameters.get(3) { + let address = if let Some(adr) = parameters.get(2) { Some(adr.parse::
()?) } else { None @@ -977,7 +948,6 @@ impl Command { .public .execute_read_only_bytecode(ReadOnlyBytecodeExecution { max_gas, - simulated_gas_price, bytecode, address, operation_datastore: None, // TODO - #3072 @@ -989,16 +959,15 @@ impl Command { } } Command::read_only_call => { - if parameters.len() != 5 && parameters.len() != 6 { + if parameters.len() != 4 && parameters.len() != 5 { bail!("wrong number of parameters"); } let target_address = parameters[0].parse::
()?; let target_function = parameters[1].parse::()?; - let parameter = parameters[2].parse::()?; + let parameter = parameters[2].parse::()?.into_bytes(); let max_gas = parameters[3].parse::()?; - let simulated_gas_price = parameters[4].parse::()?; - let caller_address = if let Some(addr) = parameters.get(5) { + let caller_address = if let Some(addr) = parameters.get(4) { Some(addr.parse::
()?) } else { None @@ -1011,7 +980,6 @@ impl Command { target_function, parameter, max_gas, - simulated_gas_price, }) .await { diff --git a/massa-client/src/main.rs b/massa-client/src/main.rs index cfea6d4a217..5c7c1057167 100644 --- a/massa-client/src/main.rs +++ b/massa-client/src/main.rs @@ -9,7 +9,7 @@ use atty::Stream; use cmds::Command; use console::style; use dialoguer::Password; -use massa_sdk::Client; +use massa_sdk::{Client, HttpConfig}; use massa_wallet::Wallet; use serde::Serialize; use std::net::IpAddr; @@ -20,7 +20,6 @@ use structopt::StructOpt; mod cmds; mod repl; mod settings; -mod utils; #[cfg(test)] pub mod tests; @@ -96,8 +95,19 @@ fn main(args: Args) -> anyhow::Result<()> { } async fn run(args: Args) -> Result<()> { + let http_config = HttpConfig { + max_request_body_size: SETTINGS.http.max_request_body_size, + request_timeout: SETTINGS.http.request_timeout, + max_concurrent_requests: SETTINGS.http.max_concurrent_requests, + certificate_store: SETTINGS.http.certificate_store.clone(), + id_kind: SETTINGS.http.id_kind.clone(), + max_log_length: SETTINGS.http.max_log_length, + headers: SETTINGS.http.headers.clone(), + }; + // TODO: move settings loading in another crate ... see #1277 let settings = SETTINGS.clone(); + let address = match args.ip { Some(ip) => ip, None => settings.default_node.ip, @@ -124,10 +134,10 @@ async fn run(args: Args) -> Result<()> { // ... 
let password = args.password.unwrap_or_else(|| ask_password(&args.wallet)); let mut wallet = Wallet::new(args.wallet, password)?; - let client = Client::new(address, public_port, private_port).await; + let client = Client::new(address, public_port, private_port, &http_config).await; if atty::is(Stream::Stdout) && args.command == Command::help && !args.json { // Interactive mode - repl::run(&client, &mut wallet).await; + repl::run(&client, &mut wallet).await?; } else { // Non-Interactive mode match args diff --git a/massa-client/src/repl.rs b/massa-client/src/repl.rs index 480aed49c24..3abe8d0e48c 100644 --- a/massa-client/src/repl.rs +++ b/massa-client/src/repl.rs @@ -2,11 +2,9 @@ use crate::cmds::{Command, ExtendedWallet}; use crate::settings::SETTINGS; -use crate::utils::longest_common_prefix; +use anyhow::Result; use console::style; -use dialoguer::{theme::ColorfulTheme, Completion, History, Input}; use erased_serde::{Serialize, Serializer}; -use glob::glob; use massa_models::api::{ AddressInfo, BlockInfo, DatastoreEntryOutput, EndorsementInfo, NodeStatus, OperationInfo, }; @@ -17,19 +15,14 @@ use massa_models::prehash::PreHashSet; use massa_models::{address::Address, operation::OperationId}; use massa_sdk::Client; use massa_wallet::Wallet; -use rev_lines::RevLines; -use std::collections::VecDeque; -use std::io::Error; +use rustyline::completion::{Completer, FilenameCompleter, Pair}; +use rustyline::error::ReadlineError; +use rustyline::validate::MatchingBracketValidator; +use rustyline::{CompletionType, Config, Editor}; +use rustyline_derive::{Completer, Helper, Highlighter, Hinter, Validator}; use std::str; -use std::{ - fs::File, - fs::OpenOptions, - io::{BufReader, Write}, -}; use strum::IntoEnumIterator; use strum::ParseError; -#[cfg(not(windows))] -use tilde_expand::tilde_expand; macro_rules! 
massa_fancy_ascii_art_logo { () => { @@ -94,161 +87,108 @@ fn group_parameters(parameters: Vec) -> Vec { new_parameters } -pub(crate) async fn run(client: &Client, wallet: &mut Wallet) { +#[derive(Helper, Completer, Hinter, Validator, Highlighter)] +struct MyHelper { + #[rustyline(Completer)] + completer: MassaCompleter, + #[rustyline(Validator)] + validator: MatchingBracketValidator, +} + +pub(crate) async fn run(client: &Client, wallet: &mut Wallet) -> Result<()> { massa_fancy_ascii_art_logo!(); - println!("Use 'exit' to quit the prompt"); + println!("Use 'CTRL+D or CTRL+C' to quit the prompt"); println!("Use the Up/Down arrows to scroll through history"); println!("Use the Right arrow or Tab to complete your command"); println!("Use the Enter key to execute your command"); crate::cmds::help(); - let mut history = CommandHistory::default(); - let completion = CommandCompletion::default(); + let h = MyHelper { + completer: MassaCompleter::new(), + validator: MatchingBracketValidator::new(), + }; + let config = Config::builder() + .auto_add_history(true) + .completion_prompt_limit(100) + .completion_type(CompletionType::List) + .max_history_size(10000) + .build(); + let mut rl: Editor = Editor::with_config(config)?; + rl.set_helper(Some(h)); + if rl.load_history(&SETTINGS.history_file_path).is_err() { + println!("No previous history."); + } loop { - if let Ok(input) = Input::::with_theme(&ColorfulTheme::default()) - .with_prompt("command") - .history_with(&mut history) - .completion_with(&completion) - .interact_text() - { - // User input parsing - let input: Vec = - group_parameters(input.split_whitespace().map(|x| x.to_string()).collect()); - let cmd: Result = input[0].parse(); - let parameters = input[1..].to_vec(); - // Print result of evaluated command - match cmd { - Ok(command) => match command.run(client, wallet, ¶meters, false).await { - Ok(output) => output.pretty_print(), - Err(e) => println!("{}", style(format!("Error: {}", e)).red()), - }, - Err(_) => 
println!("Command not found!\ntype \"help\" to get the list of commands"), + let readline = rl.readline("command > "); + match readline { + Ok(line) => { + if line.is_empty() { + continue; + } + rl.add_history_entry(line.as_str()); + let input: Vec = + group_parameters(line.split_whitespace().map(|x| x.to_string()).collect()); + let cmd: Result = input[0].parse(); + let parameters = input[1..].to_vec(); + // Print result of evaluated command + match cmd { + Ok(command) => match command.run(client, wallet, ¶meters, false).await { + Ok(output) => output.pretty_print(), + Err(e) => println!("{}", style(format!("Error: {}", e)).red()), + }, + Err(_) => { + println!("Command not found!\ntype \"help\" to get the list of commands") + } + } } - } - } -} - -struct CommandHistory { - max: usize, - history: VecDeque, -} - -impl CommandHistory { - fn get_saved_history() -> Result, Error> { - if let Ok(file) = File::open(&SETTINGS.history_file_path) { - let lines = RevLines::new(BufReader::new(file))?; - Ok(lines.collect()) - } else { - File::create(&SETTINGS.history_file_path)?; - Ok(VecDeque::new()) - } - } - - fn write_to_saved_history(command: &str) { - if let Ok(mut file) = OpenOptions::new() - .write(true) - .append(true) - .open(&SETTINGS.history_file_path) - { - writeln!(file, "{}", command).ok(); - } - } -} - -impl Default for CommandHistory { - fn default() -> Self { - CommandHistory { - max: SETTINGS.history, - history: CommandHistory::get_saved_history().unwrap_or_default(), - } - } -} - -impl History for CommandHistory { - fn read(&self, pos: usize) -> Option { - self.history.get(pos).cloned() - } - - fn write(&mut self, val: &T) { - if self.history.len() == self.max { - self.history.pop_back(); - } - let string_value = val.to_string(); - if let Some(last_command) = self.history.iter().next() { - if last_command != &string_value { - CommandHistory::write_to_saved_history(&string_value); - self.history.push_front(string_value); + Err(ReadlineError::Interrupted) => { 
+ break; + } + Err(ReadlineError::Eof) => { + break; + } + Err(err) => { + println!("Error: {err:?}"); + break; } - } else { - CommandHistory::write_to_saved_history(&string_value); - self.history.push_front(string_value); } } + rl.append_history(&SETTINGS.history_file_path).unwrap(); + Ok(()) } -struct CommandCompletion { - options: Vec, +struct MassaCompleter { + file_completer: FilenameCompleter, } -impl Default for CommandCompletion { - fn default() -> Self { - CommandCompletion { - options: Command::iter().map(|x| x.to_string()).collect(), +impl MassaCompleter { + fn new() -> Self { + Self { + file_completer: FilenameCompleter::new(), } } } -#[cfg(not(windows))] -fn expand_path(partial_path: &str) -> Vec { - tilde_expand(partial_path.as_bytes()) -} - -#[cfg(windows)] -fn expand_path(partial_path: &str) -> Vec { - partial_path.as_bytes().to_vec() -} - -impl Completion for CommandCompletion { - /// Simple completion implementation based on sub-string - fn get(&self, input: &str) -> Option { - let input = input.to_string(); - if input.contains(' ') { - let mut args: Vec<&str> = input.split(' ').collect(); - let mut default_path = "./"; - let path_to_complete = args.last_mut().unwrap_or(&mut default_path); - let expanded_path = expand_path(path_to_complete); - *path_to_complete = str::from_utf8(&expanded_path).unwrap_or(path_to_complete); - if let Ok(paths) = glob(&(path_to_complete.to_owned() + "*")) { - let suggestions: Vec = paths - .filter_map(|x| x.map(|path| path.display().to_string()).ok()) - .collect(); - if !suggestions.is_empty() { - println!(); - for path in &suggestions { - println!("{}", style(path).dim()) - } - *path_to_complete = - longest_common_prefix(suggestions.iter().map(|s| &s[..]).collect()); - } - Some(args.join(" ")) - } else { - Some(args.join(" ")) - } +impl Completer for MassaCompleter { + type Candidate = Pair; + fn complete( + &self, + line: &str, + pos: usize, + ctx: &rustyline::Context<'_>, + ) -> rustyline::Result<(usize, Vec)> { + 
if line.contains(' ') { + self.file_completer.complete(line, pos, ctx) } else { - let suggestions: Vec<&str> = self - .options - .iter() - .filter(|s| s.len() >= input.len() && input == s[..input.len()]) - .map(|s| &s[..]) - .collect(); - if !suggestions.is_empty() { - println!(); - for suggestion in &suggestions { - println!("{}", style(suggestion).dim()); + let mut candidates = Vec::new(); + for cmd in Command::iter() { + if cmd.to_string().starts_with(line) { + candidates.push(Pair { + display: cmd.to_string(), + replacement: cmd.to_string(), + }); } - Some(String::from(longest_common_prefix(suggestions))) - } else { - None } + Ok((0, candidates)) } } } diff --git a/massa-client/src/settings.rs b/massa-client/src/settings.rs index 21361e4463e..0e7ad8e4be6 100644 --- a/massa-client/src/settings.rs +++ b/massa-client/src/settings.rs @@ -16,6 +16,7 @@ pub struct Settings { pub history: usize, pub history_file_path: PathBuf, pub timeout: MassaTime, + pub http: HttpSettings, } #[derive(Debug, Deserialize, Clone)] @@ -25,6 +26,19 @@ pub struct DefaultNode { pub public_port: u16, } +/// Http Client settings. +/// the Http Client settings +#[derive(Debug, Deserialize, Clone)] +pub struct HttpSettings { + pub max_request_body_size: u32, + pub request_timeout: MassaTime, + pub max_concurrent_requests: usize, + pub certificate_store: String, + pub id_kind: String, + pub max_log_length: u32, + pub headers: Vec<(String, String)>, +} + #[cfg(test)] #[test] fn test_load_client_config() { diff --git a/massa-client/src/utils.rs b/massa-client/src/utils.rs deleted file mode 100644 index 5d805c19f53..00000000000 --- a/massa-client/src/utils.rs +++ /dev/null @@ -1,74 +0,0 @@ -// From https://docs.rs/commands/0.0.5/commands/util/fn.longest_common_prefix.html - -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! 
Utilities - -use std::cmp; - -/// Longest Common Prefix -/// -/// Given a vector of string slices, calculate the string -/// slice that is the longest common prefix of the strings. -/// -/// ``` -/// use commands::util::longest_common_prefix; -/// -/// let words = vec!["zebrawood", "zebrafish", "zebra mussel"]; -/// let prefix = longest_common_prefix(words); -/// assert_eq!(prefix, "zebra"); -/// ``` -pub fn longest_common_prefix(strings: Vec<&str>) -> &str { - if strings.is_empty() { - return ""; - } - let str0 = strings[0]; - let str0bytes = str0.as_bytes(); - let mut len = str0.len(); - for str in &strings[1..] { - len = cmp::min( - len, - str.as_bytes() - .iter() - .zip(str0bytes) - .take_while(|&(a, b)| a == b) - .count(), - ); - } - &strings[0][..len] -} - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn empty_lcp() { - assert_eq!(longest_common_prefix(vec![]), ""); - } - - #[test] - fn single_lcp() { - assert_eq!(longest_common_prefix(vec!["ab"]), "ab"); - } - - #[test] - fn no_lcp() { - assert_eq!(longest_common_prefix(vec!["a", "b", "c"]), ""); - } - - #[test] - fn valid_lcp() { - // assert_eq!(longest_common_prefix(vec!["aa", "ab", "ac"]), "a"); - assert_eq!(longest_common_prefix(vec!["aba", "abb", "abc"]), "ab"); - } - - #[test] - fn valid_is_shortest_lcp() { - assert_eq!(longest_common_prefix(vec!["aba", "ab", "abc"]), "ab"); - } -} diff --git a/massa-consensus-exports/Cargo.toml b/massa-consensus-exports/Cargo.toml index abb6f5e3d83..7286e9bd634 100644 --- a/massa-consensus-exports/Cargo.toml +++ b/massa-consensus-exports/Cargo.toml @@ -7,34 +7,23 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +crossbeam-channel = "0.5.6" displaydoc = "0.2" +nom = "7.1" +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" thiserror = "1.0" -tokio = { version = "1.21", features = ["full"] } -tempfile = { version = "3.3", optional = true } # use with testing 
feature -serde_json = { version = "1.0", optional = true } # use with testing feature -# custom modules -massa_cipher = { path = "../massa-cipher" } +#custom modules +massa_hash = { path = "../massa-hash"} massa_execution_exports = { path = "../massa-execution-exports" } -massa_graph = { path = "../massa-graph" } massa_models = { path = "../massa-models" } massa_pool_exports = { path = "../massa-pool-exports" } massa_pos_exports = { path = "../massa-pos-exports" } -massa_protocol_exports = { path = "../massa-protocol-exports" } -massa_signature = { path = "../massa-signature" } -massa_time = { path = "../massa-time" } +massa_protocol_exports ={ path = "../massa-protocol-exports" } massa_storage = { path = "../massa-storage" } +massa_serialization = { path = "../massa-serialization" } +massa_time = { path = "../massa-time" } +massa_signature = { path = "../massa-signature" } -[dev-dependencies] -massa_models = { path = "../massa-models", features = ["testing"] } - -# for more information on what are the following features used for, see the cargo.toml at workspace level [features] -sandbox = [ "massa_protocol_exports/sandbox" ] -testing = [ - "massa_models/testing", - "massa_execution_exports/testing", - "massa_pool_exports/testing", - "massa_protocol_exports/testing", - "tempfile", - "serde_json" -] +testing = ["massa_models/testing", "massa_execution_exports/testing", "massa_pool_exports/testing", "massa_pos_exports/testing", "massa_protocol_exports/testing", "massa_storage/testing"] \ No newline at end of file diff --git a/massa-consensus-exports/src/block_graph_export.rs b/massa-consensus-exports/src/block_graph_export.rs new file mode 100644 index 00000000000..bd8f5d27069 --- /dev/null +++ b/massa-consensus-exports/src/block_graph_export.rs @@ -0,0 +1,29 @@ +use massa_models::{ + address::Address, + block::BlockId, + clique::Clique, + prehash::{PreHashMap, PreHashSet}, + slot::Slot, +}; + +use crate::block_status::{DiscardReason, ExportCompiledBlock}; + +/// 
Bootstrap compatible version of the block graph +#[derive(Debug, Clone)] +#[allow(clippy::type_complexity)] +pub struct BlockGraphExport { + /// Genesis blocks. + pub genesis_blocks: Vec, + /// Map of active blocks, were blocks are in their exported version. + pub active_blocks: PreHashMap, + /// Finite cache of discarded blocks, in exported version `(slot, creator_address, parents)`. + pub discarded_blocks: PreHashMap))>, + /// Best parents hashes in each thread. + pub best_parents: Vec<(BlockId, u64)>, + /// Latest final period and block hash in each thread. + pub latest_final_blocks_periods: Vec<(BlockId, u64)>, + /// Head of the incompatibility graph. + pub gi_head: PreHashMap>, + /// List of maximal cliques of compatible blocks. + pub max_cliques: Vec, +} diff --git a/massa-consensus-exports/src/block_status.rs b/massa-consensus-exports/src/block_status.rs new file mode 100644 index 00000000000..2138a969489 --- /dev/null +++ b/massa-consensus-exports/src/block_status.rs @@ -0,0 +1,120 @@ +use massa_models::{ + active_block::ActiveBlock, + address::Address, + block::{Block, BlockId, WrappedHeader}, + prehash::PreHashSet, + slot::Slot, +}; +use massa_storage::Storage; +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Clone)] +#[allow(clippy::large_enum_variant)] +pub enum HeaderOrBlock { + Header(WrappedHeader), + Block { + id: BlockId, + slot: Slot, + storage: Storage, + }, +} + +impl HeaderOrBlock { + /// Gets slot for that header or block + pub fn get_slot(&self) -> Slot { + match self { + HeaderOrBlock::Header(header) => header.content.slot, + HeaderOrBlock::Block { slot, .. } => *slot, + } + } +} + +/// Something can be discarded +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub enum DiscardReason { + /// Block is invalid, either structurally, or because of some incompatibility. The String contains the reason for info or debugging. + Invalid(String), + /// Block is incompatible with a final block. 
+ Stale, + /// Block has enough fitness. + Final, +} + +/// Enum used in `BlockGraph`'s state machine +#[derive(Debug, Clone)] +pub enum BlockStatus { + /// The block/header has reached consensus but no consensus-level check has been performed. + /// It will be processed during the next iteration + Incoming(HeaderOrBlock), + /// The block's or header's slot is too much in the future. + /// It will be processed at the block/header slot + WaitingForSlot(HeaderOrBlock), + /// The block references an unknown Block id + WaitingForDependencies { + /// Given header/block + header_or_block: HeaderOrBlock, + /// includes self if it's only a header + unsatisfied_dependencies: PreHashSet, + /// Used to limit and sort the number of blocks/headers waiting for dependencies + sequence_number: u64, + }, + /// The block was checked and included in the blockgraph + Active { + a_block: Box, + storage: Storage, + }, + /// The block was discarded and is kept to avoid reprocessing it + Discarded { + /// Just the slot of that block + slot: Slot, + /// Address of the creator of the block + creator: Address, + /// Ids of parents blocks + parents: Vec, + /// why it was discarded + reason: DiscardReason, + /// Used to limit and sort the number of blocks/headers waiting for dependencies + sequence_number: u64, + }, +} + +/// Block status in the graph that can be exported. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub enum ExportBlockStatus { + /// received but not yet graph processed + Incoming, + /// waiting for its slot + WaitingForSlot, + /// waiting for a missing dependency + WaitingForDependencies, + /// valid and not yet final + Active(Block), + /// immutable + Final(Block), + /// not part of the graph + Discarded(DiscardReason), +} + +/// The block version that can be exported. +/// Note that the detailed list of operation is not exported +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ExportCompiledBlock { + /// Header of the corresponding block. 
+ pub header: WrappedHeader, + /// For (i, set) in children, + /// set contains the headers' hashes + /// of blocks referencing exported block as a parent, + /// in thread i. + pub children: Vec>, + /// Active or final + pub is_final: bool, +} + +/// Status +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub enum Status { + /// without enough fitness to be part of immutable history + Active, + /// with enough fitness to be part of immutable history + Final, +} diff --git a/massa-graph/src/bootstrapable_graph.rs b/massa-consensus-exports/src/bootstrapable_graph.rs similarity index 85% rename from massa-graph/src/bootstrapable_graph.rs rename to massa-consensus-exports/src/bootstrapable_graph.rs index a03e518596d..4ee633653ff 100644 --- a/massa-graph/src/bootstrapable_graph.rs +++ b/massa-consensus-exports/src/bootstrapable_graph.rs @@ -36,7 +36,7 @@ impl BootstrapableGraphSerializer { impl Serializer for BootstrapableGraphSerializer { /// ## Example /// ```rust - /// use massa_graph::{BootstrapableGraph, BootstrapableGraphSerializer}; + /// use massa_consensus_exports::bootstrapable_graph::{BootstrapableGraph, BootstrapableGraphSerializer}; /// use massa_serialization::Serializer; /// use massa_hash::Hash; /// use massa_models::{prehash::PreHashMap, block::BlockId, config::THREAD_COUNT}; @@ -84,13 +84,7 @@ impl BootstrapableGraphDeserializer { thread_count: u8, endorsement_count: u32, max_bootstrap_blocks: u32, - max_datastore_value_length: u64, - max_function_name_length: u16, - max_parameters_size: u32, max_operations_per_block: u32, - max_op_datastore_entry_count: u64, - max_op_datastore_key_length: u8, - max_op_datastore_value_length: u64, ) -> Self { Self { block_count_deserializer: U32VarIntDeserializer::new( @@ -101,12 +95,6 @@ impl BootstrapableGraphDeserializer { thread_count, endorsement_count, max_operations_per_block, - max_datastore_value_length, - max_function_name_length, - max_parameters_size, - max_op_datastore_entry_count, - 
max_op_datastore_key_length, - max_op_datastore_value_length, ), } } @@ -115,7 +103,7 @@ impl BootstrapableGraphDeserializer { impl Deserializer for BootstrapableGraphDeserializer { /// ## Example /// ```rust - /// use massa_graph::{BootstrapableGraph, BootstrapableGraphDeserializer, BootstrapableGraphSerializer}; + /// use massa_consensus_exports::bootstrapable_graph::{BootstrapableGraph, BootstrapableGraphDeserializer, BootstrapableGraphSerializer}; /// use massa_serialization::{Deserializer, Serializer, DeserializeError}; /// use massa_hash::Hash; /// use massa_models::{prehash::PreHashMap, block::BlockId, config::THREAD_COUNT}; @@ -124,7 +112,7 @@ impl Deserializer for BootstrapableGraphDeserializer { /// }; /// let mut buffer = Vec::new(); /// BootstrapableGraphSerializer::new().serialize(&bootstrapable_graph, &mut buffer).unwrap(); - /// let (rest, bootstrapable_graph_deserialized) = BootstrapableGraphDeserializer::new(32, 9, 10, 10, 100, 1000, 1000, 10, 255, 10_000).deserialize::(&buffer).unwrap(); + /// let (rest, bootstrapable_graph_deserialized) = BootstrapableGraphDeserializer::new(32, 16, 10, 10).deserialize::(&buffer).unwrap(); /// let mut buffer2 = Vec::new(); /// BootstrapableGraphSerializer::new().serialize(&bootstrapable_graph_deserialized, &mut buffer2).unwrap(); /// assert_eq!(buffer, buffer2); diff --git a/massa-consensus-exports/src/channels.rs b/massa-consensus-exports/src/channels.rs new file mode 100644 index 00000000000..a895b7cfc97 --- /dev/null +++ b/massa-consensus-exports/src/channels.rs @@ -0,0 +1,18 @@ +use crossbeam_channel::Sender; +use massa_execution_exports::ExecutionController; +use massa_pool_exports::PoolController; +use massa_pos_exports::SelectorController; +use massa_protocol_exports::ProtocolCommandSender; + +use crate::events::ConsensusEvent; + +/// Contains a reference to the pool, selector and execution controller +/// Contains a channel to send info to protocol +#[derive(Clone)] +pub struct ConsensusChannels { + pub 
execution_controller: Box, + pub selector_controller: Box, + pub pool_command_sender: Box, + pub controller_event_tx: Sender, + pub protocol_command_sender: ProtocolCommandSender, +} diff --git a/massa-consensus-exports/src/commands.rs b/massa-consensus-exports/src/commands.rs deleted file mode 100644 index f43c900b9ed..00000000000 --- a/massa-consensus-exports/src/commands.rs +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -//! Contains definitions of commands used by the controller -use massa_graph::{BlockGraphExport, BootstrapableGraph}; -use massa_models::api::BlockGraphStatus; -use massa_models::{block::BlockId, slot::Slot}; -use massa_models::{clique::Clique, stats::ConsensusStats}; -use massa_storage::Storage; -use tokio::sync::{mpsc, oneshot}; - -/// Commands that can be processed by consensus. -#[derive(Debug)] -pub enum ConsensusCommand { - /// Returns through a channel current blockgraph without block operations. - GetBlockGraphStatus { - /// optional start slot - slot_start: Option, - /// optional end slot - slot_end: Option, - /// response channel - response_tx: oneshot::Sender, - }, - /// Returns through a channel the graph statuses of a batch of blocks - GetBlockStatuses { - /// wanted block IDs - ids: Vec, - /// response channel - response_tx: oneshot::Sender>, - }, - /// Returns the bootstrap state - GetBootstrapState(mpsc::Sender>), - /// get current stats on consensus - GetStats(oneshot::Sender), - /// Get a block at a given slot in a blockclique - GetBlockcliqueBlockAtSlot { - /// wanted slot - slot: Slot, - /// response channel - response_tx: oneshot::Sender>, - }, - /// Get a block at a given slot in a blockclique - GetLatestBlockcliqueBlockAtSlot { - /// wanted slot - slot: Slot, - /// response channel - response_tx: oneshot::Sender, - }, - /// Get the best parents and their period - GetBestParents { - /// response channel - response_tx: oneshot::Sender>, - }, - /// Send a block - SendBlock { - /// block id - block_id: 
BlockId, - /// block slot - slot: Slot, - /// All the objects for the block - block_storage: Storage, - /// response channel - response_tx: oneshot::Sender<()>, - }, - /// Get cliques - GetCliques(oneshot::Sender>), -} - -/// Events that are emitted by consensus. -#[derive(Debug, Clone)] -pub enum ConsensusManagementCommand {} diff --git a/massa-consensus-exports/src/consensus_controller.rs b/massa-consensus-exports/src/consensus_controller.rs deleted file mode 100644 index 6657e1d17b9..00000000000 --- a/massa-consensus-exports/src/consensus_controller.rs +++ /dev/null @@ -1,269 +0,0 @@ -//! Copyright (c) 2022 MASSA LABS - -use massa_graph::{BlockGraphExport, BootstrapableGraph}; -use massa_models::api::BlockGraphStatus; -use massa_models::{block::BlockId, slot::Slot}; -use massa_models::{clique::Clique, stats::ConsensusStats}; -use massa_protocol_exports::ProtocolEventReceiver; -use massa_storage::Storage; -use std::collections::VecDeque; - -use tokio::{ - sync::{mpsc, oneshot}, - task::JoinHandle, -}; - -use crate::{ - commands::{ConsensusCommand, ConsensusManagementCommand}, - error::ConsensusResult as Result, - events::ConsensusEvent, - ConsensusError, -}; - -/// Consensus commands sender -/// TODO Make private -#[derive(Clone)] -pub struct ConsensusCommandSender(pub mpsc::Sender); - -impl ConsensusCommandSender { - /// Gets all the available information on the block graph returning a `BlockGraphExport`. - /// - /// # Arguments - /// * `slot_start`: optional slot start for slot-based filtering (included). - /// * `slot_end`: optional slot end for slot-based filtering (excluded). 
- pub async fn get_block_graph_status( - &self, - slot_start: Option, - slot_end: Option, - ) -> Result { - let (response_tx, response_rx) = oneshot::channel::(); - self.0 - .send(ConsensusCommand::GetBlockGraphStatus { - slot_start, - slot_end, - response_tx, - }) - .await - .map_err(|_| { - ConsensusError::SendChannelError( - "send error consensus command get_block_graph_status".to_string(), - ) - })?; - response_rx.await.map_err(|_| { - ConsensusError::ReceiveChannelError( - "consensus command get_block_graph_status response read error".to_string(), - ) - }) - } - - /// Gets all cliques. - /// - pub async fn get_cliques(&self) -> Result, ConsensusError> { - let (response_tx, response_rx) = oneshot::channel::>(); - self.0 - .send(ConsensusCommand::GetCliques(response_tx)) - .await - .map_err(|_| { - ConsensusError::SendChannelError( - "send error consensus command get_cliques".to_string(), - ) - })?; - response_rx.await.map_err(|_| { - ConsensusError::ReceiveChannelError( - "consensus command get_cliques response read error".to_string(), - ) - }) - } - - /// Gets the graph statuses of a batch of blocks. 
- /// - /// # Arguments - /// * ids: array of block IDs - pub async fn get_block_statuses( - &self, - ids: &[BlockId], - ) -> Result, ConsensusError> { - let (response_tx, response_rx) = oneshot::channel::>(); - self.0 - .send(ConsensusCommand::GetBlockStatuses { - ids: ids.to_vec(), - response_tx, - }) - .await - .map_err(|_| { - ConsensusError::SendChannelError( - "send error consensus command get_block_statuses".to_string(), - ) - })?; - response_rx.await.map_err(|_| { - ConsensusError::ReceiveChannelError( - "consensus command get_block_statuses response read error".to_string(), - ) - }) - } - - /// get bootstrap snapshot - pub async fn get_bootstrap_state(&self) -> Result { - let (response_tx, mut response_rx) = mpsc::channel::>(10); - self.0 - .send(ConsensusCommand::GetBootstrapState(response_tx)) - .await - .map_err(|_| { - ConsensusError::SendChannelError( - "send error consensus command get_bootstrap_state".into(), - ) - })?; - Ok(*response_rx.recv().await.ok_or_else(|| { - ConsensusError::ReceiveChannelError( - "consensus command get_bootstrap_state response read error".to_string(), - ) - })?) 
- } - - /// get best parents - pub fn get_best_parents(&self) -> Result, ConsensusError> { - let (response_tx, response_rx) = oneshot::channel::>(); - self.0 - .blocking_send(ConsensusCommand::GetBestParents { response_tx }) - .map_err(|_| { - ConsensusError::SendChannelError( - "send error consensus command get_best_parents".into(), - ) - })?; - response_rx.blocking_recv().map_err(|_| { - ConsensusError::ReceiveChannelError( - "consensus command get_best_parents response read error".to_string(), - ) - }) - } - - /// get block id of a slot in a blockclique - pub async fn get_blockclique_block_at_slot( - &self, - slot: Slot, - ) -> Result, ConsensusError> { - let (response_tx, response_rx) = oneshot::channel(); - self.0 - .send(ConsensusCommand::GetBlockcliqueBlockAtSlot { slot, response_tx }) - .await - .map_err(|_| { - ConsensusError::SendChannelError( - "send error consensus command get_blockclique_block_at_slot".into(), - ) - })?; - response_rx.await.map_err(|_| { - ConsensusError::ReceiveChannelError( - "consensus command get_blockclique_block_at_slot response read error".to_string(), - ) - }) - } - - /// get latest block id of a slot in a blockclique - pub fn get_latest_blockclique_block_at_slot( - &self, - slot: Slot, - ) -> Result { - let (response_tx, response_rx) = oneshot::channel(); - self.0 - .blocking_send(ConsensusCommand::GetLatestBlockcliqueBlockAtSlot { slot, response_tx }) - .map_err(|_| { - ConsensusError::SendChannelError( - "send error consensus command get_blockclique_block_at_slot".into(), - ) - })?; - response_rx.blocking_recv().map_err(|_| { - ConsensusError::ReceiveChannelError( - "consensus command get_blockclique_block_at_slot response read error".to_string(), - ) - }) - } - - /// get current consensus stats - pub async fn get_stats(&self) -> Result { - let (response_tx, response_rx) = oneshot::channel(); - self.0 - .send(ConsensusCommand::GetStats(response_tx)) - .await - .map_err(|_| { - ConsensusError::SendChannelError( - "send error 
consensus command get_stats".to_string(), - ) - })?; - response_rx.await.map_err(|_| { - ConsensusError::ReceiveChannelError( - "consensus command get_stats response read error".to_string(), - ) - }) - } - - ///send block - pub fn send_block( - &self, - block_id: BlockId, - slot: Slot, - block_storage: Storage, - ) -> Result<(), ConsensusError> { - let (response_tx, response_rx) = oneshot::channel(); - self.0 - .blocking_send(ConsensusCommand::SendBlock { - block_id, - slot, - block_storage, - response_tx, - }) - .map_err(|_| { - ConsensusError::SendChannelError("send error consensus command send_block".into()) - })?; - response_rx.blocking_recv().map_err(|_| { - ConsensusError::ReceiveChannelError( - "consensus command send_block response read error".to_string(), - ) - }) - } -} - -/// channel to receive consensus events -pub struct ConsensusEventReceiver(pub mpsc::Receiver); - -impl ConsensusEventReceiver { - /// wait for the next event - pub async fn wait_event(&mut self) -> Result { - self.0 - .recv() - .await - .ok_or(ConsensusError::ControllerEventError) - } - - /// drains remaining events and returns them in a `VecDeque` - /// note: events are sorted from oldest to newest - pub async fn drain(mut self) -> VecDeque { - let mut remaining_events: VecDeque = VecDeque::new(); - - while let Some(evt) = self.0.recv().await { - remaining_events.push_back(evt); - } - remaining_events - } -} - -/// Consensus manager -pub struct ConsensusManager { - /// protocol handler - pub join_handle: JoinHandle>, - /// consensus management sender - pub manager_tx: mpsc::Sender, -} - -impl ConsensusManager { - /// stop consensus - pub async fn stop( - self, - consensus_event_receiver: ConsensusEventReceiver, - ) -> Result { - drop(self.manager_tx); - let _remaining_events = consensus_event_receiver.drain().await; - let protocol_event_receiver = self.join_handle.await??; - - Ok(protocol_event_receiver) - } -} diff --git a/massa-consensus-exports/src/controller_trait.rs 
b/massa-consensus-exports/src/controller_trait.rs new file mode 100644 index 00000000000..b46baf1764d --- /dev/null +++ b/massa-consensus-exports/src/controller_trait.rs @@ -0,0 +1,139 @@ +use crate::block_graph_export::BlockGraphExport; +use crate::{bootstrapable_graph::BootstrapableGraph, error::ConsensusError}; +use massa_models::prehash::PreHashSet; +use massa_models::streaming_step::StreamingStep; +use massa_models::{ + api::BlockGraphStatus, + block::{BlockHeader, BlockId}, + clique::Clique, + slot::Slot, + stats::ConsensusStats, + wrapped::Wrapped, +}; +use massa_storage::Storage; + +/// interface that communicates with the graph worker thread +pub trait ConsensusController: Send + Sync { + /// Get an export of a part of the graph + /// + /// # Arguments + /// * `start_slot`: the slot to start the export from, if None, the export starts from the genesis + /// * `end_slot`: the slot to end the export at, if None, the export ends at the current slot + /// + /// # Returns + /// The export of the graph + fn get_block_graph_status( + &self, + start_slot: Option, + end_slot: Option, + ) -> Result; + + /// Get statuses of a list of blocks + /// + /// # Arguments + /// * `block_ids`: the list of block ids to get the status of + /// + /// # Returns + /// The statuses of the blocks sorted by the order of the input list + fn get_block_statuses(&self, ids: &[BlockId]) -> Vec; + + /// Get all the cliques of the graph + /// + /// # Returns + /// The list of cliques + fn get_cliques(&self) -> Vec; + + /// Get a graph to bootstrap from + /// + /// # Returns + /// * a part of the graph + /// * outdated block ids + /// * the updated streaming step + #[allow(clippy::type_complexity)] + fn get_bootstrap_part( + &self, + cursor: StreamingStep>, + execution_cursor: StreamingStep, + ) -> Result< + ( + BootstrapableGraph, + PreHashSet, + StreamingStep>, + ), + ConsensusError, + >; + + /// Get the stats of the consensus + /// + /// # Returns + /// The stats of the consensus + fn 
get_stats(&self) -> Result; + + /// Get the best parents for the next block to be produced + /// + /// # Returns + /// The id of best parents for the next block to be produced along with their period + fn get_best_parents(&self) -> Vec<(BlockId, u64)>; + + /// Get the block id of the block at a specific slot in the blockclique + /// + /// # Arguments + /// * `slot`: the slot to get the block id of + /// + /// # Returns + /// The block id of the block at the specified slot if exists + fn get_blockclique_block_at_slot(&self, slot: Slot) -> Option; + + /// Get the latest block, that is in the blockclique, in the thread of the given slot and before this `slot`. + /// + /// # Arguments: + /// * `slot`: the slot that will give us the thread and the upper bound + /// + /// # Returns: + /// The block id of the latest block in the thread of the given slot and before this slot + fn get_latest_blockclique_block_at_slot(&self, slot: Slot) -> BlockId; + + /// Register a block in the graph + /// + /// # Arguments + /// * `block_id`: the id of the block to register + /// * `slot`: the slot of the block + /// * `block_storage`: the storage that contains all the objects of the block + /// * `created`: is the block created by our node ? + fn register_block(&self, block_id: BlockId, slot: Slot, block_storage: Storage, created: bool); + + /// Register a block header in the graph + /// + /// # Arguments + /// * `block_id`: the id of the block to register + /// * `header`: the header of the block to register + fn register_block_header(&self, block_id: BlockId, header: Wrapped); + + /// Mark a block as invalid in the graph + /// + /// # Arguments + /// * `block_id`: the id of the block to mark as invalid + /// * `header`: the header of the block to mark as invalid + fn mark_invalid_block(&self, block_id: BlockId, header: Wrapped); + + /// Returns a boxed clone of self. + /// Useful to allow cloning `Box`. 
+ fn clone_box(&self) -> Box; +} + +/// Allow cloning `Box` +/// Uses `ConsensusController::clone_box` internally +impl Clone for Box { + fn clone(&self) -> Box { + self.clone_box() + } +} + +/// Consensus manager used to stop the consensus thread +pub trait ConsensusManager { + /// Stop the consensus thread + /// Note that we do not take self by value to consume it + /// because it is not allowed to move out of Box + /// This will improve if the `unsized_fn_params` feature stabilizes enough to be safely usable. + fn stop(&mut self); +} diff --git a/massa-consensus-exports/src/error.rs b/massa-consensus-exports/src/error.rs index f74321ae503..417a1d6ce9d 100644 --- a/massa-consensus-exports/src/error.rs +++ b/massa-consensus-exports/src/error.rs @@ -1,15 +1,47 @@ // Copyright (c) 2022 MASSA LABS use displaydoc::Display; use massa_execution_exports::ExecutionError; -use massa_graph::error::GraphError; use massa_models::error::ModelsError; use massa_protocol_exports::ProtocolError; +use massa_time::TimeError; +use std::array::TryFromSliceError; use thiserror::Error; -use crate::events::ConsensusEvent; - -/// Consensus -pub type ConsensusResult = core::result::Result; +/// Consensus error +#[non_exhaustive] +#[derive(Display, Error, Debug)] +pub enum ConsensusError { + /// execution error: {0} + ExecutionError(#[from] ExecutionError), + /// models error: {0} + ModelsError(#[from] ModelsError), + /// Could not create genesis block {0} + GenesisCreationError(String), + /// missing block {0} + MissingBlock(String), + /// missing operation {0} + MissingOperation(String), + /// there was an inconsistency between containers {0} + ContainerInconsistency(String), + /// fitness overflow + FitnessOverflow, + /// invalid ledger change: {0} + InvalidLedgerChange(String), + /// io error {0} + IOError(#[from] std::io::Error), + /// serde error + SerdeError(#[from] serde_json::Error), + /// Proof of stake cycle unavailable {0} + PosCycleUnavailable(String), + /// Ledger error {0} + 
LedgerError(#[from] LedgerError), + /// Massa time error {0} + MassaTimeError(#[from] TimeError), + /// transaction error {0} + TransactionError(String), + /// Protocol error {0} + ProtocolError(#[from] ProtocolError), +} /// Internal error #[non_exhaustive] @@ -19,50 +51,20 @@ pub enum InternalError { TransactionError(String), } -/// Consensus errors +/// Ledger error #[non_exhaustive] #[derive(Display, Error, Debug)] -pub enum ConsensusError { - /// execution error: {0} - ExecutionError(#[from] ExecutionError), +pub enum LedgerError { + /// amount overflow + AmountOverflowError, + /// ledger inconsistency error {0} + LedgerInconsistency(String), /// models error: {0} ModelsError(#[from] ModelsError), - /// configuration error: {0} - ConfigError(String), - /// Protocol error {0} - ProtocolError(#[from] Box), - /// failed retrieving consensus controller event - ControllerEventError, - /// Join error {0} - JoinError(#[from] tokio::task::JoinError), - /// Time error {0} - TimeError(#[from] massa_time::TimeError), - /// there was an inconsistency between containers {0} - ContainerInconsistency(String), - /// Send channel error : {0} - SendChannelError(String), - /// Receive channel error : {0} - ReceiveChannelError(String), + /// try from slice error {0} + TryFromSliceError(#[from] TryFromSliceError), /// io error {0} IOError(#[from] std::io::Error), - /// missing block {0} - MissingBlock(String), - /// block creation error {0} - BlockCreationError(String), - /// error sending consensus event: {0} - TokioSendError(#[from] tokio::sync::mpsc::error::SendError), - /// channel error: {0} - ChannelError(String), - /// Graph error: {0} - GraphError(#[from] GraphError), - /// slot overflow - SlotOverflowError, - /// `MassaCipher` error: {0} - MassaCipherError(#[from] massa_cipher::CipherError), -} - -impl std::convert::From for ConsensusError { - fn from(err: massa_protocol_exports::ProtocolError) -> Self { - ConsensusError::ProtocolError(Box::new(err)) - } + /// serde error 
+ SerdeError(#[from] serde_json::Error), } diff --git a/massa-graph/src/export_active_block.rs b/massa-consensus-exports/src/export_active_block.rs similarity index 70% rename from massa-graph/src/export_active_block.rs rename to massa-consensus-exports/src/export_active_block.rs index dbf93c88f14..7f594f549ed 100644 --- a/massa-graph/src/export_active_block.rs +++ b/massa-consensus-exports/src/export_active_block.rs @@ -1,15 +1,13 @@ -use crate::error::{GraphError, GraphResult as Result}; +use crate::error::ConsensusError; use massa_hash::HashDeserializer; use massa_models::{ active_block::ActiveBlock, block::{Block, BlockDeserializer, BlockId, WrappedBlock}, - operation::{Operation, OperationDeserializer, WrappedOperation}, - prehash::{PreHashMap, PreHashSet}, + prehash::PreHashMap, wrapped::{WrappedDeserializer, WrappedSerializer}, }; use massa_serialization::{ - Deserializer, SerializeError, Serializer, U32VarIntDeserializer, U32VarIntSerializer, - U64VarIntDeserializer, U64VarIntSerializer, + Deserializer, SerializeError, Serializer, U64VarIntDeserializer, U64VarIntSerializer, }; use massa_storage::Storage; use nom::branch::alt; @@ -17,7 +15,7 @@ use nom::{ bytes::complete::tag, combinator::value, error::{ContextError, ParseError}, - multi::{count, length_count}, + multi::count, sequence::{preceded, tuple}, }; use nom::{error::context, IResult, Parser}; @@ -30,8 +28,6 @@ use std::ops::Bound::Included; pub struct ExportActiveBlock { /// The block. pub block: WrappedBlock, - /// The operations. 
- pub operations: Vec, /// one `(block id, period)` per thread ( if not genesis ) pub parents: Vec<(BlockId, u64)>, /// for example has its fitness reached the given threshold @@ -47,26 +43,9 @@ impl ExportActiveBlock { .get(&a_block.block_id) .expect("active block missing in storage") .clone(); - // get ops - let operations = { - let read_ops = storage.read_operations(); - block - .content - .operations - .iter() - .map(|op_id| { - read_ops - .get(op_id) - .expect("active block operation missing in storage") - .clone() - }) - .collect() - }; - - // TODO if we deciede that endorsements are separate, also gather endorsements here + // TODO: if we decide that endorsements are separate, also gather endorsements here ExportActiveBlock { - operations, parents: a_block.parents.clone(), is_final: a_block.is_final, block, @@ -78,28 +57,10 @@ impl ExportActiveBlock { self, ref_storage: &Storage, thread_count: u8, - ) -> Result<(ActiveBlock, Storage), GraphError> { + ) -> Result<(ActiveBlock, Storage), ConsensusError> { // create resulting storage let mut storage = ref_storage.clone_without_refs(); - // add operations to storage and claim refs - storage.store_operations(self.operations); - - // check that the block operations match the stored ones - if storage.get_op_refs() - != &self - .block - .content - .operations - .iter() - .cloned() - .collect::>() - { - return Err(GraphError::MissingOperation( - "operation list mismatch on active block conversion".into(), - )); - } - // add endorsements to storage and claim refs // TODO change if we decide that endorsements are stored separately storage.store_endorsements(self.block.content.header.content.endorsements.clone()); @@ -130,7 +91,6 @@ impl ExportActiveBlock { pub struct ExportActiveBlockSerializer { wrapped_serializer: WrappedSerializer, period_serializer: U64VarIntSerializer, - operation_count_serializer: U32VarIntSerializer, } impl ExportActiveBlockSerializer { @@ -139,7 +99,6 @@ impl ExportActiveBlockSerializer { 
ExportActiveBlockSerializer { wrapped_serializer: WrappedSerializer::new(), period_serializer: U64VarIntSerializer::new(), - operation_count_serializer: U32VarIntSerializer::new(), } } } @@ -153,19 +112,6 @@ impl Serializer for ExportActiveBlockSerializer { // block self.wrapped_serializer.serialize(&value.block, buffer)?; - // operations - self.operation_count_serializer.serialize( - &value - .operations - .len() - .try_into() - .map_err(|_| SerializeError::NumberTooBig("Too many operations".to_string()))?, - buffer, - )?; - for op in &value.operations { - self.wrapped_serializer.serialize(op, buffer)?; - } - // parents with periods // note: there should be no parents for genesis blocks buffer.push(u8::from(!value.parents.is_empty())); @@ -184,45 +130,21 @@ impl Serializer for ExportActiveBlockSerializer { /// Basic deserializer of `ExportActiveBlock` pub struct ExportActiveBlockDeserializer { wrapped_block_deserializer: WrappedDeserializer, - wrapped_operation_deserializer: WrappedDeserializer, hash_deserializer: HashDeserializer, period_deserializer: U64VarIntDeserializer, - operation_count_serializer: U32VarIntDeserializer, thread_count: u8, } impl ExportActiveBlockDeserializer { /// Create a new `ExportActiveBlockDeserializer` #[allow(clippy::too_many_arguments)] - pub fn new( - thread_count: u8, - endorsement_count: u32, - max_operations_per_block: u32, - max_datastore_value_length: u64, - max_function_name_length: u16, - max_parameters_size: u32, - max_op_datastore_entry_count: u64, - max_op_datastore_key_length: u8, - max_op_datastore_value_length: u64, - ) -> Self { + pub fn new(thread_count: u8, endorsement_count: u32, max_operations_per_block: u32) -> Self { ExportActiveBlockDeserializer { wrapped_block_deserializer: WrappedDeserializer::new(BlockDeserializer::new( thread_count, max_operations_per_block, endorsement_count, )), - wrapped_operation_deserializer: WrappedDeserializer::new(OperationDeserializer::new( - max_datastore_value_length, - 
max_function_name_length, - max_parameters_size, - max_op_datastore_entry_count, - max_op_datastore_key_length, - max_op_datastore_value_length, - )), - operation_count_serializer: U32VarIntDeserializer::new( - Included(0), - Included(max_operations_per_block), - ), hash_deserializer: HashDeserializer::new(), period_deserializer: U64VarIntDeserializer::new(Included(0), Included(u64::MAX)), thread_count, @@ -233,7 +155,7 @@ impl ExportActiveBlockDeserializer { impl Deserializer for ExportActiveBlockDeserializer { /// ## Example: /// ```rust - /// use massa_graph::export_active_block::{ExportActiveBlock, ExportActiveBlockDeserializer, ExportActiveBlockSerializer}; + /// use massa_consensus_exports::export_active_block::{ExportActiveBlock, ExportActiveBlockDeserializer, ExportActiveBlockSerializer}; /// use massa_models::{ledger_models::LedgerChanges, config::THREAD_COUNT, rolls::RollUpdates, block::{BlockId, Block, BlockSerializer, BlockHeader, BlockHeaderSerializer}, prehash::PreHashSet, endorsement::{Endorsement, EndorsementSerializerLW}, slot::Slot, wrapped::WrappedContent}; /// use massa_hash::Hash; /// use std::collections::HashSet; @@ -289,13 +211,12 @@ impl Deserializer for ExportActiveBlockDeserializer { /// let export_active_block = ExportActiveBlock { /// block: full_block.clone(), /// parents: vec![], - /// operations: vec![], /// is_final: false, /// }; /// /// let mut serialized = Vec::new(); /// ExportActiveBlockSerializer::new().serialize(&export_active_block, &mut serialized).unwrap(); - /// let (rest, export_deserialized) = ExportActiveBlockDeserializer::new(32, 9, 1000, 1000, 1000, 1000, 10, 255, 10_000).deserialize::(&serialized).unwrap(); + /// let (rest, export_deserialized) = ExportActiveBlockDeserializer::new(32, 16, 1000).deserialize::(&serialized).unwrap(); /// assert_eq!(export_deserialized.block.id, export_active_block.block.id); /// assert_eq!(export_deserialized.block.serialized_data, export_active_block.block.serialized_data); /// 
assert_eq!(rest.len(), 0); @@ -311,18 +232,6 @@ impl Deserializer for ExportActiveBlockDeserializer { context("Failed block deserialization", |input| { self.wrapped_block_deserializer.deserialize(input) }), - // operations - context( - "Failed operations deserialization", - length_count( - context("Failed operation count deserialization", |input| { - self.operation_count_serializer.deserialize(input) - }), - context("Failed operation deserialization", |input| { - self.wrapped_operation_deserializer.deserialize(input) - }), - ), - ), // parents context( "Failed parents deserialization", @@ -353,9 +262,8 @@ impl Deserializer for ExportActiveBlockDeserializer { ), )), ) - .map(|(block, operations, parents, is_final)| ExportActiveBlock { + .map(|(block, parents, is_final)| ExportActiveBlock { block, - operations, parents, is_final, }) diff --git a/massa-consensus-exports/src/lib.rs b/massa-consensus-exports/src/lib.rs index c25fef460ae..13eb8124690 100644 --- a/massa-consensus-exports/src/lib.rs +++ b/massa-consensus-exports/src/lib.rs @@ -1,29 +1,22 @@ // Copyright (c) 2022 MASSA LABS -//! Consensus exports -#![feature(async_closure)] -#![feature(hash_drain_filter)] -#![feature(int_roundings)] -#![warn(missing_docs)] -#![warn(unused_crate_dependencies)] +//! Definition and exports of the graph types and errors. 
-pub use consensus_controller::{ConsensusCommandSender, ConsensusEventReceiver, ConsensusManager}; -pub use error::ConsensusError; -pub use settings::ConsensusConfig; - -mod consensus_controller; +mod channels; +mod controller_trait; +mod settings; -/// consensus errors +pub mod block_graph_export; +pub mod block_status; +pub mod bootstrapable_graph; pub mod error; - -/// consensus settings -pub mod settings; - -/// consensus commands -pub mod commands; - -/// consensus events pub mod events; +pub mod export_active_block; + +pub use channels::ConsensusChannels; +pub use controller_trait::{ConsensusController, ConsensusManager}; +pub use settings::ConsensusConfig; -/// consensus test tools +/// Test utils #[cfg(feature = "testing")] +/// Exports related to tests as Mocks and configurations pub mod test_exports; diff --git a/massa-consensus-exports/src/settings.rs b/massa-consensus-exports/src/settings.rs index f581026dfc3..e479692b21e 100644 --- a/massa-consensus-exports/src/settings.rs +++ b/massa-consensus-exports/src/settings.rs @@ -1,218 +1,51 @@ -// Copyright (c) 2022 MASSA LABS -#![allow(clippy::assertions_on_constants)] -//! Definition & Implementation of the consensus settings -//! ----------------------------------------------------- -//! -//! # Configurations -//! -//! * `setting`: read from user settings file -//! * `config`: merge of settings and hard-coded configuration that shouldn't be -//! modified by user. -//! -//! This file is allowed to use a lot of constants from `massa-models` as all -//! other files named `settings.rs` or `config.rs`. -//! -//! The `ConsensusSettings` is the most basic and complete configuration in the -//! node. You can get almost every configuration from that one. -//! -//! `From impl *`: -//! - `ConsensusConfig`: Create a configuration merging user settings and hard-coded values -//! (see `/massa-models/node_configuration/*`) -//! -//! `From<&ConsensusConfig> impl *`: -//! - `GraphConfig` -//! - `LedgerConfig` -//! 
- `ProofOfStakeConfig` -//! -//! > Development note: We clone the values on getting a configuration from another. -//! -//! # Usage of constants -//! -//! The default configuration is loaded from the `massa-models` crate. You shouldn't -//! write an hard-coded value in the following file but create a new value in -//! `default.rs` and the testing default equivalent value in `default_testing.rs`. See -//! `/node_configuration/mod.rs` documentation in `massa-models` sources for more -//! information. -//! -//! # Channels -//! -//! The following file contains the definition of the Channels structures used in -//! the current module. -//! -//! # Testing feature -//! -//! In unit test your allowed to use the `testing` feature flag that will -//! use the default values from `/node_configuration/default_testing.rs` in the -//! `massa-models` crate sources. -use massa_execution_exports::ExecutionController; -use massa_graph::settings::GraphConfig; -use massa_pool_exports::PoolController; -use massa_pos_exports::SelectorController; -use massa_protocol_exports::{ProtocolCommandSender, ProtocolEventReceiver}; use massa_signature::KeyPair; use massa_time::MassaTime; -use tokio::sync::mpsc; +use serde::{Deserialize, Serialize}; -use crate::{ - commands::{ConsensusCommand, ConsensusManagementCommand}, - events::ConsensusEvent, -}; - -/// Consensus full configuration (static + user defined) -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Deserialize, Serialize)] pub struct ConsensusConfig { - /// Time in milliseconds when the blockclique started. + /// Clock compensation + pub clock_compensation_millis: i64, + /// Genesis timestamp pub genesis_timestamp: MassaTime, - /// TESTNET: time when the blockclique is ended. - pub end_timestamp: Option, + /// Delta time between two period + pub t0: MassaTime, /// Number of threads pub thread_count: u8, - /// Time between the periods in the same thread. - pub t0: MassaTime, - /// `KeyPair` to sign genesis blocks. 
+ /// Keypair to sign genesis blocks. pub genesis_key: KeyPair, /// Maximum number of blocks allowed in discarded blocks. pub max_discarded_blocks: usize, - /// If a block is `future_block_processing_max_periods` periods in the future, it is just discarded. + /// If a block `is future_block_processing_max_periods` periods in the future, it is just discarded. pub future_block_processing_max_periods: u64, /// Maximum number of blocks allowed in `FutureIncomingBlocks`. pub max_future_processing_blocks: usize, /// Maximum number of blocks allowed in `DependencyWaitingBlocks`. pub max_dependency_blocks: usize, + /// max event send wait + pub max_send_wait: MassaTime, + /// old blocks are pruned every `block_db_prune_interval` + pub block_db_prune_interval: MassaTime, + /// max number of items returned while querying + pub max_item_return_count: usize, + /// Max gas per block for the execution configuration + pub max_gas_per_block: u64, /// Threshold for fitness. pub delta_f0: u64, /// Maximum operation validity period count pub operation_validity_periods: u64, /// cycle duration in periods pub periods_per_cycle: u64, - /// stats time span - pub stats_timespan: MassaTime, - /// max event send wait - pub max_send_wait: MassaTime, /// force keep at least this number of final periods in RAM for each thread pub force_keep_final_periods: u64, /// target number of endorsement per block pub endorsement_count: u32, - /// old blocks are pruned every `block_db_prune_interval` - pub block_db_prune_interval: MassaTime, - /// max number of items returned while querying - pub max_item_return_count: usize, - /// Max gas per block for the execution configuration - pub max_gas_per_block: u64, + /// TESTNET: time when the blockclique is ended. 
+ pub end_timestamp: Option, + /// stats time span + pub stats_timespan: MassaTime, /// channel size pub channel_size: usize, -} - -impl From<&ConsensusConfig> for GraphConfig { - fn from(cfg: &ConsensusConfig) -> Self { - GraphConfig { - thread_count: cfg.thread_count, - genesis_key: cfg.genesis_key.clone(), - max_discarded_blocks: cfg.max_discarded_blocks, - future_block_processing_max_periods: cfg.future_block_processing_max_periods, - max_future_processing_blocks: cfg.max_future_processing_blocks, - max_dependency_blocks: cfg.max_dependency_blocks, - delta_f0: cfg.delta_f0, - operation_validity_periods: cfg.operation_validity_periods, - periods_per_cycle: cfg.periods_per_cycle, - force_keep_final_periods: cfg.force_keep_final_periods, - endorsement_count: cfg.endorsement_count, - max_item_return_count: cfg.max_item_return_count, - } - } -} - -/// Communication asynchronous channels for the consensus worker -/// Contains consensus channels associated (protocol & execution) -/// Contains also controller asynchronous channels (command, manager receivers and event sender) -/// Contains a sender to the pool worker commands -pub struct ConsensusWorkerChannels { - /// Associated protocol command sender. - pub protocol_command_sender: ProtocolCommandSender, - /// Associated protocol event listener. - pub protocol_event_receiver: ProtocolEventReceiver, - /// Execution command sender. - pub execution_controller: Box, - /// Associated Pool command sender. - pub pool_command_sender: Box, - /// Selector controller - pub selector_controller: Box, - /// Channel receiving consensus commands. - pub controller_command_rx: mpsc::Receiver, - /// Channel sending out consensus events. - pub controller_event_tx: mpsc::Sender, - /// Channel receiving consensus management commands. - pub controller_manager_rx: mpsc::Receiver, -} - -/// Public channels associated to the consensus module. 
-/// Execution & Protocol Sender/Receiver -pub struct ConsensusChannels { - /// outgoing link to execution component - pub execution_controller: Box, - /// outgoing link to protocol component - pub protocol_command_sender: ProtocolCommandSender, - /// incoming link to protocol component - pub protocol_event_receiver: ProtocolEventReceiver, - /// outgoing link to pool component - pub pool_command_sender: Box, - /// selector controller - pub selector_controller: Box, -} - -#[cfg(feature = "testing")] -/// -/// Create the default value of `ConsensusConfig`. -/// -/// Configuration has default values described in crate `massa-models`. -/// The most of `ConsensusConfig` values have in test mode a default value. -/// -/// You can create a `ConsensusConfig` with classic default values and redefining -/// dynamically the values of desired parameters: -/// -/// ```ignore -/// let cfg = ConsensusConfig { -/// max_discarded_blocks: 25, -/// ..Default::default() -/// }; -/// ``` -/// -/// You can also look at the divers `default()` implementation bellow. 
For example that -/// one is used to initialize the _default paths_ : -/// -/// ```ignore -/// let cfg = ConsensusConfig { -/// max_discarded_blocks: 21, -/// ..ConsensusConfig::default_with_paths(), -/// }; -/// ``` -/// -impl Default for ConsensusConfig { - fn default() -> Self { - use massa_models::config::*; - Self { - // reset genesis timestamp because we are in test mode that can take a while to process - genesis_timestamp: MassaTime::now(0) - .expect("Impossible to reset the timestamp in test"), - end_timestamp: *END_TIMESTAMP, - thread_count: THREAD_COUNT, - t0: T0, - genesis_key: GENESIS_KEY.clone(), - max_discarded_blocks: 100, - future_block_processing_max_periods: 2, - max_future_processing_blocks: 10, - max_dependency_blocks: 100, - delta_f0: DELTA_F0, - operation_validity_periods: OPERATION_VALIDITY_PERIODS, - periods_per_cycle: PERIODS_PER_CYCLE, - stats_timespan: MassaTime::from_millis(1000), - max_send_wait: MassaTime::from_millis(1000), - force_keep_final_periods: 20, - endorsement_count: ENDORSEMENT_COUNT, - block_db_prune_interval: MassaTime::from_millis(1000), - max_item_return_count: 100, - max_gas_per_block: MAX_GAS_PER_BLOCK, - channel_size: CHANNEL_SIZE, - } - } + /// size of a consensus bootstrap streaming part + pub bootstrap_part_size: u64, } diff --git a/massa-consensus-exports/src/test_exports/config.rs b/massa-consensus-exports/src/test_exports/config.rs new file mode 100644 index 00000000000..29ce87aba80 --- /dev/null +++ b/massa-consensus-exports/src/test_exports/config.rs @@ -0,0 +1,39 @@ +use massa_models::config::{ + constants::{ + CHANNEL_SIZE, DELTA_F0, ENDORSEMENT_COUNT, GENESIS_KEY, GENESIS_TIMESTAMP, + MAX_GAS_PER_BLOCK, OPERATION_VALIDITY_PERIODS, PERIODS_PER_CYCLE, T0, THREAD_COUNT, + }, + CONSENSUS_BOOTSTRAP_PART_SIZE, +}; +use massa_time::MassaTime; + +use crate::ConsensusConfig; + +impl Default for ConsensusConfig { + fn default() -> Self { + Self { + clock_compensation_millis: 0, + genesis_timestamp: 
*GENESIS_TIMESTAMP, + t0: T0, + thread_count: THREAD_COUNT, + genesis_key: GENESIS_KEY.clone(), + max_discarded_blocks: 10000, + future_block_processing_max_periods: 100, + max_future_processing_blocks: 100, + max_dependency_blocks: 2048, + max_send_wait: MassaTime::from_millis(100), + block_db_prune_interval: MassaTime::from_millis(5000), + max_item_return_count: 100, + max_gas_per_block: MAX_GAS_PER_BLOCK, + delta_f0: DELTA_F0, + operation_validity_periods: OPERATION_VALIDITY_PERIODS, + periods_per_cycle: PERIODS_PER_CYCLE, + force_keep_final_periods: 20, + endorsement_count: ENDORSEMENT_COUNT, + end_timestamp: None, + stats_timespan: MassaTime::from_millis(60000), + channel_size: CHANNEL_SIZE, + bootstrap_part_size: CONSENSUS_BOOTSTRAP_PART_SIZE, + } + } +} diff --git a/massa-consensus-exports/src/test_exports/mock.rs b/massa-consensus-exports/src/test_exports/mock.rs index 8be461ec5cc..f4603b16bf4 100644 --- a/massa-consensus-exports/src/test_exports/mock.rs +++ b/massa-consensus-exports/src/test_exports/mock.rs @@ -1,55 +1,272 @@ // Copyright (c) 2022 MASSA LABS -use massa_models::config::CHANNEL_SIZE; +use std::sync::{ + mpsc::{self, Receiver}, + Arc, Mutex, +}; + +use massa_models::{ + api::BlockGraphStatus, + block::{BlockHeader, BlockId}, + clique::Clique, + prehash::PreHashSet, + slot::Slot, + stats::ConsensusStats, + streaming_step::StreamingStep, + wrapped::Wrapped, +}; +use massa_storage::Storage; use massa_time::MassaTime; -use tokio::{sync::mpsc, time::sleep}; use crate::{ - commands::ConsensusCommand, events::ConsensusEvent, ConsensusCommandSender, - ConsensusEventReceiver, + block_graph_export::BlockGraphExport, bootstrapable_graph::BootstrapableGraph, + error::ConsensusError, ConsensusController, }; -/// Mock for the consensus controller. -/// We will receive the commands in this mock and accept callback functions depending of the command in `wait_command`. 
-/// We will also send the events that can be received by listening to the `ConsensusEventReceiver`. -pub struct MockConsensusController { - /// Command receiver - pub consensus_command_rx: mpsc::Receiver, - _consensus_event_tx: mpsc::Sender, +/// Test tool to mock graph controller responses +pub struct ConsensusEventReceiver(pub Receiver); + +/// List of possible messages you can receive from the mock +/// Each variant corresponds to a unique method in `ConsensusController`, +/// Some variants wait for a response on their `response_tx` field, if present. +/// See the documentation of `ConsensusController` for details on parameters and return values. +#[derive(Clone, Debug)] +pub enum MockConsensusControllerMessage { + GetBlockStatuses { + block_ids: Vec, + response_tx: mpsc::Sender>, + }, + GetBlockGraphStatuses { + start_slot: Option, + end_slot: Option, + response_tx: mpsc::Sender>, + }, + GetCliques { + response_tx: mpsc::Sender>, + }, + GetBootstrapableGraph { + cursor: StreamingStep>, + execution_cursor: StreamingStep, + response_tx: mpsc::Sender< + Result< + ( + BootstrapableGraph, + PreHashSet, + StreamingStep>, + ), + ConsensusError, + >, + >, + }, + GetStats { + response_tx: mpsc::Sender>, + }, + GetBestParents { + response_tx: mpsc::Sender>, + }, + GetBlockcliqueBlockAtSlot { + slot: Slot, + response_tx: mpsc::Sender>, + }, + GetLatestBlockcliqueBlockAtSlot { + slot: Slot, + response_tx: mpsc::Sender, + }, + MarkInvalidBlock { + block_id: BlockId, + header: Wrapped, + }, + RegisterBlock { + block_id: BlockId, + slot: Slot, + block_storage: Storage, + created: bool, + }, + RegisterBlockHeader { + block_id: BlockId, + header: Wrapped, + }, } +/// A mocked graph controller that will intercept calls on its methods +/// and emit corresponding `MockConsensusControllerMessage` messages through a MPSC in a thread-safe way. 
+/// For messages with a `response_tx` field, the mock will await a response through their `response_tx` channel +/// in order to simulate returning this value at the end of the call. +#[derive(Clone)] +pub struct MockConsensusController(Arc>>); + impl MockConsensusController { - /// Create a new mock consensus controller. - pub fn new_with_receiver() -> (Self, ConsensusCommandSender, ConsensusEventReceiver) { - let (consensus_command_tx, consensus_command_rx) = - mpsc::channel::(CHANNEL_SIZE); - let (consensus_event_tx, consensus_event_rx) = - mpsc::channel::(CHANNEL_SIZE); + /// Create a new pair (mock graph controller, mpsc receiver for emitted messages) + /// Note that unbounded mpsc channels are used + pub fn new_with_receiver() -> (Box, ConsensusEventReceiver) { + let (tx, rx) = mpsc::channel(); ( - MockConsensusController { - consensus_command_rx, - _consensus_event_tx: consensus_event_tx, - }, - ConsensusCommandSender(consensus_command_tx), - ConsensusEventReceiver(consensus_event_rx), + Box::new(MockConsensusController(Arc::new(Mutex::new(tx)))), + ConsensusEventReceiver(rx), ) } +} +impl ConsensusEventReceiver { /// wait command - pub async fn wait_command(&mut self, timeout: MassaTime, filter_map: F) -> Option + pub fn wait_command(&mut self, timeout: MassaTime, filter_map: F) -> Option where - F: Fn(ConsensusCommand) -> Option, + F: Fn(MockConsensusControllerMessage) -> Option, { - let timer = sleep(timeout.into()); - tokio::pin!(timer); - loop { - tokio::select! 
{ - cmd_opt = self.consensus_command_rx.recv() => match cmd_opt { - Some(orig_cmd) => if let Some(res_cmd) = filter_map(orig_cmd) { return Some(res_cmd); }, - None => panic!("Unexpected closure of network command channel."), - }, - _ = &mut timer => return None - } + match self.0.recv_timeout(timeout.into()) { + Ok(msg) => filter_map(msg), + Err(_) => None, } } } + +/// Implements all the methods of the `ConsensusController` trait, +/// but simply make them emit a `MockConsensusControllerMessage`. +/// If the message contains a `response_tx`, +/// a response from that channel is read and returned as return value. +/// See the documentation of `ConsensusController` for details on each function. +impl ConsensusController for MockConsensusController { + fn get_block_graph_status( + &self, + start_slot: Option, + end_slot: Option, + ) -> Result { + let (response_tx, response_rx) = mpsc::channel(); + self.0 + .lock() + .unwrap() + .send(MockConsensusControllerMessage::GetBlockGraphStatuses { + start_slot, + end_slot, + response_tx, + }) + .unwrap(); + response_rx.recv().unwrap() + } + + fn get_block_statuses(&self, ids: &[BlockId]) -> Vec { + let (response_tx, response_rx) = mpsc::channel(); + self.0 + .lock() + .unwrap() + .send(MockConsensusControllerMessage::GetBlockStatuses { + block_ids: ids.to_vec(), + response_tx, + }) + .unwrap(); + response_rx.recv().unwrap() + } + + fn get_cliques(&self) -> Vec { + let (response_tx, response_rx) = mpsc::channel(); + self.0 + .lock() + .unwrap() + .send(MockConsensusControllerMessage::GetCliques { response_tx }) + .unwrap(); + response_rx.recv().unwrap() + } + + fn get_bootstrap_part( + &self, + cursor: StreamingStep>, + execution_cursor: StreamingStep, + ) -> Result< + ( + BootstrapableGraph, + PreHashSet, + StreamingStep>, + ), + ConsensusError, + > { + let (response_tx, response_rx) = mpsc::channel(); + self.0 + .lock() + .unwrap() + .send(MockConsensusControllerMessage::GetBootstrapableGraph { + cursor, + execution_cursor, 
+ response_tx, + }) + .unwrap(); + response_rx.recv().unwrap() + } + + fn get_stats(&self) -> Result { + let (response_tx, response_rx) = mpsc::channel(); + self.0 + .lock() + .unwrap() + .send(MockConsensusControllerMessage::GetStats { response_tx }) + .unwrap(); + response_rx.recv().unwrap() + } + + fn get_best_parents(&self) -> Vec<(BlockId, u64)> { + let (response_tx, response_rx) = mpsc::channel(); + self.0 + .lock() + .unwrap() + .send(MockConsensusControllerMessage::GetBestParents { response_tx }) + .unwrap(); + response_rx.recv().unwrap() + } + + fn get_blockclique_block_at_slot(&self, slot: Slot) -> Option { + let (response_tx, response_rx) = mpsc::channel(); + self.0 + .lock() + .unwrap() + .send(MockConsensusControllerMessage::GetBlockcliqueBlockAtSlot { slot, response_tx }) + .unwrap(); + response_rx.recv().unwrap() + } + + fn get_latest_blockclique_block_at_slot(&self, slot: Slot) -> BlockId { + let (response_tx, response_rx) = mpsc::channel(); + self.0 + .lock() + .unwrap() + .send( + MockConsensusControllerMessage::GetLatestBlockcliqueBlockAtSlot { + slot, + response_tx, + }, + ) + .unwrap(); + response_rx.recv().unwrap() + } + + fn mark_invalid_block(&self, block_id: BlockId, header: Wrapped) { + self.0 + .lock() + .unwrap() + .send(MockConsensusControllerMessage::MarkInvalidBlock { block_id, header }) + .unwrap(); + } + + fn register_block(&self, block_id: BlockId, slot: Slot, block_storage: Storage, created: bool) { + self.0 + .lock() + .unwrap() + .send(MockConsensusControllerMessage::RegisterBlock { + block_id, + slot, + block_storage, + created, + }) + .unwrap(); + } + + fn register_block_header(&self, block_id: BlockId, header: Wrapped) { + self.0 + .lock() + .unwrap() + .send(MockConsensusControllerMessage::RegisterBlockHeader { block_id, header }) + .unwrap(); + } + + fn clone_box(&self) -> Box { + Box::new(self.clone()) + } +} diff --git a/massa-consensus-exports/src/test_exports/mod.rs b/massa-consensus-exports/src/test_exports/mod.rs 
index a2f80855a21..aeddfb526b0 100644 --- a/massa-consensus-exports/src/test_exports/mod.rs +++ b/massa-consensus-exports/src/test_exports/mod.rs @@ -1,8 +1,7 @@ -//! Copyright (c) 2022 MASSA LABS +// Copyright (c) 2022 MASSA LABS -/// Mock of the execution module +mod config; mod mock; -/// Tooling to make test using a consensus -mod tools; + +pub use config::*; pub use mock::*; -pub use tools::*; diff --git a/massa-consensus-exports/src/test_exports/tools.rs b/massa-consensus-exports/src/test_exports/tools.rs deleted file mode 100644 index d7d6861ced3..00000000000 --- a/massa-consensus-exports/src/test_exports/tools.rs +++ /dev/null @@ -1,70 +0,0 @@ -use std::collections::HashMap; - -use massa_cipher::encrypt; -use massa_models::{ - address::Address, - ledger_models::LedgerData, - rolls::{RollCounts, RollUpdate, RollUpdates}, -}; -use massa_signature::KeyPair; -use tempfile::NamedTempFile; - -/// Password used for encryption in tests -pub const TEST_PASSWORD: &str = "PASSWORD"; - -/// generate a named temporary JSON ledger file -pub fn generate_ledger_file(ledger_vec: &HashMap) -> NamedTempFile { - use std::io::prelude::*; - let ledger_file_named = NamedTempFile::new().expect("cannot create temp file"); - serde_json::to_writer_pretty(ledger_file_named.as_file(), &ledger_vec) - .expect("unable to write ledger file"); - ledger_file_named - .as_file() - .seek(std::io::SeekFrom::Start(0)) - .expect("could not seek file"); - ledger_file_named -} - -/// generate staking key temp file from array of keypair -pub fn generate_staking_keys_file(staking_keys: &[KeyPair]) -> NamedTempFile { - use std::io::prelude::*; - let file_named = NamedTempFile::new().expect("cannot create temp file"); - let json = serde_json::to_string(&staking_keys).expect("json serialization failed"); - let encrypted_data = encrypt(TEST_PASSWORD, json.as_bytes()).expect("encryption failed"); - std::fs::write(file_named.as_ref(), encrypted_data).expect("data writing failed"); - file_named - .as_file() 
- .seek(std::io::SeekFrom::Start(0)) - .expect("could not seek file"); - file_named -} - -/// generate a named temporary JSON initial rolls file -pub fn generate_roll_counts_file(roll_counts: &RollCounts) -> NamedTempFile { - use std::io::prelude::*; - let roll_counts_file_named = NamedTempFile::new().expect("cannot create temp file"); - serde_json::to_writer_pretty(roll_counts_file_named.as_file(), &roll_counts.0) - .expect("unable to write ledger file"); - roll_counts_file_named - .as_file() - .seek(std::io::SeekFrom::Start(0)) - .expect("could not seek file"); - roll_counts_file_named -} - -/// generate a default named temporary JSON initial rolls file, -/// assuming two threads. -pub fn generate_default_roll_counts_file(stakers: Vec) -> NamedTempFile { - let mut roll_counts = RollCounts::default(); - for key in stakers.iter() { - let address = Address::from_public_key(&key.get_public_key()); - let update = RollUpdate { - roll_purchases: 1, - roll_sales: 0, - }; - let mut updates = RollUpdates::default(); - updates.apply(&address, &update).unwrap(); - roll_counts.apply_updates(&updates).unwrap(); - } - generate_roll_counts_file(&roll_counts) -} diff --git a/massa-consensus-worker/Cargo.toml b/massa-consensus-worker/Cargo.toml index 679d1ae75b8..a1cc2dd58ed 100644 --- a/massa-consensus-worker/Cargo.toml +++ b/massa-consensus-worker/Cargo.toml @@ -7,49 +7,21 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -serde_json = "1.0" -tokio = { version = "1.21", features = ["full"] } +displaydoc = "0.2" +num = { version = "0.4", features = ["serde"] } tracing = "0.1" -# custom modules +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" +parking_lot = { version = "0.12", features = ["deadlock_detection"] } +#custom modules massa_consensus_exports = { path = "../massa-consensus-exports" } -massa_graph = { path = "../massa-graph" } -massa_logging = { path = 
"../massa-logging" } massa_models = { path = "../massa-models" } massa_storage = { path = "../massa-storage" } -massa_protocol_exports = { path = "../massa-protocol-exports" } +massa_signature = { path = "../massa-signature" } massa_time = { path = "../massa-time" } - -[dev-dependencies] -serial_test = "0.9" -#tempfile = "3.3" -parking_lot = { version = "0.12", features = ["deadlock_detection"] } -massa_models = { path = "../massa-models", features = ["testing"] } -massa_execution_exports = { path = "../massa-execution-exports", features = [ - "testing", -] } -massa_consensus_exports = { path = "../massa-consensus-exports", features = [ - "testing", -] } -massa_pos_exports = { path = "../massa-pos-exports", features = ["testing"]} -massa_pos_worker = { path = "../massa-pos-worker" } -massa_pool_exports = { path = "../massa-pool-exports" } -massa_serialization = { path = "../massa-serialization"} massa_hash = { path = "../massa-hash" } -massa_signature = { path = "../massa-signature" } -massa_cipher = { path = "../massa-cipher" } -massa_storage = { path = "../massa-storage" } -#num = { version = "0.4", features = ["serde"] } -#rand = "0.8" -#futures = "0.3" - +massa_logging = { path = "../massa-logging" } -# for more information on what are the following features used for, see the cargo.toml at workspace level [features] -sandbox = ["massa_consensus_exports/sandbox", "massa_protocol_exports/sandbox" ] -testing = [ - "massa_consensus_exports/testing", - "massa_execution_exports/testing", - "massa_models/testing", - "massa_pool_exports/testing", - "massa_protocol_exports/testing" -] \ No newline at end of file + +sandbox = [] \ No newline at end of file diff --git a/massa-consensus-worker/src/commands.rs b/massa-consensus-worker/src/commands.rs new file mode 100644 index 00000000000..4ca74d79f94 --- /dev/null +++ b/massa-consensus-worker/src/commands.rs @@ -0,0 +1,13 @@ +use massa_models::{ + block::{BlockHeader, BlockId}, + slot::Slot, + wrapped::Wrapped, +}; +use 
massa_storage::Storage; + +#[allow(clippy::large_enum_variant)] +pub enum ConsensusCommand { + RegisterBlock(BlockId, Slot, Storage, bool), + RegisterBlockHeader(BlockId, Wrapped), + MarkInvalidBlock(BlockId, Wrapped), +} diff --git a/massa-consensus-worker/src/consensus_worker.rs b/massa-consensus-worker/src/consensus_worker.rs deleted file mode 100644 index 02382805693..00000000000 --- a/massa-consensus-worker/src/consensus_worker.rs +++ /dev/null @@ -1,788 +0,0 @@ -// Copyright (c) 2022 MASSA LABS -use massa_consensus_exports::{ - commands::ConsensusCommand, - error::{ConsensusError, ConsensusResult as Result}, - settings::ConsensusWorkerChannels, - ConsensusConfig, -}; -use massa_graph::{BlockGraph, BlockGraphExport}; -use massa_models::timeslots::{get_block_slot_timestamp, get_latest_block_slot_at_timestamp}; -use massa_models::{address::Address, block::BlockId, slot::Slot}; -use massa_models::{block::WrappedHeader, prehash::PreHashMap}; -use massa_models::{prehash::PreHashSet, stats::ConsensusStats}; -use massa_protocol_exports::{ProtocolEvent, ProtocolEventReceiver}; -use massa_storage::Storage; -use massa_time::MassaTime; -use std::{ - cmp::max, - collections::{HashMap, VecDeque}, -}; -use tokio::time::{sleep, sleep_until, Sleep}; -use tracing::{info, warn}; - -#[cfg(not(feature = "sandbox"))] -use massa_consensus_exports::events::ConsensusEvent; -#[cfg(not(feature = "sandbox"))] -use tokio::sync::mpsc::error::SendTimeoutError; -#[cfg(not(feature = "sandbox"))] -use tracing::debug; - -/// Manages consensus. -pub struct ConsensusWorker { - /// Consensus Configuration - cfg: ConsensusConfig, - /// Associated channels, sender and receivers - channels: ConsensusWorkerChannels, - /// Database containing all information about blocks, the `BlockGraph` and cliques. - block_db: BlockGraph, - /// Previous slot. 
- previous_slot: Option, - /// Next slot - next_slot: Slot, - /// blocks we want - wishlist: PreHashMap>, - /// latest final periods - latest_final_periods: Vec, - /// clock compensation - clock_compensation: i64, - /// Final block stats `(time, creator, is_from_protocol)` - final_block_stats: VecDeque<(MassaTime, Address, bool)>, - /// Blocks that come from protocol used for stats and ids are removed when inserted in `final_block_stats` - protocol_blocks: VecDeque<(MassaTime, BlockId)>, - /// Stale block timestamp - stale_block_stats: VecDeque, - /// the time span considered for stats - stats_history_timespan: MassaTime, - /// the time span considered for desynchronization detection - #[allow(dead_code)] - stats_desync_detection_timespan: MassaTime, - /// time at which the node was launched (used for desynchronization detection) - launch_time: MassaTime, - /// previous blockclique notified to Execution - prev_blockclique: PreHashMap, -} - -impl ConsensusWorker { - /// Creates a new consensus controller. - /// Initiates the random selector. - /// - /// # Arguments - /// * `cfg`: consensus configuration. - /// * `protocol_command_sender`: associated protocol controller - /// * `block_db`: Database containing all information about blocks, the blockgraph and cliques. - /// * `controller_command_rx`: Channel receiving consensus commands. - /// * `controller_event_tx`: Channel sending out consensus events. - /// * `controller_manager_rx`: Channel receiving consensus management commands. 
- pub(crate) async fn new( - cfg: ConsensusConfig, - channels: ConsensusWorkerChannels, - block_db: BlockGraph, - clock_compensation: i64, - ) -> Result { - let now = MassaTime::now(clock_compensation)?; - let previous_slot = get_latest_block_slot_at_timestamp( - cfg.thread_count, - cfg.t0, - cfg.genesis_timestamp, - now, - )?; - let next_slot = previous_slot.map_or(Ok(Slot::new(0u64, 0u8)), |s| { - s.get_next_slot(cfg.thread_count) - })?; - let latest_final_periods: Vec = block_db - .get_latest_final_blocks_periods() - .iter() - .map(|(_block_id, period)| *period) - .collect(); - info!( - "Started node at time {}, cycle {}, period {}, thread {}", - now.to_utc_string(), - next_slot.get_cycle(cfg.periods_per_cycle), - next_slot.period, - next_slot.thread, - ); - if cfg.genesis_timestamp > now { - let (days, hours, mins, secs) = cfg - .genesis_timestamp - .saturating_sub(now) - .days_hours_mins_secs()?; - info!( - "{} days, {} hours, {} minutes, {} seconds remaining to genesis", - days, hours, mins, secs, - ) - } - massa_trace!("consensus.consensus_worker.new", {}); - - // desync detection timespan - let stats_desync_detection_timespan = cfg.t0.checked_mul(cfg.periods_per_cycle * 2)?; - - // Notify execution module of current blockclique and all final blocks. - // we need to do this because the bootstrap snapshots of the executor vs the consensus may not have been taken in sync - // because the two modules run concurrently and out of sync. 
- let mut block_storage: PreHashMap = Default::default(); - let notify_finals: HashMap = block_db - .get_all_final_blocks() - .into_iter() - .map(|(b_id, slot)| { - let (_a_block, storage) = block_db - .get_active_block(&b_id) - .expect("active block missing from block_db"); - block_storage.insert(b_id, storage.clone()); - (slot, b_id) - }) - .collect(); - let notify_blockclique: HashMap = block_db - .get_blockclique() - .iter() - .map(|b_id| { - let (a_block, storage) = block_db - .get_active_block(b_id) - .expect("active block missing from block_db"); - let slot = a_block.slot; - block_storage.insert(*b_id, storage.clone()); - (slot, *b_id) - }) - .collect(); - let prev_blockclique: PreHashMap = - notify_blockclique.iter().map(|(k, v)| (*v, *k)).collect(); - channels.execution_controller.update_blockclique_status( - notify_finals, - Some(notify_blockclique), - block_storage, - ); - - Ok(ConsensusWorker { - block_db, - previous_slot, - next_slot, - wishlist: Default::default(), - latest_final_periods, - clock_compensation, - channels, - final_block_stats: Default::default(), - protocol_blocks: Default::default(), - stale_block_stats: VecDeque::new(), - stats_desync_detection_timespan, - stats_history_timespan: max(stats_desync_detection_timespan, cfg.stats_timespan), - cfg, - launch_time: MassaTime::now(clock_compensation)?, - prev_blockclique, - }) - } - - /// Consensus work is managed here. - /// It's mostly a tokio::select within a loop. - pub async fn run_loop(mut self) -> Result { - // signal initial state to pool - self.channels - .pool_command_sender - .notify_final_cs_periods(&self.latest_final_periods); - - // set slot timer - let slot_deadline = get_block_slot_timestamp( - self.cfg.thread_count, - self.cfg.t0, - self.cfg.genesis_timestamp, - self.next_slot, - )? 
- .estimate_instant(self.clock_compensation)?; - let next_slot_timer = sleep_until(tokio::time::Instant::from(slot_deadline)); - - tokio::pin!(next_slot_timer); - - // set prune timer - let prune_timer = sleep(self.cfg.block_db_prune_interval.to_duration()); - tokio::pin!(prune_timer); - - loop { - massa_trace!("consensus.consensus_worker.run_loop.select", {}); - /* - select! without the "biased" modifier will randomly select the 1st branch to check, - then will check the next ones in the order they are written. - We choose this order: - * manager commands: low freq, avoid having to wait to stop - * consensus commands (low to medium freq): respond quickly - * slot timer (low freq, timing is important but does not have to be perfect either) - * prune timer: low freq, timing not important but should not wait too long - * receive protocol events (high freq) - */ - tokio::select! { - // listen to manager commands - cmd = self.channels.controller_manager_rx.recv() => { - massa_trace!("consensus.consensus_worker.run_loop.select.manager", {}); - match cmd { - None => break, - Some(_) => {} - }} - - // listen consensus commands - Some(cmd) = self.channels.controller_command_rx.recv() => { - massa_trace!("consensus.consensus_worker.run_loop.consensus_command", {}); - self.process_consensus_command(cmd).await? - }, - - // slot timer - _ = &mut next_slot_timer => { - massa_trace!("consensus.consensus_worker.run_loop.select.slot_tick", {}); - if let Some(end) = self.cfg.end_timestamp { - if MassaTime::now(self.clock_compensation)? 
> end { - info!("This episode has come to an end, please get the latest testnet node version to continue"); - break; - } - } - self.slot_tick(&mut next_slot_timer).await?; - }, - - // prune timer - _ = &mut prune_timer=> { - massa_trace!("consensus.consensus_worker.run_loop.prune_timer", {}); - // prune block db - let _discarded_final_blocks = self.block_db.prune()?; - - // reset timer - prune_timer.set(sleep( self.cfg.block_db_prune_interval.to_duration())) - } - - // receive protocol controller events - evt = self.channels.protocol_event_receiver.wait_event() =>{ - massa_trace!("consensus.consensus_worker.run_loop.select.protocol_event", {}); - match evt { - Ok(event) => self.process_protocol_event(event).await?, - Err(err) => return Err(ConsensusError::ProtocolError(Box::new(err))) - } - }, - } - } - // after this curly brace you can find the end of the loop - Ok(self.channels.protocol_event_receiver) - } - - /// this function is called around every slot tick - /// it checks for cycle increment - /// detects desynchronization - /// produce quite more logs than actual stuff - async fn slot_tick(&mut self, next_slot_timer: &mut std::pin::Pin<&mut Sleep>) -> Result<()> { - let now = MassaTime::now(self.clock_compensation)?; - let observed_slot = get_latest_block_slot_at_timestamp( - self.cfg.thread_count, - self.cfg.t0, - self.cfg.genesis_timestamp, - now, - )?; - - if observed_slot < Some(self.next_slot) { - // reset timer for next slot - let sleep_deadline = get_block_slot_timestamp( - self.cfg.thread_count, - self.cfg.t0, - self.cfg.genesis_timestamp, - self.next_slot, - )? 
- .estimate_instant(self.clock_compensation)?; - next_slot_timer.set(sleep_until(tokio::time::Instant::from(sleep_deadline))); - return Ok(()); - } - - let observed_slot = observed_slot.unwrap(); // does not panic, checked above - - massa_trace!("consensus.consensus_worker.slot_tick", { - "slot": observed_slot - }); - - let previous_cycle = self - .previous_slot - .map(|s| s.get_cycle(self.cfg.periods_per_cycle)); - let observed_cycle = observed_slot.get_cycle(self.cfg.periods_per_cycle); - if previous_cycle.is_none() { - // first cycle observed - info!("Massa network has started ! 🎉") - } - if previous_cycle < Some(observed_cycle) { - info!("Started cycle {}", observed_cycle); - } - - // check if there are any final blocks is coming from protocol - // if none => we are probably desync - #[cfg(not(feature = "sandbox"))] - if now - > max(self.cfg.genesis_timestamp, self.launch_time) - .saturating_add(self.stats_desync_detection_timespan) - && !self - .final_block_stats - .iter() - .any(|(time, _, is_from_protocol)| { - time > &now.saturating_sub(self.stats_desync_detection_timespan) - && *is_from_protocol - }) - { - warn!("desynchronization detected because the recent final block history is empty or contains only blocks produced by this node"); - let _ = self.send_consensus_event(ConsensusEvent::NeedSync).await; - } - - self.previous_slot = Some(observed_slot); - self.next_slot = observed_slot.get_next_slot(self.cfg.thread_count)?; - - // signal tick to block graph - self.block_db.slot_tick(Some(observed_slot))?; - - // take care of block db changes - self.block_db_changed().await?; - - // reset timer for next slot - let sleep_deadline = get_block_slot_timestamp( - self.cfg.thread_count, - self.cfg.t0, - self.cfg.genesis_timestamp, - self.next_slot, - )? 
- .estimate_instant(self.clock_compensation)?; - next_slot_timer.set(sleep_until(tokio::time::Instant::from(sleep_deadline))); - - // prune stats - self.prune_stats()?; - - Ok(()) - } - - /// Manages given consensus command. - /// They can come from the API or the bootstrap server - /// Please refactor me - /// - /// # Argument - /// * `cmd`: consensus command to process - async fn process_consensus_command(&mut self, cmd: ConsensusCommand) -> Result<()> { - match cmd { - ConsensusCommand::GetBlockGraphStatus { - slot_start, - slot_end, - response_tx, - } => { - massa_trace!( - "consensus.consensus_worker.process_consensus_command.get_block_graph_status", - {} - ); - if response_tx - .send(BlockGraphExport::extract_from( - &self.block_db, - slot_start, - slot_end, - )?) - .is_err() - { - warn!("consensus: could not send GetBlockGraphStatus answer"); - } - Ok(()) - } - // gets the graph status of a batch of blocks - ConsensusCommand::GetBlockStatuses { ids, response_tx } => { - massa_trace!( - "consensus.consensus_worker.process_consensus_command.get_block_statuses", - {} - ); - let res: Vec<_> = ids - .iter() - .map(|id| self.block_db.get_block_status(id)) - .collect(); - if response_tx.send(res).is_err() { - warn!("consensus: could not send get_block_statuses answer"); - } - Ok(()) - } - ConsensusCommand::GetCliques(response_tx) => { - massa_trace!( - "consensus.consensus_worker.process_consensus_command.get_cliques", - {} - ); - if response_tx.send(self.block_db.get_cliques()).is_err() { - warn!("consensus: could not send GetSelectionDraws response"); - } - Ok(()) - } - ConsensusCommand::GetBootstrapState(response_tx) => { - massa_trace!( - "consensus.consensus_worker.process_consensus_command.get_bootstrap_state", - {} - ); - let resp = self.block_db.export_bootstrap_graph()?; - if response_tx.send(Box::new(resp)).await.is_err() { - warn!("consensus: could not send GetBootstrapState answer"); - } - Ok(()) - } - ConsensusCommand::GetStats(response_tx) => { - 
massa_trace!( - "consensus.consensus_worker.process_consensus_command.get_stats", - {} - ); - let res = self.get_stats()?; - if response_tx.send(res).is_err() { - warn!("consensus: could not send get_stats response"); - } - Ok(()) - } - ConsensusCommand::GetBestParents { response_tx } => { - if response_tx - .send(self.block_db.get_best_parents().clone()) - .is_err() - { - warn!("consensus: could not send get best parents response"); - } - Ok(()) - } - ConsensusCommand::GetBlockcliqueBlockAtSlot { slot, response_tx } => { - let res = self.block_db.get_blockclique_block_at_slot(&slot); - if response_tx.send(res).is_err() { - warn!("consensus: could not send get block clique block at slot response"); - } - Ok(()) - } - ConsensusCommand::GetLatestBlockcliqueBlockAtSlot { slot, response_tx } => { - let res = self.block_db.get_latest_blockclique_block_at_slot(&slot); - if response_tx.send(res).is_err() { - warn!( - "consensus: could not send get latest block clique block at slot response" - ); - } - Ok(()) - } - ConsensusCommand::SendBlock { - block_id, - slot, - block_storage, - response_tx, - } => { - self.block_db - .incoming_block(block_id, slot, self.previous_slot, block_storage)?; - - if response_tx.send(()).is_err() { - warn!("consensus: could not send get block clique block at slot response"); - } - Ok(()) - } - } - } - - /// retrieve stats - /// Used in response to a API request - fn get_stats(&mut self) -> Result { - let timespan_end = max(self.launch_time, MassaTime::now(self.clock_compensation)?); - let timespan_start = max( - timespan_end.saturating_sub(self.cfg.stats_timespan), - self.launch_time, - ); - let final_block_count = self - .final_block_stats - .iter() - .filter(|(t, _, _)| *t >= timespan_start && *t < timespan_end) - .count() as u64; - let stale_block_count = self - .stale_block_stats - .iter() - .filter(|t| **t >= timespan_start && **t < timespan_end) - .count() as u64; - let clique_count = self.block_db.get_clique_count() as u64; - 
Ok(ConsensusStats { - final_block_count, - stale_block_count, - clique_count, - start_timespan: timespan_start, - end_timespan: timespan_end, - }) - } - - /// Manages received protocol events. - /// - /// # Arguments - /// * `event`: event type to process. - async fn process_protocol_event(&mut self, event: ProtocolEvent) -> Result<()> { - match event { - ProtocolEvent::ReceivedBlock { - block_id, - slot, - storage, - } => { - massa_trace!( - "consensus.consensus_worker.process_protocol_event.received_block", - { "block_id": block_id } - ); - self.block_db - .incoming_block(block_id, slot, self.previous_slot, storage)?; - let now = MassaTime::now(self.clock_compensation)?; - self.protocol_blocks.push_back((now, block_id)); - self.block_db_changed().await?; - } - ProtocolEvent::ReceivedBlockHeader { block_id, header } => { - massa_trace!("consensus.consensus_worker.process_protocol_event.received_header", { "block_id": block_id, "header": header }); - self.block_db - .incoming_header(block_id, header, self.previous_slot)?; - self.block_db_changed().await?; - } - ProtocolEvent::InvalidBlock { block_id, header } => { - massa_trace!( - "consensus.consensus_worker.process_protocol_event.invalid_block", - { "block_id": block_id } - ); - self.block_db.invalid_block(&block_id, header)?; - // Say it to consensus - } - } - Ok(()) - } - - /// prune statistics according to the stats span - fn prune_stats(&mut self) -> Result<()> { - let start_time = - MassaTime::now(self.clock_compensation)?.saturating_sub(self.stats_history_timespan); - while let Some((t, _, _)) = self.final_block_stats.front() { - if t < &start_time { - self.final_block_stats.pop_front(); - } else { - break; - } - } - while let Some(t) = self.stale_block_stats.front() { - if t < &start_time { - self.stale_block_stats.pop_front(); - } else { - break; - } - } - while let Some((t, _)) = self.protocol_blocks.front() { - if t < &start_time { - self.protocol_blocks.pop_front(); - } else { - break; - } - } - Ok(()) 
- } - - /// Notify execution about blockclique changes and finalized blocks. - fn notify_execution(&mut self, finalized_blocks: HashMap) { - // List new block storage instances that Execution doesn't know about. - // That's blocks that have not been sent to execution before, ie. in the previous blockclique). - let mut new_blocks_storage: PreHashMap = finalized_blocks - .iter() - .filter_map(|(_slot, b_id)| { - if self.prev_blockclique.contains_key(b_id) { - // was previously sent as a blockclique element - return None; - } - let (_a_block, storage) = self - .block_db - .get_active_block(b_id) - .expect("final block not found in active blocks"); - Some((*b_id, storage.clone())) - }) - .collect(); - - // Get new blockclique block list with slots. - let mut blockclique_changed = false; - let new_blockclique: PreHashMap = self - .block_db - .get_blockclique() - .iter() - .map(|b_id| { - if let Some(slot) = self.prev_blockclique.remove(b_id) { - // The block was already sent in the previous blockclique: - // the slot can be gathered from there without locking Storage. - // Note: the block is removed from self.prev_blockclique. - (*b_id, slot) - } else { - // The block was not present in the previous blockclique: - // the blockclique has changed => get the block's slot by querying Storage. - blockclique_changed = true; - let (a_block, storage) = self - .block_db - .get_active_block(b_id) - .expect("blockclique block not found in active blocks"); - new_blocks_storage.insert(*b_id, storage.clone()); - (*b_id, a_block.slot) - } - }) - .collect(); - if !self.prev_blockclique.is_empty() { - // All elements present in the new blockclique have been removed from `prev_blockclique` above. - // If `prev_blockclique` is not empty here, it means that it contained elements that are not in the new blockclique anymore. - // In that case, we mark the blockclique as having changed. - blockclique_changed = true; - } - // Overwrite previous blockclique. 
- // Should still be done even if unchanged because elements were removed from it above. - self.prev_blockclique = new_blockclique.clone(); - - if finalized_blocks.is_empty() && !blockclique_changed { - // There are no changes (neither block finalizations not blockclique changes) to send to execution. - return; - } - - // Notify execution of block finalizations and blockclique changes - self.channels - .execution_controller - .update_blockclique_status( - finalized_blocks, - if blockclique_changed { - Some(new_blockclique.into_iter().map(|(k, v)| (v, k)).collect()) - } else { - None - }, - new_blocks_storage, - ); - } - - /// call me if the block database changed - /// Processing of final blocks, pruning. - /// - /// 1. propagate blocks - /// 2. Notify of attack attempts - /// 3. get new final blocks - /// 4. get blockclique - /// 5. notify Execution - /// 6. Process new final blocks - /// 7. Notify pool of new final ops - /// 8. Notify PoS of final blocks - /// 9. notify protocol of block wish list - /// 10. note new latest final periods (prune graph if changed) - /// 11. add stale blocks to stats - async fn block_db_changed(&mut self) -> Result<()> { - massa_trace!("consensus.consensus_worker.block_db_changed", {}); - - // Propagate new blocks - for (block_id, storage) in self.block_db.get_blocks_to_propagate().into_iter() { - massa_trace!("consensus.consensus_worker.block_db_changed.integrated", { - "block_id": block_id - }); - self.channels - .protocol_command_sender - .integrated_block(block_id, storage) - .await?; - } - - // Notify protocol of attack attempts. 
- for hash in self.block_db.get_attack_attempts().into_iter() { - self.channels - .protocol_command_sender - .notify_block_attack(hash) - .await?; - massa_trace!("consensus.consensus_worker.block_db_changed.attack", { - "hash": hash - }); - } - - // manage finalized blocks - let timestamp = MassaTime::now(self.clock_compensation)?; - let finalized_blocks = self.block_db.get_new_final_blocks(); - let mut final_block_slots = HashMap::with_capacity(finalized_blocks.len()); - for b_id in finalized_blocks { - if let Some((a_block, _block_store)) = self.block_db.get_active_block(&b_id) { - // add to final blocks to notify execution - final_block_slots.insert(a_block.slot, b_id); - - // add to stats - let block_is_from_protocol = self - .protocol_blocks - .iter() - .any(|(_, block_id)| block_id == &b_id); - self.final_block_stats.push_back(( - timestamp, - a_block.creator_address, - block_is_from_protocol, - )); - } - } - - // notify execution - self.notify_execution(final_block_slots); - - // notify protocol of block wishlist - let new_wishlist = self.block_db.get_block_wishlist()?; - let new_blocks: PreHashMap> = new_wishlist - .iter() - .filter_map(|(id, header)| { - if !self.wishlist.contains_key(id) { - Some((*id, header.clone())) - } else { - None - } - }) - .collect(); - let remove_blocks: PreHashSet = self - .wishlist - .iter() - .filter_map(|(id, _)| { - if !new_wishlist.contains_key(id) { - Some(*id) - } else { - None - } - }) - .collect(); - if !new_blocks.is_empty() || !remove_blocks.is_empty() { - massa_trace!("consensus.consensus_worker.block_db_changed.send_wishlist_delta", { "new": new_wishlist, "remove": remove_blocks }); - self.channels - .protocol_command_sender - .send_wishlist_delta(new_blocks, remove_blocks) - .await?; - self.wishlist = new_wishlist; - } - - // note new latest final periods - let latest_final_periods: Vec = self - .block_db - .get_latest_final_blocks_periods() - .iter() - .map(|(_block_id, period)| *period) - .collect(); - // if 
changed... - if self.latest_final_periods != latest_final_periods { - // signal new last final periods to pool - self.channels - .pool_command_sender - .notify_final_cs_periods(&latest_final_periods); - // update final periods - self.latest_final_periods = latest_final_periods; - } - - // add stale blocks to stats - let new_stale_block_ids_creators_slots = self.block_db.get_new_stale_blocks(); - let timestamp = MassaTime::now(self.clock_compensation)?; - for (_b_id, (_b_creator, _b_slot)) in new_stale_block_ids_creators_slots.into_iter() { - self.stale_block_stats.push_back(timestamp); - - /* - TODO add this again - let creator_addr = Address::from_public_key(&b_creator); - if self.staking_keys.contains_key(&creator_addr) { - warn!("block {} that was produced by our address {} at slot {} became stale. This is probably due to a temporary desynchronization.", b_id, creator_addr, b_slot); - } - */ - } - - Ok(()) - } - - /// Channel management stuff - /// todo delete - /// or at least introduce some generic - #[cfg(not(feature = "sandbox"))] - async fn send_consensus_event(&self, event: ConsensusEvent) -> Result<()> { - let result = self - .channels - .controller_event_tx - .send_timeout(event, self.cfg.max_send_wait.to_duration()) - .await; - match result { - Ok(()) => return Ok(()), - Err(SendTimeoutError::Closed(event)) => { - debug!( - "failed to send ConsensusEvent due to channel closure: {:?}", - event - ); - } - Err(SendTimeoutError::Timeout(event)) => { - debug!("failed to send ConsensusEvent due to timeout: {:?}", event); - } - } - Err(ConsensusError::ChannelError("failed to send event".into())) - } -} diff --git a/massa-consensus-worker/src/controller.rs b/massa-consensus-worker/src/controller.rs new file mode 100644 index 00000000000..e49e038585a --- /dev/null +++ b/massa-consensus-worker/src/controller.rs @@ -0,0 +1,252 @@ +use massa_consensus_exports::{ + block_graph_export::BlockGraphExport, block_status::BlockStatus, + 
bootstrapable_graph::BootstrapableGraph, error::ConsensusError, + export_active_block::ExportActiveBlock, ConsensusController, +}; +use massa_models::{ + api::BlockGraphStatus, + block::{BlockHeader, BlockId}, + clique::Clique, + prehash::PreHashSet, + slot::Slot, + stats::ConsensusStats, + streaming_step::StreamingStep, + wrapped::Wrapped, +}; +use massa_storage::Storage; +use parking_lot::RwLock; +use std::sync::{mpsc::SyncSender, Arc}; +use tracing::log::warn; + +use crate::{commands::ConsensusCommand, state::ConsensusState}; + +/// The retrieval of data is made using a shared state and modifications are asked by sending message to a channel. +/// This is done mostly to be able to: +/// +/// - send commands through the channel without waiting for them to be processed from the point of view of the sending thread, and channels are very much optimal for that (much faster than locks) +/// - still be able to read the current state of the graph as processed so far (for this we need a shared state) +/// +/// Note that sending commands and reading the state is done from different, mutually-asynchronous tasks and they can have data that are not sync yet. +#[derive(Clone)] +pub struct ConsensusControllerImpl { + command_sender: SyncSender, + shared_state: Arc>, + bootstrap_part_size: u64, +} + +impl ConsensusControllerImpl { + pub fn new( + command_sender: SyncSender, + shared_state: Arc>, + bootstrap_part_size: u64, + ) -> Self { + Self { + command_sender, + shared_state, + bootstrap_part_size, + } + } +} + +impl ConsensusController for ConsensusControllerImpl { + /// Get a block graph export in a given period. 
+ /// + /// # Arguments: + /// * `start_slot`: the start slot + /// * `end_slot`: the end slot + /// + /// # Returns: + /// An export of the block graph in this period + fn get_block_graph_status( + &self, + start_slot: Option, + end_slot: Option, + ) -> Result { + self.shared_state + .read() + .extract_block_graph_part(start_slot, end_slot) + } + + /// Get statuses of blocks present in the graph + /// + /// # Arguments: + /// * `block_ids`: the block ids to get the status of + /// + /// # Returns: + /// A vector of statuses sorted by the order of the block ids + fn get_block_statuses(&self, ids: &[BlockId]) -> Vec { + let read_shared_state = self.shared_state.read(); + ids.iter() + .map(|id| read_shared_state.get_block_status(id)) + .collect() + } + + /// Get all the cliques possible in the block graph. + /// + /// # Returns: + /// A vector of cliques + fn get_cliques(&self) -> Vec { + self.shared_state.read().max_cliques.clone() + } + + /// Get a part of the graph to send to a node so that he can setup his graph. + /// Used for bootstrap. 
+ /// + /// # Arguments: + /// * `cursor`: streaming cursor containing the current state of bootstrap and what blocks have been to the client already + /// * `execution_cursor`: streaming cursor of the final state to ensure that last slot of the bootstrap info corresponds + /// + /// # Returns: + /// * A portion of the graph + /// * The list of outdated block ids + /// * The streaming step value after the current iteration + fn get_bootstrap_part( + &self, + mut cursor: StreamingStep>, + execution_cursor: StreamingStep, + ) -> Result< + ( + BootstrapableGraph, + PreHashSet, + StreamingStep>, + ), + ConsensusError, + > { + let mut final_blocks: Vec = Vec::new(); + let mut retrieved_ids: PreHashSet = PreHashSet::default(); + let read_shared_state = self.shared_state.read(); + let required_blocks: PreHashSet = + read_shared_state.list_required_active_blocks()?; + + let (current_ids, previous_ids, outdated_ids) = match cursor { + StreamingStep::Started => ( + required_blocks, + PreHashSet::default(), + PreHashSet::default(), + ), + StreamingStep::Ongoing(ref cursor_ids) => ( + // ids that are contained in required_blocks but not in the download cursor => current_ids + required_blocks.difference(cursor_ids).cloned().collect(), + // ids previously downloaded => previous_ids + cursor_ids.clone(), + // ids previously downloaded but not contained in required_blocks anymore => outdated_ids + cursor_ids.difference(&required_blocks).cloned().collect(), + ), + StreamingStep::Finished(_) => { + return Ok(( + BootstrapableGraph { final_blocks }, + PreHashSet::default(), + cursor, + )) + } + }; + + for b_id in ¤t_ids { + if let Some(BlockStatus::Active { a_block, storage }) = + read_shared_state.block_statuses.get(b_id) + { + if final_blocks.len() as u64 >= self.bootstrap_part_size { + break; + } + match execution_cursor { + StreamingStep::Ongoing(slot) | StreamingStep::Finished(Some(slot)) => { + if a_block.slot > slot { + continue; + } + } + _ => (), + } + if a_block.is_final { 
+ let export = ExportActiveBlock::from_active_block(a_block, storage); + final_blocks.push(export); + retrieved_ids.insert(*b_id); + } + } + } + + if final_blocks.is_empty() { + cursor = StreamingStep::Finished(None); + } else { + let pruned_previous_ids = previous_ids.difference(&outdated_ids); + retrieved_ids.extend(pruned_previous_ids); + cursor = StreamingStep::Ongoing(retrieved_ids); + } + + Ok((BootstrapableGraph { final_blocks }, outdated_ids, cursor)) + } + + /// Get the stats of the consensus + fn get_stats(&self) -> Result { + self.shared_state.read().get_stats() + } + + /// Get the current best parents for a block creation + /// + /// # Returns: + /// A block id and a period for each thread of the graph + fn get_best_parents(&self) -> Vec<(BlockId, u64)> { + self.shared_state.read().best_parents.clone() + } + + /// Get the block, that is in the blockclique, at a given slot. + /// + /// # Arguments: + /// * `slot`: the slot to get the block at + /// + /// # Returns: + /// The block id of the block at the given slot if exists + fn get_blockclique_block_at_slot(&self, slot: Slot) -> Option { + self.shared_state + .read() + .get_blockclique_block_at_slot(&slot) + } + + /// Get the latest block, that is in the blockclique, in the thread of the given slot and before this `slot`. 
+ /// + /// # Arguments: + /// * `slot`: the slot that will give us the thread and the upper bound + /// + /// # Returns: + /// The block id of the latest block in the thread of the given slot and before this slot + fn get_latest_blockclique_block_at_slot(&self, slot: Slot) -> BlockId { + self.shared_state + .read() + .get_latest_blockclique_block_at_slot(&slot) + } + + fn register_block(&self, block_id: BlockId, slot: Slot, block_storage: Storage, created: bool) { + if let Err(err) = self + .command_sender + .try_send(ConsensusCommand::RegisterBlock( + block_id, + slot, + block_storage, + created, + )) + { + warn!("error trying to register a block: {}", err); + } + } + + fn register_block_header(&self, block_id: BlockId, header: Wrapped) { + if let Err(err) = self + .command_sender + .try_send(ConsensusCommand::RegisterBlockHeader(block_id, header)) + { + warn!("error trying to register a block header: {}", err); + } + } + + fn mark_invalid_block(&self, block_id: BlockId, header: Wrapped) { + if let Err(err) = self + .command_sender + .try_send(ConsensusCommand::MarkInvalidBlock(block_id, header)) + { + warn!("error trying to mark block as invalid: {}", err); + } + } + + fn clone_box(&self) -> Box { + Box::new(self.clone()) + } +} diff --git a/massa-consensus-worker/src/lib.rs b/massa-consensus-worker/src/lib.rs index 5d590c1d394..71042385846 100644 --- a/massa-consensus-worker/src/lib.rs +++ b/massa-consensus-worker/src/lib.rs @@ -1,16 +1,22 @@ -//! Copyright (c) 2022 MASSA LABS +// Copyright (c) 2022 MASSA LABS -#![feature(async_closure)] -#![feature(hash_drain_filter)] -#![feature(int_roundings)] -#![warn(missing_docs)] -#![warn(unused_crate_dependencies)] -#[macro_use] -extern crate massa_logging; +//! # General description +//! +//! The consensus worker launches a persistent thread that will run in the background. +//! This thread has a `run` function that triggers the consensus algorithm each slot. It can be interrupted by commands +//! 
that are managed on the fly. The consensus worker share a state with a controller. This controller can be called by the others modules. +//! It avoid sending message to the thread just for getting informations on the consensus. +//! +//! Communications with execution is blocking. Communications with protocol blocks on sending information to protocol but not blocking +//! when protocol sends informations to this module. +//! +//! This module doesn't use asynchronous code. +#![feature(deadline_api)] -mod consensus_worker; -mod tools; -pub use tools::start_consensus_controller; +mod commands; +mod controller; +mod manager; +mod state; +mod worker; -#[cfg(test)] -mod tests; +pub use worker::start_consensus_worker; diff --git a/massa-consensus-worker/src/manager.rs b/massa-consensus-worker/src/manager.rs new file mode 100644 index 00000000000..d2ef67e5272 --- /dev/null +++ b/massa-consensus-worker/src/manager.rs @@ -0,0 +1,23 @@ +use massa_consensus_exports::ConsensusManager; +use std::{sync::mpsc::SyncSender, thread::JoinHandle}; +use tracing::log::info; + +use crate::commands::ConsensusCommand; + +pub struct ConsensusManagerImpl { + pub consensus_thread: Option<(SyncSender, JoinHandle<()>)>, +} + +impl ConsensusManager for ConsensusManagerImpl { + fn stop(&mut self) { + info!("stopping consensus worker..."); + // join the consensus thread + if let Some((tx, join_handle)) = self.consensus_thread.take() { + drop(tx); + join_handle + .join() + .expect("consensus thread panicked on try to join"); + } + info!("consensus worker stopped"); + } +} diff --git a/massa-consensus-worker/src/state/graph.rs b/massa-consensus-worker/src/state/graph.rs new file mode 100644 index 00000000000..b2c08e5c6d9 --- /dev/null +++ b/massa-consensus-worker/src/state/graph.rs @@ -0,0 +1,362 @@ +use std::collections::VecDeque; + +use massa_consensus_exports::{ + block_status::{BlockStatus, DiscardReason}, + error::ConsensusError, +}; +use massa_logging::massa_trace; +use 
massa_models::{block::BlockId, clique::Clique, prehash::PreHashSet, slot::Slot}; + +use super::ConsensusState; + +impl ConsensusState { + pub fn insert_parents_descendants( + &mut self, + add_block_id: BlockId, + add_block_slot: Slot, + parents_hash: Vec, + ) { + // add as child to parents + for parent_h in parents_hash.iter() { + if let Some(BlockStatus::Active { + a_block: a_parent, .. + }) = self.block_statuses.get_mut(parent_h) + { + a_parent.children[add_block_slot.thread as usize] + .insert(add_block_id, add_block_slot.period); + } + } + + // add as descendant to ancestors. Note: descendants are never removed. + let mut ancestors: VecDeque = parents_hash.iter().copied().collect(); + let mut visited = PreHashSet::::default(); + while let Some(ancestor_h) = ancestors.pop_back() { + if !visited.insert(ancestor_h) { + continue; + } + if let Some(BlockStatus::Active { a_block: ab, .. }) = + self.block_statuses.get_mut(&ancestor_h) + { + ab.descendants.insert(add_block_id); + for (ancestor_parent_h, _) in ab.parents.iter() { + ancestors.push_front(*ancestor_parent_h); + } + } + } + } + + pub fn compute_fitness_find_blockclique( + &mut self, + add_block_id: &BlockId, + ) -> Result { + let mut blockclique_i = 0usize; + let mut max_clique_fitness = (0u64, num::BigInt::default()); + for (clique_i, clique) in self.max_cliques.iter_mut().enumerate() { + clique.fitness = 0; + clique.is_blockclique = false; + let mut sum_hash = num::BigInt::default(); + for block_h in clique.block_ids.iter() { + let fitness = match self.block_statuses.get(block_h) { + Some(BlockStatus::Active { a_block, storage: _ }) => a_block.fitness, + _ => return Err(ConsensusError::ContainerInconsistency(format!("inconsistency inside block statuses computing fitness while adding {} - missing {}", add_block_id, block_h))), + }; + clique.fitness = clique + .fitness + .checked_add(fitness) + .ok_or(ConsensusError::FitnessOverflow)?; + sum_hash -= num::BigInt::from_bytes_be(num::bigint::Sign::Plus, 
block_h.to_bytes()); + } + let cur_fit = (clique.fitness, sum_hash); + if cur_fit > max_clique_fitness { + blockclique_i = clique_i; + max_clique_fitness = cur_fit; + } + } + self.max_cliques[blockclique_i].is_blockclique = true; + Ok(blockclique_i) + } + + pub fn list_stale_blocks(&self, fitness_threshold: u64) -> PreHashSet { + // iterate from largest to smallest to minimize reallocations + let mut indices: Vec = (0..self.max_cliques.len()).collect(); + indices.sort_unstable_by_key(|&i| std::cmp::Reverse(self.max_cliques[i].block_ids.len())); + let mut high_set = PreHashSet::::default(); + let mut low_set = PreHashSet::::default(); + for clique_i in indices.into_iter() { + if self.max_cliques[clique_i].fitness >= fitness_threshold { + high_set.extend(&self.max_cliques[clique_i].block_ids); + } else { + low_set.extend(&self.max_cliques[clique_i].block_ids); + } + } + &low_set - &high_set + } + + pub fn remove_block( + &mut self, + add_block_id: &BlockId, + block_id: &BlockId, + ) -> Result<(), ConsensusError> { + if let Some(BlockStatus::Active { + a_block: active_block, + storage: _storage, + }) = self.block_statuses.remove(block_id) + { + self.active_index.remove(block_id); + if active_block.is_final { + return Err(ConsensusError::ContainerInconsistency(format!("inconsistency inside block statuses removing stale blocks adding {} - block {} was already final", add_block_id, block_id))); + } + + // remove from gi_head + if let Some(other_incomps) = self.gi_head.remove(block_id) { + for other_incomp in other_incomps.into_iter() { + if let Some(other_incomp_lst) = self.gi_head.get_mut(&other_incomp) { + other_incomp_lst.remove(block_id); + } + } + } + + // remove from cliques + let stale_block_fitness = active_block.fitness; + self.max_cliques.iter_mut().for_each(|c| { + if c.block_ids.remove(block_id) { + c.fitness -= stale_block_fitness; + } + }); + self.max_cliques.retain(|c| !c.block_ids.is_empty()); // remove empty cliques + if self.max_cliques.is_empty() { + 
// make sure at least one clique remains + self.max_cliques = vec![Clique { + block_ids: PreHashSet::::default(), + fitness: 0, + is_blockclique: true, + }]; + } + + // remove from parent's children + for (parent_h, _parent_period) in active_block.parents.iter() { + if let Some(BlockStatus::Active { + a_block: parent_active_block, + .. + }) = self.block_statuses.get_mut(parent_h) + { + parent_active_block.children[active_block.slot.thread as usize] + .remove(block_id); + } + } + + massa_trace!("consensus.block_graph.add_block_to_graph.stale", { + "hash": block_id + }); + + // mark as stale + self.new_stale_blocks + .insert(*block_id, (active_block.creator_address, active_block.slot)); + self.block_statuses.insert( + *block_id, + BlockStatus::Discarded { + slot: active_block.slot, + creator: active_block.creator_address, + parents: active_block.parents.iter().map(|(h, _)| *h).collect(), + reason: DiscardReason::Stale, + sequence_number: { + self.sequence_counter += 1; + self.sequence_counter + }, + }, + ); + self.discarded_index.insert(*block_id); + Ok(()) + } else { + return Err(ConsensusError::ContainerInconsistency(format!("inconsistency inside block statuses removing stale blocks adding {} - block {} is missing", add_block_id, block_id))); + } + } + + pub fn list_final_blocks(&self) -> Result, ConsensusError> { + // short-circuiting intersection of cliques from smallest to largest + let mut indices: Vec = (0..self.max_cliques.len()).collect(); + indices.sort_unstable_by_key(|&i| self.max_cliques[i].block_ids.len()); + let mut final_candidates = self.max_cliques[indices[0]].block_ids.clone(); + for i in 1..indices.len() { + final_candidates.retain(|v| self.max_cliques[i].block_ids.contains(v)); + if final_candidates.is_empty() { + break; + } + } + + // restrict search to cliques with high enough fitness, sort cliques by fitness (highest to lowest) + massa_trace!( + "consensus.block_graph.add_block_to_graph.list_final_blocks.restrict", + {} + ); + 
indices.retain(|&i| self.max_cliques[i].fitness > self.config.delta_f0); + indices.sort_unstable_by_key(|&i| std::cmp::Reverse(self.max_cliques[i].fitness)); + + let mut final_blocks = PreHashSet::::default(); + for clique_i in indices.into_iter() { + massa_trace!( + "consensus.block_graph.add_block_to_graph.list_final_blocks.loop", + { "clique_i": clique_i } + ); + // check in cliques from highest to lowest fitness + if final_candidates.is_empty() { + // no more final candidates + break; + } + let clique = &self.max_cliques[clique_i]; + + // compute the total fitness of all the descendants of the candidate within the clique + let loc_candidates = final_candidates.clone(); + for candidate_h in loc_candidates.into_iter() { + let descendants = match self.block_statuses.get(&candidate_h) { + Some(BlockStatus::Active { + a_block, + storage: _, + }) => &a_block.descendants, + _ => { + return Err(ConsensusError::MissingBlock(format!( + "missing block when computing total fitness of descendants: {}", + candidate_h + ))) + } + }; + let desc_fit: u64 = descendants + .intersection(&clique.block_ids) + .map(|h| { + if let Some(BlockStatus::Active { a_block: ab, .. 
}) = + self.block_statuses.get(h) + { + return ab.fitness; + } + 0 + }) + .sum(); + if desc_fit > self.config.delta_f0 { + // candidate is final + final_candidates.remove(&candidate_h); + final_blocks.insert(candidate_h); + } + } + } + Ok(final_blocks) + } + + /// Computes max cliques of compatible blocks + pub fn compute_max_cliques(&self) -> Vec> { + let mut max_cliques: Vec> = Vec::new(); + + // algorithm adapted from IK_GPX as summarized in: + // Cazals et al., "A note on the problem of reporting maximal cliques" + // Theoretical Computer Science, 2008 + // https://doi.org/10.1016/j.tcs.2008.05.010 + + // stack: r, p, x + let mut stack: Vec<( + PreHashSet, + PreHashSet, + PreHashSet, + )> = vec![( + PreHashSet::::default(), + self.gi_head.keys().cloned().collect(), + PreHashSet::::default(), + )]; + while let Some((r, mut p, mut x)) = stack.pop() { + if p.is_empty() && x.is_empty() { + max_cliques.push(r); + continue; + } + // choose the pivot vertex following the GPX scheme: + // u_p = node from (p \/ x) that maximizes the cardinality of (P \ Neighbors(u_p, GI)) + let &u_p = p + .union(&x) + .max_by_key(|&u| { + p.difference(&(&self.gi_head[u] | &vec![*u].into_iter().collect())) + .count() + }) + .unwrap(); // p was checked to be non-empty before + + // iterate over u_set = (p /\ Neighbors(u_p, GI)) + let u_set: PreHashSet = + &p & &(&self.gi_head[&u_p] | &vec![u_p].into_iter().collect()); + for u_i in u_set.into_iter() { + p.remove(&u_i); + let u_i_set: PreHashSet = vec![u_i].into_iter().collect(); + let comp_n_u_i: PreHashSet = &self.gi_head[&u_i] | &u_i_set; + stack.push((&r | &u_i_set, &p - &comp_n_u_i, &x - &comp_n_u_i)); + x.insert(u_i); + } + } + if max_cliques.is_empty() { + // make sure at least one clique remains + max_cliques = vec![PreHashSet::::default()]; + } + max_cliques + } + + /// get the clique of higher fitness + pub fn get_blockclique(&self) -> PreHashSet { + self.max_cliques + .iter() + .find(|c| c.is_blockclique) + .expect("blockclique 
missing") + .block_ids + .clone() + } + + pub fn mark_final_blocks( + &mut self, + add_block_id: &BlockId, + final_blocks: PreHashSet, + ) -> Result<(), ConsensusError> { + for block_id in final_blocks.into_iter() { + // remove from gi_head + if let Some(other_incomps) = self.gi_head.remove(&block_id) { + for other_incomp in other_incomps.into_iter() { + if let Some(other_incomp_lst) = self.gi_head.get_mut(&other_incomp) { + other_incomp_lst.remove(&block_id); + } + } + } + + // mark as final and update latest_final_blocks_periods + if let Some(BlockStatus::Active { + a_block: final_block, + .. + }) = self.block_statuses.get_mut(&block_id) + { + massa_trace!("consensus.block_graph.add_block_to_graph.final", { + "hash": block_id + }); + final_block.is_final = true; + // remove from cliques + let final_block_fitness = final_block.fitness; + self.max_cliques.iter_mut().for_each(|c| { + if c.block_ids.remove(&block_id) { + c.fitness -= final_block_fitness; + } + }); + self.max_cliques.retain(|c| !c.block_ids.is_empty()); // remove empty cliques + if self.max_cliques.is_empty() { + // make sure at least one clique remains + self.max_cliques = vec![Clique { + block_ids: PreHashSet::::default(), + fitness: 0, + is_blockclique: true, + }]; + } + // update latest final blocks + if final_block.slot.period + > self.latest_final_blocks_periods[final_block.slot.thread as usize].1 + { + self.latest_final_blocks_periods[final_block.slot.thread as usize] = + (block_id, final_block.slot.period); + } + // update new final blocks list + self.new_final_blocks.insert(block_id); + } else { + return Err(ConsensusError::ContainerInconsistency(format!("inconsistency inside block statuses updating final blocks adding {} - block {} is missing", add_block_id, block_id))); + } + } + Ok(()) + } +} diff --git a/massa-consensus-worker/src/state/mod.rs b/massa-consensus-worker/src/state/mod.rs new file mode 100644 index 00000000000..8a8fc542205 --- /dev/null +++ 
b/massa-consensus-worker/src/state/mod.rs @@ -0,0 +1,474 @@ +use std::collections::{HashMap, VecDeque}; + +use massa_consensus_exports::{ + block_graph_export::BlockGraphExport, + block_status::{BlockStatus, ExportCompiledBlock, HeaderOrBlock}, + error::ConsensusError, + ConsensusChannels, ConsensusConfig, +}; +use massa_models::{ + active_block::ActiveBlock, + address::Address, + api::BlockGraphStatus, + block::{BlockId, WrappedHeader}, + clique::Clique, + prehash::{CapacityAllocator, PreHashMap, PreHashSet}, + slot::Slot, +}; +use massa_storage::Storage; +use massa_time::MassaTime; + +mod graph; +mod process; +mod process_commands; +mod prune; +mod stats; +mod tick; +mod verifications; + +#[derive(Clone)] +pub struct ConsensusState { + /// Configuration + pub config: ConsensusConfig, + /// Channels to communicate with other modules + pub channels: ConsensusChannels, + /// Storage + pub storage: Storage, + /// Block ids of genesis blocks + pub genesis_hashes: Vec, + /// Incompatibility graph: maps a block id to the block ids it is incompatible with + /// One entry per Active Block + pub gi_head: PreHashMap>, + /// All the cliques + pub max_cliques: Vec, + /// ids of active blocks + pub active_index: PreHashSet, + /// Save of latest periods + pub save_final_periods: Vec, + /// One (block id, period) per thread + pub latest_final_blocks_periods: Vec<(BlockId, u64)>, + /// One `(block id, period)` per thread TODO not sure I understand the difference with `latest_final_blocks_periods` + pub best_parents: Vec<(BlockId, u64)>, + /// Every block we know about + pub block_statuses: PreHashMap, + /// Ids of incoming blocks/headers + pub incoming_index: PreHashSet, + /// Used to limit the number of waiting and discarded blocks + pub sequence_counter: u64, + /// ids of waiting for slot blocks/headers + pub waiting_for_slot_index: PreHashSet, + /// ids of waiting for dependencies blocks/headers + pub waiting_for_dependencies_index: PreHashSet, + /// ids of discarded blocks + 
pub discarded_index: PreHashSet, + /// Blocks that need to be propagated + pub to_propagate: PreHashMap, + /// List of block ids we think are attack attempts + pub attack_attempts: Vec, + /// Newly final blocks + pub new_final_blocks: PreHashSet, + /// Newly stale block mapped to creator and slot + pub new_stale_blocks: PreHashMap, + /// time at which the node was launched (used for desynchronization detection) + pub launch_time: MassaTime, + /// Final block stats `(time, creator, is_from_protocol)` + pub final_block_stats: VecDeque<(MassaTime, Address, bool)>, + /// Blocks that come from protocol used for stats and ids are removed when inserted in `final_block_stats` + pub protocol_blocks: VecDeque<(MassaTime, BlockId)>, + /// Stale block timestamp + pub stale_block_stats: VecDeque, + /// the time span considered for stats + pub stats_history_timespan: MassaTime, + /// the time span considered for desynchronization detection + pub stats_desync_detection_timespan: MassaTime, + /// blocks we want + pub wishlist: PreHashMap>, + /// previous blockclique notified to Execution + pub prev_blockclique: PreHashMap, +} + +impl ConsensusState { + /// Get a full active block + pub fn get_full_active_block(&self, block_id: &BlockId) -> Option<(&ActiveBlock, &Storage)> { + match self.block_statuses.get(block_id) { + Some(BlockStatus::Active { a_block, storage }) => Some((a_block.as_ref(), storage)), + _ => None, + } + } + + pub fn get_clique_count(&self) -> usize { + self.max_cliques.len() + } + + /// get the blockclique (or final) block ID at a given slot, if any + pub fn get_blockclique_block_at_slot(&self, slot: &Slot) -> Option { + // List all blocks at this slot. + // The list should be small: make a copy of it to avoid holding the storage lock. 
+ let blocks_at_slot = { + let storage_read = self.storage.read_blocks(); + let returned = match storage_read.get_blocks_by_slot(slot) { + Some(v) => v.clone(), + None => return None, + }; + returned + }; + + // search for the block in the blockclique + let search_in_blockclique = blocks_at_slot + .intersection( + &self + .max_cliques + .iter() + .find(|c| c.is_blockclique) + .expect("expected one clique to be the blockclique") + .block_ids, + ) + .next(); + if let Some(found_id) = search_in_blockclique { + return Some(*found_id); + } + + // block not found in the blockclique: search in the final blocks + blocks_at_slot + .into_iter() + .find(|b_id| match self.block_statuses.get(b_id) { + Some(BlockStatus::Active { a_block, .. }) => a_block.is_final, + _ => false, + }) + } + + /// get the latest blockclique (or final) block ID at a given slot, if any + pub fn get_latest_blockclique_block_at_slot(&self, slot: &Slot) -> BlockId { + let (mut best_block_id, mut best_block_period) = self + .latest_final_blocks_periods + .get(slot.thread as usize) + .unwrap_or_else(|| panic!("unexpected not found latest final block period")); + + self.get_blockclique() + .iter() + .for_each(|id| match self.block_statuses.get(id) { + Some(BlockStatus::Active { + a_block, + storage: _, + }) => { + if a_block.is_final { + panic!( + "unexpected final block on getting latest blockclique block at slot" + ); + } + if a_block.slot.thread == slot.thread + && a_block.slot.period < slot.period + && a_block.slot.period > best_block_period + { + best_block_period = a_block.slot.period; + best_block_id = *id; + } + } + _ => { + panic!("expected to find only active block but found another status") + } + }); + best_block_id + } + + pub fn get_block_status(&self, block_id: &BlockId) -> BlockGraphStatus { + match self.block_statuses.get(block_id) { + None => BlockGraphStatus::NotFound, + Some(BlockStatus::Active { a_block, .. 
}) => { + if a_block.is_final { + BlockGraphStatus::Final + } else if self + .max_cliques + .iter() + .find(|clique| clique.is_blockclique) + .expect("blockclique absent") + .block_ids + .contains(block_id) + { + BlockGraphStatus::ActiveInBlockclique + } else { + BlockGraphStatus::ActiveInAlternativeCliques + } + } + Some(BlockStatus::Discarded { .. }) => BlockGraphStatus::Discarded, + Some(BlockStatus::Incoming(_)) => BlockGraphStatus::Incoming, + Some(BlockStatus::WaitingForDependencies { .. }) => { + BlockGraphStatus::WaitingForDependencies + } + Some(BlockStatus::WaitingForSlot(_)) => BlockGraphStatus::WaitingForSlot, + } + } + + pub fn list_required_active_blocks(&self) -> Result, ConsensusError> { + // list all active blocks + let mut retain_active: PreHashSet = + PreHashSet::::with_capacity(self.active_index.len()); + + let latest_final_blocks: Vec = self + .latest_final_blocks_periods + .iter() + .map(|(hash, _)| *hash) + .collect(); + + // retain all non-final active blocks, + // the current "best parents", + // and the dependencies for both. + for block_id in self.active_index.iter() { + if let Some(BlockStatus::Active { + a_block: active_block, + .. 
+ }) = self.block_statuses.get(block_id) + { + if !active_block.is_final + || self.best_parents.iter().any(|(b, _p)| b == block_id) + || latest_final_blocks.contains(block_id) + { + retain_active.extend(active_block.parents.iter().map(|(p, _)| *p)); + retain_active.insert(*block_id); + } + } + } + + // retain best parents + retain_active.extend(self.best_parents.iter().map(|(b, _p)| *b)); + + // retain last final blocks + retain_active.extend(self.latest_final_blocks_periods.iter().map(|(h, _)| *h)); + + for (thread, id) in latest_final_blocks.iter().enumerate() { + let mut current_block_id = *id; + while let Some((current_block, _)) = self.get_full_active_block(¤t_block_id) { + let parent_id = { + if !current_block.parents.is_empty() { + Some(current_block.parents[thread].0) + } else { + None + } + }; + + // retain block + retain_active.insert(current_block_id); + + // stop traversing when reaching a block with period number low enough + // so that any of its operations will have their validity period expired at the latest final block in thread + // note: one more is kept because of the way we iterate + if current_block.slot.period + < self.latest_final_blocks_periods[thread] + .1 + .saturating_sub(self.config.operation_validity_periods) + { + break; + } + + // if not genesis, traverse parent + match parent_id { + Some(p_id) => current_block_id = p_id, + None => break, + } + } + } + + // grow with parents & fill thread holes twice + for _ in 0..2 { + // retain the parents of the selected blocks + let retain_clone = retain_active.clone(); + + for retain_h in retain_clone.into_iter() { + retain_active.extend( + self.get_full_active_block(&retain_h) + .ok_or_else(|| ConsensusError::ContainerInconsistency(format!("inconsistency inside block statuses pruning and retaining the parents of the selected blocks - {} is missing", retain_h)))? 
+ .0.parents + .iter() + .map(|(b_id, _p)| *b_id), + ) + } + + // find earliest kept slots in each thread + let mut earliest_retained_periods: Vec = self + .latest_final_blocks_periods + .iter() + .map(|(_, p)| *p) + .collect(); + for retain_h in retain_active.iter() { + let retain_slot = &self + .get_full_active_block(retain_h) + .ok_or_else(|| ConsensusError::ContainerInconsistency(format!("inconsistency inside block statuses pruning and finding earliest kept slots in each thread - {} is missing", retain_h)))? + .0.slot; + earliest_retained_periods[retain_slot.thread as usize] = std::cmp::min( + earliest_retained_periods[retain_slot.thread as usize], + retain_slot.period, + ); + } + + // fill up from the latest final block back to the earliest for each thread + for thread in 0..self.config.thread_count { + let mut cursor = self.latest_final_blocks_periods[thread as usize].0; // hash of tha latest final in that thread + while let Some((c_block, _)) = self.get_full_active_block(&cursor) { + if c_block.slot.period < earliest_retained_periods[thread as usize] { + break; + } + retain_active.insert(cursor); + if c_block.parents.is_empty() { + // genesis + break; + } + cursor = c_block.parents[thread as usize].0; + } + } + } + + Ok(retain_active) + } + + pub fn extract_block_graph_part( + &self, + slot_start: Option, + slot_end: Option, + ) -> Result { + let mut export = BlockGraphExport { + genesis_blocks: self.genesis_hashes.clone(), + active_blocks: PreHashMap::with_capacity(self.block_statuses.len()), + discarded_blocks: PreHashMap::with_capacity(self.block_statuses.len()), + best_parents: self.best_parents.clone(), + latest_final_blocks_periods: self.latest_final_blocks_periods.clone(), + gi_head: self.gi_head.clone(), + max_cliques: self.max_cliques.clone(), + }; + + let filter = |&s| { + if let Some(s_start) = slot_start { + if s < s_start { + return false; + } + } + if let Some(s_end) = slot_end { + if s >= s_end { + return false; + } + } + true + }; + + for 
(hash, block) in self.block_statuses.iter() { + match block { + BlockStatus::Discarded { + slot, + creator, + parents, + reason, + .. + } => { + if filter(slot) { + export + .discarded_blocks + .insert(*hash, (reason.clone(), (*slot, *creator, parents.clone()))); + } + } + BlockStatus::Active { a_block, storage } => { + if filter(&a_block.slot) { + let stored_block = + storage.read_blocks().get(hash).cloned().ok_or_else(|| { + ConsensusError::MissingBlock(format!( + "missing block in BlockGraphExport::extract_from: {}", + hash + )) + })?; + export.active_blocks.insert( + *hash, + ExportCompiledBlock { + header: stored_block.content.header, + children: a_block + .children + .iter() + .map(|thread| { + thread.keys().copied().collect::>() + }) + .collect(), + is_final: a_block.is_final, + }, + ); + } + } + _ => continue, + } + } + + Ok(export) + } + + /// Gets all stored final blocks, not only the still-useful ones + /// This is used when initializing Execution from Consensus. + /// Since the Execution bootstrap snapshot is older than the Consensus snapshot, + /// we might need to signal older final blocks for Execution to catch up. + pub fn get_all_final_blocks(&self) -> HashMap { + self.active_index + .iter() + .map(|b_id| { + let block_infos = match self.block_statuses.get(b_id) { + Some(BlockStatus::Active { a_block, storage }) => { + (a_block.slot, storage.clone()) + } + _ => panic!("active block missing"), + }; + (*b_id, block_infos) + }) + .collect() + } + + /// get the current block wish list, including the operations hash. + pub fn get_block_wishlist( + &self, + ) -> Result>, ConsensusError> { + let mut wishlist = PreHashMap::>::default(); + for block_id in self.waiting_for_dependencies_index.iter() { + if let Some(BlockStatus::WaitingForDependencies { + unsatisfied_dependencies, + .. 
+ }) = self.block_statuses.get(block_id) + { + for unsatisfied_h in unsatisfied_dependencies.iter() { + match self.block_statuses.get(unsatisfied_h) { + Some(BlockStatus::WaitingForDependencies { + header_or_block: HeaderOrBlock::Header(header), + .. + }) => { + wishlist.insert(header.id, Some(header.clone())); + } + None => { + wishlist.insert(*unsatisfied_h, None); + } + _ => {} + } + } + } + } + + Ok(wishlist) + } + + /// Gets a block and all its descendants + /// + /// # Argument + /// * hash : hash of the given block + pub fn get_active_block_and_descendants( + &self, + block_id: &BlockId, + ) -> Result, ConsensusError> { + let mut to_visit = vec![*block_id]; + let mut result = PreHashSet::::default(); + while let Some(visit_h) = to_visit.pop() { + if !result.insert(visit_h) { + continue; // already visited + } + match self.block_statuses.get(&visit_h) { + Some(BlockStatus::Active { a_block, .. }) => { + a_block.as_ref() + .children.iter() + .for_each(|thread_children| to_visit.extend(thread_children.keys())) + }, + _ => return Err(ConsensusError::ContainerInconsistency(format!("inconsistency inside block statuses iterating through descendants of {} - missing {}", block_id, visit_h))), + } + } + Ok(result) + } +} diff --git a/massa-consensus-worker/src/state/process.rs b/massa-consensus-worker/src/state/process.rs new file mode 100644 index 00000000000..9c38e6d9164 --- /dev/null +++ b/massa-consensus-worker/src/state/process.rs @@ -0,0 +1,872 @@ +use std::{ + collections::{BTreeSet, HashMap, VecDeque}, + mem, +}; + +use massa_consensus_exports::{ + block_status::{BlockStatus, DiscardReason, HeaderOrBlock}, + error::ConsensusError, +}; +use massa_logging::massa_trace; +use massa_models::{ + active_block::ActiveBlock, + address::Address, + block::{BlockId, WrappedHeader}, + clique::Clique, + prehash::{PreHashMap, PreHashSet}, + slot::Slot, +}; +use massa_signature::PublicKey; +use massa_storage::Storage; +use massa_time::MassaTime; +use tracing::log::{debug, 
info}; + +use crate::state::verifications::HeaderCheckOutcome; + +use super::ConsensusState; + +impl ConsensusState { + /// Acknowledge a set of items recursively and process them + /// + /// # Arguments: + /// * `to_ack`: the set of items to acknowledge and process + /// * `current_slot`: the current slot when this function is called + /// + /// # Returns: + /// Success or error if an error happened during the processing of items + pub fn rec_process( + &mut self, + mut to_ack: BTreeSet<(Slot, BlockId)>, + current_slot: Option, + ) -> Result<(), ConsensusError> { + // order processing by (slot, hash) + while let Some((_slot, hash)) = to_ack.pop_first() { + to_ack.extend(self.process(hash, current_slot)?) + } + Ok(()) + } + + /// Acknowledge a single item, return a set of items to re-ack + /// + /// # Arguments: + /// * `block_id`: the id of the block to acknowledge + /// * `current_slot`: the current slot when this function is called + /// + /// # Returns: + /// A list of items to re-ack and process or an error if the process of an item failed + pub fn process( + &mut self, + block_id: BlockId, + current_slot: Option, + ) -> Result, ConsensusError> { + // list items to reprocess + let mut reprocess = BTreeSet::new(); + + massa_trace!("consensus.block_graph.process", { "block_id": block_id }); + // control all the waiting states and try to get a valid block + let ( + valid_block_creator, + valid_block_slot, + valid_block_parents_hash_period, + valid_block_incomp, + valid_block_inherited_incomp_count, + valid_block_storage, + valid_block_fitness, + ) = match self.block_statuses.get(&block_id) { + None => return Ok(BTreeSet::new()), // disappeared before being processed: do nothing + + // discarded: do nothing + Some(BlockStatus::Discarded { .. }) => { + massa_trace!("consensus.block_graph.process.discarded", { + "block_id": block_id + }); + return Ok(BTreeSet::new()); + } + + // already active: do nothing + Some(BlockStatus::Active { .. 
}) => { + massa_trace!("consensus.block_graph.process.active", { + "block_id": block_id + }); + return Ok(BTreeSet::new()); + } + + // incoming header + Some(BlockStatus::Incoming(HeaderOrBlock::Header(_))) => { + massa_trace!("consensus.block_graph.process.incoming_header", { + "block_id": block_id + }); + // remove header + let header = if let Some(BlockStatus::Incoming(HeaderOrBlock::Header(header))) = + self.block_statuses.remove(&block_id) + { + self.incoming_index.remove(&block_id); + header + } else { + return Err(ConsensusError::ContainerInconsistency(format!( + "inconsistency inside block statuses removing incoming header {}", + block_id + ))); + }; + match self.check_header(&block_id, &header, current_slot, self)? { + HeaderCheckOutcome::Proceed { .. } => { + // set as waiting dependencies + let mut dependencies = PreHashSet::::default(); + dependencies.insert(block_id); // add self as unsatisfied + self.block_statuses.insert( + block_id, + BlockStatus::WaitingForDependencies { + header_or_block: HeaderOrBlock::Header(header), + unsatisfied_dependencies: dependencies, + sequence_number: { + self.sequence_counter += 1; + self.sequence_counter + }, + }, + ); + self.waiting_for_dependencies_index.insert(block_id); + self.promote_dep_tree(block_id)?; + + massa_trace!( + "consensus.block_graph.process.incoming_header.waiting_for_self", + { "block_id": block_id } + ); + return Ok(BTreeSet::new()); + } + HeaderCheckOutcome::WaitForDependencies(mut dependencies) => { + // set as waiting dependencies + dependencies.insert(block_id); // add self as unsatisfied + massa_trace!("consensus.block_graph.process.incoming_header.waiting_for_dependencies", {"block_id": block_id, "dependencies": dependencies}); + + self.block_statuses.insert( + block_id, + BlockStatus::WaitingForDependencies { + header_or_block: HeaderOrBlock::Header(header), + unsatisfied_dependencies: dependencies, + sequence_number: { + self.sequence_counter += 1; + self.sequence_counter + }, + }, + ); + 
self.waiting_for_dependencies_index.insert(block_id); + self.promote_dep_tree(block_id)?; + + return Ok(BTreeSet::new()); + } + HeaderCheckOutcome::WaitForSlot => { + // make it wait for slot + self.block_statuses.insert( + block_id, + BlockStatus::WaitingForSlot(HeaderOrBlock::Header(header)), + ); + self.waiting_for_slot_index.insert(block_id); + + massa_trace!( + "consensus.block_graph.process.incoming_header.waiting_for_slot", + { "block_id": block_id } + ); + return Ok(BTreeSet::new()); + } + HeaderCheckOutcome::Discard(reason) => { + self.maybe_note_attack_attempt(&reason, &block_id); + massa_trace!("consensus.block_graph.process.incoming_header.discarded", {"block_id": block_id, "reason": reason}); + // count stales + if reason == DiscardReason::Stale { + self.new_stale_blocks + .insert(block_id, (header.creator_address, header.content.slot)); + } + // discard + self.block_statuses.insert( + block_id, + BlockStatus::Discarded { + slot: header.content.slot, + creator: header.creator_address, + parents: header.content.parents, + reason, + sequence_number: { + self.sequence_counter += 1; + self.sequence_counter + }, + }, + ); + self.discarded_index.insert(block_id); + + return Ok(BTreeSet::new()); + } + } + } + + // incoming block + Some(BlockStatus::Incoming(HeaderOrBlock::Block { id: block_id, .. })) => { + let block_id = *block_id; + massa_trace!("consensus.block_graph.process.incoming_block", { + "block_id": block_id + }); + let (slot, storage) = + if let Some(BlockStatus::Incoming(HeaderOrBlock::Block { + slot, storage, .. 
+ })) = self.block_statuses.remove(&block_id) + { + self.incoming_index.remove(&block_id); + (slot, storage) + } else { + return Err(ConsensusError::ContainerInconsistency(format!( + "inconsistency inside block statuses removing incoming block {}", + block_id + ))); + }; + let stored_block = storage + .read_blocks() + .get(&block_id) + .cloned() + .expect("incoming block not found in storage"); + + match self.check_header( + &block_id, + &stored_block.content.header, + current_slot, + self, + )? { + HeaderCheckOutcome::Proceed { + parents_hash_period, + incompatibilities, + inherited_incompatibilities_count, + fitness, + } => { + // block is valid: remove it from Incoming and return it + massa_trace!("consensus.block_graph.process.incoming_block.valid", { + "block_id": block_id + }); + ( + stored_block.content.header.creator_public_key, + slot, + parents_hash_period, + incompatibilities, + inherited_incompatibilities_count, + storage, + fitness, + ) + } + HeaderCheckOutcome::WaitForDependencies(dependencies) => { + // set as waiting dependencies + self.block_statuses.insert( + block_id, + BlockStatus::WaitingForDependencies { + header_or_block: HeaderOrBlock::Block { + id: block_id, + slot, + storage, + }, + unsatisfied_dependencies: dependencies, + sequence_number: { + self.sequence_counter += 1; + self.sequence_counter + }, + }, + ); + self.waiting_for_dependencies_index.insert(block_id); + self.promote_dep_tree(block_id)?; + massa_trace!( + "consensus.block_graph.process.incoming_block.waiting_for_dependencies", + { "block_id": block_id } + ); + return Ok(BTreeSet::new()); + } + HeaderCheckOutcome::WaitForSlot => { + // set as waiting for slot + self.block_statuses.insert( + block_id, + BlockStatus::WaitingForSlot(HeaderOrBlock::Block { + id: block_id, + slot, + storage, + }), + ); + self.waiting_for_slot_index.insert(block_id); + + massa_trace!( + "consensus.block_graph.process.incoming_block.waiting_for_slot", + { "block_id": block_id } + ); + return 
Ok(BTreeSet::new()); + } + HeaderCheckOutcome::Discard(reason) => { + self.maybe_note_attack_attempt(&reason, &block_id); + massa_trace!("consensus.block_graph.process.incoming_block.discarded", {"block_id": block_id, "reason": reason}); + // count stales + if reason == DiscardReason::Stale { + self.new_stale_blocks.insert( + block_id, + ( + stored_block.content.header.creator_address, + stored_block.content.header.content.slot, + ), + ); + } + // add to discard + self.block_statuses.insert( + block_id, + BlockStatus::Discarded { + slot: stored_block.content.header.content.slot, + creator: stored_block.creator_address, + parents: stored_block.content.header.content.parents.clone(), + reason, + sequence_number: { + self.sequence_counter += 1; + self.sequence_counter + }, + }, + ); + self.discarded_index.insert(block_id); + + return Ok(BTreeSet::new()); + } + } + } + + Some(BlockStatus::WaitingForSlot(header_or_block)) => { + massa_trace!("consensus.block_graph.process.waiting_for_slot", { + "block_id": block_id + }); + let slot = header_or_block.get_slot(); + if Some(slot) > current_slot { + massa_trace!( + "consensus.block_graph.process.waiting_for_slot.in_the_future", + { "block_id": block_id } + ); + // in the future: ignore + return Ok(BTreeSet::new()); + } + // send back as incoming and ask for reprocess + if let Some(BlockStatus::WaitingForSlot(header_or_block)) = + self.block_statuses.remove(&block_id) + { + self.waiting_for_slot_index.remove(&block_id); + self.block_statuses + .insert(block_id, BlockStatus::Incoming(header_or_block)); + self.incoming_index.insert(block_id); + reprocess.insert((slot, block_id)); + massa_trace!( + "consensus.block_graph.process.waiting_for_slot.reprocess", + { "block_id": block_id } + ); + return Ok(reprocess); + } else { + return Err(ConsensusError::ContainerInconsistency(format!("inconsistency inside block statuses removing waiting for slot block or header {}", block_id))); + }; + } + + 
Some(BlockStatus::WaitingForDependencies { + unsatisfied_dependencies, + .. + }) => { + massa_trace!("consensus.block_graph.process.waiting_for_dependencies", { + "block_id": block_id + }); + if !unsatisfied_dependencies.is_empty() { + // still has unsatisfied dependencies: ignore + return Ok(BTreeSet::new()); + } + // send back as incoming and ask for reprocess + if let Some(BlockStatus::WaitingForDependencies { + header_or_block, .. + }) = self.block_statuses.remove(&block_id) + { + self.waiting_for_dependencies_index.remove(&block_id); + reprocess.insert((header_or_block.get_slot(), block_id)); + self.block_statuses + .insert(block_id, BlockStatus::Incoming(header_or_block)); + self.incoming_index.insert(block_id); + massa_trace!( + "consensus.block_graph.process.waiting_for_dependencies.reprocess", + { "block_id": block_id } + ); + return Ok(reprocess); + } else { + return Err(ConsensusError::ContainerInconsistency(format!("inconsistency inside block statuses removing waiting for slot header or block {}", block_id))); + } + } + }; + + // add block to graph + self.add_block_to_graph( + block_id, + valid_block_parents_hash_period, + valid_block_creator, + valid_block_slot, + valid_block_incomp, + valid_block_inherited_incomp_count, + valid_block_fitness, + valid_block_storage, + )?; + + // if the block was added, update linked dependencies and mark satisfied ones for recheck + if let Some(BlockStatus::Active { storage, .. }) = self.block_statuses.get(&block_id) { + massa_trace!("consensus.block_graph.process.is_active", { + "block_id": block_id + }); + self.to_propagate.insert(block_id, storage.clone()); + for itm_block_id in self.waiting_for_dependencies_index.iter() { + if let Some(BlockStatus::WaitingForDependencies { + header_or_block, + unsatisfied_dependencies, + .. 
+ }) = self.block_statuses.get_mut(itm_block_id) + { + if unsatisfied_dependencies.remove(&block_id) { + // a dependency was satisfied: retry + reprocess.insert((header_or_block.get_slot(), *itm_block_id)); + } + } + } + } + + Ok(reprocess) + } + + pub fn promote_dep_tree(&mut self, hash: BlockId) -> Result<(), ConsensusError> { + let mut to_explore = vec![hash]; + let mut to_promote: PreHashMap = PreHashMap::default(); + while let Some(h) = to_explore.pop() { + if to_promote.contains_key(&h) { + continue; + } + if let Some(BlockStatus::WaitingForDependencies { + header_or_block, + unsatisfied_dependencies, + sequence_number, + .. + }) = self.block_statuses.get(&h) + { + // promote current block + to_promote.insert(h, (header_or_block.get_slot(), *sequence_number)); + // register dependencies for exploration + to_explore.extend(unsatisfied_dependencies); + } + } + + let mut to_promote: Vec<(Slot, u64, BlockId)> = to_promote + .into_iter() + .map(|(h, (slot, seq))| (slot, seq, h)) + .collect(); + to_promote.sort_unstable(); // last ones should have the highest seq number + for (_slot, _seq, h) in to_promote.into_iter() { + if let Some(BlockStatus::WaitingForDependencies { + sequence_number, .. 
+ }) = self.block_statuses.get_mut(&h) + { + self.sequence_counter += 1; + *sequence_number = self.sequence_counter; + } + } + Ok(()) + } + + /// Add a block to the graph and update the cliques, the graph dependencies and incompatibilities + /// + /// # Arguments: + /// * `add_block_id`: Block id of the block to add + /// * `parents_hash_period`: Ids and periods of the parents of the block to add + /// * `add_block_creator`: Creator of the block to add + /// * `add_block_slot`: Slot of the block to add + /// * `incomp`: Block ids of the blocks incompatible with the block to add + /// * `fitness`: Fitness of the block to add + /// * `storage`: Storage containing all the data of the block to add + /// + /// # Returns: + /// Success or error if any steps failed + #[allow(clippy::too_many_arguments)] + fn add_block_to_graph( + &mut self, + add_block_id: BlockId, + parents_hash_period: Vec<(BlockId, u64)>, + add_block_creator: PublicKey, + add_block_slot: Slot, + incomp: PreHashSet, + inherited_incomp_count: usize, + fitness: u64, + mut storage: Storage, + ) -> Result<(), ConsensusError> { + massa_trace!("consensus.block_graph.add_block_to_graph", { + "block_id": add_block_id + }); + + // Ensure block parents are claimed by the block's storage. + // Note that operations and endorsements should already be there (claimed in Protocol). 
+ storage.claim_block_refs(&parents_hash_period.iter().map(|(p_id, _)| *p_id).collect()); + + // add block to status structure + self.block_statuses.insert( + add_block_id, + BlockStatus::Active { + a_block: Box::new(ActiveBlock { + creator_address: Address::from_public_key(&add_block_creator), + parents: parents_hash_period.clone(), + descendants: PreHashSet::::default(), + block_id: add_block_id, + children: vec![Default::default(); self.config.thread_count as usize], + is_final: false, + slot: add_block_slot, + fitness, + }), + storage, + }, + ); + self.active_index.insert(add_block_id); + + // add as child to parents + // add as descendant to ancestors. Note: descendants are never removed. + self.insert_parents_descendants( + add_block_id, + add_block_slot, + parents_hash_period.iter().map(|(p_id, _)| *p_id).collect(), + ); + + // add incompatibilities to gi_head + massa_trace!( + "consensus.block_graph.add_block_to_graph.add_incompatibilities", + {} + ); + for incomp_h in incomp.iter() { + self.gi_head + .get_mut(incomp_h) + .ok_or_else(|| { + ConsensusError::MissingBlock(format!( + "missing block when adding incomp to gi_head: {}", + incomp_h + )) + })? 
+ .insert(add_block_id); + } + self.gi_head.insert(add_block_id, incomp.clone()); + + // max cliques update + massa_trace!( + "consensus.block_graph.add_block_to_graph.max_cliques_update", + {} + ); + if incomp.len() == inherited_incomp_count { + // clique optimization routine: + // the block only has incompatibilities inherited from its parents + // therefore it is not forking and can simply be added to the cliques it is compatible with + self.max_cliques + .iter_mut() + .filter(|c| incomp.is_disjoint(&c.block_ids)) + .for_each(|c| { + c.block_ids.insert(add_block_id); + }); + } else { + // fully recompute max cliques + massa_trace!( + "consensus.block_graph.add_block_to_graph.clique_full_computing", + { "hash": add_block_id } + ); + let before = self.max_cliques.len(); + self.max_cliques = self + .compute_max_cliques() + .into_iter() + .map(|c| Clique { + block_ids: c, + fitness: 0, + is_blockclique: false, + }) + .collect(); + let after = self.max_cliques.len(); + if before != after { + massa_trace!( + "consensus.block_graph.add_block_to_graph.clique_full_computing more than one clique", + { "cliques": self.max_cliques, "gi_head": self.gi_head } + ); + // gi_head + debug!( + "clique number went from {} to {} after adding {}", + before, after, add_block_id + ); + } + } + + // compute clique fitnesses and find blockclique + massa_trace!("consensus.block_graph.add_block_to_graph.compute_clique_fitnesses_and_find_blockclique", {}); + // note: clique_fitnesses is pair (fitness, -hash_sum) where the second parameter is negative for sorting + let position_blockclique = self.compute_fitness_find_blockclique(&add_block_id)?; + + // update best parents + massa_trace!( + "consensus.block_graph.add_block_to_graph.update_best_parents", + {} + ); + { + let blockclique = &self.max_cliques[position_blockclique]; + + // init best parents as latest_final_blocks_periods + self.best_parents = self.latest_final_blocks_periods.clone(); + // for each blockclique block, set it as 
best_parent in its own thread + // if its period is higher than the current best_parent in that thread + for block_h in blockclique.block_ids.iter() { + let b_slot = match self.block_statuses.get(block_h) { + Some(BlockStatus::Active { a_block, storage: _ }) => a_block.slot, + _ => return Err(ConsensusError::ContainerInconsistency(format!("inconsistency inside block statuses updating best parents while adding {} - missing {}", add_block_id, block_h))), + }; + if b_slot.period > self.best_parents[b_slot.thread as usize].1 { + self.best_parents[b_slot.thread as usize] = (*block_h, b_slot.period); + } + } + } + + // list stale blocks + massa_trace!( + "consensus.block_graph.add_block_to_graph.list_stale_blocks", + {} + ); + let fitness_threshold = self.max_cliques[position_blockclique] + .fitness + .saturating_sub(self.config.delta_f0); + let stale_blocks = self.list_stale_blocks(fitness_threshold); + self.max_cliques.retain(|c| c.fitness >= fitness_threshold); + // mark stale blocks + massa_trace!( + "consensus.block_graph.add_block_to_graph.mark_stale_blocks", + {} + ); + for stale_block_hash in stale_blocks.into_iter() { + self.remove_block(&add_block_id, &stale_block_hash)?; + } + + // list final blocks + massa_trace!( + "consensus.block_graph.add_block_to_graph.list_final_blocks", + {} + ); + let final_blocks = self.list_final_blocks()?; + + // mark final blocks and update latest_final_blocks_periods + massa_trace!( + "consensus.block_graph.add_block_to_graph.mark_final_blocks", + {} + ); + self.mark_final_blocks(&add_block_id, final_blocks)?; + + massa_trace!("consensus.block_graph.add_block_to_graph.end", {}); + Ok(()) + } + + /// Note an attack attempt if the discard reason indicates one. + pub fn maybe_note_attack_attempt(&mut self, reason: &DiscardReason, hash: &BlockId) { + massa_trace!("consensus.block_graph.maybe_note_attack_attempt", {"hash": hash, "reason": reason}); + // If invalid, note the attack attempt. 
+ if let DiscardReason::Invalid(reason) = reason { + info!( + "consensus.block_graph.maybe_note_attack_attempt DiscardReason::Invalid:{}", + reason + ); + self.attack_attempts.push(*hash); + } + } + + /// Notify execution about blockclique changes and finalized blocks. + /// + /// # Arguments: + /// * `finalized_blocks`: Blocks that became final and need to be sent to execution + fn notify_execution(&mut self, finalized_blocks: HashMap) { + // List new block storage instances that Execution doesn't know about. + // That's blocks that have not been sent to execution before, i.e. not in the previous blockclique. + let mut new_blocks_storage: PreHashMap = finalized_blocks + .iter() + .filter_map(|(_slot, b_id)| { + if self.prev_blockclique.contains_key(b_id) { + // was previously sent as a blockclique element + return None; + } + let storage = match self.block_statuses.get(b_id) { + Some(BlockStatus::Active { + a_block: _, + storage, + }) => storage, + _ => panic!("final block not found in active blocks"), + }; + Some((*b_id, storage.clone())) + }) + .collect(); + + // Get new blockclique block list with slots. + let mut blockclique_changed = false; + let new_blockclique: PreHashMap = self + .get_blockclique() + .iter() + .map(|b_id| { + if let Some(slot) = self.prev_blockclique.remove(b_id) { + // The block was already sent in the previous blockclique: + // the slot can be gathered from there without locking Storage. + // Note: the block is removed from self.prev_blockclique. + (*b_id, slot) + } else { + // The block was not present in the previous blockclique: + // the blockclique has changed => get the block's slot by querying Storage.
+ blockclique_changed = true; + let (slot, storage) = match self.block_statuses.get(b_id) { + Some(BlockStatus::Active { a_block, storage }) => (a_block.slot, storage), + _ => panic!("blockclique block not found in active blocks"), + }; + new_blocks_storage.insert(*b_id, storage.clone()); + (*b_id, slot) + } + }) + .collect(); + if !self.prev_blockclique.is_empty() { + // All elements present in the new blockclique have been removed from `prev_blockclique` above. + // If `prev_blockclique` is not empty here, it means that it contained elements that are not in the new blockclique anymore. + // In that case, we mark the blockclique as having changed. + blockclique_changed = true; + } + // Overwrite previous blockclique. + // Should still be done even if unchanged because elements were removed from it above. + self.prev_blockclique = new_blockclique.clone(); + + if finalized_blocks.is_empty() && !blockclique_changed { + // There are no changes (neither block finalizations nor blockclique changes) to send to execution. + return; + } + + // Notify execution of block finalizations and blockclique changes + self.channels + .execution_controller + .update_blockclique_status( + finalized_blocks, + if blockclique_changed { + Some(new_blockclique.into_iter().map(|(k, v)| (v, k)).collect()) + } else { + None + }, + new_blocks_storage, + ); + } + + /// call me if the block database changed + /// Processing of final blocks, pruning. + /// + /// 1. propagate blocks + /// 2. Notify of attack attempts + /// 3. get new final blocks + /// 4. get blockclique + /// 5. notify Execution + /// 6. Process new final blocks + /// 7. Notify pool of new final ops + /// 8. Notify PoS of final blocks + /// 9. notify protocol of block wish list + /// 10. note new latest final periods (prune graph if changed) + /// 11.
add stale blocks to stats + pub fn block_db_changed(&mut self) -> Result<(), ConsensusError> { + let final_block_slots = { + massa_trace!("consensus.consensus_worker.block_db_changed", {}); + + // Propagate new blocks + for (block_id, storage) in mem::take(&mut self.to_propagate).into_iter() { + massa_trace!("consensus.consensus_worker.block_db_changed.integrated", { + "block_id": block_id + }); + self.channels + .protocol_command_sender + .integrated_block(block_id, storage)?; + } + + // Notify protocol of attack attempts. + for hash in mem::take(&mut self.attack_attempts).into_iter() { + self.channels + .protocol_command_sender + .notify_block_attack(hash)?; + massa_trace!("consensus.consensus_worker.block_db_changed.attack", { + "hash": hash + }); + } + + // manage finalized blocks + let timestamp = MassaTime::now(self.config.clock_compensation_millis)?; + let finalized_blocks = mem::take(&mut self.new_final_blocks); + let mut final_block_slots = HashMap::with_capacity(finalized_blocks.len()); + let mut final_block_stats = VecDeque::with_capacity(finalized_blocks.len()); + for b_id in finalized_blocks { + if let Some(BlockStatus::Active { + a_block, + storage: _, + }) = self.block_statuses.get(&b_id) + { + // add to final blocks to notify execution + final_block_slots.insert(a_block.slot, b_id); + + // add to stats + let block_is_from_protocol = self + .protocol_blocks + .iter() + .any(|(_, block_id)| block_id == &b_id); + final_block_stats.push_back(( + timestamp, + a_block.creator_address, + block_is_from_protocol, + )); + } + } + self.final_block_stats.extend(final_block_stats); + + // add stale blocks to stats + let new_stale_block_ids_creators_slots = mem::take(&mut self.new_stale_blocks); + let timestamp = MassaTime::now(self.config.clock_compensation_millis)?; + for (_b_id, (_b_creator, _b_slot)) in new_stale_block_ids_creators_slots.into_iter() { + self.stale_block_stats.push_back(timestamp); + } + final_block_slots + }; + + // notify execution + 
self.notify_execution(final_block_slots); + + // notify protocol of block wishlist + let new_wishlist = self.get_block_wishlist()?; + let new_blocks: PreHashMap> = new_wishlist + .iter() + .filter_map(|(id, header)| { + if !self.wishlist.contains_key(id) { + Some((*id, header.clone())) + } else { + None + } + }) + .collect(); + let remove_blocks: PreHashSet = self + .wishlist + .iter() + .filter_map(|(id, _)| { + if !new_wishlist.contains_key(id) { + Some(*id) + } else { + None + } + }) + .collect(); + if !new_blocks.is_empty() || !remove_blocks.is_empty() { + massa_trace!("consensus.consensus_worker.block_db_changed.send_wishlist_delta", { "new": new_wishlist, "remove": remove_blocks }); + self.channels + .protocol_command_sender + .send_wishlist_delta(new_blocks, remove_blocks)?; + self.wishlist = new_wishlist; + } + + // note new latest final periods + let latest_final_periods: Vec = self + .latest_final_blocks_periods + .iter() + .map(|(_block_id, period)| *period) + .collect(); + // if changed... 
+ if self.save_final_periods != latest_final_periods { + // signal new last final periods to pool + self.channels + .pool_command_sender + .notify_final_cs_periods(&latest_final_periods); + // update final periods + self.save_final_periods = latest_final_periods; + } + + Ok(()) + } +} diff --git a/massa-consensus-worker/src/state/process_commands.rs b/massa-consensus-worker/src/state/process_commands.rs new file mode 100644 index 00000000000..fd923648f10 --- /dev/null +++ b/massa-consensus-worker/src/state/process_commands.rs @@ -0,0 +1,188 @@ +use std::collections::{hash_map::Entry, BTreeSet}; + +use massa_consensus_exports::{ + block_status::{BlockStatus, DiscardReason, HeaderOrBlock}, + error::ConsensusError, +}; +use massa_logging::massa_trace; +use massa_models::{ + block::{BlockId, WrappedHeader}, + slot::Slot, +}; +use massa_storage::Storage; +use massa_time::MassaTime; +use tracing::debug; + +use super::ConsensusState; + +impl ConsensusState { + /// Register a block header in the graph. Ignore genesis hashes. 
+ /// + /// # Arguments: + /// * `block_id`: the block id + /// * `header`: the header to register + /// * `current_slot`: the slot when this function is called + /// + /// # Returns: + /// Success or error if the header is invalid or too old + pub fn register_block_header( + &mut self, + block_id: BlockId, + header: WrappedHeader, + current_slot: Option, + ) -> Result<(), ConsensusError> { + // ignore genesis blocks + if self.genesis_hashes.contains(&block_id) { + return Ok(()); + } + + debug!( + "received header {} for slot {}", + block_id, header.content.slot + ); + massa_trace!("consensus.block_graph.incoming_header", {"block_id": block_id, "header": header}); + let mut to_ack: BTreeSet<(Slot, BlockId)> = BTreeSet::new(); + match self.block_statuses.entry(block_id) { + // if absent => add as Incoming, call rec_ack on it + Entry::Vacant(vac) => { + to_ack.insert((header.content.slot, block_id)); + vac.insert(BlockStatus::Incoming(HeaderOrBlock::Header(header))); + self.incoming_index.insert(block_id); + } + Entry::Occupied(mut occ) => match occ.get_mut() { + BlockStatus::Discarded { + sequence_number, .. + } => { + // promote if discarded + self.sequence_counter += 1; + *sequence_number = self.sequence_counter; + } + BlockStatus::WaitingForDependencies { .. } => { + // promote in dependencies + self.promote_dep_tree(block_id)?; + } + _ => {} + }, + } + + // process + self.rec_process(to_ack, current_slot)?; + + Ok(()) + } + + /// Register a new full block in the graph. Ignore genesis hashes. 
+ /// + /// # Arguments: + /// * `block_id`: the block id + /// * `slot`: the slot of the block + /// * `current_slot`: the slot when this function is called + /// * `storage`: Storage containing the whole content of the block + /// * `created`: is the block created by the node or received from the network + /// + /// # Returns: + /// Success or error if the block is invalid or too old + pub fn register_block( + &mut self, + block_id: BlockId, + slot: Slot, + current_slot: Option, + storage: Storage, + created: bool, + ) -> Result<(), ConsensusError> { + // ignore genesis blocks + if self.genesis_hashes.contains(&block_id) { + return Ok(()); + } + + // Block is coming from protocol mark it for desync calculation + if !created { + let now = MassaTime::now(self.config.clock_compensation_millis)?; + self.protocol_blocks.push_back((now, block_id)); + } + + debug!("received block {} for slot {}", block_id, slot); + + let mut to_ack: BTreeSet<(Slot, BlockId)> = BTreeSet::new(); + match self.block_statuses.entry(block_id) { + // if absent => add as Incoming, call rec_ack on it + Entry::Vacant(vac) => { + to_ack.insert((slot, block_id)); + vac.insert(BlockStatus::Incoming(HeaderOrBlock::Block { + id: block_id, + slot, + storage, + })); + self.incoming_index.insert(block_id); + } + Entry::Occupied(mut occ) => match occ.get_mut() { + BlockStatus::Discarded { + sequence_number, .. + } => { + // promote if discarded + self.sequence_counter += 1; + *sequence_number = self.sequence_counter; + } + BlockStatus::WaitingForSlot(header_or_block) => { + // promote to full block + *header_or_block = HeaderOrBlock::Block { + id: block_id, + slot, + storage, + }; + } + BlockStatus::WaitingForDependencies { + header_or_block, + unsatisfied_dependencies, + .. 
+ } => { + // promote to full block and satisfy self-dependency + if unsatisfied_dependencies.remove(&block_id) { + // a dependency was satisfied: process + to_ack.insert((slot, block_id)); + } + *header_or_block = HeaderOrBlock::Block { + id: block_id, + slot, + storage, + }; + // promote in dependencies + self.promote_dep_tree(block_id)?; + } + _ => return Ok(()), + }, + } + + // process + self.rec_process(to_ack, current_slot)?; + + Ok(()) + } + + /// Mark a block that is in the graph as invalid. + /// + /// # Arguments: + /// * `block_id`: Block id of the block to mark as invalid + /// * `header`: Header of the block to mark as invalid + pub fn mark_invalid_block(&mut self, block_id: &BlockId, header: WrappedHeader) { + let reason = DiscardReason::Invalid("invalid".to_string()); + self.maybe_note_attack_attempt(&reason, block_id); + massa_trace!("consensus.block_graph.process.invalid_block", {"block_id": block_id, "reason": reason}); + + // add to discard + self.block_statuses.insert( + *block_id, + BlockStatus::Discarded { + slot: header.content.slot, + creator: header.creator_address, + parents: header.content.parents, + reason, + sequence_number: { + self.sequence_counter += 1; + self.sequence_counter + }, + }, + ); + self.discarded_index.insert(*block_id); + } +} diff --git a/massa-consensus-worker/src/state/prune.rs b/massa-consensus-worker/src/state/prune.rs new file mode 100644 index 00000000000..216ee35f005 --- /dev/null +++ b/massa-consensus-worker/src/state/prune.rs @@ -0,0 +1,353 @@ +use massa_consensus_exports::{ + block_status::{BlockStatus, DiscardReason, HeaderOrBlock}, + error::ConsensusError, +}; +use massa_logging::massa_trace; +use massa_models::{ + active_block::ActiveBlock, + block::BlockId, + prehash::{PreHashMap, PreHashSet}, + slot::Slot, +}; +use tracing::debug; + +use super::ConsensusState; + +impl ConsensusState { + /// prune active blocks and return final blocks, return discarded final blocks + fn prune_active(&mut self) -> Result, 
ConsensusError> { + // list required active blocks + let mut retain_active: PreHashSet = self.list_required_active_blocks()?; + + // retain extra history according to the config + // this is useful to avoid desync on temporary connection loss + for a_block in self.active_index.iter() { + if let Some(BlockStatus::Active { + a_block: active_block, + .. + }) = self.block_statuses.get(a_block) + { + let (_b_id, latest_final_period) = + self.latest_final_blocks_periods[active_block.slot.thread as usize]; + if active_block.slot.period + >= latest_final_period.saturating_sub(self.config.force_keep_final_periods) + { + retain_active.insert(*a_block); + } + } + } + + // remove unused final active blocks + let mut discarded_finals: PreHashMap = PreHashMap::default(); + let to_remove: Vec = self + .active_index + .difference(&retain_active) + .copied() + .collect(); + for discard_active_h in to_remove { + let block_slot; + let block_creator; + let block_parents; + { + let read_blocks = self.storage.read_blocks(); + let block = read_blocks.get(&discard_active_h).ok_or_else(|| { + ConsensusError::MissingBlock(format!( + "missing block when removing unused final active blocks: {}", + discard_active_h + )) + })?; + block_slot = block.content.header.content.slot; + block_creator = block.creator_address; + block_parents = block.content.header.content.parents.clone(); + }; + + let discarded_active = if let Some(BlockStatus::Active { + a_block: discarded_active, + .. + }) = self.block_statuses.remove(&discard_active_h) + { + self.active_index.remove(&discard_active_h); + discarded_active + } else { + return Err(ConsensusError::ContainerInconsistency(format!("inconsistency inside block statuses pruning and removing unused final active blocks - {} is missing", discard_active_h))); + }; + + // remove from parent's children + for (parent_h, _parent_period) in discarded_active.parents.iter() { + if let Some(BlockStatus::Active { + a_block: parent_active_block, + .. 
+ }) = self.block_statuses.get_mut(parent_h) + { + parent_active_block.children[discarded_active.slot.thread as usize] + .remove(&discard_active_h); + } + } + + massa_trace!("consensus.block_graph.prune_active", {"hash": discard_active_h, "reason": DiscardReason::Final}); + + // mark as final + self.block_statuses.insert( + discard_active_h, + BlockStatus::Discarded { + slot: block_slot, + creator: block_creator, + parents: block_parents, + reason: DiscardReason::Final, + sequence_number: { + self.sequence_counter += 1; + self.sequence_counter + }, + }, + ); + self.discarded_index.insert(discard_active_h); + + discarded_finals.insert(discard_active_h, *discarded_active); + } + + Ok(discarded_finals) + } + + fn prune_slot_waiting(&mut self) { + if self.waiting_for_slot_index.len() <= self.config.max_future_processing_blocks { + return; + } + let mut slot_waiting: Vec<(Slot, BlockId)> = self + .waiting_for_slot_index + .iter() + .filter_map(|block_id| { + if let Some(BlockStatus::WaitingForSlot(header_or_block)) = + self.block_statuses.get(block_id) + { + return Some((header_or_block.get_slot(), *block_id)); + } + None + }) + .collect(); + slot_waiting.sort_unstable(); + let len_slot_waiting = slot_waiting.len(); + (self.config.max_future_processing_blocks..len_slot_waiting).for_each(|idx| { + let (_slot, block_id) = &slot_waiting[idx]; + self.block_statuses.remove(block_id); + self.waiting_for_slot_index.remove(block_id); + }); + } + + fn prune_discarded(&mut self) -> Result<(), ConsensusError> { + if self.discarded_index.len() <= self.config.max_discarded_blocks { + return Ok(()); + } + let mut discard_hashes: Vec<(u64, BlockId)> = self + .discarded_index + .iter() + .filter_map(|block_id| { + if let Some(BlockStatus::Discarded { + sequence_number, .. 
+ }) = self.block_statuses.get(block_id) + { + return Some((*sequence_number, *block_id)); + } + None + }) + .collect(); + discard_hashes.sort_unstable(); + discard_hashes.truncate(self.discarded_index.len() - self.config.max_discarded_blocks); + for (_, block_id) in discard_hashes.iter() { + self.block_statuses.remove(block_id); + self.discarded_index.remove(block_id); + } + Ok(()) + } + + fn prune_waiting_for_dependencies(&mut self) -> Result<(), ConsensusError> { + let mut to_discard: PreHashMap> = PreHashMap::default(); + let mut to_keep: PreHashMap = PreHashMap::default(); + + // list items that are older than the latest final blocks in their threads or have deps that are discarded + { + for block_id in self.waiting_for_dependencies_index.iter() { + if let Some(BlockStatus::WaitingForDependencies { + header_or_block, + unsatisfied_dependencies, + sequence_number, + }) = self.block_statuses.get(block_id) + { + // has already discarded dependencies => discard (choose worst reason) + let mut discard_reason = None; + let mut discarded_dep_found = false; + for dep in unsatisfied_dependencies.iter() { + if let Some(BlockStatus::Discarded { reason, .. 
}) = + self.block_statuses.get(dep) + { + discarded_dep_found = true; + match reason { + DiscardReason::Invalid(reason) => { + discard_reason = Some(DiscardReason::Invalid(format!("discarded because depend on block:{} that has discard reason:{}", block_id, reason))); + break; + } + DiscardReason::Stale => discard_reason = Some(DiscardReason::Stale), + DiscardReason::Final => discard_reason = Some(DiscardReason::Stale), + } + } + } + if discarded_dep_found { + to_discard.insert(*block_id, discard_reason); + continue; + } + + // is at least as old as the latest final block in its thread => discard as stale + let slot = header_or_block.get_slot(); + if slot.period <= self.latest_final_blocks_periods[slot.thread as usize].1 { + to_discard.insert(*block_id, Some(DiscardReason::Stale)); + continue; + } + + // otherwise, mark as to_keep + to_keep.insert(*block_id, (*sequence_number, header_or_block.get_slot())); + } + } + } + + // discard in chain and because of limited size + while !to_keep.is_empty() { + // mark entries as to_discard and remove them from to_keep + for (hash, _old_order) in to_keep.clone().into_iter() { + if let Some(BlockStatus::WaitingForDependencies { + unsatisfied_dependencies, + .. 
+ }) = self.block_statuses.get(&hash) + { + // has dependencies that will be discarded => discard (choose worst reason) + let mut discard_reason = None; + let mut dep_to_discard_found = false; + for dep in unsatisfied_dependencies.iter() { + if let Some(reason) = to_discard.get(dep) { + dep_to_discard_found = true; + match reason { + Some(DiscardReason::Invalid(reason)) => { + discard_reason = Some(DiscardReason::Invalid(format!("discarded because depend on block:{} that has discard reason:{}", hash, reason))); + break; + } + Some(DiscardReason::Stale) => { + discard_reason = Some(DiscardReason::Stale) + } + Some(DiscardReason::Final) => { + discard_reason = Some(DiscardReason::Stale) + } + None => {} // leave as None + } + } + } + if dep_to_discard_found { + to_keep.remove(&hash); + to_discard.insert(hash, discard_reason); + continue; + } + } + } + + // remove worst excess element + if to_keep.len() > self.config.max_dependency_blocks { + let remove_elt = to_keep + .iter() + .filter_map(|(hash, _old_order)| { + if let Some(BlockStatus::WaitingForDependencies { + header_or_block, + sequence_number, + .. + }) = self.block_statuses.get(hash) + { + return Some((sequence_number, header_or_block.get_slot(), *hash)); + } + None + }) + .min(); + if let Some((_seq_num, _slot, hash)) = remove_elt { + to_keep.remove(&hash); + to_discard.insert(hash, None); + continue; + } + } + + // nothing happened: stop loop + break; + } + + // transition states to Discarded if there is a reason, otherwise just drop + for (block_id, reason_opt) in to_discard.drain() { + if let Some(BlockStatus::WaitingForDependencies { + header_or_block, .. + }) = self.block_statuses.remove(&block_id) + { + self.waiting_for_dependencies_index.remove(&block_id); + let header = match header_or_block { + HeaderOrBlock::Header(h) => h, + HeaderOrBlock::Block { id: block_id, .. 
} => self + .storage + .read_blocks() + .get(&block_id) + .ok_or_else(|| { + ConsensusError::MissingBlock(format!( + "missing block when pruning waiting for deps: {}", + block_id + )) + })? + .content + .header + .clone(), + }; + massa_trace!("consensus.block_graph.prune_waiting_for_dependencies", {"hash": block_id, "reason": reason_opt}); + + if let Some(reason) = reason_opt { + // add to stats if reason is Stale + if reason == DiscardReason::Stale { + self.new_stale_blocks + .insert(block_id, (header.creator_address, header.content.slot)); + } + // transition to Discarded only if there is a reason + self.block_statuses.insert( + block_id, + BlockStatus::Discarded { + slot: header.content.slot, + creator: header.creator_address, + parents: header.content.parents.clone(), + reason, + sequence_number: { + self.sequence_counter += 1; + self.sequence_counter + }, + }, + ); + self.discarded_index.insert(block_id); + } + } + } + + Ok(()) + } + + pub fn prune(&mut self) -> Result<(), ConsensusError> { + let before = self.max_cliques.len(); + // Step 1: discard final blocks that are not useful to the graph anymore and return them + self.prune_active()?; + + // Step 2: prune slot waiting blocks + self.prune_slot_waiting(); + + // Step 3: prune dependency waiting blocks + self.prune_waiting_for_dependencies()?; + + // Step 4: prune discarded + self.prune_discarded()?; + + let after = self.max_cliques.len(); + if before != after { + debug!( + "clique number went from {} to {} after pruning", + before, after + ); + } + + Ok(()) + } +} diff --git a/massa-consensus-worker/src/state/stats.rs b/massa-consensus-worker/src/state/stats.rs new file mode 100644 index 00000000000..4ab766a4ca4 --- /dev/null +++ b/massa-consensus-worker/src/state/stats.rs @@ -0,0 +1,101 @@ +use super::ConsensusState; +use massa_consensus_exports::error::ConsensusError; +use massa_models::stats::ConsensusStats; +use massa_time::MassaTime; +use std::cmp::max; + +#[cfg(not(feature = "sandbox"))] +use 
tracing::log::warn; + +#[cfg(not(feature = "sandbox"))] +use massa_consensus_exports::events::ConsensusEvent; + +impl ConsensusState { + /// Calculate and return stats about consensus + pub fn get_stats(&self) -> Result { + let timespan_end = max( + self.launch_time, + MassaTime::now(self.config.clock_compensation_millis)?, + ); + let timespan_start = max( + timespan_end.saturating_sub(self.config.stats_timespan), + self.launch_time, + ); + let final_block_count = self + .final_block_stats + .iter() + .filter(|(t, _, _)| *t >= timespan_start && *t < timespan_end) + .count() as u64; + let stale_block_count = self + .stale_block_stats + .iter() + .filter(|t| **t >= timespan_start && **t < timespan_end) + .count() as u64; + let clique_count = self.get_clique_count() as u64; + Ok(ConsensusStats { + final_block_count, + stale_block_count, + clique_count, + start_timespan: timespan_start, + end_timespan: timespan_end, + }) + } + + /// Must be called each tick to update stats. Will detect if a desynchronization happened + pub fn stats_tick(&mut self) -> Result<(), ConsensusError> { + // check if there are any final blocks is coming from protocol + // if none => we are probably desync + #[cfg(not(feature = "sandbox"))] + { + let now = MassaTime::now(self.config.clock_compensation_millis)?; + if now + > max(self.config.genesis_timestamp, self.launch_time) + .saturating_add(self.stats_desync_detection_timespan) + && !self + .final_block_stats + .iter() + .any(|(time, _, is_from_protocol)| { + time > &now.saturating_sub(self.stats_desync_detection_timespan) + && *is_from_protocol + }) + { + warn!("desynchronization detected because the recent final block history is empty or contains only blocks produced by this node"); + let _ = self + .channels + .controller_event_tx + .send(ConsensusEvent::NeedSync); + } + } + // prune stats + self.prune_stats()?; + Ok(()) + } + + /// Remove old stats from consensus storage + pub fn prune_stats(&mut self) -> Result<(), ConsensusError> { + 
let start_time = MassaTime::now(self.config.clock_compensation_millis)? + .saturating_sub(self.stats_history_timespan); + while let Some((t, _, _)) = self.final_block_stats.front() { + if t < &start_time { + self.final_block_stats.pop_front(); + } else { + break; + } + } + while let Some(t) = self.stale_block_stats.front() { + if t < &start_time { + self.stale_block_stats.pop_front(); + } else { + break; + } + } + while let Some((t, _)) = self.protocol_blocks.front() { + if t < &start_time { + self.protocol_blocks.pop_front(); + } else { + break; + } + } + Ok(()) + } +} diff --git a/massa-consensus-worker/src/state/tick.rs b/massa-consensus-worker/src/state/tick.rs new file mode 100644 index 00000000000..3165bc82669 --- /dev/null +++ b/massa-consensus-worker/src/state/tick.rs @@ -0,0 +1,49 @@ +use std::collections::BTreeSet; + +use massa_consensus_exports::{block_status::BlockStatus, error::ConsensusError}; +use massa_logging::massa_trace; +use massa_models::{block::BlockId, slot::Slot}; + +use super::ConsensusState; + +impl ConsensusState { + /// This function should be called each tick and will check if there is a block in the graph that should be processed at this slot, and if so, process it. + /// + /// # Arguments: + /// * `current_slot`: the current slot + /// + /// # Returns: + /// Error if the process of a block returned an error. 
+ pub fn slot_tick(&mut self, current_slot: Slot) -> Result<(), ConsensusError> { + massa_trace!("consensus.consensus_worker.slot_tick", { + "slot": current_slot + }); + + // list all elements for which the time has come + let to_process: BTreeSet<(Slot, BlockId)> = self + .waiting_for_slot_index + .iter() + .filter_map(|b_id| match self.block_statuses.get(b_id) { + Some(BlockStatus::WaitingForSlot(header_or_block)) => { + let slot = header_or_block.get_slot(); + if slot <= current_slot { + Some((slot, *b_id)) + } else { + None + } + } + _ => None, + }) + .collect(); + + massa_trace!("consensus.block_graph.slot_tick", {}); + // process those elements + self.rec_process(to_process, Some(current_slot))?; + + self.stats_tick()?; + // take care of block db changes + self.block_db_changed()?; + + Ok(()) + } +} diff --git a/massa-consensus-worker/src/state/verifications.rs b/massa-consensus-worker/src/state/verifications.rs new file mode 100644 index 00000000000..9fc6dc11be8 --- /dev/null +++ b/massa-consensus-worker/src/state/verifications.rs @@ -0,0 +1,411 @@ +use super::ConsensusState; + +use massa_consensus_exports::{ + block_status::{BlockStatus, DiscardReason}, + error::ConsensusError, +}; +use massa_logging::massa_trace; +use massa_models::{ + block::{BlockId, WrappedHeader}, + prehash::PreHashSet, + slot::Slot, +}; + +/// Possible output of a header check +#[derive(Debug)] +pub enum HeaderCheckOutcome { + /// it's ok and here are some useful values + Proceed { + /// one (parent block id, parent's period) per thread + parents_hash_period: Vec<(BlockId, u64)>, + /// blocks that header is incompatible with + incompatibilities: PreHashSet, + /// number of incompatibilities that are inherited from the parents + inherited_incompatibilities_count: usize, + /// fitness + fitness: u64, + }, + /// there is something wrong with that header + Discard(DiscardReason), + /// it must wait for its slot to be fully processed + WaitForSlot, + /// it must wait for these block ids to 
be fully processed + WaitForDependencies(PreHashSet), +} + +/// Possible outcomes of endorsements check +#[derive(Debug)] +pub enum EndorsementsCheckOutcome { + /// Everything is ok + Proceed, + /// There is something wrong with that endorsement + Discard(DiscardReason), + /// It must wait for its slot to be fully processed + WaitForSlot, +} + +impl ConsensusState { + /// Process an incoming header. + /// + /// Checks performed: + /// - Number of parents matches thread count. + /// - Slot above 0. + /// - Valid thread. + /// - Check that the block is older than the latest final one in thread. + /// - Check that the block slot is not too much into the future, + /// as determined by the configuration `future_block_processing_max_periods`. + /// - Check if it was the creator's turn to create this block. + /// - TODO: check for double staking. + /// - Check parents are present. + /// - Check the topological consistency of the parents. + /// - Check endorsements. + /// - Check thread incompatibility test. + /// - Check grandpa incompatibility test. + /// - Check if the block is incompatible with a parent. + /// - Check if the block is incompatible with a final block. 
+ pub fn check_header( + &self, + block_id: &BlockId, + header: &WrappedHeader, + current_slot: Option, + read_shared_state: &ConsensusState, + ) -> Result { + massa_trace!("consensus.block_graph.check_header", { + "block_id": block_id + }); + let mut parents: Vec<(BlockId, u64)> = + Vec::with_capacity(self.config.thread_count as usize); + let mut incomp = PreHashSet::::default(); + let mut missing_deps = PreHashSet::::default(); + let creator_addr = header.creator_address; + + // check that is older than the latest final block in that thread + // Note: this excludes genesis blocks + if header.content.slot.period + <= read_shared_state.latest_final_blocks_periods[header.content.slot.thread as usize].1 + { + return Ok(HeaderCheckOutcome::Discard(DiscardReason::Stale)); + } + + // check if block slot is too much in the future + if let Some(cur_slot) = current_slot { + if header.content.slot.period + > cur_slot + .period + .saturating_add(self.config.future_block_processing_max_periods) + { + return Ok(HeaderCheckOutcome::WaitForSlot); + } + } + + // check if it was the creator's turn to create this block + // (step 1 in consensus/pos.md) + let slot_draw_address = match self + .channels + .selector_controller + .get_producer(header.content.slot) + { + Ok(draw) => draw, + Err(_) => return Ok(HeaderCheckOutcome::WaitForSlot), // TODO properly handle PoS errors + }; + if creator_addr != slot_draw_address { + // it was not the creator's turn to create a block for this slot + return Ok(HeaderCheckOutcome::Discard(DiscardReason::Invalid( + format!("Bad creator turn for the slot:{}", header.content.slot), + ))); + } + + // check if block is in the future: queue it + // note: do it after testing signature + draw to prevent queue flooding/DoS + // note: Some(x) > None + if Some(header.content.slot) > current_slot { + return Ok(HeaderCheckOutcome::WaitForSlot); + } + + // Note: here we will check if we already have a block for that slot + // and if someone double staked, they 
will be denounced + + // list parents and ensure they are present + let parent_set: PreHashSet = header.content.parents.iter().copied().collect(); + for parent_thread in 0u8..self.config.thread_count { + let parent_hash = header.content.parents[parent_thread as usize]; + match read_shared_state.block_statuses.get(&parent_hash) { + Some(BlockStatus::Discarded { reason, .. }) => { + // parent is discarded + return Ok(HeaderCheckOutcome::Discard(match reason { + DiscardReason::Invalid(invalid_reason) => DiscardReason::Invalid(format!( + "discarded because a parent was discarded for the following reason: {}", + invalid_reason + )), + r => r.clone(), + })); + } + Some(BlockStatus::Active { + a_block: parent, .. + }) => { + // parent is active + + // check that the parent is from an earlier slot in the right thread + if parent.slot.thread != parent_thread || parent.slot >= header.content.slot { + return Ok(HeaderCheckOutcome::Discard(DiscardReason::Invalid( + format!( + "Bad parent {} in thread:{} or slot:{} for {}.", + parent_hash, parent_thread, parent.slot, header.content.slot + ), + ))); + } + + // inherit parent incompatibilities + // and ensure parents are mutually compatible + if let Some(p_incomp) = read_shared_state.gi_head.get(&parent_hash) { + if !p_incomp.is_disjoint(&parent_set) { + return Ok(HeaderCheckOutcome::Discard(DiscardReason::Invalid( + "Parent not mutually compatible".to_string(), + ))); + } + incomp.extend(p_incomp); + } + + parents.push((parent_hash, parent.slot.period)); + } + _ => { + // parent is missing or queued + if read_shared_state.genesis_hashes.contains(&parent_hash) { + // forbid depending on discarded genesis block + return Ok(HeaderCheckOutcome::Discard(DiscardReason::Stale)); + } + missing_deps.insert(parent_hash); + } + } + } + if !missing_deps.is_empty() { + return Ok(HeaderCheckOutcome::WaitForDependencies(missing_deps)); + } + let inherited_incomp_count = incomp.len(); + + // check the topological consistency of the parents + { 
+ let mut gp_max_slots = vec![0u64; self.config.thread_count as usize]; + for parent_i in 0..self.config.thread_count { + let (parent_h, parent_period) = parents[parent_i as usize]; + let parent = match read_shared_state.block_statuses.get(&parent_h) { + Some(BlockStatus::Active { + a_block, + storage: _, + }) => a_block, + _ => { + return Err(ConsensusError::ContainerInconsistency(format!( + "inconsistency inside block statuses searching parent {} of block {}", + parent_h, block_id + ))) + } + }; + if parent_period < gp_max_slots[parent_i as usize] { + // a parent is earlier than a block known by another parent in that thread + return Ok(HeaderCheckOutcome::Discard(DiscardReason::Invalid( + "a parent is earlier than a block known by another parent in that thread" + .to_string(), + ))); + } + gp_max_slots[parent_i as usize] = parent_period; + if parent_period == 0 { + // genesis + continue; + } + for gp_i in 0..self.config.thread_count { + if gp_i == parent_i { + continue; + } + let gp_h = parent.parents[gp_i as usize].0; + match read_shared_state.block_statuses.get(&gp_h) { + // this grandpa is discarded + Some(BlockStatus::Discarded { reason, .. }) => { + return Ok(HeaderCheckOutcome::Discard(reason.clone())); + } + // this grandpa is active + Some(BlockStatus::Active { a_block: gp, .. 
}) => { + if gp.slot.period > gp_max_slots[gp_i as usize] { + if gp_i < parent_i { + return Ok(HeaderCheckOutcome::Discard( + DiscardReason::Invalid( + "grandpa error: gp_i < parent_i".to_string(), + ), + )); + } + gp_max_slots[gp_i as usize] = gp.slot.period; + } + } + // this grandpa is missing, assume stale + _ => return Ok(HeaderCheckOutcome::Discard(DiscardReason::Stale)), + } + } + } + } + + // get parent in own thread + let parent_in_own_thread = match read_shared_state + .block_statuses + .get(&parents[header.content.slot.thread as usize].0) + { + Some(BlockStatus::Active { + a_block, + storage: _, + }) => Some(a_block), + _ => None, + } + .ok_or_else(|| { + ConsensusError::ContainerInconsistency(format!( + "inconsistency inside block statuses searching parent {} in own thread of block {}", + parents[header.content.slot.thread as usize].0, block_id + )) + })?; + + // check endorsements + match self.check_endorsements(header)? { + EndorsementsCheckOutcome::Proceed => {} + EndorsementsCheckOutcome::Discard(reason) => { + return Ok(HeaderCheckOutcome::Discard(reason)) + } + EndorsementsCheckOutcome::WaitForSlot => return Ok(HeaderCheckOutcome::WaitForSlot), + } + + // thread incompatibility test + parent_in_own_thread.children[header.content.slot.thread as usize] + .keys() + .filter(|&sibling_h| sibling_h != block_id) + .try_for_each(|&sibling_h| { + incomp.extend(self.get_active_block_and_descendants(&sibling_h)?); + Result::<(), ConsensusError>::Ok(()) + })?; + + // grandpa incompatibility test + for tau in (0u8..self.config.thread_count).filter(|&t| t != header.content.slot.thread) { + // for each parent in a different thread tau + // traverse parent's descendants in tau + let mut to_explore = vec![(0usize, header.content.parents[tau as usize])]; + while let Some((cur_gen, cur_h)) = to_explore.pop() { + let cur_b = match read_shared_state.block_statuses.get(&cur_h) { + Some(BlockStatus::Active { a_block, storage: _ }) => Some(a_block), + _ => None, + 
}.ok_or_else(|| ConsensusError::ContainerInconsistency(format!("inconsistency inside block statuses searching {} while checking grandpa incompatibility of block {}",cur_h, block_id)))?; + + // traverse but do not check up to generation 1 + if cur_gen <= 1 { + to_explore.extend( + cur_b.children[tau as usize] + .keys() + .map(|&c_h| (cur_gen + 1, c_h)), + ); + continue; + } + + let parent_id = { + self.storage + .read_blocks() + .get(&cur_b.block_id) + .ok_or_else(|| { + ConsensusError::MissingBlock(format!( + "missing block in grandpa incomp test: {}", + cur_b.block_id + )) + })? + .content + .header + .content + .parents[header.content.slot.thread as usize] + }; + + // check if the parent in tauB has a strictly lower period number than B's parent in tauB + // note: cur_b cannot be genesis at gen > 1 + let parent_period = match read_shared_state.block_statuses.get(&parent_id) { + Some(BlockStatus::Active { a_block, storage: _ }) => Some(a_block), + _ => None, + }.ok_or_else(|| + ConsensusError::ContainerInconsistency( + format!("inconsistency inside block statuses searching {} check if the parent in tauB has a strictly lower period number than B's parent in tauB while checking grandpa incompatibility of block {}", + parent_id, + block_id) + ))?.slot.period; + if parent_period < parent_in_own_thread.slot.period { + // GPI detected + incomp.extend(self.get_active_block_and_descendants(&cur_h)?); + } // otherwise, cur_b and its descendants cannot be GPI with the block: don't traverse + } + } + + // check if the block is incompatible with a parent + if !incomp.is_disjoint(&parents.iter().map(|(h, _p)| *h).collect()) { + return Ok(HeaderCheckOutcome::Discard(DiscardReason::Invalid( + "Block incompatible with a parent".to_string(), + ))); + } + + // check if the block is incompatible with a final block + if !incomp.is_disjoint( + &read_shared_state + .active_index + .iter() + .filter_map(|h| { + if let Some(BlockStatus::Active { a_block: a, .. 
}) = + read_shared_state.block_statuses.get(h) + { + if a.is_final { + return Some(*h); + } + } + None + }) + .collect(), + ) { + return Ok(HeaderCheckOutcome::Discard(DiscardReason::Stale)); + } + massa_trace!("consensus.block_graph.check_header.ok", { + "block_id": block_id + }); + + Ok(HeaderCheckOutcome::Proceed { + parents_hash_period: parents, + incompatibilities: incomp, + inherited_incompatibilities_count: inherited_incomp_count, + fitness: header.get_fitness(), + }) + } + + /// check endorsements: + /// * endorser was selected for that (slot, index) + /// * endorsed slot is `parent_in_own_thread` slot + pub fn check_endorsements( + &self, + header: &WrappedHeader, + ) -> Result { + // check endorsements + let endorsement_draws = match self + .channels + .selector_controller + .get_selection(header.content.slot) + { + Ok(sel) => sel.endorsements, + Err(_) => return Ok(EndorsementsCheckOutcome::WaitForSlot), + }; + for endorsement in header.content.endorsements.iter() { + // check that the draw is correct + if endorsement.creator_address != endorsement_draws[endorsement.content.index as usize] + { + return Ok(EndorsementsCheckOutcome::Discard(DiscardReason::Invalid( + format!( + "endorser draw mismatch for header in slot: {}", + header.content.slot + ), + ))); + } + + // note that the following aspects are checked in protocol + // * signature + // * index reuse + // * slot matching the block's + // * the endorsed block is the containing block's parent + } + + Ok(EndorsementsCheckOutcome::Proceed) + } +} diff --git a/massa-consensus-worker/src/tests/block_factory.rs b/massa-consensus-worker/src/tests/block_factory.rs deleted file mode 100644 index 0a415dde219..00000000000 --- a/massa-consensus-worker/src/tests/block_factory.rs +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -//! This is a factory that can be used in consensus test -//! but at it was introduced quite late in the development process -//! 
it has only be used in scenarios basic - -use super::tools::{validate_notpropagate_block, validate_propagate_block}; -use massa_hash::Hash; -use massa_models::{ - block::{Block, BlockHeader, BlockHeaderSerializer, BlockId, BlockSerializer, WrappedBlock}, - endorsement::WrappedEndorsement, - operation::WrappedOperation, - slot::Slot, - wrapped::{Id, WrappedContent}, -}; -use massa_protocol_exports::test_exports::MockProtocolController; -use massa_signature::KeyPair; -use massa_storage::Storage; - -pub struct BlockFactory { - pub best_parents: Vec, - pub creator_keypair: KeyPair, - pub slot: Slot, - pub endorsements: Vec, - pub operations: Vec, - pub protocol_controller: MockProtocolController, -} - -impl BlockFactory { - pub fn start_block_factory( - genesis: Vec, - protocol_controller: MockProtocolController, - ) -> BlockFactory { - BlockFactory { - best_parents: genesis, - creator_keypair: KeyPair::generate(), - slot: Slot::new(1, 0), - endorsements: Vec::new(), - operations: Vec::new(), - protocol_controller, - } - } - - pub async fn create_and_receive_block(&mut self, valid: bool) -> WrappedBlock { - let header = BlockHeader::new_wrapped( - BlockHeader { - slot: self.slot, - parents: self.best_parents.clone(), - operation_merkle_root: Hash::compute_from( - &self - .operations - .iter() - .flat_map(|op| op.id.get_hash().into_bytes()) - .collect::>()[..], - ), - endorsements: self.endorsements.clone(), - }, - BlockHeaderSerializer::new(), - &self.creator_keypair, - ) - .unwrap(); - - let block = Block::new_wrapped( - Block { - header, - operations: self - .operations - .clone() - .into_iter() - .map(|op| op.id) - .collect(), - }, - BlockSerializer::new(), - &self.creator_keypair, - ) - .unwrap(); - - let mut storage = Storage::create_root(); - let id = block.id; - let slot = block.content.header.content.slot; - storage.store_block(block.clone()); - - self.protocol_controller - .receive_block(id, slot, storage) - .await; - if valid { - // Assert that the block is 
propagated. - validate_propagate_block(&mut self.protocol_controller, id, 2000).await; - } else { - // Assert that the the block is not propagated. - validate_notpropagate_block(&mut self.protocol_controller, id, 500).await; - } - block - } - - pub fn sign_header(&self, header: BlockHeader) -> WrappedBlock { - let header = - BlockHeader::new_wrapped(header, BlockHeaderSerializer::new(), &self.creator_keypair) - .unwrap(); - - Block::new_wrapped( - Block { - header, - operations: self - .operations - .clone() - .into_iter() - .map(|op| op.id) - .collect(), - }, - BlockSerializer::new(), - &self.creator_keypair, - ) - .unwrap() - } - - pub async fn receive_block( - &mut self, - valid: bool, - block_id: BlockId, - slot: Slot, - storage: Storage, - ) { - self.protocol_controller - .receive_block(block_id, slot, storage) - .await; - if valid { - // Assert that the block is propagated. - validate_propagate_block(&mut self.protocol_controller, block_id, 2000).await; - } else { - // Assert that the the block is not propagated. - validate_notpropagate_block(&mut self.protocol_controller, block_id, 500).await; - } - } - - pub fn take_protocol_controller(self) -> MockProtocolController { - self.protocol_controller - } -} diff --git a/massa-consensus-worker/src/tests/inter_cycle_batch_finalization.rs b/massa-consensus-worker/src/tests/inter_cycle_batch_finalization.rs deleted file mode 100644 index d9b7da9b817..00000000000 --- a/massa-consensus-worker/src/tests/inter_cycle_batch_finalization.rs +++ /dev/null @@ -1,199 +0,0 @@ -//! 
Copyright (c) 2022 MASSA LABS - -use super::tools::*; -use massa_consensus_exports::ConsensusConfig; -use massa_models::{block::BlockId, slot::Slot}; -use massa_signature::KeyPair; -use massa_time::MassaTime; -use serial_test::serial; -use std::{collections::HashSet, str::FromStr}; - -/// # Context -/// -/// Regression test for `https://github.com/massalabs/massa/pull/2433` -/// -/// When we have the following block sequence -/// ``` -/// 1 thread, periods_per_cycle = 2, delta_f0 = 1, 1 endorsement per block -/// -/// cycle 0 | cycle 1 | cycle 2 -/// G - B1 - B2 - B3 - B4 -/// where G is the genesis block -/// and B4 contains a roll sell operation -/// ``` -/// -/// And the block `B1` is received AFTER `B4`, blocks will be processed recursively: -/// ``` -/// * B1 is received and included -/// * B2 is processed -/// * B1 becomes final in the graph -/// * B3 is processed -/// * B2 becomes final in the graph -/// * B4 is processed -/// * B3 becomes final in the graph -/// * PoS is told about all finalized blocks -/// ``` -/// -/// The problem we had is that in order to check rolls to verify `B4`'s roll sell, -/// the final roll registry was assumed to be attached to the last final block known by the graph, -/// but that was inaccurate because PoS was the one holding the final roll registry, -/// and PoS was not yet aware of the blocks that finalized during recursion, -/// so it was actually still attached to G when `B4` was checked. -/// -/// The correction involved taking the point of view of PoS on where the final roll registry is attached. -/// This test ensures non-regression by making sure `B4` is propagated when `B1` is received. 
-#[tokio::test(flavor = "multi_thread", worker_threads = 2)] -#[serial] -async fn test_inter_cycle_batch_finalization() { - let t0: MassaTime = 1000.into(); - let staking_key = - KeyPair::from_str("S1UxdCJv5ckDK8z87E5Jq5fEfSVLi2cTHgtpfZy7iURs3KpPns8").unwrap(); - let warmup_time: MassaTime = 1000.into(); - let margin_time: MassaTime = 300.into(); - let cfg = ConsensusConfig { - periods_per_cycle: 2, - delta_f0: 1, - thread_count: 1, - endorsement_count: 1, - max_future_processing_blocks: 10, - max_dependency_blocks: 10, - future_block_processing_max_periods: 10, - t0, - genesis_timestamp: MassaTime::now(0).unwrap().saturating_add(warmup_time), - ..ConsensusConfig::default() - }; - - consensus_pool_test_with_storage( - cfg.clone(), - None, - async move |pool_controller, - mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - mut storage, - selector_controller| { - // wait for consensus warmup time - tokio::time::sleep(warmup_time.to_duration()).await; - - let genesis_blocks: Vec = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .best_parents - .iter() - .map(|(b, _p)| *b) - .collect(); - - // create B1 but DO NOT SEND IT - tokio::time::sleep(t0.to_duration()).await; - let b1_block = - create_block(&cfg, Slot::new(1, 0), genesis_blocks.clone(), &staking_key); - - // create and send B2 - tokio::time::sleep(t0.to_duration()).await; - let b2_block = create_block_with_operations_and_endorsements( - &cfg, - Slot::new(2, 0), - &vec![b1_block.id], - &staking_key, - vec![], - vec![create_endorsement( - &staking_key, - Slot::new(1, 0), - b1_block.id, - 0, - )], - ); - let b2_block_id = b2_block.id; - let b2_block_slot = b2_block.content.header.content.slot; - storage.store_block(b2_block); - protocol_controller - .receive_block(b2_block_id, b2_block_slot, storage.clone()) - .await; - - // create and send B3 - tokio::time::sleep(t0.to_duration()).await; - let b3_block = 
create_block_with_operations_and_endorsements( - &cfg, - Slot::new(3, 0), - &vec![b2_block_id], - &staking_key, - vec![], - vec![create_endorsement( - &staking_key, - Slot::new(2, 0), - b2_block_id, - 0, - )], - ); - let b3_block_id = b3_block.id; - let b3_block_slot = b3_block.content.header.content.slot; - storage.store_block(b3_block); - protocol_controller - .receive_block(b3_block_id, b3_block_slot, storage.clone()) - .await; - - // create and send B4 - tokio::time::sleep(t0.to_duration()).await; - let roll_sell = create_roll_sell(&staking_key, 1, 4, 0); - storage.store_operations(vec![roll_sell.clone()]); - let b4_block = create_block_with_operations_and_endorsements( - &cfg, - Slot::new(4, 0), - &vec![b3_block_id], - &staking_key, - vec![roll_sell], - vec![create_endorsement( - &staking_key, - Slot::new(3, 0), - b3_block_id, - 0, - )], - ); - let b4_block_id = b4_block.id; - let b4_block_slot = b4_block.content.header.content.slot; - storage.store_block(b4_block); - protocol_controller - .receive_block(b4_block_id, b4_block_slot, storage.clone()) - .await; - - // wait for the slot after B4 - tokio::time::sleep(t0.saturating_mul(5).to_duration()).await; - - // send B1 - let b1_block_id = b1_block.id; - let b1_block_slot = b1_block.content.header.content.slot; - storage.store_block(b1_block); - protocol_controller - .receive_block(b1_block_id, b1_block_slot, storage.clone()) - .await; - - approve_producer_and_selector_for_staker(&staking_key, &selector_controller); - - // wait for the propagation of B1, B2, B3 and B4 (unordered) - let mut to_propagate: HashSet<_> = - vec![b1_block_id, b2_block_id, b3_block_id, b4_block_id] - .into_iter() - .collect(); - for _ in 0u8..4 { - to_propagate.remove( - &validate_propagate_block_in_list( - &mut protocol_controller, - &to_propagate.clone().into_iter().collect(), - margin_time.to_millis(), - ) - .await, - ); - } - - ( - pool_controller, - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - 
selector_controller, - ) - }, - ) - .await; -} diff --git a/massa-consensus-worker/src/tests/mod.rs b/massa-consensus-worker/src/tests/mod.rs deleted file mode 100644 index 5d62262b27a..00000000000 --- a/massa-consensus-worker/src/tests/mod.rs +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -mod block_factory; -// mod inter_cycle_batch_finalization; /* TODO repair this test https://github.com/massalabs/massa/issues/3099 -mod scenario_block_creation; -mod scenario_roll; -mod scenarios106; -mod scenarios91_1; -mod scenarios91_2; -mod scenarios_basic; -mod scenarios_endorsements; -mod scenarios_get_operations; -mod scenarios_get_selection_draws; -mod scenarios_header_check; -mod scenarios_incompatibilities; -mod scenarios_note_attack_attempt; -mod scenarios_operations_check; -mod scenarios_parents; -mod scenarios_pool_commands; -mod scenarios_pruning; -mod scenarios_reward_split; -mod scenarios_send_block; -mod scenarios_wishlist; -mod test_block_graph; -pub mod tools; diff --git a/massa-consensus-worker/src/tests/scenario_block_creation.rs b/massa-consensus-worker/src/tests/scenario_block_creation.rs deleted file mode 100644 index 709a78a78a6..00000000000 --- a/massa-consensus-worker/src/tests/scenario_block_creation.rs +++ /dev/null @@ -1,849 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -use super::tools::random_address_on_thread; -use crate::tests::tools; -use massa_consensus_exports::ConsensusConfig; -use massa_models::ledger_models::LedgerData; -use massa_models::rolls::{RollCounts, RollUpdate, RollUpdates}; -use massa_models::{amount::Amount, slot::Slot}; -use massa_protocol_exports::ProtocolCommand; -use massa_time::MassaTime; -use serial_test::serial; -use std::collections::HashMap; -use tokio::time::sleep_until; - -// #[tokio::test] -// #[serial] -// async fn test_genesis_block_creation() { -// // define addresses use for the test -// // addresses a and b both in thread 0 -// // addr 1 has 1 roll and 0 coins -// // addr 2 is in consensus and 
has 0 roll and 1000 coins -// let thread_count = 2; -// let (address_1, keypair_1) = random_address_on_thread(0, thread_count).into(); -// let (address_2, keypair_2) = random_address_on_thread(0, thread_count).into(); -// let mut ledger = HashMap::new(); -// ledger.insert( -// address_2, -// LedgerData::new(Amount::from_str("1000").unwrap()), -// ); -// let mut cfg = ConsensusConfig { -// genesis_timestamp: MassaTime::now(0) -// .unwrap() -// .saturating_sub(MassaTime::from(30000)), -// ..ConsensusConfig::default_with_staking_keys_and_ledger(&[keypair_1, keypair_2], &ledger) -// }; -// // init roll count -// let mut roll_counts = RollCounts::default(); -// let update = RollUpdate { -// roll_purchases: 1, -// roll_sales: 0, -// }; -// let mut updates = RollUpdates::default(); -// updates.apply(&address_1, &update).unwrap(); -// roll_counts.apply_updates(&updates).unwrap(); - -// let initial_rolls_file = generate_roll_counts_file(&roll_counts); -// cfg.initial_rolls_path = initial_rolls_file.path().to_path_buf(); - -// tools::consensus_without_pool_test( -// cfg.clone(), -// async move |protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// selector_controller| { -// let _genesis_ids = consensus_command_sender -// .get_block_graph_status(None, None) -// .await -// .expect("could not get block graph status") -// .genesis_blocks; - -// ( -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// selector_controller, -// ) -// }, -// ) -// .await; -// } - -// /// /// See the test removed at https://gitlab.com/massalabs/massa-network/-/merge_requests/381/diffs#a5bee3b1b5cc9d8157b6feee0ac3e775aa457a33_544_539 -// /// -// /// **NOTE: that test is expected to fail 1 / 1000 times** -// /// -// /// -// /// ### Context -// /// ``` -// /// * price per roll = 1000 -// /// * periods per cycle = 30 000 -// /// * t0 = 500ms -// /// * look-back = 2 -// /// * thread count = 2 -// /// * delta f0 = 3 -// /// * genesis 
timestamp = now - t0 * periods per cycle * 3 - 1000 -// /// * block reward = 0 -// /// * fee = 0 for every operation -// /// * address 1 has 1 roll and 0 coins -// /// * address 2 is in consensus and has 0 roll and 1000 coins -// /// ``` -// /// ### Initialization -// /// Following blocks are sent through a protocol event to consensus right at the beginning. They all have best parents as parents. -// /// * block at slot(1,0) with operation address 2 buys 1 roll -// /// * block at slot( period per cycle, 0) -// /// * block at slot( period per cycle, 1) -// /// * block at slot( period per cycle + 1, 0) -// /// * block at slot( period per cycle + 1, 1) -// /// * block at slot( period per cycle + 2, 0) -// /// * block at slot( period per cycle + 2, 0) -// /// -// /// ### Scenario -// /// -// /// * start consensus -// /// * blocks previously described are sent to consensus through a protocol event -// /// * assert they are propagated -// /// * ```let draws = get_selection_draws( (3*periods_per cycle, 0), (4*periods_per cycle, 0)``` -// /// * assert -// /// ```math -// /// abs(1/2 - \frac{TimesAddr1WasDrawn}{ThreadCount * PeriodsPerCycle}) < 0.01 -// /// ``` -// /// (see [the math](https://en.wikipedia.org/wiki/Checking_whether_a_coin_is_fair)) -// /// * wait for cycle 3 beginning -// /// * for the 10 first slots of cycle 3 -// /// * if address 2 was selected assert consensus created and propagated a block -// /// * if address 1 was selected assert nothing is propagated -// #[tokio::test] -// #[serial] -// //#[ignore] -// async fn test_block_creation_with_draw() { -// let thread_count = 2; -// // define addresses use for the test -// // addresses a and b both in thread 0 -// // addr 1 has 1 roll and 0 coins -// // addr 2 is in consensus and has 0 roll and 1000 coins -// let (address_1, keypair_1) = random_address_on_thread(0, thread_count).into(); -// let (address_2, keypair_2) = random_address_on_thread(0, thread_count).into(); - -// let staking_keys = 
vec![keypair_1.clone(), keypair_2.clone()]; - -// // init address_2 with 1000 coins -// let mut ledger = HashMap::new(); -// ledger.insert( -// address_2, -// LedgerData::new(Amount::from_str("1000").unwrap()), -// ); - -// // finally create the configuration -// let t0 = MassaTime::from(1000); -// let periods_per_cycle = 1000; -// let mut cfg = ConsensusConfig { -// block_reward: Amount::default(), -// delta_f0: 3, -// max_operations_per_block: 50, -// operation_validity_periods: 100, -// periods_per_cycle, -// roll_price: Amount::from_str("1000").unwrap(), -// t0, -// genesis_timestamp: MassaTime::now(0) -// .unwrap() -// .checked_sub((t0.to_millis() * periods_per_cycle * 3).into()) -// .unwrap() -// .checked_add(2000.into()) -// .unwrap(), -// ..ConsensusConfig::default_with_staking_keys_and_ledger(&staking_keys, &ledger) -// }; - -// // init roll count -// let mut roll_counts = RollCounts::default(); -// let update = RollUpdate { -// roll_purchases: 1, -// roll_sales: 0, -// }; -// let mut updates = RollUpdates::default(); -// updates.apply(&address_1, &update).unwrap(); -// roll_counts.apply_updates(&updates).unwrap(); -// let initial_rolls_file = generate_roll_counts_file(&roll_counts); -// cfg.initial_rolls_path = initial_rolls_file.path().to_path_buf(); - -// let operation_fee = 0; -// tools::consensus_without_pool_with_storage_test( -// cfg.clone(), -// async move |mut storage, -// mut protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// selector_controller| { -// let genesis_ids = consensus_command_sender -// .get_block_graph_status(None, None) -// .await -// .expect("could not get block graph status") -// .genesis_blocks; - -// // initial block: addr2 buys 1 roll -// let op1 = create_roll_transaction(&keypair_2, 1, true, 10, operation_fee); -// storage.store_operations(vec![op1.clone()]); -// let block = tools::create_block_with_operations( -// &cfg, -// Slot::new(1, 0), -// &genesis_ids, -// &staking_keys[0], -// 
vec![op1], -// ); - -// tools::propagate_block(&mut protocol_controller, block.clone(), true, 1000).await; - -// // make cycle 0 final/finished by sending enough blocks in each thread in cycle 1 -// // note that blocks in cycle 3 may be created during this, so make sure that their clique is overrun by sending a large amount of blocks -// let mut cur_parents = vec![block.id, genesis_ids[1]]; -// for delta_period in 0u64..10 { -// for thread in 0..cfg.thread_count { -// let res_block_id = tools::create_and_test_block( -// &mut protocol_controller, -// &cfg, -// Slot::new(cfg.periods_per_cycle + delta_period, thread), -// cur_parents.clone(), -// true, -// false, -// &staking_keys[0], -// ) -// .await; -// cur_parents[thread as usize] = res_block_id; -// } -// } - -// // get draws for cycle 3 (lookback = cycle 0) -// let mut draws: HashMap = HashMap::default(); -// for i in (3 * cfg.periods_per_cycle)..(4 * cfg.periods_per_cycle) { -// let slot = Slot::new(i, 0); -// draws.insert( -// slot, -// selector_controller.get_selection(slot).unwrap().producer, -// ); -// } -// let nb_address1_draws = draws.iter().filter(|(_, addr)| **addr == address_1).count(); -// // fair coin test. See https://en.wikipedia.org/wiki/Checking_whether_a_coin_is_fair -// // note: this is a statistical test. It may fail in rare occasions. 
-// assert!( -// (0.5 - ((nb_address1_draws as f32) -// / ((cfg.thread_count as u64 * cfg.periods_per_cycle) as f32))) -// .abs() -// < 0.15 -// ); - -// // check 10 draws -// let draws: HashMap = draws.into_iter().collect(); -// let mut cur_slot = Slot::new(cfg.periods_per_cycle * 3, 0); -// for _ in 0..10 { -// // wait block propagation -// let block_creator = protocol_controller -// .wait_command(3500.into(), |cmd| match cmd { -// ProtocolCommand::IntegratedBlock { block_id } => { -// let block = storage -// .retrieve_block(&block_id) -// .expect(&format!("Block id : {} not found in storage", block_id)); -// let stored_block = block.read(); -// if stored_block.content.header.content.slot == cur_slot { -// Some(stored_block.creator_public_key) -// } else { -// None -// } -// } -// _ => None, -// }) -// .await -// .expect("block did not propagate in time"); -// assert_eq!( -// draws[&cur_slot], -// Address::from_public_key(&block_creator), -// "wrong block creator" -// ); -// cur_slot = cur_slot.get_next_slot(cfg.thread_count).unwrap(); -// } - -// ( -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// selector_controller, -// ) -// }, -// ) -// .await; -// } - -/// https://gitlab.com/massalabs/massa/-/issues/301 -/// -/// Block creation reception mix test -/// -/// see https://gitlab.com/massalabs/massa/-/issues/295#note_693561778 -/// -/// -/// two staking keys. Only key a is registered in consensus -/// start before genesis timestamp -/// retrieve next draws -/// for a few slots: -/// if it's key b time to create a block create it and send it to consensus -/// if key a created a block, assert it has chosen as parents expected blocks (no misses), and that it was sent to protocol around the time it was expected. 
-#[tokio::test] -#[serial] -#[ignore] -async fn test_interleaving_block_creation_with_reception() { - let thread_count = 1; - // define addresses use for the test - // addresses a and b both in thread 0 - let (address_1, _) = random_address_on_thread(0, thread_count).into(); - let (address_2, keypair_2) = random_address_on_thread(0, thread_count).into(); - - let mut ledger = HashMap::new(); - ledger.insert( - address_2, - LedgerData::new(Amount::from_mantissa_scale(1000, 0)), - ); - let cfg = ConsensusConfig { - thread_count, - t0: 1000.into(), - genesis_timestamp: MassaTime::now(0).unwrap().checked_add(1000.into()).unwrap(), - ..ConsensusConfig::default() - }; - // init roll count - let mut roll_counts = RollCounts::default(); - let update = RollUpdate { - roll_purchases: 1, - roll_sales: 0, - }; - let mut updates = RollUpdates::default(); - updates.apply(&address_1, &update).unwrap(); - updates.apply(&address_2, &update).unwrap(); - roll_counts.apply_updates(&updates).unwrap(); - - tools::consensus_without_pool_with_storage_test( - cfg.clone(), - async move |mut storage, - mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let mut parents = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - - sleep_until(tokio::time::Instant::from_std( - cfg.genesis_timestamp - .saturating_add(cfg.t0) - .saturating_sub(150.into()) - .estimate_instant(0) - .expect("could not estimate instant for genesis timestamps"), - )) - .await; - - // check 10 draws - // Key1 and key2 can be drawn to produce block, - // but the local node only has key1, - // so when key2 is selected a block must be produced remotly - // and sent to the local node through protocol - for i in 1..11 { - let cur_slot = Slot::new(i, 0); - let creator = &selector_controller - .get_selection(cur_slot) - .expect("missing slot in drawss") - .producer; - - let block_id = if 
*creator == address_1 { - // wait block propagation - let (header, id) = protocol_controller - .wait_command(cfg.t0.saturating_add(300.into()), |cmd| match cmd { - ProtocolCommand::IntegratedBlock { block_id, storage } => { - let block = storage - .read_blocks() - .get(&block_id) - .unwrap_or_else(|| { - panic!("Block id : {} not found in storage", block_id) - }) - .clone(); - if block.content.header.content.slot == cur_slot { - Some((block.content.header, block_id)) - } else { - None - } - } - _ => None, - }) - .await - .expect("block did not propagate in time"); - assert_eq!(*creator, header.creator_address, "wrong block creator"); - id - } else if *creator == address_2 { - // create block and propagate it - let block = tools::create_block_with_operations( - &cfg, - cur_slot, - &parents, - &keypair_2, - vec![], - ); - storage.store_block(block.clone()); - tools::propagate_block( - &mut protocol_controller, - block.id, - block.content.header.content.slot, - storage.clone(), - true, - cfg.t0.to_millis() + 300, - ) - .await; - block.id - } else { - panic!("unexpected block creator"); - }; - parents[0] = block_id; - } - - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} - -// /// https://gitlab.com/massalabs/massa-network-archive/-/issues/343 -// /// Test block creation with operations -// /// -// /// Test consensus block creation with an initial graph and simulated pool -// /// -// /// In all tests, once it has started there is only one block creator, so we expect consensus to create blocks at every slots after initialization. 
-// /// -// /// context -// /// -// /// ``` -// /// initial ledger: A:100 -// /// op1 : A -> B : 5, fee 1 -// /// op2 : A -> B : 50, fee 10 -// /// op3 : B -> A : 10, fee 15 -// /// ``` -// /// -// /// --- -// /// -// /// ``` -// /// create block at (0,1) -// /// operations should be [op2, op1] -// /// ``` -// #[tokio::test] -// #[serial] -// async fn test_order_of_inclusion() { -// let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); -// // Increase timestamp a bit to avoid missing the first slot. -// let init_time: MassaTime = 1000.into(); -// let mut cfg = ConsensusConfig { -// genesis_timestamp: MassaTime::now(0).unwrap().checked_add(init_time).unwrap(), -// max_operations_per_block: 50, -// operation_validity_periods: 10, -// t0: 1000.into(), -// ..ConsensusConfig::default() -// }; -// // define addresses use for the test -// // addresses a and b both in thread 0 -// let (address_a, keypair_a) = random_address_on_thread(0, cfg.thread_count).into(); -// let (address_b, keypair_b) = random_address_on_thread(0, cfg.thread_count).into(); - -// let mut ledger = HashMap::new(); -// ledger.insert(address_a, LedgerData::new(Amount::from_str("100").unwrap())); -// let initial_ledger_file = generate_ledger_file(&ledger); // don't drop the `NamedTempFile` -// cfg.initial_ledger_path = initial_ledger_file.path().to_path_buf(); - -// let op1 = create_transaction(&keypair_a, address_b, 5, 10, 1); -// let op2 = create_transaction(&keypair_a, address_b, 50, 10, 10); -// let op3 = create_transaction(&keypair_b, address_a, 10, 10, 15); - -// // there is only one node so it should be drawn at every slot - -// tools::consensus_pool_test_with_storage( -// cfg.clone(), -// None, -// async move |pool_controller, -// mut protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// storage, -// selector_controller| { -// //TODO: Replace -// // wait for first slot -// // pool_controller -// // .wait_command( -// // 
cfg.t0.saturating_mul(2).saturating_add(init_time), -// // |cmd| match cmd { -// // PoolCommand::UpdateCurrentSlot(s) => { -// // if s == Slot::new(1, 0) { -// // Some(()) -// // } else { -// // None -// // } -// // } -// // PoolCommand::GetEndorsements { response_tx, .. } => { -// // response_tx.send(Vec::new()).unwrap(); -// // None -// // } -// // _ => None, -// // }, -// // ) -// // .await -// // .expect("timeout while waiting for slot"); -// // -// // respond to first pool batch command -// // pool_controller -// // .wait_command(300.into(), |cmd| match cmd { -// // PoolCommand::GetOperationBatch { response_tx, .. } => { -// // response_tx -// // .send(vec![ -// // (op3.clone(), 50), -// // (op2.clone(), 50), -// // (op1.clone(), 50), -// // ]) -// // .unwrap(); -// // Some(()) -// // } -// // PoolCommand::GetEndorsements { response_tx, .. } => { -// // response_tx.send(Vec::new()).unwrap(); -// // None -// // } -// // _ => None, -// // }) -// // .await -// // .expect("timeout while waiting for 1st operation batch request"); - -// // // respond to second pool batch command -// // pool_controller -// // .wait_command(300.into(), |cmd| match cmd { -// // PoolCommand::GetOperationBatch { -// // response_tx, -// // exclude, -// // .. -// // } => { -// // assert!(!exclude.is_empty()); -// // response_tx.send(vec![]).unwrap(); -// // Some(()) -// // } -// // PoolCommand::GetEndorsements { response_tx, .. 
} => { -// // response_tx.send(Vec::new()).unwrap(); -// // None -// // } -// // _ => None, -// // }) -// // .await -// // .expect("timeout while waiting for 2nd operation batch request"); - -// // wait for block -// let block = protocol_controller -// .wait_command(300.into(), |cmd| match cmd { -// ProtocolCommand::IntegratedBlock { block_id } => { -// let block = storage -// .retrieve_block(&block_id) -// .expect(&format!("Block id : {} not found in storage", block_id)); -// let stored_block = block.read(); -// Some(stored_block.clone()) -// } -// _ => None, -// }) -// .await -// .expect("timeout while waiting for block"); - -// // assert it's the expected block -// assert_eq!(block.content.header.content.slot, Slot::new(1, 0)); -// let expected = vec![op2.clone(), op1.clone()]; -// let res = block.content.operations.clone(); -// assert_eq!(block.content.operations.len(), 2); -// for i in 0..2 { -// assert!(res.contains(&expected[i].id)); -// } -// ( -// pool_controller, -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// selector_controller, -// ) -// }, -// ) -// .await; -// } - -// /// https://gitlab.com/massalabs/massa-network-archive/-/issues/343 -// /// Test block creation with operations -// /// -// /// Test consensus block creation with an initial graph and simulated pool -// /// -// /// In all tests, once it has started there is only one block creator, so we expect consensus to create blocks at every slots after initialization. 
-// /// -// /// context -// /// -// /// ```` -// /// initial ledger A = 1 000 000 -// /// max_block_size = 500 -// /// max_operations_per_block = 10 000 -// /// op_i = A -> B : 10, 1, signed for the i-th time -// /// ``` -// /// -// /// --- -// /// ``` -// /// let block_size = size of dummy block at (1,0) without any operation -// /// let op_size = size of an operation -// /// while consensus is asking for operations send next ops -// /// assert created_block_size is max_block_size +/- one op_size -// /// assert created_block_size = block_size +`op_size * op_count -// /// ``` -// #[tokio::test] -// #[serial] -// #[ignore] -// async fn test_block_filling() { -// let thread_count = 2; -// // define addresses use for the test -// // addresses a and b both in thread 0 -// let (address_a, keypair_a) = random_address_on_thread(0, thread_count).into(); -// let (address_b, keypair_b) = random_address_on_thread(0, thread_count).into(); -// let mut ledger = HashMap::new(); -// ledger.insert( -// address_a, -// LedgerData::new(Amount::from_str("1000000000").unwrap()), -// ); -// let cfg = ConsensusConfig { -// endorsement_count: 10, -// max_block_size: 2000, -// max_operations_per_block: 5000, -// operation_validity_periods: 10, -// periods_per_cycle: 3, -// t0: 1000.into(), -// ..ConsensusConfig::default_with_staking_keys_and_ledger( -// &[keypair_a.clone(), keypair_b.clone()], -// &ledger, -// ) -// }; - -// let mut ops = vec![create_executesc( -// &keypair_a, -// 10, -// 10, -// vec![1; 200], // dummy bytes as here we do not test the content -// 1_000, -// 0, -// 1, -// )]; // this operation has an higher rentability than any other - -// for _ in 0..500 { -// ops.push(create_transaction(&keypair_a, address_a, 5, 10, 1)) -// } - -// tools::consensus_pool_test_with_storage( -// cfg.clone(), -// None, -// async move |pool_controller, -// mut protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// storage, -// selector_controller| { -// let op_size 
= 10; - -// // wait for slot -// //let mut prev_blocks = Vec::new(); -// for cur_slot in [Slot::new(1, 0), Slot::new(1, 1)] { -// //TODO: Replace -// // pool_controller -// // .wait_command(cfg.t0.checked_mul(2).unwrap(), |cmd| match cmd { -// // PoolCommand::UpdateCurrentSlot(s) => { -// // if s == cur_slot { -// // Some(()) -// // } else { -// // None -// // } -// // } -// // _ => None, -// // }) -// // .await -// // .expect("timeout while waiting for slot"); -// // // respond to pool batch command -// // pool_controller -// // .wait_command(300.into(), |cmd| match cmd { -// // PoolCommand::GetOperationBatch { response_tx, .. } => { -// // response_tx.send(Default::default()).unwrap(); -// // Some(()) -// // } -// // _ => None, -// // }) -// // .await -// // .expect("timeout while waiting for operation batch request"); -// // // wait for block -// // let block = protocol_controller -// // .wait_command(500.into(), |cmd| match cmd { -// // ProtocolCommand::IntegratedBlock { block_id } => { -// // let block = storage -// // .retrieve_block(&block_id) -// // .expect(&format!("Block id : {} not found in storage", block_id)); -// // let stored_block = block.read(); -// // Some(stored_block.clone()) -// // } -// // _ => None, -// // }) -// // .await -// // .expect("timeout while waiting for block"); -// // assert_eq!(block.content.header.content.slot, cur_slot); -// // prev_blocks.push(block.id); -// // } -// } - -// // // wait for slot p2t0 -// // pool_controller -// // .wait_command(cfg.t0, |cmd| match cmd { -// // PoolCommand::UpdateCurrentSlot(s) => { -// // if s == Slot::new(2, 0) { -// // Some(()) -// // } else { -// // None -// // } -// // } -// // _ => None, -// // }) -// // .await -// // .expect("timeout while waiting for slot"); - -// // // respond to endorsement command -// // let eds = pool_controller -// // .wait_command(300.into(), |cmd| match cmd { -// // PoolCommand::GetEndorsements { -// // target_slot, -// // parent, -// // creators, -// // 
response_tx, -// // .. -// // } => { -// // assert_eq!(Slot::new(1, 0), target_slot); -// // assert_eq!(parent, prev_blocks[0]); -// // let mut eds: Vec = Vec::new(); -// // for (index, creator) in creators.iter().enumerate() { -// // let ed = if *creator == address_a { -// // create_endorsement(&keypair_a, target_slot, parent, index as u32) -// // } else if *creator == address_b { -// // create_endorsement(&keypair_b, target_slot, parent, index as u32) -// // } else { -// // panic!("invalid endorser choice"); -// // }; -// // eds.push(ed); -// // } -// // response_tx.send(eds.clone()).unwrap(); -// // Some(eds) -// // } -// // _ => None, -// // }) -// // .await -// // .expect("timeout while waiting for endorsement request"); -// // assert_eq!(eds.len() as u32, cfg.endorsement_count); - -// // respond to first pool batch command -// //TODO: Replace -// // pool_controller -// // .wait_command(300.into(), |cmd| match cmd { -// // PoolCommand::GetOperationBatch { response_tx, .. } => { -// // response_tx -// // .send(ops.iter().map(|op| (op.clone(), op_size)).collect()) -// // .unwrap(); -// // Some(()) -// // } -// // _ => None, -// // }) -// // .await -// // .expect("timeout while waiting for 1st operation batch request"); - -// // respond to second pool batch command -// //TODO: Replace -// // pool_controller -// // .wait_command(300.into(), |cmd| match cmd { -// // PoolCommand::GetOperationBatch { -// // response_tx, -// // exclude, -// // .. 
-// // } => { -// // assert!(!exclude.is_empty()); -// // response_tx.send(vec![]).unwrap(); -// // Some(()) -// // } -// // _ => None, -// // }) -// // .await -// // .expect("timeout while waiting for 2nd operation batch request"); - -// let eds: Vec = Vec::new(); -// // wait for block -// let block = protocol_controller -// .wait_command(500.into(), |cmd| match cmd { -// ProtocolCommand::IntegratedBlock { block_id } => { -// let block = storage -// .retrieve_block(&block_id) -// .expect(&format!("Block id : {} not found in storage", block_id)); -// let stored_block = block.read(); -// Some(stored_block.clone()) -// } -// _ => None, -// }) -// .await -// .expect("timeout while waiting for block"); - -// // assert it's the expected block -// assert_eq!(block.content.header.content.slot, Slot::new(2, 0)); - -// // assert it includes the sent endorsements -// assert_eq!(block.content.header.content.endorsements.len(), eds.len()); -// for (e_found, e_expected) in block -// .content -// .header -// .content -// .endorsements -// .iter() -// .zip(eds.iter()) -// { -// assert_eq!(e_found.id, e_expected.id); -// assert_eq!(e_expected.id, e_expected.id); -// } - -// // create empty block -// let header = BlockHeader::new_wrapped( -// BlockHeader { -// slot: block.content.header.content.slot, -// parents: block.content.header.content.parents.clone(), -// operation_merkle_root: Hash::compute_from(&Vec::new()[..]), -// endorsements: eds, -// }, -// BlockHeaderSerializer::new(), -// &keypair_a, -// ) -// .unwrap(); -// let empty: WrappedBlock = Block::new_wrapped( -// Block { -// header, -// operations: Default::default(), -// }, -// BlockSerializer::new(), -// &keypair_a, -// ) -// .unwrap(); -// let remaining_block_space = (cfg.max_block_size as usize) -// .checked_sub(empty.serialized_data.len() as usize) -// .unwrap(); - -// let nb = remaining_block_space / (op_size as usize); -// assert_eq!(block.content.operations.len(), nb); -// ( -// pool_controller, -// 
protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// selector_controller, -// ) -// }, -// ) -// .await; -// } diff --git a/massa-consensus-worker/src/tests/scenario_roll.rs b/massa-consensus-worker/src/tests/scenario_roll.rs deleted file mode 100644 index 20c30933c3d..00000000000 --- a/massa-consensus-worker/src/tests/scenario_roll.rs +++ /dev/null @@ -1,974 +0,0 @@ -// // Copyright (c) 2022 MASSA LABS - -// use massa_consensus_exports::{ -// settings::ConsensusChannels, -// test_exports::{ -// generate_default_roll_counts_file, generate_ledger_file, generate_staking_keys_file, -// }, -// ConsensusConfig, -// }; -// use massa_execution_exports::test_exports::MockExecutionController; -// use massa_models::{Address, Amount, BlockId, Slot}; -// use massa_pos_exports::SelectorConfig; -// use massa_pos_worker::start_selector_worker; -// use massa_protocol_exports::ProtocolCommand; -// use massa_storage::Storage; -// use massa_time::MassaTime; -// use num::rational::Ratio; -// use rand::{prelude::SliceRandom, rngs::StdRng, SeedableRng}; -// use serial_test::serial; -// use std::collections::{HashMap, VecDeque}; -// use std::str::FromStr; - -// use crate::{ -// start_consensus_controller, -// tests::{ -// mock_pool_controller::MockPoolController, -// mock_protocol_controller::MockProtocolController, -// tools::{ -// consensus_pool_test_with_storage, create_block, create_block_with_operations, -// create_roll_buy, create_roll_sell, get_creator_for_draw, propagate_block, -// random_address_on_thread, wait_pool_slot, -// }, -// }, -// }; -// use massa_models::ledger_models::LedgerData; -// use massa_models::prehash::Set; - -// #[tokio::test] -// #[serial] -// async fn test_roll() { -// // setup logging -// /* -// stderrlog::new() -// .verbosity(2) -// .timestamp(stderrlog::Timestamp::Millisecond) -// .init() -// .unwrap(); -// */ -// let init_time: MassaTime = 1000.into(); -// let mut cfg = ConsensusConfig { -// t0: 500.into(), -// 
periods_per_cycle: 2, -// delta_f0: 3, -// block_reward: Amount::default(), -// roll_price: Amount::from_str("1000").unwrap(), -// operation_validity_periods: 100, -// genesis_timestamp: MassaTime::now(0).unwrap().saturating_add(init_time), -// ..Default::default() -// }; -// // define addresses use for the test -// // addresses 1 and 2 both in thread 0 -// let (address_1, keypair_1) = random_address_on_thread(0, cfg.thread_count).into(); -// let (address_2, keypair_2) = random_address_on_thread(0, cfg.thread_count).into(); - -// let mut ledger = HashMap::new(); -// ledger.insert( -// address_2, -// LedgerData::new(Amount::from_str("10000").unwrap()), -// ); -// let initial_ledger_file = generate_ledger_file(&ledger); -// cfg.initial_ledger_path = initial_ledger_file.path().to_path_buf(); - -// let staking_keys_file = generate_staking_keys_file(&[keypair_2.clone()]); -// cfg.staking_keys_path = staking_keys_file.path().to_path_buf(); - -// let initial_rolls_file = generate_default_roll_counts_file(vec![keypair_1.clone()]); -// cfg.initial_rolls_path = initial_rolls_file.path().to_path_buf(); - -// consensus_pool_test_with_storage( -// cfg.clone(), -// None, -// async move |mut pool_controller, -// mut protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// mut storage, -// selector_controller| { -// let mut parents: Vec = consensus_command_sender -// .get_block_graph_status(None, None) -// .await -// .expect("could not get block graph status") -// .best_parents -// .iter() -// .map(|(b, _p)| *b) -// .collect(); - -// // operations -// let rb_a1_r1_err = create_roll_buy(&keypair_1, 1, 90, 0); -// let rs_a2_r1_err = create_roll_sell(&keypair_2, 1, 90, 0); -// let rb_a2_r1 = create_roll_buy(&keypair_2, 1, 90, 0); -// let rs_a2_r1 = create_roll_sell(&keypair_2, 1, 90, 0); -// let rb_a2_r2 = create_roll_buy(&keypair_2, 2, 90, 0); -// let rs_a2_r2 = create_roll_sell(&keypair_2, 2, 90, 0); - -// // Store operations to make them accessible to 
the consensus worker. -// storage.store_operations(vec![ -// rb_a1_r1_err.clone(), -// rs_a2_r1_err.clone(), -// rb_a2_r1.clone(), -// rs_a2_r1.clone(), -// rb_a2_r2.clone(), -// rs_a2_r2.clone(), -// ]); - -// let mut addresses = Set::
::default(); -// addresses.insert(address_2); -// let addresses = addresses; - -// // cycle 0 -// let block1_err1 = create_block_with_operations( -// &cfg, -// Slot::new(1, 0), -// &parents, -// &keypair_1, -// vec![rb_a1_r1_err], -// ); -// tokio::time::sleep(init_time.to_duration()).await; -// wait_pool_slot(&mut pool_controller, cfg.t0, 1, 0).await; -// // invalid because a1 has not enough coins to buy a roll -// propagate_block(&mut protocol_controller, block1_err1, false, 150).await; - -// let block1_err2 = create_block_with_operations( -// &cfg, -// Slot::new(1, 0), -// &parents, -// &keypair_1, -// vec![rs_a2_r1_err], -// ); -// // invalid because a2 does not have enough rolls to sell -// propagate_block(&mut protocol_controller, block1_err2, false, 150).await; - -// let block1 = create_block_with_operations( -// &cfg, -// Slot::new(1, 0), -// &parents, -// &keypair_1, -// vec![rb_a2_r1], -// ); - -// // valid -// propagate_block(&mut protocol_controller, block1.clone(), true, 150).await; -// parents[0] = block1.id; - -// let addr_state = consensus_command_sender -// .get_addresses_info(addresses.clone()) -// .await -// .unwrap() -// .get(&address_2) -// .unwrap() -// .clone(); -// assert_eq!(addr_state.rolls.active_rolls, 0); -// assert_eq!(addr_state.rolls.final_rolls, 0); -// assert_eq!(addr_state.rolls.candidate_rolls, 1); -// assert_eq!( -// addr_state.ledger_info.candidate_ledger_info.balance, -// Amount::from_str("9000").unwrap() -// ); - -// let block1t1 = -// create_block_with_operations(&cfg, Slot::new(1, 1), &parents, &keypair_1, vec![]); - -// wait_pool_slot(&mut pool_controller, cfg.t0, 1, 1).await; -// // valid -// propagate_block(&mut protocol_controller, block1t1.clone(), true, 150).await; -// parents[1] = block1t1.id; - -// // cycle 1 - -// let block2 = create_block_with_operations( -// &cfg, -// Slot::new(2, 0), -// &parents, -// &keypair_1, -// vec![rs_a2_r1], -// ); - -// wait_pool_slot(&mut pool_controller, cfg.t0, 2, 0).await; -// // 
valid -// propagate_block(&mut protocol_controller, block2.clone(), true, 150).await; -// parents[0] = block2.id; - -// let addr_state = consensus_command_sender -// .get_addresses_info(addresses.clone()) -// .await -// .unwrap() -// .get(&address_2) -// .unwrap() -// .clone(); -// assert_eq!(addr_state.rolls.active_rolls, 0); -// assert_eq!(addr_state.rolls.final_rolls, 0); -// assert_eq!(addr_state.rolls.candidate_rolls, 0); -// let balance = addr_state.ledger_info.candidate_ledger_info.balance; -// assert_eq!(balance, Amount::from_str("9000").unwrap()); - -// let block2t2 = -// create_block_with_operations(&cfg, Slot::new(2, 1), &parents, &keypair_1, vec![]); -// wait_pool_slot(&mut pool_controller, cfg.t0, 2, 1).await; -// // valid -// propagate_block(&mut protocol_controller, block2t2.clone(), true, 150).await; -// parents[1] = block2t2.id; - -// // miss block 3 in thread 0 - -// // block 3 in thread 1 -// let block3t1 = -// create_block_with_operations(&cfg, Slot::new(3, 1), &parents, &keypair_1, vec![]); -// wait_pool_slot(&mut pool_controller, cfg.t0, 3, 1).await; -// // valid -// propagate_block(&mut protocol_controller, block3t1.clone(), true, 150).await; -// parents[1] = block3t1.id; - -// // cycle 2 - -// // miss block 4 - -// let block4t1 = -// create_block_with_operations(&cfg, Slot::new(4, 1), &parents, &keypair_1, vec![]); -// wait_pool_slot(&mut pool_controller, cfg.t0, 4, 1).await; -// // valid -// propagate_block(&mut protocol_controller, block4t1.clone(), true, 150).await; -// parents[1] = block4t1.id; - -// let block5 = -// create_block_with_operations(&cfg, Slot::new(5, 0), &parents, &keypair_1, vec![]); -// wait_pool_slot(&mut pool_controller, cfg.t0, 5, 0).await; -// // valid -// propagate_block(&mut protocol_controller, block5.clone(), true, 150).await; -// parents[0] = block5.id; - -// let block5t1 = -// create_block_with_operations(&cfg, Slot::new(5, 1), &parents, &keypair_1, vec![]); -// wait_pool_slot(&mut pool_controller, cfg.t0, 5, 
1).await; -// // valid -// propagate_block(&mut protocol_controller, block5t1.clone(), true, 150).await; -// parents[1] = block5t1.id; - -// let other_addr = -// if selector_controller.get_producer(Slot::new(6, 0)).unwrap() == address_1 { -// address_2 -// } else { -// address_1 -// }; - -// let block6_err = create_block_with_operations( -// &cfg, -// Slot::new(6, 0), -// &parents, -// &get_creator_for_draw(&other_addr, &vec![keypair_1.clone(), keypair_2.clone()]), -// vec![], -// ); -// wait_pool_slot(&mut pool_controller, cfg.t0, 6, 0).await; -// // invalid: other_addr wasn't drawn for that block creation -// propagate_block(&mut protocol_controller, block6_err, false, 150).await; - -// let block6 = create_block_with_operations( -// &cfg, -// Slot::new(6, 0), -// &parents, -// &get_creator_for_draw( -// &selector_controller.get_producer(Slot::new(6, 0)).unwrap(), -// &vec![keypair_1.clone(), keypair_2.clone()], -// ), -// vec![], -// ); - -// // valid -// propagate_block(&mut protocol_controller, block6.clone(), true, 150).await; -// parents[0] = block6.id; - -// let addr_state = consensus_command_sender -// .get_addresses_info(addresses.clone()) -// .await -// .unwrap() -// .get(&address_2) -// .unwrap() -// .clone(); -// assert_eq!(addr_state.rolls.active_rolls, 1); -// assert_eq!(addr_state.rolls.final_rolls, 0); -// assert_eq!(addr_state.rolls.candidate_rolls, 0); - -// let block6t1 = create_block_with_operations( -// &cfg, -// Slot::new(6, 1), -// &parents, -// &get_creator_for_draw( -// &selector_controller.get_producer(Slot::new(6, 0)).unwrap(), -// &vec![keypair_1.clone(), keypair_2.clone()], -// ), -// vec![], -// ); - -// wait_pool_slot(&mut pool_controller, cfg.t0, 6, 1).await; -// // valid -// propagate_block(&mut protocol_controller, block6t1.clone(), true, 150).await; -// parents[1] = block6t1.id; - -// let block7 = create_block_with_operations( -// &cfg, -// Slot::new(7, 0), -// &parents, -// &get_creator_for_draw( -// 
&selector_controller.get_producer(Slot::new(6, 0)).unwrap(), -// &vec![keypair_1.clone(), keypair_2.clone()], -// ), -// vec![], -// ); - -// wait_pool_slot(&mut pool_controller, cfg.t0, 7, 0).await; -// // valid -// propagate_block(&mut protocol_controller, block7.clone(), true, 150).await; -// parents[0] = block7.id; - -// let addr_state = consensus_command_sender -// .get_addresses_info(addresses.clone()) -// .await -// .unwrap() -// .get(&address_2) -// .unwrap() -// .clone(); -// assert_eq!(addr_state.rolls.active_rolls, 1); -// assert_eq!(addr_state.rolls.final_rolls, 0); -// assert_eq!(addr_state.rolls.candidate_rolls, 0); - -// let block7t1 = create_block_with_operations( -// &cfg, -// Slot::new(7, 1), -// &parents, -// &get_creator_for_draw( -// &selector_controller.get_producer(Slot::new(6, 0)).unwrap(), -// &vec![keypair_1.clone(), keypair_2.clone()], -// ), -// vec![], -// ); - -// wait_pool_slot(&mut pool_controller, cfg.t0, 7, 1).await; -// // valid -// propagate_block(&mut protocol_controller, block7t1.clone(), true, 150).await; -// parents[1] = block7t1.id; - -// // cycle 4 - -// let block8 = create_block_with_operations( -// &cfg, -// Slot::new(8, 0), -// &parents, -// &keypair_1, -// vec![rb_a2_r2], -// ); -// wait_pool_slot(&mut pool_controller, cfg.t0, 8, 0).await; -// // valid -// propagate_block(&mut protocol_controller, block8.clone(), true, 150).await; -// parents[0] = block8.id; - -// let addr_state = consensus_command_sender -// .get_addresses_info(addresses.clone()) -// .await -// .unwrap() -// .get(&address_2) -// .unwrap() -// .clone(); -// assert_eq!(addr_state.rolls.active_rolls, 0); -// assert_eq!(addr_state.rolls.final_rolls, 0); -// assert_eq!(addr_state.rolls.candidate_rolls, 2); -// let balance = addr_state.ledger_info.candidate_ledger_info.balance; -// assert_eq!(balance, Amount::from_str("7000").unwrap()); - -// let block8t1 = -// create_block_with_operations(&cfg, Slot::new(8, 1), &parents, &keypair_1, vec![]); -// 
wait_pool_slot(&mut pool_controller, cfg.t0, 8, 1).await; -// // valid -// propagate_block(&mut protocol_controller, block8t1.clone(), true, 150).await; -// parents[1] = block8t1.id; - -// let block9 = create_block_with_operations( -// &cfg, -// Slot::new(9, 0), -// &parents, -// &keypair_1, -// vec![rs_a2_r2], -// ); -// wait_pool_slot(&mut pool_controller, cfg.t0, 9, 0).await; -// // valid -// propagate_block(&mut protocol_controller, block9.clone(), true, 150).await; -// parents[0] = block9.id; - -// let addr_state = consensus_command_sender -// .get_addresses_info(addresses.clone()) -// .await -// .unwrap() -// .get(&address_2) -// .unwrap() -// .clone(); -// assert_eq!(addr_state.rolls.active_rolls, 0); -// assert_eq!(addr_state.rolls.final_rolls, 0); -// assert_eq!(addr_state.rolls.candidate_rolls, 0); -// let balance = addr_state.ledger_info.candidate_ledger_info.balance; -// assert_eq!(balance, Amount::from_str("9000").unwrap()); - -// let block9t1 = -// create_block_with_operations(&cfg, Slot::new(9, 1), &parents, &keypair_1, vec![]); -// wait_pool_slot(&mut pool_controller, cfg.t0, 9, 1).await; -// // valid -// propagate_block(&mut protocol_controller, block9t1.clone(), true, 150).await; -// parents[1] = block9t1.id; - -// // cycle 5 - -// let block10 = -// create_block_with_operations(&cfg, Slot::new(10, 0), &parents, &keypair_1, vec![]); -// wait_pool_slot(&mut pool_controller, cfg.t0, 10, 0).await; -// // valid -// propagate_block(&mut protocol_controller, block10.clone(), true, 150).await; -// parents[0] = block10.id; - -// let addr_state = consensus_command_sender -// .get_addresses_info(addresses.clone()) -// .await -// .unwrap() -// .get(&address_2) -// .unwrap() -// .clone(); -// assert_eq!(addr_state.rolls.active_rolls, 0); -// assert_eq!(addr_state.rolls.final_rolls, 2); -// assert_eq!(addr_state.rolls.candidate_rolls, 0); - -// let balance = consensus_command_sender -// .get_addresses_info(addresses.clone()) -// .await -// .unwrap() -// 
.get(&address_2) -// .unwrap() -// .ledger_info -// .candidate_ledger_info -// .balance; -// assert_eq!(balance, Amount::from_str("10000").unwrap()); - -// let block10t1 = -// create_block_with_operations(&cfg, Slot::new(10, 1), &parents, &keypair_1, vec![]); -// wait_pool_slot(&mut pool_controller, cfg.t0, 10, 1).await; -// // valid -// propagate_block(&mut protocol_controller, block10t1.clone(), true, 150).await; -// parents[1] = block10t1.id; - -// let block11 = -// create_block_with_operations(&cfg, Slot::new(11, 0), &parents, &keypair_1, vec![]); -// wait_pool_slot(&mut pool_controller, cfg.t0, 11, 0).await; -// // valid -// propagate_block(&mut protocol_controller, block11.clone(), true, 150).await; -// parents[0] = block11.id; - -// let addr_state = consensus_command_sender -// .get_addresses_info(addresses.clone()) -// .await -// .unwrap() -// .get(&address_2) -// .unwrap() -// .clone(); -// assert_eq!(addr_state.rolls.active_rolls, 0); -// assert_eq!(addr_state.rolls.final_rolls, 0); -// assert_eq!(addr_state.rolls.candidate_rolls, 0); -// ( -// pool_controller, -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// selector_controller, -// ) -// }, -// ) -// .await; -// } - -// #[tokio::test] -// #[serial] -// async fn test_roll_block_creation() { -// // setup logging -// /* -// stderrlog::new() -// .verbosity(4) -// .timestamp(stderrlog::Timestamp::Millisecond) -// .init() -// .unwrap(); -// */ -// let mut cfg = ConsensusConfig { -// block_reward: Amount::default(), -// delta_f0: 3, -// operation_validity_periods: 10, -// max_block_size: 500, -// max_operations_per_block: 5000, -// periods_per_cycle: 2, -// roll_price: Amount::from_str("1000").unwrap(), -// t0: 500.into(), -// ..Default::default() -// }; -// // define addresses use for the test -// // addresses 1 and 2 both in thread 0 -// let (_, keypair_1) = random_address_on_thread(0, cfg.thread_count).into(); -// let (address_2, keypair_2) = 
random_address_on_thread(0, cfg.thread_count).into(); - -// let mut ledger = HashMap::new(); -// ledger.insert( -// address_2, -// LedgerData::new(Amount::from_str("10000").unwrap()), -// ); -// let initial_ledger_file = generate_ledger_file(&ledger); -// let staking_keys_file = generate_staking_keys_file(&[keypair_1.clone()]); -// let initial_rolls_file = generate_default_roll_counts_file(vec![keypair_1.clone()]); -// cfg.initial_ledger_path = initial_ledger_file.path().to_path_buf(); -// cfg.staking_keys_path = staking_keys_file.path().to_path_buf(); -// cfg.initial_rolls_path = initial_rolls_file.path().to_path_buf(); -// // mock protocol & pool -// let (mut protocol_controller, protocol_command_sender, protocol_event_receiver) = -// MockProtocolController::new(); -// let mut pool_controller = MockPoolController::new(); -// let (execution_controller, _execution_rx) = MockExecutionController::new_with_receiver(); - -// let init_time: MassaTime = 1000.into(); -// cfg.genesis_timestamp = MassaTime::now(0).unwrap().saturating_add(init_time); -// let storage: Storage = Storage::create_root(); -// // launch consensus controller -// let selector_config = SelectorConfig { -// initial_rolls_path: cfg.initial_rolls_path.clone(), -// ..Default::default() -// }; -// let (_selector_manager, selector_controller) = -// start_selector_worker(selector_config, VecDeque::new()).unwrap(); -// let (consensus_command_sender, _consensus_event_receiver, _consensus_manager) = -// start_consensus_controller( -// cfg.clone(), -// ConsensusChannels { -// execution_controller, -// protocol_command_sender: protocol_command_sender.clone(), -// protocol_event_receiver, -// pool_command_sender: Box::new(pool_controller.clone()), -// selector_controller, -// }, -// None, -// storage.clone(), -// 0, -// ) -// .await -// .expect("could not start consensus controller"); - -// // operations -// let rb_a2_r1 = create_roll_buy(&keypair_2, 1, 90, 0); -// let rs_a2_r1 = create_roll_sell(&keypair_2, 1, 
90, 0); - -// let mut addresses = Set::
::default(); -// addresses.insert(address_2); -// let addresses = addresses; - -// // wait for first slot -// // TODO: Replace ?? -// // pool_controller -// // .wait_command( -// // cfg.t0.saturating_mul(2).saturating_add(init_time), -// // |cmd| match cmd { -// // PoolCommand::UpdateCurrentSlot(s) => { -// // if s == Slot::new(1, 0) { -// // Some(()) -// // } else { -// // None -// // } -// // } -// // PoolCommand::GetEndorsements { response_tx, .. } => { -// // response_tx.send(Vec::new()).unwrap(); -// // None -// // } -// // _ => None, -// // }, -// // ) -// // .await -// // .expect("timeout while waiting for slot"); - -// // // cycle 0 -// // println!("Test"); -// // // respond to first pool batch command -// // pool_controller -// // .wait_command(300.into(), |cmd| match cmd { -// // PoolCommand::GetOperationBatch { -// // response_tx, -// // target_slot, -// // .. -// // } => { -// // assert_eq!(target_slot, Slot::new(1, 0)); -// // response_tx.send(vec![(rb_a2_r1.clone(), 10)]).unwrap(); -// // Some(()) -// // } -// // PoolCommand::GetEndorsements { response_tx, .. 
} => { -// // response_tx.send(Vec::new()).unwrap(); -// // None -// // } -// // _ => None, -// // }) -// // .await -// // .expect("timeout while waiting for 1st operation batch request"); - -// // wait for block -// let block = protocol_controller -// .wait_command(500.into(), |cmd| match cmd { -// ProtocolCommand::IntegratedBlock { block_id } => { -// println!("Integrated block"); -// let block = storage -// .retrieve_block(&block_id) -// .expect(&format!("Block id : {} not found in storage", block_id)); -// let stored_block = block.read(); -// Some(stored_block.clone()) -// } -// _ => None, -// }) -// .await -// .expect("timeout while waiting for block"); - -// // assert it's the expected block -// assert_eq!(block.content.header.content.slot, Slot::new(1, 0)); -// assert_eq!(block.content.operations.len(), 1); -// assert!(block.content.operations.contains(&rb_a2_r1.id)); - -// let addr_state = consensus_command_sender -// .get_addresses_info(addresses.clone()) -// .await -// .unwrap() -// .get(&address_2) -// .unwrap() -// .clone(); -// assert_eq!(addr_state.rolls.active_rolls, 0); -// assert_eq!(addr_state.rolls.final_rolls, 0); -// assert_eq!(addr_state.rolls.candidate_rolls, 1); - -// let balance = consensus_command_sender -// .get_addresses_info(addresses.clone()) -// .await -// .unwrap() -// .get(&address_2) -// .unwrap() -// .ledger_info -// .candidate_ledger_info -// .balance; -// assert_eq!(balance, Amount::from_str("9000").unwrap()); - -// wait_pool_slot(&mut pool_controller, cfg.t0, 1, 1).await; -// // TODO: Replace ?? -// // slot 1,1 -// // pool_controller -// // .wait_command(300.into(), |cmd| match cmd { -// // PoolCommand::GetOperationBatch { -// // response_tx, -// // target_slot, -// // .. -// // } => { -// // assert_eq!(target_slot, Slot::new(1, 1)); -// // response_tx.send(vec![]).unwrap(); -// // Some(()) -// // } -// // PoolCommand::GetEndorsements { response_tx, .. 
} => { -// // response_tx.send(Vec::new()).unwrap(); -// // None -// // } -// // _ => None, -// // }) -// // .await -// // .expect("timeout while waiting for operation batch request"); - -// // wait for block -// let block = protocol_controller -// .wait_command(500.into(), |cmd| match cmd { -// ProtocolCommand::IntegratedBlock { block_id } => { -// let block = storage -// .retrieve_block(&block_id) -// .expect(&format!("Block id : {} not found in storage", block_id)); -// let stored_block = block.read(); -// Some(stored_block.clone()) -// } -// _ => None, -// }) -// .await -// .expect("timeout while waiting for block"); - -// // assert it's the expected block -// assert_eq!(block.content.header.content.slot, Slot::new(1, 1)); -// assert!(block.content.operations.is_empty()); - -// // cycle 1 - -// //TODO: replace -// // pool_controller -// // .wait_command(300.into(), |cmd| match cmd { -// // PoolCommand::GetOperationBatch { -// // response_tx, -// // target_slot, -// // .. -// // } => { -// // assert_eq!(target_slot, Slot::new(2, 0)); -// // response_tx.send(vec![(rs_a2_r1.clone(), 10)]).unwrap(); -// // Some(()) -// // } -// // PoolCommand::GetEndorsements { response_tx, .. 
} => { -// // response_tx.send(Vec::new()).unwrap(); -// // None -// // } -// // _ => None, -// // }) -// // .await -// // .expect("timeout while waiting for 1st operation batch request"); - -// // wait for block -// let block = protocol_controller -// .wait_command(500.into(), |cmd| match cmd { -// ProtocolCommand::IntegratedBlock { block_id } => { -// let block = storage -// .retrieve_block(&block_id) -// .expect(&format!("Block id : {} not found in storage", block_id)); -// let stored_block = block.read(); -// Some(stored_block.clone()) -// } -// _ => None, -// }) -// .await -// .expect("timeout while waiting for block"); - -// // assert it's the expected block -// assert_eq!(block.content.header.content.slot, Slot::new(2, 0)); -// assert_eq!(block.content.operations.len(), 1); -// assert!(block.content.operations.contains(&rs_a2_r1.id)); - -// let addr_state = consensus_command_sender -// .get_addresses_info(addresses.clone()) -// .await -// .unwrap() -// .get(&address_2) -// .unwrap() -// .clone(); -// assert_eq!(addr_state.rolls.active_rolls, 0); -// assert_eq!(addr_state.rolls.final_rolls, 0); -// assert_eq!(addr_state.rolls.candidate_rolls, 0); -// let balance = addr_state.ledger_info.candidate_ledger_info.balance; -// assert_eq!(balance, Amount::from_str("9000").unwrap()); -// } - -// #[tokio::test] -// #[serial] -// async fn test_roll_deactivation() { -// /* -// Scenario: -// * deactivation threshold at 50% -// * thread_count = 10 -// * lookback_cycles = 2 -// * periods_per_cycle = 10 -// * delta_f0 = 2 -// * all addresses have 1 roll initially -// * in cycle 0: -// * an address A0 in thread 0 produces 20% of its blocks -// * an address B0 in thread 0 produces 80% of its blocks -// * an address A1 in thread 1 produces 20% of its blocks -// * an address B1 in thread 1 produces 80% of its blocks -// * at the next cycles, all addresses produce all their blocks -// * at the 1st block of thread 0 in cycle 2: -// * address A0 has (0 candidate, 1 final, 1 
active) rolls -// * address B0 has (1 candidate, 1 final, 1 active) rolls -// * address A1 has (1 candidate, 1 final, 1 active) rolls -// * address B1 has (1 candidate, 1 final, 1 active) rolls -// * at the 1st block of thread 1 in cycle 2: -// * address A0 has (0 candidate, 1 final, 1 active) rolls -// * address B0 has (1 candidate, 1 final, 1 active) rolls -// * address A1 has (0 candidate, 1 final, 1 active) rolls -// * address B1 has (1 candidate, 1 final, 1 active) rolls -// */ -// let mut cfg = ConsensusConfig { -// delta_f0: 2, -// thread_count: 4, -// periods_per_cycle: 5, -// pos_lookback_cycles: 1, -// t0: 400.into(), -// roll_price: Amount::from_mantissa_scale(10, 0), -// pos_miss_rate_deactivation_threshold: Ratio::new(50, 100), -// ..Default::default() -// }; -// let storage: Storage = Storage::create_root(); - -// // setup addresses -// let (address_a0, keypair_a0) = random_address_on_thread(0, cfg.thread_count).into(); -// let (address_b0, keypair_b0) = random_address_on_thread(0, cfg.thread_count).into(); -// let (address_a1, keypair_a1) = random_address_on_thread(1, cfg.thread_count).into(); -// let (address_b1, keypair_b1) = random_address_on_thread(1, cfg.thread_count).into(); - -// let initial_ledger_file = generate_ledger_file(&HashMap::new()); -// let staking_keys_file = generate_staking_keys_file(&[]); -// let initial_rolls_file = generate_default_roll_counts_file(vec![ -// keypair_a0.clone(), -// keypair_a1.clone(), -// keypair_b0.clone(), -// keypair_b1.clone(), -// ]); - -// cfg.initial_ledger_path = initial_ledger_file.path().to_path_buf(); -// cfg.staking_keys_path = staking_keys_file.path().to_path_buf(); -// cfg.initial_rolls_path = initial_rolls_file.path().to_path_buf(); - -// // mock protocol & pool -// let (mut protocol_controller, protocol_command_sender, protocol_event_receiver) = -// MockProtocolController::new(); -// let (pool_controller, _pool_event_receiver) = MockPoolController::new_with_receiver(); -// let 
(execution_controller, _execution_rx) = MockExecutionController::new_with_receiver(); -// let selector_config = SelectorConfig { -// initial_rolls_path: cfg.initial_rolls_path.clone(), -// ..Default::default() -// }; -// let (_selector_manager, selector_controller) = -// start_selector_worker(selector_config, VecDeque::new()).unwrap(); -// cfg.genesis_timestamp = MassaTime::now(0).unwrap().saturating_add(300.into()); - -// // launch consensus controller -// let (consensus_command_sender, _consensus_event_receiver, _consensus_manager) = -// start_consensus_controller( -// cfg.clone(), -// ConsensusChannels { -// execution_controller, -// protocol_command_sender: protocol_command_sender.clone(), -// protocol_event_receiver, -// pool_command_sender: pool_controller, -// selector_controller: selector_controller.clone(), -// }, -// None, -// storage, -// 0, -// ) -// .await -// .expect("could not start consensus controller"); - -// let mut cur_slot = Slot::new(0, 0); -// let mut best_parents = consensus_command_sender -// .get_block_graph_status(None, None) -// .await -// .unwrap() -// .genesis_blocks; -// let mut cycle_draws = HashMap::new(); -// let mut draws_cycle = None; -// 'outer: loop { -// // wait for slot info -// // let latest_slot = pool_controller -// // .wait_command(cfg.t0.checked_mul(2).unwrap(), |cmd| match cmd { -// // PoolCommand::UpdateCurrentSlot(s) => Some(s), -// // _ => None, -// // }) -// // .await -// // .expect("timeout while waiting for slot"); -// let latest_slot = Slot::new(0, 0); -// // apply all slots in-between -// while cur_slot <= latest_slot { -// // skip genesis -// if cur_slot.period == 0 { -// cur_slot = cur_slot.get_next_slot(cfg.thread_count).unwrap(); -// continue; -// } -// let cur_cycle = cur_slot.get_cycle(cfg.periods_per_cycle); - -// // get draws -// if draws_cycle != Some(cur_cycle) { -// for i in std::cmp::max(cur_cycle * cfg.periods_per_cycle, 1)..(cur_cycle + 1) { -// let slot = Slot::new(i, 0); -// cycle_draws.insert( 
-// slot, -// Some(selector_controller.get_selection(slot).unwrap().producer), -// ); -// } -// if cur_cycle == 0 { -// // controlled misses in cycle 0 -// for address in [address_a0, address_a1, address_b0, address_b1] { -// let mut address_draws: Vec = cycle_draws -// .iter() -// .filter_map(|(s, opt_a)| { -// if let Some(a) = opt_a { -// if *a == address { -// return Some(*s); -// } -// } -// None -// }) -// .collect(); -// assert!( -// !address_draws.is_empty(), -// "unlucky seed: address has no draws in cycle 0, cannot perform test" -// ); -// address_draws.shuffle(&mut StdRng::from_entropy()); -// let produce_count: usize = if address == address_a0 || address == address_a1 -// { -// // produce less than 20% -// 20 * address_draws.len() / 100 -// } else { -// // produce more than 80% -// std::cmp::min(address_draws.len(), (80 * address_draws.len() / 100) + 1) -// }; -// address_draws.truncate(produce_count); -// for (slt, opt_addr) in cycle_draws.iter_mut() { -// if *opt_addr == Some(address) && !address_draws.contains(slt) { -// *opt_addr = None; -// } -// } -// } -// } -// draws_cycle = Some(cur_cycle); -// } -// let cur_draw = cycle_draws[&cur_slot]; - -// // create and propagate block -// if let Some(addr) = cur_draw { -// let creator_privkey = if addr == address_a0 { -// keypair_a0.clone() -// } else if addr == address_a1 { -// keypair_a1.clone() -// } else if addr == address_b0 { -// keypair_b0.clone() -// } else if addr == address_b1 { -// keypair_b1.clone() -// } else { -// panic!("invalid address selected"); -// }; -// let block_id = propagate_block( -// &mut protocol_controller, -// create_block(&cfg, cur_slot, best_parents.clone(), &creator_privkey), -// true, -// 500, -// ) -// .await; - -// // update best parents -// best_parents[cur_slot.thread as usize] = block_id; -// } - -// // check candidate rolls -// let addrs_info = consensus_command_sender -// .get_addresses_info( -// vec![address_a0, address_a1, address_b0, address_b1] -// .into_iter() 
-// .collect(), -// ) -// .await -// .unwrap() -// .clone(); -// if cur_slot.period == (1 + cfg.pos_lookback_cycles) * cfg.periods_per_cycle { -// if cur_slot.thread == 0 { -// assert_eq!(addrs_info[&address_a0].rolls.candidate_rolls, 0); -// assert_eq!(addrs_info[&address_b0].rolls.candidate_rolls, 1); -// assert_eq!(addrs_info[&address_a1].rolls.candidate_rolls, 1); -// assert_eq!(addrs_info[&address_b1].rolls.candidate_rolls, 1); -// } else if cur_slot.thread == 1 { -// assert_eq!(addrs_info[&address_a0].rolls.candidate_rolls, 0); -// assert_eq!(addrs_info[&address_b0].rolls.candidate_rolls, 1); -// assert_eq!(addrs_info[&address_a1].rolls.candidate_rolls, 0); -// assert_eq!(addrs_info[&address_b1].rolls.candidate_rolls, 1); -// } else { -// break 'outer; -// } -// } else { -// assert_eq!(addrs_info[&address_a0].rolls.candidate_rolls, 1); -// assert_eq!(addrs_info[&address_b0].rolls.candidate_rolls, 1); -// assert_eq!(addrs_info[&address_a1].rolls.candidate_rolls, 1); -// assert_eq!(addrs_info[&address_b1].rolls.candidate_rolls, 1); -// } - -// cur_slot = cur_slot.get_next_slot(cfg.thread_count).unwrap(); -// } -// } -// } diff --git a/massa-consensus-worker/src/tests/scenarios106.rs b/massa-consensus-worker/src/tests/scenarios106.rs deleted file mode 100644 index 38ccc690cf9..00000000000 --- a/massa-consensus-worker/src/tests/scenarios106.rs +++ /dev/null @@ -1,869 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -// RUST_BACKTRACE=1 cargo test scenarios106 -- --nocapture - -use super::tools::*; -use massa_consensus_exports::ConsensusConfig; - -use massa_models::prehash::PreHashSet; -use massa_models::timeslots; -use massa_models::{block::BlockId, slot::Slot}; -use massa_signature::KeyPair; -use massa_storage::Storage; -use massa_time::MassaTime; -use serial_test::serial; -use std::collections::HashSet; -use std::time::Duration; - -#[tokio::test] -#[serial] -#[ignore] -async fn test_unsorted_block() { - /*stderrlog::new() - .verbosity(4) - 
.timestamp(stderrlog::Timestamp::Millisecond) - .init() - .unwrap();*/ - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - t0: 1000.into(), - future_block_processing_max_periods: 50, - max_future_processing_blocks: 10, - ..ConsensusConfig::default() - }; - - let mut storage = Storage::create_root(); - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let start_period = 3; - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - // create test blocks - - let t0s1 = create_block( - &cfg, - Slot::new(1 + start_period, 0), - genesis_hashes.clone(), - &staking_keys[0], - ); - - let t1s1 = create_block( - &cfg, - Slot::new(1 + start_period, 1), - genesis_hashes.clone(), - &staking_keys[0], - ); - - let t0s2 = create_block( - &cfg, - Slot::new(2 + start_period, 0), - vec![t0s1.id, t1s1.id], - &staking_keys[0], - ); - let t1s2 = create_block( - &cfg, - Slot::new(2 + start_period, 1), - vec![t0s1.id, t1s1.id], - &staking_keys[0], - ); - - let t0s3 = create_block( - &cfg, - Slot::new(3 + start_period, 0), - vec![t0s2.id, t1s2.id], - &staking_keys[0], - ); - let t1s3 = create_block( - &cfg, - Slot::new(3 + start_period, 1), - vec![t0s2.id, t1s2.id], - &staking_keys[0], - ); - - let t0s4 = create_block( - &cfg, - Slot::new(4 + start_period, 0), - vec![t0s3.id, t1s3.id], - &staking_keys[0], - ); - let t1s4 = create_block( - &cfg, - Slot::new(4 + start_period, 1), - vec![t0s3.id, t1s3.id], - &staking_keys[0], - ); - - // send blocks t0s1, t1s1, - storage.store_block(t0s1.clone()); - protocol_controller - .receive_block(t0s1.id, t0s1.content.header.content.slot, storage.clone()) - .await; - storage.store_block(t1s1.clone()); - protocol_controller - .receive_block(t1s1.id, t1s1.content.header.content.slot, 
storage.clone()) - .await; - // send blocks t0s3, t1s4, t0s4, t0s2, t1s3, t1s2 - storage.store_block(t0s3.clone()); - protocol_controller - .receive_block(t0s3.id, t0s3.content.header.content.slot, storage.clone()) - .await; - storage.store_block(t1s4.clone()); - protocol_controller - .receive_block(t1s4.id, t1s4.content.header.content.slot, storage.clone()) - .await; - storage.store_block(t0s4.clone()); - protocol_controller - .receive_block(t0s4.id, t0s4.content.header.content.slot, storage.clone()) - .await; - storage.store_block(t0s2.clone()); - protocol_controller - .receive_block(t0s2.id, t0s2.content.header.content.slot, storage.clone()) - .await; - storage.store_block(t1s3.clone()); - protocol_controller - .receive_block(t1s3.id, t1s3.content.header.content.slot, storage.clone()) - .await; - storage.store_block(t1s2.clone()); - protocol_controller - .receive_block(t1s2.id, t1s2.content.header.content.slot, storage.clone()) - .await; - - // block t0s1 and t1s1 are propagated - let hash_list = vec![t0s1.id, t1s1.id]; - validate_propagate_block_in_list( - &mut protocol_controller, - &hash_list, - 3000 + start_period * 1000, - ) - .await; - validate_propagate_block_in_list(&mut protocol_controller, &hash_list, 1000).await; - // block t0s2 and t1s2 are propagated - let hash_list = vec![t0s2.id, t1s2.id]; - validate_propagate_block_in_list(&mut protocol_controller, &hash_list, 1000).await; - validate_propagate_block_in_list(&mut protocol_controller, &hash_list, 1000).await; - // block t0s3 and t1s3 are propagated - let hash_list = vec![t0s3.id, t1s3.id]; - validate_propagate_block_in_list(&mut protocol_controller, &hash_list, 1000).await; - validate_propagate_block_in_list(&mut protocol_controller, &hash_list, 1000).await; - // block t0s4 and t1s4 are propagated - let hash_list = vec![t0s4.id, t1s4.id]; - validate_propagate_block_in_list(&mut protocol_controller, &hash_list, 1000).await; - validate_propagate_block_in_list(&mut protocol_controller, &hash_list, 
4000).await; - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} - -//test future_incoming_blocks block in the future with max_future_processing_blocks. -#[tokio::test] -#[serial] -#[ignore] -async fn test_unsorted_block_with_to_much_in_the_future() { - /*stderrlog::new() - .verbosity(4) - .timestamp(stderrlog::Timestamp::Millisecond) - .init() - .unwrap();*/ - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - t0: 1000.into(), - // slot 1 is in the past - genesis_timestamp: MassaTime::now(0).unwrap().saturating_sub(2000.into()), - future_block_processing_max_periods: 3, - max_future_processing_blocks: 5, - ..ConsensusConfig::default() - }; - - let mut storage = Storage::create_root(); - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - // create test blocks - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - - // a block in the past must be propagated - let block1 = create_block( - &cfg, - Slot::new(1, 0), - genesis_hashes.clone(), - &staking_keys[0], - ); - storage.store_block(block1.clone()); - protocol_controller - .receive_block( - block1.id, - block1.content.header.content.slot, - storage.clone(), - ) - .await; - validate_propagate_block(&mut protocol_controller, block1.id, 2500).await; - - // this block is slightly in the future: will wait for it - let slot = timeslots::get_current_latest_block_slot( - cfg.thread_count, - cfg.t0, - cfg.genesis_timestamp, - 0, - ) - .unwrap() - .unwrap(); - let block2 = create_block( - &cfg, - Slot::new(slot.period + 2, slot.thread), - genesis_hashes.clone(), - &staking_keys[0], - ); - storage.store_block(block2.clone()); - protocol_controller - .receive_block( - 
block2.id, - block2.content.header.content.slot, - storage.clone(), - ) - .await; - assert!(!validate_notpropagate_block(&mut protocol_controller, block2.id, 500).await); - validate_propagate_block(&mut protocol_controller, block2.id, 2500).await; - - // this block is too much in the future: do not process - let slot = timeslots::get_current_latest_block_slot( - cfg.thread_count, - cfg.t0, - cfg.genesis_timestamp, - 0, - ) - .unwrap() - .unwrap(); - let block3 = create_block( - &cfg, - Slot::new(slot.period + 1000, slot.thread), - genesis_hashes.clone(), - &staking_keys[0], - ); - storage.store_block(block3.clone()); - protocol_controller - .receive_block( - block3.id, - block3.content.header.content.slot, - storage.clone(), - ) - .await; - assert!(!validate_notpropagate_block(&mut protocol_controller, block3.id, 2500).await); - - // Check that the block has been silently dropped and not discarded for being too much in the future. - let block_graph = consensus_command_sender - .get_block_graph_status(None, None) - .await - .unwrap(); - assert!(!block_graph.active_blocks.contains_key(&block3.id)); - assert!(!block_graph.discarded_blocks.contains_key(&block3.id)); - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} - -#[tokio::test] -#[serial] -#[ignore] -async fn test_too_many_blocks_in_the_future() { - /*stderrlog::new() - .verbosity(4) - .timestamp(stderrlog::Timestamp::Millisecond) - .init() - .unwrap();*/ - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - delta_f0: 1000, - future_block_processing_max_periods: 100, - // slot 1 is in the past - genesis_timestamp: MassaTime::now(0).unwrap().saturating_sub(2000.into()), - max_future_processing_blocks: 2, - t0: 1000.into(), - ..ConsensusConfig::default() - }; - - let mut storage = Storage::create_root(); - - consensus_without_pool_test( - cfg.clone(), - async move |mut 
protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - // get genesis block hashes - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - - // generate 5 blocks but there is only space for 2 in the waiting line - let mut expected_block_hashes: HashSet = HashSet::new(); - let mut max_period = 0; - let slot = timeslots::get_current_latest_block_slot( - cfg.thread_count, - cfg.t0, - cfg.genesis_timestamp, - 0, - ) - .unwrap() - .unwrap(); - for period in 0..5 { - max_period = slot.period + 2 + period; - let block = create_block( - &cfg, - Slot::new(max_period, slot.thread), - genesis_hashes.clone(), - &staking_keys[0], - ); - storage.store_block(block.clone()); - protocol_controller - .receive_block(block.id, block.content.header.content.slot, storage.clone()) - .await; - if period < 2 { - expected_block_hashes.insert(block.id); - } - } - // wait for the 2 waiting blocks to propagate - let mut expected_clone = expected_block_hashes.clone(); - while !expected_block_hashes.is_empty() { - assert!( - expected_block_hashes.remove( - &validate_propagate_block_in_list( - &mut protocol_controller, - &expected_block_hashes.iter().copied().collect(), - 2500 - ) - .await - ), - "unexpected block propagated" - ); - } - // wait until we reach the slot of the last block - while timeslots::get_current_latest_block_slot( - cfg.thread_count, - cfg.t0, - cfg.genesis_timestamp, - 0, - ) - .unwrap() - .unwrap() - < Slot::new(max_period + 1, 0) - {} - // ensure that the graph contains only what we expect - let graph = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status"); - expected_clone.extend(graph.genesis_blocks); - assert_eq!( - expected_clone, - graph - .active_blocks - .keys() - .copied() - .collect::>(), - "unexpected block graph" - ); - ( - protocol_controller, - 
consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} - -#[tokio::test] -#[serial] -#[ignore] -async fn test_dep_in_back_order() { - /*stderrlog::new() - .verbosity(4) - .timestamp(stderrlog::Timestamp::Millisecond) - .init() - .unwrap();*/ - - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - genesis_timestamp: MassaTime::now(0) - .unwrap() - .saturating_sub(MassaTime::from_millis(1000).checked_mul(1000).unwrap()), - t0: 1000.into(), - ..ConsensusConfig::default() - }; - - let mut storage = Storage::create_root(); - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - - // create test blocks - let t0s1 = create_block( - &cfg, - Slot::new(1, 0), - genesis_hashes.clone(), - &staking_keys[0], - ); - - let t1s1 = create_block( - &cfg, - Slot::new(1, 1), - genesis_hashes.clone(), - &staking_keys[0], - ); - - let t0s2 = create_block( - &cfg, - Slot::new(2, 0), - vec![t0s1.id, t1s1.id], - &staking_keys[0], - ); - let t1s2 = create_block( - &cfg, - Slot::new(2, 1), - vec![t0s1.id, t1s1.id], - &staking_keys[0], - ); - - let t0s3 = create_block( - &cfg, - Slot::new(3, 0), - vec![t0s2.id, t1s2.id], - &staking_keys[0], - ); - let t1s3 = create_block( - &cfg, - Slot::new(3, 1), - vec![t0s2.id, t1s2.id], - &staking_keys[0], - ); - - let t0s4 = create_block( - &cfg, - Slot::new(4, 0), - vec![t0s3.id, t1s3.id], - &staking_keys[0], - ); - let t1s4 = create_block( - &cfg, - Slot::new(4, 1), - vec![t0s3.id, t1s3.id], - &staking_keys[0], - ); - - // send blocks t0s2, t1s3, t0s1, t0s4, t1s4, t1s1, t0s3, t1s2 - storage.store_block(t0s2.clone()); - protocol_controller - .receive_block(t0s2.id, 
t0s2.content.header.content.slot, storage.clone()) - .await; // not propagated and update wishlist - validate_wishlist( - &mut protocol_controller, - vec![t0s1.id, t1s1.id].into_iter().collect(), - PreHashSet::::default(), - 500, - ) - .await; - validate_notpropagate_block(&mut protocol_controller, t0s2.id, 500).await; - - storage.store_block(t1s3.clone()); - protocol_controller - .receive_block(t1s3.id, t1s3.content.header.content.slot, storage.clone()) - .await; // not propagated and no wishlist update - validate_notpropagate_block(&mut protocol_controller, t1s3.id, 500).await; - - storage.store_block(t0s1.clone()); - protocol_controller - .receive_block(t0s1.id, t0s1.content.header.content.slot, storage.clone()) - .await; // we have its parents so it should be integrated right now and update wishlist - - validate_propagate_block(&mut protocol_controller, t0s1.id, 500).await; - validate_wishlist( - &mut protocol_controller, - PreHashSet::::default(), - vec![t0s1.id].into_iter().collect(), - 500, - ) - .await; - - storage.store_block(t0s4.clone()); - protocol_controller - .receive_block(t0s4.id, t0s4.content.header.content.slot, storage.clone()) - .await; // not propagated and no wishlist update - validate_notpropagate_block(&mut protocol_controller, t0s4.id, 500).await; - - storage.store_block(t1s4.clone()); - protocol_controller - .receive_block(t1s4.id, t1s4.content.header.content.slot, storage.clone()) - .await; // not propagated and no wishlist update - validate_notpropagate_block(&mut protocol_controller, t1s4.id, 500).await; - - storage.store_block(t1s1.clone()); - protocol_controller - .receive_block(t1s1.id, t1s1.content.header.content.slot, storage.clone()) - .await; // assert t1s1 is integrated and t0s2 is integrated and wishlist updated - validate_propagate_block_in_list( - &mut protocol_controller, - &vec![t1s1.id, t0s2.id], - 500, - ) - .await; - - validate_propagate_block_in_list( - &mut protocol_controller, - &vec![t1s1.id, t0s2.id], - 500, - ) - 
.await; - validate_wishlist( - &mut protocol_controller, - vec![].into_iter().collect(), - vec![t1s1.id].into_iter().collect(), - 500, - ) - .await; - - storage.store_block(t0s3.clone()); - protocol_controller - .receive_block(t0s3.id, t0s3.content.header.content.slot, storage.clone()) - .await; // not propagated and no wishlist update - validate_notpropagate_block(&mut protocol_controller, t0s3.id, 500).await; - - storage.store_block(t1s2.clone()); - protocol_controller - .receive_block(t1s2.id, t1s2.content.header.content.slot, storage.clone()) - .await; - - // All remaining blocks are propagated - let integrated = vec![t1s2.id, t0s3.id, t1s3.id, t0s4.id, t1s4.id]; - validate_propagate_block_in_list(&mut protocol_controller, &integrated, 1000).await; - validate_propagate_block_in_list(&mut protocol_controller, &integrated, 1000).await; - validate_propagate_block_in_list(&mut protocol_controller, &integrated, 1000).await; - validate_propagate_block_in_list(&mut protocol_controller, &integrated, 1000).await; - validate_propagate_block_in_list(&mut protocol_controller, &integrated, 1000).await; - validate_wishlist( - &mut protocol_controller, - PreHashSet::::default(), - vec![t1s2.id].into_iter().collect(), - 500, - ) - .await; - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} - -#[tokio::test] -#[serial] -#[ignore] -async fn test_dep_in_back_order_with_max_dependency_blocks() { - /*stderrlog::new() - .verbosity(4) - .timestamp(stderrlog::Timestamp::Millisecond) - .init() - .unwrap();*/ - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - genesis_timestamp: MassaTime::now(0) - .unwrap() - .saturating_sub(MassaTime::from_millis(1000).checked_mul(1000).unwrap()), - max_dependency_blocks: 2, - t0: 1000.into(), - ..ConsensusConfig::default() - }; - tokio::time::sleep(Duration::from_millis(1000)).await; - let mut storage = 
Storage::create_root(); - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - - // create test blocks - - let t0s1 = create_block( - &cfg, - Slot::new(1, 0), - genesis_hashes.clone(), - &staking_keys[0], - ); - - let t1s1 = create_block( - &cfg, - Slot::new(1, 1), - genesis_hashes.clone(), - &staking_keys[0], - ); - - let t0s2 = create_block( - &cfg, - Slot::new(2, 0), - vec![t0s1.id, t1s1.id], - &staking_keys[0], - ); - let t1s2 = create_block( - &cfg, - Slot::new(2, 1), - vec![t0s1.id, t1s1.id], - &staking_keys[0], - ); - - let t0s3 = create_block( - &cfg, - Slot::new(3, 0), - vec![t0s2.id, t1s2.id], - &staking_keys[0], - ); - let t1s3 = create_block( - &cfg, - Slot::new(3, 1), - vec![t0s2.id, t1s2.id], - &staking_keys[0], - ); - - // send blocks t0s2, t1s3, t0s1, t0s4, t1s4, t1s1, t0s3, t1s2 - storage.store_block(t0s2.clone()); - protocol_controller - .receive_block(t0s2.id, t0s2.content.header.content.slot, storage.clone()) - .await; - validate_wishlist( - &mut protocol_controller, - vec![t0s1.id, t1s1.id].into_iter().collect(), - PreHashSet::::default(), - 500, - ) - .await; - validate_notpropagate_block(&mut protocol_controller, t0s2.id, 500).await; - - storage.store_block(t1s3.clone()); - protocol_controller - .receive_block(t1s3.id, t1s3.content.header.content.slot, storage.clone()) - .await; - validate_notpropagate_block(&mut protocol_controller, t1s3.id, 500).await; - - storage.store_block(t0s1.clone()); - protocol_controller - .receive_block(t0s1.id, t0s1.content.header.content.slot, storage.clone()) - .await; - validate_propagate_block(&mut protocol_controller, t0s1.id, 500).await; - validate_wishlist( - &mut protocol_controller, - PreHashSet::::default(), - 
vec![t0s1.id].into_iter().collect(), - 500, - ) - .await; - storage.store_block(t0s3.clone()); - protocol_controller - .receive_block(t0s3.id, t0s3.content.header.content.slot, storage.clone()) - .await; - validate_notpropagate_block(&mut protocol_controller, t0s3.id, 500).await; - - storage.store_block(t1s2.clone()); - protocol_controller - .receive_block(t1s2.id, t1s2.content.header.content.slot, storage.clone()) - .await; - validate_notpropagate_block(&mut protocol_controller, t1s2.id, 500).await; - - storage.store_block(t1s1.clone()); - protocol_controller - .receive_block(t1s1.id, t1s1.content.header.content.slot, storage.clone()) - .await; - validate_propagate_block_in_list( - &mut protocol_controller, - &vec![t1s1.id, t1s2.id], - 500, - ) - .await; - validate_propagate_block_in_list( - &mut protocol_controller, - &vec![t1s1.id, t1s2.id], - 500, - ) - .await; - validate_wishlist( - &mut protocol_controller, - PreHashSet::::default(), - vec![t1s1.id].into_iter().collect(), - 500, - ) - .await; - - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} - -#[tokio::test] -#[serial] -#[ignore] -async fn test_add_block_that_depends_on_invalid_block() { - /*stderrlog::new() - .verbosity(4) - .timestamp(stderrlog::Timestamp::Millisecond) - .init() - .unwrap();*/ - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - genesis_timestamp: MassaTime::now(0) - .unwrap() - .saturating_sub(MassaTime::from_millis(1000).checked_mul(1000).unwrap()), - max_dependency_blocks: 7, - t0: 1000.into(), - ..ConsensusConfig::default() - }; - let mut storage = Storage::create_root(); - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not 
get block graph status") - .genesis_blocks; - - // create test blocks - let t0s1 = create_block( - &cfg, - Slot::new(1, 0), - genesis_hashes.clone(), - &staking_keys[0], - ); - - let t1s1 = create_block( - &cfg, - Slot::new(1, 1), - genesis_hashes.clone(), - &staking_keys[0], - ); - - // blocks t3s2 with wrong thread and (t0s1, t1s1) parents. - let t3s2 = create_block( - &cfg, - Slot::new(2, 3), - vec![t0s1.id, t1s1.id], - &staking_keys[0], - ); - - // blocks t0s3 and t1s3 with (t3s2, t1s2) parents. - let t0s3 = create_block( - &cfg, - Slot::new(3, 0), - vec![t3s2.id, t1s1.id], - &staking_keys[0], - ); - let t1s3 = create_block( - &cfg, - Slot::new(3, 1), - vec![t3s2.id, t1s1.id], - &staking_keys[0], - ); - - // add block in this order t0s1, t1s1, t0s3, t1s3, t3s2 - // send blocks t0s2, t1s3, t0s1, t0s4, t1s4, t1s1, t0s3, t1s2 - storage.store_block(t0s1.clone()); - protocol_controller - .receive_block(t0s1.id, t0s1.content.header.content.slot, storage.clone()) - .await; - storage.store_block(t1s1.clone()); - protocol_controller - .receive_block(t1s1.id, t1s1.content.header.content.slot, storage.clone()) - .await; - storage.store_block(t0s3.clone()); - protocol_controller - .receive_block(t0s3.id, t0s1.content.header.content.slot, storage.clone()) - .await; - storage.store_block(t1s3.clone()); - protocol_controller - .receive_block(t1s3.id, t1s3.content.header.content.slot, storage.clone()) - .await; - storage.store_block(t3s2.clone()); - protocol_controller - .receive_block(t3s2.id, t3s2.content.header.content.slot, storage.clone()) - .await; - - // block t0s1 and t1s1 are propagated - let hash_list = vec![t0s1.id, t1s1.id]; - validate_propagate_block_in_list(&mut protocol_controller, &hash_list, 1000).await; - validate_propagate_block_in_list(&mut protocol_controller, &hash_list, 1000).await; - - // block t0s3, t1s3 are not propagated - let hash_list = vec![t0s3.id, t1s3.id]; - assert!( - !validate_notpropagate_block_in_list(&mut protocol_controller, &hash_list, 
2000) - .await - ); - assert!( - !validate_notpropagate_block_in_list(&mut protocol_controller, &hash_list, 2000) - .await - ); - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} diff --git a/massa-consensus-worker/src/tests/scenarios91_1.rs b/massa-consensus-worker/src/tests/scenarios91_1.rs deleted file mode 100644 index 4d3c0d4a858..00000000000 --- a/massa-consensus-worker/src/tests/scenarios91_1.rs +++ /dev/null @@ -1,446 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -// RUST_BACKTRACE=1 cargo test test_block_validity -- --nocapture - -use super::tools::*; -use massa_consensus_exports::ConsensusConfig; - -use massa_hash::Hash; -use massa_models::{block::BlockId, slot::Slot}; -use massa_signature::KeyPair; -use massa_storage::Storage; -use massa_time::MassaTime; -use serial_test::serial; - -//use time::MassaTime; - -#[tokio::test] -#[serial] -#[ignore] -async fn test_ti() { - /* stderrlog::new() - .verbosity(4) - .timestamp(stderrlog::Timestamp::Millisecond) - .init() - .unwrap(); */ - - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - future_block_processing_max_periods: 50, - // to avoid timing problems for blocks in the future - genesis_timestamp: MassaTime::now(0) - .unwrap() - .saturating_sub(MassaTime::from_millis(32000).checked_mul(1000).unwrap()), - ..ConsensusConfig::default() - }; - let mut storage = Storage::create_root(); - - // to avoid timing pb for block in the future - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - - // create a valid block for thread 0 - let valid_hasht0s1 = create_and_test_block( - &mut protocol_controller, - &cfg, - 
Slot::new(1, 0), - genesis_hashes.clone(), - true, - false, - &staking_keys[0], - ) - .await; - - // create a valid block on the other thread. - let valid_hasht1s1 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(1, 1), - genesis_hashes.clone(), - true, - false, - &staking_keys[0], - ) - .await; - - // one click with 2 block compatible - let block_graph = consensus_command_sender - .get_block_graph_status(None, None) - .await - .unwrap(); - let block1_clic = get_cliques(&block_graph, valid_hasht0s1); - let block2_clic = get_cliques(&block_graph, valid_hasht1s1); - assert_eq!(1, block1_clic.len()); - assert_eq!(1, block2_clic.len()); - assert_eq!(block1_clic, block2_clic); - - // Create other clique bock T0S2 - let fork_block = create_block_with_merkle_root( - &cfg, - Hash::compute_from("Other hash!".as_bytes()), - Slot::new(2, 0), - genesis_hashes.clone(), - &staking_keys[0], - ); - - storage.store_block(fork_block.clone()); - protocol_controller - .receive_block( - fork_block.id, - fork_block.content.header.content.slot, - storage.clone(), - ) - .await; - validate_propagate_block(&mut protocol_controller, fork_block.id, 1000).await; - // two clique with valid_hasht0s1 and valid_hasht1s1 in one and fork_block_hash, valid_hasht1s1 in the other - // test the first clique hasn't changed. 
- let block_graph = consensus_command_sender - .get_block_graph_status(None, None) - .await - .unwrap(); - let block1_clic = get_cliques(&block_graph, valid_hasht0s1); - let block2_clic = get_cliques(&block_graph, valid_hasht1s1); - assert_eq!(1, block1_clic.len()); - assert_eq!(2, block2_clic.len()); - assert!(block2_clic.intersection(&block1_clic).next().is_some()); - // test the new click - let fork_clic = get_cliques(&block_graph, fork_block.id); - assert_eq!(1, fork_clic.len()); - assert!(fork_clic.intersection(&block1_clic).next().is_none()); - assert!(fork_clic.intersection(&block2_clic).next().is_some()); - - // extend first clique - let mut parentt0sn_hash = valid_hasht0s1; - for period in 3..=35 { - let block_hash = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(period, 0), - vec![parentt0sn_hash, valid_hasht1s1], - true, - false, - &staking_keys[0], - ) - .await; - // validate the added block isn't in the forked block click. - let block_graph = consensus_command_sender - .get_block_graph_status(None, None) - .await - .unwrap(); - let block_clic = get_cliques(&block_graph, block_hash); - let fork_clic = get_cliques(&block_graph, fork_block.id); - assert!(fork_clic.intersection(&block_clic).next().is_none()); - - parentt0sn_hash = block_hash; - } - - // create new block in other clique - let block = create_block( - &cfg, - Slot::new(2, 1), - vec![fork_block.id, valid_hasht1s1], - &staking_keys[0], - ); - storage.store_block(block.clone()); - protocol_controller - .receive_block(block.id, block.content.header.content.slot, storage.clone()) - .await; - assert!(!validate_notpropagate_block(&mut protocol_controller, block.id, 1000,).await); - // verify that the clique has been pruned. 
- let block_graph = consensus_command_sender - .get_block_graph_status(None, None) - .await - .unwrap(); - let fork_clic = get_cliques(&block_graph, fork_block.id); - assert_eq!(0, fork_clic.len()); - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} - -#[tokio::test] -#[serial] -#[ignore] -async fn test_gpi() { - // // setup logging - /*stderrlog::new() - .verbosity(4) - .timestamp(stderrlog::Timestamp::Millisecond) - .init() - .unwrap();*/ - - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - future_block_processing_max_periods: 50, - // to avoid timing problems for blocks in the future - genesis_timestamp: MassaTime::now(0) - .unwrap() - .saturating_sub(MassaTime::from_millis(32000).checked_mul(1000).unwrap()), - ..ConsensusConfig::default() - }; - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - - // * create 1 normal block in each thread (t0s1 and t1s1) with genesis parents - // create a valids block for thread 0 - let valid_hasht0s1 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(1, 0), - genesis_hashes.clone(), - true, - false, - &staking_keys[0], - ) - .await; - - // create a valid block on the other thread. 
- let valid_hasht1s1 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(1, 1), - genesis_hashes.clone(), - true, - false, - &staking_keys[0], - ) - .await; - - // one click with 2 block compatible - let block_graph = consensus_command_sender - .get_block_graph_status(None, None) - .await - .unwrap(); - let block1_clic = get_cliques(&block_graph, valid_hasht0s1); - let block2_clic = get_cliques(&block_graph, valid_hasht1s1); - assert_eq!(1, block1_clic.len()); - assert_eq!(1, block2_clic.len()); - assert_eq!(block1_clic, block2_clic); - - // create 2 clique - // * create 1 block in t0s2 with parents of slots (t0s1, t1s0) - let valid_hasht0s2 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(2, 0), - vec![valid_hasht0s1, genesis_hashes[1]], - true, - false, - &staking_keys[0], - ) - .await; - // * create 1 block in t1s2 with parents of slots (t0s0, t1s1) - let valid_hasht1s2 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(2, 1), - vec![genesis_hashes[0], valid_hasht1s1], - true, - false, - &staking_keys[0], - ) - .await; - - // * after processing the block in t1s2, the block of t0s2 is incompatible with block of t1s2 (link in gi) - let block_graph = consensus_command_sender - .get_block_graph_status(None, None) - .await - .unwrap(); - let blockt1s2_clic = get_cliques(&block_graph, valid_hasht1s2); - let blockt0s2_clic = get_cliques(&block_graph, valid_hasht0s2); - assert!(blockt1s2_clic - .intersection(&blockt0s2_clic) - .next() - .is_none()); - // * after processing the block in t1s2, there are 2 cliques, one with block of t0s2 and one with block of t1s2, and the parent vector uses the clique of minimum hash sum so the block of minimum hash between t0s2 and t1s2 - assert_eq!(1, blockt1s2_clic.len()); - assert_eq!(1, blockt0s2_clic.len()); - let parents: Vec = block_graph.best_parents.iter().map(|(b, _p)| *b).collect(); - if valid_hasht1s2 > valid_hasht0s2 { - assert_eq!(parents[0], valid_hasht0s2) 
- } else { - assert_eq!(parents[1], valid_hasht1s2) - } - - // * continue with 33 additional blocks in thread 0, that extend the clique of the block in t0s2: - // - a block in slot t0sX has parents (t0sX-1, t1s1), for X from 3 to 35 - let mut parentt0sn_hash = valid_hasht0s2; - for period in 3..=35 { - let block_hash = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(period, 0), - vec![parentt0sn_hash, valid_hasht1s1], - true, - false, - &staking_keys[0], - ) - .await; - parentt0sn_hash = block_hash; - } - // * create 1 block in t1s2 with the genesis blocks as parents - create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(3, 1), - vec![valid_hasht0s1, valid_hasht1s2], - false, - false, - &staking_keys[0], - ) - .await; - - // * after processing the 33 blocks, one clique is removed (too late), - // the block of minimum hash becomes final, the one of maximum hash becomes stale - // verify that the clique has been pruned. - let block_graph = consensus_command_sender - .get_block_graph_status(None, None) - .await - .unwrap(); - let fork_clic = get_cliques(&block_graph, valid_hasht1s2); - assert_eq!(0, fork_clic.len()); - assert!(block_graph.discarded_blocks.contains_key(&valid_hasht1s2)); - assert!(block_graph.active_blocks.contains_key(&valid_hasht0s2)); - assert!(!block_graph.active_blocks.contains_key(&valid_hasht1s2)); - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} - -#[tokio::test] -#[serial] -#[ignore] -async fn test_old_stale() { - // // setup logging - // stderrlog::new() - // .verbosity(4) - // .timestamp(stderrlog::Timestamp::Millisecond) - // .init() - // .unwrap(); - - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - future_block_processing_max_periods: 50, - // to avoid timing problems for blocks in the future - genesis_timestamp: MassaTime::now(0) - .unwrap() - 
.saturating_sub(MassaTime::from_millis(32000).checked_mul(1000).unwrap()), - ..ConsensusConfig::default() - }; - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - - // * create 40 normal blocks in each thread: in slot 1 they have genesis parents, in slot 2 they have slot 1 parents - // create a valid block for slot 1 - let mut valid_hasht0 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(1, 0), - genesis_hashes.clone(), - true, - false, - &staking_keys[0], - ) - .await; - - // create a valid block on the other thread. - let mut valid_hasht1 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(1, 1), - genesis_hashes.clone(), - true, - false, - &staking_keys[0], - ) - .await; - - // and loop for the 39 other blocks - for i in 0..39 { - valid_hasht0 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(i + 2, 0), - vec![valid_hasht0, valid_hasht1], - true, - false, - &staking_keys[0], - ) - .await; - - // create a valid block on the other thread. 
- valid_hasht1 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(i + 2, 1), - vec![valid_hasht0, valid_hasht1], - true, - false, - &staking_keys[0], - ) - .await; - } - - // create 1 block in thread 0 slot 1 with genesis parents - let _valid_hasht0s2 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(1, 0), - genesis_hashes.clone(), - false, - false, - &staking_keys[0], - ) - .await; - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} diff --git a/massa-consensus-worker/src/tests/scenarios91_2.rs b/massa-consensus-worker/src/tests/scenarios91_2.rs deleted file mode 100644 index 2ba975e4f3d..00000000000 --- a/massa-consensus-worker/src/tests/scenarios91_2.rs +++ /dev/null @@ -1,514 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -use super::tools::*; -use massa_consensus_exports::ConsensusConfig; - -use massa_hash::Hash; -use massa_models::slot::Slot; -use massa_signature::KeyPair; -use massa_storage::Storage; -use massa_time::MassaTime; -use serial_test::serial; - -#[tokio::test] -#[serial] -#[ignore] -async fn test_queueing() { - // setup logging - // stderrlog::new() - // .verbosity(3) - // .timestamp(stderrlog::Timestamp::Millisecond) - // .init() - // .unwrap(); - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - future_block_processing_max_periods: 50, - // to avoid timing problems for blocks in the future - genesis_timestamp: MassaTime::now(0) - .unwrap() - .saturating_sub(MassaTime::from_millis(32000).checked_mul(1000).unwrap()), - ..ConsensusConfig::default() - }; - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; 
- - // * create 30 normal blocks in each thread: in slot 1 they have genesis parents, in slot 2 they have slot 1 parents - // create a valid block for slot 1 - let mut valid_hasht0 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(1, 0), - genesis_hashes.clone(), - true, - false, - &staking_keys[0], - ) - .await; - - // create a valid block on the other thread. - let mut valid_hasht1 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(1, 1), - genesis_hashes.clone(), - true, - false, - &staking_keys[0], - ) - .await; - - // and loop for the 29 other blocks - for i in 0..29 { - valid_hasht0 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(i + 2, 0), - vec![valid_hasht0, valid_hasht1], - true, - false, - &staking_keys[0], - ) - .await; - - // create a valid block on the other thread. - valid_hasht1 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(i + 2, 1), - vec![valid_hasht0, valid_hasht1], - true, - false, - &staking_keys[0], - ) - .await; - } - - let missed_block = create_block( - &cfg, - Slot::new(32, 0), - vec![valid_hasht0, valid_hasht1], - &staking_keys[0], - ); - - // create 1 block in thread 0 slot 33 with missed block as parent - valid_hasht0 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(33, 0), - vec![missed_block.id, valid_hasht1], - false, - false, - &staking_keys[0], - ) - .await; - - // and loop again for the 99 other blocks - for i in 0..30 { - valid_hasht0 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(i + 34, 0), - vec![valid_hasht0, valid_hasht1], - false, - false, - &staking_keys[0], - ) - .await; - - // create a valid block on the other thread. 
- valid_hasht1 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(i + 34, 1), - vec![valid_hasht0, valid_hasht1], - false, - false, - &staking_keys[0], - ) - .await; - } - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} - -#[tokio::test] -#[serial] -#[ignore] -async fn test_doubles() { - // setup logging - // stderrlog::new() - // .verbosity(3) - // .timestamp(stderrlog::Timestamp::Millisecond) - // .init() - // .unwrap(); - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - future_block_processing_max_periods: 50, - // to avoid timing problems for blocks in the future - genesis_timestamp: MassaTime::now(0) - .unwrap() - .saturating_sub(MassaTime::from_millis(32000).checked_mul(1000).unwrap()), - ..ConsensusConfig::default() - }; - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - - // * create 40 normal blocks in each thread: in slot 1 they have genesis parents, in slot 2 they have slot 1 parents - // create a valid block for slot 1 - let mut valid_hasht0 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(1, 0), - genesis_hashes.clone(), - true, - false, - &staking_keys[0], - ) - .await; - - // create a valid block on the other thread. 
- let mut valid_hasht1 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(1, 1), - genesis_hashes.clone(), - true, - false, - &staking_keys[0], - ) - .await; - - // and loop for the 39 other blocks - for i in 0..39 { - valid_hasht0 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(i + 2, 0), - vec![valid_hasht0, valid_hasht1], - true, - false, - &staking_keys[0], - ) - .await; - - // create a valid block on the other thread. - valid_hasht1 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(i + 2, 1), - vec![valid_hasht0, valid_hasht1], - true, - false, - &staking_keys[0], - ) - .await; - } - - // create 1 block in thread 0 slot 41 with missed block as parent - create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(41, 0), - vec![valid_hasht0, valid_hasht1], - true, - false, - &staking_keys[0], - ) - .await; - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} - -#[tokio::test] -#[serial] -#[ignore] -async fn test_double_staking() { - // setup logging - // stderrlog::new() - // .verbosity(3) - // .timestamp(stderrlog::Timestamp::Millisecond) - // .init() - // .unwrap(); - - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - future_block_processing_max_periods: 50, - // to avoid timing problems for blocks in the future - genesis_timestamp: MassaTime::now(0) - .unwrap() - .saturating_sub(MassaTime::from_millis(32000).checked_mul(1000).unwrap()), - ..ConsensusConfig::default() - }; - let mut storage = Storage::create_root(); - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - - // * 
create 40 normal blocks in each thread: in slot 1 they have genesis parents, in slot 2 they have slot 1 parents - // create a valid block for slot 1 - let mut valid_hasht0 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(1, 0), - genesis_hashes.clone(), - true, - false, - &staking_keys[0], - ) - .await; - - // create a valid block on the other thread. - let mut valid_hasht1 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(1, 1), - genesis_hashes.clone(), - true, - false, - &staking_keys[0], - ) - .await; - - // and loop for the 39 other blocks - for i in 0..39 { - valid_hasht0 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(i + 2, 0), - vec![valid_hasht0, valid_hasht1], - true, - false, - &staking_keys[0], - ) - .await; - - // create a valid block on the other thread. - valid_hasht1 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(i + 2, 1), - vec![valid_hasht0, valid_hasht1], - true, - false, - &staking_keys[0], - ) - .await; - } - - // same creator same slot, different block - let operation_merkle_root = Hash::compute_from("42".as_bytes()); - let block_1 = create_block_with_merkle_root( - &cfg, - operation_merkle_root, - Slot::new(41, 0), - vec![valid_hasht0, valid_hasht1], - &staking_keys[0], - ); - storage.store_block(block_1.clone()); - propagate_block( - &mut protocol_controller, - block_1.id, - block_1.content.header.content.slot, - storage.clone(), - true, - 150, - ) - .await; - - let operation_merkle_root = - Hash::compute_from("so long and thanks for all the fish".as_bytes()); - let block_2 = create_block_with_merkle_root( - &cfg, - operation_merkle_root, - Slot::new(41, 0), - vec![valid_hasht0, valid_hasht1], - &staking_keys[0], - ); - storage.store_block(block_2.clone()); - propagate_block( - &mut protocol_controller, - block_2.id, - block_2.content.header.content.slot, - storage.clone(), - true, - 150, - ) - .await; - - let graph = consensus_command_sender 
- .get_block_graph_status(None, None) - .await - .unwrap(); - let cliques_1 = get_cliques(&graph, block_1.id); - let cliques_2 = get_cliques(&graph, block_2.id); - assert!(cliques_1.is_disjoint(&cliques_2)); - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} - -#[tokio::test] -#[serial] -#[ignore] -async fn test_test_parents() { - // // setup logging - // stderrlog::new() - // .verbosity(4) - // .timestamp(stderrlog::Timestamp::Millisecond) - // .init() - // .unwrap(); - - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - future_block_processing_max_periods: 50, - // to avoid timing problems for blocks in the future - genesis_timestamp: MassaTime::now(0) - .unwrap() - .saturating_sub(MassaTime::from_millis(32000).checked_mul(1000).unwrap()), - ..ConsensusConfig::default() - }; - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - - // * create 2 normal blocks in each thread: in slot 1 they have genesis parents, in slot 2 they have slot 1 parents - // create a valid block for slot 1 - let valid_hasht0s1 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(1, 0), - genesis_hashes.clone(), - true, - false, - &staking_keys[0], - ) - .await; - - // create a valid block on the other thread. 
- let valid_hasht1s1 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(1, 1), - genesis_hashes.clone(), - true, - false, - &staking_keys[0], - ) - .await; - - // create a valid block for slot 2 - let valid_hasht0s2 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(2, 0), - vec![valid_hasht0s1, valid_hasht1s1], - true, - false, - &staking_keys[0], - ) - .await; - - // create a valid block on the other thread. - create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(2, 1), - vec![valid_hasht0s1, valid_hasht1s1], - true, - false, - &staking_keys[0], - ) - .await; - - // * create 1 block in t0s3 with parents (t0s2, t1s0) - // create a valid block for slot 2 - create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(3, 0), - vec![valid_hasht0s2, genesis_hashes[1usize]], - false, - false, - &staking_keys[0], - ) - .await; - - // * create 1 block in t1s3 with parents (t0s0, t0s0) - create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(3, 1), - vec![genesis_hashes[0usize], genesis_hashes[0usize]], - false, - false, - &staking_keys[0], - ) - .await; - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} diff --git a/massa-consensus-worker/src/tests/scenarios_basic.rs b/massa-consensus-worker/src/tests/scenarios_basic.rs deleted file mode 100644 index a0e773f60af..00000000000 --- a/massa-consensus-worker/src/tests/scenarios_basic.rs +++ /dev/null @@ -1,261 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -use super::tools; -use crate::tests::block_factory::BlockFactory; -use massa_consensus_exports::ConsensusConfig; -use massa_hash::Hash; -use massa_models::{block::BlockId, slot::Slot}; -use massa_signature::KeyPair; -use massa_storage::Storage; -use serial_test::serial; - -#[tokio::test] -#[serial] -#[ignore] -async fn test_old_stale_not_propagated_and_discarded() { - let staking_keys: Vec = (0..1).map(|_| 
KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - t0: 32.into(), - future_block_processing_max_periods: 50, - ..ConsensusConfig::default() - }; - - tools::consensus_without_pool_test( - cfg.clone(), - async move |protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let parents: Vec = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .best_parents - .iter() - .map(|(b, _p)| *b) - .collect(); - - let mut block_factory = - BlockFactory::start_block_factory(parents.clone(), protocol_controller); - block_factory.creator_keypair = staking_keys[0].clone(); - block_factory.slot = Slot::new(1, 0); - - let block_1 = block_factory.create_and_receive_block(true).await; - - block_factory.slot = Slot::new(1, 1); - block_factory.create_and_receive_block(true).await; - - block_factory.slot = Slot::new(1, 0); - block_factory.best_parents = vec![block_1.id, parents[0]]; - let block_3 = block_factory.create_and_receive_block(false).await; - - // Old stale block was discarded. 
- let status = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status"); - assert_eq!(status.discarded_blocks.len(), 1); - assert!(status.discarded_blocks.get(&block_3.id).is_some()); - ( - block_factory.take_protocol_controller(), - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} - -#[tokio::test] -#[serial] -#[ignore] -async fn test_block_not_processed_multiple_times() { - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - t0: 500.into(), - future_block_processing_max_periods: 50, - ..ConsensusConfig::default() - }; - let mut storage = Storage::create_root(); - - tools::consensus_without_pool_test( - cfg.clone(), - async move |protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let parents: Vec = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .best_parents - .iter() - .map(|(b, _p)| *b) - .collect(); - - let mut block_factory = - BlockFactory::start_block_factory(parents.clone(), protocol_controller); - block_factory.creator_keypair = staking_keys[0].clone(); - block_factory.slot = Slot::new(1, 0); - let block_1 = block_factory.create_and_receive_block(true).await; - - // Send it again, it should not be propagated. - storage.store_block(block_1.clone()); - block_factory - .receive_block( - false, - block_1.id, - block_1.content.header.content.slot, - storage.clone(), - ) - .await; - - // Send it again, it should not be propagated. - block_factory - .receive_block( - false, - block_1.id, - block_1.content.header.content.slot, - storage.clone(), - ) - .await; - - // Block was not discarded. 
- let status = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status"); - assert_eq!(status.discarded_blocks.len(), 0); - ( - block_factory.take_protocol_controller(), - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} - -#[tokio::test] -#[serial] -#[ignore] -async fn test_queuing() { - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - future_block_processing_max_periods: 50, - t0: 1000.into(), - ..ConsensusConfig::default() - }; - - tools::consensus_without_pool_test( - cfg.clone(), - async move |protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let parents: Vec = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .best_parents - .iter() - .map(|(b, _p)| *b) - .collect(); - - let mut block_factory = - BlockFactory::start_block_factory(parents.clone(), protocol_controller); - block_factory.creator_keypair = staking_keys[0].clone(); - block_factory.slot = Slot::new(3, 0); - - let block_1 = block_factory.create_and_receive_block(false).await; - - block_factory.slot = Slot::new(4, 0); - block_factory.best_parents = vec![block_1.id, parents[1]]; - - block_factory.create_and_receive_block(false).await; - - // Blocks were queued, not discarded. 
- let status = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status"); - assert_eq!(status.discarded_blocks.len(), 0); - ( - block_factory.take_protocol_controller(), - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} - -#[tokio::test] -#[serial] -#[ignore] -async fn test_double_staking_does_not_propagate() { - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - future_block_processing_max_periods: 50, - t0: 1000.into(), - ..ConsensusConfig::default() - }; - - let mut storage = Storage::create_root(); - - tools::consensus_without_pool_test( - cfg.clone(), - async move |protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let parents: Vec = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .best_parents - .iter() - .map(|(b, _p)| *b) - .collect(); - - let mut block_factory = - BlockFactory::start_block_factory(parents.clone(), protocol_controller); - block_factory.creator_keypair = staking_keys[0].clone(); - block_factory.slot = Slot::new(1, 0); - let mut block_1 = block_factory.create_and_receive_block(true).await; - - // Same creator, same slot, different block - block_1.content.header.content.operation_merkle_root = - Hash::compute_from("hello world".as_bytes()); - let block = block_factory.sign_header(block_1.content.header.content); - - // Note: currently does propagate, see #190. - storage.store_block(block.clone()); - block_factory - .receive_block( - true, - block.id, - block.content.header.content.slot, - storage.clone(), - ) - .await; - - // Block was not discarded. 
- let status = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status"); - assert_eq!(status.discarded_blocks.len(), 0); - ( - block_factory.take_protocol_controller(), - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} diff --git a/massa-consensus-worker/src/tests/scenarios_endorsements.rs b/massa-consensus-worker/src/tests/scenarios_endorsements.rs deleted file mode 100644 index 0900906ba9e..00000000000 --- a/massa-consensus-worker/src/tests/scenarios_endorsements.rs +++ /dev/null @@ -1,196 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -use massa_models::{ - block::BlockId, - endorsement::{Endorsement, EndorsementSerializer}, - slot::Slot, - wrapped::WrappedContent, -}; -use massa_signature::KeyPair; -use massa_storage::Storage; -use massa_time::MassaTime; -use serial_test::serial; - -use super::tools::*; -use massa_consensus_exports::ConsensusConfig; - -#[tokio::test] -#[serial] -#[ignore] -async fn test_endorsement_check() { - // setup logging - /* - stderrlog::new() - .verbosity(4) - .timestamp(stderrlog::Timestamp::Millisecond) - .init() - .unwrap(); - */ - let cfg = ConsensusConfig { - delta_f0: 3, - endorsement_count: 1, - genesis_timestamp: MassaTime::now(0).unwrap().saturating_add(300.into()), - operation_validity_periods: 100, - periods_per_cycle: 2, - t0: 500.into(), - ..ConsensusConfig::default() - }; - // define addresses use for the test - // addresses 1 and 2 both in thread 0 - - let (address_1, keypair_1) = random_address_on_thread(0, cfg.thread_count).into(); - let (address_2, keypair_2) = random_address_on_thread(0, cfg.thread_count).into(); - assert_eq!(0, address_2.get_thread(cfg.thread_count)); - - let mut storage = Storage::create_root(); - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let address_a = 
selector_controller - .get_selection(Slot::new(1, 0)) - .unwrap() - .producer; - let address_b = selector_controller - .get_selection(Slot::new(1, 0)) - .unwrap() - .endorsements[0]; - let address_c = selector_controller - .get_selection(Slot::new(1, 1)) - .unwrap() - .endorsements[1]; - - let keypair_a = if address_a == address_1 { - keypair_1.clone() - } else { - keypair_2.clone() - }; - let keypair_b = if address_b == address_1 { - keypair_1.clone() - } else { - keypair_2.clone() - }; - let keypair_c = if address_c == address_1 { - keypair_1.clone() - } else { - keypair_2.clone() - }; - - let parents: Vec = consensus_command_sender - .get_block_graph_status(None, None) - .await - .unwrap() - .best_parents - .iter() - .map(|(b, _p)| *b) - .collect(); - - let mut b10 = create_block(&cfg, Slot::new(1, 0), parents.clone(), &keypair_a); - - // create an otherwise valid endorsement with another address, include it in valid block(1,0), assert it is not propagated - let sender_keypair = KeyPair::generate(); - let content = Endorsement { - slot: Slot::new(1, 0), - index: 0, - endorsed_block: parents[0], - }; - let ed = Endorsement::new_wrapped( - content.clone(), - EndorsementSerializer::new(), - &sender_keypair, - ) - .unwrap(); - b10.content.header.content.endorsements = vec![ed]; - - storage.store_block(b10.clone()); - propagate_block( - &mut protocol_controller, - b10.id, - b10.content.header.content.slot, - storage.clone(), - false, - 500, - ) - .await; - - // create an otherwise valid endorsement at slot (1,1), include it in valid block(1,0), assert it is not propagated - let content = Endorsement { - slot: Slot::new(1, 1), - index: 0, - endorsed_block: parents[1], - }; - let ed = - Endorsement::new_wrapped(content.clone(), EndorsementSerializer::new(), &keypair_c) - .unwrap(); - let mut b10 = create_block(&cfg, Slot::new(1, 0), parents.clone(), &keypair_a); - b10.content.header.content.endorsements = vec![ed]; - - storage.store_block(b10.clone()); - 
propagate_block( - &mut protocol_controller, - b10.id, - b10.content.header.content.slot, - storage.clone(), - false, - 500, - ) - .await; - - // create an otherwise valid endorsement with genesis 1 as endorsed block, include it in valid block(1,0), assert it is not propagated - let content = Endorsement { - slot: Slot::new(1, 0), - index: 0, - endorsed_block: parents[1], - }; - let ed = - Endorsement::new_wrapped(content.clone(), EndorsementSerializer::new(), &keypair_b) - .unwrap(); - let mut b10 = create_block(&cfg, Slot::new(1, 0), parents.clone(), &keypair_a); - b10.content.header.content.endorsements = vec![ed]; - - storage.store_block(b10.clone()); - propagate_block( - &mut protocol_controller, - b10.id, - b10.content.header.content.slot, - storage.clone(), - false, - 500, - ) - .await; - - // create a valid endorsement, include it in valid block(1,1), assert it is propagated - let content = Endorsement { - slot: Slot::new(1, 0), - index: 0, - endorsed_block: parents[0], - }; - let ed = - Endorsement::new_wrapped(content.clone(), EndorsementSerializer::new(), &keypair_b) - .unwrap(); - let mut b10 = create_block(&cfg, Slot::new(1, 0), parents.clone(), &keypair_a); - b10.content.header.content.endorsements = vec![ed]; - - storage.store_block(b10.clone()); - propagate_block( - &mut protocol_controller, - b10.id, - b10.content.header.content.slot, - storage.clone(), - false, - 500, - ) - .await; - - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} diff --git a/massa-consensus-worker/src/tests/scenarios_get_operations.rs b/massa-consensus-worker/src/tests/scenarios_get_operations.rs deleted file mode 100644 index a2b53535c8c..00000000000 --- a/massa-consensus-worker/src/tests/scenarios_get_operations.rs +++ /dev/null @@ -1,201 +0,0 @@ -// // Copyright (c) 2022 MASSA LABS - -// use super::tools::*; -// use massa_consensus_exports::ConsensusConfig; - -// use 
massa_graph::BootstrapableGraph; -// use massa_models::WrappedOperation; -// use massa_models::{ -// clique::Clique, BlockId, OperationSearchResult, OperationSearchResultStatus, Slot, -// }; -// use massa_signature::KeyPair; -// use massa_time::MassaTime; -// use serial_test::serial; -// use std::collections::HashMap; - -// #[tokio::test] -// #[serial] -// #[ignore] -// async fn test_get_operation() { -// // // setup logging -// // stderrlog::new() -// // .verbosity(4) -// // .timestamp(stderrlog::Timestamp::Millisecond) -// // .init() -// // .unwrap(); -// let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); -// let cfg = ConsensusConfig { -// t0: 1000.into(), -// operation_validity_periods: 10, -// max_operations_per_block: 50, -// genesis_timestamp: MassaTime::now(0) -// .unwrap() -// .saturating_sub(MassaTime::from(32000).checked_mul(4).unwrap()) -// .saturating_add(300.into()), -// ..ConsensusConfig::default() -// }; -// // define addresses use for the test -// // addresses a and b both in thread 0 -// let (_address_a, keypair_a) = random_address_on_thread(0, cfg.thread_count).into(); -// let (address_b, _) = random_address_on_thread(0, cfg.thread_count).into(); -// // to avoid timing pb for block in the future - -// let op1 = create_transaction(&keypair_a, address_b, 1, 10, 1); -// let op2 = create_transaction(&keypair_a, address_b, 2, 10, 1); -// let op3 = create_transaction(&keypair_a, address_b, 3, 10, 1); -// let op4 = create_transaction(&keypair_a, address_b, 4, 10, 1); -// let op5 = create_transaction(&keypair_a, address_b, 5, 10, 1); - -// let ops = [ -// op1.clone(), -// op2.clone(), -// op3.clone(), -// op4.clone(), -// op5.clone(), -// ]; - -// let (boot_graph, b1, b2) = get_bootgraph(vec![op2.clone(), op3.clone()]); -// // there is only one node so it should be drawn at every slot - -// consensus_pool_test( -// cfg.clone(), -// Some(boot_graph), -// async move |pool_controller, -// protocol_controller, -// 
consensus_command_sender, -// consensus_event_receiver| { -// let (ops, _storage) = consensus_command_sender -// .get_operations(ops.iter().map(|op| op.id).collect()) -// .await -// .unwrap(); - -// let mut expected = HashMap::new(); - -// expected.insert( -// op2.id, -// OperationSearchResult { -// status: OperationSearchResultStatus::Pending, -// op: op2, -// in_pool: false, -// in_blocks: vec![(b1, (0, true))].into_iter().collect(), -// }, -// ); -// expected.insert( -// op3.id, -// OperationSearchResult { -// status: OperationSearchResultStatus::Pending, -// op: op3, -// in_pool: false, -// in_blocks: vec![(b2, (0, false))].into_iter().collect(), -// }, -// ); - -// assert_eq!(ops.len(), expected.len()); - -// for ( -// id, -// OperationSearchResult { -// op, -// in_blocks, -// in_pool, -// .. -// }, -// ) in ops.iter() -// { -// assert!(expected.contains_key(id)); -// let OperationSearchResult { -// op: ex_op, -// in_pool: ex_pool, -// in_blocks: ex_blocks, -// .. -// } = expected.get(id).unwrap(); -// assert_eq!(op.id, ex_op.id); -// assert_eq!(in_pool, ex_pool); -// assert_eq!(in_blocks.len(), ex_blocks.len()); -// for (b_id, val) in in_blocks.iter() { -// assert!(ex_blocks.contains_key(b_id)); -// assert_eq!(ex_blocks.get(b_id).unwrap(), val); -// } -// } -// ( -// pool_controller, -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// ) -// }, -// ) -// .await; -// } - -// fn get_bootgraph(operations: Vec) -> (BootstrapableGraph, BlockId, BlockId) { -// let genesis_0 = get_export_active_test_block(vec![], vec![], Slot::new(0, 0), true); -// let genesis_1 = get_export_active_test_block(vec![], vec![], Slot::new(0, 1), true); -// let p1t0 = get_export_active_test_block( -// vec![(genesis_0.block_id, 0), (genesis_1.block_id, 0)], -// vec![operations[0].clone()], -// Slot::new(1, 0), -// true, -// ); -// let p1t1 = get_export_active_test_block( -// vec![(genesis_0.block_id, 0), (genesis_1.block_id, 0)], -// vec![], -// 
Slot::new(1, 1), -// false, -// ); -// let p2t0 = get_export_active_test_block( -// vec![(p1t0.block_id, 1), (p1t1.block_id, 1)], -// vec![operations[1].clone()], -// Slot::new(2, 0), -// false, -// ); -// ( -// BootstrapableGraph { -// /// Map of active blocks, where blocks are in their exported version. -// final_blocks: vec![ -// (genesis_0.block_id, genesis_0.clone()), -// (genesis_1.block_id, genesis_1.clone()), -// (p1t0.block_id, p1t0.clone()), -// (p1t1.block_id, p1t1.clone()), -// (p2t0.block_id, p2t0.clone()), -// ] -// .into_iter() -// .collect(), -// /// Best parents hashes in each thread. -// best_parents: vec![(p2t0.block_id, 2), (p1t1.block_id, 1)], -// /// Latest final period and block hash in each thread. -// latest_final_blocks_periods: vec![ -// (genesis_0.block_id, 0u64), -// (genesis_1.block_id, 0u64), -// ], -// /// Head of the incompatibility graph. -// gi_head: vec![ -// (genesis_0.block_id, Default::default()), -// (p1t0.block_id, Default::default()), -// (p2t0.block_id, Default::default()), -// (genesis_1.block_id, Default::default()), -// (p1t0.block_id, Default::default()), -// (p2t0.block_id, Default::default()), -// ] -// .into_iter() -// .collect(), - -// /// List of maximal cliques of compatible blocks. 
-// max_cliques: vec![Clique { -// block_ids: vec![ -// genesis_0.block_id, -// p1t0.block_id, -// genesis_1.block_id, -// p1t1.block_id, -// p2t0.block_id, -// ] -// .into_iter() -// .collect(), -// fitness: 123, -// is_blockclique: true, -// }], -// }, -// p1t0.block_id, -// p2t0.block_id, -// ) -// } diff --git a/massa-consensus-worker/src/tests/scenarios_get_selection_draws.rs b/massa-consensus-worker/src/tests/scenarios_get_selection_draws.rs deleted file mode 100644 index dba75bcde76..00000000000 --- a/massa-consensus-worker/src/tests/scenarios_get_selection_draws.rs +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -use super::tools::*; -use massa_consensus_exports::ConsensusConfig; - -use massa_models::ledger_models::LedgerData; -use massa_models::{amount::Amount, slot::Slot}; -use massa_time::MassaTime; -use serial_test::serial; -use std::collections::HashMap; -use std::str::FromStr; - -#[tokio::test] -#[serial] -#[ignore] -async fn test_get_selection_draws_high_end_slot() { - // setup logging - /* - stderrlog::new() - .verbosity(4) - .timestamp(stderrlog::Timestamp::Millisecond) - .init() - .unwrap(); - */ - let cfg = ConsensusConfig { - periods_per_cycle: 2, - t0: 500.into(), - delta_f0: 3, - operation_validity_periods: 100, - genesis_timestamp: MassaTime::now(0).unwrap().saturating_add(300.into()), - ..Default::default() - }; - // define addresses use for the test - // addresses 1 and 2 both in thread 0 - //let addr_1 = random_address_on_thread(0, cfg.thread_count); - let addr_2 = random_address_on_thread(0, cfg.thread_count); - - let mut ledger = HashMap::new(); - ledger.insert( - addr_2.address, - LedgerData::new(Amount::from_str("10000").unwrap()), - ); - - consensus_without_pool_test( - cfg.clone(), - async move |protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let draws = selector_controller.get_selection(Slot::new(1, 0)); - assert!(draws.is_ok()); - - // Too high end selection 
should return an error. - let too_high_draws = selector_controller.get_selection(Slot::new(200, 0)); - assert!(too_high_draws.is_err()); - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} diff --git a/massa-consensus-worker/src/tests/scenarios_header_check.rs b/massa-consensus-worker/src/tests/scenarios_header_check.rs deleted file mode 100644 index 98554102664..00000000000 --- a/massa-consensus-worker/src/tests/scenarios_header_check.rs +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -// RUST_BACKTRACE=1 cargo test scenarios106 -- --nocapture - -use super::tools::*; -use massa_consensus_exports::ConsensusConfig; - -use massa_models::slot::Slot; -use massa_signature::KeyPair; -use massa_storage::Storage; -use serial_test::serial; - -#[tokio::test] -#[serial] -#[ignore] -async fn test_consensus_asks_for_block() { - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - t0: 500.into(), - future_block_processing_max_periods: 50, - ..ConsensusConfig::default() - }; - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - - // create test blocks - let t0s1 = create_block( - &cfg, - Slot::new(1, 0), - genesis_hashes.clone(), - &staking_keys[0], - ); - // send header for block t0s1 - protocol_controller - .receive_header(t0s1.content.header.clone()) - .await; - - validate_ask_for_block(&mut protocol_controller, t0s1.id, 1000).await; - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} - -#[tokio::test] -#[serial] -#[ignore] -async fn test_consensus_does_not_ask_for_block() { - let 
staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - t0: 32.into(), - future_block_processing_max_periods: 50, - ..ConsensusConfig::default() - }; - let mut storage = Storage::create_root(); - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let start_slot = 3; - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - - // create test blocks - let t0s1 = create_block( - &cfg, - Slot::new(1 + start_slot, 0), - genesis_hashes.clone(), - &staking_keys[0], - ); - let header = t0s1.content.header.clone(); - let id = t0s1.id; - // Send the actual block. - storage.store_block(t0s1); - protocol_controller - .receive_block(header.id, header.content.slot, storage.clone()) - .await; - - // block t0s1 is propagated - let hash_list = vec![id]; - validate_propagate_block_in_list( - &mut protocol_controller, - &hash_list, - 3000 + start_slot * 1000, - ) - .await; - - // Send the hash - protocol_controller.receive_header(header).await; - - // Consensus should not ask for the block, so the time-out should be hit. 
- validate_does_not_ask_for_block(&mut protocol_controller, &id, 10).await; - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} diff --git a/massa-consensus-worker/src/tests/scenarios_incompatibilities.rs b/massa-consensus-worker/src/tests/scenarios_incompatibilities.rs deleted file mode 100644 index 422557c0a68..00000000000 --- a/massa-consensus-worker/src/tests/scenarios_incompatibilities.rs +++ /dev/null @@ -1,311 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -// use super::tools::*; -// use massa_consensus_exports::ConsensusConfig; - -// use massa_models::{BlockId, Slot}; -// use massa_signature::KeyPair; -// use serial_test::serial; -// use std::collections::{HashSet, VecDeque}; - -// #[tokio::test] -// #[serial] -// async fn test_thread_incompatibility() { -// let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); -// let cfg = ConsensusConfig { -// t0: 200.into(), -// future_block_processing_max_periods: 50, -// ..ConsensusConfig::default() -// }; - -// consensus_without_pool_test( -// cfg.clone(), -// async move |mut protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// selector_controller| { -// let parents: Vec = consensus_command_sender -// .get_block_graph_status(None, None) -// .await -// .expect("could not get block graph status") -// .best_parents -// .iter() -// .map(|(b, _p)| *b) -// .collect(); - -// let hash_1 = create_and_test_block( -// &mut protocol_controller, -// &cfg, -// Slot::new(1, 0), -// parents.clone(), -// true, -// false, -// &staking_keys[0], -// ) -// .await; - -// let hash_2 = create_and_test_block( -// &mut protocol_controller, -// &cfg, -// Slot::new(1, 1), -// parents.clone(), -// true, -// false, -// &staking_keys[0], -// ) -// .await; - -// let hash_3 = create_and_test_block( -// &mut protocol_controller, -// &cfg, -// Slot::new(2, 0), -// parents.clone(), -// true, -// false, -// &staking_keys[0], -// 
) -// .await; - -// let status = consensus_command_sender -// .get_block_graph_status(None, None) -// .await -// .expect("could not get block graph status"); - -// if hash_1 > hash_3 { -// assert_eq!(status.best_parents[0].0, hash_3); -// } else { -// assert_eq!(status.best_parents[0].0, hash_1); -// } -// assert_eq!(status.best_parents[1].0, hash_2); - -// assert!(if let Some(h) = status.gi_head.get(&hash_3) { -// h.contains(&hash_1) -// } else { -// panic!("missing hash in gi_head") -// }); - -// assert_eq!(status.max_cliques.len(), 2); - -// for clique in status.max_cliques.clone() { -// if clique.block_ids.contains(&hash_1) && clique.block_ids.contains(&hash_3) { -// panic!("incompatible blocks in the same clique") -// } -// } - -// let mut current_period = 3; -// let mut parents = vec![hash_1, hash_2]; -// for _ in 0..3 { -// let hash = create_and_test_block( -// &mut protocol_controller, -// &cfg, -// Slot::new(current_period, 0), -// parents.clone(), -// true, -// false, -// &staking_keys[0], -// ) -// .await; -// current_period += 1; -// parents[0] = hash; -// } - -// let status = consensus_command_sender -// .get_block_graph_status(None, None) -// .await -// .expect("could not get block graph status"); - -// assert!(if let Some(h) = status.gi_head.get(&hash_3) { -// h.contains(&status.best_parents[0].0) -// } else { -// panic!("missing block in clique") -// }); - -// let mut parents = vec![status.best_parents[0].0, hash_2]; -// let mut current_period = 8; -// for _ in 0..30 { -// let b = create_block( -// &cfg, -// Slot::new(current_period, 0), -// parents.clone(), -// &staking_keys[0], -// ); -// current_period += 1; -// parents[0] = b.id; -// protocol_controller.receive_block(b.clone()).await; - -// // Note: higher timeout required. 
-// validate_propagate_block_in_list(&mut protocol_controller, &vec![b.id], 5000).await; -// } - -// let status = consensus_command_sender -// .get_block_graph_status(None, None) -// .await -// .expect("could not get block graph status"); - -// assert_eq!(status.max_cliques.len(), 1); - -// // clique should have been deleted by now -// let parents = vec![hash_3, hash_2]; -// let _ = create_and_test_block( -// &mut protocol_controller, -// &cfg, -// Slot::new(40, 0), -// parents.clone(), -// false, -// false, -// &staking_keys[0], -// ) -// .await; - -// ( -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// selector_controller, -// ) -// }, -// ) -// .await; -// } - -// #[tokio::test] -// #[serial] -// async fn test_grandpa_incompatibility() { -// let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); -// let cfg = ConsensusConfig { -// t0: 200.into(), -// future_block_processing_max_periods: 50, -// ..ConsensusConfig::default() -// }; - -// consensus_without_pool_test( -// cfg.clone(), -// async move |mut protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// selector_controller| { -// let genesis = consensus_command_sender -// .get_block_graph_status(None, None) -// .await -// .expect("could not get block graph status") -// .genesis_blocks; - -// let hash_1 = create_and_test_block( -// &mut protocol_controller, -// &cfg, -// Slot::new(1, 0), -// vec![genesis[0], genesis[1]], -// true, -// false, -// &staking_keys[0], -// ) -// .await; - -// let hash_2 = create_and_test_block( -// &mut protocol_controller, -// &cfg, -// Slot::new(1, 1), -// vec![genesis[0], genesis[1]], -// true, -// false, -// &staking_keys[0], -// ) -// .await; - -// let hash_3 = create_and_test_block( -// &mut protocol_controller, -// &cfg, -// Slot::new(2, 0), -// vec![hash_1, genesis[1]], -// true, -// false, -// &staking_keys[0], -// ) -// .await; - -// let hash_4 = create_and_test_block( -// &mut 
protocol_controller, -// &cfg, -// Slot::new(2, 1), -// vec![genesis[0], hash_2], -// true, -// false, -// &staking_keys[0], -// ) -// .await; - -// let status = consensus_command_sender -// .get_block_graph_status(None, None) -// .await -// .expect("could not get block graph status"); - -// assert!(if let Some(h) = status.gi_head.get(&hash_4) { -// h.contains(&hash_3) -// } else { -// panic!("missing block in gi_head") -// }); - -// assert_eq!(status.max_cliques.len(), 2); - -// for clique in status.max_cliques.clone() { -// if clique.block_ids.contains(&hash_3) && clique.block_ids.contains(&hash_4) { -// panic!("incompatible blocks in the same clique") -// } -// } - -// let parents: Vec = status.best_parents.iter().map(|(b, _p)| *b).collect(); -// if hash_4 > hash_3 { -// assert_eq!(parents[0], hash_3) -// } else { -// assert_eq!(parents[1], hash_4) -// } - -// let mut latest_extra_blocks = VecDeque::new(); -// for extend_i in 0..33 { -// let status = consensus_command_sender -// .get_block_graph_status(None, None) -// .await -// .expect("could not get block graph status"); -// let hash = create_and_test_block( -// &mut protocol_controller, -// &cfg, -// Slot::new(3 + extend_i, 0), -// status.best_parents.iter().map(|(b, _p)| *b).collect(), -// true, -// false, -// &staking_keys[0], -// ) -// .await; - -// latest_extra_blocks.push_back(hash); -// while latest_extra_blocks.len() > cfg.delta_f0 as usize + 1 { -// latest_extra_blocks.pop_front(); -// } -// } - -// let latest_extra_blocks: HashSet = latest_extra_blocks.into_iter().collect(); -// let status = consensus_command_sender -// .get_block_graph_status(None, None) -// .await -// .expect("could not get block graph status"); -// assert_eq!(status.max_cliques.len(), 1, "wrong cliques (len)"); -// assert_eq!( -// status.max_cliques[0] -// .block_ids -// .iter() -// .cloned() -// .collect::>(), -// latest_extra_blocks, -// "wrong cliques" -// ); - -// ( -// protocol_controller, -// consensus_command_sender, -// 
consensus_event_receiver, -// selector_controller, -// ) -// }, -// ) -// .await; -// } diff --git a/massa-consensus-worker/src/tests/scenarios_note_attack_attempt.rs b/massa-consensus-worker/src/tests/scenarios_note_attack_attempt.rs deleted file mode 100644 index ccabf914f68..00000000000 --- a/massa-consensus-worker/src/tests/scenarios_note_attack_attempt.rs +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -use super::tools::*; -use crate::start_consensus_controller; -use massa_pool_exports::test_exports::MockPoolController; - -use massa_consensus_exports::settings::ConsensusChannels; -use massa_consensus_exports::ConsensusConfig; -use massa_execution_exports::test_exports::MockExecutionController; -use massa_hash::Hash; -use massa_models::{address::Address, block::BlockId, slot::Slot}; -use massa_pos_exports::SelectorConfig; -use massa_pos_worker::start_selector_worker; -use massa_protocol_exports::test_exports::MockProtocolController; -use massa_signature::KeyPair; -use massa_storage::Storage; -use serial_test::serial; - -#[tokio::test] -#[serial] -#[ignore] -async fn test_invalid_block_notified_as_attack_attempt() { - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - t0: 32.into(), - future_block_processing_max_periods: 50, - ..ConsensusConfig::default() - }; - - let storage: Storage = Storage::create_root(); - - // mock protocol & pool - let (mut protocol_controller, protocol_command_sender, protocol_event_receiver) = - MockProtocolController::new(); - let selector_config = SelectorConfig { - thread_count: 2, - periods_per_cycle: 100, - genesis_address: Address::from_public_key(&staking_keys[0].get_public_key()), - endorsement_count: 0, - max_draw_cache: 10, - channel_size: 256, - }; - let (_selector_manager, selector_controller) = start_selector_worker(selector_config).unwrap(); - let (pool_controller, _pool_event_receiver) = MockPoolController::new_with_receiver(); - let 
(execution_controller, _execution_rx) = MockExecutionController::new_with_receiver(); - // launch consensus controller - let (consensus_command_sender, _consensus_event_receiver, _consensus_manager) = - start_consensus_controller( - cfg.clone(), - ConsensusChannels { - execution_controller, - protocol_command_sender: protocol_command_sender.clone(), - protocol_event_receiver, - pool_command_sender: pool_controller, - selector_controller, - }, - None, - storage.clone(), - 0, - ) - .await - .expect("could not start consensus controller"); - - let parents: Vec = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .best_parents - .iter() - .map(|(b, _p)| *b) - .collect(); - - // Block for a non-existent thread. - let block = create_block_with_merkle_root( - &cfg, - Hash::compute_from("different".as_bytes()), - Slot::new(1, cfg.thread_count + 1), - parents.clone(), - &staking_keys[0], - ); - let block_id = block.id; - let slot = block.content.header.content.slot; - protocol_controller - .receive_block(block_id, slot, storage.clone()) - .await; - - validate_notify_block_attack_attempt(&mut protocol_controller, block_id, 1000).await; -} - -#[tokio::test] -#[serial] -#[ignore] -async fn test_invalid_header_notified_as_attack_attempt() { - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - t0: 32.into(), - future_block_processing_max_periods: 50, - ..ConsensusConfig::default() - }; - - // mock protocol & pool - let (mut protocol_controller, protocol_command_sender, protocol_event_receiver) = - MockProtocolController::new(); - let (pool_controller, _pool_event_receiver) = MockPoolController::new_with_receiver(); - let selector_config = SelectorConfig { - thread_count: 2, - periods_per_cycle: 100, - genesis_address: Address::from_public_key(&staking_keys[0].get_public_key()), - endorsement_count: 0, - max_draw_cache: 10, - channel_size: 256, - }; - let 
(_selector_manager, selector_controller) = start_selector_worker(selector_config).unwrap(); - let (execution_controller, _execution_rx) = MockExecutionController::new_with_receiver(); - let storage: Storage = Storage::create_root(); - // launch consensus controller - let (consensus_command_sender, _consensus_event_receiver, _consensus_manager) = - start_consensus_controller( - cfg.clone(), - ConsensusChannels { - execution_controller, - protocol_command_sender: protocol_command_sender.clone(), - protocol_event_receiver, - pool_command_sender: pool_controller, - selector_controller, - }, - None, - storage, - 0, - ) - .await - .expect("could not start consensus controller"); - - let parents: Vec = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .best_parents - .iter() - .map(|(b, _p)| *b) - .collect(); - - // Block for a non-existent thread. - let block = create_block_with_merkle_root( - &cfg, - Hash::compute_from("different".as_bytes()), - Slot::new(1, cfg.thread_count + 1), - parents.clone(), - &staking_keys[0], - ); - protocol_controller - .receive_header(block.content.header) - .await; - - validate_notify_block_attack_attempt(&mut protocol_controller, block.id, 1000).await; -} diff --git a/massa-consensus-worker/src/tests/scenarios_operations_check.rs b/massa-consensus-worker/src/tests/scenarios_operations_check.rs deleted file mode 100644 index 8d41645e1b7..00000000000 --- a/massa-consensus-worker/src/tests/scenarios_operations_check.rs +++ /dev/null @@ -1,203 +0,0 @@ -// // Copyright (c) 2022 MASSA LABS -// use super::tools::*; -// use massa_consensus_exports::ConsensusConfig; - -// use massa_models::ledger_models::LedgerData; -// use massa_models::prehash::Set; -// use massa_models::{Address, Amount, Slot}; -// use massa_signature::KeyPair; -// use massa_time::MassaTime; -// use serial_test::serial; -// use std::collections::HashMap; -// use std::str::FromStr; - -// #[tokio::test] -// 
#[serial] -// async fn test_operations_check() { -// // setup logging -// /* -// stderrlog::new() -// .verbosity(4) -// .timestamp(stderrlog::Timestamp::Millisecond) -// .init() -// .unwrap(); -// */ -// let thread_count = 2; - -// let (address_1, keypair_1) = random_address_on_thread(0, thread_count).into(); -// let (address_2, keypair_2) = random_address_on_thread(1, thread_count).into(); - -// assert_eq!(1, address_2.get_thread(thread_count)); -// let mut ledger = HashMap::new(); -// ledger.insert(address_1, LedgerData::new(Amount::from_str("5").unwrap())); - -// let cfg = ConsensusConfig { -// t0: 1000.into(), -// future_block_processing_max_periods: 50, -// operation_validity_periods: 10, -// genesis_timestamp: MassaTime::now(0).unwrap().saturating_sub(10000.into()), -// endorsement_count: 0, -// ..ConsensusConfig::default_with_staking_keys_and_ledger(&[keypair_1.clone()], &ledger) -// }; - -// consensus_without_pool_with_storage_test( -// cfg.clone(), -// async move |mut storage, -// mut protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// selector_controller| { -// let genesis_ids = consensus_command_sender -// .get_block_graph_status(None, None) -// .await -// .expect("could not get block graph status") -// .genesis_blocks; - -// // Valid block A sending 5 from addr1 to addr2 + reward 1 to addr1 -// let operation_1 = create_transaction(&keypair_1, address_2, 5, 5, 1); -// storage.store_operations(vec![operation_1.clone()]); -// let block_a = create_block_with_operations( -// &cfg, -// Slot::new(1, 0), -// &genesis_ids, -// &keypair_1, -// vec![operation_1.clone()], -// ); -// propagate_block(&mut protocol_controller, block_a.clone(), true, 150).await; - -// // assert address 1 has 1 coin at blocks (A, genesis_ids[1]) (see #269) -// let mut set = Set::
::default(); -// set.insert(address_1); -// let res = consensus_command_sender -// .get_addresses_info(set) -// .await -// .unwrap() -// .get(&address_1) -// .unwrap() -// .ledger_info -// .candidate_ledger_info; -// assert_eq!(res.balance, Amount::from_str("1").unwrap()); - -// // receive block b with invalid operation (not enough coins) -// let operation_2 = create_transaction(&keypair_2, address_1, 10, 8, 1); -// storage.store_operations(vec![operation_2.clone()]); -// let block_2b = create_block_with_operations( -// &cfg, -// Slot::new(1, 1), -// &vec![block_a.id, genesis_ids[1]], -// &keypair_2, -// vec![operation_2], -// ); -// propagate_block(&mut protocol_controller, block_2b, false, 1000).await; - -// // receive empty block b -// let block_b = create_block_with_operations( -// &cfg, -// Slot::new(1, 1), -// &vec![block_a.id, genesis_ids[1]], -// &keypair_1, -// vec![], -// ); -// propagate_block(&mut protocol_controller, block_b.clone(), true, 150).await; - -// // assert address 2 has 5 coins at block B -// let mut set = Set::
::default(); -// set.insert(address_2); -// let res = consensus_command_sender -// .get_addresses_info(set) -// .await -// .unwrap() -// .get(&address_2) -// .unwrap() -// .ledger_info -// .candidate_ledger_info; -// assert_eq!(res.balance, Amount::from_str("5").unwrap()); - -// // receive block with reused operation -// let block_1c = create_block_with_operations( -// &cfg, -// Slot::new(1, 0), -// &vec![block_a.id, block_b.id], -// &keypair_1, -// vec![operation_1.clone()], -// ); -// propagate_block(&mut protocol_controller, block_1c.clone(), false, 1000).await; - -// ( -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// selector_controller, -// ) -// }, -// ) -// .await; -// } - -// #[tokio::test] -// #[serial] -// async fn test_execution_check() { -// let (address_1, keypair_1) = random_address().into(); - -// let mut ledger = HashMap::new(); -// ledger.insert(address_1, LedgerData::new(Amount::from_str("5").unwrap())); - -// let staking_keys: Vec = vec![keypair_1.clone()]; -// let cfg = ConsensusConfig { -// t0: 1000.into(), -// future_block_processing_max_periods: 50, -// operation_validity_periods: 10, -// genesis_key: keypair_1.clone(), -// genesis_timestamp: MassaTime::now(0).unwrap().saturating_sub(10000.into()), -// endorsement_count: 0, -// ..ConsensusConfig::default_with_staking_keys_and_ledger(&staking_keys, &ledger) -// }; - -// consensus_without_pool_with_storage_test( -// cfg.clone(), -// async move |mut storage, -// mut protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// selector_controller| { -// let genesis_ids = consensus_command_sender -// .get_block_graph_status(None, None) -// .await -// .expect("could not get block graph status") -// .genesis_blocks; - -// // Valid block A executing some bytecode and spending 2 coins. 
-// let operation_1 = create_executesc(&keypair_1, 5, 5, Default::default(), 1, 2, 1); -// storage.store_operations(vec![operation_1.clone()]); -// let block_a = create_block_with_operations( -// &cfg, -// Slot::new(1, 0), -// &genesis_ids, -// &keypair_1, -// vec![operation_1.clone()], -// ); -// propagate_block(&mut protocol_controller, block_a, true, 150).await; - -// // assert the `coins` argument as been deducted from the balance of address 1. -// let mut set = Set::
::default(); -// set.insert(address_1); -// let res = consensus_command_sender -// .get_addresses_info(set) -// .await -// .unwrap() -// .get(&address_1) -// .unwrap() -// .ledger_info -// .candidate_ledger_info; -// assert_eq!(res.balance, Amount::from_str("3").unwrap()); - -// ( -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// selector_controller, -// ) -// }, -// ) -// .await; -// } diff --git a/massa-consensus-worker/src/tests/scenarios_parents.rs b/massa-consensus-worker/src/tests/scenarios_parents.rs deleted file mode 100644 index 1dd8edfcabb..00000000000 --- a/massa-consensus-worker/src/tests/scenarios_parents.rs +++ /dev/null @@ -1,206 +0,0 @@ -// Copyright (c) 2022 MASSA LABS -use super::tools::*; -use massa_consensus_exports::ConsensusConfig; - -use massa_models::slot::Slot; -use massa_signature::KeyPair; -use serial_test::serial; - -#[tokio::test] -#[serial] -#[ignore] -async fn test_parent_in_the_future() { - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - t0: 32.into(), - future_block_processing_max_periods: 50, - ..ConsensusConfig::default() - }; - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - - // Parent, in the future. 
- let t0s1 = create_block( - &cfg, - Slot::new(4, 0), - genesis_hashes.clone(), - &staking_keys[0], - ); - - let _ = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(5, 0), - vec![t0s1.id], - false, - false, - &staking_keys[0], - ) - .await; - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} - -#[tokio::test] -#[serial] -#[ignore] -async fn test_parents() { - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - t0: 32.into(), - future_block_processing_max_periods: 50, - ..ConsensusConfig::default() - }; - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - - // generate two normal blocks in each thread - let hasht1s1 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(1, 0), - genesis_hashes.clone(), - true, - false, - &staking_keys[0], - ) - .await; - - let _ = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(1, 1), - genesis_hashes.clone(), - true, - false, - &staking_keys[0], - ) - .await; - - let _ = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(3, 0), - vec![hasht1s1, genesis_hashes[0]], - false, - false, - &staking_keys[0], - ) - .await; - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} - -#[tokio::test] -#[serial] -#[ignore] -async fn test_parents_in_incompatible_cliques() { - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - t0: 32.into(), - future_block_processing_max_periods: 50, - ..ConsensusConfig::default() - }; - - 
consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - - let hasht0s1 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(1, 0), - genesis_hashes.clone(), - true, - false, - &staking_keys[0], - ) - .await; - - let hasht0s2 = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(2, 0), - genesis_hashes.clone(), - true, - false, - &staking_keys[0], - ) - .await; - - // from that point we have two incompatible clique - - let _ = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(1, 1), - vec![hasht0s1, genesis_hashes[1]], - true, - false, - &staking_keys[0], - ) - .await; - - // Block with incompatible parents. - let _ = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(2, 1), - vec![hasht0s1, hasht0s2], - false, - false, - &staking_keys[0], - ) - .await; - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} diff --git a/massa-consensus-worker/src/tests/scenarios_pool_commands.rs b/massa-consensus-worker/src/tests/scenarios_pool_commands.rs deleted file mode 100644 index 46b08bd7d85..00000000000 --- a/massa-consensus-worker/src/tests/scenarios_pool_commands.rs +++ /dev/null @@ -1,460 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -//TODO: Still needed ? 
-// use super::tools::*; -// use massa_consensus_exports::ConsensusConfig; - -// use massa_graph::BootstrapableGraph; -// use massa_models::clique::Clique; -// use massa_models::ledger_models::LedgerData; -// use massa_models::{Amount, BlockId, Slot, WrappedOperation}; -// use massa_signature::KeyPair; -// use massa_time::MassaTime; -// use serial_test::serial; -// use std::str::FromStr; - -// #[tokio::test] -// #[serial] -// async fn test_update_current_slot_cmd_notification() { -// let cfg = ConsensusConfig { -// t0: 1000.into(), -// thread_count: 1, -// genesis_timestamp: MassaTime::now(0).unwrap().checked_add(1000.into()).unwrap(), -// ..ConsensusConfig::default_with_paths() -// }; - -// let timeout = 150; - -// consensus_pool_test( -// cfg.clone(), -// None, -// async move |mut pool_controller, -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver| { -// let slot_notification_filter = |cmd| match cmd { -// massa_pool::PoolCommand::UpdateCurrentSlot(slot) => { -// Some((slot, MassaTime::now(0).unwrap())) -// } -// _ => None, -// }; - -// // wait for UpdateCurrentSlot pool command -// if let Some((slot_cmd, rec_time)) = pool_controller -// .wait_command(1500.into(), slot_notification_filter) -// .await -// { -// assert_eq!(slot_cmd, Slot::new(0, 0)); -// if rec_time > cfg.genesis_timestamp { -// assert!( -// rec_time.saturating_sub(cfg.genesis_timestamp) < MassaTime::from(timeout) -// ) -// } else { -// assert!( -// cfg.genesis_timestamp.saturating_sub(rec_time) < MassaTime::from(timeout) -// ) -// } -// } - -// // wait for next UpdateCurrentSlot pool command -// if let Some((slot_cmd, rec_time)) = pool_controller -// .wait_command(500.into(), slot_notification_filter) -// .await -// { -// assert_eq!(slot_cmd, Slot::new(0, 1)); -// if rec_time > cfg.genesis_timestamp { -// assert!( -// rec_time.saturating_sub(cfg.genesis_timestamp.saturating_add(cfg.t0)) -// < MassaTime::from(timeout) -// ); -// } else { -// assert!( -// 
cfg.genesis_timestamp -// .saturating_add(cfg.t0) -// .saturating_sub(rec_time) -// < MassaTime::from(timeout) -// ); -// } -// } -// ( -// pool_controller, -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// ) -// }, -// ) -// .await; -// } - -// #[tokio::test] -// #[serial] -// async fn test_update_latest_final_block_cmd_notification() { -// let cfg = ConsensusConfig { -// t0: 1000.into(), -// delta_f0: 2, -// ..ConsensusConfig::default_with_paths() -// }; - -// consensus_pool_test( -// cfg.clone(), -// None, -// None, -// async move |mut pool_controller, -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver| { -// // UpdateLatestFinalPeriods pool command filter -// let update_final_notification_filter = |cmd| match cmd { -// massa_pool::PoolCommand::UpdateLatestFinalPeriods(periods) => Some(periods), -// PoolCommand::GetOperationBatch { response_tx, .. } => { -// response_tx.send(Vec::new()).unwrap(); -// None -// } -// PoolCommand::GetEndorsements { response_tx, .. 
} => { -// response_tx.send(Vec::new()).unwrap(); -// None -// } -// _ => None, -// }; -// // wait for initial final periods notification -// let final_periods = pool_controller -// .wait_command(1000.into(), update_final_notification_filter) -// .await; -// assert_eq!(final_periods, Some(vec![0, 0])); - -// // wait for next final periods notification -// let final_periods = pool_controller -// .wait_command( -// (cfg.t0.to_millis() * 3).into(), -// update_final_notification_filter, -// ) -// .await; -// assert_eq!(final_periods, Some(vec![1, 0])); -// ( -// pool_controller, -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// ) -// }, -// ) -// .await; -// } - -// #[tokio::test] -// #[serial] -// #[ignore] -// async fn test_new_final_ops() { -// let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); -// let cfg = ConsensusConfig { -// t0: 1000.into(), -// delta_f0: 2, -// genesis_timestamp: MassaTime::now(0).unwrap(), -// ..ConsensusConfig::default() -// }; - -// // define addresses use for the test -// // addresses a and b both in thread 0 - -// let (address_a, keypair_a) = random_address_on_thread(0, cfg.thread_count).into(); -// let (address_b, _) = random_address_on_thread(0, cfg.thread_count).into(); - -// let boot_ledger = ConsensusLedgerSubset( -// vec![(address_a, LedgerData::new(Amount::from_str("100").unwrap()))] -// .into_iter() -// .collect(), -// ); -// let op = create_transaction(&keypair_a, address_b, 1, 10, 1); -// let (boot_graph, mut p0, mut p1) = get_bootgraph(op.clone(), boot_ledger); - -// consensus_pool_test( -// cfg.clone(), -// None, -// Some(boot_graph), -// async move |mut pool_controller, -// mut protocol_controller, -// consensus_command_sender, -// consensus_event_receiver| { -// p1 = create_and_test_block( -// &mut protocol_controller, -// &cfg, -// Slot::new(1, 1), -// vec![p0, p1], -// true, -// false, -// &staking_keys[0], -// ) -// .await; - -// p0 = create_and_test_block( -// 
&mut protocol_controller, -// &cfg, -// Slot::new(2, 0), -// vec![p0, p1], -// true, -// false, -// &staking_keys[0], -// ) -// .await; - -// create_and_test_block( -// &mut protocol_controller, -// &cfg, -// Slot::new(2, 1), -// vec![p0, p1], -// true, -// false, -// &staking_keys[0], -// ) -// .await; -// // UpdateLatestFinalPeriods pool command filter -// let new_final_ops_filter = |cmd| match cmd { -// PoolCommand::FinalOperations(ops) => Some(ops), -// _ => None, -// }; - -// // wait for initial final periods notification -// let final_ops = pool_controller -// .wait_command(300.into(), new_final_ops_filter) -// .await; -// if let Some(finals) = final_ops { -// assert!(finals.contains_key(&op.id)); -// assert_eq!(finals.get(&op.id), Some(&(10, 0))) -// } else { -// panic!("no final ops") -// } -// ( -// pool_controller, -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// ) -// }, -// ) -// .await; -// } - -// #[tokio::test] -// #[serial] -// #[ignore] -// async fn test_max_attempts_get_operations() { -// let cfg = ConsensusConfig { -// t0: 1000.into(), -// genesis_timestamp: MassaTime::now(0).unwrap().checked_sub(1000.into()).unwrap(), -// delta_f0: 2, -// ..ConsensusConfig::default_with_paths() -// }; -// // define addresses use for the test -// // addresses a and b both in thread 0 -// let (address_a, keypair_a) = random_address_on_thread(0, cfg.thread_count).into(); -// let (address_b, _) = random_address_on_thread(0, cfg.thread_count).into(); - -// let boot_ledger = ConsensusLedgerSubset( -// vec![(address_a, LedgerData::new(Amount::from_str("100").unwrap()))] -// .into_iter() -// .collect(), -// ); -// let op = create_transaction(&keypair_a, address_b, 1, 10, 1); -// let (boot_graph, _p0, _p1) = get_bootgraph(op.clone(), boot_ledger); - -// consensus_pool_test( -// cfg.clone(), -// None, -// Some(boot_graph), -// async move |mut pool_controller, -// protocol_controller, -// consensus_command_sender, -// 
consensus_event_receiver| { -// // Test that consensus keeps trying to fill the block, -// // until the max number of attempts has been reached. -// let mut attempts = 0; -// let mut slot = None; -// while attempts != cfg.max_operations_fill_attempts { -// let get_operations_batch_filter = |cmd| match cmd { -// PoolCommand::GetOperationBatch { -// response_tx, -// target_slot, -// .. -// } => Some((response_tx, target_slot)), -// PoolCommand::GetEndorsements { response_tx, .. } => { -// response_tx.send(Vec::new()).unwrap(); -// None -// } -// _ => None, -// }; - -// let (response_tx, target_slot) = pool_controller -// .wait_command(1000.into(), get_operations_batch_filter) -// .await -// .expect("No response chan and target slot."); - -// // Test that the batch requests are still for the same slot. -// if let Some(slot) = slot { -// assert_eq!(slot, target_slot); -// } else { -// slot = Some(target_slot); -// } - -// // Send a full batch back. -// response_tx -// .send(vec![(op.clone(), 10)]) -// .expect("Failed to send empty batch."); -// attempts += 1; -// } - -// // The next command should be a slot update. -// let slot_filter = |cmd| match cmd { -// PoolCommand::UpdateCurrentSlot(slot) => Some(slot), -// PoolCommand::GetEndorsements { response_tx, .. 
} => { -// response_tx.send(Vec::new()).unwrap(); -// None -// } -// _ => None, -// }; - -// pool_controller.wait_command(3000.into(), slot_filter).await; -// ( -// pool_controller, -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// ) -// }, -// ) -// .await; -// } - -// #[tokio::test] -// #[serial] -// #[ignore] -// async fn test_max_batch_size_get_operations() { -// let cfg = ConsensusConfig { -// t0: 1000.into(), -// genesis_timestamp: MassaTime::now(0).unwrap().checked_sub(1000.into()).unwrap(), -// delta_f0: 2, -// ..ConsensusConfig::default_with_paths() -// }; -// // define addresses use for the test -// // addresses a and b both in thread 0 -// let (address_a, keypair_a) = random_address_on_thread(0, cfg.thread_count).into(); -// let (address_b, _) = random_address_on_thread(0, cfg.thread_count).into(); - -// let boot_ledger = ConsensusLedgerSubset( -// vec![(address_a, LedgerData::new(Amount::from_str("100").unwrap()))] -// .into_iter() -// .collect(), -// ); -// let op = create_transaction(&keypair_a, address_b, 1, 10, 1); -// let (boot_graph, _p0, _p1) = get_bootgraph(op.clone(), boot_ledger); - -// consensus_pool_test( -// cfg.clone(), -// None, -// Some(boot_graph), -// async move |mut pool_controller, -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver| { -// // Test that consensus stops trying to fill the block, -// // once a non-full batch has been received. -// let get_operations_batch_filter = |cmd| match cmd { -// PoolCommand::GetOperationBatch { -// response_tx, -// target_slot, -// .. -// } => Some((response_tx, target_slot)), -// PoolCommand::GetEndorsements { response_tx, .. } => { -// response_tx.send(Vec::new()).unwrap(); -// None -// } -// _ => None, -// }; - -// let (response_tx, target_slot) = pool_controller -// .wait_command(1000.into(), get_operations_batch_filter) -// .await -// .expect("No response chan and target slot."); - -// // Send a non-full batch back. 
-// response_tx -// .send(vec![(op.clone(), 10)]) -// .expect("Failed to send non-full batch."); - -// // The next command should be a slot update. -// let slot_filter = |cmd| match cmd { -// PoolCommand::UpdateCurrentSlot(slot) => Some(slot), -// _ => None, -// }; - -// let slot_update = pool_controller -// .wait_command(3000.into(), slot_filter) -// .await -// .expect("Not slot update received."); - -// // Test that the update is for the slot -// // after the one for the just created block. -// assert_eq!(slot_update.period, target_slot.period + 1); -// ( -// pool_controller, -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// ) -// }, -// ) -// .await; -// } - -// fn get_bootgraph( -// operation: WrappedOperation, -// ledger: ConsensusLedgerSubset, -// ) -> (BootstrapableGraph, BlockId, BlockId) { -// let genesis_0 = get_export_active_test_block(vec![], vec![], Slot::new(0, 0), true); -// let genesis_1 = get_export_active_test_block(vec![], vec![], Slot::new(0, 1), true); -// let p1t0 = get_export_active_test_block( -// vec![(genesis_0.block_id, 0), (genesis_1.block_id, 0)], -// vec![operation], -// Slot::new(1, 0), -// false, -// ); -// ( -// BootstrapableGraph { -// /// Map of active blocks, where blocks are in their exported version. -// active_blocks: vec![ -// (genesis_0.block_id, genesis_0.clone()), -// (genesis_1.block_id, genesis_1.clone()), -// (p1t0.block_id, p1t0.clone()), -// ] -// .into_iter() -// .collect(), -// /// Best parents hash in each thread. -// best_parents: vec![(p1t0.block_id, 1), (genesis_1.block_id, 0)], -// /// Latest final period and block hash in each thread. -// latest_final_blocks_periods: vec![ -// (genesis_0.block_id, 0u64), -// (genesis_1.block_id, 0u64), -// ], -// /// Head of the incompatibility graph. 
-// gi_head: vec![ -// (genesis_0.block_id, Default::default()), -// (p1t0.block_id, Default::default()), -// (genesis_1.block_id, Default::default()), -// ] -// .into_iter() -// .collect(), - -// /// List of maximal cliques of compatible blocks. -// max_cliques: vec![Clique { -// block_ids: vec![genesis_0.block_id, p1t0.block_id, genesis_1.block_id] -// .into_iter() -// .collect(), -// fitness: 1111, -// is_blockclique: true, -// }], -// ledger, -// }, -// p1t0.block_id, -// genesis_1.block_id, -// ) -// } diff --git a/massa-consensus-worker/src/tests/scenarios_pruning.rs b/massa-consensus-worker/src/tests/scenarios_pruning.rs deleted file mode 100644 index 943d35d387a..00000000000 --- a/massa-consensus-worker/src/tests/scenarios_pruning.rs +++ /dev/null @@ -1,196 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -use super::tools::*; -use massa_consensus_exports::ConsensusConfig; - -use massa_models::{block::BlockId, slot::Slot}; -use massa_signature::KeyPair; -use serial_test::serial; - -#[tokio::test] -#[serial] -#[ignore] -async fn test_pruning_of_discarded_blocks() { - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - t0: 32.into(), - future_block_processing_max_periods: 50, - ..ConsensusConfig::default() - }; - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let parents: Vec = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .best_parents - .iter() - .map(|(b, _p)| *b) - .collect(); - - // Send more bad blocks than the max number of cached discarded. - for i in 0..(cfg.max_discarded_blocks + 5) as u64 { - // Too far into the future. 
- let _ = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(100000000 + i, 0), - parents.clone(), - false, - false, - &staking_keys[0], - ) - .await; - } - - let status = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status"); - assert!(status.discarded_blocks.len() <= cfg.max_discarded_blocks); - - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} - -#[tokio::test] -#[serial] -#[ignore] -async fn test_pruning_of_awaiting_slot_blocks() { - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - t0: 32.into(), - future_block_processing_max_periods: 50, - ..ConsensusConfig::default() - }; - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let parents: Vec = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .best_parents - .iter() - .map(|(b, _p)| *b) - .collect(); - - // Send more blocks in the future than the max number of future processing blocks. - for i in 0..(cfg.max_future_processing_blocks + 5) as u64 { - // Too far into the future. 
- let _ = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(10 + i, 0), - parents.clone(), - false, - false, - &staking_keys[0], - ) - .await; - } - - let status = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status"); - assert!(status.discarded_blocks.len() <= cfg.max_future_processing_blocks); - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} - -#[tokio::test] -#[serial] -#[ignore] -async fn test_pruning_of_awaiting_dependencies_blocks_with_discarded_dependency() { - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - t0: 200.into(), - future_block_processing_max_periods: 50, - ..ConsensusConfig::default() - }; - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let parents: Vec = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .best_parents - .iter() - .map(|(b, _p)| *b) - .collect(); - - // Too far into the future. - let bad_block = - create_block(&cfg, Slot::new(10000, 0), parents.clone(), &staking_keys[0]); - - for i in 1..4 { - // Sent several headers with the bad parent as dependency. - let _ = create_and_test_block( - &mut protocol_controller, - &cfg, - Slot::new(i, 0), - vec![bad_block.id, parents.clone()[0]], - false, - false, - &staking_keys[0], - ) - .await; - } - - // Now, send the bad parent. - protocol_controller - .receive_header(bad_block.content.header) - .await; - validate_notpropagate_block_in_list(&mut protocol_controller, &vec![bad_block.id], 10) - .await; - - // Eventually, all blocks will be discarded due to their bad parent. - // Note the parent too much in the future will not be discarded, but ignored. 
- loop { - let status = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status"); - if status.discarded_blocks.len() == 3 { - break; - } - } - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} diff --git a/massa-consensus-worker/src/tests/scenarios_reward_split.rs b/massa-consensus-worker/src/tests/scenarios_reward_split.rs deleted file mode 100644 index 8fe0f8313e0..00000000000 --- a/massa-consensus-worker/src/tests/scenarios_reward_split.rs +++ /dev/null @@ -1,295 +0,0 @@ -// // Copyright (c) 2022 MASSA LABS - -// use super::tools::*; -// use massa_consensus_exports::ConsensusConfig; -// use massa_models::ledger_models::LedgerData; -// use massa_models::wrapped::WrappedContent; -// use massa_models::{Address, Amount, BlockId, Endorsement, EndorsementSerializer, Slot}; -// use massa_pos_exports::Selection; -// use massa_time::MassaTime; -// use serial_test::serial; -// use std::collections::HashMap; -// use std::str::FromStr; - -// #[tokio::test] -// #[serial] -// async fn test_reward_split() { -// // setup logging -// // stderrlog::new() -// // .verbosity(2) -// // .timestamp(stderrlog::Timestamp::Millisecond) -// // .init() -// // .unwrap(); -// let thread_count = 2; - -// // Create addresses -// let (address_a, keypair_a) = random_address_on_thread(0, thread_count).into(); -// let (address_b, keypair_b) = random_address_on_thread(0, thread_count).into(); - -// let mut ledger = HashMap::new(); -// ledger.insert(address_a, LedgerData::new(Amount::from_str("10").unwrap())); -// ledger.insert(address_b, LedgerData::new(Amount::from_str("10").unwrap())); -// let staking_keys = vec![keypair_a.clone(), keypair_b.clone()]; -// let init_time: MassaTime = 1000.into(); -// let cfg = ConsensusConfig { -// endorsement_count: 5, -// genesis_timestamp: MassaTime::now(0).unwrap().saturating_add(init_time), -// max_block_size: 2000, -// 
max_operations_per_block: 5000, -// operation_validity_periods: 10, -// periods_per_cycle: 3, -// t0: 500.into(), -// ..ConsensusConfig::default_with_staking_keys_and_ledger(&staking_keys, &ledger) -// }; - -// consensus_without_pool_test( -// cfg.clone(), -// async move |mut protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// selector_controller| { -// // Check initial balances. -// let addresses_state = consensus_command_sender -// .get_addresses_info(vec![address_a, address_b].into_iter().collect()) -// .await -// .unwrap(); - -// let addresse_a_state = addresses_state.get(&address_a).unwrap(); -// assert_eq!( -// addresse_a_state.ledger_info.candidate_ledger_info.balance, -// Amount::from_str("10").unwrap() -// ); - -// let addresse_b_state = addresses_state.get(&address_b).unwrap(); -// assert_eq!( -// addresse_b_state.ledger_info.candidate_ledger_info.balance, -// Amount::from_str("10").unwrap() -// ); - -// let draws: Selection = selector_controller.get_selection(Slot::new(1, 0)).unwrap(); - -// let slot_one_block_addr = draws.producer; -// let slot_one_endorsements_addrs = draws.endorsements; - -// let slot_one_keypair = if slot_one_block_addr == address_a { -// keypair_a.clone() -// } else { -// keypair_b.clone() -// }; - -// // Create, and propagate, block 1. 
-// let parents: Vec = consensus_command_sender -// .get_block_graph_status(None, None) -// .await -// .unwrap() -// .best_parents -// .iter() -// .map(|(b, _p)| *b) -// .collect(); - -// let b1 = create_block(&cfg, Slot::new(1, 0), parents, &slot_one_keypair); - -// propagate_block( -// &mut protocol_controller, -// b1.clone(), -// true, -// init_time -// .saturating_add(cfg.t0.saturating_mul(2)) -// .to_millis(), -// ) -// .await; - -// let slot_two_block_addr = selector_controller.get_producer(Slot::new(2, 0)).unwrap(); - -// let slot_two_keypair = if slot_two_block_addr == address_a { -// keypair_a.clone() -// } else { -// keypair_b.clone() -// }; - -// // Create, and propagate, block 2. -// let parents: Vec = consensus_command_sender -// .get_block_graph_status(None, None) -// .await -// .unwrap() -// .best_parents -// .iter() -// .map(|(b, _p)| *b) -// .collect(); -// assert!(parents.contains(&b1.id)); - -// let mut b2 = create_block(&cfg, Slot::new(2, 0), parents, &slot_two_keypair); - -// // Endorsements in block 2. - -// // Creator of second block endorses the first. -// let index = slot_one_endorsements_addrs -// .iter() -// .position(|&addr| { -// addr == Address::from_public_key(&slot_two_keypair.get_public_key()) -// }) -// .unwrap() as u32; -// let content = Endorsement { -// slot: Slot::new(1, 0), -// index, -// endorsed_block: b1.id, -// }; -// let ed_1 = Endorsement::new_wrapped( -// content.clone(), -// EndorsementSerializer::new(), -// &slot_two_keypair, -// ) -// .unwrap(); - -// // Creator of first block endorses the first. 
-// let index = slot_one_endorsements_addrs -// .iter() -// .position(|&addr| { -// addr == Address::from_public_key(&slot_one_keypair.get_public_key()) -// }) -// .unwrap() as u32; -// let content = Endorsement { -// slot: Slot::new(1, 0), -// index, -// endorsed_block: b1.id, -// }; -// let ed_2 = Endorsement::new_wrapped( -// content.clone(), -// EndorsementSerializer::new(), -// &slot_one_keypair, -// ) -// .unwrap(); - -// // Creator of second block endorses the first, again. -// let index = slot_one_endorsements_addrs -// .iter() -// .position(|&addr| { -// addr == Address::from_public_key(&slot_two_keypair.get_public_key()) -// }) -// .unwrap() as u32; -// let content = Endorsement { -// slot: Slot::new(1, 0), -// index, -// endorsed_block: b1.id, -// }; -// let ed_3 = Endorsement::new_wrapped( -// content.clone(), -// EndorsementSerializer::new(), -// &slot_two_keypair, -// ) -// .unwrap(); - -// // Add endorsements to block. -// b2.content.header.content.endorsements = vec![ed_1, ed_2, ed_3]; - -// // Propagate block. -// tokio::time::sleep(cfg.t0.to_duration()).await; -// propagate_block(&mut protocol_controller, b2, true, 300).await; - -// // Check balances after second block. 
-// let addresses_state = consensus_command_sender -// .get_addresses_info(vec![address_a, address_b].into_iter().collect()) -// .await -// .unwrap(); - -// let third = cfg -// .block_reward -// .checked_div_u64((3 * (1 + cfg.endorsement_count)).into()) -// .unwrap(); - -// let expected_a = Amount::from_str("10") -// .unwrap() // initial ledger -// .saturating_add(if keypair_a.to_bytes() == slot_one_keypair.to_bytes() { -// // block 1 reward -// cfg.block_reward -// .checked_mul_u64(1) -// .unwrap() -// .checked_div_u64((1 + cfg.endorsement_count).into()) -// .unwrap() -// .saturating_sub(third.checked_mul_u64(0).unwrap()) -// // endorsements reward -// .saturating_add( -// third // parent in ed 1 -// .saturating_add(third) // creator of ed 2 -// .saturating_add(third) // parent in ed 2 -// .saturating_add(third), // parent in ed 3 -// ) -// } else { -// Default::default() -// }) -// .saturating_add(if keypair_a.to_bytes() == slot_two_keypair.to_bytes() { -// // block 2 creation reward -// cfg.block_reward -// .checked_mul_u64(1 + 3) -// .unwrap() -// .checked_div_u64((1 + cfg.endorsement_count).into()) -// .unwrap() -// .saturating_sub(third.checked_mul_u64(2 * 3).unwrap()) -// // endorsement rewards -// .saturating_add( -// third // creator of ed 1 -// .saturating_add(third), // creator of ed 3 -// ) -// } else { -// Default::default() -// }); - -// let expected_b = Amount::from_str("10") -// .unwrap() // initial ledger -// .saturating_add(if keypair_b.to_bytes() == slot_one_keypair.to_bytes() { -// // block 1 reward -// cfg.block_reward -// .checked_mul_u64(1) -// .unwrap() -// .checked_div_u64((1 + cfg.endorsement_count).into()) -// .unwrap() -// .saturating_sub(third.checked_mul_u64(0).unwrap()) -// // endorsements reward -// .saturating_add( -// third // parent in ed 1 -// .saturating_add(third) // creator of ed 2 -// .saturating_add(third) // parent in ed 2 -// .saturating_add(third), // parent in ed 3 -// ) -// } else { -// Default::default() -// }) -// 
.saturating_add(if keypair_b.to_bytes() == slot_two_keypair.to_bytes() { -// // block 2 creation reward -// cfg.block_reward -// .checked_mul_u64(1 + 3) -// .unwrap() -// .checked_div_u64((1 + cfg.endorsement_count).into()) -// .unwrap() -// .saturating_sub(third.checked_mul_u64(2 * 3).unwrap()) -// // endorsement rewards -// .saturating_add( -// third // creator of ed 1 -// .saturating_add(third), // creator of ed 3 -// ) -// } else { -// Default::default() -// }); - -// let state_a = addresses_state.get(&address_a).unwrap(); -// assert_eq!( -// state_a.ledger_info.candidate_ledger_info.balance, -// expected_a -// ); - -// let state_b = addresses_state.get(&address_b).unwrap(); -// assert_eq!( -// state_b.ledger_info.candidate_ledger_info.balance, -// expected_b -// ); - -// ( -// protocol_controller, -// consensus_command_sender, -// consensus_event_receiver, -// selector_controller, -// ) -// }, -// ) -// .await; -// } diff --git a/massa-consensus-worker/src/tests/scenarios_send_block.rs b/massa-consensus-worker/src/tests/scenarios_send_block.rs deleted file mode 100644 index c2201b0eb1a..00000000000 --- a/massa-consensus-worker/src/tests/scenarios_send_block.rs +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -// RUST_BACKTRACE=1 cargo test scenarios106 -- --nocapture - -use super::tools::*; -use massa_consensus_exports::ConsensusConfig; - -use massa_models::slot::Slot; -use massa_signature::KeyPair; -use massa_storage::Storage; -use serial_test::serial; - -#[tokio::test] -#[serial] -#[ignore] -async fn test_consensus_sends_block_to_peer_who_asked_for_it() { - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - t0: 32.into(), - future_block_processing_max_periods: 50, - ..ConsensusConfig::default() - }; - let mut storage = Storage::create_root(); - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - 
selector_controller| { - let start_slot = 3; - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - - // create test blocks - let slot = Slot::new(1 + start_slot, 0); - let draw = selector_controller.get_selection(slot).unwrap().producer; - let creator = get_creator_for_draw(&draw, &staking_keys.clone()); - let t0s1 = create_block( - &cfg, - Slot::new(1 + start_slot, 0), - genesis_hashes.clone(), - &creator, - ); - - let t0s1_id = t0s1.id; - let t0s1_slot = t0s1.content.header.content.slot; - storage.store_block(t0s1); - - // Send the actual block. - protocol_controller - .receive_block(t0s1_id, t0s1_slot, storage.clone()) - .await; - - // block t0s1 is propagated - let hash_list = vec![t0s1_id]; - validate_propagate_block_in_list( - &mut protocol_controller, - &hash_list, - 3000 + start_slot * 1000, - ) - .await; - - // Ask for the block to consensus. - protocol_controller - .receive_get_active_blocks(vec![t0s1_id]) - .await; - - // Consensus should respond with results including the block. 
- validate_block_found(&mut protocol_controller, &t0s1_id, 100).await; - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} - -#[tokio::test] -#[serial] -#[ignore] -async fn test_consensus_block_not_found() { - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - t0: 32.into(), - future_block_processing_max_periods: 50, - ..ConsensusConfig::default() - }; - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let start_slot = 3; - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - - // create test blocks - let t0s1 = create_block( - &cfg, - Slot::new(1 + start_slot, 0), - genesis_hashes.clone(), - &staking_keys[0], - ); - - // Ask for the block to consensus. - protocol_controller - .receive_get_active_blocks(vec![t0s1.id]) - .await; - - // Consensus should not have the block. 
- validate_block_not_found(&mut protocol_controller, &t0s1.id, 100).await; - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} diff --git a/massa-consensus-worker/src/tests/scenarios_wishlist.rs b/massa-consensus-worker/src/tests/scenarios_wishlist.rs deleted file mode 100644 index b1b29dc19ea..00000000000 --- a/massa-consensus-worker/src/tests/scenarios_wishlist.rs +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -// RUST_BACKTRACE=1 cargo test scenarios106 -- --nocapture - -use super::tools::*; -use massa_consensus_exports::ConsensusConfig; - -use massa_models::slot::Slot; -use massa_signature::KeyPair; -use massa_storage::Storage; -use serial_test::serial; -use std::collections::HashSet; -use std::iter::FromIterator; - -#[tokio::test] -#[serial] -#[ignore] -async fn test_wishlist_delta_with_empty_remove() { - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - t0: 32.into(), - future_block_processing_max_periods: 50, - ..ConsensusConfig::default() - }; - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - // create test blocks - let slot = Slot::new(1, 0); - let draw = selector_controller - .get_selection(slot) - .expect("could not get selection draws.") - .producer; - let creator = get_creator_for_draw(&draw, &staking_keys.clone()); - let t0s1 = create_block(&cfg, Slot::new(1, 0), genesis_hashes.clone(), &creator); - - // send header for block t0s1 - protocol_controller - .receive_header(t0s1.content.header.clone()) - .await; - - let expected_new = HashSet::from_iter(vec![t0s1.id].into_iter()); - let expected_remove = 
HashSet::from_iter(vec![].into_iter()); - validate_wishlist( - &mut protocol_controller, - expected_new, - expected_remove, - cfg.t0.saturating_add(1000.into()).to_millis(), // leave 1sec extra for init and margin - ) - .await; - ( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} - -#[tokio::test] -#[serial] -#[ignore] -async fn test_wishlist_delta_remove() { - let staking_keys: Vec = (0..1).map(|_| KeyPair::generate()).collect(); - let cfg = ConsensusConfig { - t0: 32.into(), - future_block_processing_max_periods: 50, - ..ConsensusConfig::default() - }; - - let mut storage = Storage::create_root(); - - consensus_without_pool_test( - cfg.clone(), - async move |mut protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller| { - let genesis_hashes = consensus_command_sender - .get_block_graph_status(None, None) - .await - .expect("could not get block graph status") - .genesis_blocks; - - // create test blocks - let t0s1 = create_block( - &cfg, - Slot::new(1, 0), - genesis_hashes.clone(), - &staking_keys[0], - ); - // send header for block t0s1 - protocol_controller - .receive_header(t0s1.content.header.clone()) - .await; - - let expected_new = HashSet::from_iter(vec![t0s1.id].into_iter()); - let expected_remove = HashSet::from_iter(vec![].into_iter()); - validate_wishlist( - &mut protocol_controller, - expected_new, - expected_remove, - cfg.t0.saturating_add(1000.into()).to_millis(), // leave 1sec extra for init and margin, - ) - .await; - - storage.store_block(t0s1.clone()); - protocol_controller - .receive_block(t0s1.id, t0s1.content.header.content.slot, storage.clone()) - .await; - let expected_new = HashSet::from_iter(vec![].into_iter()); - let expected_remove = HashSet::from_iter(vec![t0s1.id].into_iter()); - validate_wishlist( - &mut protocol_controller, - expected_new, - expected_remove, - 1000, - ) - .await; - ( - protocol_controller, - 
consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - }, - ) - .await; -} diff --git a/massa-consensus-worker/src/tests/test_block_graph.rs b/massa-consensus-worker/src/tests/test_block_graph.rs deleted file mode 100644 index 13f9a086eb8..00000000000 --- a/massa-consensus-worker/src/tests/test_block_graph.rs +++ /dev/null @@ -1,174 +0,0 @@ -use crate::tests::tools::get_dummy_block_id; -use massa_graph::{ - export_active_block::ExportActiveBlock, BootstrapableGraph, BootstrapableGraphDeserializer, - BootstrapableGraphSerializer, -}; -use massa_hash::Hash; -use massa_models::{ - block::{Block, BlockHeader, BlockHeaderSerializer, BlockSerializer, WrappedBlock}, - endorsement::{Endorsement, EndorsementSerializerLW}, - slot::Slot, - wrapped::WrappedContent, -}; - -use massa_serialization::{DeserializeError, Deserializer, Serializer}; -use massa_signature::KeyPair; -use serial_test::serial; - -/// the data input to create the public keys was generated using the `secp256k1` curve -/// a test using this function is a regression test not an implementation test -fn get_export_active_test_block() -> (WrappedBlock, ExportActiveBlock) { - let keypair = KeyPair::generate(); - let block = Block::new_wrapped( - Block { - header: BlockHeader::new_wrapped( - BlockHeader { - operation_merkle_root: Hash::compute_from(&Vec::new()), - parents: vec![get_dummy_block_id("parent1"), get_dummy_block_id("parent2")], - slot: Slot::new(1, 0), - endorsements: vec![Endorsement::new_wrapped( - Endorsement { - endorsed_block: get_dummy_block_id("parent1"), - index: 0, - slot: Slot::new(1, 0), - }, - EndorsementSerializerLW::new(), - &keypair, - ) - .unwrap()], - }, - BlockHeaderSerializer::new(), - &keypair, - ) - .unwrap(), - operations: Default::default(), - }, - BlockSerializer::new(), - &keypair, - ) - .unwrap(); - - ( - block.clone(), - ExportActiveBlock { - parents: vec![ - (get_dummy_block_id("parent11"), 23), - (get_dummy_block_id("parent12"), 24), - ], - block, 
- operations: vec![], - is_final: true, - }, - ) -} - -#[test] -#[serial] -fn test_bootstrapable_graph_serialized() { - //let storage: Storage = Storage::create_root(); - - let (_, active_block) = get_export_active_test_block(); - - //storage.store_block(block.header.content.compute_id().expect("Fail to calculate block id."), block, block.to_bytes_compact().expect("Fail to serialize block")); - - let graph = BootstrapableGraph { - /// Map of active blocks, were blocks are in their exported version. - final_blocks: vec![active_block].into_iter().collect(), - }; - - let bootstrapable_graph_serializer = BootstrapableGraphSerializer::new(); - let bootstrapable_graph_deserializer = BootstrapableGraphDeserializer::new( - 2, 8, 10000, 10000, 10000, 10000, 10000, 10, 255, 10_000, - ); - let mut bytes = Vec::new(); - - bootstrapable_graph_serializer - .serialize(&graph, &mut bytes) - .unwrap(); - let (_, new_graph) = bootstrapable_graph_deserializer - .deserialize::(&bytes) - .unwrap(); - - assert_eq!( - graph.final_blocks[0].block.serialized_data, - new_graph.final_blocks[0].block.serialized_data - ); -} - -// #[tokio::test] -// #[serial] -// async fn test_clique_calculation() { -// let ledger_file = generate_ledger_file(&Map::default()); -// let cfg = ConsensusConfig::from(ledger_file.path()); -// let storage: Storage = Storage::create_root(); -// let selector_config = SelectorConfig { -// initial_rolls_path: cfg.initial_rolls_path.clone(), -// thread_count: 2, -// periods_per_cycle: 100, -// genesis_address: Address::from_str("A12hgh5ULW9o8fJE9muLNXhQENaUUswQbxPyDSq8ridnDGu5gRiJ") -// .unwrap(), -// endorsement_count: 0, -// max_draw_cache: 10, -// initial_draw_seed: "".to_string(), -// }; -// let (mut selector_manager, selector_controller) = -// start_selector_worker(selector_config).unwrap(); -// let mut block_graph = -// BlockGraph::new(GraphConfig::from(&cfg), None, storage, selector_controller) -// .await -// .unwrap(); -// let hashes: Vec = vec![ -// 
"VzCRpnoZVYY1yQZTXtVQbbxwzdu6hYtdCUZB5BXWSabsiXyfP", -// "JnWwNHRR1tUD7UJfnEFgDB4S4gfDTX2ezLadr7pcwuZnxTvn1", -// "xtvLedxC7CigAPytS5qh9nbTuYyLbQKCfbX8finiHsKMWH6SG", -// "2Qs9sSbc5sGpVv5GnTeDkTKdDpKhp4AgCVT4XFcMaf55msdvJN", -// "2VNc8pR4tNnZpEPudJr97iNHxXbHiubNDmuaSzrxaBVwKXxV6w", -// "2bsrYpfLdvVWAJkwXoJz1kn4LWshdJ6QjwTrA7suKg8AY3ecH1", -// "kfUeGj3ZgBprqFRiAQpE47dW5tcKTAueVaWXZquJW6SaPBd4G", -// ] -// .into_iter() -// .map(|h| BlockId::from_bs58_check(h).unwrap()) -// .collect(); -// block_graph.gi_head = vec![ -// (0, vec![1, 2, 3, 4]), -// (1, vec![0]), -// (2, vec![0]), -// (3, vec![0]), -// (4, vec![0]), -// (5, vec![6]), -// (6, vec![5]), -// ] -// .into_iter() -// .map(|(idx, lst)| (hashes[idx], lst.into_iter().map(|i| hashes[i]).collect())) -// .collect(); -// let computed_sets = block_graph.compute_max_cliques(); - -// let expected_sets: Vec> = vec![ -// vec![1, 2, 3, 4, 5], -// vec![1, 2, 3, 4, 6], -// vec![0, 5], -// vec![0, 6], -// ] -// .into_iter() -// .map(|lst| lst.into_iter().map(|i| hashes[i]).collect()) -// .collect(); - -// assert_eq!(computed_sets.len(), expected_sets.len()); -// for expected in expected_sets.into_iter() { -// assert!(computed_sets.iter().any(|v| v == &expected)); -// } -// selector_manager.stop(); -// } - -// /// generate a named temporary JSON ledger file -// fn generate_ledger_file(ledger_vec: &Map) -> NamedTempFile { -// use std::io::prelude::*; -// let ledger_file_named = NamedTempFile::new().expect("cannot create temp file"); -// serde_json::to_writer_pretty(ledger_file_named.as_file(), &ledger_vec) -// .expect("unable to write ledger file"); -// ledger_file_named -// .as_file() -// .seek(std::io::SeekFrom::Start(0)) -// .expect("could not seek file"); -// ledger_file_named -// } diff --git a/massa-consensus-worker/src/tests/tools.rs b/massa-consensus-worker/src/tests/tools.rs deleted file mode 100644 index c29c1d3093e..00000000000 --- a/massa-consensus-worker/src/tests/tools.rs +++ /dev/null @@ -1,1056 +0,0 @@ -// 
Copyright (c) 2022 MASSA LABS -#![allow(clippy::ptr_arg)] // this allow &Vec<..> as function argument type - -use crate::start_consensus_controller; -use massa_cipher::decrypt; -use massa_consensus_exports::error::ConsensusResult; -use massa_consensus_exports::{ - settings::ConsensusChannels, ConsensusCommandSender, ConsensusConfig, ConsensusEventReceiver, -}; -use massa_execution_exports::test_exports::MockExecutionController; -use massa_graph::{export_active_block::ExportActiveBlock, BlockGraphExport, BootstrapableGraph}; -use massa_hash::Hash; -use massa_models::prehash::PreHashMap; -use massa_models::{ - address::Address, - amount::Amount, - block::{ - Block, BlockHeader, BlockHeaderSerializer, BlockId, BlockSerializer, WrappedBlock, - WrappedHeader, - }, - operation::{Operation, OperationSerializer, OperationType, WrappedOperation}, - prehash::PreHashSet, - slot::Slot, - wrapped::{Id, WrappedContent}, -}; -use massa_pool_exports::test_exports::MockPoolController; -use massa_pool_exports::PoolController; -use massa_pos_exports::{SelectorConfig, SelectorController}; -use massa_pos_worker::start_selector_worker; -use massa_protocol_exports::test_exports::MockProtocolController; -use massa_protocol_exports::ProtocolCommand; -use massa_signature::KeyPair; -use massa_storage::Storage; -use massa_time::MassaTime; -use parking_lot::Mutex; -use std::{collections::BTreeMap, collections::HashSet, future::Future, path::Path}; -use std::{str::FromStr, sync::Arc, time::Duration}; - -use tracing::info; - -/* TODO https://github.com/massalabs/massa/issues/3099 -/// Handle the expected selector messages, always approving the address. -pub fn approve_producer_and_selector_for_staker( - staking_key: &KeyPair, - selector_controller: &Receiver, -) { - let addr = Address::from_public_key(&staking_key.get_public_key()); - // Drain all messages, assuming there can be a slight delay between sending some. 
- loop { - let timeout = Duration::from_millis(100); - match selector_controller.recv_timeout(timeout) { - Ok(MockSelectorControllerMessage::GetSelection { - slot: _, - response_tx, - }) => { - let selection = Selection { - producer: addr.clone(), - endorsements: vec![addr.clone(); ENDORSEMENT_COUNT as usize], - }; - response_tx.send(Ok(selection)).unwrap(); - } - Ok(MockSelectorControllerMessage::GetProducer { - slot: _, - response_tx, - }) => { - response_tx.send(Ok(addr.clone())).unwrap(); - } - Ok(msg) => panic!("Unexpected selector message {:?}", msg), - Err(RecvTimeoutError::Timeout) => break, - _ => panic!("Unexpected error from selector receiver"), - } - } -} -*/ - -pub fn get_dummy_block_id(s: &str) -> BlockId { - BlockId(Hash::compute_from(s.as_bytes())) -} - -pub struct AddressTest { - pub address: Address, - pub keypair: KeyPair, -} - -impl From for (Address, KeyPair) { - fn from(addr: AddressTest) -> Self { - (addr.address, addr.keypair) - } -} - -/// Same as `random_address()` but force a specific thread -pub fn random_address_on_thread(thread: u8, thread_count: u8) -> AddressTest { - loop { - let keypair = KeyPair::generate(); - let address = Address::from_public_key(&keypair.get_public_key()); - if thread == address.get_thread(thread_count) { - return AddressTest { address, keypair }; - } - } -} - -/// Generate a random address -pub fn _random_address() -> AddressTest { - let keypair = KeyPair::generate(); - AddressTest { - address: Address::from_public_key(&keypair.get_public_key()), - keypair, - } -} - -/// return true if another block has been seen -pub async fn validate_notpropagate_block( - protocol_controller: &mut MockProtocolController, - not_propagated: BlockId, - timeout_ms: u64, -) -> bool { - let param = protocol_controller - .wait_command(timeout_ms.into(), |cmd| match cmd { - ProtocolCommand::IntegratedBlock { - block_id, - storage: _, - } => Some(block_id), - _ => None, - }) - .await; - match param { - Some(block_id) => not_propagated 
!= block_id, - None => false, - } -} - -/// return true if another block has been seen -pub async fn validate_notpropagate_block_in_list( - protocol_controller: &mut MockProtocolController, - not_propagated: &Vec, - timeout_ms: u64, -) -> bool { - let param = protocol_controller - .wait_command(timeout_ms.into(), |cmd| match cmd { - ProtocolCommand::IntegratedBlock { - block_id, - storage: _, - } => Some(block_id), - _ => None, - }) - .await; - match param { - Some(block_id) => !not_propagated.contains(&block_id), - None => false, - } -} - -pub async fn validate_propagate_block_in_list( - protocol_controller: &mut MockProtocolController, - valid: &Vec, - timeout_ms: u64, -) -> BlockId { - let param = protocol_controller - .wait_command(timeout_ms.into(), |cmd| match cmd { - ProtocolCommand::IntegratedBlock { - block_id, - storage: _, - } => Some(block_id), - _ => None, - }) - .await; - match param { - Some(block_id) => { - assert!( - valid.contains(&block_id), - "not the valid hash propagated, it can be a genesis_timestamp problem" - ); - block_id - } - None => panic!("Hash not propagated."), - } -} - -pub async fn validate_ask_for_block( - protocol_controller: &mut MockProtocolController, - valid: BlockId, - timeout_ms: u64, -) -> BlockId { - let param = protocol_controller - .wait_command(timeout_ms.into(), |cmd| match cmd { - ProtocolCommand::WishlistDelta { new, .. 
} => Some(new), - _ => None, - }) - .await; - match param { - Some(new) => { - assert!(new.contains_key(&valid), "not the valid hash asked for"); - assert_eq!(new.len(), 1); - valid - } - None => panic!("Block not asked for before timeout."), - } -} - -pub async fn validate_wishlist( - protocol_controller: &mut MockProtocolController, - new: PreHashSet, - remove: PreHashSet, - timeout_ms: u64, -) { - let new: PreHashMap> = - new.into_iter().map(|id| (id, None)).collect(); - let param = protocol_controller - .wait_command(timeout_ms.into(), |cmd| match cmd { - ProtocolCommand::WishlistDelta { new, remove } => Some((new, remove)), - _ => None, - }) - .await; - match param { - Some((got_new, got_remove)) => { - for key in got_new.keys() { - assert!(new.contains_key(key)); - } - assert_eq!(remove, got_remove); - } - None => panic!("Wishlist delta not sent for before timeout."), - } -} - -pub async fn validate_does_not_ask_for_block( - protocol_controller: &mut MockProtocolController, - hash: &BlockId, - timeout_ms: u64, -) { - let param = protocol_controller - .wait_command(timeout_ms.into(), |cmd| match cmd { - ProtocolCommand::WishlistDelta { new, .. 
} => Some(new), - _ => None, - }) - .await; - if let Some(new) = param { - if new.contains_key(hash) { - panic!("unexpected ask for block {}", hash); - } - } -} - -pub async fn validate_propagate_block( - protocol_controller: &mut MockProtocolController, - valid_hash: BlockId, - timeout_ms: u64, -) { - protocol_controller - .wait_command(timeout_ms.into(), |cmd| match cmd { - ProtocolCommand::IntegratedBlock { - block_id, - storage: _, - } => { - if block_id == valid_hash { - return Some(()); - } - None - } - _ => None, - }) - .await - .expect("Block not propagated before timeout.") -} - -pub async fn validate_notify_block_attack_attempt( - protocol_controller: &mut MockProtocolController, - valid_hash: BlockId, - timeout_ms: u64, -) { - let param = protocol_controller - .wait_command(timeout_ms.into(), |cmd| match cmd { - ProtocolCommand::AttackBlockDetected(hash) => Some(hash), - _ => None, - }) - .await; - match param { - Some(hash) => assert_eq!(valid_hash, hash, "Attack attempt notified for wrong hash."), - None => panic!("Attack attempt not notified before timeout."), - } -} - -pub async fn validate_block_found( - _protocol_controller: &mut MockProtocolController, - _valid_hash: &BlockId, - _timeout_ms: u64, -) { -} - -pub async fn validate_block_not_found( - _protocol_controller: &mut MockProtocolController, - _valid_hash: &BlockId, - _timeout_ms: u64, -) { -} - -pub async fn create_and_test_block( - protocol_controller: &mut MockProtocolController, - cfg: &ConsensusConfig, - slot: Slot, - best_parents: Vec, - valid: bool, - trace: bool, - creator: &KeyPair, -) -> BlockId { - let block = create_block(cfg, slot, best_parents, creator); - let block_id = block.id; - let slot = block.content.header.content.slot; - let mut storage = Storage::create_root(); - if trace { - info!("create block:{}", block.id); - } - - storage.store_block(block); - protocol_controller - .receive_block(block_id, slot, storage.clone()) - .await; - if valid { - // Assert that the block 
is propagated. - validate_propagate_block(protocol_controller, block_id, 2000).await; - } else { - // Assert that the the block is not propagated. - validate_notpropagate_block(protocol_controller, block_id, 500).await; - } - block_id -} - -pub async fn propagate_block( - protocol_controller: &mut MockProtocolController, - block_id: BlockId, - slot: Slot, - storage: Storage, - valid: bool, - timeout_ms: u64, -) -> BlockId { - let block_hash = block_id; - protocol_controller - .receive_block(block_id, slot, storage) - .await; - if valid { - // see if the block is propagated. - validate_propagate_block(protocol_controller, block_hash, timeout_ms).await; - } else { - // see if the block is propagated. - validate_notpropagate_block(protocol_controller, block_hash, timeout_ms).await; - } - block_hash -} - -pub fn _create_roll_transaction( - keypair: &KeyPair, - roll_count: u64, - buy: bool, - expire_period: u64, - fee: u64, -) -> WrappedOperation { - let op = if buy { - OperationType::RollBuy { roll_count } - } else { - OperationType::RollSell { roll_count } - }; - - let content = Operation { - fee: Amount::from_str(&fee.to_string()).unwrap(), - expire_period, - op, - }; - Operation::new_wrapped(content, OperationSerializer::new(), keypair).unwrap() -} - -pub async fn _wait_pool_slot( - _pool_controller: &mut MockPoolController, - _t0: MassaTime, - period: u64, - thread: u8, -) -> Slot { - // TODO: Replace ?? 
- // pool_controller - // .wait_command(t0.checked_mul(2).unwrap(), |cmd| match cmd { - // PoolCommand::UpdateCurrentSlot(s) => { - // if s >= Slot::new(period, thread) { - // Some(s) - // } else { - // None - // } - // } - // _ => None, - // }) - // .await - // .expect("timeout while waiting for slot") - Slot::new(period, thread) -} - -pub fn _create_transaction( - keypair: &KeyPair, - recipient_address: Address, - amount: u64, - expire_period: u64, - fee: u64, -) -> WrappedOperation { - let op = OperationType::Transaction { - recipient_address, - amount: Amount::from_str(&amount.to_string()).unwrap(), - }; - - let content = Operation { - fee: Amount::from_str(&fee.to_string()).unwrap(), - expire_period, - op, - }; - Operation::new_wrapped(content, OperationSerializer::new(), keypair).unwrap() -} - -#[allow(clippy::too_many_arguments)] -pub fn _create_executesc( - keypair: &KeyPair, - expire_period: u64, - fee: u64, - data: Vec, - max_gas: u64, - gas_price: u64, -) -> WrappedOperation { - let op = OperationType::ExecuteSC { - data, - max_gas, - gas_price: Amount::from_str(&gas_price.to_string()).unwrap(), - datastore: BTreeMap::new(), - }; - - let content = Operation { - fee: Amount::from_str(&fee.to_string()).unwrap(), - expire_period, - op, - }; - Operation::new_wrapped(content, OperationSerializer::new(), keypair).unwrap() -} - -pub fn _create_roll_buy( - keypair: &KeyPair, - roll_count: u64, - expire_period: u64, - fee: u64, -) -> WrappedOperation { - let op = OperationType::RollBuy { roll_count }; - let content = Operation { - fee: Amount::from_str(&fee.to_string()).unwrap(), - expire_period, - op, - }; - Operation::new_wrapped(content, OperationSerializer::new(), keypair).unwrap() -} - -/* TODO https://github.com/massalabs/massa/issues/3099 -pub fn create_roll_sell( - keypair: &KeyPair, - roll_count: u64, - expire_period: u64, - fee: u64, -) -> WrappedOperation { - let op = OperationType::RollSell { roll_count }; - let content = Operation { - fee: 
Amount::from_str(&fee.to_string()).unwrap(), - expire_period, - op, - }; - Operation::new_wrapped(content, OperationSerializer::new(), keypair).unwrap() -} -*/ - -// returns hash and resulting discarded blocks -pub fn create_block( - cfg: &ConsensusConfig, - slot: Slot, - best_parents: Vec, - creator: &KeyPair, -) -> WrappedBlock { - create_block_with_merkle_root( - cfg, - Hash::compute_from("default_val".as_bytes()), - slot, - best_parents, - creator, - ) -} - -// returns hash and resulting discarded blocks -pub fn create_block_with_merkle_root( - _cfg: &ConsensusConfig, - operation_merkle_root: Hash, - slot: Slot, - best_parents: Vec, - creator: &KeyPair, -) -> WrappedBlock { - let header = BlockHeader::new_wrapped( - BlockHeader { - slot, - parents: best_parents, - operation_merkle_root, - endorsements: Vec::new(), - }, - BlockHeaderSerializer::new(), - creator, - ) - .unwrap(); - - Block::new_wrapped( - Block { - header, - operations: Default::default(), - }, - BlockSerializer::new(), - creator, - ) - .unwrap() -} - -/* TODO https://github.com/massalabs/massa/issues/3099 -/// Creates an endorsement for use in consensus tests. 
-pub fn create_endorsement( - sender_keypair: &KeyPair, - slot: Slot, - endorsed_block: BlockId, - index: u32, -) -> WrappedEndorsement { - let content = Endorsement { - slot, - index, - endorsed_block, - }; - Endorsement::new_wrapped(content, EndorsementSerializer::new(), sender_keypair).unwrap() -} -*/ - -pub fn _get_export_active_test_block( - parents: Vec<(BlockId, u64)>, - operations: Vec, - slot: Slot, - is_final: bool, -) -> ExportActiveBlock { - let keypair = KeyPair::generate(); - let block = Block::new_wrapped( - Block { - header: BlockHeader::new_wrapped( - BlockHeader { - operation_merkle_root: Hash::compute_from( - &operations - .iter() - .flat_map(|op| op.id.into_bytes()) - .collect::>()[..], - ), - parents: parents.iter().map(|(id, _)| *id).collect(), - slot, - endorsements: Vec::new(), - }, - BlockHeaderSerializer::new(), - &keypair, - ) - .unwrap(), - operations: operations.iter().cloned().map(|op| op.id).collect(), - }, - BlockSerializer::new(), - &keypair, - ) - .unwrap(); - - ExportActiveBlock { - parents, - block, - operations, - is_final, - } -} - -pub fn create_block_with_operations( - _cfg: &ConsensusConfig, - slot: Slot, - best_parents: &Vec, - creator: &KeyPair, - operations: Vec, -) -> WrappedBlock { - let operation_merkle_root = Hash::compute_from( - &operations.iter().fold(Vec::new(), |acc, v| { - [acc, v.id.get_hash().to_bytes().to_vec()].concat() - })[..], - ); - - let header = BlockHeader::new_wrapped( - BlockHeader { - slot, - parents: best_parents.clone(), - operation_merkle_root, - endorsements: Vec::new(), - }, - BlockHeaderSerializer::new(), - creator, - ) - .unwrap(); - - Block::new_wrapped( - Block { - header, - operations: operations.into_iter().map(|op| op.id).collect(), - }, - BlockSerializer::new(), - creator, - ) - .unwrap() -} - -/* TODO https://github.com/massalabs/massa/issues/3099 -pub fn create_block_with_operations_and_endorsements( - _cfg: &ConsensusConfig, - slot: Slot, - best_parents: &Vec, - creator: &KeyPair, - 
operations: Vec, - endorsements: Vec, -) -> WrappedBlock { - let operation_merkle_root = Hash::compute_from( - &operations.iter().fold(Vec::new(), |acc, v| { - [acc, v.id.get_hash().to_bytes().to_vec()].concat() - })[..], - ); - - let header = BlockHeader::new_wrapped( - BlockHeader { - slot, - parents: best_parents.clone(), - operation_merkle_root, - endorsements, - }, - BlockHeaderSerializer::new(), - creator, - ) - .unwrap(); - - Block::new_wrapped( - Block { - header, - operations: operations.into_iter().map(|op| op.id).collect(), - }, - BlockSerializer::new(), - creator, - ) - .unwrap() -} -*/ - -pub fn get_creator_for_draw(draw: &Address, nodes: &Vec) -> KeyPair { - for key in nodes.iter() { - let address = Address::from_public_key(&key.get_public_key()); - if address == *draw { - return key.clone(); - } - } - panic!("Matching key for draw not found."); -} - -/// Load staking keys from file and derive public keys and addresses -pub async fn _load_initial_staking_keys( - path: &Path, - password: &str, -) -> ConsensusResult> { - if !std::path::Path::is_file(path) { - return Ok(PreHashMap::default()); - } - let (_version, data) = decrypt(password, &tokio::fs::read(path).await?)?; - serde_json::from_slice::>(&data) - .unwrap() - .into_iter() - .map(|key| Ok((Address::from_public_key(&key.get_public_key()), key))) - .collect() -} - -/// Runs a consensus test, passing a mock pool controller to it. 
-pub async fn _consensus_pool_test( - cfg: ConsensusConfig, - boot_graph: Option, - test: F, -) where - F: FnOnce( - Box, - MockProtocolController, - ConsensusCommandSender, - ConsensusEventReceiver, - ) -> V, - V: Future< - Output = ( - Box, - MockProtocolController, - ConsensusCommandSender, - ConsensusEventReceiver, - ), - >, -{ - let mut storage: Storage = Storage::create_root(); - if let Some(ref graph) = boot_graph { - for export_block in &graph.final_blocks { - storage.store_block(export_block.block.clone()); - } - } - // mock protocol & pool - let (protocol_controller, protocol_command_sender, protocol_event_receiver) = - MockProtocolController::new(); - let (pool_controller, _pool_event_receiver) = MockPoolController::new_with_receiver(); - // for now, execution_rx is ignored: cique updates to Execution pile up and are discarded - let (execution_controller, execution_rx) = MockExecutionController::new_with_receiver(); - let stop_sinks = Arc::new(Mutex::new(false)); - let stop_sinks_clone = stop_sinks.clone(); - let execution_sink = std::thread::spawn(move || { - while !*stop_sinks_clone.lock() { - let _ = execution_rx.recv_timeout(Duration::from_millis(500)); - } - }); - let staking_key = - KeyPair::from_str("S1UxdCJv5ckDK8z87E5Jq5fEfSVLi2cTHgtpfZy7iURs3KpPns8").unwrap(); - let genesis_address = Address::from_public_key(&staking_key.get_public_key()); - let selector_config = SelectorConfig { - max_draw_cache: 12, - channel_size: 256, - thread_count: 2, - endorsement_count: 8, - periods_per_cycle: 2, - genesis_address, - }; - // launch consensus controller - let (_selector_manager, selector_controller) = start_selector_worker(selector_config).unwrap(); - let (consensus_command_sender, consensus_event_receiver, consensus_manager) = - start_consensus_controller( - cfg.clone(), - ConsensusChannels { - execution_controller, - protocol_command_sender: protocol_command_sender.clone(), - protocol_event_receiver, - pool_command_sender: pool_controller.clone(), - 
selector_controller, - }, - boot_graph, - storage.clone(), - 0, - ) - .await - .expect("could not start consensus controller"); - - // Call test func. - let ( - _pool_controller, - mut protocol_controller, - _consensus_command_sender, - consensus_event_receiver, - ) = test( - pool_controller, - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - ) - .await; - - // stop controller while ignoring all commands - let stop_fut = consensus_manager.stop(consensus_event_receiver); - tokio::pin!(stop_fut); - protocol_controller - .ignore_commands_while(stop_fut) - .await - .unwrap(); - - // stop sinks - *stop_sinks.lock() = true; - execution_sink.join().unwrap(); -} - -/* TODO https://github.com/massalabs/massa/issues/3099 -/// Runs a consensus test, passing a mock pool controller to it. -pub async fn consensus_pool_test_with_storage( - cfg: ConsensusConfig, - boot_graph: Option, - test: F, -) where - F: FnOnce( - Box, - MockProtocolController, - ConsensusCommandSender, - ConsensusEventReceiver, - Storage, - Receiver, - ) -> V, - V: Future< - Output = ( - Box, - MockProtocolController, - ConsensusCommandSender, - ConsensusEventReceiver, - Receiver, - ), - >, -{ - let mut storage: Storage = Storage::create_root(); - if let Some(ref graph) = boot_graph { - for export_block in &graph.final_blocks { - storage.store_block(export_block.block.clone()); - } - } - // mock protocol & pool - let (protocol_controller, protocol_command_sender, protocol_event_receiver) = - MockProtocolController::new(); - let (pool_controller, _pool_event_receiver) = MockPoolController::new_with_receiver(); - // for now, execution_rx is ignored: cique updates to Execution pile up and are discarded - let (execution_controller, execution_rx) = MockExecutionController::new_with_receiver(); - let stop_sinks = Arc::new(Mutex::new(false)); - let stop_sinks_clone = stop_sinks.clone(); - let execution_sink = std::thread::spawn(move || { - while !*stop_sinks_clone.lock() { - let _ = 
execution_rx.recv_timeout(Duration::from_millis(500)); - } - }); - let (selector_controller, selector_receiver) = MockSelectorController::new_with_receiver(); - // launch consensus controller - let (consensus_command_sender, consensus_event_receiver, consensus_manager) = - start_consensus_controller( - cfg.clone(), - ConsensusChannels { - execution_controller, - protocol_command_sender: protocol_command_sender.clone(), - protocol_event_receiver, - pool_command_sender: pool_controller.clone(), - selector_controller: selector_controller, - }, - boot_graph, - storage.clone(), - 0, - ) - .await - .expect("could not start consensus controller"); - - // Call test func. - let ( - _pool_controller, - mut protocol_controller, - _consensus_command_sender, - consensus_event_receiver, - _selector_controller, - ) = test( - pool_controller, - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - storage, - selector_receiver, - ) - .await; - - // stop controller while ignoring all commands - let stop_fut = consensus_manager.stop(consensus_event_receiver); - tokio::pin!(stop_fut); - protocol_controller - .ignore_commands_while(stop_fut) - .await - .unwrap(); - - // stop sinks - *stop_sinks.lock() = true; - execution_sink.join().unwrap(); -} -*/ - -/// Runs a consensus test, without passing a mock pool controller to it. 
-pub async fn consensus_without_pool_test(cfg: ConsensusConfig, test: F) -where - F: FnOnce( - MockProtocolController, - ConsensusCommandSender, - ConsensusEventReceiver, - Box, - ) -> V, - V: Future< - Output = ( - MockProtocolController, - ConsensusCommandSender, - ConsensusEventReceiver, - Box, - ), - >, -{ - let storage: Storage = Storage::create_root(); - // mock protocol & pool - let (protocol_controller, protocol_command_sender, protocol_event_receiver) = - MockProtocolController::new(); - let (pool_controller, _pool_event_receiver) = MockPoolController::new_with_receiver(); - let staking_key = - KeyPair::from_str("S1UxdCJv5ckDK8z87E5Jq5fEfSVLi2cTHgtpfZy7iURs3KpPns8").unwrap(); - let genesis_address = Address::from_public_key(&staking_key.get_public_key()); - let selector_config = SelectorConfig { - max_draw_cache: 12, - channel_size: 256, - thread_count: 2, - endorsement_count: 8, - periods_per_cycle: 2, - genesis_address, - }; - let (mut selector_manager, selector_controller) = - start_selector_worker(selector_config).unwrap(); - // for now, execution_rx is ignored: clique updates to Execution pile up and are discarded - let (execution_controller, execution_rx) = MockExecutionController::new_with_receiver(); - let stop_sinks = Arc::new(Mutex::new(false)); - let stop_sinks_clone = stop_sinks.clone(); - let execution_sink = std::thread::spawn(move || { - while !*stop_sinks_clone.lock() { - let _ = execution_rx.recv_timeout(Duration::from_millis(500)); - } - }); - // launch consensus controller - let (consensus_command_sender, consensus_event_receiver, consensus_manager) = - start_consensus_controller( - cfg.clone(), - ConsensusChannels { - execution_controller, - protocol_command_sender: protocol_command_sender.clone(), - protocol_event_receiver, - pool_command_sender: pool_controller, - selector_controller: selector_controller.clone(), - }, - None, - storage.clone(), - 0, - ) - .await - .expect("could not start consensus controller"); - - // Call test func. 
- let ( - mut protocol_controller, - _consensus_command_sender, - consensus_event_receiver, - _selector_controller, - ) = test( - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - .await; - - // stop controller while ignoring all commands - let stop_fut = consensus_manager.stop(consensus_event_receiver); - tokio::pin!(stop_fut); - protocol_controller - .ignore_commands_while(stop_fut) - .await - .unwrap(); - selector_manager.stop(); - // stop sinks - *stop_sinks.lock() = true; - execution_sink.join().unwrap(); -} - -/// Runs a consensus test, without passing a mock pool controller to it, -/// and passing a reference to storage. -pub async fn consensus_without_pool_with_storage_test(cfg: ConsensusConfig, test: F) -where - F: FnOnce( - Storage, - MockProtocolController, - ConsensusCommandSender, - ConsensusEventReceiver, - Box, - ) -> V, - V: Future< - Output = ( - MockProtocolController, - ConsensusCommandSender, - ConsensusEventReceiver, - Box, - ), - >, -{ - let storage: Storage = Storage::create_root(); - // mock protocol & pool - let (protocol_controller, protocol_command_sender, protocol_event_receiver) = - MockProtocolController::new(); - let (pool_controller, _pool_event_receiver) = MockPoolController::new_with_receiver(); - // for now, execution_rx is ignored: clique updates to Execution pile up and are discarded - let (execution_controller, execution_rx) = MockExecutionController::new_with_receiver(); - let stop_sinks = Arc::new(Mutex::new(false)); - let stop_sinks_clone = stop_sinks.clone(); - let execution_sink = std::thread::spawn(move || { - while !*stop_sinks_clone.lock() { - let _ = execution_rx.recv_timeout(Duration::from_millis(500)); - } - }); - let staking_key = - KeyPair::from_str("S1UxdCJv5ckDK8z87E5Jq5fEfSVLi2cTHgtpfZy7iURs3KpPns8").unwrap(); - let genesis_address = Address::from_public_key(&staking_key.get_public_key()); - let selector_config = SelectorConfig { - max_draw_cache: 12, - 
channel_size: 256, - thread_count: 2, - endorsement_count: 8, - periods_per_cycle: 2, - genesis_address, - }; - let (mut selector_manager, selector_controller) = - start_selector_worker(selector_config).unwrap(); - // launch consensus controller - let (consensus_command_sender, consensus_event_receiver, consensus_manager) = - start_consensus_controller( - cfg.clone(), - ConsensusChannels { - execution_controller, - protocol_command_sender: protocol_command_sender.clone(), - protocol_event_receiver, - pool_command_sender: pool_controller, - selector_controller: selector_controller.clone(), - }, - None, - storage.clone(), - 0, - ) - .await - .expect("could not start consensus controller"); - - // Call test func. - let ( - mut protocol_controller, - _consensus_command_sender, - consensus_event_receiver, - _selector_controller, - ) = test( - storage, - protocol_controller, - consensus_command_sender, - consensus_event_receiver, - selector_controller, - ) - .await; - - // stop controller while ignoring all commands - let stop_fut = consensus_manager.stop(consensus_event_receiver); - tokio::pin!(stop_fut); - protocol_controller - .ignore_commands_while(stop_fut) - .await - .unwrap(); - selector_manager.stop(); - // stop sinks - *stop_sinks.lock() = true; - execution_sink.join().unwrap(); -} - -pub fn get_cliques(graph: &BlockGraphExport, hash: BlockId) -> HashSet { - let mut res = HashSet::new(); - for (i, clique) in graph.max_cliques.iter().enumerate() { - if clique.block_ids.contains(&hash) { - res.insert(i); - } - } - res -} diff --git a/massa-consensus-worker/src/tools.rs b/massa-consensus-worker/src/tools.rs deleted file mode 100644 index 2d771e2de71..00000000000 --- a/massa-consensus-worker/src/tools.rs +++ /dev/null @@ -1,107 +0,0 @@ -use crate::consensus_worker::ConsensusWorker; -use massa_consensus_exports::settings::ConsensusConfig; -use massa_consensus_exports::{ - commands::{ConsensusCommand, ConsensusManagementCommand}, - error::{ConsensusError, 
ConsensusResult as Result}, - events::ConsensusEvent, - settings::{ConsensusChannels, ConsensusWorkerChannels}, - ConsensusCommandSender, ConsensusEventReceiver, ConsensusManager, -}; -use massa_graph::{settings::GraphConfig, BlockGraph, BootstrapableGraph}; -use massa_storage::Storage; -use tokio::sync::mpsc; -use tracing::{debug, error, info}; - -/// Creates a new consensus controller. -/// -/// # Arguments -/// * `cfg`: consensus configuration -/// * `protocol_command_sender`: a `ProtocolCommandSender` instance to send commands to Protocol. -/// * `protocol_event_receiver`: a `ProtocolEventReceiver` instance to receive events from Protocol. -#[allow(clippy::too_many_arguments)] -pub async fn start_consensus_controller( - cfg: ConsensusConfig, - channels: ConsensusChannels, - boot_graph: Option, - storage: Storage, - clock_compensation: i64, -) -> Result<( - ConsensusCommandSender, - ConsensusEventReceiver, - ConsensusManager, -)> { - debug!("starting consensus controller"); - massa_trace!( - "consensus.consensus_controller.start_consensus_controller", - {} - ); - - // todo that is checked when loading the config, should be removed - // ensure that the parameters are sane - if cfg.thread_count == 0 { - return Err(ConsensusError::ConfigError( - "thread_count should be strictly more than 0".to_string(), - )); - } - if cfg.t0 == 0.into() { - return Err(ConsensusError::ConfigError( - "t0 should be strictly more than 0".to_string(), - )); - } - if cfg.t0.checked_rem_u64(cfg.thread_count as u64)? 
!= 0.into() { - return Err(ConsensusError::ConfigError( - "thread_count should divide t0".to_string(), - )); - } - - // start worker - let block_db = BlockGraph::new( - GraphConfig::from(&cfg), - boot_graph, - storage.clone_without_refs(), - channels.selector_controller.clone(), - ) - .await?; - let (command_tx, command_rx) = mpsc::channel::(cfg.channel_size); - let (event_tx, event_rx) = mpsc::channel::(cfg.channel_size); - let (manager_tx, manager_rx) = mpsc::channel::(1); - let cfg_copy = cfg.clone(); - let join_handle = tokio::spawn(async move { - let res = ConsensusWorker::new( - cfg_copy, - ConsensusWorkerChannels { - protocol_command_sender: channels.protocol_command_sender, - protocol_event_receiver: channels.protocol_event_receiver, - execution_controller: channels.execution_controller, - pool_command_sender: channels.pool_command_sender, - selector_controller: channels.selector_controller, - controller_command_rx: command_rx, - controller_event_tx: event_tx, - controller_manager_rx: manager_rx, - }, - block_db, - clock_compensation, - ) - .await? 
- .run_loop() - .await; - match res { - Err(err) => { - error!("consensus worker crashed: {}", err); - Err(err) - } - Ok(v) => { - info!("consensus worker finished cleanly"); - Ok(v) - } - } - }); - Ok(( - ConsensusCommandSender(command_tx), - ConsensusEventReceiver(event_rx), - ConsensusManager { - manager_tx, - join_handle, - }, - )) -} diff --git a/massa-consensus-worker/src/worker/init.rs b/massa-consensus-worker/src/worker/init.rs new file mode 100644 index 00000000000..5af5969165c --- /dev/null +++ b/massa-consensus-worker/src/worker/init.rs @@ -0,0 +1,309 @@ +use massa_consensus_exports::{ + block_status::BlockStatus, bootstrapable_graph::BootstrapableGraph, error::ConsensusError, + ConsensusConfig, +}; +use massa_hash::Hash; +use massa_models::{ + active_block::ActiveBlock, + address::Address, + block::{Block, BlockHeader, BlockHeaderSerializer, BlockId, BlockSerializer, WrappedBlock}, + prehash::PreHashMap, + slot::Slot, + timeslots::{get_block_slot_timestamp, get_latest_block_slot_at_timestamp}, + wrapped::WrappedContent, +}; +use massa_storage::Storage; +use massa_time::MassaTime; +use parking_lot::RwLock; +use std::{ + collections::{HashMap, VecDeque}, + sync::{mpsc, Arc}, +}; +use tracing::log::info; + +use crate::{commands::ConsensusCommand, state::ConsensusState}; + +use super::ConsensusWorker; + +/// Creates genesis block in given thread. 
+/// +/// # Arguments +/// * `cfg`: consensus configuration +/// * `thread_number`: thread in which we want a genesis block +/// +/// # Returns +/// A genesis block +pub fn create_genesis_block( + cfg: &ConsensusConfig, + thread_number: u8, +) -> Result { + let keypair = &cfg.genesis_key; + let header = BlockHeader::new_wrapped( + BlockHeader { + slot: Slot::new(0, thread_number), + parents: Vec::new(), + operation_merkle_root: Hash::compute_from(&Vec::new()), + endorsements: Vec::new(), + }, + BlockHeaderSerializer::new(), + keypair, + )?; + + Ok(Block::new_wrapped( + Block { + header, + operations: Default::default(), + }, + BlockSerializer::new(), + keypair, + )?) +} + +impl ConsensusWorker { + /// Creates a new consensus worker. + /// + /// # Arguments + /// * `config`: consensus configuration + /// * `command_receiver`: channel to receive commands from controller + /// * `channels`: channels to communicate with other workers + /// * `shared_state`: shared state with the controller + /// * `init_graph`: Optional graph of blocks to initiate the worker + /// * `storage`: shared storage + /// + /// # Returns: + /// A `ConsensusWorker`, to interact with it use the `ConsensusController` + pub fn new( + config: ConsensusConfig, + command_receiver: mpsc::Receiver, + shared_state: Arc>, + init_graph: Option, + storage: Storage, + ) -> Result { + let now = MassaTime::now(config.clock_compensation_millis) + .expect("Couldn't init timer consensus"); + let previous_slot = get_latest_block_slot_at_timestamp( + config.thread_count, + config.t0, + config.genesis_timestamp, + now, + ) + .expect("Couldn't get the init slot consensus."); + + // load genesis blocks + let mut block_statuses = PreHashMap::default(); + let mut genesis_block_ids = Vec::with_capacity(config.thread_count as usize); + for thread in 0u8..config.thread_count { + let block = create_genesis_block(&config, thread).map_err(|err| { + ConsensusError::GenesisCreationError(format!("genesis error {}", err)) + })?; 
+ let mut storage = storage.clone_without_refs(); + storage.store_block(block.clone()); + genesis_block_ids.push(block.id); + block_statuses.insert( + block.id, + BlockStatus::Active { + a_block: Box::new(ActiveBlock { + creator_address: block.creator_address, + parents: Vec::new(), + children: vec![PreHashMap::default(); config.thread_count as usize], + descendants: Default::default(), + is_final: true, + block_id: block.id, + slot: block.content.header.content.slot, + fitness: block.get_fitness(), + }), + storage, + }, + ); + } + + let next_slot = previous_slot.map_or(Ok(Slot::new(0u64, 0u8)), |s| { + s.get_next_slot(config.thread_count) + })?; + let next_instant = get_block_slot_timestamp( + config.thread_count, + config.t0, + config.genesis_timestamp, + next_slot, + )? + .estimate_instant(config.clock_compensation_millis)?; + + info!( + "Started node at time {}, cycle {}, period {}, thread {}", + now.to_utc_string(), + next_slot.get_cycle(config.periods_per_cycle), + next_slot.period, + next_slot.thread, + ); + + if config.genesis_timestamp > now { + let (days, hours, mins, secs) = config + .genesis_timestamp + .saturating_sub(now) + .days_hours_mins_secs()?; + info!( + "{} days, {} hours, {} minutes, {} seconds remaining to genesis", + days, hours, mins, secs, + ) + } + + // add genesis blocks to stats + let genesis_addr = Address::from_public_key(&config.genesis_key.get_public_key()); + let mut final_block_stats = VecDeque::new(); + for thread in 0..config.thread_count { + final_block_stats.push_back(( + get_block_slot_timestamp( + config.thread_count, + config.t0, + config.genesis_timestamp, + Slot::new(0, thread), + )?, + genesis_addr, + false, + )) + } + + let mut res_consensus = ConsensusWorker { + config: config.clone(), + command_receiver, + shared_state, + previous_slot, + next_slot, + next_instant, + }; + + if let Some(BootstrapableGraph { final_blocks }) = init_graph { + // load final blocks + let final_blocks: Vec<(ActiveBlock, Storage)> = 
final_blocks + .into_iter() + .map(|export_b| export_b.to_active_block(&storage, config.thread_count)) + .collect::>()?; + + // compute latest_final_blocks_periods + let mut latest_final_blocks_periods: Vec<(BlockId, u64)> = + genesis_block_ids.iter().map(|id| (*id, 0u64)).collect(); + for (b, _) in &final_blocks { + if let Some(v) = latest_final_blocks_periods.get_mut(b.slot.thread as usize) { + if b.slot.period > v.1 { + *v = (b.block_id, b.slot.period); + } + } + } + + { + let mut write_shared_state = res_consensus.shared_state.write(); + write_shared_state.genesis_hashes = genesis_block_ids; + write_shared_state.active_index = + final_blocks.iter().map(|(b, _)| b.block_id).collect(); + write_shared_state.best_parents = latest_final_blocks_periods.clone(); + write_shared_state.latest_final_blocks_periods = latest_final_blocks_periods; + write_shared_state.block_statuses = final_blocks + .into_iter() + .map(|(b, s)| { + Ok(( + b.block_id, + BlockStatus::Active { + a_block: Box::new(b), + storage: s, + }, + )) + }) + .collect::>()?; + write_shared_state.final_block_stats = final_block_stats; + } + + res_consensus.claim_parent_refs()?; + } else { + { + let mut write_shared_state = res_consensus.shared_state.write(); + write_shared_state.active_index = genesis_block_ids.iter().copied().collect(); + write_shared_state.latest_final_blocks_periods = + genesis_block_ids.iter().map(|h| (*h, 0)).collect(); + write_shared_state.best_parents = + genesis_block_ids.iter().map(|v| (*v, 0)).collect(); + write_shared_state.genesis_hashes = genesis_block_ids; + write_shared_state.block_statuses = block_statuses; + write_shared_state.final_block_stats = final_block_stats; + } + } + + // Notify execution module of current blockclique and all final blocks. + // we need to do this because the bootstrap snapshots of the executor vs the consensus may not have been taken in sync + // because the two modules run concurrently and out of sync. 
+ { + let mut write_shared_state = res_consensus.shared_state.write(); + let mut block_storage: PreHashMap = Default::default(); + let notify_finals: HashMap = write_shared_state + .get_all_final_blocks() + .into_iter() + .map(|(b_id, block_infos)| { + block_storage.insert(b_id, block_infos.1); + (block_infos.0, b_id) + }) + .collect(); + let notify_blockclique: HashMap = write_shared_state + .get_blockclique() + .iter() + .map(|b_id| { + let (a_block, storage) = write_shared_state + .get_full_active_block(b_id) + .expect("active block missing from block_db"); + let slot = a_block.slot; + block_storage.insert(*b_id, storage.clone()); + (slot, *b_id) + }) + .collect(); + write_shared_state.prev_blockclique = + notify_blockclique.iter().map(|(k, v)| (*v, *k)).collect(); + write_shared_state + .channels + .execution_controller + .update_blockclique_status(notify_finals, Some(notify_blockclique), block_storage); + } + + Ok(res_consensus) + } + + /// Internal function used at initialization of the `ConsensusWorker` to link blocks with their parents + fn claim_parent_refs(&mut self) -> Result<(), ConsensusError> { + let mut write_shared_state = self.shared_state.write(); + for (_b_id, block_status) in write_shared_state.block_statuses.iter_mut() { + if let BlockStatus::Active { + a_block, + storage: block_storage, + } = block_status + { + // claim parent refs + let n_claimed_parents = block_storage + .claim_block_refs(&a_block.parents.iter().map(|(p_id, _)| *p_id).collect()) + .len(); + + if !a_block.is_final { + // note: parents of final blocks will be missing, that's ok, but it shouldn't be the case for non-finals + if n_claimed_parents != self.config.thread_count as usize { + return Err(ConsensusError::MissingBlock( + "block storage could not claim refs to all parent blocks".into(), + )); + } + } + } + } + + // list active block parents + let active_blocks_map: PreHashMap)> = write_shared_state + .block_statuses + .iter() + .filter_map(|(h, s)| { + if let 
BlockStatus::Active { a_block: a, .. } = s { + return Some((*h, (a.slot, a.parents.iter().map(|(ph, _)| *ph).collect()))); + } + None + }) + .collect(); + + for (b_id, (b_slot, b_parents)) in active_blocks_map.into_iter() { + write_shared_state.insert_parents_descendants(b_id, b_slot, b_parents); + } + Ok(()) + } +} diff --git a/massa-consensus-worker/src/worker/main_loop.rs b/massa-consensus-worker/src/worker/main_loop.rs new file mode 100644 index 00000000000..88489e1fafc --- /dev/null +++ b/massa-consensus-worker/src/worker/main_loop.rs @@ -0,0 +1,159 @@ +use std::{sync::mpsc, time::Instant}; + +use massa_consensus_exports::error::ConsensusError; +use massa_models::{ + slot::Slot, + timeslots::{get_block_slot_timestamp, get_closest_slot_to_timestamp}, +}; +use massa_time::MassaTime; +use tracing::{info, log::warn}; + +use crate::commands::ConsensusCommand; + +use super::ConsensusWorker; + +enum WaitingStatus { + Ended, + Interrupted, + Disconnected, +} + +impl ConsensusWorker { + /// Execute a command received from the controller also run an update of the graph after processing the command. 
+ /// + /// # Arguments: + /// * `command`: the command to execute + /// + /// # Returns: + /// An error if the command failed + fn manage_command(&mut self, command: ConsensusCommand) -> Result<(), ConsensusError> { + let mut write_shared_state = self.shared_state.write(); + match command { + ConsensusCommand::RegisterBlockHeader(block_id, header) => { + write_shared_state.register_block_header(block_id, header, self.previous_slot)?; + write_shared_state.block_db_changed() + } + ConsensusCommand::RegisterBlock(block_id, slot, block_storage, created) => { + write_shared_state.register_block( + block_id, + slot, + self.previous_slot, + block_storage, + created, + )?; + write_shared_state.block_db_changed() + } + ConsensusCommand::MarkInvalidBlock(block_id, header) => { + write_shared_state.mark_invalid_block(&block_id, header); + Ok(()) + } + } + } + + /// Wait and interrupt or wait until an instant or a stop signal + /// + /// # Return value + /// Returns the error of the process of the command if any. + /// Returns true if we reached the instant. + /// Returns false if we were interrupted by a command. + fn wait_slot_or_command(&mut self, deadline: Instant) -> WaitingStatus { + match self.command_receiver.recv_deadline(deadline) { + // message received => manage it + Ok(command) => { + if let Err(err) = self.manage_command(command) { + warn!("Error in consensus: {}", err); + } + WaitingStatus::Interrupted + } + // timeout => continue main loop + Err(mpsc::RecvTimeoutError::Timeout) => WaitingStatus::Ended, + // channel disconnected (sender dropped) => quit main loop + Err(mpsc::RecvTimeoutError::Disconnected) => WaitingStatus::Disconnected, + } + } + + /// Gets the next slot and the instant when it will happen. + /// Slots can be skipped if we waited too much in-between. + /// Extra safety against double-production caused by clock adjustments (this is the role of the `previous_slot` parameter). 
+ fn get_next_slot(&self, previous_slot: Option) -> (Slot, Instant) { + // get current absolute time + let now = MassaTime::now(self.config.clock_compensation_millis) + .expect("could not get current time"); + + // get closest slot according to the current absolute time + let mut next_slot = get_closest_slot_to_timestamp( + self.config.thread_count, + self.config.t0, + self.config.genesis_timestamp, + now, + ); + + // protection against double-production on unexpected system clock adjustment + if let Some(prev_slot) = previous_slot { + if next_slot <= prev_slot { + next_slot = prev_slot + .get_next_slot(self.config.thread_count) + .expect("could not compute next slot"); + } + } + + // get the timestamp of the target slot + let next_instant = get_block_slot_timestamp( + self.config.thread_count, + self.config.t0, + self.config.genesis_timestamp, + next_slot, + ) + .expect("could not get block slot timestamp") + .estimate_instant(self.config.clock_compensation_millis) + .expect("could not estimate block slot instant"); + + (next_slot, next_instant) + } + + /// Runs in loop forever. This loop must stop every slot to perform operations on stats and graph + /// but can be stopped anytime by a command received. + pub fn run(&mut self) { + let mut last_prune = Instant::now(); + loop { + match self.wait_slot_or_command(self.next_instant) { + WaitingStatus::Ended => { + let previous_cycle = self + .previous_slot + .map(|s| s.get_cycle(self.config.periods_per_cycle)); + let observed_cycle = self.next_slot.get_cycle(self.config.periods_per_cycle); + if previous_cycle.is_none() { + // first cycle observed + info!("Massa network has started ! 
🎉") + } + if previous_cycle < Some(observed_cycle) { + info!("Started cycle {}", observed_cycle); + } + { + let mut write_shared_state = self.shared_state.write(); + if let Err(err) = write_shared_state.slot_tick(self.next_slot) { + warn!("Error while processing block tick: {}", err); + } + }; + if last_prune.elapsed().as_millis() + > self.config.block_db_prune_interval.to_millis() as u128 + { + self.shared_state + .write() + .prune() + .expect("Error while pruning"); + last_prune = Instant::now(); + } + self.previous_slot = Some(self.next_slot); + (self.next_slot, self.next_instant) = self.get_next_slot(Some(self.next_slot)); + } + WaitingStatus::Disconnected => { + break; + } + WaitingStatus::Interrupted => { + continue; + } + }; + } + } +} diff --git a/massa-consensus-worker/src/worker/mod.rs b/massa-consensus-worker/src/worker/mod.rs new file mode 100644 index 00000000000..dc9f2e3e74b --- /dev/null +++ b/massa-consensus-worker/src/worker/mod.rs @@ -0,0 +1,118 @@ +use massa_consensus_exports::{ + bootstrapable_graph::BootstrapableGraph, ConsensusChannels, ConsensusConfig, + ConsensusController, ConsensusManager, +}; +use massa_models::block::BlockId; +use massa_models::clique::Clique; +use massa_models::config::CHANNEL_SIZE; +use massa_models::prehash::PreHashSet; +use massa_models::slot::Slot; +use massa_storage::Storage; +use massa_time::MassaTime; +use parking_lot::RwLock; +use std::sync::{mpsc, Arc}; +use std::thread; +use std::time::Instant; + +use crate::commands::ConsensusCommand; +use crate::controller::ConsensusControllerImpl; +use crate::manager::ConsensusManagerImpl; +use crate::state::ConsensusState; + +/// The consensus worker structure that contains all information and tools for the consensus worker thread. 
+pub struct ConsensusWorker { + /// Channel to receive command from the controller + command_receiver: mpsc::Receiver, + /// Configuration of the consensus + config: ConsensusConfig, + /// State shared with the controller + shared_state: Arc>, + /// Previous slot. + previous_slot: Option, + /// Next slot + next_slot: Slot, + /// Next slot instant + next_instant: Instant, +} + +mod init; +mod main_loop; + +/// Create a new consensus worker thread. +/// +/// # Arguments: +/// * `config`: Configuration of the consensus +/// * `channels`: Channels to communicate with others modules +/// * `init_graph`: Optional initial graph to bootstrap the graph. if None, the graph will have only genesis blocks. +/// * `storage`: Storage to use for the consensus +/// +/// # Returns: +/// * The consensus controller to communicate with the consensus worker thread +/// * The consensus manager to manage the consensus worker thread +pub fn start_consensus_worker( + config: ConsensusConfig, + channels: ConsensusChannels, + init_graph: Option, + storage: Storage, +) -> (Box, Box) { + let (tx, rx) = mpsc::sync_channel(CHANNEL_SIZE); + // desync detection timespan + let bootstrap_part_size = config.bootstrap_part_size; + let stats_desync_detection_timespan = + config.t0.checked_mul(config.periods_per_cycle * 2).unwrap(); + let shared_state = Arc::new(RwLock::new(ConsensusState { + storage: storage.clone(), + config: config.clone(), + channels, + max_cliques: vec![Clique { + block_ids: PreHashSet::::default(), + fitness: 0, + is_blockclique: true, + }], + sequence_counter: 0, + waiting_for_slot_index: Default::default(), + waiting_for_dependencies_index: Default::default(), + discarded_index: Default::default(), + to_propagate: Default::default(), + attack_attempts: Default::default(), + new_final_blocks: Default::default(), + new_stale_blocks: Default::default(), + incoming_index: Default::default(), + active_index: Default::default(), + save_final_periods: Default::default(), + 
latest_final_blocks_periods: Default::default(), + best_parents: Default::default(), + block_statuses: Default::default(), + genesis_hashes: Default::default(), + gi_head: Default::default(), + final_block_stats: Default::default(), + stale_block_stats: Default::default(), + protocol_blocks: Default::default(), + wishlist: Default::default(), + launch_time: MassaTime::now(config.clock_compensation_millis).unwrap(), + stats_desync_detection_timespan, + stats_history_timespan: std::cmp::max( + stats_desync_detection_timespan, + config.stats_timespan, + ), + prev_blockclique: Default::default(), + })); + + let shared_state_cloned = shared_state.clone(); + let consensus_thread = thread::Builder::new() + .name("consensus worker".into()) + .spawn(move || { + let mut consensus_worker = + ConsensusWorker::new(config, rx, shared_state_cloned, init_graph, storage).unwrap(); + consensus_worker.run() + }) + .expect("Can't spawn consensus thread."); + + let manager = ConsensusManagerImpl { + consensus_thread: Some((tx.clone(), consensus_thread)), + }; + + let controller = ConsensusControllerImpl::new(tx, shared_state, bootstrap_part_size); + + (Box::new(controller), Box::new(manager)) +} diff --git a/massa-executed-ops/src/executed_ops.rs b/massa-executed-ops/src/executed_ops.rs index 3d74d837433..c0265778e2c 100644 --- a/massa-executed-ops/src/executed_ops.rs +++ b/massa-executed-ops/src/executed_ops.rs @@ -26,7 +26,7 @@ use std::{ ops::Bound::{Excluded, Included, Unbounded}, }; -const EXECUTED_OPS_INITIAL_BYTES: &[u8; 32] = &[0; HASH_SIZE_BYTES]; +const EXECUTED_OPS_HASH_INITIAL_BYTES: &[u8; 32] = &[0; HASH_SIZE_BYTES]; /// A structure to list and prune previously executed operations #[derive(Debug, Clone)] @@ -48,7 +48,7 @@ impl ExecutedOps { config, sorted_ops: BTreeMap::new(), ops: PreHashSet::default(), - hash: Hash::from_bytes(EXECUTED_OPS_INITIAL_BYTES), + hash: Hash::from_bytes(EXECUTED_OPS_HASH_INITIAL_BYTES), } } @@ -123,7 +123,7 @@ impl ExecutedOps { let left_bound 
= match cursor { StreamingStep::Started => Unbounded, StreamingStep::Ongoing(slot) => Excluded(slot), - StreamingStep::Finished => return (ops_part, cursor), + StreamingStep::Finished(_) => return (ops_part, cursor), }; let mut ops_part_last_slot: Option = None; for (slot, ids) in self.sorted_ops.range((left_bound, Unbounded)) { @@ -137,7 +137,7 @@ impl ExecutedOps { if let Some(last_slot) = ops_part_last_slot { (ops_part, StreamingStep::Ongoing(last_slot)) } else { - (ops_part, StreamingStep::Finished) + (ops_part, StreamingStep::Finished(None)) } } @@ -156,7 +156,7 @@ impl ExecutedOps { if let Some(slot) = self.sorted_ops.last_key_value().map(|(slot, _)| slot) { StreamingStep::Ongoing(*slot) } else { - StreamingStep::Finished + StreamingStep::Finished(None) } } } @@ -215,7 +215,7 @@ fn test_executed_ops_xor_computing() { // at this point the hash should have been XORed with itself assert_eq!( a.hash, - Hash::from_bytes(EXECUTED_OPS_INITIAL_BYTES), + Hash::from_bytes(EXECUTED_OPS_HASH_INITIAL_BYTES), "'a' was not reset to its initial value" ); } diff --git a/massa-execution-exports/src/types.rs b/massa-execution-exports/src/types.rs index 463e588c15e..c1b2e075b18 100644 --- a/massa-execution-exports/src/types.rs +++ b/massa-execution-exports/src/types.rs @@ -63,8 +63,6 @@ pub struct ReadOnlyExecutionOutput { pub struct ReadOnlyExecutionRequest { /// Maximum gas to spend in the execution. pub max_gas: u64, - /// The simulated price of gas for the read-only execution. - pub simulated_gas_price: Amount, /// Call stack to simulate, older caller first pub call_stack: Vec, /// Target of the request @@ -84,7 +82,7 @@ pub enum ReadOnlyExecutionTarget { /// Target function target_func: String, /// Parameter to pass to the target function - parameter: String, + parameter: Vec, }, } @@ -93,8 +91,6 @@ pub enum ReadOnlyExecutionTarget { pub struct ReadOnlyCallRequest { /// Maximum gas to spend in the execution. 
pub max_gas: u64, - /// The simulated price of gas for the read-only execution. - pub simulated_gas_price: Amount, /// Call stack to simulate, older caller first. Target should be last. pub call_stack: Vec, /// Target address diff --git a/massa-execution-worker/Cargo.toml b/massa-execution-worker/Cargo.toml index 24bc0c2a47b..5f3ff06339c 100644 --- a/massa-execution-worker/Cargo.toml +++ b/massa-execution-worker/Cargo.toml @@ -21,7 +21,7 @@ massa_execution_exports = { path = "../massa-execution-exports" } massa_models = { path = "../massa-models" } massa_storage = { path = "../massa-storage" } massa_hash = { path = "../massa-hash" } -massa-sc-runtime = { git = "https://github.com/massalabs/massa-sc-runtime", tag = "v0.8.2" } +massa-sc-runtime = { git = "https://github.com/massalabs/massa-sc-runtime", tag = "v0.9.0" } massa_signature = { path = "../massa-signature" } massa_time = { path = "../massa-time" } massa_ledger_exports = { path = "../massa-ledger-exports" } @@ -33,8 +33,21 @@ serial_test = "0.9" tempfile = "3.2" # custom modules with testing enabled massa_pos_worker = { path = "../massa-pos-worker" } -massa_ledger_worker = { path = "../massa-ledger-worker" } +massa_ledger_worker = { path = "../massa-ledger-worker", features = [ + "testing", +] } +massa_execution_exports = { path = "../massa-execution-exports", features = [ + "testing", +] } +massa_final_state = { path = "../massa-final-state", features = ["testing"] } [features] sandbox = ["massa_async_pool/sandbox"] -testing = ["massa_execution_exports/testing", "massa_ledger_exports/testing", "massa_pos_exports/testing", "massa_pos_worker/testing", "massa_ledger_worker/testing", "massa_final_state/testing"] \ No newline at end of file +testing = [ + "massa_execution_exports/testing", + "massa_ledger_exports/testing", + "massa_pos_exports/testing", + "massa_pos_worker/testing", + "massa_ledger_worker/testing", + "massa_final_state/testing", +] diff --git a/massa-execution-worker/src/active_history.rs 
b/massa-execution-worker/src/active_history.rs index be282ff988d..590e01f251b 100644 --- a/massa-execution-worker/src/active_history.rs +++ b/massa-execution-worker/src/active_history.rs @@ -5,7 +5,7 @@ use massa_ledger_exports::{ use massa_models::{ address::Address, amount::Amount, operation::OperationId, prehash::PreHashMap, slot::Slot, }; -use std::collections::{BTreeMap, VecDeque}; +use std::collections::VecDeque; #[derive(Default)] /// History of the outputs of recently executed slots. @@ -143,36 +143,8 @@ impl ActiveHistory { }) } - /// Traverse the whole history and return every deferred credit of `addr` _after_ `slot` (included). - /// - /// # Arguments - /// * `slot`: slot _after_ which we fetch the credits - /// * `addr`: address to fetch the credits from - #[allow(dead_code)] - pub fn fetch_deferred_credits_after( - &self, - slot: &Slot, - addr: &Address, - ) -> BTreeMap { - self.0 - .iter() - .flat_map(|output| { - output - .state_changes - .pos_changes - .deferred_credits - .0 - .range(slot..) 
- .filter_map(|(&slot, credits)| credits.get(addr).map(|&amount| (slot, amount))) - }) - .collect() - } - - /// Traverse the whole history and return every deferred credit _at_ `slot` - /// - /// # Arguments - /// * `slot`: slot _at_ which we fetch the credits - pub fn fetch_all_deferred_credits_at(&self, slot: &Slot) -> PreHashMap { + /// Gets all the deferred credits that will be credited at a given slot + pub fn get_all_deferred_credits_for(&self, slot: &Slot) -> PreHashMap { self.0 .iter() .filter_map(|output| { @@ -180,7 +152,7 @@ impl ActiveHistory { .state_changes .pos_changes .deferred_credits - .0 + .credits .get(slot) .cloned() }) @@ -188,6 +160,25 @@ impl ActiveHistory { .collect() } + /// Gets the deferred credits for a given address that will be credited at a given slot + pub(crate) fn get_adress_deferred_credit_for( + &self, + addr: &Address, + slot: &Slot, + ) -> Option { + for hist_item in self.0.iter().rev() { + if let Some(v) = hist_item + .state_changes + .pos_changes + .deferred_credits + .get_address_deferred_credit_for_slot(addr, slot) + { + return Some(v); + } + } + None + } + /// Gets the index of a slot in history pub fn get_slot_index(&self, slot: &Slot, thread_count: u8) -> SlotIndexPosition { let first_slot = match self.0.front() { diff --git a/massa-execution-worker/src/context.rs b/massa-execution-worker/src/context.rs index d13e2aeaee8..381970d2c88 100644 --- a/massa-execution-worker/src/context.rs +++ b/massa-execution-worker/src/context.rs @@ -31,7 +31,7 @@ use massa_pos_exports::PoSChanges; use parking_lot::RwLock; use rand::SeedableRng; use rand_xoshiro::Xoshiro256PlusPlus; -use std::collections::BTreeMap; +use std::collections::{BTreeMap, BTreeSet}; use std::sync::Arc; use tracing::debug; @@ -91,9 +91,6 @@ pub(crate) struct ExecutionContext { /// max gas for this execution pub max_gas: u64, - /// gas price of the execution - pub gas_price: Amount, - /// slot at which the execution happens pub slot: Slot, @@ -163,7 +160,6 @@ impl 
ExecutionContext { ), speculative_executed_ops: SpeculativeExecutedOps::new(final_state, active_history), max_gas: Default::default(), - gas_price: Default::default(), slot: Slot::new(0, 0), created_addr_index: Default::default(), created_event_index: Default::default(), @@ -250,7 +246,6 @@ impl ExecutionContext { config: ExecutionConfig, slot: Slot, max_gas: u64, - gas_price: Amount, call_stack: Vec, final_state: Arc>, active_history: Arc>, @@ -273,7 +268,6 @@ impl ExecutionContext { // return readonly context ExecutionContext { max_gas, - gas_price, slot, stack: call_stack, read_only: true, @@ -445,6 +439,11 @@ impl ExecutionContext { self.speculative_ledger.get_bytecode(address) } + /// gets the datastore keys of an address if it exists in the speculative ledger, or returns None + pub fn get_keys(&self, address: &Address) -> Option>> { + self.speculative_ledger.get_keys(address) + } + /// gets the data from a datastore entry of an address if it exists in the speculative ledger, or returns None pub fn get_data_entry(&self, address: &Address, key: &[u8]) -> Option> { self.speculative_ledger.get_data_entry(address, key) @@ -658,12 +657,22 @@ impl ExecutionContext { /// * `slot`: associated slot of the deferred credits to be executed /// * `credits`: deferred to be executed pub fn execute_deferred_credits(&mut self, slot: &Slot) { - let credits = self.speculative_roll_state.get_deferred_credits(slot); - for (addr, amount) in credits { - if let Err(e) = self.transfer_coins(None, Some(addr), amount, false) { + let executed_credits = self.speculative_roll_state.get_deferred_credits(slot); + + for (address, amount) in executed_credits { + self.speculative_roll_state + .added_changes + .deferred_credits + .credits + .entry(*slot) + .or_default() + .entry(address) + .and_modify(|credit_amount| *credit_amount = Amount::default()) + .or_default(); + if let Err(e) = self.transfer_coins(None, Some(address), amount, false) { debug!( "could not credit {} deferred coins to {} at 
slot {}: {}", - amount, addr, slot, e + amount, address, slot, e ); } } diff --git a/massa-execution-worker/src/execution.rs b/massa-execution-worker/src/execution.rs index d690d799640..c2bf27ad1f8 100644 --- a/massa-execution-worker/src/execution.rs +++ b/massa-execution-worker/src/execution.rs @@ -232,9 +232,8 @@ impl ExecutionState { // get operation ID let operation_id = operation.id; - // compute fee from (op.max_gas * op.gas_price + op.fee) - let op_fees = operation.get_total_fee(); - let new_block_credits = block_credits.saturating_add(op_fees); + // Add fee from operation. + let new_block_credits = block_credits.saturating_add(operation.content.fee); let context_snapshot; { @@ -248,9 +247,11 @@ impl ExecutionState { )); } - // debit the fee and coins from the operation sender + // debit the fee from the operation sender // fail execution if there are not enough coins - if let Err(err) = context.transfer_coins(Some(sender_addr), None, op_fees, false) { + if let Err(err) = + context.transfer_coins(Some(sender_addr), None, operation.content.fee, false) + { return Err(ExecutionError::IncludeOperationError(format!( "could not spend fees: {}", err @@ -268,9 +269,6 @@ impl ExecutionState { // save a snapshot of the context to revert any further changes on error context_snapshot = context.get_snapshot(); - // set the context gas price to match the one defined in the operation - context.gas_price = operation.get_gas_price(); - // set the context max gas to match the one defined in the operation context.max_gas = operation.get_gas_usage(); @@ -630,11 +628,10 @@ impl ExecutionState { ) -> Result<(), ExecutionError> { // prepare execution context let context_snapshot; - let (bytecode, data): (Vec, &str) = { + let bytecode: Vec = { let mut context = context_guard!(self); context_snapshot = context.get_snapshot(); context.max_gas = message.max_gas; - context.gas_price = message.gas_price; context.creator_address = None; context.stack = vec![ ExecutionStackElement { @@ 
-653,9 +650,9 @@ impl ExecutionState { // If there is no target bytecode or if message data is invalid, // reimburse sender with coins and quit - let (bytecode, data) = match (bytecode, std::str::from_utf8(&message.data)) { - (Some(bc), Ok(d)) => (bc, d), - (bc, _d) => { + let bytecode = match bytecode { + Some(bc) => bc, + bc => { let err = if bc.is_none() { ExecutionError::RuntimeError("no target bytecode found".into()) } else { @@ -683,7 +680,7 @@ impl ExecutionState { return Err(err); } - (bytecode, data) + bytecode }; // run the target function @@ -691,7 +688,7 @@ impl ExecutionState { &bytecode, message.max_gas, &message.handler, - data, + &message.data, &*self.execution_interface, ) { // execution failed: reset context to snapshot and reimburse sender @@ -1025,7 +1022,6 @@ impl ExecutionState { self.config.clone(), slot, req.max_gas, - req.simulated_gas_price, req.call_stack, self.final_state.clone(), self.active_history.clone(), @@ -1129,7 +1125,13 @@ impl ExecutionState { addr: &Address, ) -> (BTreeSet>, BTreeSet>) { // here, get the final keys from the final ledger, and make a copy of it for the candidate list - let final_keys = self.final_state.read().ledger.get_datastore_keys(addr); + // let final_keys = final_state.read().ledger.get_datastore_keys(addr); + let final_keys = self + .final_state + .read() + .ledger + .get_datastore_keys(addr) + .unwrap_or_default(); let mut candidate_keys = final_keys.clone(); // here, traverse the history from oldest to newest, applying additions and deletions diff --git a/massa-execution-worker/src/interface_impl.rs b/massa-execution-worker/src/interface_impl.rs index fa07a94df10..4bdd73be77d 100644 --- a/massa-execution-worker/src/interface_impl.rs +++ b/massa-execution-worker/src/interface_impl.rs @@ -16,6 +16,7 @@ use massa_models::{ use massa_sc_runtime::{Interface, InterfaceClone}; use parking_lot::Mutex; use rand::Rng; +use std::collections::BTreeSet; use std::str::FromStr; use std::sync::Arc; use tracing::debug; 
@@ -174,6 +175,32 @@ impl Interface for InterfaceImpl { } } + /// Get the datastore keys (aka entries) for a given address + /// + /// # Returns + /// A list of keys (keys are byte arrays) + fn get_keys(&self) -> Result>> { + let context = context_guard!(self); + let addr = context.get_current_address()?; + match context.get_keys(&addr) { + Some(value) => Ok(value), + _ => bail!("data entry not found"), + } + } + + /// Get the datastore keys (aka entries) for a given address + /// + /// # Returns + /// A list of keys (keys are byte arrays) + fn get_keys_for(&self, address: &str) -> Result>> { + let addr = &Address::from_str(address)?; + let context = context_guard!(self); + match context.get_keys(addr) { + Some(value) => Ok(value), + _ => bail!("data entry not found"), + } + } + /// Gets a datastore value by key for a given address. /// /// # Arguments @@ -182,10 +209,10 @@ impl Interface for InterfaceImpl { /// /// # Returns /// The datastore value matching the provided key, if found, otherwise an error. 
- fn raw_get_data_for(&self, address: &str, key: &str) -> Result> { + fn raw_get_data_for(&self, address: &str, key: &[u8]) -> Result> { let addr = &massa_models::address::Address::from_str(address)?; let context = context_guard!(self); - match context.get_data_entry(addr, key.as_bytes()) { + match context.get_data_entry(addr, key) { Some(value) => Ok(value), _ => bail!("data entry not found"), } @@ -199,10 +226,10 @@ impl Interface for InterfaceImpl { /// * address: string representation of the address /// * key: string key of the datastore entry to set /// * value: new value to set - fn raw_set_data_for(&self, address: &str, key: &str, value: &[u8]) -> Result<()> { + fn raw_set_data_for(&self, address: &str, key: &[u8], value: &[u8]) -> Result<()> { let addr = massa_models::address::Address::from_str(address)?; let mut context = context_guard!(self); - context.set_data_entry(&addr, key.as_bytes().to_vec(), value.to_vec())?; + context.set_data_entry(&addr, key.to_vec(), value.to_vec())?; Ok(()) } @@ -213,9 +240,9 @@ impl Interface for InterfaceImpl { /// * address: string representation of the address /// * key: string key of the datastore entry /// * value: value to append - fn raw_append_data_for(&self, address: &str, key: &str, value: &[u8]) -> Result<()> { + fn raw_append_data_for(&self, address: &str, key: &[u8], value: &[u8]) -> Result<()> { let addr = massa_models::address::Address::from_str(address)?; - context_guard!(self).append_data_entry(&addr, key.as_bytes().to_vec(), value.to_vec())?; + context_guard!(self).append_data_entry(&addr, key.to_vec(), value.to_vec())?; Ok(()) } @@ -225,9 +252,9 @@ impl Interface for InterfaceImpl { /// # Arguments /// * address: string representation of the address /// * key: string key of the datastore entry to delete - fn raw_delete_data_for(&self, address: &str, key: &str) -> Result<()> { + fn raw_delete_data_for(&self, address: &str, key: &[u8]) -> Result<()> { let addr = 
&massa_models::address::Address::from_str(address)?; - context_guard!(self).delete_data_entry(addr, key.as_bytes())?; + context_guard!(self).delete_data_entry(addr, key)?; Ok(()) } @@ -239,10 +266,10 @@ impl Interface for InterfaceImpl { /// /// # Returns /// true if the address exists and has the entry matching the provided key in its datastore, otherwise false - fn has_data_for(&self, address: &str, key: &str) -> Result { + fn has_data_for(&self, address: &str, key: &[u8]) -> Result { let addr = massa_models::address::Address::from_str(address)?; let context = context_guard!(self); - Ok(context.has_data_entry(&addr, key.as_bytes())) + Ok(context.has_data_entry(&addr, key)) } /// Gets a datastore value by key for the current address (top of the call stack). @@ -252,10 +279,10 @@ impl Interface for InterfaceImpl { /// /// # Returns /// The datastore value matching the provided key, if found, otherwise an error. - fn raw_get_data(&self, key: &str) -> Result> { + fn raw_get_data(&self, key: &[u8]) -> Result> { let context = context_guard!(self); let addr = context.get_current_address()?; - match context.get_data_entry(&addr, key.as_bytes()) { + match context.get_data_entry(&addr, key) { Some(data) => Ok(data), _ => bail!("data entry not found"), } @@ -269,10 +296,10 @@ impl Interface for InterfaceImpl { /// * address: string representation of the address /// * key: string key of the datastore entry to set /// * value: new value to set - fn raw_set_data(&self, key: &str, value: &[u8]) -> Result<()> { + fn raw_set_data(&self, key: &[u8], value: &[u8]) -> Result<()> { let mut context = context_guard!(self); let addr = context.get_current_address()?; - context.set_data_entry(&addr, key.as_bytes().to_vec(), value.to_vec())?; + context.set_data_entry(&addr, key.to_vec(), value.to_vec())?; Ok(()) } @@ -283,10 +310,10 @@ impl Interface for InterfaceImpl { /// * address: string representation of the address /// * key: string key of the datastore entry /// * value: value to 
append - fn raw_append_data(&self, key: &str, value: &[u8]) -> Result<()> { + fn raw_append_data(&self, key: &[u8], value: &[u8]) -> Result<()> { let mut context = context_guard!(self); let addr = context.get_current_address()?; - context.append_data_entry(&addr, key.as_bytes().to_vec(), value.to_vec())?; + context.append_data_entry(&addr, key.to_vec(), value.to_vec())?; Ok(()) } @@ -295,10 +322,10 @@ impl Interface for InterfaceImpl { /// /// # Arguments /// * key: string key of the datastore entry to delete - fn raw_delete_data(&self, key: &str) -> Result<()> { + fn raw_delete_data(&self, key: &[u8]) -> Result<()> { let mut context = context_guard!(self); let addr = context.get_current_address()?; - context.delete_data_entry(&addr, key.as_bytes())?; + context.delete_data_entry(&addr, key)?; Ok(()) } @@ -309,10 +336,10 @@ impl Interface for InterfaceImpl { /// /// # Returns /// true if the address exists and has the entry matching the provided key in its datastore, otherwise false - fn has_data(&self, key: &str) -> Result { + fn has_data(&self, key: &[u8]) -> Result { let context = context_guard!(self); let addr = context.get_current_address()?; - Ok(context.has_data_entry(&addr, key.as_bytes())) + Ok(context.has_data_entry(&addr, key)) } /// Get the operation datastore keys (aka entries). 
@@ -546,7 +573,7 @@ impl Interface for InterfaceImpl { /// * `validity_start`: Tuple containing the period and thread of the validity start slot /// * `validity_end`: Tuple containing the period and thread of the validity end slot /// * `max_gas`: Maximum gas for the message execution - /// * `gas_price`: Price of one gas unit + /// * `fee`: Fee to pay /// * `raw_coins`: Coins given by the sender /// * `data`: Message data fn send_message( @@ -556,7 +583,7 @@ impl Interface for InterfaceImpl { validity_start: (u64, u8), validity_end: (u64, u8), max_gas: u64, - gas_price: u64, + raw_fee: u64, raw_coins: u64, data: &[u8], ) -> Result<()> { @@ -571,20 +598,22 @@ impl Interface for InterfaceImpl { let emission_index = execution_context.created_message_index; let sender = execution_context.get_current_address()?; let coins = Amount::from_raw(raw_coins); + let fee = Amount::from_raw(raw_fee); execution_context.transfer_coins(Some(sender), None, coins, true)?; - execution_context.push_new_message(AsyncMessage { + execution_context.transfer_coins(Some(sender), None, fee, true)?; + execution_context.push_new_message(AsyncMessage::new_with_hash( emission_slot, emission_index, sender, - destination: Address::from_str(target_address)?, - handler: target_handler.to_string(), - validity_start: Slot::new(validity_start.0, validity_start.1), - validity_end: Slot::new(validity_end.0, validity_end.1), + Address::from_str(target_address)?, + target_handler.to_string(), max_gas, - gas_price: Amount::from_raw(gas_price), + fee, coins, - data: data.to_vec(), - }); + Slot::new(validity_start.0, validity_start.1), + Slot::new(validity_end.0, validity_end.1), + data.to_vec(), + )); execution_context.created_message_index += 1; Ok(()) } diff --git a/massa-execution-worker/src/lib.rs b/massa-execution-worker/src/lib.rs index a030a075ee8..d48babb3149 100644 --- a/massa-execution-worker/src/lib.rs +++ b/massa-execution-worker/src/lib.rs @@ -81,6 +81,7 @@ #![feature(is_sorted)] 
#![feature(map_try_insert)] #![feature(let_chains)] +#![feature(option_get_or_insert_default)] mod active_history; mod context; diff --git a/massa-execution-worker/src/speculative_ledger.rs b/massa-execution-worker/src/speculative_ledger.rs index 8d6a35a2fe3..8e5e30718b3 100644 --- a/massa-execution-worker/src/speculative_ledger.rs +++ b/massa-execution-worker/src/speculative_ledger.rs @@ -9,9 +9,10 @@ use crate::active_history::{ActiveHistory, HistorySearchResult}; use massa_execution_exports::ExecutionError; use massa_execution_exports::StorageCostsConstants; use massa_final_state::FinalState; -use massa_ledger_exports::{Applicable, LedgerChanges}; +use massa_ledger_exports::{Applicable, LedgerChanges, SetOrDelete, SetUpdateOrDelete}; use massa_models::{address::Address, amount::Amount}; use parking_lot::RwLock; +use std::collections::BTreeSet; use std::sync::Arc; use tracing::debug; @@ -360,6 +361,55 @@ impl SpeculativeLedger { Ok(()) } + /// Gets a copy of a datastore keys for a given address + /// + /// # Arguments + /// * `addr`: address to query + /// + /// # Returns + /// `Some(Vec>)` for found keys, `None` if the address does not exist. 
+ pub fn get_keys(&self, addr: &Address) -> Option>> { + let mut keys: Option>> = + self.final_state.read().ledger.get_datastore_keys(addr); + + // here, traverse the history from oldest to newest with added_changes at the end, applying additions and deletions + let active_history = self.active_history.read(); + let changes_iterator = active_history + .0 + .iter() + .map(|item| &item.state_changes.ledger_changes) + .chain(std::iter::once(&self.added_changes)); + for ledger_changes in changes_iterator { + match ledger_changes.get(addr) { + // address absent from the changes + None => (), + + // address ledger entry being reset to an absolute new list of keys + Some(SetUpdateOrDelete::Set(new_ledger_entry)) => { + keys = Some(new_ledger_entry.datastore.keys().cloned().collect()); + } + + // address ledger entry being updated + Some(SetUpdateOrDelete::Update(entry_updates)) => { + let ref_keys = keys.get_or_insert_default(); + for (ds_key, ds_update) in &entry_updates.datastore { + match ds_update { + SetOrDelete::Set(_) => ref_keys.insert(ds_key.clone()), + SetOrDelete::Delete => ref_keys.remove(ds_key), + }; + } + } + + // address ledger entry being deleted + Some(SetUpdateOrDelete::Delete) => { + keys = None; + } + } + } + + keys + } + /// Gets a copy of a datastore value for a given address and datastore key /// /// # Arguments diff --git a/massa-execution-worker/src/speculative_roll_state.rs b/massa-execution-worker/src/speculative_roll_state.rs index f3b1ecda2b8..7da56cb3035 100644 --- a/massa-execution-worker/src/speculative_roll_state.rs +++ b/massa-execution-worker/src/speculative_roll_state.rs @@ -24,7 +24,7 @@ pub(crate) struct SpeculativeRollState { active_history: Arc>, /// List of changes to the state after settling roll sell/buy - added_changes: PoSChanges, + pub(crate) added_changes: PoSChanges, } impl SpeculativeRollState { @@ -118,17 +118,8 @@ impl SpeculativeRollState { ))); } - let cur_cycle = slot.get_cycle(periods_per_cycle); - - // remove the 
rolls - let current_rolls = self - .added_changes - .roll_changes - .entry(*seller_addr) - .or_insert_with(|| owned_count); - *current_rolls = owned_count.saturating_sub(roll_count); - // compute deferred credit slot + let cur_cycle = slot.get_cycle(periods_per_cycle); let target_slot = Slot::new_last_of_cycle( cur_cycle .checked_add(3) @@ -136,16 +127,23 @@ impl SpeculativeRollState { periods_per_cycle, thread_count, ) - .expect("unexepected slot overflot in try_sell_rolls"); + .expect("unexpected slot overflow in try_sell_rolls"); - // add deferred reimbursement corresponding to the sold rolls value - let credit = self - .added_changes + // Note 1: Deferred credits are stored as absolute value + let new_deferred_credits = self + .get_address_deferred_credit_for_slot(seller_addr, &target_slot) + .unwrap_or_default() + .saturating_add(roll_price.saturating_mul_u64(roll_count)); + + // Remove the rolls + self.added_changes + .roll_changes + .insert(*seller_addr, owned_count.saturating_sub(roll_count)); + + // Add deferred credits (reimbursement) corresponding to the sold rolls value + self.added_changes .deferred_credits - .0 - .entry(target_slot) - .or_insert_with(PreHashMap::default); - credit.insert(*seller_addr, roll_price.saturating_mul_u64(roll_count)); + .insert(*seller_addr, target_slot, new_deferred_credits); Ok(()) } @@ -218,17 +216,14 @@ impl SpeculativeRollState { if owned_count != 0 { if let Some(amount) = roll_price.checked_mul_u64(owned_count) { target_credits.insert(addr, amount); - self.added_changes - .roll_changes - .entry(addr) - .or_insert_with(|| 0); + self.added_changes.roll_changes.insert(addr, 0); } } } } if !target_credits.is_empty() { let mut credits = DeferredCredits::default(); - credits.0.insert(target_slot, target_credits); + credits.credits.insert(target_slot, target_credits); self.added_changes.deferred_credits.nested_extend(credits); } } @@ -242,7 +237,12 @@ impl SpeculativeRollState { let mut res: HashMap = HashMap::default(); // 
get added values - for (slot, addr_amount) in self.added_changes.deferred_credits.0.range(min_slot..) { + for (slot, addr_amount) in self + .added_changes + .deferred_credits + .credits + .range(min_slot..) + { if let Some(amount) = addr_amount.get(address) { let _ = res.try_insert(*slot, *amount); }; @@ -256,7 +256,7 @@ impl SpeculativeRollState { .state_changes .pos_changes .deferred_credits - .0 + .credits .range(min_slot..) { if let Some(amount) = addr_amount.get(address) { @@ -269,7 +269,12 @@ impl SpeculativeRollState { // get values from final state { let final_state = self.final_state.read(); - for (slot, addr_amount) in final_state.pos_state.deferred_credits.0.range(min_slot..) { + for (slot, addr_amount) in final_state + .pos_state + .deferred_credits + .credits + .range(min_slot..) + { if let Some(amount) = addr_amount.get(address) { let _ = res.try_insert(*slot, *amount); }; @@ -279,6 +284,40 @@ impl SpeculativeRollState { res.into_iter().filter(|(_s, v)| !v.is_zero()).collect() } + /// Gets the deferred credits for a given address that will be credited at a given slot + fn get_address_deferred_credit_for_slot(&self, addr: &Address, slot: &Slot) -> Option { + // search in the added changes + if let Some(v) = self + .added_changes + .deferred_credits + .get_address_deferred_credit_for_slot(addr, slot) + { + return Some(v); + } + + // search in the history + if let Some(v) = self + .active_history + .read() + .get_adress_deferred_credit_for(addr, slot) + { + return Some(v); + } + + // search in the final state + if let Some(v) = self + .final_state + .read() + .pos_state + .deferred_credits + .get_address_deferred_credit_for_slot(addr, slot) + { + return Some(v); + } + + None + } + /// Get the production statistics for a given address at a given cycle. 
pub fn get_address_cycle_infos( &self, @@ -466,11 +505,11 @@ impl SpeculativeRollState { credits.extend( self.active_history .read() - .fetch_all_deferred_credits_at(slot), + .get_all_deferred_credits_for(slot), ); // added deferred credits - if let Some(creds) = self.added_changes.deferred_credits.0.get(slot) { + if let Some(creds) = self.added_changes.deferred_credits.credits.get(slot) { credits.extend(creds.clone()); } diff --git a/massa-execution-worker/src/tests/mock.rs b/massa-execution-worker/src/tests/mock.rs index 7e49192bce6..9a8c224b4bb 100644 --- a/massa-execution-worker/src/tests/mock.rs +++ b/massa-execution-worker/src/tests/mock.rs @@ -95,8 +95,7 @@ pub fn get_sample_state() -> Result<(Arc>, NamedTempFile, Tem }; let (_, selector_controller) = start_selector_worker(SelectorConfig::default()) .expect("could not start selector controller"); - let mut final_state = - FinalState::new(cfg, Box::new(ledger), selector_controller.clone()).unwrap(); + let mut final_state = FinalState::new(cfg, Box::new(ledger), selector_controller).unwrap(); final_state.compute_initial_draws().unwrap(); final_state.pos_state.create_initial_cycle(); Ok((Arc::new(RwLock::new(final_state)), tempfile, tempdir)) diff --git a/massa-execution-worker/src/tests/mod.rs b/massa-execution-worker/src/tests/mod.rs index 48b5245b0f1..7ab4452af2d 100644 --- a/massa-execution-worker/src/tests/mod.rs +++ b/massa-execution-worker/src/tests/mod.rs @@ -2,3 +2,4 @@ mod mock; mod scenarios_mandatories; +mod tests_active_history; diff --git a/massa-execution-worker/src/tests/scenarios_mandatories.rs b/massa-execution-worker/src/tests/scenarios_mandatories.rs index fdea2341088..531e0316770 100644 --- a/massa-execution-worker/src/tests/scenarios_mandatories.rs +++ b/massa-execution-worker/src/tests/scenarios_mandatories.rs @@ -19,6 +19,7 @@ use massa_models::{ use massa_signature::KeyPair; use massa_storage::Storage; use massa_time::MassaTime; +use num::rational::Ratio; use serial_test::serial; use 
std::{ cmp::Reverse, collections::BTreeMap, collections::HashMap, str::FromStr, time::Duration, @@ -65,7 +66,6 @@ fn test_readonly_execution() { let mut res = controller .execute_readonly_request(ReadOnlyExecutionRequest { max_gas: 1_000_000, - simulated_gas_price: Amount::from_mantissa_scale(1_000_000, 0), call_stack: vec![], target: ReadOnlyExecutionTarget::BytecodeExecution( include_bytes!("./wasm/event_test.wasm").to_vec(), @@ -135,11 +135,15 @@ fn test_nested_call_gas_usage() { // get random keypair let keypair = KeyPair::from_str("S1JJeHiZv1C1zZN5GLFcbz6EXYiccmUPLkYuDFA3kayjxP39kFQ").unwrap(); - // load bytecode - // you can check the source code of the following wasm file in massa-sc-examples + // load bytecodes + // you can check the source code of the following wasm file in massa-unit-tests-src let bytecode = include_bytes!("./wasm/nested_call.wasm"); + let datastore_bytecode = include_bytes!("./wasm/test.wasm").to_vec(); + let mut datastore = BTreeMap::new(); + datastore.insert(b"smart-contract".to_vec(), datastore_bytecode); + // create the block containing the smart contract execution operation - let operation = create_execute_sc_operation(&keypair, bytecode).unwrap(); + let operation = create_execute_sc_operation(&keypair, bytecode, datastore).unwrap(); storage.store_operations(vec![operation.clone()]); let block = create_block(KeyPair::generate(), vec![operation], Slot::new(1, 0)).unwrap(); // store the block in storage @@ -159,7 +163,7 @@ fn test_nested_call_gas_usage() { std::thread::sleep(Duration::from_millis(10)); // length of the sub contract test.wasm - let bytecode_sub_contract_len = 3715; + let bytecode_sub_contract_len = 4374; let balance = sample_state .read() @@ -197,8 +201,8 @@ fn test_nested_call_gas_usage() { 10000000, Amount::from_str("0").unwrap(), Address::from_str(&address).unwrap(), - String::from("test"), address, + b"test".to_vec(), ) .unwrap(); // Init new storage for this block @@ -274,12 +278,15 @@ fn 
send_and_receive_async_message() { init_execution_worker(&exec_cfg, &storage, controller.clone()); // keypair associated to thread 0 let keypair = KeyPair::from_str("S1JJeHiZv1C1zZN5GLFcbz6EXYiccmUPLkYuDFA3kayjxP39kFQ").unwrap(); - // load send_message bytecode - // you can check the source code of the following wasm file in massa-sc-examples + // load bytecodes + // you can check the source code of the following wasm file in massa-unit-tests-src let bytecode = include_bytes!("./wasm/send_message.wasm"); + let datastore_bytecode = include_bytes!("./wasm/receive_message.wasm").to_vec(); + let mut datastore = BTreeMap::new(); + datastore.insert(b"smart-contract".to_vec(), datastore_bytecode); // create the block contaning the smart contract execution operation - let operation = create_execute_sc_operation(&keypair, bytecode).unwrap(); + let operation = create_execute_sc_operation(&keypair, bytecode, datastore).unwrap(); storage.store_operations(vec![operation.clone()]); let block = create_block(KeyPair::generate(), vec![operation], Slot::new(1, 0)).unwrap(); // store the block in storage @@ -309,7 +316,7 @@ fn send_and_receive_async_message() { // match the events assert!(events.len() == 1, "One event was expected"); - assert_eq!(events[0].data, "message received: hello my good friend!"); + assert_eq!(events[0].data, "message correctly received: 42,42,42,42"); // stop the execution controller manager.stop(); } @@ -458,14 +465,22 @@ pub fn roll_buy() { #[test] #[serial] pub fn roll_sell() { + // Try to sell 10 rolls (operation 1) then 1 rolls (operation 2) + // Check for resulting roll count + resulting deferred credits + // setup the period duration - let exec_cfg = ExecutionConfig { + let mut exec_cfg = ExecutionConfig { t0: 100.into(), periods_per_cycle: 2, thread_count: 2, cursor_delay: 0.into(), ..Default::default() }; + // turn off roll selling on missed block opportunities + // otherwise balance will be credited with those sold roll (and we need to check the 
balance for + // if the deferred credits are reimbursed + exec_cfg.max_miss_ratio = Ratio::new(1, 1); + // get a sample final state let (sample_state, _keep_file, _keep_dir) = get_sample_state().unwrap(); @@ -482,20 +497,48 @@ pub fn roll_sell() { // generate the keypair and its corresponding address let keypair = KeyPair::from_str("S1JJeHiZv1C1zZN5GLFcbz6EXYiccmUPLkYuDFA3kayjxP39kFQ").unwrap(); let address = Address::from_public_key(&keypair.get_public_key()); - // create the operation - let operation = Operation::new_wrapped( + + // get initial balance + let balance_initial = sample_state.read().ledger.get_balance(&address).unwrap(); + + // get initial roll count + let roll_count_initial = sample_state.read().pos_state.get_rolls_for(&address); + let roll_sell_1 = 10; + let roll_sell_2 = 1; + + // create operation 1 + let operation1 = Operation::new_wrapped( Operation { fee: Amount::zero(), expire_period: 10, - op: OperationType::RollSell { roll_count: 10 }, + op: OperationType::RollSell { + roll_count: roll_sell_1, + }, + }, + OperationSerializer::new(), + &keypair, + ) + .unwrap(); + let operation2 = Operation::new_wrapped( + Operation { + fee: Amount::zero(), + expire_period: 10, + op: OperationType::RollSell { + roll_count: roll_sell_2, + }, }, OperationSerializer::new(), &keypair, ) .unwrap(); // create the block containing the roll buy operation - storage.store_operations(vec![operation.clone()]); - let block = create_block(KeyPair::generate(), vec![operation], Slot::new(1, 0)).unwrap(); + storage.store_operations(vec![operation1.clone(), operation2.clone()]); + let block = create_block( + KeyPair::generate(), + vec![operation1, operation2], + Slot::new(1, 0), + ) + .unwrap(); // store the block in storage storage.store_block(block.clone()); // set the block as final so the sell and credits are processed @@ -508,18 +551,51 @@ pub fn roll_sell() { Default::default(), block_storage.clone(), ); - std::thread::sleep(Duration::from_millis(350)); + 
std::thread::sleep(Duration::from_millis(1000)); // check roll count deferred credits and candidate balance of the seller address let sample_read = sample_state.read(); let mut credits = PreHashMap::default(); - credits.insert(address, Amount::from_str("1000").unwrap()); - assert_eq!(sample_read.pos_state.get_rolls_for(&address), 90); + let roll_remaining = roll_count_initial - roll_sell_1 - roll_sell_2; + let roll_sold = roll_sell_1 + roll_sell_2; + credits.insert( + address, + exec_cfg.roll_price.checked_mul_u64(roll_sold).unwrap(), + ); + + assert_eq!( + sample_read.pos_state.get_rolls_for(&address), + roll_remaining + ); assert_eq!( sample_read .pos_state .get_deferred_credits_at(&Slot::new(7, 1)), credits ); + + // Check that deferred credit are reimbursed + let credits = PreHashMap::default(); + assert_eq!( + sample_read + .pos_state + .get_deferred_credits_at(&Slot::new(8, 1)), + credits + ); + + // Now check balance + let balances = controller.get_final_and_candidate_balance(&[address]); + let candidate_balance = balances.get(0).unwrap().1.unwrap(); + + assert_eq!( + candidate_balance, + exec_cfg + .roll_price + .checked_mul_u64(roll_sell_1 + roll_sell_2) + .unwrap() + .checked_add(balance_initial) + .unwrap() + ); + // stop the execution controller manager.stop(); } @@ -550,10 +626,10 @@ fn sc_execution_error() { // keypair associated to thread 0 let keypair = KeyPair::from_str("S1JJeHiZv1C1zZN5GLFcbz6EXYiccmUPLkYuDFA3kayjxP39kFQ").unwrap(); // load bytecode - // you can check the source code of the following wasm file in massa-sc-examples + // you can check the source code of the following wasm file in massa-unit-tests-src let bytecode = include_bytes!("./wasm/execution_error.wasm"); // create the block contaning the erroneous smart contract execution operation - let operation = create_execute_sc_operation(&keypair, bytecode).unwrap(); + let operation = create_execute_sc_operation(&keypair, bytecode, BTreeMap::default()).unwrap(); 
storage.store_operations(vec![operation.clone()]); let block = create_block(KeyPair::generate(), vec![operation], Slot::new(1, 0)).unwrap(); // store the block in storage @@ -609,14 +685,12 @@ fn sc_datastore() { // keypair associated to thread 0 let keypair = KeyPair::from_str("S1JJeHiZv1C1zZN5GLFcbz6EXYiccmUPLkYuDFA3kayjxP39kFQ").unwrap(); // load bytecode - // you can check the source code of the following wasm file in massa-sc-examples + // you can check the source code of the following wasm file in massa-unit-tests-src let bytecode = include_bytes!("./wasm/datastore.wasm"); - let datastore = BTreeMap::from([(vec![65, 66], vec![255]), (vec![9], vec![10, 11])]); // create the block contaning the erroneous smart contract execution operation - let operation = - create_execute_sc_operation_with_datastore(&keypair, bytecode, datastore).unwrap(); + let operation = create_execute_sc_operation(&keypair, bytecode, datastore).unwrap(); storage.store_operations(vec![operation.clone()]); let block = create_block(KeyPair::generate(), vec![operation], Slot::new(1, 0)).unwrap(); // store the block in storage @@ -667,11 +741,15 @@ fn set_bytecode_error() { init_execution_worker(&exec_cfg, &storage, controller.clone()); // keypair associated to thread 0 let keypair = KeyPair::from_str("S1JJeHiZv1C1zZN5GLFcbz6EXYiccmUPLkYuDFA3kayjxP39kFQ").unwrap(); - // load bytecode - // you can check the source code of the following wasm file in massa-sc-examples + // load bytecodes + // you can check the source code of the following wasm file in massa-unit-tests-src let bytecode = include_bytes!("./wasm/set_bytecode_fail.wasm"); + let datastore_bytecode = include_bytes!("./wasm/smart-contract.wasm").to_vec(); + let mut datastore = BTreeMap::new(); + datastore.insert(b"smart-contract".to_vec(), datastore_bytecode); + // create the block contaning the erroneous smart contract execution operation - let operation = create_execute_sc_operation(&keypair, bytecode).unwrap(); + let operation = 
create_execute_sc_operation(&keypair, bytecode, datastore).unwrap(); storage.store_operations(vec![operation.clone()]); let block = create_block(KeyPair::generate(), vec![operation], Slot::new(1, 0)).unwrap(); // store the block in storage @@ -725,11 +803,13 @@ fn datastore_manipulations() { // keypair associated to thread 0 let keypair = KeyPair::from_str("S1JJeHiZv1C1zZN5GLFcbz6EXYiccmUPLkYuDFA3kayjxP39kFQ").unwrap(); + // let address = Address::from_public_key(&keypair.get_public_key()); + // load bytecode - // you can check the source code of the following wasm file in massa-sc-examples + // you can check the source code of the following wasm file in massa-unit-tests-src let bytecode = include_bytes!("./wasm/datastore_manipulations.wasm"); // create the block contaning the erroneous smart contract execution operation - let operation = create_execute_sc_operation(&keypair, bytecode).unwrap(); + let operation = create_execute_sc_operation(&keypair, bytecode, BTreeMap::default()).unwrap(); storage.store_operations(vec![operation.clone()]); let block = create_block(KeyPair::generate(), vec![operation], Slot::new(1, 0)).unwrap(); // store the block in storage @@ -746,8 +826,29 @@ fn datastore_manipulations() { .into(), ); + let events = controller.get_filtered_sc_output_event(EventFilter::default()); + // match the events + assert!(!events.is_empty(), "2 events were expected"); + let key = "TEST".to_string(); + // in ASC, string are utf16 encoded + let s16 = key.encode_utf16(); + let s16_as_bytes: Vec = s16.map(|item| item.to_ne_bytes()).flatten().collect(); + // in SC, we use the builtin string formatting (using `keys: ${keys}`) & replicate it in Rust + let keys_str: String = s16_as_bytes + .iter() + .map(|b| format!("{}", b)) + .collect::>() + .join(","); + assert!(events[0].data.contains(&format!("keys: {}", keys_str))); + assert!(events[1].data.contains(&format!("keys2: {}", keys_str))); + // Length of the value left in the datastore. 
See sources for more context. - let value_len = 10; + let value_len = "TEST_VALUE" + .to_string() + .encode_utf16() + .size_hint() + .1 + .unwrap() as u64; assert_eq!( sample_state .read() @@ -808,7 +909,8 @@ fn events_from_switching_blockclique() { let keypair = KeyPair::from_str("S1kEBGgxHFBdsNC4HtRHhsZsB5irAtYHEmuAKATkfiomYmj58tm").unwrap(); let event_test_data = include_bytes!("./wasm/event_test.wasm"); - let operation = create_execute_sc_operation(&keypair, event_test_data).unwrap(); + let operation = + create_execute_sc_operation(&keypair, event_test_data, BTreeMap::default()).unwrap(); let blockclique_block = create_block(keypair, vec![operation.clone()], blockclique_block_slot).unwrap(); blockclique_blocks.insert(blockclique_block_slot, blockclique_block.id); @@ -834,7 +936,8 @@ fn events_from_switching_blockclique() { let keypair = KeyPair::from_str("S1JJeHiZv1C1zZN5GLFcbz6EXYiccmUPLkYuDFA3kayjxP39kFQ").unwrap(); let event_test_data = include_bytes!("./wasm/event_test.wasm"); - let operation = create_execute_sc_operation(&keypair, event_test_data).unwrap(); + let operation = + create_execute_sc_operation(&keypair, event_test_data, BTreeMap::default()).unwrap(); let blockclique_block = create_block(keypair, vec![operation.clone()], blockclique_block_slot).unwrap(); blockclique_blocks.insert(blockclique_block_slot, blockclique_block.id); @@ -863,41 +966,16 @@ fn events_from_switching_blockclique() { fn create_execute_sc_operation( sender_keypair: &KeyPair, data: &[u8], -) -> Result { - let op = OperationType::ExecuteSC { - data: data.to_vec(), - max_gas: 100_000, - gas_price: Amount::from_mantissa_scale(1, 0), - datastore: BTreeMap::new(), - }; - let op = Operation::new_wrapped( - Operation { - fee: Amount::zero(), - expire_period: 10, - op, - }, - OperationSerializer::new(), - sender_keypair, - )?; - Ok(op) -} - -/// Create an operation for the given sender with `data` as bytecode. -/// Return a result that should be unwrapped in the root `#[test]` routine. 
-fn create_execute_sc_operation_with_datastore( - sender_keypair: &KeyPair, - data: &[u8], datastore: Datastore, ) -> Result { let op = OperationType::ExecuteSC { data: data.to_vec(), max_gas: 100_000, - gas_price: Amount::from_mantissa_scale(1, 0), datastore, }; let op = Operation::new_wrapped( Operation { - fee: Amount::zero(), + fee: Amount::from_mantissa_scale(100000, 0), expire_period: 10, op, }, @@ -912,22 +990,21 @@ fn create_execute_sc_operation_with_datastore( fn create_call_sc_operation( sender_keypair: &KeyPair, max_gas: u64, - gas_price: Amount, + fee: Amount, target_addr: Address, target_func: String, - param: String, + param: Vec, ) -> Result { let op = OperationType::CallSC { max_gas, target_addr, coins: Amount::from_str("0").unwrap(), - gas_price, target_func, param, }; let op = Operation::new_wrapped( Operation { - fee: Amount::zero(), + fee, expire_period: 10, op, }, @@ -963,10 +1040,10 @@ fn sc_builtins() { // keypair associated to thread 0 let keypair = KeyPair::from_str("S1JJeHiZv1C1zZN5GLFcbz6EXYiccmUPLkYuDFA3kayjxP39kFQ").unwrap(); // load bytecode - // you can check the source code of the following wasm file in massa-sc-examples + // you can check the source code of the following wasm file in massa-unit-tests-src let bytecode = include_bytes!("./wasm/use_builtins.wasm"); // create the block contaning the erroneous smart contract execution operation - let operation = create_execute_sc_operation(&keypair, bytecode).unwrap(); + let operation = create_execute_sc_operation(&keypair, bytecode, BTreeMap::default()).unwrap(); storage.store_operations(vec![operation.clone()]); let block = create_block(KeyPair::generate(), vec![operation], Slot::new(1, 0)).unwrap(); // store the block in storage diff --git a/massa-execution-worker/src/tests/tests_active_history.rs b/massa-execution-worker/src/tests/tests_active_history.rs new file mode 100644 index 00000000000..6e9485d3b58 --- /dev/null +++ b/massa-execution-worker/src/tests/tests_active_history.rs @@ 
-0,0 +1,67 @@ +use crate::active_history::ActiveHistory; +use massa_execution_exports::ExecutionOutput; +use massa_models::slot::Slot; +use std::collections::{BTreeMap, VecDeque}; + +use massa_final_state::StateChanges; +use massa_hash::Hash; +use massa_models::address::Address; +use massa_models::amount::Amount; +use massa_models::prehash::{CapacityAllocator, PreHashMap}; +use massa_pos_exports::{DeferredCredits, PoSChanges}; +use serial_test::serial; + +#[test] +#[serial] +fn test_active_history_deferred_credits() { + let slot1 = Slot::new(2, 2); + let slot2 = Slot::new(4, 11); + + let addr1 = Address(Hash::compute_from("A1".as_bytes())); + let addr2 = Address(Hash::compute_from("A2".as_bytes())); + + let amount_a1_s1 = Amount::from_raw(500); + let amount_a2_s1 = Amount::from_raw(2702); + let amount_a1_s2 = Amount::from_raw(37); + let amount_a2_s2 = Amount::from_raw(3); + + let mut ph1 = PreHashMap::with_capacity(2); + ph1.insert(addr1, amount_a1_s1); + ph1.insert(addr2, amount_a2_s1); + let mut ph2 = PreHashMap::with_capacity(2); + ph2.insert(addr1, amount_a1_s2); + ph2.insert(addr2, amount_a2_s2); + + let mut credits = DeferredCredits::default(); + credits.credits = BTreeMap::from([(slot1, ph1), (slot2, ph2)]); + + let exec_output_1 = ExecutionOutput { + slot: Slot::new(1, 0), + block_id: None, + state_changes: StateChanges { + ledger_changes: Default::default(), + async_pool_changes: Default::default(), + pos_changes: PoSChanges { + seed_bits: Default::default(), + roll_changes: Default::default(), + production_stats: Default::default(), + deferred_credits: credits, + }, + executed_ops_changes: Default::default(), + }, + events: Default::default(), + }; + + let active_history = ActiveHistory { + 0: VecDeque::from([exec_output_1]), + }; + + assert_eq!( + active_history.get_adress_deferred_credit_for(&addr1, &slot2), + Some(amount_a1_s2) + ); + + let deferred_credit_for_slot1 = active_history.get_all_deferred_credits_for(&slot1); + 
assert_eq!(deferred_credit_for_slot1.get(&addr1), Some(&amount_a1_s1)); + assert_eq!(deferred_credit_for_slot1.get(&addr2), Some(&amount_a2_s1)); +} diff --git a/massa-execution-worker/src/tests/wasm/datastore.wasm b/massa-execution-worker/src/tests/wasm/datastore.wasm index 335474e993b..da8b62742ed 100644 Binary files a/massa-execution-worker/src/tests/wasm/datastore.wasm and b/massa-execution-worker/src/tests/wasm/datastore.wasm differ diff --git a/massa-execution-worker/src/tests/wasm/datastore_manipulations.wasm b/massa-execution-worker/src/tests/wasm/datastore_manipulations.wasm index 31b29483323..6475257b617 100644 Binary files a/massa-execution-worker/src/tests/wasm/datastore_manipulations.wasm and b/massa-execution-worker/src/tests/wasm/datastore_manipulations.wasm differ diff --git a/massa-execution-worker/src/tests/wasm/execution_error.wasm b/massa-execution-worker/src/tests/wasm/execution_error.wasm index e71ef62a10b..e9f83a2fe7c 100644 Binary files a/massa-execution-worker/src/tests/wasm/execution_error.wasm and b/massa-execution-worker/src/tests/wasm/execution_error.wasm differ diff --git a/massa-execution-worker/src/tests/wasm/nested_call.wasm b/massa-execution-worker/src/tests/wasm/nested_call.wasm index 20ae56c045a..826e7e2f577 100644 Binary files a/massa-execution-worker/src/tests/wasm/nested_call.wasm and b/massa-execution-worker/src/tests/wasm/nested_call.wasm differ diff --git a/massa-execution-worker/src/tests/wasm/receive_message.wasm b/massa-execution-worker/src/tests/wasm/receive_message.wasm index 7746d8bc8ec..7adbe83fb28 100644 Binary files a/massa-execution-worker/src/tests/wasm/receive_message.wasm and b/massa-execution-worker/src/tests/wasm/receive_message.wasm differ diff --git a/massa-execution-worker/src/tests/wasm/send_message.wasm b/massa-execution-worker/src/tests/wasm/send_message.wasm index 4786d2c5b45..e5ab00c79d8 100644 Binary files a/massa-execution-worker/src/tests/wasm/send_message.wasm and 
b/massa-execution-worker/src/tests/wasm/send_message.wasm differ diff --git a/massa-execution-worker/src/tests/wasm/set_bytecode_fail.wasm b/massa-execution-worker/src/tests/wasm/set_bytecode_fail.wasm index 7f772e51809..a1299b0263c 100644 Binary files a/massa-execution-worker/src/tests/wasm/set_bytecode_fail.wasm and b/massa-execution-worker/src/tests/wasm/set_bytecode_fail.wasm differ diff --git a/massa-execution-worker/src/tests/wasm/test.wasm b/massa-execution-worker/src/tests/wasm/test.wasm index 034193bb7d8..b5bf9cede2b 100644 Binary files a/massa-execution-worker/src/tests/wasm/test.wasm and b/massa-execution-worker/src/tests/wasm/test.wasm differ diff --git a/massa-factory-exports/src/types.rs b/massa-factory-exports/src/types.rs index 591b7ac4ea9..35a2675ce6f 100644 --- a/massa-factory-exports/src/types.rs +++ b/massa-factory-exports/src/types.rs @@ -1,4 +1,4 @@ -use massa_consensus_exports::ConsensusCommandSender; +use massa_consensus_exports::ConsensusController; use massa_models::block::Block; use massa_pool_exports::PoolController; use massa_pos_exports::SelectorController; @@ -15,7 +15,7 @@ pub struct FactoryChannels { /// selector controller to get draws pub selector: Box, /// consensus controller - pub consensus: ConsensusCommandSender, + pub consensus: Box, /// pool controller pub pool: Box, /// protocol controller diff --git a/massa-factory-worker/Cargo.toml b/massa-factory-worker/Cargo.toml index 86afe9b3336..dc3b8b42138 100644 --- a/massa-factory-worker/Cargo.toml +++ b/massa-factory-worker/Cargo.toml @@ -22,18 +22,17 @@ massa_wallet = { path = "../massa-wallet" } massa_hash = { path = "../massa-hash" } massa_pos_exports = { path = "../massa-pos-exports" } massa_serialization = { path = "../massa-serialization" } -massa_consensus_exports = { path = "../massa-consensus-exports" } massa_pool_exports = { path = "../massa-pool-exports" } [dev-dependencies] serial_test = "0.9" massa_protocol_exports = { path = "../massa-protocol-exports", 
features=["testing"] } +massa_consensus_exports = { path = "../massa-consensus-exports", features = ["testing"] } massa_factory_exports = { path = "../massa-factory-exports", features=["testing"] } massa_wallet = { path = "../massa-wallet", features=["testing"] } massa_pos_exports = { path = "../massa-pos-exports", features=["testing"] } -massa_consensus_exports = { path = "../massa-consensus-exports", features=["testing"] } massa_pool_exports = { path = "../massa-pool-exports", features=["testing"] } [features] sandbox = [] -testing = ["massa_factory_exports/testing", "massa_pos_exports/testing", "massa_pool_exports/testing", "massa_consensus_exports/testing", "massa_protocol_exports/testing", "massa_wallet/testing"] +testing = ["massa_factory_exports/testing", "massa_pos_exports/testing", "massa_pool_exports/testing", "massa_protocol_exports/testing", "massa_wallet/testing"] diff --git a/massa-factory-worker/src/block_factory.rs b/massa-factory-worker/src/block_factory.rs index a0a094d0f7f..cc690b78be2 100644 --- a/massa-factory-worker/src/block_factory.rs +++ b/massa-factory-worker/src/block_factory.rs @@ -143,12 +143,8 @@ impl BlockFactoryWorker { return; }; // get best parents and their periods - let parents: Vec<(BlockId, u64)> = self - .channels - .consensus - .get_best_parents() - .expect("Couldn't get best parents"); // Vec<(parent_id, parent_period)> - // generate the local storage object + let parents: Vec<(BlockId, u64)> = self.channels.consensus.get_best_parents(); // Vec<(parent_id, parent_period)> + // generate the local storage object let mut block_storage = self.channels.storage.clone_without_refs(); // claim block parents in local storage @@ -235,14 +231,9 @@ impl BlockFactoryWorker { ); // send full block to consensus - if self - .channels + self.channels .consensus - .send_block(block_id, slot, block_storage) - .is_err() - { - warn!("could not send produced block to consensus: channel error"); - } + .register_block(block_id, slot, block_storage, 
true); } /// main run loop of the block creator thread diff --git a/massa-factory-worker/src/endorsement_factory.rs b/massa-factory-worker/src/endorsement_factory.rs index 0c3205eb0a9..4c61cf0b062 100644 --- a/massa-factory-worker/src/endorsement_factory.rs +++ b/massa-factory-worker/src/endorsement_factory.rs @@ -162,23 +162,10 @@ impl EndorsementFactoryWorker { } // get consensus block ID for that slot - let endorsed_block: BlockId = match self + let endorsed_block: BlockId = self .channels .consensus - .get_latest_blockclique_block_at_slot(slot) - { - // error getting block ID at target slot - Err(_) => { - warn!( - "could not get latest blockclique block to create endorsement to be included at slot {}", - slot - ); - return; - } - - // latest block found - Ok(b_id) => b_id, - }; + .get_latest_blockclique_block_at_slot(slot); // produce endorsements let mut endorsements: Vec = Vec::with_capacity(producers_indices.len()); diff --git a/massa-factory-worker/src/tests/tools.rs b/massa-factory-worker/src/tests/tools.rs index 4e1c0cf82c3..ef2335f5874 100644 --- a/massa-factory-worker/src/tests/tools.rs +++ b/massa-factory-worker/src/tests/tools.rs @@ -1,3 +1,6 @@ +use massa_consensus_exports::test_exports::{ + ConsensusEventReceiver, MockConsensusController, MockConsensusControllerMessage, +}; use parking_lot::RwLock; use std::{ sync::{mpsc::Receiver, Arc}, @@ -5,7 +8,6 @@ use std::{ time::Duration, }; -use massa_consensus_exports::{commands::ConsensusCommand, test_exports::MockConsensusController}; use massa_factory_exports::{ test_exports::create_empty_block, FactoryChannels, FactoryConfig, FactoryManager, }; @@ -34,7 +36,7 @@ use massa_wallet::test_exports::create_test_wallet; /// You can use the method `new` to build all the mocks and make the connections /// Then you can use the method `get_next_created_block` that will manage the answers from the mock to the factory depending on the parameters you gave. 
pub struct TestFactory { - consensus_controller: MockConsensusController, + consensus_event_receiver: ConsensusEventReceiver, pool_receiver: PoolEventReceiver, selector_receiver: Receiver, factory_config: FactoryConfig, @@ -53,13 +55,12 @@ impl TestFactory { /// - `TestFactory`: the structure that will be used to manage the tests pub fn new(default_keypair: &KeyPair) -> TestFactory { let (selector_controller, selector_receiver) = MockSelectorController::new_with_receiver(); - let (consensus_controller, consensus_command_sender, _consensus_event_receiver) = + let (consensus_controller, consensus_event_receiver) = MockConsensusController::new_with_receiver(); let (pool_controller, pool_receiver) = MockPoolController::new_with_receiver(); let mut storage = Storage::create_root(); let mut factory_config = FactoryConfig::default(); - let (_protocol_controller, protocol_command_sender, _protocol_event_receiver) = - MockProtocolController::new(); + let (_protocol_controller, protocol_command_sender) = MockProtocolController::new(); let producer_keypair = default_keypair; let producer_address = Address::from_public_key(&producer_keypair.get_public_key()); let mut accounts = PreHashMap::default(); @@ -82,7 +83,7 @@ impl TestFactory { Arc::new(RwLock::new(create_test_wallet(Some(accounts)))), FactoryChannels { selector: selector_controller.clone(), - consensus: consensus_command_sender, + consensus: consensus_controller, pool: pool_controller.clone(), protocol: protocol_command_sender, storage: storage.clone_without_refs(), @@ -90,7 +91,7 @@ impl TestFactory { ); TestFactory { - consensus_controller, + consensus_event_receiver, pool_receiver, selector_receiver, factory_config, @@ -150,17 +151,16 @@ impl TestFactory { _ => panic!("unexpected message"), } } - match self - .consensus_controller - .consensus_command_rx - .blocking_recv() - .unwrap() - { - ConsensusCommand::GetBestParents { response_tx } => { - response_tx.send(self.genesis_blocks.clone()).unwrap(); - } - _ => 
panic!("unexpected message"), - } + self.consensus_event_receiver + .wait_command(MassaTime::from_millis(100), |command| { + if let MockConsensusControllerMessage::GetBestParents { response_tx } = command { + response_tx.send(self.genesis_blocks.clone()).unwrap(); + Some(()) + } else { + None + } + }) + .unwrap(); self.pool_receiver .wait_command(MassaTime::from_millis(100), |command| match command { MockPoolControllerMessage::GetBlockEndorsements { @@ -203,23 +203,21 @@ impl TestFactory { _ => panic!("unexpected message"), }) .unwrap(); - match self - .consensus_controller - .consensus_command_rx - .blocking_recv() + self.consensus_event_receiver + .wait_command(MassaTime::from_millis(100), |command| { + if let MockConsensusControllerMessage::RegisterBlock { + block_id, + block_storage, + slot: _, + created: _, + } = command + { + Some((block_id, block_storage)) + } else { + None + } + }) .unwrap() - { - ConsensusCommand::SendBlock { - block_id, - block_storage, - slot: _, - response_tx, - } => { - response_tx.send(()).unwrap(); - (block_id, block_storage) - } - _ => panic!("unexpected message"), - } } } diff --git a/massa-final-state/Cargo.toml b/massa-final-state/Cargo.toml index b0d676e57b7..29ae05d130b 100644 --- a/massa-final-state/Cargo.toml +++ b/massa-final-state/Cargo.toml @@ -17,6 +17,7 @@ massa_models = { path = "../massa-models" } massa_async_pool = { path = "../massa-async-pool" } massa_serialization = { path = "../massa-serialization" } massa_pos_exports = { path = "../massa-pos-exports" } +massa_hash = { path = "../massa-hash" } [dev-dependencies] massa_async_pool = { path = "../massa-async-pool", features = ["testing"] } diff --git a/massa-final-state/src/final_state.rs b/massa-final-state/src/final_state.rs index 378a9f3e473..c22ad0dde05 100644 --- a/massa-final-state/src/final_state.rs +++ b/massa-final-state/src/final_state.rs @@ -8,11 +8,12 @@ use crate::{config::FinalStateConfig, error::FinalStateError, state_changes::StateChanges}; use 
massa_async_pool::{AsyncMessageId, AsyncPool, AsyncPoolChanges, Change}; use massa_executed_ops::ExecutedOps; +use massa_hash::Hash; use massa_ledger_exports::{get_address_from_key, LedgerChanges, LedgerController}; use massa_models::{slot::Slot, streaming_step::StreamingStep}; -use massa_pos_exports::{PoSFinalState, SelectorController}; +use massa_pos_exports::{DeferredCredits, PoSFinalState, SelectorController}; use std::collections::VecDeque; -use tracing::debug; +use tracing::{debug, info}; /// Represents a final state `(ledger, async pool, executed_ops and the state of the PoS)` pub struct FinalState { @@ -117,15 +118,39 @@ impl FinalState { self.changes_history.push_back((slot, changes)); } + // final hash computing and sub hashes logging + // 1. init hash concatenation with the ledger hash + let ledger_hash = self.ledger.get_ledger_hash(); + let mut hash_concat: Vec = ledger_hash.to_bytes().to_vec(); + debug!("ledger hash at slot {}: {}", slot, ledger_hash); + // 2. async_pool hash + hash_concat.extend(self.async_pool.hash.to_bytes()); + debug!("async_pool hash at slot {}: {}", slot, self.async_pool.hash); + // 3. pos deferred_credit hash + hash_concat.extend(self.pos_state.deferred_credits.hash.to_bytes()); debug!( - "ledger hash at slot {}: {}", - slot, - self.ledger.get_ledger_hash() + "deferred_credit hash at slot {}: {}", + slot, self.pos_state.deferred_credits.hash ); + // 4. pos cycle history hashes + let n = (self.pos_state.cycle_history.len() == self.config.pos_config.cycle_history_length) + as usize; + for cycle_info in self.pos_state.cycle_history.iter().skip(n) { + hash_concat.extend(cycle_info.global_hash.to_bytes()); + debug!( + "cycle ({}) hash at slot {}: {}", + cycle_info.cycle, slot, cycle_info.global_hash + ); + } + // 5. executed operations hash + hash_concat.extend(self.executed_ops.hash.to_bytes()); debug!( - "executed_ops hash at slot {}: {:?}", + "executed_ops hash at slot {}: {}", slot, self.executed_ops.hash ); + // 6. 
final state hash + let final_state_hash = Hash::compute_from(&hash_concat); + info!("final_state hash at slot {}: {}", slot, final_state_hash); } /// Used for bootstrap. @@ -134,8 +159,10 @@ impl FinalState { /// * ledger change that is after `slot` and before or equal to `ledger_step` key /// * ledger change if main bootstrap process is finished /// * async pool change that is after `slot` and before or equal to `pool_step` message id - /// * proof-of-stake change if main bootstrap process is finished - /// * proof-of-stake change if main bootstrap process is finished + /// * async pool change if main bootstrap process is finished + /// * proof-of-stake deferred credits change if main bootstrap process is finished + /// * proof-of-stake deferred credits change that is after `slot` and before or equal to `credits_step` slot + /// * proof-of-stake cycle history change if main bootstrap process is finished /// * executed ops change if main bootstrap process is finished /// /// Produces an error when the `slot` is too old for `self.changes_history` @@ -197,7 +224,7 @@ impl FinalState { ); slot_changes.ledger_changes = ledger_changes; } - StreamingStep::Finished => { + StreamingStep::Finished(_) => { slot_changes.ledger_changes = changes.ledger_changes.clone(); } _ => (), @@ -221,18 +248,49 @@ impl FinalState { ); slot_changes.async_pool_changes = async_pool_changes; } - StreamingStep::Finished => { + StreamingStep::Finished(_) => { slot_changes.async_pool_changes = changes.async_pool_changes.clone(); } _ => (), } - // Get Proof of Stake state changes if current bootstrap cycle is incomplete (so last) - if cycle_step.finished() && credits_step.finished() { - slot_changes.pos_changes = changes.pos_changes.clone(); + // Get PoS deferred credits changes that concern credits <= credits_step + match credits_step { + StreamingStep::Ongoing(cursor_slot) => { + let deferred_credits = DeferredCredits { + credits: changes + .pos_changes + .deferred_credits + .credits + .iter() + 
.filter_map(|(credits_slot, credits)| { + if *credits_slot <= cursor_slot { + Some((*credits_slot, credits.clone())) + } else { + None + } + }) + .collect(), + ..Default::default() + }; + slot_changes.pos_changes.deferred_credits = deferred_credits; + } + StreamingStep::Finished(_) => { + slot_changes.pos_changes.deferred_credits = + changes.pos_changes.deferred_credits.clone(); + } + _ => (), + } + + // Get PoS cycle changes if cycle history main bootstrap finished + if cycle_step.finished() { + slot_changes.pos_changes.seed_bits = changes.pos_changes.seed_bits.clone(); + slot_changes.pos_changes.roll_changes = changes.pos_changes.roll_changes.clone(); + slot_changes.pos_changes.production_stats = + changes.pos_changes.production_stats.clone(); } - // Get executed operations changes if classic bootstrap finished + // Get executed operations changes if executed ops main bootstrap finished if ops_step.finished() { slot_changes.executed_ops_changes = changes.executed_ops_changes.clone(); } @@ -262,7 +320,7 @@ mod tests { #[test] fn get_state_changes_part() { - let message = get_random_message(); + let message = get_random_message(None); // Building the state changes let mut history_state_changes: VecDeque<(Slot, StateChanges)> = VecDeque::new(); let (low_address, high_address) = { diff --git a/massa-final-state/src/state_changes.rs b/massa-final-state/src/state_changes.rs index 104cfb4ec22..ed52113f435 100644 --- a/massa-final-state/src/state_changes.rs +++ b/massa-final-state/src/state_changes.rs @@ -68,19 +68,19 @@ impl Serializer for StateChangesSerializer { /// use massa_async_pool::{AsyncMessage, Change, AsyncPoolChanges}; /// /// let mut state_changes = StateChanges::default(); - /// let message = AsyncMessage { - /// emission_slot: Slot::new(1, 0), - /// emission_index: 0, - /// sender: Address::from_str("A12dG5xP1RDEB5ocdHkymNVvvSJmUL9BgHwCksDowqmGWxfpm93x").unwrap(), - /// destination: 
Address::from_str("A12htxRWiEm8jDJpJptr6cwEhWNcCSFWstN1MLSa96DDkVM9Y42G").unwrap(), - /// handler: String::from("test"), - /// max_gas: 10000000, - /// gas_price: Amount::from_str("1").unwrap(), - /// coins: Amount::from_str("1").unwrap(), - /// validity_start: Slot::new(2, 0), - /// validity_end: Slot::new(3, 0), - /// data: vec![1, 2, 3, 4] - /// }; + /// let message = AsyncMessage::new_with_hash( + /// Slot::new(1, 0), + /// 0, + /// Address::from_str("A12dG5xP1RDEB5ocdHkymNVvvSJmUL9BgHwCksDowqmGWxfpm93x").unwrap(), + /// Address::from_str("A12htxRWiEm8jDJpJptr6cwEhWNcCSFWstN1MLSa96DDkVM9Y42G").unwrap(), + /// String::from("test"), + /// 10000000, + /// Amount::from_str("1").unwrap(), + /// Amount::from_str("1").unwrap(), + /// Slot::new(2, 0), + /// Slot::new(3, 0), + /// vec![1, 2, 3, 4], + /// ); /// let async_pool_changes: AsyncPoolChanges = AsyncPoolChanges(vec![Change::Add(message.compute_id(), message)]); /// state_changes.async_pool_changes = async_pool_changes; /// @@ -175,19 +175,19 @@ impl Deserializer for StateChangesDeserializer { /// use massa_async_pool::{AsyncMessage, Change, AsyncPoolChanges}; /// /// let mut state_changes = StateChanges::default(); - /// let message = AsyncMessage { - /// emission_slot: Slot::new(1, 0), - /// emission_index: 0, - /// sender: Address::from_str("A12dG5xP1RDEB5ocdHkymNVvvSJmUL9BgHwCksDowqmGWxfpm93x").unwrap(), - /// destination: Address::from_str("A12htxRWiEm8jDJpJptr6cwEhWNcCSFWstN1MLSa96DDkVM9Y42G").unwrap(), - /// handler: String::from("test"), - /// max_gas: 10000000, - /// gas_price: Amount::from_str("1").unwrap(), - /// coins: Amount::from_str("1").unwrap(), - /// validity_start: Slot::new(2, 0), - /// validity_end: Slot::new(3, 0), - /// data: vec![1, 2, 3, 4] - /// }; + /// let message = AsyncMessage::new_with_hash( + /// Slot::new(1, 0), + /// 0, + /// Address::from_str("A12dG5xP1RDEB5ocdHkymNVvvSJmUL9BgHwCksDowqmGWxfpm93x").unwrap(), + /// 
Address::from_str("A12htxRWiEm8jDJpJptr6cwEhWNcCSFWstN1MLSa96DDkVM9Y42G").unwrap(), + /// String::from("test"), + /// 10000000, + /// Amount::from_str("1").unwrap(), + /// Amount::from_str("1").unwrap(), + /// Slot::new(2, 0), + /// Slot::new(3, 0), + /// vec![1, 2, 3, 4], + /// ); /// let async_pool_changes: AsyncPoolChanges = AsyncPoolChanges(vec![Change::Add(message.compute_id(), message)]); /// state_changes.async_pool_changes = async_pool_changes; /// diff --git a/massa-final-state/src/test_exports/bootstrap.rs b/massa-final-state/src/test_exports/bootstrap.rs index 2fc320abc4b..05fd77a8733 100644 --- a/massa-final-state/src/test_exports/bootstrap.rs +++ b/massa-final-state/src/test_exports/bootstrap.rs @@ -59,3 +59,46 @@ pub fn assert_eq_final_state(v1: &FinalState, v2: &FinalState) { "executed_ops.sorted_ops mismatch" ); } + +/// asserts that two `FinalState` hashes are equal +pub fn assert_eq_final_state_hash(v1: &FinalState, v2: &FinalState) { + assert_eq!( + v1.ledger.get_ledger_hash(), + v2.ledger.get_ledger_hash(), + "ledger hash mismatch" + ); + assert_eq!( + v1.async_pool.hash, v2.async_pool.hash, + "async pool hash mismatch" + ); + assert_eq!( + v1.pos_state.deferred_credits.hash, v2.pos_state.deferred_credits.hash, + "deferred credits hash mismatch" + ); + for (cycle1, cycle2) in v1 + .pos_state + .cycle_history + .iter() + .zip(v2.pos_state.cycle_history.iter()) + { + assert_eq!( + cycle1.roll_counts_hash, cycle2.roll_counts_hash, + "cycle ({}) roll_counts_hash mismatch", + cycle1.cycle + ); + assert_eq!( + cycle1.production_stats_hash, cycle2.production_stats_hash, + "cycle ({}) roll_counts_hash mismatch", + cycle1.cycle + ); + assert_eq!( + cycle1.global_hash, cycle2.global_hash, + "cycle ({}) global_hash mismatch", + cycle1.cycle + ); + } + assert_eq!( + v1.executed_ops.hash, v2.executed_ops.hash, + "executed ops hash mismatch" + ); +} diff --git a/massa-graph/Cargo.toml b/massa-graph/Cargo.toml deleted file mode 100644 index 
dac2300d26f..00000000000 --- a/massa-graph/Cargo.toml +++ /dev/null @@ -1,26 +0,0 @@ -[package] -name = "massa_graph" -version = "0.1.0" -authors = ["Massa Labs "] -edition = "2021" - -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html - -[dependencies] -displaydoc = "0.2" -num = { version = "0.4", features = ["serde"] } -nom = "7.1" -serde = { version = "1.0", features = ["derive"] } -serde_json = "1.0" -thiserror = "1.0" -tracing = "0.1" -# custom modules -massa_pos_exports = { path = "../massa-pos-exports" } -massa_execution_exports = { path = "../massa-execution-exports" } -massa_hash = { path = "../massa-hash" } -massa_logging = { path = "../massa-logging" } -massa_models = { path = "../massa-models" } -massa_storage = { path = "../massa-storage" } -massa_signature = { path = "../massa-signature" } -massa_serialization = { path = "../massa-serialization"} - diff --git a/massa-graph/src/block_graph.rs b/massa-graph/src/block_graph.rs deleted file mode 100644 index 66549955784..00000000000 --- a/massa-graph/src/block_graph.rs +++ /dev/null @@ -1,2742 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -//! All information concerning blocks, the block graph and cliques is managed here. 
-use crate::{ - bootstrapable_graph::BootstrapableGraph, - error::{GraphError, GraphResult as Result}, - export_active_block::ExportActiveBlock, - settings::GraphConfig, -}; -use massa_hash::Hash; -use massa_logging::massa_trace; -use massa_models::prehash::{CapacityAllocator, PreHashMap, PreHashSet}; -use massa_models::{ - active_block::ActiveBlock, api::BlockGraphStatus, clique::Clique, wrapped::WrappedContent, -}; -use massa_models::{ - address::Address, - block::{ - Block, BlockHeader, BlockHeaderSerializer, BlockId, BlockSerializer, WrappedBlock, - WrappedHeader, - }, - slot::Slot, -}; -use massa_pos_exports::SelectorController; -use massa_signature::PublicKey; -use massa_storage::Storage; -use serde::{Deserialize, Serialize}; -use std::collections::{hash_map, BTreeSet, HashMap, VecDeque}; -use std::mem; -use tracing::{debug, info}; - -#[derive(Debug, Clone)] -#[allow(clippy::large_enum_variant)] -enum HeaderOrBlock { - Header(WrappedHeader), - Block { - id: BlockId, - slot: Slot, - storage: Storage, - }, -} - -impl HeaderOrBlock { - /// Gets slot for that header or block - pub fn get_slot(&self) -> Slot { - match self { - HeaderOrBlock::Header(header) => header.content.slot, - HeaderOrBlock::Block { slot, .. } => *slot, - } - } -} - -/// Something can be discarded -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub enum DiscardReason { - /// Block is invalid, either structurally, or because of some incompatibility. The String contains the reason for info or debugging. - Invalid(String), - /// Block is incompatible with a final block. - Stale, - /// Block has enough fitness. - Final, -} - -/// Enum used in `BlockGraph`'s state machine -#[derive(Debug, Clone)] -enum BlockStatus { - /// The block/header has reached consensus but no consensus-level check has been performed. - /// It will be processed during the next iteration - Incoming(HeaderOrBlock), - /// The block's or header's slot is too much in the future. 
- /// It will be processed at the block/header slot - WaitingForSlot(HeaderOrBlock), - /// The block references an unknown Block id - WaitingForDependencies { - /// Given header/block - header_or_block: HeaderOrBlock, - /// includes self if it's only a header - unsatisfied_dependencies: PreHashSet, - /// Used to limit and sort the number of blocks/headers waiting for dependencies - sequence_number: u64, - }, - /// The block was checked and included in the blockgraph - Active { - a_block: Box, - storage: Storage, - }, - /// The block was discarded and is kept to avoid reprocessing it - Discarded { - /// Just the slot of that block - slot: Slot, - /// Address of the creator of the block - creator: Address, - /// Ids of parents blocks - parents: Vec, - /// why it was discarded - reason: DiscardReason, - /// Used to limit and sort the number of blocks/headers waiting for dependencies - sequence_number: u64, - }, -} - -/// Block status in the graph that can be exported. -#[derive(Debug, Clone, Serialize, Deserialize)] -pub enum ExportBlockStatus { - /// received but not yet graph processed - Incoming, - /// waiting for its slot - WaitingForSlot, - /// waiting for a missing dependency - WaitingForDependencies, - /// valid and not yet final - Active(Block), - /// immutable - Final(Block), - /// not part of the graph - Discarded(DiscardReason), -} - -/// The block version that can be exported. -/// Note that the detailed list of operation is not exported -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct ExportCompiledBlock { - /// Header of the corresponding block. - pub header: WrappedHeader, - /// For (i, set) in children, - /// set contains the headers' hashes - /// of blocks referencing exported block as a parent, - /// in thread i. 
- pub children: Vec>, - /// Active or final - pub is_final: bool, -} - -/// Status -#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] -pub enum Status { - /// without enough fitness to be part of immutable history - Active, - /// with enough fitness to be part of immutable history - Final, -} - -impl<'a> BlockGraphExport { - /// Conversion from blockgraph. - pub fn extract_from( - block_graph: &'a BlockGraph, - slot_start: Option, - slot_end: Option, - ) -> Result { - let mut export = BlockGraphExport { - genesis_blocks: block_graph.genesis_hashes.clone(), - active_blocks: PreHashMap::with_capacity(block_graph.block_statuses.len()), - discarded_blocks: PreHashMap::with_capacity(block_graph.block_statuses.len()), - best_parents: block_graph.best_parents.clone(), - latest_final_blocks_periods: block_graph.latest_final_blocks_periods.clone(), - gi_head: block_graph.gi_head.clone(), - max_cliques: block_graph.max_cliques.clone(), - }; - - let filter = |&s| { - if let Some(s_start) = slot_start { - if s < s_start { - return false; - } - } - if let Some(s_end) = slot_end { - if s >= s_end { - return false; - } - } - true - }; - - for (hash, block) in block_graph.block_statuses.iter() { - match block { - BlockStatus::Discarded { - slot, - creator, - parents, - reason, - .. 
- } => { - if filter(slot) { - export - .discarded_blocks - .insert(*hash, (reason.clone(), (*slot, *creator, parents.clone()))); - } - } - BlockStatus::Active { a_block, storage } => { - if filter(&a_block.slot) { - let stored_block = - storage.read_blocks().get(hash).cloned().ok_or_else(|| { - GraphError::MissingBlock(format!( - "missing block in BlockGraphExport::extract_from: {}", - hash - )) - })?; - export.active_blocks.insert( - *hash, - ExportCompiledBlock { - header: stored_block.content.header, - children: a_block - .children - .iter() - .map(|thread| { - thread.keys().copied().collect::>() - }) - .collect(), - is_final: a_block.is_final, - }, - ); - } - } - _ => continue, - } - } - - Ok(export) - } -} - -/// Bootstrap compatible version of the block graph -#[derive(Debug, Clone)] -#[allow(clippy::type_complexity)] -pub struct BlockGraphExport { - /// Genesis blocks. - pub genesis_blocks: Vec, - /// Map of active blocks, were blocks are in their exported version. - pub active_blocks: PreHashMap, - /// Finite cache of discarded blocks, in exported version `(slot, creator_address, parents)`. - pub discarded_blocks: PreHashMap))>, - /// Best parents hashes in each thread. - pub best_parents: Vec<(BlockId, u64)>, - /// Latest final period and block hash in each thread. - pub latest_final_blocks_periods: Vec<(BlockId, u64)>, - /// Head of the incompatibility graph. - pub gi_head: PreHashMap>, - /// List of maximal cliques of compatible blocks. 
- pub max_cliques: Vec, -} - -/// Graph management -pub struct BlockGraph { - /// Consensus related configuration - cfg: GraphConfig, - /// Block ids of genesis blocks - genesis_hashes: Vec, - /// Used to limit the number of waiting and discarded blocks - sequence_counter: u64, - /// Every block we know about - block_statuses: PreHashMap, - /// Ids of incoming blocks/headers - incoming_index: PreHashSet, - /// ids of waiting for slot blocks/headers - waiting_for_slot_index: PreHashSet, - /// ids of waiting for dependencies blocks/headers - waiting_for_dependencies_index: PreHashSet, - /// ids of active blocks - active_index: PreHashSet, - /// ids of discarded blocks - discarded_index: PreHashSet, - /// One (block id, period) per thread - latest_final_blocks_periods: Vec<(BlockId, u64)>, - /// One `(block id, period)` per thread TODO not sure I understand the difference with `latest_final_blocks_periods` - best_parents: Vec<(BlockId, u64)>, - /// Incompatibility graph: maps a block id to the block ids it is incompatible with - /// One entry per Active Block - gi_head: PreHashMap>, - /// All the cliques - max_cliques: Vec, - /// Blocks that need to be propagated - to_propagate: PreHashMap, - /// List of block ids we think are attack attempts - attack_attempts: Vec, - /// Newly final blocks - new_final_blocks: PreHashSet, - /// Newly stale block mapped to creator and slot - new_stale_blocks: PreHashMap, - /// Shared storage, - storage: Storage, - /// Selector controller - selector_controller: Box, -} - -/// Possible output of a header check -#[derive(Debug)] -enum HeaderCheckOutcome { - /// it's ok and here are some useful values - Proceed { - /// one (parent block id, parent's period) per thread - parents_hash_period: Vec<(BlockId, u64)>, - /// blocks that header is incompatible with - incompatibilities: PreHashSet, - /// number of incompatibilities that are inherited from the parents - inherited_incompatibilities_count: usize, - /// fitness - fitness: u64, - }, - 
/// there is something wrong with that header - Discard(DiscardReason), - /// it must wait for its slot to be fully processed - WaitForSlot, - /// it must wait for these block ids to be fully processed - WaitForDependencies(PreHashSet), -} - -/// Possible outcomes of endorsements check -#[derive(Debug)] -enum EndorsementsCheckOutcome { - /// Everything is ok - Proceed, - /// There is something wrong with that endorsement - Discard(DiscardReason), - /// It must wait for its slot to be fully processed - WaitForSlot, -} - -/// Creates genesis block in given thread. -/// -/// # Arguments -/// * `cfg`: consensus configuration -/// * `thread_number`: thread in which we want a genesis block -pub fn create_genesis_block( - cfg: &GraphConfig, - thread_number: u8, -) -> Result<(BlockId, WrappedBlock)> { - let keypair = &cfg.genesis_key; - let header = BlockHeader::new_wrapped( - BlockHeader { - slot: Slot::new(0, thread_number), - parents: Vec::new(), - operation_merkle_root: Hash::compute_from(&Vec::new()), - endorsements: Vec::new(), - }, - BlockHeaderSerializer::new(), - keypair, - )?; - - Ok(( - header.id, - Block::new_wrapped( - Block { - header, - operations: Default::default(), - }, - BlockSerializer::new(), - keypair, - )?, - )) -} - -impl BlockGraph { - /// Creates a new `BlockGraph`. - /// - /// # Argument - /// * `cfg`: consensus configuration. - /// * `init`: A bootstrap graph to start the graph with - /// * `storage`: A shared storage that share data across all modules. 
- /// * `selector_controller`: Access to the PoS selector to get draws - pub async fn new( - cfg: GraphConfig, - init: Option, - storage: Storage, - selector_controller: Box, - ) -> Result { - // load genesis blocks - - let mut block_statuses = PreHashMap::default(); - let mut genesis_block_ids = Vec::with_capacity(cfg.thread_count as usize); - for thread in 0u8..cfg.thread_count { - let (block_id, block) = create_genesis_block(&cfg, thread).map_err(|err| { - GraphError::GenesisCreationError(format!("genesis error {}", err)) - })?; - let mut storage = storage.clone_without_refs(); - storage.store_block(block.clone()); - genesis_block_ids.push(block_id); - block_statuses.insert( - block_id, - BlockStatus::Active { - a_block: Box::new(ActiveBlock { - creator_address: block.creator_address, - parents: Vec::new(), - children: vec![PreHashMap::default(); cfg.thread_count as usize], - descendants: Default::default(), - is_final: true, - block_id, - slot: block.content.header.content.slot, - fitness: block.get_fitness(), - }), - storage, - }, - ); - } - - massa_trace!("consensus.block_graph.new", {}); - if let Some(BootstrapableGraph { final_blocks }) = init { - // load final blocks - let final_blocks: Vec<(ActiveBlock, Storage)> = final_blocks - .into_iter() - .map(|export_b| export_b.to_active_block(&storage, cfg.thread_count)) - .collect::>()?; - - // compute latest_final_blocks_periods - let mut latest_final_blocks_periods: Vec<(BlockId, u64)> = - genesis_block_ids.iter().map(|id| (*id, 0u64)).collect(); - for (b, _) in &final_blocks { - if let Some(v) = latest_final_blocks_periods.get_mut(b.slot.thread as usize) { - if b.slot.period > v.1 { - *v = (b.block_id, b.slot.period); - } - } - } - - // generate graph - let mut res_graph = BlockGraph { - cfg: cfg.clone(), - sequence_counter: 0, - genesis_hashes: genesis_block_ids, - active_index: final_blocks.iter().map(|(b, _)| b.block_id).collect(), - incoming_index: Default::default(), - waiting_for_slot_index: 
Default::default(), - waiting_for_dependencies_index: Default::default(), - discarded_index: Default::default(), - best_parents: latest_final_blocks_periods.clone(), - latest_final_blocks_periods, - gi_head: Default::default(), - max_cliques: vec![Clique { - block_ids: PreHashSet::::default(), - fitness: 0, - is_blockclique: true, - }], - to_propagate: Default::default(), - attack_attempts: Default::default(), - new_final_blocks: Default::default(), - new_stale_blocks: Default::default(), - storage, - selector_controller, - block_statuses: final_blocks - .into_iter() - .map(|(b, s)| { - Ok(( - b.block_id, - BlockStatus::Active { - a_block: Box::new(b), - storage: s, - }, - )) - }) - .collect::>()?, - }; - - // claim parent refs - for (_b_id, block_status) in res_graph.block_statuses.iter_mut() { - if let BlockStatus::Active { - a_block, - storage: block_storage, - } = block_status - { - // claim parent refs - let n_claimed_parents = block_storage - .claim_block_refs(&a_block.parents.iter().map(|(p_id, _)| *p_id).collect()) - .len(); - - if !a_block.is_final { - // note: parents of final blocks will be missing, that's ok, but it shouldn't be the case for non-finals - if n_claimed_parents != cfg.thread_count as usize { - return Err(GraphError::MissingBlock( - "block storage could not claim refs to all parent blocks".into(), - )); - } - } - } - } - - // list active block parents - let active_blocks_map: PreHashMap)> = res_graph - .block_statuses - .iter() - .filter_map(|(h, s)| { - if let BlockStatus::Active { a_block: a, .. } = s { - return Some((*h, (a.slot, a.parents.iter().map(|(ph, _)| *ph).collect()))); - } - None - }) - .collect(); - // deduce children and descendants - for (b_id, (b_slot, b_parents)) in active_blocks_map.into_iter() { - // deduce children - for parent_id in &b_parents { - if let Some(BlockStatus::Active { - a_block: parent, .. 
- }) = res_graph.block_statuses.get_mut(parent_id) - { - parent.children[b_slot.thread as usize].insert(b_id, b_slot.period); - } - } - - // deduce descendants - let mut ancestors: VecDeque = b_parents.into_iter().collect(); - let mut visited: PreHashSet = Default::default(); - while let Some(ancestor_h) = ancestors.pop_back() { - if !visited.insert(ancestor_h) { - continue; - } - if let Some(BlockStatus::Active { a_block: ab, .. }) = - res_graph.block_statuses.get_mut(&ancestor_h) - { - ab.descendants.insert(b_id); - for (ancestor_parent_h, _) in ab.parents.iter() { - ancestors.push_front(*ancestor_parent_h); - } - } - } - } - Ok(res_graph) - } else { - Ok(BlockGraph { - cfg, - sequence_counter: 0, - block_statuses, - incoming_index: Default::default(), - waiting_for_slot_index: Default::default(), - waiting_for_dependencies_index: Default::default(), - active_index: genesis_block_ids.iter().copied().collect(), - discarded_index: Default::default(), - latest_final_blocks_periods: genesis_block_ids.iter().map(|h| (*h, 0)).collect(), - best_parents: genesis_block_ids.iter().map(|v| (*v, 0)).collect(), - genesis_hashes: genesis_block_ids, - gi_head: PreHashMap::default(), - max_cliques: vec![Clique { - block_ids: PreHashSet::::default(), - fitness: 0, - is_blockclique: true, - }], - to_propagate: Default::default(), - attack_attempts: Default::default(), - new_final_blocks: Default::default(), - new_stale_blocks: Default::default(), - storage, - selector_controller, - }) - } - } - - /// export full graph in a bootstrap compatible version - pub fn export_bootstrap_graph(&self) -> Result { - let mut required_final_blocks: PreHashSet<_> = self.list_required_active_blocks()?; - required_final_blocks.retain(|b_id| { - if let Some(BlockStatus::Active { a_block, .. 
}) = self.block_statuses.get(b_id) { - if a_block.is_final { - // filter only final actives - return true; - } - } - false - }); - let mut final_blocks: Vec = - Vec::with_capacity(required_final_blocks.len()); - for b_id in &required_final_blocks { - if let Some(BlockStatus::Active { a_block, storage }) = self.block_statuses.get(b_id) { - final_blocks.push(ExportActiveBlock::from_active_block(a_block, storage)); - } else { - return Err(GraphError::ContainerInconsistency(format!( - "block {} was expected to be active but wasn't on bootstrap graph export", - b_id - ))); - } - } - - Ok(BootstrapableGraph { final_blocks }) - } - - /// Gets latest final blocks (hash, period) for each thread. - pub fn get_latest_final_blocks_periods(&self) -> &Vec<(BlockId, u64)> { - &self.latest_final_blocks_periods - } - - /// Gets best parents. - pub fn get_best_parents(&self) -> &Vec<(BlockId, u64)> { - &self.best_parents - } - - /// Gets the list of cliques. - pub fn get_cliques(&self) -> Vec { - self.max_cliques.clone() - } - - /// Returns the list of block IDs created by a given address, and their finality statuses - pub fn get_block_ids_by_creator(&self, address: &Address) -> PreHashMap { - // iterate on active (final and non-final) blocks - self.active_index - .iter() - .filter_map(|block_id| match self.block_statuses.get(block_id) { - Some(BlockStatus::Active { a_block, .. }) => { - if a_block.creator_address == *address { - Some(( - *block_id, - if a_block.is_final { - Status::Final - } else { - Status::Active - }, - )) - } else { - None - } - } - _ => None, - }) - .collect() - } - - /// Gets whole compiled block corresponding to given hash, if it is active. 
- /// - /// # Argument - /// * `block_id`: block ID - pub fn get_active_block(&self, block_id: &BlockId) -> Option<(&ActiveBlock, &Storage)> { - BlockGraph::get_full_active_block(&self.block_statuses, *block_id) - } - - /// get block graph status - pub fn get_block_status(&self, block_id: &BlockId) -> BlockGraphStatus { - match self.block_statuses.get(block_id) { - None => BlockGraphStatus::NotFound, - Some(BlockStatus::Active { a_block, .. }) => { - if a_block.is_final { - BlockGraphStatus::Final - } else if self - .max_cliques - .iter() - .find(|clique| clique.is_blockclique) - .expect("blockclique absent") - .block_ids - .contains(block_id) - { - BlockGraphStatus::ActiveInBlockclique - } else { - BlockGraphStatus::ActiveInAlternativeCliques - } - } - Some(BlockStatus::Discarded { .. }) => BlockGraphStatus::Discarded, - Some(BlockStatus::Incoming(_)) => BlockGraphStatus::Incoming, - Some(BlockStatus::WaitingForDependencies { .. }) => { - BlockGraphStatus::WaitingForDependencies - } - Some(BlockStatus::WaitingForSlot(_)) => BlockGraphStatus::WaitingForSlot, - } - } - - /// signal new slot - pub fn slot_tick(&mut self, current_slot: Option) -> Result<()> { - // list all elements for which the time has come - let to_process: BTreeSet<(Slot, BlockId)> = self - .waiting_for_slot_index - .iter() - .filter_map(|b_id| match self.block_statuses.get(b_id) { - Some(BlockStatus::WaitingForSlot(header_or_block)) => { - let slot = header_or_block.get_slot(); - if Some(slot) <= current_slot { - Some((slot, *b_id)) - } else { - None - } - } - _ => None, - }) - .collect(); - - massa_trace!("consensus.block_graph.slot_tick", {}); - // process those elements - self.rec_process(to_process, current_slot)?; - - Ok(()) - } - - /// A new header has come ! - /// - /// Checks performed: - /// - Ignore genesis blocks. - /// - See `process`. 
- pub fn incoming_header( - &mut self, - block_id: BlockId, - header: WrappedHeader, - current_slot: Option, - ) -> Result<()> { - // ignore genesis blocks - if self.genesis_hashes.contains(&block_id) { - return Ok(()); - } - - debug!( - "received header {} for slot {}", - block_id, header.content.slot - ); - massa_trace!("consensus.block_graph.incoming_header", {"block_id": block_id, "header": header}); - let mut to_ack: BTreeSet<(Slot, BlockId)> = BTreeSet::new(); - match self.block_statuses.entry(block_id) { - // if absent => add as Incoming, call rec_ack on it - hash_map::Entry::Vacant(vac) => { - to_ack.insert((header.content.slot, block_id)); - vac.insert(BlockStatus::Incoming(HeaderOrBlock::Header(header))); - self.incoming_index.insert(block_id); - } - hash_map::Entry::Occupied(mut occ) => match occ.get_mut() { - BlockStatus::Discarded { - sequence_number, .. - } => { - // promote if discarded - *sequence_number = BlockGraph::new_sequence_number(&mut self.sequence_counter); - } - BlockStatus::WaitingForDependencies { .. } => { - // promote in dependencies - self.promote_dep_tree(block_id)?; - } - _ => {} - }, - } - - // process - self.rec_process(to_ack, current_slot)?; - - Ok(()) - } - - /// A new block has come - /// - /// Checks performed: - /// - Ignore genesis blocks. - /// - See `process`. 
- pub fn incoming_block( - &mut self, - block_id: BlockId, - slot: Slot, - current_slot: Option, - storage: Storage, - ) -> Result<()> { - // ignore genesis blocks - if self.genesis_hashes.contains(&block_id) { - return Ok(()); - } - - debug!("received block {} for slot {}", block_id, slot); - - let mut to_ack: BTreeSet<(Slot, BlockId)> = BTreeSet::new(); - match self.block_statuses.entry(block_id) { - // if absent => add as Incoming, call rec_ack on it - hash_map::Entry::Vacant(vac) => { - to_ack.insert((slot, block_id)); - vac.insert(BlockStatus::Incoming(HeaderOrBlock::Block { - id: block_id, - slot, - storage, - })); - self.incoming_index.insert(block_id); - } - hash_map::Entry::Occupied(mut occ) => match occ.get_mut() { - BlockStatus::Discarded { - sequence_number, .. - } => { - // promote if discarded - *sequence_number = BlockGraph::new_sequence_number(&mut self.sequence_counter); - } - BlockStatus::WaitingForSlot(header_or_block) => { - // promote to full block - *header_or_block = HeaderOrBlock::Block { - id: block_id, - slot, - storage, - }; - } - BlockStatus::WaitingForDependencies { - header_or_block, - unsatisfied_dependencies, - .. 
- } => { - // promote to full block and satisfy self-dependency - if unsatisfied_dependencies.remove(&block_id) { - // a dependency was satisfied: process - to_ack.insert((slot, block_id)); - } - *header_or_block = HeaderOrBlock::Block { - id: block_id, - slot, - storage, - }; - // promote in dependencies - self.promote_dep_tree(block_id)?; - } - _ => return Ok(()), - }, - } - - // process - self.rec_process(to_ack, current_slot)?; - - Ok(()) - } - - fn new_sequence_number(sequence_counter: &mut u64) -> u64 { - let res = *sequence_counter; - *sequence_counter += 1; - res - } - - /// acknowledge a set of items recursively - fn rec_process( - &mut self, - mut to_ack: BTreeSet<(Slot, BlockId)>, - current_slot: Option, - ) -> Result<()> { - // order processing by (slot, hash) - while let Some((_slot, hash)) = to_ack.pop_first() { - to_ack.extend(self.process(hash, current_slot)?) - } - Ok(()) - } - - /// Acknowledge a single item, return a set of items to re-ack - fn process( - &mut self, - block_id: BlockId, - current_slot: Option, - ) -> Result> { - // list items to reprocess - let mut reprocess = BTreeSet::new(); - - massa_trace!("consensus.block_graph.process", { "block_id": block_id }); - // control all the waiting states and try to get a valid block - let ( - valid_block_creator, - valid_block_slot, - valid_block_parents_hash_period, - valid_block_incomp, - valid_block_inherited_incomp_count, - valid_block_storage, - valid_block_fitness, - ) = match self.block_statuses.get(&block_id) { - None => return Ok(BTreeSet::new()), // disappeared before being processed: do nothing - - // discarded: do nothing - Some(BlockStatus::Discarded { .. }) => { - massa_trace!("consensus.block_graph.process.discarded", { - "block_id": block_id - }); - return Ok(BTreeSet::new()); - } - - // already active: do nothing - Some(BlockStatus::Active { .. 
}) => { - massa_trace!("consensus.block_graph.process.active", { - "block_id": block_id - }); - return Ok(BTreeSet::new()); - } - - // incoming header - Some(BlockStatus::Incoming(HeaderOrBlock::Header(_))) => { - massa_trace!("consensus.block_graph.process.incoming_header", { - "block_id": block_id - }); - // remove header - let header = if let Some(BlockStatus::Incoming(HeaderOrBlock::Header(header))) = - self.block_statuses.remove(&block_id) - { - self.incoming_index.remove(&block_id); - header - } else { - return Err(GraphError::ContainerInconsistency(format!( - "inconsistency inside block statuses removing incoming header {}", - block_id - ))); - }; - match self.check_header(&block_id, &header, current_slot)? { - HeaderCheckOutcome::Proceed { .. } => { - // set as waiting dependencies - let mut dependencies = PreHashSet::::default(); - dependencies.insert(block_id); // add self as unsatisfied - self.block_statuses.insert( - block_id, - BlockStatus::WaitingForDependencies { - header_or_block: HeaderOrBlock::Header(header), - unsatisfied_dependencies: dependencies, - sequence_number: BlockGraph::new_sequence_number( - &mut self.sequence_counter, - ), - }, - ); - self.waiting_for_dependencies_index.insert(block_id); - self.promote_dep_tree(block_id)?; - - massa_trace!( - "consensus.block_graph.process.incoming_header.waiting_for_self", - { "block_id": block_id } - ); - return Ok(BTreeSet::new()); - } - HeaderCheckOutcome::WaitForDependencies(mut dependencies) => { - // set as waiting dependencies - dependencies.insert(block_id); // add self as unsatisfied - massa_trace!("consensus.block_graph.process.incoming_header.waiting_for_dependencies", {"block_id": block_id, "dependencies": dependencies}); - - self.block_statuses.insert( - block_id, - BlockStatus::WaitingForDependencies { - header_or_block: HeaderOrBlock::Header(header), - unsatisfied_dependencies: dependencies, - sequence_number: BlockGraph::new_sequence_number( - &mut self.sequence_counter, - ), - }, - 
); - self.waiting_for_dependencies_index.insert(block_id); - self.promote_dep_tree(block_id)?; - - return Ok(BTreeSet::new()); - } - HeaderCheckOutcome::WaitForSlot => { - // make it wait for slot - self.block_statuses.insert( - block_id, - BlockStatus::WaitingForSlot(HeaderOrBlock::Header(header)), - ); - self.waiting_for_slot_index.insert(block_id); - - massa_trace!( - "consensus.block_graph.process.incoming_header.waiting_for_slot", - { "block_id": block_id } - ); - return Ok(BTreeSet::new()); - } - HeaderCheckOutcome::Discard(reason) => { - self.maybe_note_attack_attempt(&reason, &block_id); - massa_trace!("consensus.block_graph.process.incoming_header.discarded", {"block_id": block_id, "reason": reason}); - // count stales - if reason == DiscardReason::Stale { - self.new_stale_blocks - .insert(block_id, (header.creator_address, header.content.slot)); - } - // discard - self.block_statuses.insert( - block_id, - BlockStatus::Discarded { - slot: header.content.slot, - creator: header.creator_address, - parents: header.content.parents, - reason, - sequence_number: BlockGraph::new_sequence_number( - &mut self.sequence_counter, - ), - }, - ); - self.discarded_index.insert(block_id); - - return Ok(BTreeSet::new()); - } - } - } - - // incoming block - Some(BlockStatus::Incoming(HeaderOrBlock::Block { id: block_id, .. })) => { - let block_id = *block_id; - massa_trace!("consensus.block_graph.process.incoming_block", { - "block_id": block_id - }); - let (slot, storage) = - if let Some(BlockStatus::Incoming(HeaderOrBlock::Block { - slot, storage, .. 
- })) = self.block_statuses.remove(&block_id) - { - self.incoming_index.remove(&block_id); - (slot, storage) - } else { - return Err(GraphError::ContainerInconsistency(format!( - "inconsistency inside block statuses removing incoming block {}", - block_id - ))); - }; - let stored_block = storage - .read_blocks() - .get(&block_id) - .cloned() - .expect("incoming block not found in storage"); - - match self.check_header(&block_id, &stored_block.content.header, current_slot)? { - HeaderCheckOutcome::Proceed { - parents_hash_period, - incompatibilities, - inherited_incompatibilities_count, - fitness, - } => { - // block is valid: remove it from Incoming and return it - massa_trace!("consensus.block_graph.process.incoming_block.valid", { - "block_id": block_id - }); - ( - stored_block.content.header.creator_public_key, - slot, - parents_hash_period, - incompatibilities, - inherited_incompatibilities_count, - storage, - fitness, - ) - } - HeaderCheckOutcome::WaitForDependencies(dependencies) => { - // set as waiting dependencies - self.block_statuses.insert( - block_id, - BlockStatus::WaitingForDependencies { - header_or_block: HeaderOrBlock::Block { - id: block_id, - slot, - storage, - }, - unsatisfied_dependencies: dependencies, - sequence_number: BlockGraph::new_sequence_number( - &mut self.sequence_counter, - ), - }, - ); - self.waiting_for_dependencies_index.insert(block_id); - self.promote_dep_tree(block_id)?; - massa_trace!( - "consensus.block_graph.process.incoming_block.waiting_for_dependencies", - { "block_id": block_id } - ); - return Ok(BTreeSet::new()); - } - HeaderCheckOutcome::WaitForSlot => { - // set as waiting for slot - self.block_statuses.insert( - block_id, - BlockStatus::WaitingForSlot(HeaderOrBlock::Block { - id: block_id, - slot, - storage, - }), - ); - self.waiting_for_slot_index.insert(block_id); - - massa_trace!( - "consensus.block_graph.process.incoming_block.waiting_for_slot", - { "block_id": block_id } - ); - return Ok(BTreeSet::new()); - } 
- HeaderCheckOutcome::Discard(reason) => { - self.maybe_note_attack_attempt(&reason, &block_id); - massa_trace!("consensus.block_graph.process.incoming_block.discarded", {"block_id": block_id, "reason": reason}); - // count stales - if reason == DiscardReason::Stale { - self.new_stale_blocks.insert( - block_id, - ( - stored_block.content.header.creator_address, - stored_block.content.header.content.slot, - ), - ); - } - // add to discard - self.block_statuses.insert( - block_id, - BlockStatus::Discarded { - slot: stored_block.content.header.content.slot, - creator: stored_block.creator_address, - parents: stored_block.content.header.content.parents.clone(), - reason, - sequence_number: BlockGraph::new_sequence_number( - &mut self.sequence_counter, - ), - }, - ); - self.discarded_index.insert(block_id); - - return Ok(BTreeSet::new()); - } - } - } - - Some(BlockStatus::WaitingForSlot(header_or_block)) => { - massa_trace!("consensus.block_graph.process.waiting_for_slot", { - "block_id": block_id - }); - let slot = header_or_block.get_slot(); - if Some(slot) > current_slot { - massa_trace!( - "consensus.block_graph.process.waiting_for_slot.in_the_future", - { "block_id": block_id } - ); - // in the future: ignore - return Ok(BTreeSet::new()); - } - // send back as incoming and ask for reprocess - if let Some(BlockStatus::WaitingForSlot(header_or_block)) = - self.block_statuses.remove(&block_id) - { - self.waiting_for_slot_index.remove(&block_id); - self.block_statuses - .insert(block_id, BlockStatus::Incoming(header_or_block)); - self.incoming_index.insert(block_id); - reprocess.insert((slot, block_id)); - massa_trace!( - "consensus.block_graph.process.waiting_for_slot.reprocess", - { "block_id": block_id } - ); - return Ok(reprocess); - } else { - return Err(GraphError::ContainerInconsistency(format!("inconsistency inside block statuses removing waiting for slot block or header {}", block_id))); - }; - } - - Some(BlockStatus::WaitingForDependencies { - 
unsatisfied_dependencies, - .. - }) => { - massa_trace!("consensus.block_graph.process.waiting_for_dependencies", { - "block_id": block_id - }); - if !unsatisfied_dependencies.is_empty() { - // still has unsatisfied dependencies: ignore - return Ok(BTreeSet::new()); - } - // send back as incoming and ask for reprocess - if let Some(BlockStatus::WaitingForDependencies { - header_or_block, .. - }) = self.block_statuses.remove(&block_id) - { - self.waiting_for_dependencies_index.remove(&block_id); - reprocess.insert((header_or_block.get_slot(), block_id)); - self.block_statuses - .insert(block_id, BlockStatus::Incoming(header_or_block)); - self.incoming_index.insert(block_id); - massa_trace!( - "consensus.block_graph.process.waiting_for_dependencies.reprocess", - { "block_id": block_id } - ); - return Ok(reprocess); - } else { - return Err(GraphError::ContainerInconsistency(format!("inconsistency inside block statuses removing waiting for slot header or block {}", block_id))); - } - } - }; - - // add block to graph - self.add_block_to_graph( - block_id, - valid_block_parents_hash_period, - valid_block_creator, - valid_block_slot, - valid_block_incomp, - valid_block_inherited_incomp_count, - valid_block_fitness, - valid_block_storage, - )?; - - // if the block was added, update linked dependencies and mark satisfied ones for recheck - if let Some(BlockStatus::Active { storage, .. }) = self.block_statuses.get(&block_id) { - massa_trace!("consensus.block_graph.process.is_active", { - "block_id": block_id - }); - self.to_propagate.insert(block_id, storage.clone()); - for itm_block_id in self.waiting_for_dependencies_index.iter() { - if let Some(BlockStatus::WaitingForDependencies { - header_or_block, - unsatisfied_dependencies, - .. 
- }) = self.block_statuses.get_mut(itm_block_id) - { - if unsatisfied_dependencies.remove(&block_id) { - // a dependency was satisfied: retry - reprocess.insert((header_or_block.get_slot(), *itm_block_id)); - } - } - } - } - - Ok(reprocess) - } - - /// Mark a block as invalid - pub fn invalid_block( - &mut self, - block_id: &BlockId, - header: WrappedHeader, - ) -> Result<(), GraphError> { - let reason = DiscardReason::Invalid("invalid".to_string()); - self.maybe_note_attack_attempt(&reason, block_id); - massa_trace!("consensus.block_graph.process.invalid_block", {"block_id": block_id, "reason": reason}); - - // add to discard - self.block_statuses.insert( - *block_id, - BlockStatus::Discarded { - slot: header.content.slot, - creator: header.creator_address, - parents: header.content.parents, - reason, - sequence_number: BlockGraph::new_sequence_number(&mut self.sequence_counter), - }, - ); - self.discarded_index.insert(*block_id); - - Ok(()) - } - - /// Note an attack attempt if the discard reason indicates one. - fn maybe_note_attack_attempt(&mut self, reason: &DiscardReason, hash: &BlockId) { - massa_trace!("consensus.block_graph.maybe_note_attack_attempt", {"hash": hash, "reason": reason}); - // If invalid, note the attack attempt. 
- if let DiscardReason::Invalid(reason) = reason { - info!( - "consensus.block_graph.maybe_note_attack_attempt DiscardReason::Invalid:{}", - reason - ); - self.attack_attempts.push(*hash); - } - } - - /// Gets whole `ActiveBlock` corresponding to given `block_id` - /// - /// # Argument - /// * `block_id`: block ID - fn get_full_active_block( - block_statuses: &PreHashMap, - block_id: BlockId, - ) -> Option<(&ActiveBlock, &Storage)> { - match block_statuses.get(&block_id) { - Some(BlockStatus::Active { a_block, storage }) => Some((a_block.as_ref(), storage)), - _ => None, - } - } - - /// Gets a block and all its descendants - /// - /// # Argument - /// * hash : hash of the given block - fn get_active_block_and_descendants(&self, block_id: &BlockId) -> Result> { - let mut to_visit = vec![*block_id]; - let mut result = PreHashSet::::default(); - while let Some(visit_h) = to_visit.pop() { - if !result.insert(visit_h) { - continue; // already visited - } - BlockGraph::get_full_active_block(&self.block_statuses, visit_h) - .ok_or_else(|| GraphError::ContainerInconsistency(format!("inconsistency inside block statuses iterating through descendants of {} - missing {}", block_id, visit_h)))? - .0 - .children - .iter() - .for_each(|thread_children| to_visit.extend(thread_children.keys())); - } - Ok(result) - } - - /// Process an incoming header. - /// - /// Checks performed: - /// - Number of parents matches thread count. - /// - Slot above 0. - /// - Valid thread. - /// - Check that the block is older than the latest final one in thread. - /// - Check that the block slot is not too much into the future, - /// as determined by the configuration `future_block_processing_max_periods`. - /// - Check if it was the creator's turn to create this block. - /// - TODO: check for double staking. - /// - Check parents are present. - /// - Check the topological consistency of the parents. - /// - Check endorsements. - /// - Check thread incompatibility test. 
- /// - Check grandpa incompatibility test. - /// - Check if the block is incompatible with a parent. - /// - Check if the block is incompatible with a final block. - fn check_header( - &self, - block_id: &BlockId, - header: &WrappedHeader, - current_slot: Option, - ) -> Result { - massa_trace!("consensus.block_graph.check_header", { - "block_id": block_id - }); - let mut parents: Vec<(BlockId, u64)> = Vec::with_capacity(self.cfg.thread_count as usize); - let mut incomp = PreHashSet::::default(); - let mut missing_deps = PreHashSet::::default(); - let creator_addr = header.creator_address; - - // check that is older than the latest final block in that thread - // Note: this excludes genesis blocks - if header.content.slot.period - <= self.latest_final_blocks_periods[header.content.slot.thread as usize].1 - { - return Ok(HeaderCheckOutcome::Discard(DiscardReason::Stale)); - } - - // check if block slot is too much in the future - if let Some(cur_slot) = current_slot { - if header.content.slot.period - > cur_slot - .period - .saturating_add(self.cfg.future_block_processing_max_periods) - { - return Ok(HeaderCheckOutcome::WaitForSlot); - } - } - - // check if it was the creator's turn to create this block - let slot_draw_address = match self.selector_controller.get_producer(header.content.slot) { - Ok(draw) => draw, - Err(_) => return Ok(HeaderCheckOutcome::WaitForSlot), // TODO properly handle PoS errors - }; - if creator_addr != slot_draw_address { - // it was not the creator's turn to create a block for this slot - return Ok(HeaderCheckOutcome::Discard(DiscardReason::Invalid( - format!("Bad creator turn for the slot:{}", header.content.slot), - ))); - } - - // check if block is in the future: queue it - // note: do it after testing signature + draw to prevent queue flooding/DoS - // note: Some(x) > None - if Some(header.content.slot) > current_slot { - return Ok(HeaderCheckOutcome::WaitForSlot); - } - - // Note: here we will check if we already have a block for 
that slot - // and if someone double staked, they will be denounced - - // list parents and ensure they are present - let parent_set: PreHashSet = header.content.parents.iter().copied().collect(); - for parent_thread in 0u8..self.cfg.thread_count { - let parent_hash = header.content.parents[parent_thread as usize]; - match self.block_statuses.get(&parent_hash) { - Some(BlockStatus::Discarded { reason, .. }) => { - // parent is discarded - return Ok(HeaderCheckOutcome::Discard(match reason { - DiscardReason::Invalid(invalid_reason) => DiscardReason::Invalid(format!( - "discarded because a parent was discarded for the following reason: {}", - invalid_reason - )), - r => r.clone(), - })); - } - Some(BlockStatus::Active { - a_block: parent, .. - }) => { - // parent is active - - // check that the parent is from an earlier slot in the right thread - if parent.slot.thread != parent_thread || parent.slot >= header.content.slot { - return Ok(HeaderCheckOutcome::Discard(DiscardReason::Invalid( - format!( - "Bad parent {} in thread:{} or slot:{} for {}.", - parent_hash, parent_thread, parent.slot, header.content.slot - ), - ))); - } - - // inherit parent incompatibilities - // and ensure parents are mutually compatible - if let Some(p_incomp) = self.gi_head.get(&parent_hash) { - if !p_incomp.is_disjoint(&parent_set) { - return Ok(HeaderCheckOutcome::Discard(DiscardReason::Invalid( - "Parent not mutually compatible".to_string(), - ))); - } - incomp.extend(p_incomp); - } - - parents.push((parent_hash, parent.slot.period)); - } - _ => { - // parent is missing or queued - if self.genesis_hashes.contains(&parent_hash) { - // forbid depending on discarded genesis block - return Ok(HeaderCheckOutcome::Discard(DiscardReason::Stale)); - } - missing_deps.insert(parent_hash); - } - } - } - if !missing_deps.is_empty() { - return Ok(HeaderCheckOutcome::WaitForDependencies(missing_deps)); - } - let inherited_incomp_count = incomp.len(); - - // check the topological consistency of the 
parents - { - let mut gp_max_slots = vec![0u64; self.cfg.thread_count as usize]; - for parent_i in 0..self.cfg.thread_count { - let (parent_h, parent_period) = parents[parent_i as usize]; - let parent = self - .get_active_block(&parent_h) - .ok_or_else(|| { - GraphError::ContainerInconsistency(format!( - "inconsistency inside block statuses searching parent {} of block {}", - parent_h, block_id - )) - })? - .0; - if parent_period < gp_max_slots[parent_i as usize] { - // a parent is earlier than a block known by another parent in that thread - return Ok(HeaderCheckOutcome::Discard(DiscardReason::Invalid( - "a parent is earlier than a block known by another parent in that thread" - .to_string(), - ))); - } - gp_max_slots[parent_i as usize] = parent_period; - if parent_period == 0 { - // genesis - continue; - } - for gp_i in 0..self.cfg.thread_count { - if gp_i == parent_i { - continue; - } - let gp_h = parent.parents[gp_i as usize].0; - match self.block_statuses.get(&gp_h) { - // this grandpa is discarded - Some(BlockStatus::Discarded { reason, .. }) => { - return Ok(HeaderCheckOutcome::Discard(reason.clone())); - } - // this grandpa is active - Some(BlockStatus::Active { a_block: gp, .. }) => { - if gp.slot.period > gp_max_slots[gp_i as usize] { - if gp_i < parent_i { - return Ok(HeaderCheckOutcome::Discard( - DiscardReason::Invalid( - "grandpa error: gp_i < parent_i".to_string(), - ), - )); - } - gp_max_slots[gp_i as usize] = gp.slot.period; - } - } - // this grandpa is missing, assume stale - _ => return Ok(HeaderCheckOutcome::Discard(DiscardReason::Stale)), - } - } - } - } - - // get parent in own thread - let parent_in_own_thread = BlockGraph::get_full_active_block( - &self.block_statuses, - parents[header.content.slot.thread as usize].0, - ) - .ok_or_else(|| { - GraphError::ContainerInconsistency(format!( - "inconsistency inside block statuses searching parent {} in own thread of block {}", - parents[header.content.slot.thread as usize].0, block_id - )) - })? 
- .0; - - // check endorsements - match self.check_endorsements(header)? { - EndorsementsCheckOutcome::Proceed => {} - EndorsementsCheckOutcome::Discard(reason) => { - return Ok(HeaderCheckOutcome::Discard(reason)) - } - EndorsementsCheckOutcome::WaitForSlot => return Ok(HeaderCheckOutcome::WaitForSlot), - } - - // thread incompatibility test - parent_in_own_thread.children[header.content.slot.thread as usize] - .keys() - .filter(|&sibling_h| sibling_h != block_id) - .try_for_each(|&sibling_h| { - incomp.extend(self.get_active_block_and_descendants(&sibling_h)?); - Result::<()>::Ok(()) - })?; - - // grandpa incompatibility test - for tau in (0u8..self.cfg.thread_count).filter(|&t| t != header.content.slot.thread) { - // for each parent in a different thread tau - // traverse parent's descendants in tau - let mut to_explore = vec![(0usize, header.content.parents[tau as usize])]; - while let Some((cur_gen, cur_h)) = to_explore.pop() { - let cur_b = BlockGraph::get_full_active_block(&self.block_statuses, cur_h) - .ok_or_else(|| GraphError::ContainerInconsistency(format!("inconsistency inside block statuses searching {} while checking grandpa incompatibility of block {}",cur_h, block_id)))?.0; - - // traverse but do not check up to generation 1 - if cur_gen <= 1 { - to_explore.extend( - cur_b.children[tau as usize] - .keys() - .map(|&c_h| (cur_gen + 1, c_h)), - ); - continue; - } - - let parent_id = { - self.storage - .read_blocks() - .get(&cur_b.block_id) - .ok_or_else(|| { - GraphError::MissingBlock(format!( - "missing block in grandpa incomp test: {}", - cur_b.block_id - )) - })? 
- .content - .header - .content - .parents[header.content.slot.thread as usize] - }; - - // check if the parent in tauB has a strictly lower period number than B's parent in tauB - // note: cur_b cannot be genesis at gen > 1 - if BlockGraph::get_full_active_block( - &self.block_statuses, - parent_id, - ) - .ok_or_else(|| - GraphError::ContainerInconsistency( - format!("inconsistency inside block statuses searching {} check if the parent in tauB has a strictly lower period number than B's parent in tauB while checking grandpa incompatibility of block {}", - parent_id, - block_id) - ))? - .0 - .slot - .period - < parent_in_own_thread.slot.period - { - // GPI detected - incomp.extend(self.get_active_block_and_descendants(&cur_h)?); - } // otherwise, cur_b and its descendants cannot be GPI with the block: don't traverse - } - } - - // check if the block is incompatible with a parent - if !incomp.is_disjoint(&parents.iter().map(|(h, _p)| *h).collect()) { - return Ok(HeaderCheckOutcome::Discard(DiscardReason::Invalid( - "Block incompatible with a parent".to_string(), - ))); - } - - // check if the block is incompatible with a final block - if !incomp.is_disjoint( - &self - .active_index - .iter() - .filter_map(|h| { - if let Some(BlockStatus::Active { a_block: a, .. 
}) = self.block_statuses.get(h) - { - if a.is_final { - return Some(*h); - } - } - None - }) - .collect(), - ) { - return Ok(HeaderCheckOutcome::Discard(DiscardReason::Stale)); - } - massa_trace!("consensus.block_graph.check_header.ok", { - "block_id": block_id - }); - - Ok(HeaderCheckOutcome::Proceed { - parents_hash_period: parents, - incompatibilities: incomp, - inherited_incompatibilities_count: inherited_incomp_count, - fitness: header.get_fitness(), - }) - } - - /// check endorsements: - /// * endorser was selected for that (slot, index) - /// * endorsed slot is `parent_in_own_thread` slot - fn check_endorsements(&self, header: &WrappedHeader) -> Result { - // check endorsements - let endorsement_draws = match self.selector_controller.get_selection(header.content.slot) { - Ok(sel) => sel.endorsements, - Err(_) => return Ok(EndorsementsCheckOutcome::WaitForSlot), - }; - for endorsement in header.content.endorsements.iter() { - // check that the draw is correct - if endorsement.creator_address != endorsement_draws[endorsement.content.index as usize] - { - return Ok(EndorsementsCheckOutcome::Discard(DiscardReason::Invalid( - format!( - "endorser draw mismatch for header in slot: {}", - header.content.slot - ), - ))); - } - - // note that the following aspects are checked in protocol - // * signature - // * index reuse - // * slot matching the block's - // * the endorsed block is the containing block's parent - } - - Ok(EndorsementsCheckOutcome::Proceed) - } - - /// get genesis block ids - pub fn get_genesis_block_ids(&self) -> &Vec { - &self.genesis_hashes - } - - /// Computes max cliques of compatible blocks - pub fn compute_max_cliques(&self) -> Vec> { - let mut max_cliques: Vec> = Vec::new(); - - // algorithm adapted from IK_GPX as summarized in: - // Cazals et al., "A note on the problem of reporting maximal cliques" - // Theoretical Computer Science, 2008 - // https://doi.org/10.1016/j.tcs.2008.05.010 - - // stack: r, p, x - let mut stack: Vec<( - 
PreHashSet, - PreHashSet, - PreHashSet, - )> = vec![( - PreHashSet::::default(), - self.gi_head.keys().cloned().collect(), - PreHashSet::::default(), - )]; - while let Some((r, mut p, mut x)) = stack.pop() { - if p.is_empty() && x.is_empty() { - max_cliques.push(r); - continue; - } - // choose the pivot vertex following the GPX scheme: - // u_p = node from (p \/ x) that maximizes the cardinality of (P \ Neighbors(u_p, GI)) - let &u_p = p - .union(&x) - .max_by_key(|&u| { - p.difference(&(&self.gi_head[u] | &vec![*u].into_iter().collect())) - .count() - }) - .unwrap(); // p was checked to be non-empty before - - // iterate over u_set = (p /\ Neighbors(u_p, GI)) - let u_set: PreHashSet = - &p & &(&self.gi_head[&u_p] | &vec![u_p].into_iter().collect()); - for u_i in u_set.into_iter() { - p.remove(&u_i); - let u_i_set: PreHashSet = vec![u_i].into_iter().collect(); - let comp_n_u_i: PreHashSet = &self.gi_head[&u_i] | &u_i_set; - stack.push((&r | &u_i_set, &p - &comp_n_u_i, &x - &comp_n_u_i)); - x.insert(u_i); - } - } - if max_cliques.is_empty() { - // make sure at least one clique remains - max_cliques = vec![PreHashSet::::default()]; - } - max_cliques - } - - #[allow(clippy::too_many_arguments)] - fn add_block_to_graph( - &mut self, - add_block_id: BlockId, - parents_hash_period: Vec<(BlockId, u64)>, - add_block_creator: PublicKey, - add_block_slot: Slot, - incomp: PreHashSet, - inherited_incomp_count: usize, - fitness: u64, - mut storage: Storage, - ) -> Result<()> { - massa_trace!("consensus.block_graph.add_block_to_graph", { - "block_id": add_block_id - }); - - // Ensure block parents are claimed by the block's storage. - // Note that operations and endorsements should already be there (claimed in Protocol). 
- storage.claim_block_refs(&parents_hash_period.iter().map(|(p_id, _)| *p_id).collect()); - - // add block to status structure - self.block_statuses.insert( - add_block_id, - BlockStatus::Active { - a_block: Box::new(ActiveBlock { - creator_address: Address::from_public_key(&add_block_creator), - parents: parents_hash_period.clone(), - descendants: PreHashSet::::default(), - block_id: add_block_id, - children: vec![Default::default(); self.cfg.thread_count as usize], - is_final: false, - slot: add_block_slot, - fitness, - }), - storage, - }, - ); - self.active_index.insert(add_block_id); - - // add as child to parents - for (parent_h, _parent_period) in parents_hash_period.iter() { - if let Some(BlockStatus::Active { - a_block: a_parent, .. - }) = self.block_statuses.get_mut(parent_h) - { - a_parent.children[add_block_slot.thread as usize] - .insert(add_block_id, add_block_slot.period); - } else { - return Err(GraphError::ContainerInconsistency(format!( - "inconsistency inside block statuses adding child {} of block {}", - add_block_id, parent_h - ))); - } - } - - // add as descendant to ancestors. Note: descendants are never removed. - { - let mut ancestors: VecDeque = - parents_hash_period.iter().map(|(h, _)| *h).collect(); - let mut visited = PreHashSet::::default(); - while let Some(ancestor_h) = ancestors.pop_back() { - if !visited.insert(ancestor_h) { - continue; - } - if let Some(BlockStatus::Active { a_block: ab, .. }) = - self.block_statuses.get_mut(&ancestor_h) - { - ab.descendants.insert(add_block_id); - for (ancestor_parent_h, _) in ab.parents.iter() { - ancestors.push_front(*ancestor_parent_h); - } - } - } - } - - // add incompatibilities to gi_head - massa_trace!( - "consensus.block_graph.add_block_to_graph.add_incompatibilities", - {} - ); - for incomp_h in incomp.iter() { - self.gi_head - .get_mut(incomp_h) - .ok_or_else(|| { - GraphError::MissingBlock(format!( - "missing block when adding incomp to gi_head: {}", - incomp_h - )) - })? 
- .insert(add_block_id); - } - self.gi_head.insert(add_block_id, incomp.clone()); - - // max cliques update - massa_trace!( - "consensus.block_graph.add_block_to_graph.max_cliques_update", - {} - ); - if incomp.len() == inherited_incomp_count { - // clique optimization routine: - // the block only has incompatibilities inherited from its parents - // therefore it is not forking and can simply be added to the cliques it is compatible with - self.max_cliques - .iter_mut() - .filter(|c| incomp.is_disjoint(&c.block_ids)) - .for_each(|c| { - c.block_ids.insert(add_block_id); - }); - } else { - // fully recompute max cliques - massa_trace!( - "consensus.block_graph.add_block_to_graph.clique_full_computing", - { "hash": add_block_id } - ); - let before = self.max_cliques.len(); - self.max_cliques = self - .compute_max_cliques() - .into_iter() - .map(|c| Clique { - block_ids: c, - fitness: 0, - is_blockclique: false, - }) - .collect(); - let after = self.max_cliques.len(); - if before != after { - massa_trace!( - "consensus.block_graph.add_block_to_graph.clique_full_computing more than one clique", - { "cliques": self.max_cliques, "gi_head": self.gi_head } - ); - // gi_head - debug!( - "clique number went from {} to {} after adding {}", - before, after, add_block_id - ); - } - } - - // compute clique fitnesses and find blockclique - massa_trace!("consensus.block_graph.add_block_to_graph.compute_clique_fitnesses_and_find_blockclique", {}); - // note: clique_fitnesses is pair (fitness, -hash_sum) where the second parameter is negative for sorting - { - let mut blockclique_i = 0usize; - let mut max_clique_fitness = (0u64, num::BigInt::default()); - for (clique_i, clique) in self.max_cliques.iter_mut().enumerate() { - clique.fitness = 0; - clique.is_blockclique = false; - let mut sum_hash = num::BigInt::default(); - for block_h in clique.block_ids.iter() { - clique.fitness = clique.fitness - .checked_add( - BlockGraph::get_full_active_block(&self.block_statuses, *block_h) - 
.ok_or_else(|| GraphError::ContainerInconsistency(format!("inconsistency inside block statuses computing fitness while adding {} - missing {}", add_block_id, block_h)))? - .0.fitness, - ) - .ok_or(GraphError::FitnessOverflow)?; - sum_hash -= - num::BigInt::from_bytes_be(num::bigint::Sign::Plus, block_h.to_bytes()); - } - let cur_fit = (clique.fitness, sum_hash); - if cur_fit > max_clique_fitness { - blockclique_i = clique_i; - max_clique_fitness = cur_fit; - } - } - self.max_cliques[blockclique_i].is_blockclique = true; - } - - // update best parents - massa_trace!( - "consensus.block_graph.add_block_to_graph.update_best_parents", - {} - ); - { - // find blockclique - let blockclique_i = self - .max_cliques - .iter() - .position(|c| c.is_blockclique) - .unwrap_or_default(); - let blockclique = &self.max_cliques[blockclique_i]; - - // init best parents as latest_final_blocks_periods - self.best_parents = self.latest_final_blocks_periods.clone(); - // for each blockclique block, set it as best_parent in its own thread - // if its period is higher than the current best_parent in that thread - for block_h in blockclique.block_ids.iter() { - let b_slot = BlockGraph::get_full_active_block(&self.block_statuses, *block_h) - .ok_or_else(|| GraphError::ContainerInconsistency(format!("inconsistency inside block statuses updating best parents while adding {} - missing {}", add_block_id, block_h)))? 
- .0.slot; - if b_slot.period > self.best_parents[b_slot.thread as usize].1 { - self.best_parents[b_slot.thread as usize] = (*block_h, b_slot.period); - } - } - } - - // list stale blocks - massa_trace!( - "consensus.block_graph.add_block_to_graph.list_stale_blocks", - {} - ); - let stale_blocks = { - let blockclique_i = self - .max_cliques - .iter() - .position(|c| c.is_blockclique) - .unwrap_or_default(); - let fitness_threshold = self.max_cliques[blockclique_i] - .fitness - .saturating_sub(self.cfg.delta_f0); - // iterate from largest to smallest to minimize reallocations - let mut indices: Vec = (0..self.max_cliques.len()).collect(); - indices - .sort_unstable_by_key(|&i| std::cmp::Reverse(self.max_cliques[i].block_ids.len())); - let mut high_set = PreHashSet::::default(); - let mut low_set = PreHashSet::::default(); - for clique_i in indices.into_iter() { - if self.max_cliques[clique_i].fitness >= fitness_threshold { - high_set.extend(&self.max_cliques[clique_i].block_ids); - } else { - low_set.extend(&self.max_cliques[clique_i].block_ids); - } - } - self.max_cliques.retain(|c| c.fitness >= fitness_threshold); - &low_set - &high_set - }; - // mark stale blocks - massa_trace!( - "consensus.block_graph.add_block_to_graph.mark_stale_blocks", - {} - ); - for stale_block_hash in stale_blocks.into_iter() { - if let Some(BlockStatus::Active { - a_block: active_block, - storage: _storage, - }) = self.block_statuses.remove(&stale_block_hash) - { - self.active_index.remove(&stale_block_hash); - if active_block.is_final { - return Err(GraphError::ContainerInconsistency(format!("inconsistency inside block statuses removing stale blocks adding {} - block {} was already final", add_block_id, stale_block_hash))); - } - - // remove from gi_head - if let Some(other_incomps) = self.gi_head.remove(&stale_block_hash) { - for other_incomp in other_incomps.into_iter() { - if let Some(other_incomp_lst) = self.gi_head.get_mut(&other_incomp) { - 
other_incomp_lst.remove(&stale_block_hash); - } - } - } - - // remove from cliques - let stale_block_fitness = active_block.fitness; - self.max_cliques.iter_mut().for_each(|c| { - if c.block_ids.remove(&stale_block_hash) { - c.fitness -= stale_block_fitness; - } - }); - self.max_cliques.retain(|c| !c.block_ids.is_empty()); // remove empty cliques - if self.max_cliques.is_empty() { - // make sure at least one clique remains - self.max_cliques = vec![Clique { - block_ids: PreHashSet::::default(), - fitness: 0, - is_blockclique: true, - }]; - } - - // remove from parent's children - for (parent_h, _parent_period) in active_block.parents.iter() { - if let Some(BlockStatus::Active { - a_block: parent_active_block, - .. - }) = self.block_statuses.get_mut(parent_h) - { - parent_active_block.children[active_block.slot.thread as usize] - .remove(&stale_block_hash); - } - } - - massa_trace!("consensus.block_graph.add_block_to_graph.stale", { - "hash": stale_block_hash - }); - - // mark as stale - self.new_stale_blocks.insert( - stale_block_hash, - (active_block.creator_address, active_block.slot), - ); - self.block_statuses.insert( - stale_block_hash, - BlockStatus::Discarded { - slot: active_block.slot, - creator: active_block.creator_address, - parents: active_block.parents.iter().map(|(h, _)| *h).collect(), - reason: DiscardReason::Stale, - sequence_number: BlockGraph::new_sequence_number( - &mut self.sequence_counter, - ), - }, - ); - self.discarded_index.insert(stale_block_hash); - } else { - return Err(GraphError::ContainerInconsistency(format!("inconsistency inside block statuses removing stale blocks adding {} - block {} is missing", add_block_id, stale_block_hash))); - } - } - - // list final blocks - massa_trace!( - "consensus.block_graph.add_block_to_graph.list_final_blocks", - {} - ); - let final_blocks = { - // short-circuiting intersection of cliques from smallest to largest - let mut indices: Vec = (0..self.max_cliques.len()).collect(); - 
indices.sort_unstable_by_key(|&i| self.max_cliques[i].block_ids.len()); - let mut final_candidates = self.max_cliques[indices[0]].block_ids.clone(); - for i in 1..indices.len() { - final_candidates.retain(|v| self.max_cliques[i].block_ids.contains(v)); - if final_candidates.is_empty() { - break; - } - } - - // restrict search to cliques with high enough fitness, sort cliques by fitness (highest to lowest) - massa_trace!( - "consensus.block_graph.add_block_to_graph.list_final_blocks.restrict", - {} - ); - indices.retain(|&i| self.max_cliques[i].fitness > self.cfg.delta_f0); - indices.sort_unstable_by_key(|&i| std::cmp::Reverse(self.max_cliques[i].fitness)); - - let mut final_blocks = PreHashSet::::default(); - for clique_i in indices.into_iter() { - massa_trace!( - "consensus.block_graph.add_block_to_graph.list_final_blocks.loop", - { "clique_i": clique_i } - ); - // check in cliques from highest to lowest fitness - if final_candidates.is_empty() { - // no more final candidates - break; - } - let clique = &self.max_cliques[clique_i]; - - // compute the total fitness of all the descendants of the candidate within the clique - let loc_candidates = final_candidates.clone(); - for candidate_h in loc_candidates.into_iter() { - let desc_fit: u64 = - BlockGraph::get_full_active_block(&self.block_statuses, candidate_h) - .ok_or_else(|| { - GraphError::MissingBlock(format!( - "missing block when computing total fitness of descendants: {}", - candidate_h - )) - })? - .0 - .descendants - .intersection(&clique.block_ids) - .map(|h| { - if let Some(BlockStatus::Active { a_block: ab, .. 
}) = - self.block_statuses.get(h) - { - return ab.fitness; - } - 0 - }) - .sum(); - if desc_fit > self.cfg.delta_f0 { - // candidate is final - final_candidates.remove(&candidate_h); - final_blocks.insert(candidate_h); - } - } - } - final_blocks - }; - - // mark final blocks and update latest_final_blocks_periods - massa_trace!( - "consensus.block_graph.add_block_to_graph.mark_final_blocks", - {} - ); - for final_block_hash in final_blocks.into_iter() { - // remove from gi_head - if let Some(other_incomps) = self.gi_head.remove(&final_block_hash) { - for other_incomp in other_incomps.into_iter() { - if let Some(other_incomp_lst) = self.gi_head.get_mut(&other_incomp) { - other_incomp_lst.remove(&final_block_hash); - } - } - } - - // mark as final and update latest_final_blocks_periods - if let Some(BlockStatus::Active { - a_block: final_block, - .. - }) = self.block_statuses.get_mut(&final_block_hash) - { - massa_trace!("consensus.block_graph.add_block_to_graph.final", { - "hash": final_block_hash - }); - final_block.is_final = true; - // remove from cliques - let final_block_fitness = final_block.fitness; - self.max_cliques.iter_mut().for_each(|c| { - if c.block_ids.remove(&final_block_hash) { - c.fitness -= final_block_fitness; - } - }); - self.max_cliques.retain(|c| !c.block_ids.is_empty()); // remove empty cliques - if self.max_cliques.is_empty() { - // make sure at least one clique remains - self.max_cliques = vec![Clique { - block_ids: PreHashSet::::default(), - fitness: 0, - is_blockclique: true, - }]; - } - // update latest final blocks - if final_block.slot.period - > self.latest_final_blocks_periods[final_block.slot.thread as usize].1 - { - self.latest_final_blocks_periods[final_block.slot.thread as usize] = - (final_block_hash, final_block.slot.period); - } - // update new final blocks list - self.new_final_blocks.insert(final_block_hash); - } else { - return Err(GraphError::ContainerInconsistency(format!("inconsistency inside block statuses updating 
final blocks adding {} - block {} is missing", add_block_id, final_block_hash))); - } - } - - massa_trace!("consensus.block_graph.add_block_to_graph.end", {}); - Ok(()) - } - - fn list_required_active_blocks(&self) -> Result> { - // list all active blocks - let mut retain_active: PreHashSet = - PreHashSet::::with_capacity(self.active_index.len()); - - let latest_final_blocks: Vec = self - .latest_final_blocks_periods - .iter() - .map(|(hash, _)| *hash) - .collect(); - - // retain all non-final active blocks, - // the current "best parents", - // and the dependencies for both. - for block_id in self.active_index.iter() { - if let Some(BlockStatus::Active { - a_block: active_block, - .. - }) = self.block_statuses.get(block_id) - { - if !active_block.is_final - || self.best_parents.iter().any(|(b, _p)| b == block_id) - || latest_final_blocks.contains(block_id) - { - retain_active.extend(active_block.parents.iter().map(|(p, _)| *p)); - retain_active.insert(*block_id); - } - } - } - - // retain best parents - retain_active.extend(self.best_parents.iter().map(|(b, _p)| *b)); - - // retain last final blocks - retain_active.extend(self.latest_final_blocks_periods.iter().map(|(h, _)| *h)); - - for (thread, id) in latest_final_blocks.iter().enumerate() { - let mut current_block_id = *id; - while let Some((current_block, _)) = self.get_active_block(¤t_block_id) { - let parent_id = { - if !current_block.parents.is_empty() { - Some(current_block.parents[thread].0) - } else { - None - } - }; - - // retain block - retain_active.insert(current_block_id); - - // stop traversing when reaching a block with period number low enough - // so that any of its operations will have their validity period expired at the latest final block in thread - // note: one more is kept because of the way we iterate - if current_block.slot.period - < self.latest_final_blocks_periods[thread] - .1 - .saturating_sub(self.cfg.operation_validity_periods) - { - break; - } - - // if not genesis, traverse 
parent - match parent_id { - Some(p_id) => current_block_id = p_id, - None => break, - } - } - } - - // grow with parents & fill thread holes twice - for _ in 0..2 { - // retain the parents of the selected blocks - let retain_clone = retain_active.clone(); - - for retain_h in retain_clone.into_iter() { - retain_active.extend( - self.get_active_block(&retain_h) - .ok_or_else(|| GraphError::ContainerInconsistency(format!("inconsistency inside block statuses pruning and retaining the parents of the selected blocks - {} is missing", retain_h)))? - .0.parents - .iter() - .map(|(b_id, _p)| *b_id), - ) - } - - // find earliest kept slots in each thread - let mut earliest_retained_periods: Vec = self - .latest_final_blocks_periods - .iter() - .map(|(_, p)| *p) - .collect(); - for retain_h in retain_active.iter() { - let retain_slot = &self - .get_active_block(retain_h) - .ok_or_else(|| GraphError::ContainerInconsistency(format!("inconsistency inside block statuses pruning and finding earliest kept slots in each thread - {} is missing", retain_h)))? 
- .0.slot; - earliest_retained_periods[retain_slot.thread as usize] = std::cmp::min( - earliest_retained_periods[retain_slot.thread as usize], - retain_slot.period, - ); - } - - // fill up from the latest final block back to the earliest for each thread - for thread in 0..self.cfg.thread_count { - let mut cursor = self.latest_final_blocks_periods[thread as usize].0; // hash of tha latest final in that thread - while let Some((c_block, _)) = self.get_active_block(&cursor) { - if c_block.slot.period < earliest_retained_periods[thread as usize] { - break; - } - retain_active.insert(cursor); - if c_block.parents.is_empty() { - // genesis - break; - } - cursor = c_block.parents[thread as usize].0; - } - } - } - - Ok(retain_active) - } - - /// prune active blocks and return final blocks, return discarded final blocks - fn prune_active(&mut self) -> Result> { - // list required active blocks - let mut retain_active = self.list_required_active_blocks()?; - - // retain extra history according to the config - // this is useful to avoid desync on temporary connection loss - for a_block in self.active_index.iter() { - if let Some(BlockStatus::Active { - a_block: active_block, - .. 
- }) = self.block_statuses.get(a_block) - { - let (_b_id, latest_final_period) = - self.latest_final_blocks_periods[active_block.slot.thread as usize]; - if active_block.slot.period - >= latest_final_period.saturating_sub(self.cfg.force_keep_final_periods) - { - retain_active.insert(*a_block); - } - } - } - - // remove unused final active blocks - let mut discarded_finals: PreHashMap = PreHashMap::default(); - let to_remove: Vec = self - .active_index - .difference(&retain_active) - .copied() - .collect(); - for discard_active_h in to_remove { - let block_slot; - let block_creator; - let block_parents; - { - let read_blocks = self.storage.read_blocks(); - let block = read_blocks.get(&discard_active_h).ok_or_else(|| { - GraphError::MissingBlock(format!( - "missing block when removing unused final active blocks: {}", - discard_active_h - )) - })?; - block_slot = block.content.header.content.slot; - block_creator = block.creator_address; - block_parents = block.content.header.content.parents.clone(); - }; - - let discarded_active = if let Some(BlockStatus::Active { - a_block: discarded_active, - .. - }) = self.block_statuses.remove(&discard_active_h) - { - self.active_index.remove(&discard_active_h); - discarded_active - } else { - return Err(GraphError::ContainerInconsistency(format!("inconsistency inside block statuses pruning and removing unused final active blocks - {} is missing", discard_active_h))); - }; - - // remove from parent's children - for (parent_h, _parent_period) in discarded_active.parents.iter() { - if let Some(BlockStatus::Active { - a_block: parent_active_block, - .. 
- }) = self.block_statuses.get_mut(parent_h) - { - parent_active_block.children[discarded_active.slot.thread as usize] - .remove(&discard_active_h); - } - } - - massa_trace!("consensus.block_graph.prune_active", {"hash": discard_active_h, "reason": DiscardReason::Final}); - - // mark as final - self.block_statuses.insert( - discard_active_h, - BlockStatus::Discarded { - slot: block_slot, - creator: block_creator, - parents: block_parents, - reason: DiscardReason::Final, - sequence_number: BlockGraph::new_sequence_number(&mut self.sequence_counter), - }, - ); - self.discarded_index.insert(discard_active_h); - - discarded_finals.insert(discard_active_h, *discarded_active); - } - - Ok(discarded_finals) - } - - fn promote_dep_tree(&mut self, hash: BlockId) -> Result<()> { - let mut to_explore = vec![hash]; - let mut to_promote: PreHashMap = PreHashMap::default(); - while let Some(h) = to_explore.pop() { - if to_promote.contains_key(&h) { - continue; - } - if let Some(BlockStatus::WaitingForDependencies { - header_or_block, - unsatisfied_dependencies, - sequence_number, - .. - }) = self.block_statuses.get(&h) - { - // promote current block - to_promote.insert(h, (header_or_block.get_slot(), *sequence_number)); - // register dependencies for exploration - to_explore.extend(unsatisfied_dependencies); - } - } - - let mut to_promote: Vec<(Slot, u64, BlockId)> = to_promote - .into_iter() - .map(|(h, (slot, seq))| (slot, seq, h)) - .collect(); - to_promote.sort_unstable(); // last ones should have the highest seq number - for (_slot, _seq, h) in to_promote.into_iter() { - if let Some(BlockStatus::WaitingForDependencies { - sequence_number, .. 
- }) = self.block_statuses.get_mut(&h) - { - *sequence_number = BlockGraph::new_sequence_number(&mut self.sequence_counter); - } - } - Ok(()) - } - - fn prune_waiting_for_dependencies(&mut self) -> Result<()> { - let mut to_discard: PreHashMap> = PreHashMap::default(); - let mut to_keep: PreHashMap = PreHashMap::default(); - - // list items that are older than the latest final blocks in their threads or have deps that are discarded - { - for block_id in self.waiting_for_dependencies_index.iter() { - if let Some(BlockStatus::WaitingForDependencies { - header_or_block, - unsatisfied_dependencies, - sequence_number, - }) = self.block_statuses.get(block_id) - { - // has already discarded dependencies => discard (choose worst reason) - let mut discard_reason = None; - let mut discarded_dep_found = false; - for dep in unsatisfied_dependencies.iter() { - if let Some(BlockStatus::Discarded { reason, .. }) = - self.block_statuses.get(dep) - { - discarded_dep_found = true; - match reason { - DiscardReason::Invalid(reason) => { - discard_reason = Some(DiscardReason::Invalid(format!("discarded because depend on block:{} that has discard reason:{}", block_id, reason))); - break; - } - DiscardReason::Stale => discard_reason = Some(DiscardReason::Stale), - DiscardReason::Final => discard_reason = Some(DiscardReason::Stale), - } - } - } - if discarded_dep_found { - to_discard.insert(*block_id, discard_reason); - continue; - } - - // is at least as old as the latest final block in its thread => discard as stale - let slot = header_or_block.get_slot(); - if slot.period <= self.latest_final_blocks_periods[slot.thread as usize].1 { - to_discard.insert(*block_id, Some(DiscardReason::Stale)); - continue; - } - - // otherwise, mark as to_keep - to_keep.insert(*block_id, (*sequence_number, header_or_block.get_slot())); - } - } - } - - // discard in chain and because of limited size - while !to_keep.is_empty() { - // mark entries as to_discard and remove them from to_keep - for (hash, 
_old_order) in to_keep.clone().into_iter() { - if let Some(BlockStatus::WaitingForDependencies { - unsatisfied_dependencies, - .. - }) = self.block_statuses.get(&hash) - { - // has dependencies that will be discarded => discard (choose worst reason) - let mut discard_reason = None; - let mut dep_to_discard_found = false; - for dep in unsatisfied_dependencies.iter() { - if let Some(reason) = to_discard.get(dep) { - dep_to_discard_found = true; - match reason { - Some(DiscardReason::Invalid(reason)) => { - discard_reason = Some(DiscardReason::Invalid(format!("discarded because depend on block:{} that has discard reason:{}", hash, reason))); - break; - } - Some(DiscardReason::Stale) => { - discard_reason = Some(DiscardReason::Stale) - } - Some(DiscardReason::Final) => { - discard_reason = Some(DiscardReason::Stale) - } - None => {} // leave as None - } - } - } - if dep_to_discard_found { - to_keep.remove(&hash); - to_discard.insert(hash, discard_reason); - continue; - } - } - } - - // remove worst excess element - if to_keep.len() > self.cfg.max_dependency_blocks { - let remove_elt = to_keep - .iter() - .filter_map(|(hash, _old_order)| { - if let Some(BlockStatus::WaitingForDependencies { - header_or_block, - sequence_number, - .. - }) = self.block_statuses.get(hash) - { - return Some((sequence_number, header_or_block.get_slot(), *hash)); - } - None - }) - .min(); - if let Some((_seq_num, _slot, hash)) = remove_elt { - to_keep.remove(&hash); - to_discard.insert(hash, None); - continue; - } - } - - // nothing happened: stop loop - break; - } - - // transition states to Discarded if there is a reason, otherwise just drop - for (block_id, reason_opt) in to_discard.drain() { - if let Some(BlockStatus::WaitingForDependencies { - header_or_block, .. - }) = self.block_statuses.remove(&block_id) - { - self.waiting_for_dependencies_index.remove(&block_id); - let header = match header_or_block { - HeaderOrBlock::Header(h) => h, - HeaderOrBlock::Block { id: block_id, .. 
} => self - .storage - .read_blocks() - .get(&block_id) - .ok_or_else(|| { - GraphError::MissingBlock(format!( - "missing block when pruning waiting for deps: {}", - block_id - )) - })? - .content - .header - .clone(), - }; - massa_trace!("consensus.block_graph.prune_waiting_for_dependencies", {"hash": block_id, "reason": reason_opt}); - - if let Some(reason) = reason_opt { - // add to stats if reason is Stale - if reason == DiscardReason::Stale { - self.new_stale_blocks - .insert(block_id, (header.creator_address, header.content.slot)); - } - // transition to Discarded only if there is a reason - self.block_statuses.insert( - block_id, - BlockStatus::Discarded { - slot: header.content.slot, - creator: header.creator_address, - parents: header.content.parents.clone(), - reason, - sequence_number: BlockGraph::new_sequence_number( - &mut self.sequence_counter, - ), - }, - ); - self.discarded_index.insert(block_id); - } - } - } - - Ok(()) - } - - fn prune_slot_waiting(&mut self) { - if self.waiting_for_slot_index.len() <= self.cfg.max_future_processing_blocks { - return; - } - let mut slot_waiting: Vec<(Slot, BlockId)> = self - .waiting_for_slot_index - .iter() - .filter_map(|block_id| { - if let Some(BlockStatus::WaitingForSlot(header_or_block)) = - self.block_statuses.get(block_id) - { - return Some((header_or_block.get_slot(), *block_id)); - } - None - }) - .collect(); - slot_waiting.sort_unstable(); - let len_slot_waiting = slot_waiting.len(); - (self.cfg.max_future_processing_blocks..len_slot_waiting).for_each(|idx| { - let (_slot, block_id) = &slot_waiting[idx]; - self.block_statuses.remove(block_id); - self.waiting_for_slot_index.remove(block_id); - }); - } - - fn prune_discarded(&mut self) -> Result<()> { - if self.discarded_index.len() <= self.cfg.max_discarded_blocks { - return Ok(()); - } - let mut discard_hashes: Vec<(u64, BlockId)> = self - .discarded_index - .iter() - .filter_map(|block_id| { - if let Some(BlockStatus::Discarded { - sequence_number, .. 
- }) = self.block_statuses.get(block_id) - { - return Some((*sequence_number, *block_id)); - } - None - }) - .collect(); - discard_hashes.sort_unstable(); - discard_hashes.truncate(self.discarded_index.len() - self.cfg.max_discarded_blocks); - for (_, block_id) in discard_hashes.iter() { - self.block_statuses.remove(block_id); - self.discarded_index.remove(block_id); - } - Ok(()) - } - - /// prune and return final blocks, return discarded final blocks - pub fn prune(&mut self) -> Result> { - let before = self.max_cliques.len(); - // Step 1: discard final blocks that are not useful to the graph anymore and return them - let discarded_finals = self.prune_active()?; - - // Step 2: prune slot waiting blocks - self.prune_slot_waiting(); - - // Step 3: prune dependency waiting blocks - self.prune_waiting_for_dependencies()?; - - // Step 4: prune discarded - self.prune_discarded()?; - - let after = self.max_cliques.len(); - if before != after { - debug!( - "clique number went from {} to {} after pruning", - before, after - ); - } - - Ok(discarded_finals) - } - - /// get the current block wish list, including the operations hash. - pub fn get_block_wishlist(&self) -> Result>> { - let mut wishlist = PreHashMap::>::default(); - for block_id in self.waiting_for_dependencies_index.iter() { - if let Some(BlockStatus::WaitingForDependencies { - unsatisfied_dependencies, - .. - }) = self.block_statuses.get(block_id) - { - for unsatisfied_h in unsatisfied_dependencies.iter() { - match self.block_statuses.get(unsatisfied_h) { - Some(BlockStatus::WaitingForDependencies { - header_or_block: HeaderOrBlock::Header(header), - .. 
- }) => { - wishlist.insert(header.id, Some(header.clone())); - } - None => { - wishlist.insert(*unsatisfied_h, None); - } - _ => {} - } - } - } - } - - Ok(wishlist) - } - - /// get clique count - pub fn get_clique_count(&self) -> usize { - self.max_cliques.len() - } - - /// get the clique of higher fitness - pub fn get_blockclique(&self) -> &PreHashSet { - &self - .max_cliques - .iter() - .find(|c| c.is_blockclique) - .expect("blockclique missing") - .block_ids - } - - /// get the blockclique (or final) block ID at a given slot, if any - pub fn get_blockclique_block_at_slot(&self, slot: &Slot) -> Option { - // List all blocks at this slot. - // The list should be small: make a copy of it to avoid holding the storage lock. - let blocks_at_slot = { - let storage_read = self.storage.read_blocks(); - let returned = match storage_read.get_blocks_by_slot(slot) { - Some(v) => v.clone(), - None => return None, - }; - returned - }; - - // search for the block in the blockclique - let search_in_blockclique = blocks_at_slot - .intersection( - &self - .max_cliques - .iter() - .find(|c| c.is_blockclique) - .expect("expected one clique to be the blockclique") - .block_ids, - ) - .next(); - if let Some(found_id) = search_in_blockclique { - return Some(*found_id); - } - - // block not found in the blockclique: search in the final blocks - blocks_at_slot - .into_iter() - .find(|b_id| match self.block_statuses.get(b_id) { - Some(BlockStatus::Active { a_block, .. 
}) => a_block.is_final, - _ => false, - }) - } - - /// get the latest blockclique (or final) block ID that is the most recent, but still strictly older than `slot`, in the same thread as `slot` - pub fn get_latest_blockclique_block_at_slot(&self, slot: &Slot) -> BlockId { - let (mut best_block_id, mut best_block_period) = self - .latest_final_blocks_periods - .get(slot.thread as usize) - .unwrap_or_else(|| panic!("unexpected not found latest final block period")); - - self.max_cliques - .iter() - .find(|c| c.is_blockclique) - .expect("expected one clique to be the blockclique") - .block_ids - .iter() - .for_each(|id| match self.block_statuses.get(id) { - Some(BlockStatus::Active { - a_block, - storage: _, - }) => { - if a_block.is_final { - panic!( - "unexpected final block on getting latest blockclique block at slot" - ); - } - if a_block.slot.thread == slot.thread - && a_block.slot.period < slot.period - && a_block.slot.period > best_block_period - { - best_block_period = a_block.slot.period; - best_block_id = *id; - } - } - _ => { - panic!("expected to find only active block but found another status") - } - }); - best_block_id - } - - /// Gets all stored final blocks, not only the still-useful ones - /// This is used when initializing Execution from Consensus. - /// Since the Execution bootstrap snapshot is older than the Consensus snapshot, - /// we might need to signal older final blocks for Execution to catch up. - pub fn get_all_final_blocks(&self) -> HashMap { - self.active_index - .iter() - .map(|b_id| { - let (a_block, _storage) = - self.get_active_block(b_id).expect("active block missing"); - (*b_id, a_block.slot) - }) - .collect() - } - - /// Get the block id's to be propagated. - /// Must be called by the consensus worker within `block_db_changed`. - pub fn get_blocks_to_propagate(&mut self) -> PreHashMap { - mem::take(&mut self.to_propagate) - } - - /// Get the hashes of objects that were attack attempts. 
- /// Must be called by the consensus worker within `block_db_changed`. - pub fn get_attack_attempts(&mut self) -> Vec { - mem::take(&mut self.attack_attempts) - } - - /// Get the ids of blocks that became final. - /// Must be called by the consensus worker within `block_db_changed`. - pub fn get_new_final_blocks(&mut self) -> PreHashSet { - mem::take(&mut self.new_final_blocks) - } - - /// Get the ids of blocks that became stale. - /// Must be called by the consensus worker within `block_db_changed`. - pub fn get_new_stale_blocks(&mut self) -> PreHashMap { - mem::take(&mut self.new_stale_blocks) - } -} diff --git a/massa-graph/src/error.rs b/massa-graph/src/error.rs deleted file mode 100644 index 43822b0c09e..00000000000 --- a/massa-graph/src/error.rs +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright (c) 2022 MASSA LABS -use displaydoc::Display; -use massa_execution_exports::ExecutionError; -use massa_models::error::ModelsError; -use std::array::TryFromSliceError; -use thiserror::Error; - -/// Result used in the graph -pub type GraphResult = core::result::Result; - -/// Result used in the ledger -pub type LedgerResult = core::result::Result; - -/// Graph error -#[non_exhaustive] -#[derive(Display, Error, Debug)] -pub enum GraphError { - /// execution error: {0} - ExecutionError(#[from] ExecutionError), - /// models error: {0} - ModelsError(#[from] ModelsError), - /// Could not create genesis block {0} - GenesisCreationError(String), - /// missing block {0} - MissingBlock(String), - /// missing operation {0} - MissingOperation(String), - /// there was an inconsistency between containers {0} - ContainerInconsistency(String), - /// fitness overflow - FitnessOverflow, - /// invalid ledger change: {0} - InvalidLedgerChange(String), - /// io error {0} - IOError(#[from] std::io::Error), - /// serde error - SerdeError(#[from] serde_json::Error), - /// Proof of stake cycle unavailable {0} - PosCycleUnavailable(String), - /// Ledger error {0} - LedgerError(#[from] LedgerError), 
- /// transaction error {0} - TransactionError(String), -} - -/// Internal error -#[non_exhaustive] -#[derive(Display, Error, Debug)] -pub enum InternalError { - /// transaction error {0} - TransactionError(String), -} - -/// Ledger error -#[non_exhaustive] -#[derive(Display, Error, Debug)] -pub enum LedgerError { - /// amount overflow - AmountOverflowError, - /// ledger inconsistency error {0} - LedgerInconsistency(String), - /// models error: {0} - ModelsError(#[from] ModelsError), - /// try from slice error {0} - TryFromSliceError(#[from] TryFromSliceError), - /// io error {0} - IOError(#[from] std::io::Error), - /// serde error - SerdeError(#[from] serde_json::Error), -} diff --git a/massa-graph/src/lib.rs b/massa-graph/src/lib.rs deleted file mode 100644 index 6f78cb49505..00000000000 --- a/massa-graph/src/lib.rs +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright (c) 2022 MASSA LABS -//! graph management -#![warn(missing_docs)] -#![warn(unused_crate_dependencies)] -#![feature(async_closure)] -#![feature(hash_drain_filter)] -#![feature(int_roundings)] - -extern crate massa_logging; - -/// useful structures -pub mod export_active_block; - -mod bootstrapable_graph; -pub use bootstrapable_graph::{ - BootstrapableGraph, BootstrapableGraphDeserializer, BootstrapableGraphSerializer, -}; - -mod block_graph; -pub use block_graph::*; - -/// graph errors -pub mod error; - -/// graph settings -pub mod settings; diff --git a/massa-graph/src/settings.rs b/massa-graph/src/settings.rs deleted file mode 100644 index 751df7f0bf6..00000000000 --- a/massa-graph/src/settings.rs +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -#![allow(clippy::assertions_on_constants)] -use massa_signature::KeyPair; -use serde::{Deserialize, Serialize}; - -/// Graph configuration -#[derive(Debug, Deserialize, Serialize, Clone)] -pub struct GraphConfig { - /// Number of threads - pub thread_count: u8, - /// Keypair to sign genesis blocks. 
- pub genesis_key: KeyPair, - /// Maximum number of blocks allowed in discarded blocks. - pub max_discarded_blocks: usize, - /// If a block `is future_block_processing_max_periods` periods in the future, it is just discarded. - pub future_block_processing_max_periods: u64, - /// Maximum number of blocks allowed in `FutureIncomingBlocks`. - pub max_future_processing_blocks: usize, - /// Maximum number of blocks allowed in `DependencyWaitingBlocks`. - pub max_dependency_blocks: usize, - /// Threshold for fitness. - pub delta_f0: u64, - /// Maximum operation validity period count - pub operation_validity_periods: u64, - /// cycle duration in periods - pub periods_per_cycle: u64, - /// force keep at least this number of final periods in RAM for each thread - pub force_keep_final_periods: u64, - /// target number of endorsement per block - pub endorsement_count: u32, - /// pub `block_db_prune_interval`: `MassaTime`, - pub max_item_return_count: usize, -} diff --git a/massa-ledger-exports/src/controller.rs b/massa-ledger-exports/src/controller.rs index d16f2dacb91..016b08f07ed 100644 --- a/massa-ledger-exports/src/controller.rs +++ b/massa-ledger-exports/src/controller.rs @@ -56,7 +56,7 @@ pub trait LedgerController: Send + Sync + Debug { /// /// # Returns /// A `BTreeSet` of the datastore keys - fn get_datastore_keys(&self, addr: &Address) -> BTreeSet>; + fn get_datastore_keys(&self, addr: &Address) -> Option>>; /// Get the current disk ledger hash fn get_ledger_hash(&self) -> Hash; diff --git a/massa-ledger-worker/src/ledger.rs b/massa-ledger-worker/src/ledger.rs index f0c9534d0a6..fc168b31e1a 100644 --- a/massa-ledger-worker/src/ledger.rs +++ b/massa-ledger-worker/src/ledger.rs @@ -152,8 +152,11 @@ impl LedgerController for FinalLedger { /// /// # Returns /// A `BTreeSet` of the datastore keys - fn get_datastore_keys(&self, addr: &Address) -> BTreeSet> { - self.sorted_ledger.get_datastore_keys(addr) + fn get_datastore_keys(&self, addr: &Address) -> Option>> { + match 
self.entry_exists(addr) { + true => Some(self.sorted_ledger.get_datastore_keys(addr)), + false => None, + } } /// Get the current disk ledger hash diff --git a/massa-ledger-worker/src/ledger_db.rs b/massa-ledger-worker/src/ledger_db.rs index 000c25a53a8..c5b38dc01a7 100644 --- a/massa-ledger-worker/src/ledger_db.rs +++ b/massa-ledger-worker/src/ledger_db.rs @@ -485,9 +485,9 @@ impl LedgerDB { IteratorMode::From(&last_key, Direction::Forward), ); iter.next(); - (iter, StreamingStep::Finished) + (iter, StreamingStep::Finished(None)) } - StreamingStep::Finished => return Ok((ledger_part, cursor)), + StreamingStep::Finished(_) => return Ok((ledger_part, cursor)), }; // Iterates over the whole database diff --git a/massa-models/Cargo.toml b/massa-models/Cargo.toml index 055e8992fe4..c14f78f89b8 100644 --- a/massa-models/Cargo.toml +++ b/massa-models/Cargo.toml @@ -13,7 +13,7 @@ serde = { version = "1.0", features = ["derive"] } thiserror = "1.0" num = { version = "0.4", features = ["serde"] } directories = "4.0" -config = "0.11" +config = "0.13" bs58 = { version = "0.4", features = ["check"] } bitvec = { version = "1.0", features = ["serde"] } nom = "7.1" diff --git a/massa-models/src/address.rs b/massa-models/src/address.rs index e3a89d4dbec..63266f78a3c 100644 --- a/massa-models/src/address.rs +++ b/massa-models/src/address.rs @@ -113,6 +113,19 @@ impl<'de> ::serde::Deserialize<'de> for Address { impl FromStr for Address { type Err = ModelsError; + /// ## Example + /// ```rust + /// # use massa_signature::{PublicKey, KeyPair, Signature}; + /// # use massa_hash::Hash; + /// # use serde::{Deserialize, Serialize}; + /// # use std::str::FromStr; + /// # use massa_models::address::Address; + /// # let keypair = KeyPair::generate(); + /// # let address = Address::from_public_key(&keypair.get_public_key()); + /// let ser = address.to_string(); + /// let res_addr = Address::from_str(&ser).unwrap(); + /// assert_eq!(address, res_addr); + /// ``` fn from_str(s: &str) -> Result 
{ let mut chars = s.chars(); match chars.next() { @@ -209,39 +222,27 @@ impl Address { pub fn from_bytes(data: &[u8; ADDRESS_SIZE_BYTES]) -> Address { Address(Hash::from_bytes(data)) } +} - /// ## Example - /// ```rust - /// # use massa_signature::{PublicKey, KeyPair, Signature}; - /// # use massa_hash::Hash; - /// # use serde::{Deserialize, Serialize}; - /// # use massa_models::address::Address; - /// # let keypair = KeyPair::generate(); - /// # let address = Address::from_public_key(&keypair.get_public_key()); - /// let ser = address.to_bs58_check(); - /// let res_addr = Address::from_bs58_check(&ser).unwrap(); - /// assert_eq!(address, res_addr); - /// ``` - pub fn from_bs58_check(data: &str) -> Result { - Ok(Address( - Hash::from_bs58_check(data).map_err(|_| ModelsError::HashError)?, - )) +/// Serializer for `Address` +#[derive(Default, Clone)] +pub struct AddressSerializer; + +impl AddressSerializer { + /// Serializes an `Address` into a `Vec` + pub fn new() -> Self { + Self } +} - /// ## Example - /// ```rust - /// # use massa_signature::{PublicKey, KeyPair, Signature}; - /// # use massa_hash::Hash; - /// # use serde::{Deserialize, Serialize}; - /// # use massa_models::address::Address; - /// # let keypair = KeyPair::generate(); - /// # let address = Address::from_public_key(&keypair.get_public_key()); - /// let ser = address.to_bs58_check(); - /// let res_addr = Address::from_bs58_check(&ser).unwrap(); - /// assert_eq!(address, res_addr); - /// ``` - pub fn to_bs58_check(&self) -> String { - self.0.to_bs58_check() +impl Serializer
for AddressSerializer { + fn serialize( + &self, + value: &Address, + buffer: &mut Vec, + ) -> Result<(), massa_serialization::SerializeError> { + buffer.extend_from_slice(value.to_bytes()); + Ok(()) } } diff --git a/massa-models/src/api.rs b/massa-models/src/api.rs index ba3433facc0..452ab4be06c 100644 --- a/massa-models/src/api.rs +++ b/massa-models/src/api.rs @@ -562,8 +562,6 @@ pub struct EventFilter { pub struct ReadOnlyBytecodeExecution { /// max available gas pub max_gas: u64, - /// gas price - pub simulated_gas_price: Amount, /// byte code pub bytecode: Vec, /// caller's address, optional @@ -577,14 +575,12 @@ pub struct ReadOnlyBytecodeExecution { pub struct ReadOnlyCall { /// max available gas pub max_gas: u64, - /// gas price - pub simulated_gas_price: Amount, /// target address pub target_address: Address, /// target function pub target_function: String, /// function parameter - pub parameter: String, + pub parameter: Vec, /// caller's address, optional pub caller_address: Option
, } diff --git a/massa-models/src/block.rs b/massa-models/src/block.rs index 2deeee1577f..19d6cb64cfd 100644 --- a/massa-models/src/block.rs +++ b/massa-models/src/block.rs @@ -97,6 +97,51 @@ impl BlockId { } } +/// Serializer for `BlockId` +#[derive(Default, Clone)] +pub struct BlockIdSerializer; + +impl BlockIdSerializer { + /// Creates a new serializer for `BlockId` + pub fn new() -> Self { + Self + } +} + +impl Serializer for BlockIdSerializer { + fn serialize(&self, value: &BlockId, buffer: &mut Vec) -> Result<(), SerializeError> { + buffer.extend(value.to_bytes()); + Ok(()) + } +} + +/// Deserializer for `BlockId` +#[derive(Default, Clone)] +pub struct BlockIdDeserializer { + hash_deserializer: HashDeserializer, +} + +impl BlockIdDeserializer { + /// Creates a new deserializer for `BlockId` + pub fn new() -> Self { + Self { + hash_deserializer: HashDeserializer::new(), + } + } +} + +impl Deserializer for BlockIdDeserializer { + fn deserialize<'a, E: ParseError<&'a [u8]> + ContextError<&'a [u8]>>( + &self, + buffer: &'a [u8], + ) -> IResult<&'a [u8], BlockId, E> { + context("Failed BlockId deserialization", |input| { + let (rest, hash) = self.hash_deserializer.deserialize(input)?; + Ok((rest, BlockId(hash))) + })(buffer) + } +} + /// block #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Block { diff --git a/massa-models/src/config/constants.rs b/massa-models/src/config/constants.rs index 45ab2ed08cf..d3a3e3bc1f0 100644 --- a/massa-models/src/config/constants.rs +++ b/massa-models/src/config/constants.rs @@ -45,14 +45,14 @@ lazy_static::lazy_static! { .saturating_add(MassaTime::from_millis(1000 * 10)) ) } else { - 1667260800000.into() // Tuesday, November 01, 2022 00:00:01 AM UTC + 1669852801000.into() // Thursday, December 01, 2022 00:00:01 AM UTC }; /// TESTNET: time when the blockclique is ended. 
pub static ref END_TIMESTAMP: Option = if cfg!(feature = "sandbox") { None } else { - Some(1669827600000.into()) // Wednesday, November 30, 2022 5:00:00 PM UTC + Some(1672466400000.into()) // Saturday, December 31, 2022 6:00:00 PM UTC }; /// `KeyPair` to sign genesis blocks. pub static ref GENESIS_KEY: KeyPair = KeyPair::from_str("S1UxdCJv5ckDK8z87E5Jq5fEfSVLi2cTHgtpfZy7iURs3KpPns8") @@ -64,7 +64,7 @@ lazy_static::lazy_static! { if cfg!(feature = "sandbox") { "SAND.0.0" } else { - "TEST.16.1" + "TEST.17.0" } .parse() .unwrap() @@ -115,6 +115,8 @@ pub const ASYNC_POOL_BOOTSTRAP_PART_SIZE: u64 = 100; pub const DEFERRED_CREDITS_BOOTSTRAP_PART_SIZE: u64 = 100; /// Maximum executed ops per slot in a bootstrap batch pub const EXECUTED_OPS_BOOTSTRAP_PART_SIZE: u64 = 10; +/// Maximum number of consensus blocks in a bootstrap batch +pub const CONSENSUS_BOOTSTRAP_PART_SIZE: u64 = 50; /// Maximum size of proof-of-stake rolls pub const MAX_ROLLS_COUNT_LENGTH: u64 = 10_000; /// Maximum size of proof-of-stake production stats @@ -134,7 +136,7 @@ pub const MAX_DATASTORE_VALUE_LENGTH: u64 = 10_000_000; /// Maximum length of a datastore value pub const MAX_BYTECODE_LENGTH: u64 = 10_000_000; /// Maximum length of an operation datastore value -pub const MAX_OPERATION_DATASTORE_VALUE_LENGTH: u64 = 1_000; +pub const MAX_OPERATION_DATASTORE_VALUE_LENGTH: u64 = 500_000; /// Maximum ledger changes in a block pub const MAX_LEDGER_CHANGES_PER_SLOT: u32 = u32::MAX; /// Maximum production events in a block diff --git a/massa-models/src/config/massa_settings.rs b/massa-models/src/config/massa_settings.rs index b7458ec04b9..1f53b2abeb3 100644 --- a/massa-models/src/config/massa_settings.rs +++ b/massa-models/src/config/massa_settings.rs @@ -33,47 +33,32 @@ use std::path::Path; /// 3. 
in path specified in `MASSA_CONFIG_OVERRIDE_PATH` environment variable (`config/config.toml` by default) #[inline] pub fn build_massa_settings>(app_name: &str, env_prefix: &str) -> T { - let mut settings = config::Config::default(); + let mut builder = config::Config::builder(); let config_path = std::env::var("MASSA_CONFIG_PATH") .unwrap_or_else(|_| "base_config/config.toml".to_string()); - settings - .merge(config::File::with_name(&config_path)) - .unwrap_or_else(|error| { - panic!( - "failed to read {} config {}: {}", - config_path, - std::env::current_dir().unwrap().as_path().to_str().unwrap(), - error - ) - }); + + builder = builder.add_source(config::File::with_name(&config_path)); + let config_override_path = std::env::var("MASSA_CONFIG_OVERRIDE_PATH") .unwrap_or_else(|_| "config/config.toml".to_string()); + if Path::new(&config_override_path).is_file() { - settings - .merge(config::File::with_name(&config_override_path)) - .unwrap_or_else(|error| { - panic!( - "failed to read {} override config {}: {}", - config_override_path, - std::env::current_dir().unwrap().as_path().to_str().unwrap(), - error - ) - }); + builder = builder.add_source(config::File::with_name(&config_override_path)); } + if let Some(proj_dirs) = ProjectDirs::from("com", "MassaLabs", app_name) { // Portable user config loading let user_config_path = proj_dirs.config_dir(); if user_config_path.exists() { let path_str = user_config_path.to_str().unwrap(); - settings - .merge(config::File::with_name(path_str)) - .unwrap_or_else(|error| { - panic!("failed to read {} user config: {}", path_str, error) - }); + builder = builder.add_source(config::File::with_name(path_str)); } } - settings - .merge(config::Environment::with_prefix(env_prefix)) + + let s = builder + .add_source(config::Environment::with_prefix(env_prefix)) + .build() .unwrap(); - settings.try_into().unwrap() + + s.try_deserialize().unwrap() } diff --git a/massa-models/src/ledger_models.rs b/massa-models/src/ledger_models.rs index 
e6359e877dd..1f93b7c93af 100644 --- a/massa-models/src/ledger_models.rs +++ b/massa-models/src/ledger_models.rs @@ -383,14 +383,14 @@ impl Deserializer for LedgerChangesDeserializer { /// # use massa_serialization::{Serializer, Deserializer, DeserializeError}; /// # let ledger_changes = LedgerChanges(vec![ /// # ( - /// # Address::from_bs58_check("2oxLZc6g6EHfc5VtywyPttEeGDxWq3xjvTNziayWGDfxETZVTi".into()).unwrap(), + /// # Address::from_str("A12hgh5ULW9o8fJE9muLNXhQENaUUswQbxPyDSq8ridnDGu5gRiJ").unwrap(), /// # LedgerChange { /// # balance_delta: Amount::from_str("1149").unwrap(), /// # balance_increment: true /// # }, /// # ), /// # ( - /// # Address::from_bs58_check("2mvD6zEvo8gGaZbcs6AYTyWKFonZaKvKzDGRsiXhZ9zbxPD11q".into()).unwrap(), + /// # Address::from_str("A12htxRWiEm8jDJpJptr6cwEhWNcCSFWstN1MLSa96DDkVM9Y42G").unwrap(), /// # LedgerChange { /// # balance_delta: Amount::from_str("1020").unwrap(), /// # balance_increment: true diff --git a/massa-models/src/operation.rs b/massa-models/src/operation.rs index 39f4ce2c2dd..b31ded2932c 100644 --- a/massa-models/src/operation.rs +++ b/massa-models/src/operation.rs @@ -404,9 +404,8 @@ pub enum OperationType { data: Vec, /// The maximum amount of gas that the execution of the contract is allowed to cost. max_gas: u64, - /// The price per unit of gas that the caller is willing to pay for the execution. - gas_price: Amount, /// A key-value store associating a hash to arbitrary bytes + #[serde(skip)] datastore: Datastore, }, /// Calls an exported function from a stored smart contract @@ -416,13 +415,11 @@ pub enum OperationType { /// Target function name. No function is called if empty. target_func: String, /// Parameter to pass to the target function - param: String, + param: Vec, /// The maximum amount of gas that the execution of the contract is allowed to cost. 
max_gas: u64, /// Extra coins that are spent from the caller's balance and transferred to the target coins: Amount, - /// The price per unit of gas that the caller is willing to pay for the execution. - gas_price: Amount, }, } @@ -447,18 +444,15 @@ impl std::fmt::Display for OperationType { } OperationType::ExecuteSC { max_gas, - gas_price, .. // data & datastore, // these fields are ignored because bytes eh } => { writeln!(f, "ExecuteSC: ")?; writeln!(f, "\t- max_gas:{}", max_gas)?; - writeln!(f, "\t- gas_price:{}", gas_price)?; }, OperationType::CallSC { max_gas, coins, - gas_price, target_addr, target_func, param @@ -466,9 +460,8 @@ impl std::fmt::Display for OperationType { writeln!(f, "CallSC:")?; writeln!(f, "\t- target address:{}", target_addr)?; writeln!(f, "\t- target function:{}", target_func)?; - writeln!(f, "\t- target parameter:{}", param)?; + writeln!(f, "\t- target parameter:{:?}", param)?; writeln!(f, "\t- max_gas:{}", max_gas)?; - writeln!(f, "\t- gas_price:{}", gas_price)?; writeln!(f, "\t- coins:{}", coins)?; } } @@ -483,7 +476,6 @@ pub struct OperationTypeSerializer { vec_u8_serializer: VecU8Serializer, amount_serializer: AmountSerializer, function_name_serializer: StringSerializer, - parameter_serializer: StringSerializer, datastore_serializer: DatastoreSerializer, } @@ -496,7 +488,6 @@ impl OperationTypeSerializer { vec_u8_serializer: VecU8Serializer::new(), amount_serializer: AmountSerializer::new(), function_name_serializer: StringSerializer::new(U16VarIntSerializer::new()), - parameter_serializer: StringSerializer::new(U32VarIntSerializer::new()), datastore_serializer: DatastoreSerializer::new(), } } @@ -521,7 +512,6 @@ impl Serializer for OperationTypeSerializer { /// let op = OperationType::ExecuteSC { /// data: vec![0x01, 0x02, 0x03], /// max_gas: 100, - /// gas_price: Amount::from_str("1").unwrap(), /// datastore: BTreeMap::default(), /// }; /// let mut buffer = Vec::new(); @@ -551,13 +541,11 @@ impl Serializer for 
OperationTypeSerializer { OperationType::ExecuteSC { data, max_gas, - gas_price, datastore, } => { self.u32_serializer .serialize(&u32::from(OperationTypeId::ExecuteSC), buffer)?; self.u64_serializer.serialize(max_gas, buffer)?; - self.amount_serializer.serialize(gas_price, buffer)?; self.vec_u8_serializer.serialize(data, buffer)?; self.datastore_serializer.serialize(datastore, buffer)?; } @@ -567,17 +555,15 @@ impl Serializer for OperationTypeSerializer { param, max_gas, coins, - gas_price, } => { self.u32_serializer .serialize(&u32::from(OperationTypeId::CallSC), buffer)?; self.u64_serializer.serialize(max_gas, buffer)?; self.amount_serializer.serialize(coins, buffer)?; - self.amount_serializer.serialize(gas_price, buffer)?; buffer.extend(target_addr.to_bytes()); self.function_name_serializer .serialize(target_func, buffer)?; - self.parameter_serializer.serialize(param, buffer)?; + self.vec_u8_serializer.serialize(param, buffer)?; } } Ok(()) @@ -593,7 +579,7 @@ pub struct OperationTypeDeserializer { data_deserializer: VecU8Deserializer, amount_deserializer: AmountDeserializer, function_name_deserializer: StringDeserializer, - parameter_deserializer: StringDeserializer, + parameter_deserializer: VecU8Deserializer, datastore_deserializer: DatastoreDeserializer, } @@ -624,10 +610,10 @@ impl OperationTypeDeserializer { Included(0), Included(max_function_name_length), )), - parameter_deserializer: StringDeserializer::new(U32VarIntDeserializer::new( + parameter_deserializer: VecU8Deserializer::new( Included(0), - Included(max_parameters_size), - )), + Included(max_parameters_size as u64), + ), datastore_deserializer: DatastoreDeserializer::new( max_op_datastore_entry_count, max_op_datastore_key_length, @@ -650,7 +636,6 @@ impl Deserializer for OperationTypeDeserializer { /// let op = OperationType::ExecuteSC { /// data: vec![0x01, 0x02, 0x03], /// max_gas: 100, - /// gas_price: Amount::from_str("1").unwrap(), /// datastore: BTreeMap::from([(vec![1, 2], vec![254, 
255])]) /// }; /// let mut buffer = Vec::new(); @@ -661,12 +646,10 @@ impl Deserializer for OperationTypeDeserializer { /// OperationType::ExecuteSC { /// data, /// max_gas, - /// gas_price, /// datastore /// } => { /// assert_eq!(data, vec![0x01, 0x02, 0x03]); /// assert_eq!(max_gas, 100); - /// assert_eq!(gas_price, Amount::from_str("1").unwrap()); /// assert_eq!(datastore, BTreeMap::from([(vec![1, 2], vec![254, 255])])) /// } /// _ => panic!("Unexpected operation type"), @@ -717,9 +700,6 @@ impl Deserializer for OperationTypeDeserializer { context("Failed max_gas deserialization", |input| { self.max_gas_deserializer.deserialize(input) }), - context("Failed gas_price deserialization", |input| { - self.amount_deserializer.deserialize(input) - }), context("Failed data deserialization", |input| { self.data_deserializer.deserialize(input) }), @@ -728,14 +708,11 @@ impl Deserializer for OperationTypeDeserializer { }), )), ) - .map( - |(max_gas, gas_price, data, datastore)| OperationType::ExecuteSC { - data, - max_gas, - gas_price, - datastore, - }, - ) + .map(|(max_gas, data, datastore)| OperationType::ExecuteSC { + data, + max_gas, + datastore, + }) .parse(input), OperationTypeId::CallSC => context( "Failed CallSC deserialization", @@ -746,9 +723,6 @@ impl Deserializer for OperationTypeDeserializer { context("Failed coins deserialization", |input| { self.amount_deserializer.deserialize(input) }), - context("Failed gas_price deserialization", |input| { - self.amount_deserializer.deserialize(input) - }), context("Failed target_addr deserialization", |input| { self.address_deserializer.deserialize(input) }), @@ -761,15 +735,12 @@ impl Deserializer for OperationTypeDeserializer { )), ) .map( - |(max_gas, coins, gas_price, target_addr, target_func, param)| { - OperationType::CallSC { - target_addr, - target_func, - param, - max_gas, - coins, - gas_price, - } + |(max_gas, coins, target_addr, target_func, param)| OperationType::CallSC { + target_addr, + target_func, + 
param, + max_gas, + coins, }, ) .parse(input), @@ -801,28 +772,6 @@ impl WrappedOperation { } } - /// Get the gas price set by the operation - pub fn get_gas_price(&self) -> Amount { - match &self.content.op { - OperationType::ExecuteSC { gas_price, .. } => *gas_price, - OperationType::CallSC { gas_price, .. } => *gas_price, - OperationType::RollBuy { .. } => Amount::default(), - OperationType::RollSell { .. } => Amount::default(), - OperationType::Transaction { .. } => Amount::default(), - } - } - - /// Get the amount of coins used by the operation to pay for gas - pub fn get_gas_coins(&self) -> Amount { - self.get_gas_price() - .saturating_mul_u64(self.get_gas_usage()) - } - - /// Get the total fee paid by the creator - pub fn get_total_fee(&self) -> Amount { - self.get_gas_coins().saturating_add(self.content.fee) - } - /// get the addresses that are involved in this operation from a ledger point of view pub fn get_ledger_involved_addresses(&self) -> PreHashSet
{ let mut res = PreHashSet::
::default(); @@ -851,21 +800,12 @@ impl WrappedOperation { OperationType::Transaction { amount, .. } => *amount, OperationType::RollBuy { roll_count } => roll_price.saturating_mul_u64(*roll_count), OperationType::RollSell { .. } => Amount::zero(), - OperationType::ExecuteSC { - max_gas, gas_price, .. - } => gas_price.saturating_mul_u64(*max_gas), - OperationType::CallSC { - max_gas, - gas_price, - coins, - .. - } => gas_price - .saturating_mul_u64(*max_gas) - .saturating_add(*coins), + OperationType::ExecuteSC { .. } => Amount::zero(), + OperationType::CallSC { coins, .. } => *coins, }; // add all fees and return - max_non_fee_seq_spending.saturating_add(self.get_total_fee()) + max_non_fee_seq_spending.saturating_add(self.content.fee) } /// get the addresses that are involved in this operation from a rolls point of view @@ -1380,7 +1320,6 @@ mod tests { let op = OperationType::ExecuteSC { max_gas: 123, - gas_price: Amount::from_str("772.122").unwrap(), data: vec![23u8, 123u8, 44u8], datastore: BTreeMap::from([ (vec![1, 2, 3], vec![4, 5, 6, 7, 8, 9]), @@ -1460,9 +1399,8 @@ mod tests { max_gas: 123, target_addr, coins: Amount::from_str("456.789").unwrap(), - gas_price: Amount::from_str("772.122").unwrap(), target_func: "target function".to_string(), - param: "parameter".to_string(), + param: b"parameter".to_vec(), }; let mut ser_type = Vec::new(); OperationTypeSerializer::new() diff --git a/massa-models/src/serialization.rs b/massa-models/src/serialization.rs index 6cabd2d25c3..0f170b27b50 100644 --- a/massa-models/src/serialization.rs +++ b/massa-models/src/serialization.rs @@ -1,13 +1,14 @@ // Copyright (c) 2022 MASSA LABS use crate::error::ModelsError; +use crate::prehash::{PreHashSet, PreHashed}; use bitvec::prelude::BitVec; use massa_serialization::{ Deserializer, SerializeError, Serializer, U32VarIntDeserializer, U32VarIntSerializer, U64VarIntDeserializer, U64VarIntSerializer, }; use nom::bytes::complete::take; -use nom::multi::length_data; +use 
nom::multi::{length_count, length_data}; use nom::sequence::preceded; use nom::{branch::alt, Parser, ToUsize}; use nom::{ @@ -15,6 +16,7 @@ use nom::{ IResult, }; use std::convert::TryInto; +use std::marker::PhantomData; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; use std::ops::Bound; use Bound::Included; @@ -310,6 +312,185 @@ impl Deserializer> for VecU8Deserializer { } } +/// Basic `Vec<_>` serializer +#[derive(Clone)] +pub struct VecSerializer +where + ST: Serializer, +{ + len_serializer: U64VarIntSerializer, + data_serializer: ST, + phantom_t: PhantomData, +} + +impl VecSerializer +where + ST: Serializer, +{ + /// Creates a new `VecSerializer` + pub fn new(data_serializer: ST) -> Self { + Self { + len_serializer: U64VarIntSerializer::new(), + data_serializer, + phantom_t: PhantomData, + } + } +} + +impl Serializer> for VecSerializer +where + ST: Serializer, +{ + fn serialize(&self, value: &Vec, buffer: &mut Vec) -> Result<(), SerializeError> { + self.len_serializer + .serialize(&(value.len() as u64), buffer)?; + for elem in value { + self.data_serializer.serialize(elem, buffer)?; + } + Ok(()) + } +} + +/// Basic `Vec<_>` deserializer +#[derive(Clone)] +pub struct VecDeserializer +where + ST: Deserializer + Clone, +{ + varint_u64_deserializer: U64VarIntDeserializer, + data_deserializer: ST, + phantom_t: PhantomData, +} + +impl VecDeserializer +where + ST: Deserializer + Clone, +{ + /// Creates a new `VecDeserializer` + pub const fn new( + data_deserializer: ST, + min_length: Bound, + max_length: Bound, + ) -> Self { + Self { + varint_u64_deserializer: U64VarIntDeserializer::new(min_length, max_length), + data_deserializer, + phantom_t: PhantomData, + } + } +} + +impl Deserializer> for VecDeserializer +where + ST: Deserializer + Clone, +{ + fn deserialize<'a, E: ParseError<&'a [u8]> + ContextError<&'a [u8]>>( + &self, + buffer: &'a [u8], + ) -> IResult<&'a [u8], Vec, E> { + context("Failed Vec<_> deserialization", |input| { + length_count( + context("length", 
|input| { + self.varint_u64_deserializer.deserialize(input) + }), + context("data", |input| self.data_deserializer.deserialize(input)), + )(input) + }) + .parse(buffer) + } +} + +/// Basic `PreHashSet<_>` serializer +#[derive(Clone)] +pub struct PreHashSetSerializer +where + ST: Serializer, +{ + len_serializer: U64VarIntSerializer, + data_serializer: ST, + phantom_t: PhantomData, +} + +impl PreHashSetSerializer +where + ST: Serializer, +{ + /// Creates a new `PreHashSetSerializer` + pub fn new(data_serializer: ST) -> Self { + Self { + len_serializer: U64VarIntSerializer::new(), + data_serializer, + phantom_t: PhantomData, + } + } +} + +impl Serializer> for PreHashSetSerializer +where + ST: Serializer, + T: PreHashed, +{ + fn serialize(&self, value: &PreHashSet, buffer: &mut Vec) -> Result<(), SerializeError> { + self.len_serializer + .serialize(&(value.len() as u64), buffer)?; + for elem in value { + self.data_serializer.serialize(elem, buffer)?; + } + Ok(()) + } +} + +/// Basic `PreHashSet<_>` deserializer +#[derive(Clone)] +pub struct PreHashSetDeserializer +where + ST: Deserializer + Clone, +{ + varint_u64_deserializer: U64VarIntDeserializer, + data_deserializer: ST, + phantom_t: PhantomData, +} + +impl PreHashSetDeserializer +where + ST: Deserializer + Clone, +{ + /// Creates a new `PreHashSetDeserializer` + pub const fn new( + data_deserializer: ST, + min_length: Bound, + max_length: Bound, + ) -> Self { + Self { + varint_u64_deserializer: U64VarIntDeserializer::new(min_length, max_length), + data_deserializer, + phantom_t: PhantomData, + } + } +} + +impl Deserializer> for PreHashSetDeserializer +where + ST: Deserializer + Clone, + T: PreHashed + std::cmp::Eq + std::hash::Hash, +{ + fn deserialize<'a, E: ParseError<&'a [u8]> + ContextError<&'a [u8]>>( + &self, + buffer: &'a [u8], + ) -> IResult<&'a [u8], PreHashSet, E> { + context("Failed PreHashSet<_> deserialization", |input| { + length_count( + context("length", |input| { + 
self.varint_u64_deserializer.deserialize(input) + }), + context("data", |input| self.data_deserializer.deserialize(input)), + )(input) + }) + .map(|vec| vec.into_iter().collect()) + .parse(buffer) + } +} + /// Serializer for `String` with generic serializer for the size of the string pub struct StringSerializer where diff --git a/massa-models/src/streaming_step.rs b/massa-models/src/streaming_step.rs index 57102391677..3fb53803af2 100644 --- a/massa-models/src/streaming_step.rs +++ b/massa-models/src/streaming_step.rs @@ -1,5 +1,6 @@ use massa_serialization::{ - Deserializer, SerializeError, Serializer, U64VarIntDeserializer, U64VarIntSerializer, + Deserializer, OptionDeserializer, OptionSerializer, SerializeError, Serializer, + U64VarIntDeserializer, U64VarIntSerializer, }; use nom::{ error::{context, ContextError, ParseError}, @@ -17,13 +18,13 @@ pub enum StreamingStep { /// Finished step, after all the information has been streamed /// /// Also can keep an indicator of the last content streamed - Finished, + Finished(Option), } impl StreamingStep { /// Indicates if the current step if finished or not without caring about the values pub fn finished(&self) -> bool { - matches!(self, StreamingStep::Finished) + matches!(self, StreamingStep::Finished(_)) } } @@ -34,17 +35,19 @@ where { u64_serializer: U64VarIntSerializer, data_serializer: ST, + option_serializer: OptionSerializer, phantom_t: PhantomData, } impl StreamingStepSerializer where - ST: Serializer, + ST: Serializer + Clone, { /// Creates a new `StreamingStep` serializer pub fn new(data_serializer: ST) -> Self { Self { u64_serializer: U64VarIntSerializer::new(), + option_serializer: OptionSerializer::new(data_serializer.clone()), data_serializer, phantom_t: PhantomData, } @@ -62,11 +65,14 @@ where ) -> Result<(), SerializeError> { match value { StreamingStep::Started => self.u64_serializer.serialize(&0u64, buffer)?, - StreamingStep::Ongoing(cursor_data) => { + StreamingStep::Ongoing(data) => { 
self.u64_serializer.serialize(&1u64, buffer)?; - self.data_serializer.serialize(cursor_data, buffer)?; + self.data_serializer.serialize(data, buffer)?; + } + StreamingStep::Finished(opt_data) => { + self.u64_serializer.serialize(&2u64, buffer)?; + self.option_serializer.serialize(opt_data, buffer)?; } - StreamingStep::Finished => self.u64_serializer.serialize(&2u64, buffer)?, }; Ok(()) } @@ -76,21 +82,25 @@ where pub struct StreamingStepDeserializer where ST: Deserializer, + T: Clone, { - u64_deserializer: U64VarIntDeserializer, - data_deserializer: ST, + u64_deser: U64VarIntDeserializer, + data_deser: ST, + opt_deser: OptionDeserializer, phantom_t: PhantomData, } impl StreamingStepDeserializer where - ST: Deserializer, + ST: Deserializer + Clone, + T: Clone, { /// Creates a new `StreamingStep` deserializer - pub fn new(data_deserializer: ST) -> Self { + pub fn new(data_deser: ST) -> Self { Self { - u64_deserializer: U64VarIntDeserializer::new(Included(u64::MIN), Included(u64::MAX)), - data_deserializer, + u64_deser: U64VarIntDeserializer::new(Included(u64::MIN), Included(u64::MAX)), + opt_deser: OptionDeserializer::new(data_deser.clone()), + data_deser, phantom_t: PhantomData, } } @@ -99,22 +109,23 @@ where impl Deserializer> for StreamingStepDeserializer where ST: Deserializer, + T: Clone, { fn deserialize<'a, E: ParseError<&'a [u8]> + ContextError<&'a [u8]>>( &self, buffer: &'a [u8], ) -> IResult<&'a [u8], StreamingStep, E> { context("StreamingStep", |input| { - let (rest, ident) = context("identifier", |input| { - self.u64_deserializer.deserialize(input) - }) - .parse(input)?; + let (rest, ident) = + context("identifier", |input| self.u64_deser.deserialize(input)).parse(input)?; match ident { 0u64 => Ok((rest, StreamingStep::Started)), - 1u64 => context("data", |input| self.data_deserializer.deserialize(input)) + 1u64 => context("ongoing data", |input| self.data_deser.deserialize(input)) .map(StreamingStep::Ongoing) .parse(rest), - 2u64 => Ok((rest, 
StreamingStep::Finished)), + 2u64 => context("finished data", |input| self.opt_deser.deserialize(input)) + .map(StreamingStep::Finished) + .parse(rest), _ => Err(nom::Err::Error(ParseError::from_error_kind( buffer, nom::error::ErrorKind::Digit, diff --git a/massa-node/Cargo.toml b/massa-node/Cargo.toml index 875b407ea1e..06f063d07bf 100644 --- a/massa-node/Cargo.toml +++ b/massa-node/Cargo.toml @@ -7,6 +7,7 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] +crossbeam-channel = "0.5.6" anyhow = "1.0" enum-map = { version = "2.4", features = ["serde"] } lazy_static = "1.4" @@ -51,14 +52,12 @@ massa_wallet = { path = "../massa-wallet" } massa_factory_exports = { path = "../massa-factory-exports" } massa_factory_worker = { path = "../massa-factory-worker" } - # for more information on what are the following features used for, see the cargo.toml at workspace level [features] beta = [] deadlock_detection = [] sandbox = [ "massa_bootstrap/sandbox", - "massa_consensus_exports/sandbox", "massa_consensus_worker/sandbox", "massa_execution_worker/sandbox", "massa_final_state/sandbox", diff --git a/massa-node/base_config/config.toml b/massa-node/base_config/config.toml index 67a69978aa9..c5379de63f5 100644 --- a/massa-node/base_config/config.toml +++ b/massa-node/base_config/config.toml @@ -7,14 +7,34 @@ [api] # max number of future periods considered during requests draw_lookahead_period_count = 10 - # port on which the node API listens for admin and node management requests. Dangerous if publicly exposed. + # port on which the node API listens for admin and node management requests. Dangerous if publicly exposed bind_private = "127.0.0.1:33034" - # port on which the node API listens for public requests. Can be exposed to the Internet. + # port on which the node API listens for public requests. 
Can be exposed to the Internet bind_public = "0.0.0.0:33035" # max number of arguments per RPC call max_arguments = 128 - # Path to the openrpc specification file used in `rpc.discover` method. + # path to the openrpc specification file used in `rpc.discover` method openrpc_spec_path = "base_config/openrpc.json" + # maximum size in bytes of a request + max_request_body_size = 52428800 + # maximum size in bytes of a response + max_response_body_size = 52428800 + # maximum number of incoming connections allowed + max_connections = 100 + # maximum number of subscriptions per connection + max_subscriptions_per_connection = 1024 + # max length for logging for requests and responses. Logs bigger than this limit will be truncated + max_log_length = 4096 + # host filtering + allow_hosts = [] + # whether batch requests are supported by this server or not + batch_requests_supported = true + # the interval at which `Ping` frames are submitted in milliseconds + ping_interval = 60000 + # whether to enable HTTP. + enable_http = true + # whether to enable WS. + enable_ws = false [execution] # max number of generated events kept in RAM @@ -80,22 +100,22 @@ max_known_endorsements_size = 2048 # max cache size for which endorsements a foreign node knows about max_node_known_endorsements_size = 2048 - # Maximum number of batches in the memory buffer. - # Dismiss the new batches if overflow + # maximum number of batches in the memory buffer. 
+ # dismiss the new batches if overflow operation_batch_buffer_capacity = 10024 - # Immediately announce ops if overflow + # immediately announce ops if overflow operation_announcement_buffer_capacity = 2000 - # Start processing batches in the buffer each `operation_batch_proc_period` in millisecond + # start processing batches in the buffer each `operation_batch_proc_period` in millisecond operation_batch_proc_period = 500 - # All operations asked are prune each `operation_asked_pruning_period` millisecond + # all operations asked are prune each `operation_asked_pruning_period` millisecond asked_operations_pruning_period = 100000 - # Interval at which operations are announced in batches. + # interval at which operations are announced in batches. operation_announcement_interval = 300 - # Max number of operation per message, same as network param but can be smaller + # max number of operation per message, same as network param but can be smaller max_operations_per_message = 1024 - # Time threshold after which operation are not propagated + # time threshold after which operation are not propagated max_operations_propagation_time = 32000 - # Time threshold after which operation are not propagated + # time threshold after which endorsement are not propagated max_endorsements_propagation_time = 48000 [network] @@ -141,15 +161,15 @@ max_send_wait_network_event = 0 # we forget we banned a node after ban_timeout milliseconds ban_timeout = 3600000 - # Timeout duration when in handshake we respond with a PeerList + # timeout duration when in handshake we respond with a PeerList # (on max in connection reached we send a list of peers) peer_list_send_timeout = 100 - # Max number of in connection overflowed managed by the handshake + # max number of in connection overflowed managed by the handshake # that send a list of peers max_in_connection_overflow = 100 - # Read limitation for a connection in bytes per seconds + # read limitation for a connection in bytes per seconds 
max_bytes_read = 20_000_000.0 - # Write limitation for a connection in bytes per seconds + # write limitation for a connection in bytes per seconds max_bytes_write = 20_000_000.0 [network.peer_types_config] @@ -169,9 +189,9 @@ ["54.36.174.177:31245", "P1gEdBVEbRFbBxBtrjcTDDK9JPbJFDay27uiJRE3vmbFAFDKNh7"], ["51.75.60.228:31245", "P13Ykon8Zo73PTKMruLViMMtE2rEG646JQ4sCcee2DnopmVM3P5"] ] - # Path to the bootstrap whitelist file. This whitelist define IPs that can bootstrap on your node. + # path to the bootstrap whitelist file. This whitelist define IPs that can bootstrap on your node. bootstrap_whitelist_file = "base_config/bootstrap_whitelist.json" - # Path to the bootstrap blacklist file. This whitelist define IPs that will not be able to bootstrap on your node. This list is optional. + # path to the bootstrap blacklist file. This whitelist define IPs that will not be able to bootstrap on your node. This list is optional. bootstrap_blacklist_file = "base_config/bootstrap_blacklist.json" # [optionnal] port on which to listen for incoming bootstrap requests bind = "[::]:31245" @@ -199,7 +219,7 @@ ip_list_max_size = 10000 # refuse consecutive bootstrap attempts from a given IP when the interval between them is lower than per_ip_min_interval milliseconds per_ip_min_interval = 180000 - # Read-Write limitation for a connection in bytes per seconds (about the bootstrap specifically) + # read-write limitation for a connection in bytes per seconds (about the bootstrap specifically) max_bytes_read_write = 20_000_000.0 [pool] @@ -213,9 +233,9 @@ max_item_return_count = 100 [selector] - # Maximum number of computed cycle's draws we keep in cache + # maximum number of computed cycle's draws we keep in cache max_draw_cache = 10 - # Path to the initial roll distribution + # path to the initial roll distribution initial_rolls_path = "base_config/initial_rolls.json" [factory] diff --git a/massa-node/base_config/initial_ledger.json b/massa-node/base_config/initial_ledger.json index 
e214ac6818a..3d93a1a5fda 100644 --- a/massa-node/base_config/initial_ledger.json +++ b/massa-node/base_config/initial_ledger.json @@ -1,46 +1,46 @@ { "A1qDAxGJ387ETi9JRQzZWSPKYq4YPXrFvdiE4VoXUaiAt38JFEC": { - "balance": "80000000", + "balance": "1000000000", "datastore": {}, "bytecode": [] }, "A12M3AQqs7JH7mSe1UZyEA5NQ7nGQHXaqqxe1TGEpkimcRhsQ4eF": { - "balance": "80000000", + "balance": "1000000000", "datastore": {}, "bytecode": [] }, "A1nsqw9mCcYLyyMJx5f4in4NXDoe4B1LzV9pQdvX5Wrxq9ehf6h": { - "balance": "80000000", + "balance": "1000000000", "datastore": {}, "bytecode": [] }, "A1pdnk7gME8DSA6ueNZdCHqfSt9YfTwAJSgRCcB8g3z3kkapWtU": { - "balance": "80000000", + "balance": "1000000000", "datastore": {}, "bytecode": [] }, "A1H1Ze77ctAFi4FBc3nVe9AtWdtg7246V9pVXSeXqWaJFLPKfB1": { - "balance": "80000000", + "balance": "1000000000", "datastore": {}, "bytecode": [] }, "A12Dvay7jT1maaKpV9CHX6yMt3cS5ZEWy6Q67HV8twVGS3ihoq5x": { - "balance": "80000000", + "balance": "1000000000", "datastore": {}, "bytecode": [] }, "A13evhD7c9AXFc6CxWWjWWRT6bQnejYhq3MsNofJWJDe4UQStJE": { - "balance": "80000000", + "balance": "1000000000", "datastore": {}, "bytecode": [] }, "A1UpZLobUAWqj3M9DpBZNhh4GD4ZLvixKXQu2kt7ZDUiEepD89E": { - "balance": "80000000", + "balance": "1000000000", "datastore": {}, "bytecode": [] }, "A12p8v9V68SiehQb2Syzy6smfv9NTCJh2p6JPbsacy7PaGRw39uH": { - "balance": "80000000", + "balance": "1000000000", "datastore": {}, "bytecode": [] } diff --git a/massa-node/base_config/openrpc.json b/massa-node/base_config/openrpc.json index 27d8968bc2b..f0d6b4075d0 100644 --- a/massa-node/base_config/openrpc.json +++ b/massa-node/base_config/openrpc.json @@ -2,7 +2,7 @@ "openrpc": "1.2.4", "info": { "title": "Massa OpenRPC Specification", - "version": "TEST.16.1", + "version": "TEST.17.0", "description": "Massa OpenRPC Specification document. 
Find more information on https://docs.massa.net/en/latest/technical-doc/api.html", "termsOfService": "https://open-rpc.org", "contact": { @@ -991,7 +991,6 @@ "title": "CallSC", "description": "Call Smart Contract", "required": [ - "gas_price", "max_gas", "param", "coins", @@ -1018,10 +1017,6 @@ "coins": { "description": "Amount", "type": "number" - }, - "gas_price": { - "description": "Amount", - "type": "number" } }, "additionalProperties": false @@ -1421,7 +1416,6 @@ "description": "Execute Smart Contract", "required": [ "data", - "gas_price", "max_gas" ], "type": "object", @@ -1436,10 +1430,6 @@ "max_gas": { "description": "Maximum amount of gas that the execution of the contract is allowed to cost.", "type": "number" - }, - "gas_price": { - "description": "Represent an Amount in coins, price per unit of gas that the caller is willing to pay for the execution.", - "type": "string" } }, "additionalProperties": false @@ -1940,7 +1930,6 @@ "description": "Read only bytecode execution", "required": [ "max_gas", - "simulated_gas_price", "bytecode" ], "type": "object", @@ -1949,10 +1938,6 @@ "description": "Max available gas", "type": "number" }, - "simulated_gas_price": { - "description": "Gas price", - "type": "number" - }, "bytecode": { "description": "Bytecode to execute", "type": "array" @@ -1974,7 +1959,6 @@ "required": [ "max_gas", "parameter", - "simulated_gas_price", "target_address", "target_function" ], @@ -1984,10 +1968,6 @@ "description": "Max available gas", "type": "number" }, - "simulated_gas_price": { - "description": "Gas price", - "type": "number" - }, "target_address": { "description": "Target address", "type": "string" diff --git a/massa-node/src/main.rs b/massa-node/src/main.rs index b3bf865fd11..1b572ef0003 100644 --- a/massa-node/src/main.rs +++ b/massa-node/src/main.rs @@ -6,15 +6,14 @@ extern crate massa_logging; use crate::settings::SETTINGS; +use crossbeam_channel::{Receiver, TryRecvError}; use dialoguer::Password; use 
massa_api::{APIConfig, Private, Public, RpcServer, StopHandle, API}; use massa_async_pool::AsyncPoolConfig; use massa_bootstrap::{get_state, start_bootstrap_server, BootstrapConfig, BootstrapManager}; -use massa_consensus_exports::ConsensusManager; -use massa_consensus_exports::{ - events::ConsensusEvent, settings::ConsensusChannels, ConsensusConfig, ConsensusEventReceiver, -}; -use massa_consensus_worker::start_consensus_controller; +use massa_consensus_exports::events::ConsensusEvent; +use massa_consensus_exports::{ConsensusChannels, ConsensusConfig, ConsensusManager}; +use massa_consensus_worker::start_consensus_worker; use massa_executed_ops::ExecutedOpsConfig; use massa_execution_exports::{ExecutionConfig, ExecutionManager, StorageCostsConstants}; use massa_execution_worker::start_execution_worker; @@ -45,13 +44,16 @@ use massa_models::config::constants::{ POS_MISS_RATE_DEACTIVATION_THRESHOLD, POS_SAVED_CYCLES, PROTOCOL_CONTROLLER_CHANNEL_SIZE, PROTOCOL_EVENT_CHANNEL_SIZE, ROLL_PRICE, T0, THREAD_COUNT, VERSION, }; +use massa_models::config::CONSENSUS_BOOTSTRAP_PART_SIZE; use massa_network_exports::{Establisher, NetworkConfig, NetworkManager}; use massa_network_worker::start_network_controller; use massa_pool_exports::{PoolConfig, PoolManager}; use massa_pool_worker::start_pool_controller; use massa_pos_exports::{PoSConfig, SelectorConfig, SelectorManager}; use massa_pos_worker::start_selector_worker; -use massa_protocol_exports::{ProtocolConfig, ProtocolManager}; +use massa_protocol_exports::{ + ProtocolCommand, ProtocolCommandSender, ProtocolConfig, ProtocolManager, +}; use massa_protocol_worker::start_protocol_controller; use massa_storage::Storage; use massa_time::MassaTime; @@ -59,21 +61,22 @@ use massa_wallet::Wallet; use parking_lot::RwLock; use std::path::PathBuf; use std::sync::atomic::{AtomicUsize, Ordering}; +use std::thread::sleep; +use std::time::Duration; use std::{path::Path, process, sync::Arc}; use structopt::StructOpt; use tokio::signal; use 
tokio::sync::mpsc; use tracing::{error, info, warn}; use tracing_subscriber::filter::{filter_fn, LevelFilter}; - mod settings; async fn launch( node_wallet: Arc>, ) -> ( - ConsensusEventReceiver, + Receiver, Option, - ConsensusManager, + Box, Box, Box, Box, @@ -211,6 +214,7 @@ async fn launch( max_credits_length: MAX_DEFERRED_CREDITS_LENGTH, max_executed_ops_length: MAX_EXECUTED_OPS_LENGTH, max_ops_changes_length: MAX_EXECUTED_OPS_CHANGES_LENGTH, + consensus_bootstrap_part_size: CONSENSUS_BOOTSTRAP_PART_SIZE, }; // bootstrap @@ -347,6 +351,51 @@ async fn launch( let (pool_manager, pool_controller) = start_pool_controller(pool_config, &shared_storage, execution_controller.clone()); + let (protocol_command_sender, protocol_command_receiver) = + mpsc::channel::(PROTOCOL_CONTROLLER_CHANNEL_SIZE); + + let consensus_config = ConsensusConfig { + genesis_timestamp: *GENESIS_TIMESTAMP, + end_timestamp: *END_TIMESTAMP, + thread_count: THREAD_COUNT, + t0: T0, + genesis_key: GENESIS_KEY.clone(), + max_discarded_blocks: SETTINGS.consensus.max_discarded_blocks, + future_block_processing_max_periods: SETTINGS.consensus.future_block_processing_max_periods, + max_future_processing_blocks: SETTINGS.consensus.max_future_processing_blocks, + max_dependency_blocks: SETTINGS.consensus.max_dependency_blocks, + delta_f0: DELTA_F0, + operation_validity_periods: OPERATION_VALIDITY_PERIODS, + periods_per_cycle: PERIODS_PER_CYCLE, + stats_timespan: SETTINGS.consensus.stats_timespan, + max_send_wait: SETTINGS.consensus.max_send_wait, + force_keep_final_periods: SETTINGS.consensus.force_keep_final_periods, + endorsement_count: ENDORSEMENT_COUNT, + block_db_prune_interval: SETTINGS.consensus.block_db_prune_interval, + max_item_return_count: SETTINGS.consensus.max_item_return_count, + max_gas_per_block: MAX_GAS_PER_BLOCK, + channel_size: CHANNEL_SIZE, + clock_compensation_millis: bootstrap_state.compensation_millis, + bootstrap_part_size: CONSENSUS_BOOTSTRAP_PART_SIZE, + }; + + let 
(consensus_event_sender, consensus_event_receiver) = + crossbeam_channel::bounded(CHANNEL_SIZE); + let consensus_channels = ConsensusChannels { + execution_controller: execution_controller.clone(), + selector_controller: selector_controller.clone(), + pool_command_sender: pool_controller.clone(), + controller_event_tx: consensus_event_sender, + protocol_command_sender: ProtocolCommandSender(protocol_command_sender.clone()), + }; + + let (consensus_controller, consensus_manager) = start_consensus_worker( + consensus_config, + consensus_channels, + bootstrap_state.graph, + shared_storage.clone(), + ); + // launch protocol controller let protocol_config = ProtocolConfig { thread_count: THREAD_COUNT, @@ -378,57 +427,18 @@ async fn launch( max_operations_propagation_time: SETTINGS.protocol.max_operations_propagation_time, max_endorsements_propagation_time: SETTINGS.protocol.max_endorsements_propagation_time, }; - let (protocol_command_sender, protocol_event_receiver, protocol_manager) = - start_protocol_controller( - protocol_config, - network_command_sender.clone(), - network_event_receiver, - pool_controller.clone(), - shared_storage.clone(), - ) - .await - .expect("could not start protocol controller"); - // init consensus configuration - let consensus_config = ConsensusConfig { - genesis_timestamp: *GENESIS_TIMESTAMP, - end_timestamp: *END_TIMESTAMP, - thread_count: THREAD_COUNT, - t0: T0, - genesis_key: GENESIS_KEY.clone(), - max_discarded_blocks: SETTINGS.consensus.max_discarded_blocks, - future_block_processing_max_periods: SETTINGS.consensus.future_block_processing_max_periods, - max_future_processing_blocks: SETTINGS.consensus.max_future_processing_blocks, - max_dependency_blocks: SETTINGS.consensus.max_dependency_blocks, - delta_f0: DELTA_F0, - operation_validity_periods: OPERATION_VALIDITY_PERIODS, - periods_per_cycle: PERIODS_PER_CYCLE, - stats_timespan: SETTINGS.consensus.stats_timespan, - max_send_wait: SETTINGS.consensus.max_send_wait, - 
force_keep_final_periods: SETTINGS.consensus.force_keep_final_periods, - endorsement_count: ENDORSEMENT_COUNT, - block_db_prune_interval: SETTINGS.consensus.block_db_prune_interval, - max_item_return_count: SETTINGS.consensus.max_item_return_count, - max_gas_per_block: MAX_GAS_PER_BLOCK, - channel_size: CHANNEL_SIZE, - }; - // launch consensus controller - let (consensus_command_sender, consensus_event_receiver, consensus_manager) = - start_consensus_controller( - consensus_config.clone(), - ConsensusChannels { - execution_controller: execution_controller.clone(), - protocol_command_sender: protocol_command_sender.clone(), - protocol_event_receiver, - pool_command_sender: pool_controller.clone(), - selector_controller: selector_controller.clone(), - }, - bootstrap_state.graph, - shared_storage.clone(), - bootstrap_state.compensation_millis, - ) - .await - .expect("could not start consensus controller"); + let protocol_manager = start_protocol_controller( + protocol_config, + network_command_sender.clone(), + network_event_receiver, + protocol_command_receiver, + consensus_controller.clone(), + pool_controller.clone(), + shared_storage.clone(), + ) + .await + .expect("could not start protocol controller"); // launch factory let factory_config = FactoryConfig { @@ -442,16 +452,16 @@ async fn launch( }; let factory_channels = FactoryChannels { selector: selector_controller.clone(), - consensus: consensus_command_sender.clone(), + consensus: consensus_controller.clone(), pool: pool_controller.clone(), - protocol: protocol_command_sender.clone(), + protocol: ProtocolCommandSender(protocol_command_sender.clone()), storage: shared_storage.clone(), }; let factory_manager = start_factory(factory_config, node_wallet.clone(), factory_channels); // launch bootstrap server let bootstrap_manager = start_bootstrap_server( - consensus_command_sender.clone(), + consensus_controller.clone(), network_command_sender.clone(), final_state.clone(), bootstrap_config, @@ -469,33 +479,47 @@ 
async fn launch( draw_lookahead_period_count: SETTINGS.api.draw_lookahead_period_count, max_arguments: SETTINGS.api.max_arguments, openrpc_spec_path: SETTINGS.api.openrpc_spec_path.clone(), + max_request_body_size: SETTINGS.api.max_request_body_size, + max_response_body_size: SETTINGS.api.max_response_body_size, + max_connections: SETTINGS.api.max_connections, + max_subscriptions_per_connection: SETTINGS.api.max_subscriptions_per_connection, + max_log_length: SETTINGS.api.max_log_length, + allow_hosts: SETTINGS.api.allow_hosts.clone(), + batch_requests_supported: SETTINGS.api.batch_requests_supported, + ping_interval: SETTINGS.api.ping_interval, + enable_http: SETTINGS.api.enable_http, + enable_ws: SETTINGS.api.enable_ws, max_datastore_value_length: MAX_DATASTORE_VALUE_LENGTH, max_op_datastore_entry_count: MAX_OPERATION_DATASTORE_ENTRY_COUNT, max_op_datastore_key_length: MAX_OPERATION_DATASTORE_KEY_LENGTH, max_op_datastore_value_length: MAX_OPERATION_DATASTORE_VALUE_LENGTH, max_function_name_length: MAX_FUNCTION_NAME_LENGTH, max_parameter_size: MAX_PARAMETERS_SIZE, + thread_count: THREAD_COUNT, + genesis_timestamp: *GENESIS_TIMESTAMP, + t0: T0, + periods_per_cycle: PERIODS_PER_CYCLE, }; // spawn private API let (api_private, api_private_stop_rx) = API::::new( - consensus_command_sender.clone(), network_command_sender.clone(), execution_controller.clone(), api_config.clone(), - consensus_config.clone(), node_wallet, ); - let api_private_handle = api_private.serve(&SETTINGS.api.bind_private); + let api_private_handle = api_private + .serve(&SETTINGS.api.bind_private, &api_config) + .await + .expect("failed to start PRIVATE API"); // spawn public API let api_public = API::::new( - consensus_command_sender.clone(), + consensus_controller.clone(), execution_controller.clone(), - api_config, + api_config.clone(), selector_controller.clone(), - consensus_config, pool_controller.clone(), - protocol_command_sender.clone(), + 
ProtocolCommandSender(protocol_command_sender.clone()), network_config, *VERSION, network_command_sender.clone(), @@ -503,7 +527,10 @@ async fn launch( node_id, shared_storage.clone(), ); - let api_public_handle = api_public.serve(&SETTINGS.api.bind_public); + let api_public_handle = api_public + .serve(&SETTINGS.api.bind_public, &api_config) + .await + .expect("failed to start PUBLIC API"); #[cfg(feature = "deadlock_detection")] { @@ -552,7 +579,7 @@ async fn launch( struct Managers { bootstrap_manager: Option, - consensus_manager: ConsensusManager, + consensus_manager: Box, execution_manager: Box, selector_manager: Box, pool_manager: Box, @@ -562,11 +589,11 @@ struct Managers { } async fn stop( - consensus_event_receiver: ConsensusEventReceiver, + _consensus_event_receiver: Receiver, Managers { bootstrap_manager, mut execution_manager, - consensus_manager, + mut consensus_manager, mut selector_manager, mut pool_manager, protocol_manager, @@ -593,10 +620,14 @@ async fn stop( // stop factory factory_manager.stop(); - let protocol_event_receiver = consensus_manager - .stop(consensus_event_receiver) + // stop protocol controller + let network_event_receiver = protocol_manager + .stop() .await - .expect("consensus shutdown failed"); + .expect("protocol shutdown failed"); + + // stop consensus + consensus_manager.stop(); // stop pool pool_manager.stop(); @@ -611,12 +642,6 @@ async fn stop( // TODO //let protocol_pool_event_receiver = pool_manager.stop().await.expect("pool shutdown failed"); - // stop protocol controller - let network_event_receiver = protocol_manager - .stop(protocol_event_receiver) - .await - .expect("protocol shutdown failed"); - // stop network controller network_manager .stop(network_event_receiver) @@ -707,7 +732,7 @@ async fn run(args: Args) -> anyhow::Result<()> { loop { let ( - mut consensus_event_receiver, + consensus_event_receiver, bootstrap_manager, consensus_manager, execution_manager, @@ -722,37 +747,52 @@ async fn run(args: Args) -> 
anyhow::Result<()> { ) = launch(node_wallet.clone()).await; // interrupt signal listener - let stop_signal = signal::ctrl_c(); - tokio::pin!(stop_signal); + let (tx, rx) = crossbeam_channel::bounded(1); + let interrupt_signal_listener = tokio::spawn(async move { + signal::ctrl_c().await.unwrap(); + tx.send(()).unwrap(); + }); + // loop over messages let restart = loop { massa_trace!("massa-node.main.run.select", {}); - tokio::select! { - evt = consensus_event_receiver.wait_event() => { - massa_trace!("massa-node.main.run.select.consensus_event", {}); - match evt { - Ok(ConsensusEvent::NeedSync) => { - warn!("in response to a desynchronization, the node is going to bootstrap again"); - break true; - }, - Err(err) => { - error!("consensus_event_receiver.wait_event error: {}", err); - break false; - } + match consensus_event_receiver.try_recv() { + Ok(evt) => match evt { + ConsensusEvent::NeedSync => { + warn!("in response to a desynchronization, the node is going to bootstrap again"); + break true; } }, - - _ = &mut stop_signal => { - massa_trace!("massa-node.main.run.select.stop", {}); - info!("interrupt signal received"); + Err(TryRecvError::Disconnected) => { + error!("consensus_event_receiver.wait_event disconnected"); break false; } + _ => {} + }; - _ = api_private_stop_rx.recv() => { + match api_private_stop_rx.try_recv() { + Ok(_) => { info!("stop command received from private API"); break false; } + Err(tokio::sync::mpsc::error::TryRecvError::Disconnected) => { + error!("api_private_stop_rx disconnected"); + break false; + } + _ => {} + } + match rx.try_recv() { + Ok(_) => { + info!("interrupt signal received"); + break false; + } + Err(crossbeam_channel::TryRecvError::Disconnected) => { + error!("interrupt_signal_listener disconnected"); + break false; + } + _ => {} } + sleep(Duration::from_millis(100)); }; stop( consensus_event_receiver, @@ -774,6 +814,7 @@ async fn run(args: Args) -> anyhow::Result<()> { if !restart { break; } + 
interrupt_signal_listener.abort(); } Ok(()) } diff --git a/massa-node/src/settings.rs b/massa-node/src/settings.rs index 9b0cb228b18..625f539a066 100644 --- a/massa-node/src/settings.rs +++ b/massa-node/src/settings.rs @@ -109,7 +109,7 @@ pub struct PoolSettings { pub max_item_return_count: usize, } -/// API configuration, read from a file configuration +/// API and server configuration, read from a file configuration. #[derive(Debug, Deserialize, Clone)] pub struct APISettings { pub draw_lookahead_period_count: u64, @@ -117,6 +117,16 @@ pub struct APISettings { pub bind_public: SocketAddr, pub max_arguments: u64, pub openrpc_spec_path: PathBuf, + pub max_request_body_size: u32, + pub max_response_body_size: u32, + pub max_connections: u32, + pub max_subscriptions_per_connection: u32, + pub max_log_length: u32, + pub allow_hosts: Vec, + pub batch_requests_supported: bool, + pub ping_interval: MassaTime, + pub enable_http: bool, + pub enable_ws: bool, } #[derive(Debug, Deserialize, Clone)] diff --git a/massa-pool-worker/src/tests/tools.rs b/massa-pool-worker/src/tests/tools.rs index 5aa101d79fc..c7c8c0a0eb8 100644 --- a/massa-pool-worker/src/tests/tools.rs +++ b/massa-pool-worker/src/tests/tools.rs @@ -17,7 +17,6 @@ use massa_models::{ use massa_pool_exports::{PoolConfig, PoolController, PoolManager}; use massa_signature::{KeyPair, PublicKey}; use massa_storage::Storage; -use std::collections::BTreeMap; use std::str::FromStr; use std::sync::mpsc::Receiver; @@ -126,28 +125,3 @@ pub fn _get_transaction_with_addresses( }; Operation::new_wrapped(content, OperationSerializer::new(), sender_keypair).unwrap() } - -pub fn _create_executesc( - expire_period: u64, - fee: u64, - max_gas: u64, - gas_price: u64, -) -> WrappedOperation { - let keypair = KeyPair::generate(); - - let data = vec![42; 7]; - - let op = OperationType::ExecuteSC { - data, - max_gas, - gas_price: Amount::from_str(&gas_price.to_string()).unwrap(), - datastore: BTreeMap::new(), - }; - - let content = 
Operation { - fee: Amount::from_str(&fee.to_string()).unwrap(), - expire_period, - op, - }; - Operation::new_wrapped(content, OperationSerializer::new(), &keypair).unwrap() -} diff --git a/massa-pool-worker/src/types.rs b/massa-pool-worker/src/types.rs index ff804243063..3ba6129a025 100644 --- a/massa-pool-worker/src/types.rs +++ b/massa-pool-worker/src/types.rs @@ -51,7 +51,7 @@ impl OperationInfo { size: op.serialized_size(), max_gas: op.get_gas_usage(), creator_address: op.creator_address, - fee: op.get_total_fee(), + fee: op.content.fee, thread: op.creator_address.get_thread(thread_count), validity_period_range: op.get_validity_range(operation_validity_periods), max_spending: op.get_max_spending(roll_price), @@ -61,7 +61,7 @@ impl OperationInfo { /// build a cursor from an operation fn build_operation_cursor(op: &WrappedOperation) -> PoolOperationCursor { - let quality = Ratio::new(op.get_total_fee().to_raw(), op.serialized_size() as u64); + let quality = Ratio::new(op.content.fee.to_raw(), op.serialized_size() as u64); let inner = (Reverse(quality), op.id); // TODO take into account max_gas as well in the future (multi-dimensional packing) PoolOperationCursor::new(inner) diff --git a/massa-pos-exports/src/cycle_info.rs b/massa-pos-exports/src/cycle_info.rs index bb4ccd89d62..3bddf8f19c2 100644 --- a/massa-pos-exports/src/cycle_info.rs +++ b/massa-pos-exports/src/cycle_info.rs @@ -1,8 +1,10 @@ use bitvec::vec::BitVec; +use massa_hash::{Hash, HASH_SIZE_BYTES}; use massa_models::{ - address::{Address, AddressDeserializer}, + address::{Address, AddressDeserializer, AddressSerializer}, prehash::PreHashMap, serialization::{BitVecDeserializer, BitVecSerializer}, + slot::Slot, }; use massa_serialization::{ Deserializer, SerializeError, Serializer, U64VarIntDeserializer, U64VarIntSerializer, @@ -20,8 +22,72 @@ use num::rational::Ratio; use std::collections::BTreeMap; use std::ops::Bound::Included; +use crate::PoSChanges; + +const CYCLE_INFO_HASH_INITIAL_BYTES: &[u8; 
32] = &[0; HASH_SIZE_BYTES]; + +struct CycleInfoHashComputer { + u64_ser: U64VarIntSerializer, + address_ser: AddressSerializer, + bitvec_ser: BitVecSerializer, +} + +impl CycleInfoHashComputer { + fn new() -> Self { + Self { + u64_ser: U64VarIntSerializer::new(), + address_ser: AddressSerializer::new(), + bitvec_ser: BitVecSerializer::new(), + } + } + + fn compute_cycle_hash(&self, cycle: u64) -> Hash { + // serialization can never fail in the following computations, unwrap is justified + let mut buffer = Vec::new(); + self.u64_ser.serialize(&cycle, &mut buffer).unwrap(); + Hash::compute_from(&buffer) + } + + fn compute_complete_hash(&self, complete: bool) -> Hash { + let mut buffer = Vec::new(); + self.u64_ser + .serialize(&(complete as u64), &mut buffer) + .unwrap(); + Hash::compute_from(&buffer) + } + + fn compute_seed_hash(&self, seed: &BitVec) -> Hash { + let mut buffer = Vec::new(); + self.bitvec_ser.serialize(seed, &mut buffer).unwrap(); + Hash::compute_from(&buffer) + } + + fn compute_roll_entry_hash(&self, address: &Address, roll_count: u64) -> Hash { + let mut buffer = Vec::new(); + self.address_ser.serialize(address, &mut buffer).unwrap(); + self.u64_ser.serialize(&roll_count, &mut buffer).unwrap(); + Hash::compute_from(&buffer) + } + + fn compute_prod_stats_entry_hash( + &self, + address: &Address, + prod_stats: &ProductionStats, + ) -> Hash { + let mut buffer = Vec::new(); + self.address_ser.serialize(address, &mut buffer).unwrap(); + self.u64_ser + .serialize(&prod_stats.block_success_count, &mut buffer) + .unwrap(); + self.u64_ser + .serialize(&prod_stats.block_failure_count, &mut buffer) + .unwrap(); + Hash::compute_from(&buffer) + } +} + /// State of a cycle for all threads -#[derive(Default, Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, PartialEq, Eq)] pub struct CycleInfo { /// cycle number pub cycle: u64, @@ -33,6 +99,222 @@ pub struct CycleInfo { pub rng_seed: BitVec, /// Per-address production statistics pub production_stats: 
PreHashMap, + /// Hash of the roll counts + pub roll_counts_hash: Hash, + /// Hash of the production statistics + pub production_stats_hash: Hash, + /// Hash of the cycle state + pub global_hash: Hash, +} + +impl CycleInfo { + /// Create a new `CycleInfo` and compute its hash + pub fn new_with_hash( + cycle: u64, + complete: bool, + roll_counts: BTreeMap, + rng_seed: BitVec, + production_stats: PreHashMap, + ) -> Self { + let hash_computer = CycleInfoHashComputer::new(); + let mut roll_counts_hash = Hash::from_bytes(CYCLE_INFO_HASH_INITIAL_BYTES); + let mut production_stats_hash = Hash::from_bytes(CYCLE_INFO_HASH_INITIAL_BYTES); + + // compute the cycle hash + let mut hash_concat: Vec = Vec::new(); + hash_concat.extend(hash_computer.compute_cycle_hash(cycle).to_bytes()); + hash_concat.extend(hash_computer.compute_complete_hash(complete).to_bytes()); + hash_concat.extend(hash_computer.compute_seed_hash(&rng_seed).to_bytes()); + for (addr, &count) in &roll_counts { + roll_counts_hash ^= hash_computer.compute_roll_entry_hash(addr, count); + } + hash_concat.extend(roll_counts_hash.to_bytes()); + for (addr, prod_stats) in &production_stats { + production_stats_hash ^= hash_computer.compute_prod_stats_entry_hash(addr, prod_stats); + } + hash_concat.extend(production_stats_hash.to_bytes()); + + // compute the global hash + let global_hash = Hash::compute_from(&hash_concat); + + // create the new cycle + CycleInfo { + cycle, + complete, + roll_counts, + rng_seed, + production_stats, + roll_counts_hash, + production_stats_hash, + global_hash, + } + } + + /// Apply every part of a `PoSChanges` to a cycle info, except for `deferred_credits` + pub(crate) fn apply_changes( + &mut self, + changes: PoSChanges, + slot: Slot, + periods_per_cycle: u64, + thread_count: u8, + ) -> bool { + let hash_computer = CycleInfoHashComputer::new(); + let slots_per_cycle = periods_per_cycle.saturating_mul(thread_count as u64); + let mut hash_concat: Vec = Vec::new(); + + // compute cycle hash 
and concat + let cycle_hash = hash_computer.compute_cycle_hash(self.cycle); + hash_concat.extend(cycle_hash.to_bytes()); + + // check for completion + self.complete = slot.is_last_of_cycle(periods_per_cycle, thread_count); + let complete_hash = hash_computer.compute_complete_hash(self.complete); + hash_concat.extend(complete_hash.to_bytes()); + + // extend seed_bits with changes.seed_bits + self.rng_seed.extend(changes.seed_bits); + let rng_seed_hash = hash_computer.compute_seed_hash(&self.rng_seed); + hash_concat.extend(rng_seed_hash.to_bytes()); + + // extend roll counts + for (addr, roll_count) in changes.roll_changes { + if roll_count == 0 && let Some(removed_count) = self.roll_counts.remove(&addr) { + self.roll_counts_hash ^= + hash_computer.compute_roll_entry_hash(&addr, removed_count); + } else { + if let Some(replaced_count) = self.roll_counts.insert(addr, roll_count) { + self.roll_counts_hash ^= + hash_computer.compute_roll_entry_hash(&addr, replaced_count); + } + self.roll_counts_hash ^= hash_computer.compute_roll_entry_hash(&addr, roll_count); + } + } + hash_concat.extend(self.roll_counts_hash.to_bytes()); + + // extend production stats + for (addr, stats) in changes.production_stats { + self.production_stats + .entry(addr) + .and_modify(|current_stats| { + self.production_stats_hash ^= + hash_computer.compute_prod_stats_entry_hash(&addr, current_stats); + current_stats.extend(&stats); + self.production_stats_hash ^= + hash_computer.compute_prod_stats_entry_hash(&addr, current_stats); + }) + .or_insert_with(|| { + self.production_stats_hash ^= + hash_computer.compute_prod_stats_entry_hash(&addr, &stats); + stats + }); + } + hash_concat.extend(self.production_stats_hash.to_bytes()); + + // if the cycle just completed, check that it has the right number of seed bits + if self.complete && self.rng_seed.len() as u64 != slots_per_cycle { + panic!("cycle completed with incorrect number of seed bits"); + } + + // compute the global hash + self.global_hash = 
Hash::compute_from(&hash_concat); + + // return the completion status + self.complete + } +} + +#[test] +fn test_cycle_info_hash_computation() { + use crate::DeferredCredits; + use bitvec::prelude::*; + + // cycle and address + let mut cycle_a = CycleInfo::new_with_hash( + 0, + false, + BTreeMap::default(), + BitVec::default(), + PreHashMap::default(), + ); + let addr = Address::from_bytes(&[0u8; 32]); + + // add changes + let mut roll_changes = PreHashMap::default(); + roll_changes.insert(addr, 10); + let mut production_stats = PreHashMap::default(); + production_stats.insert( + addr, + ProductionStats { + block_success_count: 4, + block_failure_count: 0, + }, + ); + let changes = PoSChanges { + seed_bits: bitvec![u8, Lsb0; 0, 10], + roll_changes: roll_changes.clone(), + production_stats: production_stats.clone(), + deferred_credits: DeferredCredits::default(), + }; + cycle_a.apply_changes(changes, Slot::new(0, 0), 2, 2); + + // update changes once + roll_changes.clear(); + roll_changes.insert(addr, 20); + production_stats.clear(); + production_stats.insert( + addr, + ProductionStats { + block_success_count: 4, + block_failure_count: 6, + }, + ); + let changes = PoSChanges { + seed_bits: bitvec![u8, Lsb0; 0, 20], + roll_changes: roll_changes.clone(), + production_stats: production_stats.clone(), + deferred_credits: DeferredCredits::default(), + }; + cycle_a.apply_changes(changes, Slot::new(0, 1), 2, 2); + + // update changes twice + roll_changes.clear(); + roll_changes.insert(addr, 0); + production_stats.clear(); + production_stats.insert( + addr, + ProductionStats { + block_success_count: 4, + block_failure_count: 12, + }, + ); + let changes = PoSChanges { + seed_bits: bitvec![u8, Lsb0; 0, 30], + roll_changes, + production_stats, + deferred_credits: DeferredCredits::default(), + }; + cycle_a.apply_changes(changes, Slot::new(1, 0), 2, 2); + + // create a seconde cycle from same value and match hash + let cycle_b = CycleInfo::new_with_hash( + 0, + cycle_a.complete, 
+ cycle_a.roll_counts, + cycle_a.rng_seed, + cycle_a.production_stats, + ); + assert_eq!( + cycle_a.roll_counts_hash, cycle_b.roll_counts_hash, + "roll_counts_hash mismatch" + ); + assert_eq!( + cycle_a.production_stats_hash, cycle_b.production_stats_hash, + "production_stats_hash mismatch" + ); + assert_eq!( + cycle_a.global_hash, cycle_b.global_hash, + "global_hash mismatch" + ); } /// Serializer for `CycleInfo` @@ -134,12 +416,14 @@ impl Deserializer for CycleInfoDeserializer { Vec<(Address, u64)>, // roll_counts BitVec, // rng_seed PreHashMap, // production_stats (address, n_success, n_fail) - )| CycleInfo { - cycle, - complete, - roll_counts: roll_counts.into_iter().collect(), - rng_seed, - production_stats, + )| { + CycleInfo::new_with_hash( + cycle, + complete, + roll_counts.into_iter().collect(), + rng_seed, + production_stats, + ) }, ) .parse(buffer) diff --git a/massa-pos-exports/src/deferred_credits.rs b/massa-pos-exports/src/deferred_credits.rs index b65271cc621..f5f72aa02af 100644 --- a/massa-pos-exports/src/deferred_credits.rs +++ b/massa-pos-exports/src/deferred_credits.rs @@ -1,5 +1,6 @@ +use massa_hash::{Hash, HASH_SIZE_BYTES}; use massa_models::{ - address::{Address, AddressDeserializer}, + address::{Address, AddressDeserializer, AddressSerializer}, amount::{Amount, AmountDeserializer, AmountSerializer}, prehash::PreHashMap, slot::{Slot, SlotDeserializer, SlotSerializer}, @@ -16,43 +17,117 @@ use nom::{ use std::collections::BTreeMap; use std::ops::Bound::{Excluded, Included}; -#[derive(Debug, Default, Clone)] +const DEFERRED_CREDITS_HASH_INITIAL_BYTES: &[u8; 32] = &[0; HASH_SIZE_BYTES]; + +#[derive(Debug, Clone)] /// Structure containing all the PoS deferred credits information -pub struct DeferredCredits(pub BTreeMap>); +pub struct DeferredCredits { + /// Deferred credits + pub credits: BTreeMap>, + /// Hash of the current deferred credits state + pub hash: Hash, +} + +impl Default for DeferredCredits { + fn default() -> Self { + Self { + 
credits: Default::default(), + hash: Hash::from_bytes(DEFERRED_CREDITS_HASH_INITIAL_BYTES), + } + } +} + +struct DeferredCreditsHashComputer { + slot_ser: SlotSerializer, + address_ser: AddressSerializer, + amount_ser: AmountSerializer, +} + +impl DeferredCreditsHashComputer { + fn new() -> Self { + Self { + slot_ser: SlotSerializer::new(), + address_ser: AddressSerializer::new(), + amount_ser: AmountSerializer::new(), + } + } + + fn compute_credit_hash(&self, slot: &Slot, address: &Address, amount: &Amount) -> Hash { + // serialization can never fail in the following computations, unwrap is justified + let mut buffer = Vec::new(); + self.slot_ser.serialize(slot, &mut buffer).unwrap(); + self.address_ser.serialize(address, &mut buffer).unwrap(); + self.amount_ser.serialize(amount, &mut buffer).unwrap(); + Hash::compute_from(&buffer) + } +} impl DeferredCredits { - /// Extends the current `DeferredCredits` with another but accumulates the addresses and amounts + /// Extends the current `DeferredCredits` with another and replace the amounts for existing addresses pub fn nested_extend(&mut self, other: Self) { - for (slot, new_credits) in other.0 { - self.0 - .entry(slot) - .and_modify(|current_credits| { - for (address, new_amount) in new_credits.iter() { - current_credits - .entry(*address) - .and_modify(|current_amount| { - *current_amount = current_amount.saturating_add(*new_amount); - }) - .or_insert(*new_amount); - } - }) - .or_insert(new_credits); + for (slot, other_credits) in other.credits { + self.credits.entry(slot).or_default().extend(other_credits); } } - /// Remove zero credits + /// Extends the current `DeferredCredits` with another, replace the amounts for existing addresses and compute the object hash, use only on finality + pub fn final_nested_extend(&mut self, other: Self) { + let hash_computer = DeferredCreditsHashComputer::new(); + for (slot, other_credits) in other.credits { + let self_credits = self.credits.entry(slot).or_default(); + for 
(address, other_amount) in other_credits { + if let Some(cur_amount) = self_credits.insert(address, other_amount) { + self.hash ^= hash_computer.compute_credit_hash(&slot, &address, &cur_amount); + } + self.hash ^= hash_computer.compute_credit_hash(&slot, &address, &other_amount); + } + } + } + + /// Remove credits set to zero, use only on finality pub fn remove_zeros(&mut self) { - let mut delete_slots = Vec::new(); - for (slot, credits) in &mut self.0 { - credits.retain(|_addr, amount| !amount.is_zero()); + let hash_computer = DeferredCreditsHashComputer::new(); + let mut empty_slots = Vec::new(); + for (slot, credits) in &mut self.credits { + credits.retain(|address, amount| { + // if amount is zero XOR the credit hash and do not retain + if amount.is_zero() { + self.hash ^= hash_computer.compute_credit_hash(slot, address, amount); + false + } else { + true + } + }); if credits.is_empty() { - delete_slots.push(*slot); + empty_slots.push(*slot); } } - for slot in delete_slots { - self.0.remove(&slot); + for slot in empty_slots { + self.credits.remove(&slot); } } + + /// Gets the deferred credits for a given address that will be credited at a given slot + pub fn get_address_deferred_credit_for_slot( + &self, + addr: &Address, + slot: &Slot, + ) -> Option { + if let Some(v) = self + .credits + .get(slot) + .and_then(|slot_credits| slot_credits.get(addr)) + { + return Some(*v); + } + None + } + + /// Insert/overwrite a deferred credit + pub fn insert(&mut self, addr: Address, slot: Slot, amount: Amount) { + let entry = self.credits.entry(slot).or_default(); + entry.insert(addr, amount); + } } /// Serializer for `DeferredCredits` @@ -86,9 +161,10 @@ impl Serializer for DeferredCreditsSerializer { buffer: &mut Vec, ) -> Result<(), SerializeError> { // deferred credits length - self.u64_ser.serialize(&(value.0.len() as u64), buffer)?; + self.u64_ser + .serialize(&(value.credits.len() as u64), buffer)?; // deferred credits - for (slot, credits) in &value.0 { + for 
(slot, credits) in &value.credits { // slot self.slot_ser.serialize(slot, buffer)?; // credits @@ -143,7 +219,10 @@ impl Deserializer for DeferredCreditsDeserializer { )), ), ) - .map(|elements| DeferredCredits(elements.into_iter().collect())) + .map(|elements| DeferredCredits { + credits: elements.into_iter().collect(), + hash: Hash::from_bytes(DEFERRED_CREDITS_HASH_INITIAL_BYTES), + }) .parse(buffer) } } diff --git a/massa-pos-exports/src/pos_changes.rs b/massa-pos-exports/src/pos_changes.rs index a4ce86d5265..81143043053 100644 --- a/massa-pos-exports/src/pos_changes.rs +++ b/massa-pos-exports/src/pos_changes.rs @@ -38,8 +38,9 @@ impl PoSChanges { self.seed_bits.is_empty() && self.roll_changes.is_empty() && self.production_stats.is_empty() - && self.deferred_credits.0.is_empty() + && self.deferred_credits.credits.is_empty() } + /// Extends the current `PosChanges` with another one pub fn extend(&mut self, other: PoSChanges) { // extend seed bits diff --git a/massa-pos-exports/src/pos_final_state.rs b/massa-pos-exports/src/pos_final_state.rs index dfd779780f1..d62194f742d 100644 --- a/massa-pos-exports/src/pos_final_state.rs +++ b/massa-pos-exports/src/pos_final_state.rs @@ -4,17 +4,11 @@ use bitvec::vec::BitVec; use massa_hash::Hash; use massa_models::error::ModelsError; use massa_models::streaming_step::StreamingStep; -use massa_models::{ - address::{Address, AddressDeserializer}, - amount::{Amount, AmountDeserializer}, - prehash::PreHashMap, - slot::{Slot, SlotDeserializer}, -}; -use massa_serialization::U64VarIntDeserializer; +use massa_models::{address::Address, amount::Amount, prehash::PreHashMap, slot::Slot}; use std::collections::VecDeque; use std::{ collections::BTreeMap, - ops::Bound::{Excluded, Included, Unbounded}, + ops::Bound::{Excluded, Unbounded}, path::PathBuf, }; use tracing::debug; @@ -33,14 +27,6 @@ pub struct PoSFinalState { pub initial_rolls: BTreeMap, /// initial seeds, used for negative cycle look back (cycles -2, -1 in that order) pub 
initial_seeds: Vec, - /// amount deserializer - pub amount_deserializer: AmountDeserializer, - /// slot deserializer - pub slot_deserializer: SlotDeserializer, - /// deserializer - pub deferred_credit_length_deserializer: U64VarIntDeserializer, - /// address deserializer - pub address_deserializer: AddressDeserializer, } impl PoSFinalState { @@ -63,17 +49,6 @@ impl PoSFinalState { let init_seed = Hash::compute_from(initial_seed_string.as_bytes()); let initial_seeds = vec![Hash::compute_from(init_seed.to_bytes()), init_seed]; - // Deserializers - let amount_deserializer = - AmountDeserializer::new(Included(Amount::MIN), Included(Amount::MAX)); - let slot_deserializer = SlotDeserializer::new( - (Included(u64::MIN), Included(u64::MAX)), - (Included(0), Excluded(config.thread_count)), - ); - let deferred_credit_length_deserializer = - U64VarIntDeserializer::new(Included(u64::MIN), Included(u64::MAX)); // TODO define a max here - let address_deserializer = AddressDeserializer::new(); - Ok(Self { config, cycle_history: Default::default(), @@ -81,10 +56,6 @@ impl PoSFinalState { selector, initial_rolls, initial_seeds, - amount_deserializer, - slot_deserializer, - deferred_credit_length_deserializer, - address_deserializer, }) } @@ -103,13 +74,13 @@ impl PoSFinalState { // assume genesis blocks have a "False" seed bit to avoid passing them around rng_seed.push(false); } - self.cycle_history.push_back(CycleInfo { - cycle: 0, + self.cycle_history.push_back(CycleInfo::new_with_hash( + 0, + false, + self.initial_rolls.clone(), rng_seed, - production_stats: Default::default(), - roll_counts: self.initial_rolls.clone(), - complete: false, - }); + PreHashMap::default(), + )); } /// Sends the current draw inputs (initial or bootstrapped) to the selector. 
@@ -199,13 +170,13 @@ impl PoSFinalState { // extend the last incomplete cycle } else if info.cycle.checked_add(1) == Some(cycle) && info.complete { // the previous cycle is complete, push a new incomplete/empty one to extend - self.cycle_history.push_back(CycleInfo { + self.cycle_history.push_back(CycleInfo::new_with_hash( cycle, - roll_counts: info.roll_counts.clone(), - rng_seed: BitVec::with_capacity(slots_per_cycle), - production_stats: Default::default(), - complete: false, - }); + false, + info.roll_counts.clone(), + BitVec::with_capacity(slots_per_cycle), + PreHashMap::default(), + )); while self.cycle_history.len() > self.config.cycle_history_length { self.cycle_history.pop_front(); } @@ -218,44 +189,24 @@ impl PoSFinalState { panic!("PoS History shouldn't be empty here."); } - // update cycle data - let cycle_completed: bool; - { - let current = self - .cycle_history - .back_mut() - .expect("cycle history should be non-empty"); // because if was filled above - - // extend seed_bits with changes.seed_bits - current.rng_seed.extend(changes.seed_bits); - - // extend roll counts - current.roll_counts.extend(changes.roll_changes); - current.roll_counts.retain(|_, &mut count| count != 0); - - // extend production stats - for (addr, stats) in changes.production_stats { - current - .production_stats - .entry(addr) - .and_modify(|cur| cur.extend(&stats)) - .or_insert(stats); - } + // get the last history cycle, should always be present because it was filled above + let current = self + .cycle_history + .back_mut() + .expect("cycle history should be non-empty"); - // check for completion - current.complete = - slot.is_last_of_cycle(self.config.periods_per_cycle, self.config.thread_count); - // if the cycle just completed, check that it has the right number of seed bits - if current.complete && current.rng_seed.len() != slots_per_cycle { - panic!("cycle completed with incorrect number of seed bits"); - } - cycle_completed = current.complete; - } + // apply changes 
to the current cycle + let cycle_completed = current.apply_changes( + changes.clone(), + slot, + self.config.periods_per_cycle, + self.config.thread_count, + ); // extent deferred_credits with changes.deferred_credits // remove zero-valued credits self.deferred_credits - .nested_extend(changes.deferred_credits); + .final_nested_extend(changes.deferred_credits); self.deferred_credits.remove_zeros(); // feed the cycle if it is complete @@ -350,7 +301,7 @@ impl PoSFinalState { /// Retrieves every deferred credit of the given slot pub fn get_deferred_credits_at(&self, slot: &Slot) -> PreHashMap { self.deferred_credits - .0 + .credits .get(slot) .cloned() .unwrap_or_default() @@ -402,14 +353,14 @@ impl PoSFinalState { StreamingStep::Ongoing(last_cycle) => { if let Some(index) = self.get_cycle_index(last_cycle) { if index == self.cycle_history.len() - 1 { - return Ok((None, StreamingStep::Finished)); + return Ok((None, StreamingStep::Finished(None))); } index.saturating_add(1) } else { return Err(ModelsError::OutdatedBootstrapCursor); } } - StreamingStep::Finished => return Ok((None, cursor)), + StreamingStep::Finished(_) => return Ok((None, cursor)), }; let cycle_info = self .cycle_history @@ -436,12 +387,12 @@ impl PoSFinalState { let left_bound = match cursor { StreamingStep::Started => Unbounded, StreamingStep::Ongoing(last_slot) => Excluded(last_slot), - StreamingStep::Finished => return (credits_part, cursor), + StreamingStep::Finished(_) => return (credits_part, cursor), }; let mut credit_part_last_slot: Option = None; - for (slot, credits) in self.deferred_credits.0.range((left_bound, Unbounded)) { - if credits_part.0.len() < self.config.credits_bootstrap_part_size as usize { - credits_part.0.insert(*slot, credits.clone()); + for (slot, credits) in self.deferred_credits.credits.range((left_bound, Unbounded)) { + if credits_part.credits.len() < self.config.credits_bootstrap_part_size as usize { + credits_part.credits.insert(*slot, credits.clone()); 
credit_part_last_slot = Some(*slot); } else { break; @@ -450,7 +401,7 @@ impl PoSFinalState { if let Some(last_slot) = credit_part_last_slot { (credits_part, StreamingStep::Ongoing(last_slot)) } else { - (credits_part, StreamingStep::Finished) + (credits_part, StreamingStep::Finished(None)) } } @@ -464,14 +415,17 @@ impl PoSFinalState { .cycle_history .back() .map(|info| info.cycle.saturating_add(1)); - let current_cycle = cycle_info.cycle; - if let Some(next_cycle) = opt_next_cycle && current_cycle != next_cycle { - panic!("PoS received cycle ({}) should be equal to the next expected cycle ({})", current_cycle, next_cycle); - } + let received_cycle = cycle_info.cycle; + if let Some(next_cycle) = opt_next_cycle && received_cycle != next_cycle { + panic!( + "PoS received cycle ({}) should be equal to the next expected cycle ({})", + received_cycle, next_cycle + ); + } self.cycle_history.push_back(cycle_info); - StreamingStep::Ongoing(current_cycle) + StreamingStep::Ongoing(received_cycle) } else { - StreamingStep::Finished + StreamingStep::Finished(None) } } @@ -480,16 +434,16 @@ impl PoSFinalState { /// # Arguments /// `part`: `DeferredCredits` from `get_pos_state_part` and used to update PoS final state pub fn set_deferred_credits_part(&mut self, part: DeferredCredits) -> StreamingStep { - self.deferred_credits.nested_extend(part); + self.deferred_credits.final_nested_extend(part); if let Some(slot) = self .deferred_credits - .0 + .credits .last_key_value() .map(|(&slot, _)| slot) { StreamingStep::Ongoing(slot) } else { - StreamingStep::Finished + StreamingStep::Finished(None) } } } diff --git a/massa-pos-exports/src/test_exports/bootstrap.rs b/massa-pos-exports/src/test_exports/bootstrap.rs index 0a1f7447340..ace927d33e1 100644 --- a/massa-pos-exports/src/test_exports/bootstrap.rs +++ b/massa-pos-exports/src/test_exports/bootstrap.rs @@ -34,12 +34,12 @@ pub fn assert_eq_pos_state(s1: &PoSFinalState, s2: &PoSFinalState) { "PoS cycle_history mismatching" ); 
assert_eq!( - s1.deferred_credits.0.len(), - s2.deferred_credits.0.len(), + s1.deferred_credits.credits.len(), + s2.deferred_credits.credits.len(), "PoS deferred_credits len mismatching" ); assert_eq!( - s1.deferred_credits.0, s2.deferred_credits.0, + s1.deferred_credits.credits, s2.deferred_credits.credits, "PoS deferred_credits mismatching" ); assert_eq!( diff --git a/massa-protocol-exports/src/error.rs b/massa-protocol-exports/src/error.rs index ead3ae5d7d6..11dd853e80f 100644 --- a/massa-protocol-exports/src/error.rs +++ b/massa-protocol-exports/src/error.rs @@ -1,6 +1,5 @@ // Copyright (c) 2022 MASSA LABS -use crate::ProtocolEvent; use displaydoc::Display; use massa_models::error::ModelsError; use massa_network_exports::ConnectionId; @@ -12,44 +11,44 @@ use thiserror::Error; #[non_exhaustive] #[derive(Display, Error, Debug)] pub enum ProtocolError { - /// wrong signature + /// Wrong signature WrongSignature, - /// Protocol err:{0} + /// Protocol error: {0} GeneralProtocolError(String), /// An error occurred during channel communication: {0} ChannelError(String), - /// A tokio task has crashed err:{0} + /// A tokio task has crashed err: {0} TokioTaskJoinError(#[from] tokio::task::JoinError), - /// error receiving one shot response : {0} + /// Error receiving one shot response: {0} TokioRecvError(#[from] tokio::sync::oneshot::error::RecvError), - /// error sending protocol event: {0} - TokioSendError(#[from] Box>), - /// Error during network connection:`{0:?}` + /// Error during network connection: `{0:?}` PeerConnectionError(NetworkConnectionErrorType), - /// The ip:`{0}` address is not valid + /// The ip: `{0}` address is not valid InvalidIpError(IpAddr), - /// Active connection missing:`{0}` + /// Active connection missing: `{0}` ActiveConnectionMissing(ConnectionId), - /// IO error : {0} + /// IO error: {0} IOError(#[from] std::io::Error), - /// Serde error : {0} + /// Serde error: {0} SerdeError(#[from] serde_json::Error), - /// `massa_hash` error {0} + /// 
`massa_hash` error: {0} MassaHashError(#[from] massa_hash::MassaHashError), - /// the network controller should not drop a node command sender before shutting down the node. + /// The network controller should not drop a node command sender before shutting down the node. UnexpectedNodeCommandChannelClosure, - /// the writer of a node should not drop its event sender before sending a `clean_exit` message. + /// The writer of a node should not drop its event sender before sending a `clean_exit` message. UnexpectedWriterClosure, - /// Time error {0} + /// Time error: {0} TimeError(#[from] massa_time::TimeError), - /// missing peers + /// Missing peers MissingPeersError, - /// models error: {0} + /// Models error: {0} ModelsError(#[from] ModelsError), - /// network error: {0} + /// Network error: {0} NetworkError(#[from] NetworkError), - /// container inconsistency error: {0} + /// Container inconsistency error: {0} ContainerInconsistencyError(String), + /// Invalid operation error: {0} + InvalidOperationError(String), } #[derive(Debug)] diff --git a/massa-protocol-exports/src/lib.rs b/massa-protocol-exports/src/lib.rs index dd2562b6b9a..ab550c45e2e 100644 --- a/massa-protocol-exports/src/lib.rs +++ b/massa-protocol-exports/src/lib.rs @@ -11,8 +11,8 @@ mod settings; pub use error::ProtocolError; pub use protocol_controller::{ - BlocksResults, ProtocolCommand, ProtocolCommandSender, ProtocolEvent, ProtocolEventReceiver, - ProtocolManagementCommand, ProtocolManager, + BlocksResults, ProtocolCommand, ProtocolCommandSender, ProtocolManagementCommand, + ProtocolManager, }; pub use settings::ProtocolConfig; diff --git a/massa-protocol-exports/src/protocol_controller.rs b/massa-protocol-exports/src/protocol_controller.rs index 502e7a290ff..4f615dc194c 100644 --- a/massa-protocol-exports/src/protocol_controller.rs +++ b/massa-protocol-exports/src/protocol_controller.rs @@ -1,53 +1,19 @@ // Copyright (c) 2022 MASSA LABS -use std::collections::VecDeque; - use 
crate::error::ProtocolError; use massa_logging::massa_trace; +use massa_models::prehash::{PreHashMap, PreHashSet}; use massa_models::{ block::{BlockId, WrappedHeader}, endorsement::EndorsementId, operation::OperationId, }; -use massa_models::{ - prehash::{PreHashMap, PreHashSet}, - slot::Slot, -}; use massa_network_exports::NetworkEventReceiver; use massa_storage::Storage; use serde::Serialize; use tokio::{sync::mpsc, task::JoinHandle}; -use tracing::{debug, info}; - -/// Possible types of events that can happen. -#[allow(clippy::large_enum_variant)] -#[derive(Debug)] -pub enum ProtocolEvent { - /// A block with a valid signature has been received. - ReceivedBlock { - /// block ID - block_id: BlockId, - /// block slot - slot: Slot, - /// storage instance containing the block and its dependencies (except the parents) - storage: Storage, - }, - /// A message to tell the consensus that a block is invalid - InvalidBlock { - /// block ID - block_id: BlockId, - /// header - header: WrappedHeader, - }, - /// A block header with a valid signature has been received. - ReceivedBlockHeader { - /// its id - block_id: BlockId, - /// The header - header: WrappedHeader, - }, -} +use tracing::info; /// block result: map block id to /// ```md @@ -100,7 +66,7 @@ impl ProtocolCommandSender { /// # Arguments /// * `block_id`: ID of the block /// * `storage`: Storage instance containing references to the block and all its dependencies - pub async fn integrated_block( + pub fn integrated_block( &mut self, block_id: BlockId, storage: Storage, @@ -109,34 +75,31 @@ impl ProtocolCommandSender { "block_id": block_id }); self.0 - .send(ProtocolCommand::IntegratedBlock { block_id, storage }) - .await + .blocking_send(ProtocolCommand::IntegratedBlock { block_id, storage }) .map_err(|_| ProtocolError::ChannelError("block_integrated command send error".into())) } /// Notify to protocol an attack attempt. 
- pub async fn notify_block_attack(&mut self, block_id: BlockId) -> Result<(), ProtocolError> { + pub fn notify_block_attack(&mut self, block_id: BlockId) -> Result<(), ProtocolError> { massa_trace!("protocol.command_sender.notify_block_attack", { "block_id": block_id }); self.0 - .send(ProtocolCommand::AttackBlockDetected(block_id)) - .await + .blocking_send(ProtocolCommand::AttackBlockDetected(block_id)) .map_err(|_| { ProtocolError::ChannelError("notify_block_attack command send error".into()) }) } /// update the block wish list - pub async fn send_wishlist_delta( + pub fn send_wishlist_delta( &mut self, new: PreHashMap>, remove: PreHashSet, ) -> Result<(), ProtocolError> { massa_trace!("protocol.command_sender.send_wishlist_delta", { "new": new, "remove": remove }); self.0 - .send(ProtocolCommand::WishlistDelta { new, remove }) - .await + .blocking_send(ProtocolCommand::WishlistDelta { new, remove }) .map_err(|_| { ProtocolError::ChannelError("send_wishlist_delta command send error".into()) }) @@ -145,13 +108,12 @@ impl ProtocolCommandSender { /// Propagate a batch of operation ids (from pool). /// /// note: Full `OperationId` is replaced by a `OperationPrefixId` later by the worker. - pub async fn propagate_operations(&mut self, operations: Storage) -> Result<(), ProtocolError> { + pub fn propagate_operations(&mut self, operations: Storage) -> Result<(), ProtocolError> { massa_trace!("protocol.command_sender.propagate_operations", { "operations": operations.get_op_refs() }); self.0 - .send(ProtocolCommand::PropagateOperations(operations)) - .await + .blocking_send(ProtocolCommand::PropagateOperations(operations)) .map_err(|_| { ProtocolError::ChannelError("propagate_operation command send error".into()) }) @@ -170,36 +132,6 @@ impl ProtocolCommandSender { } } -/// Protocol event receiver -pub struct ProtocolEventReceiver(pub mpsc::Receiver); - -impl ProtocolEventReceiver { - /// Receives the next `ProtocolEvent` from connected Node. 
- /// None is returned when all Sender halves have dropped, - /// indicating that no further values can be sent on the channel - pub async fn wait_event(&mut self) -> Result { - massa_trace!("protocol.event_receiver.wait_event", {}); - self.0.recv().await.ok_or_else(|| { - ProtocolError::ChannelError( - "DefaultProtocolController wait_event channel recv failed".into(), - ) - }) - } - - /// drains remaining events and returns them in a `VecDeque` - /// note: events are sorted from oldest to newest - pub async fn drain(mut self) -> VecDeque { - let mut remaining_events: VecDeque = VecDeque::new(); - while let Some(evt) = self.0.recv().await { - debug!( - "after receiving event from ProtocolEventReceiver.0 in protocol_controller drain" - ); - remaining_events.push_back(evt); - } - remaining_events - } -} - /// protocol manager used to stop the protocol pub struct ProtocolManager { join_handle: JoinHandle>, @@ -219,14 +151,9 @@ impl ProtocolManager { } /// Stop the protocol controller - pub async fn stop( - self, - protocol_event_receiver: ProtocolEventReceiver, - //protocol_pool_event_receiver: ProtocolPoolEventReceiver, - ) -> Result { + pub async fn stop(self) -> Result { info!("stopping protocol controller..."); drop(self.manager_tx); - let _remaining_events = protocol_event_receiver.drain().await; let network_event_receiver = self.join_handle.await??; info!("protocol controller stopped"); Ok(network_event_receiver) diff --git a/massa-protocol-exports/src/test_exports/mock.rs b/massa-protocol-exports/src/test_exports/mock.rs index 5639ab80d92..b38436b1606 100644 --- a/massa-protocol-exports/src/test_exports/mock.rs +++ b/massa-protocol-exports/src/test_exports/mock.rs @@ -1,14 +1,7 @@ // Copyright (c) 2022 MASSA LABS -use crate::{ - protocol_controller::ProtocolEventReceiver, ProtocolCommand, ProtocolCommandSender, - ProtocolEvent, -}; -use massa_models::{ - block::{BlockId, WrappedHeader}, - slot::Slot, -}; -use massa_storage::Storage; +use 
crate::{ProtocolCommand, ProtocolCommandSender}; +use massa_models::block::BlockId; use massa_time::MassaTime; use tokio::{sync::mpsc, time::sleep}; @@ -16,22 +9,17 @@ use tokio::{sync::mpsc, time::sleep}; /// TODO: Improve doc pub struct MockProtocolController { protocol_command_rx: mpsc::Receiver, - protocol_event_tx: mpsc::Sender, } impl MockProtocolController { /// Creates a new protocol mock - /// TODO: Improve doc - pub fn new() -> (Self, ProtocolCommandSender, ProtocolEventReceiver) { + pub fn new() -> (Self, ProtocolCommandSender) { let (protocol_command_tx, protocol_command_rx) = mpsc::channel::(256); - let (protocol_event_tx, protocol_event_rx) = mpsc::channel::(256); ( MockProtocolController { - protocol_event_tx, protocol_command_rx, }, ProtocolCommandSender(protocol_command_tx), - ProtocolEventReceiver(protocol_event_rx), ) } @@ -53,27 +41,6 @@ impl MockProtocolController { } } - /// Note: if you care about the operation set, use another method. - pub async fn receive_block(&mut self, block_id: BlockId, slot: Slot, storage: Storage) { - self.protocol_event_tx - .send(ProtocolEvent::ReceivedBlock { - block_id, - slot, - storage, - }) - .await - .expect("could not send protocol event"); - } - - /// Send a receive header to the protocol event channel - pub async fn receive_header(&mut self, header: WrappedHeader) { - let block_id = header.id; - self.protocol_event_tx - .send(ProtocolEvent::ReceivedBlockHeader { block_id, header }) - .await - .expect("could not send protocol event"); - } - /// Not implemented pub async fn receive_get_active_blocks(&mut self, _list: Vec) {} diff --git a/massa-protocol-exports/src/tests/tools.rs b/massa-protocol-exports/src/tests/tools.rs index 80d6d57f907..3544c84c7fa 100644 --- a/massa-protocol-exports/src/tests/tools.rs +++ b/massa-protocol-exports/src/tests/tools.rs @@ -1,12 +1,10 @@ // Copyright (c) 2022 MASSA LABS use super::mock_network_controller::MockNetworkController; -use 
crate::protocol_controller::{ProtocolCommandSender, ProtocolEventReceiver}; -use crate::{ProtocolConfig, ProtocolEvent}; +use crate::ProtocolConfig; use massa_hash::Hash; use massa_models::node::NodeId; use massa_models::operation::OperationSerializer; -use massa_models::prehash::PreHashSet; use massa_models::wrapped::WrappedContent; use massa_models::{ address::Address, @@ -16,7 +14,7 @@ use massa_models::{ operation::{Operation, OperationType, WrappedOperation}, slot::Slot, }; -use massa_network_exports::{AskForBlocksInfo, BlockInfoReply, NetworkCommand}; +use massa_network_exports::{AskForBlocksInfo, NetworkCommand}; use massa_signature::KeyPair; use massa_time::MassaTime; use std::collections::HashMap; @@ -162,65 +160,6 @@ pub fn create_block_with_endorsements( .unwrap() } -/// send a block and assert it has been propagate (or not) -pub async fn send_and_propagate_block( - network_controller: &mut MockNetworkController, - block: WrappedBlock, - valid: bool, - source_node_id: NodeId, - protocol_event_receiver: &mut ProtocolEventReceiver, - protocol_command_sender: &mut ProtocolCommandSender, - operations: Vec, -) { - let expected_hash = block.id; - - network_controller - .send_header(source_node_id, block.content.header.clone()) - .await; - - protocol_command_sender - .send_wishlist_delta( - vec![(block.id, Some(block.content.header.clone()))] - .into_iter() - .collect(), - PreHashSet::::default(), - ) - .await - .unwrap(); - - // Send block info to protocol. - let info = vec![( - block.id, - BlockInfoReply::Info(block.content.operations.clone()), - )]; - network_controller - .send_block_info(source_node_id, info) - .await; - - // Send full ops. - let info = vec![(block.id, BlockInfoReply::Operations(operations))]; - network_controller - .send_block_info(source_node_id, info) - .await; - - // Check protocol sends block to consensus. 
- let hash = match wait_protocol_event(protocol_event_receiver, 1000.into(), |evt| match evt { - evt @ ProtocolEvent::ReceivedBlock { .. } => Some(evt), - _ => None, - }) - .await - { - Some(ProtocolEvent::ReceivedBlock { block_id, .. }) => Some(block_id), - None => None, - _ => panic!("Unexpected or no protocol event."), - }; - if valid { - assert_eq!(expected_hash, hash.unwrap()); - } else { - assert!(hash.is_none(), "unexpected protocol event") - } -} - /// Creates an endorsement for use in protocol tests, /// without paying attention to consensus related things. pub fn create_endorsement() -> WrappedEndorsement { @@ -288,28 +227,6 @@ pub fn create_protocol_config() -> ProtocolConfig { } } -/// wait protocol event -pub async fn wait_protocol_event( - protocol_event_receiver: &mut ProtocolEventReceiver, - timeout: MassaTime, - filter_map: F, -) -> Option -where - F: Fn(ProtocolEvent) -> Option, -{ - let timer = sleep(timeout.into()); - tokio::pin!(timer); - loop { - tokio::select! { - evt_opt = protocol_event_receiver.wait_event() => match evt_opt { - Ok(orig_evt) => if let Some(res_evt) = filter_map(orig_evt) { return Some(res_evt); }, - _ => return None - }, - _ = &mut timer => return None - } - } -} - /// assert block id has been asked to node pub async fn assert_hash_asked_to_node( hash_1: BlockId, diff --git a/massa-protocol-worker/Cargo.toml b/massa-protocol-worker/Cargo.toml index 18b7f04dd9e..2817e32c9fb 100644 --- a/massa-protocol-worker/Cargo.toml +++ b/massa-protocol-worker/Cargo.toml @@ -15,6 +15,7 @@ rayon = "1.5" massa_hash = { path = "../massa-hash" } massa_logging = { path = "../massa-logging" } massa_models = { path = "../massa-models" } +massa_consensus_exports = { path = "../massa-consensus-exports" } massa_network_exports = { path = "../massa-network-exports" } massa_pool_exports = { path = "../massa-pool-exports" } massa_protocol_exports = { path = "../massa-protocol-exports" } @@ -30,3 +31,7 @@ futures = "0.3" massa_signature = { path = 
"../massa-signature" } massa_pool_exports = { path = "../massa-pool-exports", features = ["testing"] } + +[features] + +testing = ["massa_consensus_exports/testing", "massa_network_exports/testing", "massa_pool_exports/testing", "massa_protocol_exports/testing"] \ No newline at end of file diff --git a/massa-protocol-worker/src/protocol_network.rs b/massa-protocol-worker/src/protocol_network.rs index d4e79f6d7a9..2c34e74d402 100644 --- a/massa-protocol-worker/src/protocol_network.rs +++ b/massa-protocol-worker/src/protocol_network.rs @@ -17,7 +17,7 @@ use massa_models::{ wrapped::{Id, Wrapped}, }; use massa_network_exports::{AskForBlocksInfo, BlockInfoReply, NetworkEvent}; -use massa_protocol_exports::{ProtocolError, ProtocolEvent}; +use massa_protocol_exports::ProtocolError; use massa_serialization::Serializer; use massa_storage::Storage; use std::pin::Pin; @@ -98,11 +98,8 @@ impl ProtocolWorker { self.note_header_from_node(&header, &source_node_id).await? { if is_new { - self.send_protocol_event(ProtocolEvent::ReceivedBlockHeader { - block_id, - header, - }) - .await; + self.consensus_controller + .register_block_header(block_id, header); } self.update_ask_block(block_ask_timer).await?; } else { @@ -284,7 +281,7 @@ impl ProtocolWorker { /// # Ban /// Start compute the operations serialized total size with the operation we know. /// Ban the node if the operations contained in the block overflow the max size. We don't - /// forward the block to the graph in that case. + /// forward the block to the consensus in that case. /// /// # Parameters: /// - `from_node_id`: Node which sent us the information. 
@@ -428,7 +425,7 @@ impl ProtocolWorker { return Ok(()); } - let protocol_event_full_block = match self.block_wishlist.entry(block_id) { + match self.block_wishlist.entry(block_id) { Entry::Occupied(mut entry) => { let info = entry.get_mut(); let header = if let Some(header) = &info.header { @@ -471,7 +468,8 @@ impl ProtocolWorker { warn!("Node id {} sent us full operations for block id {} but they exceed max size.", from_node_id, block_id); let _ = self.ban_node(&from_node_id).await; self.block_wishlist.remove(&block_id); - ProtocolEvent::InvalidBlock { block_id, header } + self.consensus_controller + .mark_invalid_block(block_id, header); } else { if known_operations != block_ids_set { warn!( @@ -516,11 +514,10 @@ impl ProtocolWorker { let slot = wrapped_block.content.header.content.slot; // add block to local storage and claim ref block_storage.store_block(wrapped_block); - ProtocolEvent::ReceivedBlock { - slot, - block_id, - storage: block_storage, - } + + // Send to consensus + self.consensus_controller + .register_block(block_id, slot, block_storage, false); } } Entry::Vacant(_) => { @@ -532,8 +529,6 @@ impl ProtocolWorker { return Ok(()); } }; - // Send to graph - self.send_protocol_event(protocol_event_full_block).await; // Update ask block let remove_hashes = vec![block_id].into_iter().collect(); diff --git a/massa-protocol-worker/src/protocol_worker.rs b/massa-protocol-worker/src/protocol_worker.rs index b26234100ad..d99be1b3b70 100644 --- a/massa-protocol-worker/src/protocol_worker.rs +++ b/massa-protocol-worker/src/protocol_worker.rs @@ -5,6 +5,7 @@ use crate::checked_operations::CheckedOperations; use crate::sig_verifier::verify_sigs_batch; use crate::{node_info::NodeInfo, worker_operations_impl::OperationBatchBuffer}; +use massa_consensus_exports::ConsensusController; use massa_logging::massa_trace; use massa_models::slot::Slot; @@ -20,8 +21,7 @@ use massa_models::{ use massa_network_exports::{AskForBlocksInfo, NetworkCommandSender, 
NetworkEventReceiver}; use massa_pool_exports::PoolController; use massa_protocol_exports::{ - ProtocolCommand, ProtocolCommandSender, ProtocolConfig, ProtocolError, ProtocolEvent, - ProtocolEventReceiver, ProtocolManagementCommand, ProtocolManager, + ProtocolCommand, ProtocolConfig, ProtocolError, ProtocolManagementCommand, ProtocolManager, }; use massa_models::wrapped::Id; @@ -32,7 +32,6 @@ use std::mem; use std::pin::Pin; use tokio::{ sync::mpsc, - sync::mpsc::error::SendTimeoutError, time::{sleep, sleep_until, Instant, Sleep}, }; use tracing::{debug, error, info, warn}; @@ -51,22 +50,14 @@ pub async fn start_protocol_controller( config: ProtocolConfig, network_command_sender: NetworkCommandSender, network_event_receiver: NetworkEventReceiver, + protocol_command_receiver: mpsc::Receiver, + consensus_controller: Box, pool_controller: Box, storage: Storage, -) -> Result< - ( - ProtocolCommandSender, - ProtocolEventReceiver, - ProtocolManager, - ), - ProtocolError, -> { +) -> Result { debug!("starting protocol controller"); // launch worker - let (controller_event_tx, event_rx) = mpsc::channel::(config.event_channel_size); - let (command_tx, controller_command_rx) = - mpsc::channel::(config.controller_channel_size); let (manager_tx, controller_manager_rx) = mpsc::channel::(1); let pool_controller = pool_controller.clone(); let join_handle = tokio::spawn(async move { @@ -75,10 +66,10 @@ pub async fn start_protocol_controller( ProtocolWorkerChannels { network_command_sender, network_event_receiver, - controller_event_tx, - controller_command_rx, + controller_command_rx: protocol_command_receiver, controller_manager_rx, }, + consensus_controller, pool_controller, storage, ) @@ -96,11 +87,7 @@ pub async fn start_protocol_controller( } }); debug!("protocol controller ready"); - Ok(( - ProtocolCommandSender(command_tx), - ProtocolEventReceiver(event_rx), - ProtocolManager::new(join_handle, manager_tx), - )) + Ok(ProtocolManager::new(join_handle, manager_tx)) } /// Info 
about a block we've seen @@ -132,12 +119,12 @@ impl BlockInfo { pub struct ProtocolWorker { /// Protocol configuration. pub(crate) config: ProtocolConfig, + /// Consensus controller + pub(crate) consensus_controller: Box, /// Associated network command sender. pub(crate) network_command_sender: NetworkCommandSender, /// Associated network event receiver. network_event_receiver: NetworkEventReceiver, - /// Channel to send protocol events to the controller. - controller_event_tx: mpsc::Sender, /// Channel to send protocol pool events to the controller. pool_controller: Box, /// Channel receiving commands from the controller. @@ -171,8 +158,6 @@ pub struct ProtocolWorkerChannels { pub network_command_sender: NetworkCommandSender, /// network event receiver pub network_event_receiver: NetworkEventReceiver, - /// protocol event sender - pub controller_event_tx: mpsc::Sender, /// protocol command receiver pub controller_command_rx: mpsc::Receiver, /// protocol management command receiver @@ -193,10 +178,10 @@ impl ProtocolWorker { ProtocolWorkerChannels { network_command_sender, network_event_receiver, - controller_event_tx, controller_command_rx, controller_manager_rx, }: ProtocolWorkerChannels, + consensus_controller: Box, pool_controller: Box, storage: Storage, ) -> ProtocolWorker { @@ -204,7 +189,7 @@ impl ProtocolWorker { config, network_command_sender, network_event_receiver, - controller_event_tx, + consensus_controller, pool_controller, controller_command_rx, controller_manager_rx, @@ -224,25 +209,6 @@ impl ProtocolWorker { } } - pub(crate) async fn send_protocol_event(&self, event: ProtocolEvent) { - let result = self - .controller_event_tx - .send_timeout(event, self.config.max_send_wait.to_duration()) - .await; - match result { - Ok(()) => {} - Err(SendTimeoutError::Closed(event)) => { - warn!( - "Failed to send ProtocolEvent due to channel closure: {:?}.", - event - ); - } - Err(SendTimeoutError::Timeout(event)) => { - warn!("Failed to send ProtocolEvent due 
to timeout: {:?}.", event); - } - } - } - /// Main protocol worker loop. Consumes self. /// It is mostly a `tokio::select!` inside a loop /// waiting on : @@ -930,6 +896,14 @@ impl ProtocolWorker { let mut received_ids = PreHashSet::with_capacity(length); for operation in operations { let operation_id = operation.id; + if operation.serialized_size() > self.config.max_serialized_operations_size_per_block { + return Err(ProtocolError::InvalidOperationError(format!( + "Operation {} exceeds max block size, maximum authorized {} bytes but found {} bytes", + operation_id, + operation.serialized_size(), + self.config.max_serialized_operations_size_per_block + ))); + }; received_ids.insert(operation_id); // Check operation signature only if not already checked. @@ -981,7 +955,7 @@ impl ProtocolWorker { match expire_period_timestamp { Ok(slot_timestamp) => { slot_timestamp - .saturating_add(self.config.max_endorsements_propagation_time) + .saturating_add(self.config.max_operations_propagation_time) < now } Err(_) => true, diff --git a/massa-protocol-worker/src/tests/ask_block_scenarios.rs b/massa-protocol-worker/src/tests/ask_block_scenarios.rs index bbe8c4f6a8f..cad0469fe5d 100644 --- a/massa-protocol-worker/src/tests/ask_block_scenarios.rs +++ b/massa-protocol-worker/src/tests/ask_block_scenarios.rs @@ -1,12 +1,13 @@ // Copyright (c) 2022 MASSA LABS use super::tools::protocol_test; +use massa_consensus_exports::test_exports::MockConsensusControllerMessage; use massa_models::prehash::PreHashSet; use massa_models::{block::BlockId, slot::Slot}; use massa_network_exports::{AskForBlocksInfo, BlockInfoReply, NetworkCommand}; use massa_protocol_exports::tests::tools; use massa_protocol_exports::tests::tools::{asked_list, assert_hash_asked_to_node}; -use massa_protocol_exports::ProtocolEvent; +use massa_time::MassaTime; use serial_test::serial; #[tokio::test] @@ -18,9 +19,9 @@ async fn test_full_ask_block_workflow() { protocol_test( protocol_config, async move |mut 
network_controller, - mut protocol_event_receiver, mut protocol_command_sender, protocol_manager, + mut protocol_consensus_event_receiver, protocol_pool_event_receiver| { let node_a = tools::create_and_connect_nodes(1, &mut network_controller) .await @@ -54,15 +55,18 @@ async fn test_full_ask_block_workflow() { .await; // Send wishlist - protocol_command_sender - .send_wishlist_delta( - vec![(block.id, Some(block.content.header.clone()))] - .into_iter() - .collect(), - PreHashSet::::default(), - ) - .await - .unwrap(); + let header = block.content.header.clone(); + let protocol_command_sender = tokio::task::spawn_blocking(move || { + protocol_command_sender + .send_wishlist_delta( + vec![(block.id, Some(header))].into_iter().collect(), + PreHashSet::::default(), + ) + .unwrap(); + protocol_command_sender + }) + .await + .unwrap(); // assert it was asked to node A, then B assert_hash_asked_to_node(block.id, node_a.id, &mut network_controller).await; @@ -105,28 +109,49 @@ async fn test_full_ask_block_workflow() { ) .await; - // Protocol sends expected block to consensus. - loop { - match protocol_event_receiver.wait_event().await.unwrap() { - ProtocolEvent::ReceivedBlock { - slot, - block_id, - storage, - } => { - assert_eq!(slot, block.content.header.content.slot); - assert_eq!(block_id, block.id); - let received_block = storage.read_blocks().get(&block_id).cloned().unwrap(); - assert_eq!(received_block.content.operations, block.content.operations); - break; + let protocol_consensus_event_receiver = tokio::task::spawn_blocking(move || { + // Protocol sends expected block to consensus. 
+ loop { + match protocol_consensus_event_receiver.wait_command( + MassaTime::from_millis(100), + |command| match command { + MockConsensusControllerMessage::RegisterBlock { + slot, + block_id, + block_storage, + created: _, + } => { + assert_eq!(slot, block.content.header.content.slot); + assert_eq!(block_id, block.id); + let received_block = + block_storage.read_blocks().get(&block_id).cloned().unwrap(); + assert_eq!( + received_block.content.operations, + block.content.operations + ); + Some(()) + } + _evt => None, + }, + ) { + Some(()) => { + break; + } + None => { + continue; + } } - _evt => continue, - }; - } + } + return protocol_consensus_event_receiver; + }) + .await + .unwrap(); + ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -143,9 +168,9 @@ async fn test_empty_block() { protocol_test( protocol_config, async move |mut network_controller, - mut protocol_event_receiver, mut protocol_command_sender, protocol_manager, + mut protocol_consensus_event_receiver, protocol_pool_event_receiver| { let node_a = tools::create_and_connect_nodes(1, &mut network_controller) .await @@ -171,15 +196,18 @@ async fn test_empty_block() { .await; // send wishlist - protocol_command_sender - .send_wishlist_delta( - vec![(hash_1, Some(block.content.header.clone()))] - .into_iter() - .collect(), - PreHashSet::::default(), - ) - .await - .unwrap(); + let header = block.content.header.clone(); + let protocol_command_sender = tokio::task::spawn_blocking(move || { + protocol_command_sender + .send_wishlist_delta( + vec![(hash_1, Some(header))].into_iter().collect(), + PreHashSet::::default(), + ) + .unwrap(); + protocol_command_sender + }) + .await + .unwrap(); // assert it was asked to node A, then B assert_hash_asked_to_node(hash_1, node_a.id, &mut network_controller).await; @@ -209,27 +237,47 @@ async fn test_empty_block() { ); // Protocol sends expected block to 
consensus. - loop { - match protocol_event_receiver.wait_event().await.unwrap() { - ProtocolEvent::ReceivedBlock { - slot, - block_id, - storage, - } => { - assert_eq!(slot, block.content.header.content.slot); - assert_eq!(block_id, block.id); - let received_block = storage.read_blocks().get(&block_id).cloned().unwrap(); - assert_eq!(received_block.content.operations, block.content.operations); - break; + let protocol_consensus_event_receiver = tokio::task::spawn_blocking(move || { + loop { + match protocol_consensus_event_receiver.wait_command( + MassaTime::from_millis(100), + |command| match command { + MockConsensusControllerMessage::RegisterBlock { + slot, + block_id, + block_storage, + created: _, + } => { + assert_eq!(slot, block.content.header.content.slot); + assert_eq!(block_id, block.id); + let received_block = + block_storage.read_blocks().get(&block_id).cloned().unwrap(); + assert_eq!( + received_block.content.operations, + block.content.operations + ); + Some(()) + } + _evt => None, + }, + ) { + Some(()) => { + break; + } + None => { + continue; + } } - _evt => continue, - }; - } + } + protocol_consensus_event_receiver + }) + .await + .unwrap(); ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -245,9 +293,9 @@ async fn test_someone_knows_it() { protocol_test( protocol_config, async move |mut network_controller, - mut protocol_event_receiver, mut protocol_command_sender, protocol_manager, + mut protocol_consensus_event_receiver, protocol_pool_event_receiver| { let node_a = tools::create_and_connect_nodes(1, &mut network_controller) .await @@ -278,21 +326,33 @@ async fn test_someone_knows_it() { .send_header(node_c.id, block.content.header.clone()) .await; - match protocol_event_receiver.wait_event().await.unwrap() { - ProtocolEvent::ReceivedBlockHeader { .. 
} => {} - _ => panic!("unexpected protocol event"), - }; + let protocol_consensus_event_receiver = tokio::task::spawn_blocking(move || { + protocol_consensus_event_receiver.wait_command( + MassaTime::from_millis(100), + |command| match command { + MockConsensusControllerMessage::RegisterBlockHeader { .. } => Some(()), + _ => panic!("unexpected protocol event"), + }, + ); + protocol_consensus_event_receiver + }) + .await + .unwrap(); // send wishlist - protocol_command_sender - .send_wishlist_delta( - vec![(hash_1, Some(block.content.header.clone()))] - .into_iter() - .collect(), - PreHashSet::::default(), - ) - .await - .unwrap(); + let protocol_command_sender = tokio::task::spawn_blocking(move || { + protocol_command_sender + .send_wishlist_delta( + vec![(hash_1, Some(block.content.header.clone()))] + .into_iter() + .collect(), + PreHashSet::::default(), + ) + .unwrap(); + protocol_command_sender + }) + .await + .unwrap(); assert_hash_asked_to_node(hash_1, node_c.id, &mut network_controller).await; @@ -329,9 +389,9 @@ async fn test_someone_knows_it() { ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -347,9 +407,9 @@ async fn test_dont_want_it_anymore() { protocol_test( protocol_config, async move |mut network_controller, - protocol_event_receiver, mut protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver| { let node_a = tools::create_and_connect_nodes(1, &mut network_controller) .await @@ -370,24 +430,32 @@ async fn test_dont_want_it_anymore() { // end set up // send wishlist - protocol_command_sender - .send_wishlist_delta( - vec![(hash_1, Some(block.content.header.clone()))] - .into_iter() - .collect(), - PreHashSet::::default(), - ) - .await - .unwrap(); + protocol_command_sender = tokio::task::spawn_blocking(move || { + protocol_command_sender + .send_wishlist_delta( + vec![(hash_1, 
Some(block.content.header.clone()))] + .into_iter() + .collect(), + PreHashSet::::default(), + ) + .unwrap(); + protocol_command_sender + }) + .await + .unwrap(); // assert it was asked to node A assert_hash_asked_to_node(hash_1, node_a.id, &mut network_controller).await; // we don't want it anymore - protocol_command_sender - .send_wishlist_delta(Default::default(), vec![hash_1].into_iter().collect()) - .await - .unwrap(); + protocol_command_sender = tokio::task::spawn_blocking(move || { + protocol_command_sender + .send_wishlist_delta(Default::default(), vec![hash_1].into_iter().collect()) + .unwrap(); + protocol_command_sender + }) + .await + .unwrap(); // 7. Make sure protocol did not send additional ask for block commands. let ask_for_block_cmd_filter = |cmd| match cmd { @@ -405,9 +473,9 @@ async fn test_dont_want_it_anymore() { ); ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -424,9 +492,9 @@ async fn test_no_one_has_it() { protocol_test( protocol_config, async move |mut network_controller, - protocol_event_receiver, mut protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver| { let node_a = tools::create_and_connect_nodes(1, &mut network_controller) .await @@ -447,15 +515,19 @@ async fn test_no_one_has_it() { // end set up // send wishlist - protocol_command_sender - .send_wishlist_delta( - vec![(hash_1, Some(block.content.header.clone()))] - .into_iter() - .collect(), - PreHashSet::::default(), - ) - .await - .unwrap(); + let protocol_command_sender = tokio::task::spawn_blocking(move || { + protocol_command_sender + .send_wishlist_delta( + vec![(hash_1, Some(block.content.header.clone()))] + .into_iter() + .collect(), + PreHashSet::::default(), + ) + .unwrap(); + protocol_command_sender + }) + .await + .unwrap(); // assert it was asked to node A assert_hash_asked_to_node(hash_1, 
node_a.id, &mut network_controller).await; @@ -487,9 +559,9 @@ async fn test_no_one_has_it() { ); ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -505,9 +577,9 @@ async fn test_multiple_blocks_without_a_priori() { protocol_test( protocol_config, async move |mut network_controller, - protocol_event_receiver, mut protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver| { let node_a = tools::create_and_connect_nodes(1, &mut network_controller) .await @@ -535,18 +607,22 @@ async fn test_multiple_blocks_without_a_priori() { tokio::time::sleep(tokio::time::Duration::from_millis(100)).await; // send wishlist - protocol_command_sender - .send_wishlist_delta( - vec![ - (hash_1, Some(block_1.content.header.clone())), - (hash_2, Some(block_2.content.header.clone())), - ] - .into_iter() - .collect(), - PreHashSet::::default(), - ) - .await - .unwrap(); + let protocol_command_sender = tokio::task::spawn_blocking(move || { + protocol_command_sender + .send_wishlist_delta( + vec![ + (hash_1, Some(block_1.content.header.clone())), + (hash_2, Some(block_2.content.header.clone())), + ] + .into_iter() + .collect(), + PreHashSet::::default(), + ) + .unwrap(); + protocol_command_sender + }) + .await + .unwrap(); let list = asked_list(&mut network_controller).await; for (node_id, set) in list.into_iter() { @@ -561,9 +637,9 @@ async fn test_multiple_blocks_without_a_priori() { } ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, diff --git a/massa-protocol-worker/src/tests/ban_nodes_scenarios.rs b/massa-protocol-worker/src/tests/ban_nodes_scenarios.rs index fec4bb7dfac..12e5c4a76c2 100644 --- a/massa-protocol-worker/src/tests/ban_nodes_scenarios.rs +++ b/massa-protocol-worker/src/tests/ban_nodes_scenarios.rs 
@@ -1,6 +1,7 @@ // Copyright (c) 2022 MASSA LABS use super::tools::protocol_test; +use massa_consensus_exports::test_exports::MockConsensusControllerMessage; use massa_hash::Hash; use massa_models::operation::OperationId; use massa_models::prehash::PreHashSet; @@ -9,8 +10,8 @@ use massa_models::{block::BlockId, slot::Slot}; use massa_network_exports::{BlockInfoReply, NetworkCommand}; use massa_pool_exports::test_exports::MockPoolControllerMessage; use massa_protocol_exports::tests::tools; -use massa_protocol_exports::ProtocolEvent; use massa_signature::KeyPair; +use massa_time::MassaTime; use serial_test::serial; use std::collections::HashSet; use std::time::Duration; @@ -22,9 +23,9 @@ async fn test_protocol_bans_node_sending_block_header_with_invalid_signature() { protocol_test( protocol_config, async move |mut network_controller, - mut protocol_event_receiver, protocol_command_sender, protocol_manager, + mut protocol_consensus_event_receiver, protocol_pool_event_receiver| { // Create 1 node. let mut nodes = tools::create_and_connect_nodes(1, &mut network_controller).await; @@ -46,23 +47,31 @@ async fn test_protocol_bans_node_sending_block_header_with_invalid_signature() { tools::assert_banned_nodes(vec![creator_node.id], &mut network_controller).await; // Check protocol does not send block to consensus. - match tools::wait_protocol_event(&mut protocol_event_receiver, 1000.into(), |evt| { - match evt { - evt @ ProtocolEvent::ReceivedBlock { .. } => Some(evt), - evt @ ProtocolEvent::ReceivedBlockHeader { .. } => Some(evt), - evt @ ProtocolEvent::InvalidBlock { .. } => Some(evt), - } + let protocol_consensus_event_receiver = tokio::task::spawn_blocking(move || { + protocol_consensus_event_receiver.wait_command( + MassaTime::from_millis(1000), + |command| match command { + MockConsensusControllerMessage::RegisterBlock { .. } => { + panic!("Protocol unexpectedly sent block.") + } + MockConsensusControllerMessage::RegisterBlockHeader { .. 
} => { + panic!("Protocol unexpectedly sent header.") + } + MockConsensusControllerMessage::MarkInvalidBlock { .. } => { + panic!("Protocol unexpectedly sent invalid block.") + } + _ => Some(()), + }, + ); + protocol_consensus_event_receiver }) .await - { - None => {} - _ => panic!("Protocol unexpectedly sent block or header."), - } + .unwrap(); ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -118,6 +127,54 @@ async fn test_protocol_bans_node_sending_operation_with_invalid_signature() { .await; } +#[tokio::test] +#[serial] +async fn test_protocol_bans_node_sending_operation_with_size_bigger_than_max_block_size() { + let protocol_config = &tools::PROTOCOL_CONFIG; + protocol_test( + protocol_config, + async move |mut network_controller, + protocol_event_receiver, + protocol_command_sender, + protocol_manager, + mut pool_event_receiver| { + // Create 1 node. + let mut nodes = tools::create_and_connect_nodes(1, &mut network_controller).await; + + let creator_node = nodes.pop().expect("Failed to get node info."); + + // 1. Create an operation + let mut operation = + tools::create_operation_with_expire_period(&creator_node.keypair, 1); + + // 2. Change the serialized data + operation.serialized_data = vec![1; 500_001]; + + // 3. Send block to protocol. + network_controller + .send_operations(creator_node.id, vec![operation]) + .await; + + // The node is banned. + tools::assert_banned_nodes(vec![creator_node.id], &mut network_controller).await; + + // Check protocol does not send operation to pool. + pool_event_receiver.wait_command(1000.into(), |evt| match evt { + evt @ MockPoolControllerMessage::AddOperations { .. 
} => Some(evt), + _ => None, + }); + ( + network_controller, + protocol_event_receiver, + protocol_command_sender, + protocol_manager, + pool_event_receiver, + ) + }, + ) + .await; +} + #[tokio::test] #[serial] async fn test_protocol_bans_node_sending_header_with_invalid_signature() { @@ -125,9 +182,9 @@ async fn test_protocol_bans_node_sending_header_with_invalid_signature() { protocol_test( protocol_config, async move |mut network_controller, - mut protocol_event_receiver, mut protocol_command_sender, protocol_manager, + mut protocol_consensus_event_receiver, protocol_pool_event_receiver| { // Create 1 node. let mut nodes = tools::create_and_connect_nodes(1, &mut network_controller).await; @@ -150,21 +207,33 @@ async fn test_protocol_bans_node_sending_header_with_invalid_signature() { .send_header(to_ban_node.id, block.content.header.clone()) .await; - match protocol_event_receiver.wait_event().await.unwrap() { - ProtocolEvent::ReceivedBlockHeader { .. } => {} - _ => panic!("unexpected protocol event"), - }; + let mut protocol_consensus_event_receiver = tokio::task::spawn_blocking(move || { + protocol_consensus_event_receiver.wait_command( + MassaTime::from_millis(1000), + |command| match command { + MockConsensusControllerMessage::RegisterBlockHeader { .. 
} => Some(()), + _ => panic!("unexpected protocol event"), + }, + ); + protocol_consensus_event_receiver + }) + .await + .unwrap(); // send wishlist - protocol_command_sender - .send_wishlist_delta( - vec![(block.id, Some(block.content.header))] - .into_iter() - .collect(), - PreHashSet::::default(), - ) - .await - .unwrap(); + let protocol_command_sender = tokio::task::spawn_blocking(move || { + protocol_command_sender + .send_wishlist_delta( + vec![(block.id, Some(block.content.header))] + .into_iter() + .collect(), + PreHashSet::::default(), + ) + .unwrap(); + protocol_command_sender + }) + .await + .unwrap(); tools::assert_hash_asked_to_node(block.id, to_ban_node.id, &mut network_controller) .await; @@ -198,23 +267,31 @@ async fn test_protocol_bans_node_sending_header_with_invalid_signature() { .await; // Check protocol does not send block to consensus. - match tools::wait_protocol_event(&mut protocol_event_receiver, 1000.into(), |evt| { - match evt { - evt @ ProtocolEvent::ReceivedBlock { .. } => Some(evt), - evt @ ProtocolEvent::ReceivedBlockHeader { .. } => Some(evt), - evt @ ProtocolEvent::InvalidBlock { .. } => Some(evt), - } + let protocol_consensus_event_receiver = tokio::task::spawn_blocking(move || { + protocol_consensus_event_receiver.wait_command( + MassaTime::from_millis(1000), + |command| match command { + MockConsensusControllerMessage::RegisterBlock { .. } => { + panic!("Protocol unexpectedly sent block.") + } + MockConsensusControllerMessage::RegisterBlockHeader { .. } => { + panic!("Protocol unexpectedly sent header.") + } + MockConsensusControllerMessage::MarkInvalidBlock { .. 
} => { + panic!("Protocol unexpectedly sent invalid block.") + } + _ => Some(()), + }, + ); + protocol_consensus_event_receiver }) .await - { - None => {} - _ => panic!("Protocol unexpectedly sent header coming from banned node."), - } + .unwrap(); ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -229,9 +306,9 @@ async fn test_protocol_does_not_asks_for_block_from_banned_node_who_propagated_h protocol_test( protocol_config, async move |mut network_controller, - mut protocol_event_receiver, mut protocol_command_sender, protocol_manager, + mut protocol_consensus_event_receiver, protocol_pool_event_receiver| { let ask_for_block_cmd_filter = |cmd| match cmd { cmd @ NetworkCommand::AskForBlocks { .. } => Some(cmd), @@ -251,18 +328,21 @@ async fn test_protocol_does_not_asks_for_block_from_banned_node_who_propagated_h .await; // Check protocol sends header to consensus. - let received_hash = - match tools::wait_protocol_event(&mut protocol_event_receiver, 1000.into(), |evt| { - match evt { - evt @ ProtocolEvent::ReceivedBlockHeader { .. } => Some(evt), - _ => None, - } + let (protocol_consensus_event_receiver, received_hash) = + tokio::task::spawn_blocking(move || { + let id = protocol_consensus_event_receiver + .wait_command(MassaTime::from_millis(1000), |command| match command { + MockConsensusControllerMessage::RegisterBlockHeader { + block_id, + header: _, + } => Some(block_id), + _ => panic!("unexpected protocol event"), + }) + .unwrap(); + (protocol_consensus_event_receiver, id) }) .await - { - Some(ProtocolEvent::ReceivedBlockHeader { block_id, .. }) => block_id, - _ => panic!("Unexpected or no protocol event."), - }; + .unwrap(); // 3. Check that protocol sent the right header to consensus. 
let expected_hash = block.id; @@ -280,15 +360,19 @@ async fn test_protocol_does_not_asks_for_block_from_banned_node_who_propagated_h tools::assert_banned_nodes(vec![creator_node.id], &mut network_controller).await; // 5. Ask for block. - protocol_command_sender - .send_wishlist_delta( - vec![(expected_hash, Some(block.content.header.clone()))] - .into_iter() - .collect(), - PreHashSet::::default(), - ) - .await - .expect("Failed to ask for block."); + let protocol_command_sender = tokio::task::spawn_blocking(move || { + protocol_command_sender + .send_wishlist_delta( + vec![(expected_hash, Some(block.content.header.clone()))] + .into_iter() + .collect(), + PreHashSet::::default(), + ) + .expect("Failed to ask for block."); + protocol_command_sender + }) + .await + .unwrap(); // 6. Make sure protocol did not ask for the block from the banned node. let got_more_commands = network_controller @@ -301,9 +385,9 @@ async fn test_protocol_does_not_asks_for_block_from_banned_node_who_propagated_h ); ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -318,9 +402,9 @@ async fn test_protocol_does_not_send_blocks_when_asked_for_by_banned_node() { protocol_test( protocol_config, async move |mut network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver| { let send_block_or_header_cmd_filter = |cmd| match cmd { cmd @ NetworkCommand::SendBlockInfo { .. 
} => Some(cmd), @@ -389,9 +473,9 @@ async fn test_protocol_does_not_send_blocks_when_asked_for_by_banned_node() { ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -406,9 +490,9 @@ async fn test_protocol_bans_all_nodes_propagating_an_attack_attempt() { protocol_test( protocol_config, async move |mut network_controller, - mut protocol_event_receiver, mut protocol_command_sender, protocol_manager, + mut protocol_consensus_event_receiver, protocol_pool_event_receiver| { // Create 4 nodes. let nodes = tools::create_and_connect_nodes(4, &mut network_controller).await; @@ -425,38 +509,30 @@ async fn test_protocol_bans_all_nodes_propagating_an_attack_attempt() { .send_header(creator_node.id, block.content.header.clone()) .await; + let (old_protocol_consensus_event_receiver, optional_block_id) = + tokio::task::spawn_blocking(move || { + let id = protocol_consensus_event_receiver.wait_command( + MassaTime::from_millis(1000), + |command| match command { + MockConsensusControllerMessage::RegisterBlockHeader { + block_id, + header: _, + } => Some(block_id), + _ => panic!("unexpected protocol event"), + }, + ); + (protocol_consensus_event_receiver, id) + }) + .await + .unwrap(); + protocol_consensus_event_receiver = old_protocol_consensus_event_receiver; // Check protocol sends header to consensus (only the 1st time: later, there is caching). if idx == 0 { - let received_hash = match tools::wait_protocol_event( - &mut protocol_event_receiver, - 1000.into(), - |evt| match evt { - evt @ ProtocolEvent::ReceivedBlockHeader { .. } => Some(evt), - _ => None, - }, - ) - .await - { - Some(ProtocolEvent::ReceivedBlockHeader { block_id, .. }) => block_id, - Some(evt) => panic!("Unexpected protocol event {:?}", evt), - None => panic!("no protocol event"), - }; + let received_hash = optional_block_id.unwrap(); // Check that protocol sent the right header to consensus. 
assert_eq!(expected_hash, received_hash); } else { - assert!( - tools::wait_protocol_event( - &mut protocol_event_receiver, - 150.into(), - |evt| match evt { - evt @ ProtocolEvent::ReceivedBlockHeader { .. } => Some(evt), - _ => None, - }, - ) - .await - .is_none(), - "caching was ignored" - ); + assert!(optional_block_id.is_none(), "caching was ignored"); } } @@ -471,10 +547,14 @@ async fn test_protocol_bans_all_nodes_propagating_an_attack_attempt() { tokio::time::sleep(Duration::from_millis(250)).await; // Simulate consensus notifying an attack attempt. - protocol_command_sender - .notify_block_attack(expected_hash) - .await - .expect("Failed to ask for block."); + let protocol_command_sender = tokio::task::spawn_blocking(move || { + protocol_command_sender + .notify_block_attack(expected_hash) + .expect("Failed to ask for block."); + protocol_command_sender + }) + .await + .unwrap(); // Make sure all initial nodes are banned. let node_ids = nodes.into_iter().map(|node_info| node_info.id).collect(); @@ -496,9 +576,9 @@ async fn test_protocol_bans_all_nodes_propagating_an_attack_attempt() { ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -513,9 +593,9 @@ async fn test_protocol_removes_banned_node_on_disconnection() { protocol_test( protocol_config, async move |mut network_controller, - mut protocol_event_receiver, protocol_command_sender, protocol_manager, + mut protocol_consensus_event_receiver, protocol_pool_event_receiver| { let mut nodes = tools::create_and_connect_nodes(1, &mut network_controller).await; @@ -542,27 +622,30 @@ async fn test_protocol_removes_banned_node_on_disconnection() { .await; // Check protocol sends header to consensus. - let received_hash = - match tools::wait_protocol_event(&mut protocol_event_receiver, 1000.into(), |evt| { - match evt { - evt @ ProtocolEvent::ReceivedBlockHeader { .. 
} => Some(evt), - _ => None, - } + let (protocol_consensus_event_receiver, received_hash) = + tokio::task::spawn_blocking(move || { + let id = protocol_consensus_event_receiver + .wait_command(MassaTime::from_millis(1000), |command| match command { + MockConsensusControllerMessage::RegisterBlockHeader { + block_id, + header: _, + } => Some(block_id), + _ => panic!("unexpected protocol event"), + }) + .unwrap(); + (protocol_consensus_event_receiver, id) }) .await - { - Some(ProtocolEvent::ReceivedBlockHeader { block_id, .. }) => block_id, - _ => panic!("Unexpected or no protocol event."), - }; + .unwrap(); // Check that protocol sent the right header to consensus. let expected_hash = block.id; assert_eq!(expected_hash, received_hash); ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, diff --git a/massa-protocol-worker/src/tests/endorsements_scenarios.rs b/massa-protocol-worker/src/tests/endorsements_scenarios.rs index 41273569bf4..588b329efd0 100644 --- a/massa-protocol-worker/src/tests/endorsements_scenarios.rs +++ b/massa-protocol-worker/src/tests/endorsements_scenarios.rs @@ -3,12 +3,13 @@ // RUST_BACKTRACE=1 cargo test test_one_handshake -- --nocapture --test-threads=1 use super::tools::protocol_test; +use massa_consensus_exports::test_exports::MockConsensusControllerMessage; use massa_models::{address::Address, slot::Slot}; use massa_network_exports::NetworkCommand; use massa_pool_exports::test_exports::MockPoolControllerMessage; use massa_protocol_exports::tests::tools; -use massa_protocol_exports::ProtocolEvent; use massa_storage::Storage; +use massa_time::MassaTime; use serial_test::serial; use std::thread; use std::time::Duration; @@ -20,9 +21,9 @@ async fn test_protocol_sends_valid_endorsements_it_receives_to_pool() { protocol_test( protocol_config, async move |mut network_controller, - protocol_event_receiver, protocol_command_sender, 
protocol_manager, + protocol_consensus_event_receiver, mut pool_event_receiver| { // Create 1 node. let mut nodes = tools::create_and_connect_nodes(1, &mut network_controller).await; @@ -56,9 +57,9 @@ async fn test_protocol_sends_valid_endorsements_it_receives_to_pool() { ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, pool_event_receiver, ) }, @@ -73,9 +74,9 @@ async fn test_protocol_does_not_send_invalid_endorsements_it_receives_to_pool() protocol_test( protocol_config, async move |mut network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, mut pool_event_receiver| { // Create 1 node. let mut nodes = tools::create_and_connect_nodes(1, &mut network_controller).await; @@ -103,9 +104,9 @@ async fn test_protocol_does_not_send_invalid_endorsements_it_receives_to_pool() ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, pool_event_receiver, ) }, @@ -120,9 +121,9 @@ async fn test_protocol_propagates_endorsements_to_active_nodes() { protocol_test( protocol_config, async move |mut network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, mut pool_event_receiver| { // Create 2 nodes. 
let nodes = tools::create_and_connect_nodes(2, &mut network_controller).await; @@ -170,9 +171,9 @@ async fn test_protocol_propagates_endorsements_to_active_nodes() { } ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, pool_event_receiver, ) }, @@ -187,9 +188,9 @@ async fn test_protocol_propagates_endorsements_only_to_nodes_that_dont_know_abou protocol_test( protocol_config, async move |mut network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, mut pool_event_receiver| { // Create 1 node. let nodes = tools::create_and_connect_nodes(1, &mut network_controller).await; @@ -246,9 +247,9 @@ async fn test_protocol_propagates_endorsements_only_to_nodes_that_dont_know_abou } ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, pool_event_receiver, ) }, @@ -264,9 +265,9 @@ async fn test_protocol_propagates_endorsements_only_to_nodes_that_dont_know_abou protocol_test( protocol_config, async move |mut network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver| { // Create 1 node. let nodes = tools::create_and_connect_nodes(1, &mut network_controller).await; @@ -318,9 +319,9 @@ async fn test_protocol_propagates_endorsements_only_to_nodes_that_dont_know_abou ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -337,9 +338,9 @@ async fn test_protocol_propagates_endorsements_only_to_nodes_that_dont_know_abou protocol_test( protocol_config, async move |mut network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver| { // Create 1 node. 
let nodes = tools::create_and_connect_nodes(1, &mut network_controller).await; @@ -392,9 +393,9 @@ async fn test_protocol_propagates_endorsements_only_to_nodes_that_dont_know_abou ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -410,9 +411,9 @@ async fn test_protocol_propagates_endorsements_only_to_nodes_that_dont_know_abou protocol_test( protocol_config, async move |mut network_controller, - mut protocol_event_receiver, protocol_command_sender, protocol_manager, + mut protocol_consensus_event_receiver, protocol_pool_event_receiver| { // Create 2 nodes. let nodes = tools::create_and_connect_nodes(2, &mut network_controller).await; @@ -440,13 +441,18 @@ async fn test_protocol_propagates_endorsements_only_to_nodes_that_dont_know_abou // Wait for the event to be sure that the node is connected, // and noted as knowing the block and its endorsements. - let _ = tools::wait_protocol_event(&mut protocol_event_receiver, 1000.into(), |evt| { - match evt { - evt @ ProtocolEvent::ReceivedBlockHeader { .. } => Some(evt), - _ => None, - } + let protocol_consensus_event_receiver = tokio::task::spawn_blocking(move || { + protocol_consensus_event_receiver.wait_command( + MassaTime::from_millis(1000), + |command| match command { + MockConsensusControllerMessage::RegisterBlockHeader { .. 
} => Some(()), + _ => panic!("Node isn't connected or didn't mark block as known."), + }, + ); + protocol_consensus_event_receiver }) - .await; + .await + .unwrap(); // Send the endorsement to protocol // it should not propagate to the node that already knows about it @@ -478,9 +484,9 @@ async fn test_protocol_propagates_endorsements_only_to_nodes_that_dont_know_abou ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -495,9 +501,9 @@ async fn test_protocol_does_not_propagates_endorsements_when_receiving_those_ins protocol_test( protocol_config, async move |mut network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver| { // Create 2 nodes. let mut nodes = tools::create_and_connect_nodes(2, &mut network_controller).await; @@ -543,9 +549,9 @@ async fn test_protocol_does_not_propagates_endorsements_when_receiving_those_ins } ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, diff --git a/massa-protocol-worker/src/tests/in_block_operations_scenarios.rs b/massa-protocol-worker/src/tests/in_block_operations_scenarios.rs index 1daca60df23..c891c9bbaf1 100644 --- a/massa-protocol-worker/src/tests/in_block_operations_scenarios.rs +++ b/massa-protocol-worker/src/tests/in_block_operations_scenarios.rs @@ -1,6 +1,7 @@ // Copyright (c) 2022 MASSA LABS -use super::tools::protocol_test; +use super::tools::{protocol_test, send_and_propagate_block}; +use massa_consensus_exports::test_exports::MockConsensusControllerMessage; use massa_hash::Hash; use massa_models::operation::OperationId; use massa_models::wrapped::{Id, WrappedContent}; @@ -13,9 +14,9 @@ use massa_network_exports::NetworkCommand; use massa_protocol_exports::tests::tools; use 
massa_protocol_exports::tests::tools::{ create_and_connect_nodes, create_block_with_operations, create_operation_with_expire_period, - send_and_propagate_block, }; use massa_signature::KeyPair; +use massa_time::MassaTime; use serial_test::serial; #[tokio::test] @@ -25,9 +26,9 @@ async fn test_protocol_does_propagate_operations_received_in_blocks() { protocol_test( protocol_config, async move |mut network_controller, - mut protocol_event_receiver, mut protocol_command_sender, protocol_manager, + mut protocol_consensus_event_receiver, protocol_pool_event_receiver| { // Create 2 node. let mut nodes = create_and_connect_nodes(2, &mut network_controller).await; @@ -53,17 +54,46 @@ async fn test_protocol_does_propagate_operations_received_in_blocks() { Slot::new(1, op_thread), vec![op.clone()], ); + let block_id = block.id; send_and_propagate_block( &mut network_controller, block, - true, creator_node.id, - &mut protocol_event_receiver, &mut protocol_command_sender, vec![op.clone()], ) .await; + // Check protocol sends block to consensus. + let (protocol_consensus_event_receiver, expected_hash) = + tokio::task::spawn_blocking(move || { + let header_id = protocol_consensus_event_receiver + .wait_command(MassaTime::from_millis(1000), |command| match command { + MockConsensusControllerMessage::RegisterBlockHeader { + block_id, + header: _, + } => Some(block_id), + _ => panic!("Unexpected or no protocol event."), + }) + .unwrap(); + let id = protocol_consensus_event_receiver + .wait_command(MassaTime::from_millis(1000), |command| match command { + MockConsensusControllerMessage::RegisterBlock { + block_id, + slot: _, + block_storage: _, + created: _, + } => Some(block_id), + _ => panic!("Unexpected or no protocol event."), + }) + .unwrap(); + assert_eq!(header_id, id); + (protocol_consensus_event_receiver, id) + }) + .await + .unwrap(); + assert_eq!(expected_hash, block_id); + // Propagates the operation found in the block. 
if let Some(NetworkCommand::SendOperationAnnouncements { to_node, batch }) = network_controller @@ -81,9 +111,9 @@ async fn test_protocol_does_propagate_operations_received_in_blocks() { }; ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -104,9 +134,9 @@ async fn test_protocol_sends_blocks_with_operations_to_consensus() { protocol_test( protocol_config, async move |mut network_controller, - mut protocol_event_receiver, mut protocol_command_sender, protocol_manager, + mut protocol_consensus_event_receiver, protocol_pool_event_receiver| { // Create 1 node. let mut nodes = create_and_connect_nodes(1, &mut network_controller).await; @@ -133,16 +163,46 @@ async fn test_protocol_sends_blocks_with_operations_to_consensus() { Slot::new(1, op_thread), vec![op.clone()], ); + let block_id = block.id; send_and_propagate_block( &mut network_controller, block, - true, creator_node.id, - &mut protocol_event_receiver, &mut protocol_command_sender, vec![op.clone()], ) .await; + + // Check protocol sends block to consensus. 
+ let (new_protocol_consensus_event_receiver, expected_hash) = + tokio::task::spawn_blocking(move || { + let header_id = protocol_consensus_event_receiver + .wait_command(MassaTime::from_millis(1000), |command| match command { + MockConsensusControllerMessage::RegisterBlockHeader { + block_id, + header: _, + } => Some(block_id), + _ => panic!("Unexpected or no protocol event."), + }) + .unwrap(); + let id = protocol_consensus_event_receiver + .wait_command(MassaTime::from_millis(1000), |command| match command { + MockConsensusControllerMessage::RegisterBlock { + block_id, + slot: _, + block_storage: _, + created: _, + } => Some(block_id), + _ => panic!("Unexpected or no protocol event."), + }) + .unwrap(); + assert_eq!(header_id, id); + (protocol_consensus_event_receiver, id) + }) + .await + .unwrap(); + protocol_consensus_event_receiver = new_protocol_consensus_event_receiver; + assert_eq!(expected_hash, block_id); } // block with wrong merkle root @@ -178,13 +238,34 @@ async fn test_protocol_sends_blocks_with_operations_to_consensus() { send_and_propagate_block( &mut network_controller, block, - false, creator_node.id, - &mut protocol_event_receiver, &mut protocol_command_sender, vec![op.clone()], ) .await; + + // Check protocol didn't send block to consensus. + let (new_protocol_consensus_event_receiver, optional_expected_hash) = + tokio::task::spawn_blocking(move || { + let id = protocol_consensus_event_receiver.wait_command( + MassaTime::from_millis(1000), + |command| match command { + MockConsensusControllerMessage::RegisterBlockHeader { + block_id, + header: _, + } => Some(block_id), + _ => None, + }, + ); + (protocol_consensus_event_receiver, id) + }) + .await + .unwrap(); + protocol_consensus_event_receiver = new_protocol_consensus_event_receiver; + assert!( + optional_expected_hash.is_none(), + "Block sent to consensus but shouldn't." 
+ ); } // block with operation with wrong signature @@ -201,20 +282,41 @@ async fn test_protocol_sends_blocks_with_operations_to_consensus() { send_and_propagate_block( &mut network_controller, block, - false, creator_node.id, - &mut protocol_event_receiver, &mut protocol_command_sender, vec![op.clone()], ) .await; + + // Check protocol didn't send block to consensus. + let (new_protocol_consensus_event_receiver, optional_expected_hash) = + tokio::task::spawn_blocking(move || { + let id = protocol_consensus_event_receiver.wait_command( + MassaTime::from_millis(1000), + |command| match command { + MockConsensusControllerMessage::RegisterBlockHeader { + block_id, + header: _, + } => Some(block_id), + _ => None, + }, + ); + (protocol_consensus_event_receiver, id) + }) + .await + .unwrap(); + protocol_consensus_event_receiver = new_protocol_consensus_event_receiver; + assert!( + optional_expected_hash.is_none(), + "Block sent to consensus but shouldn't." + ); } ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, diff --git a/massa-protocol-worker/src/tests/operations_scenarios.rs b/massa-protocol-worker/src/tests/operations_scenarios.rs index 55e70e2c0a7..b0ee9cc8fa1 100644 --- a/massa-protocol-worker/src/tests/operations_scenarios.rs +++ b/massa-protocol-worker/src/tests/operations_scenarios.rs @@ -3,12 +3,13 @@ // RUST_BACKTRACE=1 cargo test test_one_handshake -- --nocapture --test-threads=1 use super::tools::{protocol_test, protocol_test_with_storage}; +use massa_consensus_exports::test_exports::MockConsensusControllerMessage; use massa_models::prehash::PreHashSet; use massa_models::{self, address::Address, amount::Amount, block::BlockId, slot::Slot}; use massa_network_exports::{BlockInfoReply, NetworkCommand}; use massa_pool_exports::test_exports::MockPoolControllerMessage; use massa_protocol_exports::tests::tools::{self, assert_hash_asked_to_node}; -use 
massa_protocol_exports::ProtocolEvent; +use massa_time::MassaTime; use serial_test::serial; use std::str::FromStr; use std::time::Duration; @@ -20,9 +21,9 @@ async fn test_protocol_sends_valid_operations_it_receives_to_consensus() { protocol_test( protocol_config, async move |mut network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, mut protocol_pool_event_receiver| { // Create 1 node. let mut nodes = tools::create_and_connect_nodes(1, &mut network_controller).await; @@ -73,9 +74,9 @@ async fn test_protocol_sends_valid_operations_it_receives_to_consensus() { ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -90,9 +91,9 @@ async fn test_protocol_does_not_send_invalid_operations_it_receives_to_consensus protocol_test( protocol_config, async move |mut network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, mut pool_event_receiver| { // Create 1 node. let mut nodes = tools::create_and_connect_nodes(1, &mut network_controller).await; @@ -119,9 +120,9 @@ async fn test_protocol_does_not_send_invalid_operations_it_receives_to_consensus ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, pool_event_receiver, ) }, @@ -136,9 +137,9 @@ async fn test_protocol_propagates_operations_to_active_nodes() { protocol_test_with_storage( protocol_config, async move |mut network_controller, - protocol_event_receiver, mut protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, mut pool_event_receiver, mut storage| { // Create 2 nodes. 
@@ -163,10 +164,14 @@ async fn test_protocol_propagates_operations_to_active_nodes() { let expected_operation_id = operation.id; storage.store_operations(vec![operation.clone()]); - protocol_command_sender - .propagate_operations(storage) - .await - .unwrap(); + protocol_command_sender = tokio::task::spawn_blocking(move || { + protocol_command_sender + .propagate_operations(storage) + .unwrap(); + protocol_command_sender + }) + .await + .unwrap(); loop { match network_controller @@ -187,9 +192,9 @@ async fn test_protocol_propagates_operations_to_active_nodes() { } ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, pool_event_receiver, ) }, @@ -204,9 +209,9 @@ async fn test_protocol_propagates_operations_only_to_nodes_that_dont_know_about_ protocol_test_with_storage( protocol_config, async move |mut network_controller, - protocol_event_receiver, mut protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, mut pool_event_receiver, mut storage| { // Create 1 nodes. 
@@ -237,10 +242,14 @@ async fn test_protocol_propagates_operations_only_to_nodes_that_dont_know_about_ // send endorsement to protocol // it should be propagated only to the node that doesn't know about it storage.store_operations(vec![operation.clone()]); - protocol_command_sender - .propagate_operations(storage) - .await - .unwrap(); + protocol_command_sender = tokio::task::spawn_blocking(move || { + protocol_command_sender + .propagate_operations(storage) + .unwrap(); + protocol_command_sender + }) + .await + .unwrap(); loop { match network_controller @@ -261,9 +270,9 @@ async fn test_protocol_propagates_operations_only_to_nodes_that_dont_know_about_ } ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, pool_event_receiver, ) }, @@ -279,9 +288,9 @@ async fn test_protocol_propagates_operations_received_over_the_network_only_to_n protocol_test_with_storage( protocol_config, async move |mut network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, mut pool_event_receiver, _storage| { // Create 2 nodes. @@ -323,9 +332,9 @@ async fn test_protocol_propagates_operations_received_over_the_network_only_to_n } ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, pool_event_receiver, ) }, @@ -341,9 +350,9 @@ async fn test_protocol_batches_propagation_of_operations_received_over_the_netwo protocol_test_with_storage( protocol_config, async move |mut network_controller, - protocol_event_receiver, mut protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, mut pool_event_receiver, mut storage| { // Create 2 nodes. @@ -370,10 +379,14 @@ async fn test_protocol_batches_propagation_of_operations_received_over_the_netwo // Send it via the API. 
storage.store_operations(vec![operation.clone()]); - protocol_command_sender - .propagate_operations(storage) - .await - .unwrap(); + protocol_command_sender = tokio::task::spawn_blocking(move || { + protocol_command_sender + .propagate_operations(storage) + .unwrap(); + protocol_command_sender + }) + .await + .unwrap(); let expected_operation_id_2 = operation.id; @@ -401,9 +414,9 @@ async fn test_protocol_batches_propagation_of_operations_received_over_the_netwo } ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, pool_event_receiver, ) }, @@ -419,9 +432,9 @@ async fn test_protocol_propagates_operations_only_to_nodes_that_dont_know_about_ protocol_test_with_storage( protocol_config, async move |mut network_controller, - mut protocol_event_receiver, mut protocol_command_sender, protocol_manager, + mut protocol_consensus_event_receiver, protocol_pool_event_receiver, mut storage| { // Create 1 node. @@ -442,21 +455,33 @@ async fn test_protocol_propagates_operations_only_to_nodes_that_dont_know_about_ .send_header(nodes[0].id, block.content.header.clone()) .await; - match protocol_event_receiver.wait_event().await.unwrap() { - ProtocolEvent::ReceivedBlockHeader { .. } => {} - _ => panic!("unexpected protocol event"), - }; + let protocol_consensus_event_receiver = tokio::task::spawn_blocking(move || { + protocol_consensus_event_receiver.wait_command( + MassaTime::from_millis(1000), + |command| match command { + MockConsensusControllerMessage::RegisterBlockHeader { .. 
} => Some(()), + _ => panic!("unexpected protocol event"), + }, + ); + protocol_consensus_event_receiver + }) + .await + .unwrap(); // send wishlist - protocol_command_sender - .send_wishlist_delta( - vec![(block.id, Some(block.content.header.clone()))] - .into_iter() - .collect(), - PreHashSet::::default(), - ) - .await - .unwrap(); + protocol_command_sender = tokio::task::spawn_blocking(move || { + protocol_command_sender + .send_wishlist_delta( + vec![(block.id, Some(block.content.header.clone()))] + .into_iter() + .collect(), + PreHashSet::::default(), + ) + .unwrap(); + protocol_command_sender + }) + .await + .unwrap(); assert_hash_asked_to_node(block.id, nodes[0].id, &mut network_controller).await; @@ -478,10 +503,14 @@ async fn test_protocol_propagates_operations_only_to_nodes_that_dont_know_about_ // it should not propagate to the node that already knows about it // because of the previously received header. storage.store_operations(vec![operation.clone()]); - protocol_command_sender - .propagate_operations(storage) - .await - .unwrap(); + protocol_command_sender = tokio::task::spawn_blocking(move || { + protocol_command_sender + .propagate_operations(storage) + .unwrap(); + protocol_command_sender + }) + .await + .unwrap(); match network_controller .wait_command(1000.into(), |cmd| match cmd { @@ -504,9 +533,9 @@ async fn test_protocol_propagates_operations_only_to_nodes_that_dont_know_about_ ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -523,9 +552,9 @@ async fn test_protocol_propagates_operations_only_to_nodes_that_dont_know_about_ protocol_test_with_storage( protocol_config, async move |mut network_controller, - mut protocol_event_receiver, mut protocol_command_sender, protocol_manager, + mut protocol_consensus_event_receiver, protocol_pool_event_receiver, mut storage| { // Create 3 nodes. 
@@ -569,7 +598,6 @@ async fn test_protocol_propagates_operations_only_to_nodes_that_dont_know_about_ .collect(), PreHashSet::::default(), ) - .await .unwrap(); // assert it was asked to node A, then B, then C. @@ -595,20 +623,24 @@ async fn test_protocol_propagates_operations_only_to_nodes_that_dont_know_about_ .await; // Wait for the event to be sure that the node is connected. - let _ = tools::wait_protocol_event(&mut protocol_event_receiver, 1000.into(), |evt| { - match evt { - evt @ ProtocolEvent::ReceivedBlockHeader { .. } => Some(evt), - _ => None, - } + let protocol_consensus_event_receiver = tokio::task::spawn_blocking(move || { + protocol_consensus_event_receiver.wait_command( + MassaTime::from_millis(1000), + |command| match command { + MockConsensusControllerMessage::RegisterBlockHeader { .. } => Some(()), + _ => panic!("unexpected protocol event"), + }, + ); + protocol_consensus_event_receiver }) - .await; + .await + .unwrap(); // Send the operation to protocol // it should propagate to the node because it isn't in the block. storage.store_operations(vec![op_2.clone()]); protocol_command_sender .propagate_operations(storage) - .await .unwrap(); match network_controller @@ -629,9 +661,9 @@ async fn test_protocol_propagates_operations_only_to_nodes_that_dont_know_about_ ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -647,9 +679,9 @@ async fn test_protocol_does_not_propagates_operations_when_receiving_those_insid protocol_test( protocol_config, async move |mut network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, mut pool_event_receiver| { // Create 2 nodes. 
let mut nodes = tools::create_and_connect_nodes(2, &mut network_controller).await; @@ -693,9 +725,9 @@ async fn test_protocol_does_not_propagates_operations_when_receiving_those_insid } ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, pool_event_receiver, ) }, @@ -710,9 +742,9 @@ async fn test_protocol_ask_operations_on_batch_received() { protocol_test( protocol_config, async move |mut network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver| { // Create 1 node. let mut nodes = tools::create_and_connect_nodes(1, &mut network_controller).await; @@ -745,9 +777,9 @@ async fn test_protocol_ask_operations_on_batch_received() { ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -762,9 +794,9 @@ async fn test_protocol_re_ask_operations_to_another_node_on_batch_received_after protocol_test( protocol_config, async move |mut network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver| { // Create 3 node. let mut nodes = tools::create_and_connect_nodes(3, &mut network_controller).await; @@ -818,9 +850,9 @@ async fn test_protocol_re_ask_operations_to_another_node_on_batch_received_after }; ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -835,9 +867,9 @@ async fn test_protocol_does_not_re_ask_operations_to_another_node_if_received() protocol_test( protocol_config, async move |mut network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver| { // Create 3 node. 
let mut nodes = tools::create_and_connect_nodes(3, &mut network_controller).await; @@ -893,9 +925,9 @@ async fn test_protocol_does_not_re_ask_operations_to_another_node_if_received() ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -910,9 +942,9 @@ async fn test_protocol_on_ask_operations() { protocol_test_with_storage( protocol_config, async move |mut network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, mut storage| { // Create 1 node. @@ -957,9 +989,9 @@ async fn test_protocol_on_ask_operations() { ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, diff --git a/massa-protocol-worker/src/tests/scenarios.rs b/massa-protocol-worker/src/tests/scenarios.rs index 829c59f0c55..143ca55aa42 100644 --- a/massa-protocol-worker/src/tests/scenarios.rs +++ b/massa-protocol-worker/src/tests/scenarios.rs @@ -3,14 +3,16 @@ // RUST_BACKTRACE=1 cargo test test_one_handshake -- --nocapture --test-threads=1 use super::tools::{protocol_test, protocol_test_with_storage}; +use massa_consensus_exports::test_exports::MockConsensusControllerMessage; use massa_models::block::BlockId; use massa_models::prehash::{PreHashMap, PreHashSet}; use massa_network_exports::{AskForBlocksInfo, NetworkCommand}; use massa_protocol_exports::tests::tools; use massa_protocol_exports::{ - tests::tools::{create_and_connect_nodes, create_block, wait_protocol_event}, - BlocksResults, ProtocolEvent, + tests::tools::{create_and_connect_nodes, create_block}, + BlocksResults, }; +use massa_time::MassaTime; use serial_test::serial; use std::collections::HashSet; @@ -22,9 +24,9 @@ async fn test_protocol_asks_for_block_from_node_who_propagated_header() { protocol_test( protocol_config, 
async move |mut network_controller, - mut protocol_event_receiver, mut protocol_command_sender, protocol_manager, + mut protocol_consensus_event_receiver, protocol_pool_event_receiver| { let ask_for_block_cmd_filter = |cmd| match cmd { cmd @ NetworkCommand::AskForBlocks { .. } => Some(cmd), @@ -46,34 +48,40 @@ async fn test_protocol_asks_for_block_from_node_who_propagated_header() { .await; // Check protocol sends header to consensus. - let received_hash = match wait_protocol_event( - &mut protocol_event_receiver, - 1000.into(), - |evt| match evt { - evt @ ProtocolEvent::ReceivedBlockHeader { .. } => Some(evt), - _ => None, - }, - ) - .await - { - Some(ProtocolEvent::ReceivedBlockHeader { block_id, .. }) => block_id, - _ => panic!("Unexpected or no protocol event."), - }; + let (protocol_consensus_event_receiver, received_hash) = + tokio::task::spawn_blocking(move || { + let id = protocol_consensus_event_receiver + .wait_command(MassaTime::from_millis(1000), |command| match command { + MockConsensusControllerMessage::RegisterBlockHeader { + block_id, + header: _, + } => Some(block_id), + _ => panic!("unexpected protocol event"), + }) + .unwrap(); + (protocol_consensus_event_receiver, id) + }) + .await + .unwrap(); // 4. Check that protocol sent the right header to consensus. let expected_hash = block.id; assert_eq!(expected_hash, received_hash); // 5. Ask for block. - protocol_command_sender - .send_wishlist_delta( - vec![(expected_hash, Some(block.content.header.clone()))] - .into_iter() - .collect(), - PreHashSet::::default(), - ) - .await - .expect("Failed to ask for block."); + protocol_command_sender = tokio::task::spawn_blocking(move || { + protocol_command_sender + .send_wishlist_delta( + vec![(expected_hash, Some(block.content.header.clone()))] + .into_iter() + .collect(), + PreHashSet::::default(), + ) + .expect("Failed to ask for block."); + protocol_command_sender + }) + .await + .unwrap(); // 6. Check that protocol asks the node for the full block. 
match network_controller @@ -101,9 +109,9 @@ async fn test_protocol_asks_for_block_from_node_who_propagated_header() { ); ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -118,9 +126,9 @@ async fn test_protocol_sends_blocks_when_asked_for() { protocol_test_with_storage( protocol_config, async move |mut network_controller, - protocol_event_receiver, mut protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, mut storage| { let send_block_info_cmd_filter = |cmd| match cmd { @@ -141,10 +149,14 @@ async fn test_protocol_sends_blocks_when_asked_for() { // Add to storage, integrate. storage.store_block(block.clone()); - protocol_command_sender - .integrated_block(expected_hash, storage.clone()) - .await - .unwrap(); + protocol_command_sender = tokio::task::spawn_blocking(move || { + protocol_command_sender + .integrated_block(expected_hash, storage.clone()) + .unwrap(); + protocol_command_sender + }) + .await + .unwrap(); // 3. Simulate two nodes asking for a block. for node in nodes.iter().take(2) { @@ -188,9 +200,9 @@ async fn test_protocol_sends_blocks_when_asked_for() { ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -205,9 +217,9 @@ async fn test_protocol_propagates_block_to_all_nodes_including_those_who_asked_f protocol_test_with_storage( protocol_config, async move |mut network_controller, - mut protocol_event_receiver, mut protocol_command_sender, protocol_manager, + mut protocol_consensus_event_receiver, protocol_pool_event_receiver, mut storage| { // Create 4 nodes. @@ -235,19 +247,21 @@ async fn test_protocol_propagates_block_to_all_nodes_including_those_who_asked_f // node[1] asks for that block // Check protocol sends header to consensus. 
- let (ref_hash, _) = match wait_protocol_event( - &mut protocol_event_receiver, - 1000.into(), - |evt| match evt { - evt @ ProtocolEvent::ReceivedBlockHeader { .. } => Some(evt), - _ => None, - }, - ) - .await - { - Some(ProtocolEvent::ReceivedBlockHeader { block_id, header }) => (block_id, header), - _ => panic!("Unexpected or no protocol event."), - }; + let (protocol_consensus_event_receiver, ref_hash) = + tokio::task::spawn_blocking(move || { + let id = protocol_consensus_event_receiver + .wait_command(MassaTime::from_millis(1000), |command| match command { + MockConsensusControllerMessage::RegisterBlockHeader { + block_id, + header: _, + } => Some(block_id), + _ => panic!("unexpected protocol event"), + }) + .unwrap(); + (protocol_consensus_event_receiver, id) + }) + .await + .unwrap(); storage.store_block(ref_block.clone()); @@ -258,10 +272,14 @@ async fn test_protocol_propagates_block_to_all_nodes_including_those_who_asked_f // 5. Propagate header. let _op_ids = ref_block.content.operations.clone(); - protocol_command_sender - .integrated_block(ref_hash, storage) - .await - .expect("Failed to ask for block."); + protocol_command_sender = tokio::task::spawn_blocking(move || { + protocol_command_sender + .integrated_block(ref_hash, storage.clone()) + .unwrap(); + protocol_command_sender + }) + .await + .unwrap(); // 6. Check that protocol propagates the header to the right nodes. 
// node_a created the block and should receive nothing @@ -302,9 +320,9 @@ async fn test_protocol_propagates_block_to_all_nodes_including_those_who_asked_f } ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -320,9 +338,9 @@ async fn test_protocol_propagates_block_to_node_who_asked_for_operations_and_onl protocol_test_with_storage( protocol_config, async move |mut network_controller, - mut protocol_event_receiver, mut protocol_command_sender, protocol_manager, + mut protocol_consensus_event_receiver, protocol_pool_event_receiver, mut storage| { // Create 4 nodes. @@ -350,27 +368,33 @@ async fn test_protocol_propagates_block_to_node_who_asked_for_operations_and_onl // node[1] asks for that block // Check protocol sends header to consensus. - let (ref_hash, _) = match wait_protocol_event( - &mut protocol_event_receiver, - 1000.into(), - |evt| match evt { - evt @ ProtocolEvent::ReceivedBlockHeader { .. } => Some(evt), - _ => None, - }, - ) - .await - { - Some(ProtocolEvent::ReceivedBlockHeader { block_id, header }) => (block_id, header), - _ => panic!("Unexpected or no protocol event."), - }; + let (protocol_consensus_event_receiver, ref_hash) = + tokio::task::spawn_blocking(move || { + let id = protocol_consensus_event_receiver + .wait_command(MassaTime::from_millis(1000), |command| match command { + MockConsensusControllerMessage::RegisterBlockHeader { + block_id, + header: _, + } => Some(block_id), + _ => panic!("unexpected protocol event"), + }) + .unwrap(); + (protocol_consensus_event_receiver, id) + }) + .await + .unwrap(); storage.store_block(ref_block.clone()); // 5. Propagate header. 
let _op_ids = ref_block.content.operations.clone(); - protocol_command_sender - .integrated_block(ref_hash, storage) - .await - .expect("Failed to ask for block."); + protocol_command_sender = tokio::task::spawn_blocking(move || { + protocol_command_sender + .integrated_block(ref_hash, storage.clone()) + .unwrap(); + protocol_command_sender + }) + .await + .unwrap(); // 6. Check that protocol propagates the header to the right nodes. // node_a created the block and should receive nothing @@ -442,9 +466,9 @@ async fn test_protocol_propagates_block_to_node_who_asked_for_operations_and_onl } ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -460,9 +484,9 @@ async fn test_protocol_sends_full_blocks_it_receives_to_consensus() { protocol_test( protocol_config, async move |mut network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver| { // Create 1 node. let mut nodes = create_and_connect_nodes(1, &mut network_controller).await; @@ -478,9 +502,9 @@ async fn test_protocol_sends_full_blocks_it_receives_to_consensus() { ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, @@ -495,9 +519,9 @@ async fn test_protocol_block_not_found() { protocol_test( protocol_config, async move |mut network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver| { // Create 1 node. 
let mut nodes = create_and_connect_nodes(1, &mut network_controller).await; @@ -533,9 +557,9 @@ async fn test_protocol_block_not_found() { ( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + protocol_consensus_event_receiver, protocol_pool_event_receiver, ) }, diff --git a/massa-protocol-worker/src/tests/tools.rs b/massa-protocol-worker/src/tests/tools.rs index 3ef0904ecba..7917116c4a1 100644 --- a/massa-protocol-worker/src/tests/tools.rs +++ b/massa-protocol-worker/src/tests/tools.rs @@ -1,27 +1,36 @@ use crate::start_protocol_controller; use futures::Future; +use massa_consensus_exports::test_exports::{ConsensusEventReceiver, MockConsensusController}; +use massa_models::{ + block::{BlockId, WrappedBlock}, + node::NodeId, + operation::WrappedOperation, + prehash::PreHashSet, +}; +use massa_network_exports::BlockInfoReply; use massa_pool_exports::test_exports::{MockPoolController, PoolEventReceiver}; use massa_protocol_exports::{ tests::mock_network_controller::MockNetworkController, ProtocolCommandSender, ProtocolConfig, - ProtocolEventReceiver, ProtocolManager, + ProtocolManager, }; use massa_storage::Storage; +use tokio::sync::mpsc; pub async fn protocol_test(protocol_config: &ProtocolConfig, test: F) where F: FnOnce( MockNetworkController, - ProtocolEventReceiver, ProtocolCommandSender, ProtocolManager, + ConsensusEventReceiver, PoolEventReceiver, ) -> V, V: Future< Output = ( MockNetworkController, - ProtocolEventReceiver, ProtocolCommandSender, ProtocolManager, + ConsensusEventReceiver, PoolEventReceiver, ), >, @@ -30,39 +39,42 @@ where MockNetworkController::new(); let (pool_controller, pool_event_receiver) = MockPoolController::new_with_receiver(); - + let (consensus_controller, consensus_event_receiver) = + MockConsensusController::new_with_receiver(); // start protocol controller - let (protocol_command_sender, protocol_event_receiver, protocol_manager): ( - ProtocolCommandSender, - ProtocolEventReceiver, - 
ProtocolManager, - ) = start_protocol_controller( + let (protocol_command_sender, protocol_command_receiver) = + mpsc::channel(protocol_config.controller_channel_size); + // start protocol controller + let protocol_manager: ProtocolManager = start_protocol_controller( *protocol_config, network_command_sender, network_event_receiver, + protocol_command_receiver, + consensus_controller, pool_controller, Storage::create_root(), ) .await .expect("could not start protocol controller"); + let protocol_command_sender = ProtocolCommandSender(protocol_command_sender); let ( _network_controller, - protocol_event_receiver, _protocol_command_sender, protocol_manager, + _consensus_event_receiver, _pool_event_receiver, ) = test( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + consensus_event_receiver, pool_event_receiver, ) .await; protocol_manager - .stop(protocol_event_receiver) + .stop() .await .expect("Failed to shutdown protocol."); } @@ -71,18 +83,18 @@ pub async fn protocol_test_with_storage(protocol_config: &ProtocolConfig, where F: FnOnce( MockNetworkController, - ProtocolEventReceiver, ProtocolCommandSender, ProtocolManager, + ConsensusEventReceiver, PoolEventReceiver, Storage, ) -> V, V: Future< Output = ( MockNetworkController, - ProtocolEventReceiver, ProtocolCommandSender, ProtocolManager, + ConsensusEventReceiver, PoolEventReceiver, ), >, @@ -90,37 +102,85 @@ where let (network_controller, network_command_sender, network_event_receiver) = MockNetworkController::new(); let (pool_controller, mock_pool_receiver) = MockPoolController::new_with_receiver(); + let (consensus_controller, mock_consensus_receiver) = + MockConsensusController::new_with_receiver(); let storage = Storage::create_root(); // start protocol controller - let (protocol_command_sender, protocol_event_receiver, protocol_manager) = - start_protocol_controller( - *protocol_config, - network_command_sender, - network_event_receiver, - pool_controller, - 
storage.clone(), - ) - .await - .expect("could not start protocol controller"); + let (protocol_command_sender, protocol_command_receiver) = + mpsc::channel(protocol_config.controller_channel_size); + let protocol_manager = start_protocol_controller( + *protocol_config, + network_command_sender, + network_event_receiver, + protocol_command_receiver, + consensus_controller, + pool_controller, + storage.clone(), + ) + .await + .expect("could not start protocol controller"); + let protocol_command_sender = ProtocolCommandSender(protocol_command_sender); let ( _network_controller, - protocol_event_receiver, _protocol_command_sender, protocol_manager, + _consensus_event_receiver, _protocol_pool_event_receiver, ) = test( network_controller, - protocol_event_receiver, protocol_command_sender, protocol_manager, + mock_consensus_receiver, mock_pool_receiver, storage, ) .await; protocol_manager - .stop(protocol_event_receiver) + .stop() .await .expect("Failed to shutdown protocol."); } + +/// send a block and assert it has been propagate (or not) +pub async fn send_and_propagate_block( + network_controller: &mut MockNetworkController, + block: WrappedBlock, + source_node_id: NodeId, + protocol_command_sender: &mut ProtocolCommandSender, + operations: Vec, +) { + network_controller + .send_header(source_node_id, block.content.header.clone()) + .await; + + let mut protocol_sender = protocol_command_sender.clone(); + tokio::task::spawn_blocking(move || { + protocol_sender + .send_wishlist_delta( + vec![(block.id, Some(block.content.header.clone()))] + .into_iter() + .collect(), + PreHashSet::::default(), + ) + .unwrap(); + }) + .await + .unwrap(); + + // Send block info to protocol. + let info = vec![( + block.id, + BlockInfoReply::Info(block.content.operations.clone()), + )]; + network_controller + .send_block_info(source_node_id, info) + .await; + + // Send full ops. 
+    let info = vec![(block.id, BlockInfoReply::Operations(operations))];
+    network_controller
+        .send_block_info(source_node_id, info)
+        .await;
+}
diff --git a/massa-sdk/Cargo.toml b/massa-sdk/Cargo.toml
index 9769fa72f2f..d9c6f16a7a1 100644
--- a/massa-sdk/Cargo.toml
+++ b/massa-sdk/Cargo.toml
@@ -4,7 +4,7 @@ version = "0.1.0"
 edition = "2021"
 
 [dependencies]
-jsonrpc-core-client = { git = "https://github.com/massalabs/jsonrpc", features = ["http", "tls"] }
-tokio = { version = "1.21", features = ["full"] }
+jsonrpsee = { version = "0.16.1", features = ["client"] }
+http = "0.2.8"
 massa_models = { path = "../massa-models" }
-serde = { version = "1.0", features = ["derive"] }
+massa_time = { path = "../massa-time" }
diff --git a/massa-sdk/src/config.rs b/massa-sdk/src/config.rs
new file mode 100644
index 00000000000..0f28e771afb
--- /dev/null
+++ b/massa-sdk/src/config.rs
@@ -0,0 +1,23 @@
+// Copyright (c) 2022 MASSA LABS
+
+use massa_time::MassaTime;
+
+/// Http client settings.
+/// the Http client settings
+#[derive(Debug, Clone)]
+pub struct HttpConfig {
+    /// maximum size in bytes of a request.
+    pub max_request_body_size: u32,
+    /// maximum duration to wait for a response.
+    pub request_timeout: MassaTime,
+    /// maximum number of concurrent requests.
+    pub max_concurrent_requests: usize,
+    /// certificate_store, `Native` or `WebPki`
+    pub certificate_store: String,
+    /// JSON-RPC request object id data type, `Number` or `String`
+    pub id_kind: String,
+    /// max length for logging for requests and responses. Logs bigger than this limit will be truncated.
+    pub max_log_length: u32,
+    /// custom headers to pass with every request.
+ pub headers: Vec<(String, String)>, +} diff --git a/massa-sdk/src/lib.rs b/massa-sdk/src/lib.rs index 6cca64afa73..0a282d0a355 100644 --- a/massa-sdk/src/lib.rs +++ b/massa-sdk/src/lib.rs @@ -4,8 +4,11 @@ #![warn(missing_docs)] #![warn(unused_crate_dependencies)] -use jsonrpc_core_client::transports::http; -use jsonrpc_core_client::{RpcChannel, RpcError, RpcResult, TypedClient}; +use http::header::HeaderName; +use jsonrpsee::core::client::{CertificateStore, ClientT, IdKind}; +use jsonrpsee::http_client::HttpClient; +use jsonrpsee::rpc_params; +use jsonrpsee::ws_client::{HeaderMap, HeaderValue}; use massa_models::api::{ AddressInfo, BlockInfo, BlockSummary, DatastoreEntryInput, DatastoreEntryOutput, EndorsementInfo, EventFilter, NodeStatus, OperationInfo, OperationInput, @@ -21,9 +24,12 @@ use massa_models::{ address::Address, block::BlockId, endorsement::EndorsementId, operation::OperationId, }; -use serde::de::DeserializeOwned; -use serde::Serialize; +use jsonrpsee::{core::Error as JsonRpseeError, core::RpcResult, http_client::HttpClientBuilder}; use std::net::{IpAddr, SocketAddr}; +use std::str::FromStr; + +mod config; +pub use config::HttpConfig; /// Client pub struct Client { @@ -35,123 +41,149 @@ pub struct Client { impl Client { /// creates a new client - pub async fn new(ip: IpAddr, public_port: u16, private_port: u16) -> Client { + pub async fn new( + ip: IpAddr, + public_port: u16, + private_port: u16, + http_config: &HttpConfig, + ) -> Client { let public_socket_addr = SocketAddr::new(ip, public_port); let private_socket_addr = SocketAddr::new(ip, private_port); let public_url = format!("http://{}", public_socket_addr); let private_url = format!("http://{}", private_socket_addr); Client { - public: RpcClient::from_url(&public_url).await, - private: RpcClient::from_url(&private_url).await, + public: RpcClient::from_url(&public_url, http_config).await, + private: RpcClient::from_url(&private_url, http_config).await, } } } -/// TODO ask @yvan-sraka +/// 
TODO add ws client pub struct RpcClient { - client: TypedClient, - timeout: u64, -} - -/// This is required by `jsonrpc_core_client::transports::http::connect` -impl From for RpcClient { - fn from(channel: RpcChannel) -> Self { - RpcClient { - client: channel.into(), - timeout: 10000, - } - } + http_client: HttpClient, } impl RpcClient { /// Default constructor - pub async fn from_url(url: &str) -> RpcClient { - match http::connect::(url).await { - Ok(client) => client, + pub async fn from_url(url: &str, http_config: &HttpConfig) -> RpcClient { + let certificate_store = match http_config.certificate_store.as_str() { + "Native" => CertificateStore::Native, + "WebPki" => CertificateStore::WebPki, + _ => CertificateStore::Native, + }; + let id_kind = match http_config.id_kind.as_str() { + "Number" => IdKind::Number, + "String" => IdKind::String, + _ => IdKind::Number, + }; + + let mut headers = HeaderMap::new(); + http_config.headers.iter().for_each(|(key, value)| { + let header_name = match HeaderName::from_str(key.as_str()) { + Ok(header_name) => header_name, + Err(_) => panic!("invalid header name: {:?}", key), + }; + let header_value = match HeaderValue::from_str(value.as_str()) { + Ok(header_name) => header_name, + Err(_) => panic!("invalid header value: {:?}", value), + }; + headers.insert(header_name, header_value); + }); + + match HttpClientBuilder::default() + .max_request_body_size(http_config.max_request_body_size) + .request_timeout(http_config.request_timeout.to_duration()) + .max_concurrent_requests(http_config.max_concurrent_requests) + .certificate_store(certificate_store) + .id_format(id_kind) + .set_headers(headers) + .build(url) + { + Ok(http_client) => RpcClient { http_client }, Err(_) => panic!("unable to connect to Node."), } } - /// Typed wrapper to API calls based on the method given by `jsonrpc_core_client` - async fn call_method( - &self, - method: &str, - returns: &str, - args: T, - ) -> RpcResult { - tokio::time::timeout( - 
tokio::time::Duration::from_millis(self.timeout), - self.client.call_method(method, returns, args), - ) - .await - .map_err(|e| RpcError::Client(format!("timeout during {}: {}", method, e)))? - } - /// Gracefully stop the node. pub async fn stop_node(&self) -> RpcResult<()> { - self.call_method("stop_node", "()", ()).await + self.http_client.request("stop_node", rpc_params![]).await } /// Sign message with node's key. /// Returns the public key that signed the message and the signature. pub async fn node_sign_message(&self, message: Vec) -> RpcResult { - self.call_method("node_sign_message", "PubkeySig", vec![message]) + self.http_client + .request("node_sign_message", rpc_params![message]) .await } /// Add a vector of new secret keys for the node to use to stake. /// No confirmation to expect. pub async fn add_staking_secret_keys(&self, secret_keys: Vec) -> RpcResult<()> { - self.call_method("add_staking_secret_keys", "()", vec![secret_keys]) + self.http_client + .request("add_staking_secret_keys", rpc_params![secret_keys]) .await } /// Remove a vector of addresses used to stake. /// No confirmation to expect. pub async fn remove_staking_addresses(&self, addresses: Vec
) -> RpcResult<()> { - self.call_method("remove_staking_addresses", "()", vec![addresses]) + self.http_client + .request("remove_staking_addresses", rpc_params![addresses]) .await } /// Return hash-set of staking addresses. pub async fn get_staking_addresses(&self) -> RpcResult> { - self.call_method("get_staking_addresses", "Set
", ()) + self.http_client + .request("get_staking_addresses", rpc_params![]) .await } /// Bans given ip address(es) /// No confirmation to expect. pub async fn node_ban_by_ip(&self, ips: Vec) -> RpcResult<()> { - self.call_method("node_ban_by_ip", "()", vec![ips]).await + self.http_client + .request("node_ban_by_ip", rpc_params![ips]) + .await } /// Bans given node id(s) /// No confirmation to expect. pub async fn node_ban_by_id(&self, ids: Vec) -> RpcResult<()> { - self.call_method("node_ban_by_id", "()", vec![ids]).await + self.http_client + .request("node_ban_by_id", rpc_params![ids]) + .await } /// Unban given ip address(es) /// No confirmation to expect. pub async fn node_unban_by_ip(&self, ips: Vec) -> RpcResult<()> { - self.call_method("node_unban_by_ip", "()", vec![ips]).await + self.http_client + .request("node_unban_by_ip", rpc_params![ips]) + .await } /// Unban given node id(s) /// No confirmation to expect. pub async fn node_unban_by_id(&self, ids: Vec) -> RpcResult<()> { - self.call_method("node_unban_by_id", "()", vec![ids]).await + self.http_client + .request("node_unban_by_id", rpc_params![ids]) + .await } /// add ips to whitelist /// create peer if it was unknown pub async fn node_whitelist(&self, ips: Vec) -> RpcResult<()> { - self.call_method("node_whitelist", "()", vec![ips]).await + self.http_client + .request("node_whitelist", rpc_params![ips]) + .await } /// remove IPs from whitelist pub async fn node_remove_from_whitelist(&self, ips: Vec) -> RpcResult<()> { - self.call_method("node_remove_from_whitelist", "()", vec![ips]) + self.http_client + .request("node_remove_from_whitelist", rpc_params![ips]) .await } @@ -163,19 +195,18 @@ impl RpcClient { /// summary of the current state: time, last final blocks (hash, thread, slot, timestamp), clique count, connected nodes count pub async fn get_status(&self) -> RpcResult { - self.call_method("get_status", "NodeStatus", ()).await + self.http_client.request("get_status", rpc_params![]).await } 
pub(crate) async fn _get_cliques(&self) -> RpcResult> { - self.call_method("get_cliques", "Vec", ()).await + self.http_client.request("get_cliques", rpc_params![]).await } // Debug (specific information) /// Returns the active stakers and their roll counts for the current cycle. pub(crate) async fn _get_stakers(&self) -> RpcResult> { - self.call_method("get_stakers", "Map", ()) - .await + self.http_client.request("get_stakers", rpc_params![]).await } /// Returns operations information associated to a given list of operations' IDs. @@ -183,7 +214,8 @@ impl RpcClient { &self, operation_ids: Vec, ) -> RpcResult> { - self.call_method("get_operations", "Vec", vec![operation_ids]) + self.http_client + .request("get_operations", rpc_params![operation_ids]) .await } @@ -192,17 +224,15 @@ impl RpcClient { &self, endorsement_ids: Vec, ) -> RpcResult> { - self.call_method( - "get_endorsements", - "Vec", - vec![endorsement_ids], - ) - .await + self.http_client + .request("get_endorsements", rpc_params![endorsement_ids]) + .await } /// Get information on a block given its `BlockId` pub async fn get_block(&self, block_id: BlockId) -> RpcResult { - self.call_method("get_block", "BlockInfo", vec![block_id]) + self.http_client + .request("get_block", rpc_params![block_id]) .await } @@ -211,12 +241,9 @@ impl RpcClient { &self, filter: EventFilter, ) -> RpcResult> { - self.call_method( - "get_filtered_sc_output_event", - "Vec", - vec![filter], - ) - .await + self.http_client + .request("get_filtered_sc_output_event", rpc_params![filter]) + .await } /// Get the block graph within the specified time interval. @@ -225,13 +252,15 @@ impl RpcClient { &self, time_interval: TimeInterval, ) -> RpcResult> { - self.call_method("get_graph_interval", "Vec", time_interval) + self.http_client + .request("get_graph_interval", rpc_params![time_interval]) .await } /// Get info by addresses pub async fn get_addresses(&self, addresses: Vec
) -> RpcResult> { - self.call_method("get_addresses", "Vec", vec![addresses]) + self.http_client + .request("get_addresses", rpc_params![addresses]) .await } @@ -240,12 +269,9 @@ impl RpcClient { &self, input: Vec, ) -> RpcResult> { - self.call_method( - "get_datastore_entries", - "Vec", - vec![input], - ) - .await + self.http_client + .request("get_datastore_entries", rpc_params![input]) + .await } // User (interaction with the node) @@ -255,7 +281,8 @@ impl RpcClient { &self, operations: Vec, ) -> RpcResult> { - self.call_method("send_operations", "Vec", vec![operations]) + self.http_client + .request("send_operations", rpc_params![operations]) .await } @@ -264,16 +291,16 @@ impl RpcClient { &self, read_only_execution: ReadOnlyBytecodeExecution, ) -> RpcResult { - self.call_method::>, Vec>( - "execute_read_only_bytecode", - "Vec", - vec![vec![read_only_execution]], - ) - .await? - .pop() - .ok_or_else(|| { - RpcError::Client("missing return value on execute_read_only_bytecode".into()) - }) + self.http_client + .request::, Vec>>( + "execute_read_only_bytecode", + vec![vec![read_only_execution]], + ) + .await? + .pop() + .ok_or_else(|| { + JsonRpseeError::Custom("missing return value on execute_read_only_bytecode".into()) + }) } /// execute read only SC call @@ -281,13 +308,15 @@ impl RpcClient { &self, read_only_execution: ReadOnlyCall, ) -> RpcResult { - self.call_method::>, Vec>( - "execute_read_only_call", - "Vec", - vec![vec![read_only_execution]], - ) - .await? - .pop() - .ok_or_else(|| RpcError::Client("missing return value on execute_read_only_call".into())) + self.http_client + .request::, Vec>>( + "execute_read_only_call", + vec![vec![read_only_execution]], + ) + .await? 
+ .pop() + .ok_or_else(|| { + JsonRpseeError::Custom("missing return value on execute_read_only_call".into()) + }) } } diff --git a/tools/Readme.md b/tools/Readme.md index 7daaa089051..fc8a8313862 100644 --- a/tools/Readme.md +++ b/tools/Readme.md @@ -22,7 +22,11 @@ If required, please update the Git tag in setup_test.rs (line 25) #### Run -* cargo script setup_test +* cargo script setup_test.rs + +### Run with local sources + +* cargo script setup_test.rs -- --local "../../massa-unit-tests-src/build/massa/*.wasm" ### Howto: add a new SC unit tests diff --git a/tools/setup_test.rs b/tools/setup_test.rs index 1228bb0a89d..88e448c5845 100644 --- a/tools/setup_test.rs +++ b/tools/setup_test.rs @@ -6,28 +6,35 @@ //! fs_extra="1" //! ureq="2" //! thiserror="1" +//! clap={ version = "4", features= ["derive"] } //! ``` use std::fs::File; -use std::path::Path; +use std::io::Error; use std::io::{Cursor, Read}; use std::num::ParseIntError; +use std::path::Path; +extern crate clap; extern crate flate2; -extern crate tar; -extern crate glob; extern crate fs_extra; +extern crate glob; +extern crate tar; extern crate ureq; +use clap::Parser; use flate2::read::GzDecoder; -use tar::Archive; use glob::glob; -// use fs_extra::dir::{remove, copy, CopyOptions}; +use tar::Archive; -const TAG: &str = "TEST.16.3"; // git tag -const ARCHIVE_MAX_SIZE: u64 = 1048576; // Maximum archive file size to download in bytes (here: 1Mb) +// git tag +const TAG: &str = "TEST.16.3"; + +// Maximum archive file size to download in bytes (here: 1Mb) // const ARCHIVE_MAX_SIZE: u64 = 2; // Maximum archive file size to download in bytes (DEBUG) // Destination path for wasm file & src files (relative to repo root) -const PATH_DST_BASE_1: &str = "massa-execution-worker/src/tests/wasm/"; -// const PATH_DST_BASE_2: &str = "massa-execution-worker/src/tests/wasm/sources/"; +const ARCHIVE_MAX_SIZE: u64 = 1048576; + +// destination path +const PATH_DST_BASE_1: &str = "../massa-execution-worker/src/tests/wasm/"; 
#[derive(Debug, thiserror::Error)] enum DlFileError { @@ -36,23 +43,19 @@ enum DlFileError { #[error("ureq error: {0}")] Ureq(#[from] ureq::Error), #[error("parse error: {0}")] - Parse(#[from] ParseIntError) + Parse(#[from] ParseIntError), } /// Archive download using given url fn dl_file(url: &str, to_file: &str, max_size: u64) -> Result<(), DlFileError> { - - let resp = ureq::get(url) - .call()?; + let resp = ureq::get(url).call()?; let mut bytes: Vec = match resp.header("Content-Length") { Some(l) => Vec::with_capacity(l.parse()?), None => Vec::with_capacity(max_size as usize), }; - resp.into_reader() - .take(max_size) - .read_to_end(&mut bytes)?; + resp.into_reader().take(max_size).read_to_end(&mut bytes)?; let mut buf = Cursor::new(bytes); let mut file = std::fs::File::create(to_file)?; @@ -61,17 +64,25 @@ fn dl_file(url: &str, to_file: &str, max_size: u64) -> Result<(), DlFileError> { Ok(()) } -fn main() -> Result<(), std::io::Error> { - - println!("Using tag: {} for release of massa-unit-tests-src repo...", TAG); +fn download_src() -> Result { + println!( + "Using tag: {} for release of massa-unit-tests-src repo...", + TAG + ); let path = format!("massa_unit_tests_{}.tar.gz", TAG); - let url = format!("https://github.com/massalabs/massa-unit-tests-src/releases/download/{}/{}", TAG, path); + let url = format!( + "https://github.com/massalabs/massa-unit-tests-src/releases/download/{}/{}", + TAG, path + ); let extract_folder = format!("extract_massa_unit_tests_src_{}", TAG); if Path::new(&extract_folder).exists() { - println!("Please remove the folder: {} before runnning this script", extract_folder); + println!( + "Please remove the folder: {} before runnning this script", + extract_folder + ); std::process::exit(1); } @@ -90,16 +101,41 @@ fn main() -> Result<(), std::io::Error> { let mut archive = Archive::new(tar); archive.unpack(extract_folder.clone())?; - // Copy wasm files - let pattern_src_1 = format!("{}/massa_unit_tests/*.wasm", 
extract_folder.clone()); + Ok(format!( + "{}/massa_unit_tests/*.wasm", + extract_folder.clone() + )) +} + +/// Script input arguments +#[derive(Parser, Debug)] +#[command(author, version, about, long_about = None)] +struct Args { + /// Optional local pattern of the WASM files to copy + #[arg(short, long)] + local: Option, +} + +fn main() -> Result<(), Error> { + let args = Args::parse(); + + let pattern_src = if let Some(local_src) = args.local { + println!("Using local sources"); + local_src + } else { + download_src()? + }; let path_dst_base = Path::new(PATH_DST_BASE_1); - for entry in glob(&pattern_src_1).expect("Failed to read glob pattern (wasm)") { + for entry in glob(&pattern_src).expect("Failed to read glob pattern (wasm)") { match entry { Ok(path) => { let path_file_name = match path.file_name() { Some(fname) => fname, - None => { println!("Unable to extract file name from: {:?}", path); continue }, + None => { + println!("Unable to extract file name from: {:?}", path); + continue; + } }; let path_dst = path_dst_base.join(path_file_name); @@ -109,7 +145,7 @@ fn main() -> Result<(), std::io::Error> { if copy_res.is_err() { println!("Copy error: {:?}", copy_res); } - }, + } Err(e) => { println!("{:?}", e); }