From f1d88b4021eca53502143d1342fbc0d8c6b02ee6 Mon Sep 17 00:00:00 2001 From: Shashank <99187193+sudo-shashank@users.noreply.github.com> Date: Fri, 6 Dec 2024 13:18:16 +0530 Subject: [PATCH] Add V16 Builtin Actors (#350) --- Cargo.toml | 28 +- actors/account/Cargo.toml | 1 + actors/account/src/lib.rs | 1 + actors/account/src/v16/mod.rs | 20 + actors/account/src/v16/state.rs | 11 + actors/account/src/v16/types.rs | 29 + actors/cron/src/lib.rs | 1 + actors/cron/src/v16/mod.rs | 27 + actors/cron/src/v16/state.rs | 21 + actors/datacap/Cargo.toml | 1 + actors/datacap/src/lib.rs | 1 + actors/datacap/src/v16/mod.rs | 63 + actors/datacap/src/v16/state.rs | 37 + actors/datacap/src/v16/types.rs | 85 + actors/eam/src/lib.rs | 1 + actors/eam/src/v16/mod.rs | 44 + actors/ethaccount/src/lib.rs | 1 + actors/ethaccount/src/v16/mod.rs | 11 + actors/ethaccount/src/v16/types.rs | 10 + actors/evm/Cargo.toml | 1 + actors/evm/src/evm_shared/mod.rs | 1 + actors/evm/src/evm_shared/v16/address.rs | 178 ++ actors/evm/src/evm_shared/v16/mod.rs | 2 + actors/evm/src/evm_shared/v16/uints.rs | 339 ++++ actors/evm/src/lib.rs | 1 + actors/evm/src/v16/mod.rs | 38 + actors/evm/src/v16/state.rs | 168 ++ actors/evm/src/v16/types.rs | 67 + actors/init/src/lib.rs | 1 + actors/init/src/v16/mod.rs | 20 + actors/init/src/v16/state.rs | 99 ++ actors/init/src/v16/types.rs | 40 + actors/market/src/lib.rs | 1 + actors/market/src/v16/balance_table.rs | 119 ++ actors/market/src/v16/deal.rs | 150 ++ actors/market/src/v16/ext.rs | 168 ++ actors/market/src/v16/mod.rs | 61 + actors/market/src/v16/policy.rs | 74 + actors/market/src/v16/state.rs | 1514 +++++++++++++++++ actors/market/src/v16/types.rs | 281 +++ actors/miner/src/lib.rs | 1 + actors/miner/src/v16/beneficiary.rs | 78 + actors/miner/src/v16/bitfield_queue.rs | 142 ++ actors/miner/src/v16/commd.rs | 108 ++ actors/miner/src/v16/deadline_assignment.rs | 205 +++ actors/miner/src/v16/deadline_info.rs | 163 ++ actors/miner/src/v16/deadline_state.rs | 1439 
++++++++++++++++ actors/miner/src/v16/deadlines.rs | 157 ++ actors/miner/src/v16/expiration_queue.rs | 986 +++++++++++ actors/miner/src/v16/ext.rs | 201 +++ actors/miner/src/v16/mod.rs | 244 +++ actors/miner/src/v16/monies.rs | 364 ++++ actors/miner/src/v16/partition_state.rs | 951 +++++++++++ actors/miner/src/v16/policy.rs | 211 +++ actors/miner/src/v16/quantize.rs | 53 + actors/miner/src/v16/sector_map.rs | 177 ++ actors/miner/src/v16/sectors.rs | 161 ++ actors/miner/src/v16/state.rs | 1414 +++++++++++++++ actors/miner/src/v16/termination.rs | 52 + actors/miner/src/v16/types.rs | 681 ++++++++ actors/miner/src/v16/vesting_state.rs | 144 ++ actors/multisig/src/lib.rs | 1 + actors/multisig/src/v16/mod.rs | 28 + actors/multisig/src/v16/state.rs | 152 ++ actors/multisig/src/v16/types.rs | 158 ++ actors/paych/src/lib.rs | 1 + actors/paych/src/v16/ext.rs | 17 + actors/paych/src/v16/mod.rs | 25 + actors/paych/src/v16/state.rs | 56 + actors/paych/src/v16/types.rs | 128 ++ actors/power/src/lib.rs | 1 + actors/power/src/v16/ext.rs | 73 + actors/power/src/v16/mod.rs | 40 + actors/power/src/v16/policy.rs | 13 + actors/power/src/v16/state.rs | 471 +++++ actors/power/src/v16/types.rs | 105 ++ actors/reward/src/lib.rs | 1 + actors/reward/src/v16/expneg.rs | 49 + actors/reward/src/v16/ext.rs | 14 + actors/reward/src/v16/logic.rs | 318 ++++ actors/reward/src/v16/mod.rs | 33 + actors/reward/src/v16/state.rs | 201 +++ .../v16/testdata/TestBaselineReward.golden | 513 ++++++ .../src/v16/testdata/TestSimpleReward.golden | 513 ++++++ actors/reward/src/v16/types.rs | 29 + actors/system/src/lib.rs | 1 + actors/system/src/v16/mod.rs | 48 + actors/verifreg/src/lib.rs | 1 + actors/verifreg/src/v16/expiration.rs | 94 + actors/verifreg/src/v16/ext.rs | 46 + actors/verifreg/src/v16/mod.rs | 38 + actors/verifreg/src/v16/state.rs | 269 +++ actors/verifreg/src/v16/types.rs | 260 +++ fil_actors_shared/src/abi/commp.rs | 4 +- fil_actors_shared/src/lib.rs | 1 + 
fil_actors_shared/src/v16/actor_error.rs | 317 ++++ fil_actors_shared/src/v16/builtin/mod.rs | 11 + fil_actors_shared/src/v16/builtin/network.rs | 18 + .../src/v16/builtin/reward/math.rs | 23 + .../src/v16/builtin/reward/mod.rs | 18 + .../reward/smooth/alpha_beta_filter.rs | 311 ++++ .../src/v16/builtin/reward/smooth/mod.rs | 9 + .../v16/builtin/reward/smooth/smooth_func.rs | 99 ++ fil_actors_shared/src/v16/builtin/shared.rs | 162 ++ .../src/v16/builtin/singletons.rs | 33 + fil_actors_shared/src/v16/mod.rs | 92 + fil_actors_shared/src/v16/runtime/builtins.rs | 1 + fil_actors_shared/src/v16/runtime/empty.rs | 40 + fil_actors_shared/src/v16/runtime/mod.rs | 276 +++ fil_actors_shared/src/v16/runtime/policy.rs | 432 +++++ .../src/v16/runtime/randomness.rs | 52 + .../src/v16/util/batch_return.rs | 291 ++++ fil_actors_shared/src/v16/util/cbor.rs | 35 + fil_actors_shared/src/v16/util/downcast.rs | 118 ++ fil_actors_shared/src/v16/util/events.rs | 142 ++ fil_actors_shared/src/v16/util/map.rs | 253 +++ fil_actors_shared/src/v16/util/mapmap.rs | 170 ++ .../src/v16/util/message_accumulator.rs | 175 ++ fil_actors_shared/src/v16/util/mod.rs | 24 + fil_actors_shared/src/v16/util/multimap.rs | 118 ++ fil_actors_shared/src/v16/util/set.rs | 76 + .../src/v16/util/set_multimap.rs | 156 ++ fil_actors_shared/src/v16/vm_api/builtin.rs | 50 + fil_actors_shared/src/v16/vm_api/error.rs | 30 + fil_actors_shared/src/v16/vm_api/mod.rs | 269 +++ fil_actors_shared/src/v16/vm_api/trace.rs | 225 +++ 126 files changed, 19360 insertions(+), 16 deletions(-) create mode 100644 actors/account/src/v16/mod.rs create mode 100644 actors/account/src/v16/state.rs create mode 100644 actors/account/src/v16/types.rs create mode 100644 actors/cron/src/v16/mod.rs create mode 100644 actors/cron/src/v16/state.rs create mode 100644 actors/datacap/src/v16/mod.rs create mode 100644 actors/datacap/src/v16/state.rs create mode 100644 actors/datacap/src/v16/types.rs create mode 100644 actors/eam/src/v16/mod.rs create 
mode 100644 actors/ethaccount/src/v16/mod.rs create mode 100644 actors/ethaccount/src/v16/types.rs create mode 100644 actors/evm/src/evm_shared/v16/address.rs create mode 100644 actors/evm/src/evm_shared/v16/mod.rs create mode 100644 actors/evm/src/evm_shared/v16/uints.rs create mode 100644 actors/evm/src/v16/mod.rs create mode 100644 actors/evm/src/v16/state.rs create mode 100644 actors/evm/src/v16/types.rs create mode 100644 actors/init/src/v16/mod.rs create mode 100644 actors/init/src/v16/state.rs create mode 100644 actors/init/src/v16/types.rs create mode 100644 actors/market/src/v16/balance_table.rs create mode 100644 actors/market/src/v16/deal.rs create mode 100644 actors/market/src/v16/ext.rs create mode 100644 actors/market/src/v16/mod.rs create mode 100644 actors/market/src/v16/policy.rs create mode 100644 actors/market/src/v16/state.rs create mode 100644 actors/market/src/v16/types.rs create mode 100644 actors/miner/src/v16/beneficiary.rs create mode 100644 actors/miner/src/v16/bitfield_queue.rs create mode 100644 actors/miner/src/v16/commd.rs create mode 100644 actors/miner/src/v16/deadline_assignment.rs create mode 100644 actors/miner/src/v16/deadline_info.rs create mode 100644 actors/miner/src/v16/deadline_state.rs create mode 100644 actors/miner/src/v16/deadlines.rs create mode 100644 actors/miner/src/v16/expiration_queue.rs create mode 100644 actors/miner/src/v16/ext.rs create mode 100644 actors/miner/src/v16/mod.rs create mode 100644 actors/miner/src/v16/monies.rs create mode 100644 actors/miner/src/v16/partition_state.rs create mode 100644 actors/miner/src/v16/policy.rs create mode 100644 actors/miner/src/v16/quantize.rs create mode 100644 actors/miner/src/v16/sector_map.rs create mode 100644 actors/miner/src/v16/sectors.rs create mode 100644 actors/miner/src/v16/state.rs create mode 100644 actors/miner/src/v16/termination.rs create mode 100644 actors/miner/src/v16/types.rs create mode 100644 actors/miner/src/v16/vesting_state.rs create mode 100644 
actors/multisig/src/v16/mod.rs create mode 100644 actors/multisig/src/v16/state.rs create mode 100644 actors/multisig/src/v16/types.rs create mode 100644 actors/paych/src/v16/ext.rs create mode 100644 actors/paych/src/v16/mod.rs create mode 100644 actors/paych/src/v16/state.rs create mode 100644 actors/paych/src/v16/types.rs create mode 100644 actors/power/src/v16/ext.rs create mode 100644 actors/power/src/v16/mod.rs create mode 100644 actors/power/src/v16/policy.rs create mode 100644 actors/power/src/v16/state.rs create mode 100644 actors/power/src/v16/types.rs create mode 100644 actors/reward/src/v16/expneg.rs create mode 100644 actors/reward/src/v16/ext.rs create mode 100644 actors/reward/src/v16/logic.rs create mode 100644 actors/reward/src/v16/mod.rs create mode 100644 actors/reward/src/v16/state.rs create mode 100644 actors/reward/src/v16/testdata/TestBaselineReward.golden create mode 100644 actors/reward/src/v16/testdata/TestSimpleReward.golden create mode 100644 actors/reward/src/v16/types.rs create mode 100644 actors/system/src/v16/mod.rs create mode 100644 actors/verifreg/src/v16/expiration.rs create mode 100644 actors/verifreg/src/v16/ext.rs create mode 100644 actors/verifreg/src/v16/mod.rs create mode 100644 actors/verifreg/src/v16/state.rs create mode 100644 actors/verifreg/src/v16/types.rs create mode 100644 fil_actors_shared/src/v16/actor_error.rs create mode 100644 fil_actors_shared/src/v16/builtin/mod.rs create mode 100644 fil_actors_shared/src/v16/builtin/network.rs create mode 100644 fil_actors_shared/src/v16/builtin/reward/math.rs create mode 100644 fil_actors_shared/src/v16/builtin/reward/mod.rs create mode 100644 fil_actors_shared/src/v16/builtin/reward/smooth/alpha_beta_filter.rs create mode 100644 fil_actors_shared/src/v16/builtin/reward/smooth/mod.rs create mode 100644 fil_actors_shared/src/v16/builtin/reward/smooth/smooth_func.rs create mode 100644 fil_actors_shared/src/v16/builtin/shared.rs create mode 100644 
fil_actors_shared/src/v16/builtin/singletons.rs create mode 100644 fil_actors_shared/src/v16/mod.rs create mode 100644 fil_actors_shared/src/v16/runtime/builtins.rs create mode 100644 fil_actors_shared/src/v16/runtime/empty.rs create mode 100644 fil_actors_shared/src/v16/runtime/mod.rs create mode 100644 fil_actors_shared/src/v16/runtime/policy.rs create mode 100644 fil_actors_shared/src/v16/runtime/randomness.rs create mode 100644 fil_actors_shared/src/v16/util/batch_return.rs create mode 100644 fil_actors_shared/src/v16/util/cbor.rs create mode 100644 fil_actors_shared/src/v16/util/downcast.rs create mode 100644 fil_actors_shared/src/v16/util/events.rs create mode 100644 fil_actors_shared/src/v16/util/map.rs create mode 100644 fil_actors_shared/src/v16/util/mapmap.rs create mode 100644 fil_actors_shared/src/v16/util/message_accumulator.rs create mode 100644 fil_actors_shared/src/v16/util/mod.rs create mode 100644 fil_actors_shared/src/v16/util/multimap.rs create mode 100644 fil_actors_shared/src/v16/util/set.rs create mode 100644 fil_actors_shared/src/v16/util/set_multimap.rs create mode 100644 fil_actors_shared/src/v16/vm_api/builtin.rs create mode 100644 fil_actors_shared/src/v16/vm_api/error.rs create mode 100644 fil_actors_shared/src/v16/vm_api/mod.rs create mode 100644 fil_actors_shared/src/v16/vm_api/trace.rs diff --git a/Cargo.toml b/Cargo.toml index 933187f0..c6cd2083 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,7 +21,7 @@ members = [ resolver = "2" [workspace.package] -version = "18.0.0" +version = "19.0.0" license = "MIT OR Apache-2.0" repository = "https://github.com/ChainSafe/fil-actor-states" authors = [ @@ -81,18 +81,18 @@ toml = "0.8" uint = { version = "0.10", default-features = false } unsigned-varint = "0.8" -fil_actor_account_state = { version = "18.0.0", path = "./actors/account" } -fil_actor_cron_state = { version = "18.0.0", path = "./actors/cron" } -fil_actor_datacap_state = { version = "18.0.0", path = "./actors/datacap" } 
-fil_actor_evm_state = { version = "18.0.0", path = "./actors/evm" } -fil_actor_init_state = { version = "18.0.0", path = "./actors/init" } -fil_actor_market_state = { version = "18.0.0", path = "./actors/market" } -fil_actor_miner_state = { version = "18.0.0", path = "./actors/miner" } -fil_actor_multisig_state = { version = "18.0.0", path = "./actors/multisig" } -fil_actor_power_state = { version = "18.0.0", path = "./actors/power" } -fil_actor_reward_state = { version = "18.0.0", path = "./actors/reward" } -fil_actor_system_state = { version = "18.0.0", path = "./actors/system" } -fil_actor_verifreg_state = { version = "18.0.0", path = "./actors/verifreg" } -fil_actors_shared = { version = "18.0.0", path = "./fil_actors_shared" } +fil_actor_account_state = { version = "19.0.0", path = "./actors/account" } +fil_actor_cron_state = { version = "19.0.0", path = "./actors/cron" } +fil_actor_datacap_state = { version = "19.0.0", path = "./actors/datacap" } +fil_actor_evm_state = { version = "19.0.0", path = "./actors/evm" } +fil_actor_init_state = { version = "19.0.0", path = "./actors/init" } +fil_actor_market_state = { version = "19.0.0", path = "./actors/market" } +fil_actor_miner_state = { version = "19.0.0", path = "./actors/miner" } +fil_actor_multisig_state = { version = "19.0.0", path = "./actors/multisig" } +fil_actor_power_state = { version = "19.0.0", path = "./actors/power" } +fil_actor_reward_state = { version = "19.0.0", path = "./actors/reward" } +fil_actor_system_state = { version = "19.0.0", path = "./actors/system" } +fil_actor_verifreg_state = { version = "19.0.0", path = "./actors/verifreg" } +fil_actors_shared = { version = "19.0.0", path = "./fil_actors_shared" } fil_actors_test_utils = { path = "./fil_actors_test_utils" } diff --git a/actors/account/Cargo.toml b/actors/account/Cargo.toml index 34b81027..b33ca0eb 100644 --- a/actors/account/Cargo.toml +++ b/actors/account/Cargo.toml @@ -9,6 +9,7 @@ version.workspace = true keywords.workspace = 
true [dependencies] +frc42_dispatch = { workspace = true } frc42_macros = { workspace = true } fvm_ipld_encoding = { workspace = true } fvm_shared = { workspace = true } diff --git a/actors/account/src/lib.rs b/actors/account/src/lib.rs index f95f7f59..26e0c7c7 100644 --- a/actors/account/src/lib.rs +++ b/actors/account/src/lib.rs @@ -7,5 +7,6 @@ pub mod v12; pub mod v13; pub mod v14; pub mod v15; +pub mod v16; pub mod v8; pub mod v9; diff --git a/actors/account/src/v16/mod.rs b/actors/account/src/v16/mod.rs new file mode 100644 index 00000000..8d31970d --- /dev/null +++ b/actors/account/src/v16/mod.rs @@ -0,0 +1,20 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +pub use self::state::State; +use fvm_shared4::METHOD_CONSTRUCTOR; +use num_derive::FromPrimitive; + +mod state; +pub mod types; + +/// Account actor methods available +#[derive(FromPrimitive)] +#[repr(u64)] +pub enum Method { + Constructor = METHOD_CONSTRUCTOR, + PubkeyAddress = 2, + // Deprecated in v10 + // AuthenticateMessage = 3, + AuthenticateMessageExported = frc42_dispatch::method_hash!("AuthenticateMessage"), +} diff --git a/actors/account/src/v16/state.rs b/actors/account/src/v16/state.rs new file mode 100644 index 00000000..92dd4fd6 --- /dev/null +++ b/actors/account/src/v16/state.rs @@ -0,0 +1,11 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use fvm_ipld_encoding::tuple::*; +use fvm_shared4::address::Address; + +/// State includes the address for the actor +#[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone)] +pub struct State { + pub address: Address, +} diff --git a/actors/account/src/v16/types.rs b/actors/account/src/v16/types.rs new file mode 100644 index 00000000..716cbb83 --- /dev/null +++ b/actors/account/src/v16/types.rs @@ -0,0 +1,29 @@ +use fvm_ipld_encoding::strict_bytes; +use fvm_ipld_encoding::tuple::*; +use fvm_shared4::address::Address; + +#[derive(Debug, Serialize_tuple, 
Deserialize_tuple)] +#[serde(transparent)] +pub struct ConstructorParams { + pub address: Address, +} + +#[derive(Debug, Serialize_tuple, Deserialize_tuple)] +#[serde(transparent)] +pub struct PubkeyAddressReturn { + pub address: Address, +} + +#[derive(Debug, Serialize_tuple, Deserialize_tuple)] +pub struct AuthenticateMessageParams { + #[serde(with = "strict_bytes")] + pub signature: Vec, + #[serde(with = "strict_bytes")] + pub message: Vec, +} + +#[derive(Debug, Serialize_tuple, Deserialize_tuple)] +#[serde(transparent)] +pub struct AuthenticateMessageReturn { + pub authenticated: bool, +} diff --git a/actors/cron/src/lib.rs b/actors/cron/src/lib.rs index f95f7f59..26e0c7c7 100644 --- a/actors/cron/src/lib.rs +++ b/actors/cron/src/lib.rs @@ -7,5 +7,6 @@ pub mod v12; pub mod v13; pub mod v14; pub mod v15; +pub mod v16; pub mod v8; pub mod v9; diff --git a/actors/cron/src/v16/mod.rs b/actors/cron/src/v16/mod.rs new file mode 100644 index 00000000..a1f0c16f --- /dev/null +++ b/actors/cron/src/v16/mod.rs @@ -0,0 +1,27 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use fvm_ipld_encoding::tuple::*; + +use fvm_shared4::METHOD_CONSTRUCTOR; +use num_derive::FromPrimitive; + +pub use self::state::{Entry, State}; + +mod state; + +/// Cron actor methods available +#[derive(FromPrimitive)] +#[repr(u64)] +pub enum Method { + Constructor = METHOD_CONSTRUCTOR, + EpochTick = 2, +} + +/// Constructor parameters for Cron actor, contains entries +/// of actors and methods to call on each epoch +#[derive(Default, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct ConstructorParams { + /// Entries is a set of actors (and corresponding methods) to call during EpochTick. 
+ pub entries: Vec, +} diff --git a/actors/cron/src/v16/state.rs b/actors/cron/src/v16/state.rs new file mode 100644 index 00000000..8e3339fb --- /dev/null +++ b/actors/cron/src/v16/state.rs @@ -0,0 +1,21 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use fvm_ipld_encoding::tuple::*; +use fvm_shared4::address::Address; +use fvm_shared4::MethodNum; + +/// Cron actor state which holds entries to call during epoch tick +#[derive(Default, Serialize_tuple, Deserialize_tuple, Clone, Debug)] +pub struct State { + /// Entries is a set of actors (and corresponding methods) to call during EpochTick. + pub entries: Vec, +} + +#[derive(Clone, PartialEq, Eq, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct Entry { + /// The actor to call (ID address) + pub receiver: Address, + /// The method number to call (must accept empty parameters) + pub method_num: MethodNum, +} diff --git a/actors/datacap/Cargo.toml b/actors/datacap/Cargo.toml index d14a52ec..2258c1f5 100644 --- a/actors/datacap/Cargo.toml +++ b/actors/datacap/Cargo.toml @@ -10,6 +10,7 @@ keywords.workspace = true [dependencies] fil_actors_shared = { workspace = true } +frc42_dispatch = { workspace = true } frc42_macros = { workspace = true } frc46_token = { workspace = true } fvm_ipld_blockstore = { workspace = true } diff --git a/actors/datacap/src/lib.rs b/actors/datacap/src/lib.rs index 6f5e5948..0af589b3 100644 --- a/actors/datacap/src/lib.rs +++ b/actors/datacap/src/lib.rs @@ -7,4 +7,5 @@ pub mod v12; pub mod v13; pub mod v14; pub mod v15; +pub mod v16; pub mod v9; diff --git a/actors/datacap/src/v16/mod.rs b/actors/datacap/src/v16/mod.rs new file mode 100644 index 00000000..b2ee099d --- /dev/null +++ b/actors/datacap/src/v16/mod.rs @@ -0,0 +1,63 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use frc46_token::token::TOKEN_PRECISION; +use fvm_shared4::bigint::BigInt; +use fvm_shared4::econ::TokenAmount; +use 
fvm_shared4::METHOD_CONSTRUCTOR; +use lazy_static::lazy_static; +use num_derive::FromPrimitive; + +pub use self::state::State; +pub use self::types::*; + +mod state; +mod types; + +pub const DATACAP_GRANULARITY: u64 = TOKEN_PRECISION; + +lazy_static! { + // > 800 EiB + pub static ref INFINITE_ALLOWANCE: TokenAmount = TokenAmount::from_atto( + BigInt::from(TOKEN_PRECISION) + * BigInt::from(1_000_000_000_000_000_000_000_i128) + ); +} + +/// Datacap actor methods available +#[derive(FromPrimitive)] +#[repr(u64)] +pub enum Method { + Constructor = METHOD_CONSTRUCTOR, + // Deprecated in v10 + // Mint = 2, + // Destroy = 3, + // Name = 10, + // Symbol = 11, + // TotalSupply = 12, + // BalanceOf = 13, + // Transfer = 14, + // TransferFrom = 15, + // IncreaseAllowance = 16, + // DecreaseAllowance = 17, + // RevokeAllowance = 18, + // Burn = 19, + // BurnFrom = 20, + // Allowance = 21, + // Method numbers derived from FRC-0042 standards + MintExported = frc42_dispatch::method_hash!("Mint"), + DestroyExported = frc42_dispatch::method_hash!("Destroy"), + NameExported = frc42_dispatch::method_hash!("Name"), + SymbolExported = frc42_dispatch::method_hash!("Symbol"), + GranularityExported = frc42_dispatch::method_hash!("Granularity"), + TotalSupplyExported = frc42_dispatch::method_hash!("TotalSupply"), + BalanceExported = frc42_dispatch::method_hash!("Balance"), + TransferExported = frc42_dispatch::method_hash!("Transfer"), + TransferFromExported = frc42_dispatch::method_hash!("TransferFrom"), + IncreaseAllowanceExported = frc42_dispatch::method_hash!("IncreaseAllowance"), + DecreaseAllowanceExported = frc42_dispatch::method_hash!("DecreaseAllowance"), + RevokeAllowanceExported = frc42_dispatch::method_hash!("RevokeAllowance"), + BurnExported = frc42_dispatch::method_hash!("Burn"), + BurnFromExported = frc42_dispatch::method_hash!("BurnFrom"), + AllowanceExported = frc42_dispatch::method_hash!("Allowance"), +} diff --git a/actors/datacap/src/v16/state.rs 
b/actors/datacap/src/v16/state.rs new file mode 100644 index 00000000..01758c38 --- /dev/null +++ b/actors/datacap/src/v16/state.rs @@ -0,0 +1,37 @@ +use frc46_token::token; +use fvm_ipld_blockstore::Blockstore; +use fvm_ipld_encoding::tuple::*; +use fvm_shared4::address::Address; +use fvm_shared4::econ::TokenAmount; +use fvm_shared4::error::ExitCode; +use fvm_shared4::ActorID; + +use fil_actors_shared::v16::{ActorError, AsActorError}; + +#[derive(Debug, Serialize_tuple, Deserialize_tuple)] +pub struct State { + pub governor: Address, + pub token: token::state::TokenState, +} + +impl State { + pub fn new(store: &BS, governor: Address) -> Result { + let token_state = token::state::TokenState::new(store) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to create token state")?; + Ok(State { + governor, + token: token_state, + }) + } + + // Visible for testing + pub fn balance( + &self, + bs: &BS, + owner: ActorID, + ) -> Result { + self.token + .get_balance(bs, owner) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to get balance") + } +} diff --git a/actors/datacap/src/v16/types.rs b/actors/datacap/src/v16/types.rs new file mode 100644 index 00000000..56374f24 --- /dev/null +++ b/actors/datacap/src/v16/types.rs @@ -0,0 +1,85 @@ +use fvm_ipld_encoding::tuple::*; +use fvm_shared4::address::Address; +use fvm_shared4::econ::TokenAmount; + +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +#[serde(transparent)] +pub struct ConstructorParams { + pub governor: Address, +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +#[serde(transparent)] +pub struct NameReturn { + pub name: String, +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +#[serde(transparent)] +pub struct SymbolReturn { + pub symbol: String, +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +#[serde(transparent)] +pub struct TotalSupplyReturn { + pub supply: TokenAmount, +} + 
+#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +#[serde(transparent)] +pub struct BalanceParams { + pub address: Address, +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +#[serde(transparent)] +pub struct BalanceReturn { + pub balance: TokenAmount, +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +#[serde(transparent)] +pub struct GetAllowanceReturn { + pub allowance: TokenAmount, +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +#[serde(transparent)] +pub struct IncreaseAllowanceReturn { + pub new_allowance: TokenAmount, +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +#[serde(transparent)] +pub struct DecreaseAllowanceReturn { + pub new_allowance: TokenAmount, +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +#[serde(transparent)] +pub struct RevokeAllowanceReturn { + pub old_allowance: TokenAmount, +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct MintParams { + // Recipient of the newly minted tokens. + pub to: Address, + // Amount of tokens to mint. + pub amount: TokenAmount, + // Addresses to be granted effectively-infinite operator allowance for the recipient. + pub operators: Vec
, +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct DestroyParams { + pub owner: Address, + pub amount: TokenAmount, +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +#[serde(transparent)] +pub struct GranularityReturn { + pub granularity: u64, +} diff --git a/actors/eam/src/lib.rs b/actors/eam/src/lib.rs index 3bcfd0ba..a8af966e 100644 --- a/actors/eam/src/lib.rs +++ b/actors/eam/src/lib.rs @@ -7,3 +7,4 @@ pub mod v12; pub mod v13; pub mod v14; pub mod v15; +pub mod v16; diff --git a/actors/eam/src/v16/mod.rs b/actors/eam/src/v16/mod.rs new file mode 100644 index 00000000..88abd8f1 --- /dev/null +++ b/actors/eam/src/v16/mod.rs @@ -0,0 +1,44 @@ +use fil_actor_evm_state::evm_shared::v15::address::EthAddress; +use fvm_ipld_encoding::{strict_bytes, tuple::*}; +use fvm_shared4::{address::Address, ActorID, METHOD_CONSTRUCTOR}; +use num_derive::FromPrimitive; +use serde::{Deserialize, Serialize}; + +#[derive(FromPrimitive)] +#[repr(u64)] +pub enum Method { + Constructor = METHOD_CONSTRUCTOR, + Create = 2, + Create2 = 3, + CreateExternal = 4, +} + +#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct CreateParams { + #[serde(with = "strict_bytes")] + pub initcode: Vec, + pub nonce: u64, +} + +#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct Create2Params { + #[serde(with = "strict_bytes")] + pub initcode: Vec, + #[serde(with = "strict_bytes")] + pub salt: [u8; 32], +} + +#[derive(Serialize, Deserialize)] +#[serde(transparent)] +pub struct CreateExternalParams(#[serde(with = "strict_bytes")] pub Vec); + +#[derive(Serialize_tuple, Deserialize_tuple, Debug, PartialEq, Eq)] +pub struct Return { + pub actor_id: ActorID, + pub robust_address: Option
, + pub eth_address: EthAddress, +} + +pub type CreateReturn = Return; +pub type Create2Return = Return; +pub type CreateExternalReturn = Return; diff --git a/actors/ethaccount/src/lib.rs b/actors/ethaccount/src/lib.rs index 3bcfd0ba..a8af966e 100644 --- a/actors/ethaccount/src/lib.rs +++ b/actors/ethaccount/src/lib.rs @@ -7,3 +7,4 @@ pub mod v12; pub mod v13; pub mod v14; pub mod v15; +pub mod v16; diff --git a/actors/ethaccount/src/v16/mod.rs b/actors/ethaccount/src/v16/mod.rs new file mode 100644 index 00000000..b02aeff0 --- /dev/null +++ b/actors/ethaccount/src/v16/mod.rs @@ -0,0 +1,11 @@ +pub mod types; + +use fvm_shared4::METHOD_CONSTRUCTOR; +use num_derive::FromPrimitive; + +/// Ethereum Account actor methods. +#[derive(FromPrimitive)] +#[repr(u64)] +pub enum Method { + Constructor = METHOD_CONSTRUCTOR, +} diff --git a/actors/ethaccount/src/v16/types.rs b/actors/ethaccount/src/v16/types.rs new file mode 100644 index 00000000..04910748 --- /dev/null +++ b/actors/ethaccount/src/v16/types.rs @@ -0,0 +1,10 @@ +use fvm_ipld_encoding::strict_bytes; +use fvm_ipld_encoding::tuple::*; + +#[derive(Debug, Serialize_tuple, Deserialize_tuple)] +pub struct AuthenticateMessageParams { + #[serde(with = "strict_bytes")] + pub signature: Vec, + #[serde(with = "strict_bytes")] + pub message: Vec, +} diff --git a/actors/evm/Cargo.toml b/actors/evm/Cargo.toml index 39f7cbdc..44cec3a4 100644 --- a/actors/evm/Cargo.toml +++ b/actors/evm/Cargo.toml @@ -11,6 +11,7 @@ keywords.workspace = true [dependencies] cid = { workspace = true } fil_actors_shared = { workspace = true } +frc42_dispatch = { workspace = true } frc42_macros = { workspace = true } fvm_ipld_encoding = { workspace = true } fvm_shared3 = { workspace = true } diff --git a/actors/evm/src/evm_shared/mod.rs b/actors/evm/src/evm_shared/mod.rs index 3bcfd0ba..a8af966e 100644 --- a/actors/evm/src/evm_shared/mod.rs +++ b/actors/evm/src/evm_shared/mod.rs @@ -7,3 +7,4 @@ pub mod v12; pub mod v13; pub mod v14; pub mod v15; +pub 
mod v16; diff --git a/actors/evm/src/evm_shared/v16/address.rs b/actors/evm/src/evm_shared/v16/address.rs new file mode 100644 index 00000000..8731d7ea --- /dev/null +++ b/actors/evm/src/evm_shared/v16/address.rs @@ -0,0 +1,178 @@ +use super::uints::U256; +use fil_actors_shared::v16::EAM_ACTOR_ID; +use fvm_ipld_encoding::{serde, strict_bytes}; +use fvm_shared4::address::Address; +use fvm_shared4::ActorID; + +/// A Filecoin address as represented in the FEVM runtime (also called EVM-form). +#[derive(serde::Deserialize, serde::Serialize, PartialEq, Eq, Clone, Copy)] +pub struct EthAddress(#[serde(with = "strict_bytes")] pub [u8; 20]); + +/// Converts a U256 to an EthAddress by taking the lower 20 bytes. +/// +/// Per the EVM spec, this simply discards the high bytes. +impl From for EthAddress { + fn from(v: U256) -> Self { + let bytes = v.to_big_endian(); + Self(bytes[12..].try_into().unwrap()) + } +} + +impl std::fmt::Debug for EthAddress { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str(&hex::encode(self.0)) + } +} + +impl From for Address { + fn from(addr: EthAddress) -> Self { + From::from(&addr) + } +} + +impl From<&EthAddress> for Address { + fn from(addr: &EthAddress) -> Self { + if let Some(id) = addr.as_id() { + Address::new_id(id) + } else { + Address::new_delegated(EAM_ACTOR_ID, addr.as_ref()).unwrap() + } + } +} + +impl EthAddress { + /// Returns a "null" address. + pub const fn null() -> Self { + Self([0u8; 20]) + } + + /// Returns an EVM-form ID address from actor ID. + pub fn from_id(id: u64) -> EthAddress { + let mut bytes = [0u8; 20]; + bytes[0] = 0xff; + bytes[12..].copy_from_slice(&id.to_be_bytes()); + EthAddress(bytes) + } + + /// Interpret the EVM word as an ID address in EVM-form, and return a Filecoin ID address if + /// that's the case. + /// + /// An ID address starts with 0xff (msb), and contains the u64 in the last 8 bytes. 
+ /// We assert that everything in between are 0x00, otherwise we've gotten an illegal address. + /// + /// 0 1-11 12 + /// 0xff \[0x00...] [id address...] + pub fn as_id(&self) -> Option { + if !self.is_id() { + return None; + } + Some(u64::from_be_bytes(self.0[12..].try_into().unwrap())) + } + + /// Returns this Address as an EVM word. + #[inline] + pub fn as_evm_word(&self) -> U256 { + U256::from_big_endian(&self.0) + } + + /// Returns true if this is the null/zero EthAddress. + #[inline] + pub fn is_null(&self) -> bool { + self.0 == [0; 20] + } + + /// Returns true if the EthAddress refers to an address in the precompile range. + /// [reference](https://github.com/filecoin-project/ref-fvm/issues/1164#issuecomment-1371304676) + #[inline] + pub fn is_precompile(&self) -> bool { + // Exact index is not checked since it is unknown to the EAM what precompiles exist in the EVM actor. + // 0 indexes of both ranges are not assignable as well but are _not_ precompile address. + let [prefix, middle @ .., _index] = self.0; + (prefix == 0xfe || prefix == 0x00) && middle == [0u8; 18] + } + + /// Returns true if the EthAddress is an actor ID embedded in an eth address. + #[inline] + pub fn is_id(&self) -> bool { + self.0[0] == 0xff && self.0[1..12].iter().all(|&i| i == 0) + } +} + +impl AsRef<[u8]> for EthAddress { + fn as_ref(&self) -> &[u8] { + &self.0 + } +} + +#[cfg(test)] +mod tests { + use super::super::uints::U256; + use super::EthAddress; + + // padding (12 bytes) + const TYPE_PADDING: &[u8] = &[0; 12]; + // ID address marker (1 byte) + const ID_ADDRESS_MARKER: &[u8] = &[0xff]; + // ID address marker (1 byte) + const GOOD_ADDRESS_PADDING: &[u8] = &[ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + ]; // padding for inner u64 (11 bytes) + + macro_rules! 
id_address_test { + ($($name:ident: $input:expr => $expectation:expr,)*) => { + $( + #[test] + fn $name() { + let evm_bytes = $input.concat(); + let evm_addr = EthAddress::from(U256::from_big_endian(evm_bytes.as_slice())); + assert_eq!( + evm_addr.as_id(), + $expectation + ); + + // test inverse conversion, if a valid ID address was supplied + if let Some(fil_id) = $expectation { + assert_eq!(EthAddress::from_id(fil_id), evm_addr); + } + } + )* + }; + } + + id_address_test! { + good_address_1: [ + TYPE_PADDING, + ID_ADDRESS_MARKER, + GOOD_ADDRESS_PADDING, + vec![0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01].as_slice() // ID address (u64 big endian) (8 bytes) + ] => Some(1), + + good_address_2: [ + TYPE_PADDING, + ID_ADDRESS_MARKER, + GOOD_ADDRESS_PADDING, + vec![0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff].as_slice() // ID address (u64 big endian) (8 bytes) + ] => Some(u16::MAX as u64), + + bad_marker: [ + TYPE_PADDING, + &[0xfa], + GOOD_ADDRESS_PADDING, + vec![0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01].as_slice() // ID address (u64 big endian) (8 bytes) + ] => None, + + bad_padding: [ + TYPE_PADDING, + ID_ADDRESS_MARKER, + &[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01], // bad padding + vec![0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01].as_slice() // ID address (u64 big endian) (8 bytes) + ] => None, + + bad_marker_and_padding: [ + TYPE_PADDING, + &[0xfa], + &[0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01], // bad padding + vec![0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01].as_slice() // ID address (u64 big endian) (8 bytes) + ] => None, + } +} diff --git a/actors/evm/src/evm_shared/v16/mod.rs b/actors/evm/src/evm_shared/v16/mod.rs new file mode 100644 index 00000000..f74d12fc --- /dev/null +++ b/actors/evm/src/evm_shared/v16/mod.rs @@ -0,0 +1,2 @@ +pub mod address; +pub mod uints; diff --git a/actors/evm/src/evm_shared/v16/uints.rs b/actors/evm/src/evm_shared/v16/uints.rs new file mode 100644 index 
00000000..50222e8a --- /dev/null +++ b/actors/evm/src/evm_shared/v16/uints.rs @@ -0,0 +1,339 @@ +// to silence construct_uint! clippy warnings +// see https://github.com/paritytech/parity-common/issues/660 +#![allow(clippy::ptr_offset_with_cast, clippy::assign_op_pattern)] + +#[doc(inline)] +pub use uint::byteorder; + +use serde::{Deserialize, Serialize}; +//use substrate_bn::arith; + +use { + fvm_shared4::bigint::BigInt, fvm_shared4::econ::TokenAmount, std::cmp::Ordering, std::fmt, + uint::construct_uint, +}; + +construct_uint! { pub struct U256(4); } // ethereum word size +construct_uint! { pub struct U512(8); } // used for addmod and mulmod opcodes + +// Convenience method for comparing against a small value. +impl PartialOrd for U256 { + fn partial_cmp(&self, other: &u64) -> Option { + if self.0[3] > 0 || self.0[2] > 0 || self.0[1] > 0 { + Some(Ordering::Greater) + } else { + self.0[0].partial_cmp(other) + } + } +} + +impl PartialEq for U256 { + fn eq(&self, other: &u64) -> bool { + self.0[0] == *other && self.0[1] == 0 && self.0[2] == 0 && self.0[3] == 0 + } +} + +impl U256 { + pub const BITS: u32 = 256; + pub const ZERO: Self = U256::from_u64(0); + pub const ONE: Self = U256::from_u64(1); + pub const I256_MIN: Self = U256([0, 0, 0, i64::MIN as u64]); + + #[inline(always)] + pub const fn from_u128_words(high: u128, low: u128) -> U256 { + U256([ + low as u64, + (low >> u64::BITS) as u64, + high as u64, + (high >> u64::BITS) as u64, + ]) + } + + #[inline(always)] + pub const fn from_u64(value: u64) -> U256 { + U256([value, 0, 0, 0]) + } + + #[inline(always)] + pub const fn i256_is_negative(&self) -> bool { + (self.0[3] as i64) < 0 + } + + /// turns a i256 value to negative + #[inline(always)] + pub fn i256_neg(&self) -> U256 { + if self.is_zero() { + U256::ZERO + } else { + !*self + U256::ONE + } + } + + #[inline(always)] + pub fn i256_cmp(&self, other: &U256) -> Ordering { + // true > false: + // - true < positive: + match 
other.i256_is_negative().cmp(&self.i256_is_negative()) { + Ordering::Equal => self.cmp(other), + sign_cmp => sign_cmp, + } + } + + #[inline] + pub fn i256_div(&self, other: &U256) -> U256 { + if self.is_zero() || other.is_zero() { + // EVM defines X/0 to be 0. + return U256::ZERO; + } + + let mut first = *self; + let mut second = *other; + + // Record and strip the signs. We add them back at the end. + let first_neg = first.i256_is_negative(); + let second_neg = second.i256_is_negative(); + + if first_neg { + first = first.i256_neg() + } + + if second_neg { + second = second.i256_neg() + } + + let d = first / second; + + // Flip the sign back if necessary. + if d.is_zero() || first_neg == second_neg { + d + } else { + d.i256_neg() + } + } + + #[inline] + pub fn i256_mod(&self, other: &U256) -> U256 { + if self.is_zero() || other.is_zero() { + // X % 0 or 0 % X is always 0. + return U256::ZERO; + } + + let mut first = *self; + let mut second = *other; + + // Record and strip the sign. + let negative = first.i256_is_negative(); + if negative { + first = first.i256_neg(); + } + + if second.i256_is_negative() { + second = second.i256_neg() + } + + let r = first % second; + + // Restore the sign. + if negative && !r.is_zero() { + r.i256_neg() + } else { + r + } + } + + pub fn to_bytes(&self) -> [u8; 32] { + self.to_big_endian() + } + + /// Returns the low 64 bits, saturating the value to u64 max if it is larger + pub fn to_u64_saturating(&self) -> u64 { + if self.bits() > 64 { + u64::MAX + } else { + self.0[0] + } + } +} + +impl U512 { + pub fn low_u256(&self) -> U256 { + let [a, b, c, d, ..] 
= self.0; + U256([a, b, c, d]) + } +} + +impl From<&TokenAmount> for U256 { + fn from(amount: &TokenAmount) -> U256 { + let (_, bytes) = amount.atto().to_bytes_be(); + U256::from_big_endian(bytes.as_slice()) + } +} + +impl From for U512 { + fn from(v: U256) -> Self { + let [a, b, c, d] = v.0; + U512([a, b, c, d, 0, 0, 0, 0]) + } +} + +impl From<&U256> for TokenAmount { + fn from(ui: &U256) -> TokenAmount { + let bits = ui.to_big_endian(); + TokenAmount::from_atto(BigInt::from_bytes_be( + fvm_shared4::bigint::Sign::Plus, + &bits, + )) + } +} + +impl Serialize for U256 { + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + let bytes = self.to_big_endian(); + serializer.serialize_bytes(zeroless_view(&bytes)) + } +} + +impl<'de> Deserialize<'de> for U256 { + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + struct Visitor; + impl serde::de::Visitor<'_> for Visitor { + type Value = U256; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + write!(formatter, "at most 32 bytes") + } + + fn visit_bytes(self, v: &[u8]) -> Result + where + E: serde::de::Error, + { + if v.len() > 32 { + return Err(serde::de::Error::invalid_length(v.len(), &self)); + } + Ok(U256::from_big_endian(v)) + } + } + deserializer.deserialize_bytes(Visitor) + } +} + +fn zeroless_view(v: &impl AsRef<[u8]>) -> &[u8] { + let v = v.as_ref(); + &v[v.iter().take_while(|&&b| b == 0).count()..] 
+} + +#[cfg(test)] +mod tests { + use fvm_ipld_encoding::{BytesDe, BytesSer, RawBytes}; + + use {super::*, core::num::Wrapping}; + + #[test] + fn div_i256() { + assert_eq!(Wrapping(i8::MIN) / Wrapping(-1), Wrapping(i8::MIN)); + assert_eq!(i8::MAX / -1, -i8::MAX); + + let zero = U256::ZERO; + let one = U256::ONE; + let one_hundred = U256::from(100); + let fifty = U256::from(50); + let two = U256::from(2); + let neg_one_hundred = U256::from(100); + let minus_one = U256::from(1); + let max_value = U256::from(2).pow(255.into()) - 1; + let neg_max_value = U256::from(2).pow(255.into()) - 1; + + assert_eq!(U256::I256_MIN.i256_div(&minus_one), U256::I256_MIN); + assert_eq!(U256::I256_MIN.i256_div(&one), U256::I256_MIN); + assert_eq!( + U256::I256_MIN.i256_div(&two), + U256([0, 0, 0, i64::MIN as u64 + (i64::MIN as u64 >> 1)]) + ); + assert_eq!(one.i256_div(&U256::I256_MIN), zero); + assert_eq!(max_value.i256_div(&one), max_value); + assert_eq!(max_value.i256_div(&minus_one), neg_max_value); + assert_eq!(one_hundred.i256_div(&minus_one), neg_one_hundred); + assert_eq!(one_hundred.i256_div(&two), fifty); + + assert_eq!(zero.i256_div(&zero), zero); + assert_eq!(one.i256_div(&zero), zero); + assert_eq!(zero.i256_div(&one), zero); + } + + #[test] + fn mod_i256() { + let zero = U256::ZERO; + let one = U256::ONE; + let one_hundred = U256::from(100); + let two = U256::from(2); + let three = U256::from(3); + + let neg_one_hundred = U256::from(100).i256_neg(); + let minus_one = U256::from(1).i256_neg(); + let neg_three = U256::from(3).i256_neg(); + let max_value = U256::from(2).pow(255.into()) - 1; + + // zero + assert_eq!(minus_one.i256_mod(&U256::ZERO), U256::ZERO); + assert_eq!(max_value.i256_mod(&U256::ZERO), U256::ZERO); + assert_eq!(U256::ZERO.i256_mod(&U256::ZERO), U256::ZERO); + + assert_eq!(minus_one.i256_mod(&two), minus_one); + assert_eq!(U256::I256_MIN.i256_mod(&one), 0); + assert_eq!(one.i256_mod(&U256::I256_MIN), one); + assert_eq!(one.i256_mod(&U256::from(i128::MAX)), 
one); + + assert_eq!(max_value.i256_mod(&minus_one), zero); + assert_eq!(neg_one_hundred.i256_mod(&minus_one), zero); + assert_eq!(one_hundred.i256_mod(&two), zero); + assert_eq!(one_hundred.i256_mod(&neg_three), one); + + assert_eq!(neg_one_hundred.i256_mod(&three), minus_one); + + let a = U256::from(95).i256_neg(); + let b = U256::from(256); + assert_eq!(a % b, U256::from(161)) + } + + #[test] + fn negative_i256() { + assert_eq!(U256::ZERO.i256_neg(), U256::ZERO); + + let one = U256::ONE.i256_neg(); + assert!(one.i256_is_negative()); + + let neg_one = U256::from_big_endian(&[0xff; 32]); + let pos_one = neg_one.i256_neg(); + assert_eq!(pos_one, U256::ONE); + } + + #[test] + fn u256_serde() { + let encoded = RawBytes::serialize(U256::from(0x4d2)).unwrap(); + let BytesDe(bytes) = encoded.deserialize().unwrap(); + assert_eq!(bytes, &[0x04, 0xd2]); + let decoded: U256 = encoded.deserialize().unwrap(); + assert_eq!(decoded, 0x4d2); + } + + #[test] + fn u256_empty() { + let encoded = RawBytes::serialize(U256::from(0)).unwrap(); + let BytesDe(bytes) = encoded.deserialize().unwrap(); + assert!(bytes.is_empty()); + } + + #[test] + fn u256_overflow() { + let encoded = RawBytes::serialize(BytesSer(&[1; 33])).unwrap(); + encoded + .deserialize::() + .expect_err("should have failed to decode an over-large u256"); + } +} diff --git a/actors/evm/src/lib.rs b/actors/evm/src/lib.rs index f64e6d3b..414099a2 100644 --- a/actors/evm/src/lib.rs +++ b/actors/evm/src/lib.rs @@ -8,3 +8,4 @@ pub mod v12; pub mod v13; pub mod v14; pub mod v15; +pub mod v16; diff --git a/actors/evm/src/v16/mod.rs b/actors/evm/src/v16/mod.rs new file mode 100644 index 00000000..904d527d --- /dev/null +++ b/actors/evm/src/v16/mod.rs @@ -0,0 +1,38 @@ +use fvm_shared4::error::ExitCode; + +use fvm_shared4::METHOD_CONSTRUCTOR; +use num_derive::FromPrimitive; + +pub use types::*; + +mod state; +mod types; + +pub use state::*; + +pub const EVM_CONTRACT_REVERTED: ExitCode = ExitCode::new(33); +pub const 
EVM_CONTRACT_INVALID_INSTRUCTION: ExitCode = ExitCode::new(34); +pub const EVM_CONTRACT_UNDEFINED_INSTRUCTION: ExitCode = ExitCode::new(35); +pub const EVM_CONTRACT_STACK_UNDERFLOW: ExitCode = ExitCode::new(36); +pub const EVM_CONTRACT_STACK_OVERFLOW: ExitCode = ExitCode::new(37); +pub const EVM_CONTRACT_ILLEGAL_MEMORY_ACCESS: ExitCode = ExitCode::new(38); +pub const EVM_CONTRACT_BAD_JUMPDEST: ExitCode = ExitCode::new(39); +pub const EVM_CONTRACT_SELFDESTRUCT_FAILED: ExitCode = ExitCode::new(40); + +pub const EVM_MAX_RESERVED_METHOD: u64 = 1023; +pub const NATIVE_METHOD_SIGNATURE: &str = "handle_filecoin_method(uint64,uint64,bytes)"; +pub const NATIVE_METHOD_SELECTOR: [u8; 4] = [0x86, 0x8e, 0x10, 0xc4]; + +pub const EVM_WORD_SIZE: usize = 32; + +#[derive(FromPrimitive)] +#[repr(u64)] +pub enum Method { + Constructor = METHOD_CONSTRUCTOR, + Resurrect = 2, + GetBytecode = 3, + GetBytecodeHash = 4, + GetStorageAt = 5, + InvokeContractDelegate = 6, + InvokeContract = frc42_dispatch::method_hash!("InvokeEVM"), +} diff --git a/actors/evm/src/v16/state.rs b/actors/evm/src/v16/state.rs new file mode 100644 index 00000000..9c44d33e --- /dev/null +++ b/actors/evm/src/v16/state.rs @@ -0,0 +1,168 @@ +use std::array::TryFromSliceError; + +use crate::evm_shared::v16::uints::U256; +use fvm_shared4::ActorID; + +use cid::Cid; +use fvm_ipld_encoding::strict_bytes; +use fvm_ipld_encoding::tuple::*; +use serde::{Deserialize, Serialize}; + +/// A tombstone indicating that the contract has been self-destructed. +#[derive(Copy, Clone, Debug, Eq, PartialEq, Serialize_tuple, Deserialize_tuple)] +pub struct Tombstone { + /// The message origin when this actor was self-destructed. + pub origin: ActorID, + /// The message nonce when this actor was self-destructed. + pub nonce: u64, +} + +/// A Keccak256 digest of EVM bytecode. 
+#[derive(Deserialize, Serialize, Clone, Copy, Eq, PartialEq)] +#[serde(transparent)] +pub struct BytecodeHash(#[serde(with = "strict_bytes")] [u8; 32]); + +impl std::fmt::Debug for BytecodeHash { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_tuple("BytecodeHash") + .field(&format_args!("{}", self)) + .finish() + } +} + +impl std::fmt::Display for BytecodeHash { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + if f.alternate() { + write!(f, "0x")?; + } + for b in self.0 { + write!(f, "{b:02X}")?; + } + Ok(()) + } +} + +impl BytecodeHash { + pub const ZERO: Self = Self([0; 32]); + + /// Keccak256 hash of `[0xfe]`, "native bytecode" + pub const NATIVE_ACTOR: Self = Self(hex_literal::hex!( + "bcc90f2d6dada5b18e155c17a1c0a55920aae94f39857d39d0d8ed07ae8f228b" + )); + + /// Keccak256 hash of `[]`, empty bytecode + pub const EMPTY: Self = Self(hex_literal::hex!( + "c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470" + )); + + pub fn as_slice(&self) -> &[u8] { + &self.0 + } +} + +impl From<[u8; 32]> for BytecodeHash { + fn from(digest: [u8; 32]) -> Self { + BytecodeHash(digest) + } +} + +impl From for [u8; 32] { + fn from(digest: BytecodeHash) -> Self { + digest.0 + } +} + +impl From for Vec { + fn from(digest: BytecodeHash) -> Self { + digest.0.into() + } +} + +impl From for U256 { + fn from(bytecode: BytecodeHash) -> Self { + let bytes: [u8; 32] = bytecode.into(); + Self::from_big_endian(&bytes) + } +} + +impl TryFrom<&[u8]> for BytecodeHash { + type Error = TryFromSliceError; + + fn try_from(value: &[u8]) -> Result { + Ok(Self(value.try_into()?)) + } +} + +/// Data stored by an EVM contract. +/// This runs on the fvm-evm-runtime actor code cid. +#[derive(Debug, Serialize_tuple, Deserialize_tuple)] +pub struct State { + /// The EVM contract bytecode resulting from calling the + /// initialization code by the constructor. 
+ pub bytecode: Cid, + + /// The EVM contract bytecode hash keccak256(bytecode) + pub bytecode_hash: BytecodeHash, + + /// The EVM contract state dictionary. + /// All eth contract state is a map of U256 -> U256 values. + /// + /// KAMT + pub contract_state: Cid, + + /// The EVM nonce used to track how many times CREATE or CREATE2 have been called. + pub nonce: u64, + + /// Possibly a tombstone if this actor has been self-destructed. + /// + /// In the EVM, self-destructed contracts are "alive" until the current top-level transaction + /// ends. We track this by recording the origin and nonce. + /// + /// Specifically: + /// + /// 1. On SELFDESTRUCT, they mark themselves as "deleted" (by setting a tombstone with the + /// current origin/nonce), send away all funds, and return immediately. + /// 2. For the rest of the current transaction (as long as the tombstone's origin/nonce matches + /// the currently executing top-level transaction) , the contract continues to behave + /// normally. + /// 3. After the current transaction ends, the contract behaves as if it were an "empty" + /// contract, kind of like an embryo. At this point, the contract can be "resurrected" + /// (recreated) by via CREATE/CREATE2. + /// + /// See https://github.com/filecoin-project/ref-fvm/issues/1174 for some context. 
+ pub tombstone: Option, +} + +#[cfg(test)] +mod test { + use fvm_ipld_encoding::{from_slice, to_vec, BytesDe}; + + use crate::v16::BytecodeHash; + + #[test] + fn test_bytecode_hash_serde() { + let encoded = to_vec(&BytecodeHash::EMPTY).unwrap(); + let BytesDe(decoded) = from_slice(&encoded).unwrap(); + assert_eq!( + BytecodeHash::try_from(&decoded[..]).unwrap(), + BytecodeHash::EMPTY + ); + } + + #[test] + fn test_bytecode_hash_format() { + assert_eq!( + BytecodeHash::ZERO.to_string(), + "0000000000000000000000000000000000000000000000000000000000000000" + ); + assert_eq!( + format!("{:#}", BytecodeHash::ZERO), + "0x0000000000000000000000000000000000000000000000000000000000000000" + ); + + assert_eq!( + format!("{:?}", BytecodeHash::ZERO), + "BytecodeHash(0000000000000000000000000000000000000000000000000000000000000000)" + ); + } +} diff --git a/actors/evm/src/v16/types.rs b/actors/evm/src/v16/types.rs new file mode 100644 index 00000000..b954c240 --- /dev/null +++ b/actors/evm/src/v16/types.rs @@ -0,0 +1,67 @@ +use crate::evm_shared::v16::address::EthAddress; +use crate::evm_shared::v16::uints::U256; +use cid::Cid; +use fvm_ipld_encoding::strict_bytes; +use fvm_ipld_encoding::tuple::*; +use fvm_ipld_encoding::RawBytes; +use fvm_shared4::econ::TokenAmount; + +#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct ConstructorParams { + /// The actor's "creator" (specified by the EAM). + pub creator: EthAddress, + /// The initcode that will construct the new EVM actor. 
+ pub initcode: RawBytes, +} + +pub type ResurrectParams = ConstructorParams; + +#[derive(Default, Serialize_tuple, Deserialize_tuple)] +#[serde(transparent)] +pub struct InvokeContractParams { + #[serde(with = "strict_bytes")] + pub input_data: Vec, +} + +#[derive(Serialize_tuple, Deserialize_tuple)] +#[serde(transparent)] +pub struct InvokeContractReturn { + #[serde(with = "strict_bytes")] + pub output_data: Vec, +} + +#[derive(Serialize_tuple, Deserialize_tuple)] +#[serde(transparent)] +pub struct BytecodeReturn { + pub code: Option, +} + +#[derive(Serialize_tuple, Deserialize_tuple)] +#[serde(transparent)] +pub struct GetStorageAtReturn { + pub storage: U256, +} + +#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct DelegateCallParams { + pub code: Cid, + /// The contract invocation parameters + #[serde(with = "strict_bytes")] + pub input: Vec, + /// The original caller's Eth address. + pub caller: EthAddress, + /// The value passed in the original call. + pub value: TokenAmount, +} + +#[derive(Serialize_tuple, Deserialize_tuple)] +#[serde(transparent)] +pub struct DelegateCallReturn { + #[serde(with = "strict_bytes")] + pub return_data: Vec, +} + +#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct GetStorageAtParams { + pub storage_key: U256, +} diff --git a/actors/init/src/lib.rs b/actors/init/src/lib.rs index 7ad80ec1..af8adbad 100644 --- a/actors/init/src/lib.rs +++ b/actors/init/src/lib.rs @@ -8,5 +8,6 @@ pub mod v12; pub mod v13; pub mod v14; pub mod v15; +pub mod v16; pub mod v8; pub mod v9; diff --git a/actors/init/src/v16/mod.rs b/actors/init/src/v16/mod.rs new file mode 100644 index 00000000..7129a09d --- /dev/null +++ b/actors/init/src/v16/mod.rs @@ -0,0 +1,20 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use fvm_shared4::METHOD_CONSTRUCTOR; +use num_derive::FromPrimitive; + +pub use self::state::*; +pub use self::types::*; + +mod state; +mod types; + +/// Init actor methods available 
+#[derive(FromPrimitive)] +#[repr(u64)] +pub enum Method { + Constructor = METHOD_CONSTRUCTOR, + Exec = 2, + Exec4 = 3, +} diff --git a/actors/init/src/v16/state.rs b/actors/init/src/v16/state.rs new file mode 100644 index 00000000..597768a3 --- /dev/null +++ b/actors/init/src/v16/state.rs @@ -0,0 +1,99 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use cid::Cid; +use fvm_ipld_blockstore::Blockstore; +use fvm_ipld_encoding::tuple::*; +use fvm_shared4::address::{Address, Protocol}; +use fvm_shared4::ActorID; + +use fil_actors_shared::actor_error_v16; +use fil_actors_shared::v16::{ActorError, Map2, DEFAULT_HAMT_CONFIG, FIRST_NON_SINGLETON_ADDR}; + +#[derive(Serialize_tuple, Deserialize_tuple, Clone, Debug)] +pub struct State { + /// HAMT[Address]ActorID + pub address_map: Cid, + pub next_id: ActorID, + pub network_name: String, +} + +pub type AddressMap = Map2; + +impl State { + pub fn new(store: &BS, network_name: String) -> Result { + let empty = AddressMap::flush_empty(store, DEFAULT_HAMT_CONFIG)?; + Ok(Self { + address_map: empty, + next_id: FIRST_NON_SINGLETON_ADDR, + network_name, + }) + } + + /// Maps argument addresses to to a new or existing actor ID. + /// With no delegated address, or if the delegated address is not already mapped, + /// allocates a new ID address and maps both to it. + /// If the delegated address is already present, maps the robust address to that actor ID. + /// Fails if the robust address is already mapped. The assignment of an ID to an address is one-time-only, even if the actor at that ID is deleted. + /// Returns the actor ID and a boolean indicating whether or not the actor already exists. 
+ pub fn map_addresses_to_id( + &mut self, + store: &BS, + robust_addr: &Address, + delegated_addr: Option<&Address>, + ) -> Result<(ActorID, bool), ActorError> { + let mut map = AddressMap::load(store, &self.address_map, DEFAULT_HAMT_CONFIG, "addresses")?; + let (id, existing) = if let Some(delegated_addr) = delegated_addr { + // If there's a delegated address, either recall the already-mapped actor ID or + // create and map a new one. + if let Some(existing_id) = map.get(delegated_addr)? { + (*existing_id, true) + } else { + let new_id = self.next_id; + self.next_id += 1; + map.set(delegated_addr, new_id)?; + (new_id, false) + } + } else { + // With no delegated address, always create a new actor ID. + let new_id = self.next_id; + self.next_id += 1; + (new_id, false) + }; + + // Map the robust address to the ID, failing if it's already mapped to anything. + let is_new = map.set_if_absent(robust_addr, id)?; + if !is_new { + return Err(actor_error_v16!( + forbidden, + "robust address {} is already allocated in the address map", + robust_addr + )); + } + self.address_map = map.flush()?; + Ok((id, existing)) + } + + /// ResolveAddress resolves an address to an ID-address, if possible. + /// If the provided address is an ID address, it is returned as-is. + /// This means that mapped ID-addresses (which should only appear as values, not keys) and + /// singleton actor addresses (which are not in the map) pass through unchanged. + /// + /// Returns an ID-address and `true` if the address was already an ID-address or was resolved + /// in the mapping. + /// Returns an undefined address and `false` if the address was not an ID-address and not found + /// in the mapping. + /// Returns an error only if state was inconsistent. 
+ pub fn resolve_address( + &self, + store: &BS, + addr: &Address, + ) -> Result, ActorError> { + if addr.protocol() == Protocol::ID { + return Ok(Some(*addr)); + } + let map = AddressMap::load(store, &self.address_map, DEFAULT_HAMT_CONFIG, "addresses")?; + let found = map.get(addr)?; + Ok(found.copied().map(Address::new_id)) + } +} diff --git a/actors/init/src/v16/types.rs b/actors/init/src/v16/types.rs new file mode 100644 index 00000000..ab117823 --- /dev/null +++ b/actors/init/src/v16/types.rs @@ -0,0 +1,40 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use cid::Cid; +use fvm_ipld_encoding::tuple::*; +use fvm_ipld_encoding::RawBytes; +use fvm_shared4::address::Address; + +/// Init actor Constructor parameters +#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct ConstructorParams { + pub network_name: String, +} + +/// Init actor Exec Params +#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct ExecParams { + pub code_cid: Cid, + pub constructor_params: RawBytes, +} + +/// Init actor Exec Return value +#[derive(Debug, Serialize_tuple, Deserialize_tuple)] +pub struct ExecReturn { + /// ID based address for created actor + pub id_address: Address, + /// Reorg safe address for actor + pub robust_address: Address, +} + +/// Init actor Exec4 Params +#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct Exec4Params { + pub code_cid: Cid, + pub constructor_params: RawBytes, + pub subaddress: RawBytes, +} + +/// Init actor Exec4 Return value +pub type Exec4Return = ExecReturn; diff --git a/actors/market/src/lib.rs b/actors/market/src/lib.rs index f95f7f59..26e0c7c7 100644 --- a/actors/market/src/lib.rs +++ b/actors/market/src/lib.rs @@ -7,5 +7,6 @@ pub mod v12; pub mod v13; pub mod v14; pub mod v15; +pub mod v16; pub mod v8; pub mod v9; diff --git a/actors/market/src/v16/balance_table.rs b/actors/market/src/v16/balance_table.rs new file mode 100644 index 00000000..6e776d32 --- /dev/null +++ 
b/actors/market/src/v16/balance_table.rs @@ -0,0 +1,119 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use cid::Cid; +use fvm_ipld_blockstore::Blockstore; +use fvm_shared4::address::Address; +use fvm_shared4::econ::TokenAmount; +use num_traits::Zero; + +use fil_actors_shared::actor_error_v16; +use fil_actors_shared::v16::{ActorContext, ActorError, Config, Map2, DEFAULT_HAMT_CONFIG}; + +/// Balance table which handles getting and updating token balances specifically +pub struct BalanceTable(pub Map2); + +const CONF: Config = Config { + bit_width: 6, + ..DEFAULT_HAMT_CONFIG +}; + +impl BalanceTable +where + BS: Blockstore, +{ + /// Initializes a new empty balance table + pub fn new(bs: BS, name: &'static str) -> Self { + Self(Map2::empty(bs, CONF, name)) + } + + /// Initializes a balance table from a root Cid + pub fn from_root(bs: BS, cid: &Cid, name: &'static str) -> Result { + Ok(Self(Map2::load(bs, cid, CONF, name)?)) + } + + /// Retrieve root from balance table + pub fn root(&mut self) -> Result { + self.0.flush() + } + + /// Gets token amount for given address in balance table + pub fn get(&self, key: &Address) -> Result { + if let Some(v) = self.0.get(key)? { + Ok(v.clone()) + } else { + Ok(TokenAmount::zero()) + } + } + + /// Adds token amount to previously initialized account. + pub fn add(&mut self, key: &Address, value: &TokenAmount) -> Result<(), ActorError> { + let prev = self.get(key)?; + let sum = &prev + value; + if sum.is_negative() { + Err(actor_error_v16!( + illegal_argument, + "negative balance for {} adding {} to {}", + key, + value, + prev + )) + } else if sum.is_zero() && !prev.is_zero() { + self.0.delete(key).context("adding balance")?; + Ok(()) + } else { + self.0.set(key, sum).context("adding balance")?; + Ok(()) + } + } + + /// Subtracts up to the specified amount from a balance, without reducing the balance + /// below some minimum. + /// Returns the amount subtracted (always positive or zero). 
+ pub fn subtract_with_minimum( + &mut self, + key: &Address, + req: &TokenAmount, + floor: &TokenAmount, + ) -> Result { + let prev = self.get(key)?; + let available = std::cmp::max(TokenAmount::zero(), prev - floor); + let sub: TokenAmount = std::cmp::min(&available, req).clone(); + + if sub.is_positive() { + self.add(key, &-sub.clone()) + .context("subtracting balance")?; + } + + Ok(sub) + } + + /// Subtracts value from a balance, and errors if full amount was not substracted. + pub fn must_subtract(&mut self, key: &Address, req: &TokenAmount) -> Result<(), ActorError> { + let prev = self.get(key)?; + + if req > &prev { + Err(actor_error_v16!( + illegal_argument, + "negative balance for {} subtracting {} from {}", + key, + req, + prev + )) + } else { + self.add(key, &-req) + } + } + + /// Returns total balance held by this balance table + #[allow(dead_code)] + pub fn total(&self) -> Result { + let mut total = TokenAmount::zero(); + self.0.for_each(|_, v: &TokenAmount| { + total += v; + Ok(()) + })?; + + Ok(total) + } +} diff --git a/actors/market/src/v16/deal.rs b/actors/market/src/v16/deal.rs new file mode 100644 index 00000000..6e7d785f --- /dev/null +++ b/actors/market/src/v16/deal.rs @@ -0,0 +1,150 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use cid::{Cid, Version}; +use fvm_ipld_encoding::tuple::*; +use fvm_ipld_encoding::BytesSer; +use fvm_shared4::address::Address; +use fvm_shared4::clock::ChainEpoch; +use fvm_shared4::commcid::{FIL_COMMITMENT_UNSEALED, SHA2_256_TRUNC254_PADDED}; +use fvm_shared4::crypto::signature::Signature; +use fvm_shared4::econ::TokenAmount; +use fvm_shared4::piece::PaddedPieceSize; +use fvm_shared4::sector::SectorNumber; +use ipld_core::ipld::Ipld; +use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; +use std::convert::{TryFrom, TryInto}; + +/// Cid prefix for piece Cids +pub fn is_piece_cid(c: &Cid) -> bool { + // TODO: Move FIL_COMMITMENT etc, into a better place + 
c.version() == Version::V1 + && c.codec() == FIL_COMMITMENT_UNSEALED + && c.hash().code() == SHA2_256_TRUNC254_PADDED + && c.hash().size() == 32 +} + +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum Label { + String(String), + Bytes(Vec), +} + +/// Serialize the Label like an untagged enum. +impl Serialize for Label { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + match self { + Label::String(v) => v.serialize(serializer), + Label::Bytes(v) => BytesSer(v).serialize(serializer), + } + } +} + +impl TryFrom for Label { + type Error = String; + + fn try_from(ipld: Ipld) -> Result { + match ipld { + Ipld::String(s) => Ok(Label::String(s)), + Ipld::Bytes(b) => Ok(Label::Bytes(b)), + other => Err(format!( + "Expected `Ipld::String` or `Ipld::Bytes`, got {:#?}", + other + )), + } + } +} + +/// Deserialize the Label like an untagged enum. +impl<'de> Deserialize<'de> for Label { + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + Ipld::deserialize(deserializer).and_then(|ipld| ipld.try_into().map_err(de::Error::custom)) + } +} + +impl Label { + pub fn len(&self) -> usize { + match self { + Label::String(s) => s.len(), + Label::Bytes(b) => b.len(), + } + } + + pub fn is_empty(&self) -> bool { + match self { + Label::String(s) => s.is_empty(), + Label::Bytes(b) => b.is_empty(), + } + } +} + +/// Note: Deal Collateral is only released and returned to clients and miners +/// when the storage deal stops counting towards power. In the current iteration, +/// it will be released when the sector containing the storage deals expires, +/// even though some storage deals can expire earlier than the sector does. +/// Collaterals are denominated in PerEpoch to incur a cost for self dealing or +/// minimal deals that last for a long time. +/// Note: ClientCollateralPerEpoch may not be needed and removed pending future confirmation. +/// There will be a Minimum value for both client and provider deal collateral. 
+#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct DealProposal { + pub piece_cid: Cid, + pub piece_size: PaddedPieceSize, + pub verified_deal: bool, + pub client: Address, + pub provider: Address, + + /// Arbitrary client chosen label to apply to the deal + pub label: Label, + + // Nominal start epoch. Deal payment is linear between StartEpoch and EndEpoch, + // with total amount StoragePricePerEpoch * (EndEpoch - StartEpoch). + // Storage deal must appear in a sealed (proven) sector no later than StartEpoch, + // otherwise it is invalid. + pub start_epoch: ChainEpoch, + pub end_epoch: ChainEpoch, + pub storage_price_per_epoch: TokenAmount, + + pub provider_collateral: TokenAmount, + pub client_collateral: TokenAmount, +} + +impl DealProposal { + pub fn duration(&self) -> ChainEpoch { + self.end_epoch - self.start_epoch + } + pub fn total_storage_fee(&self) -> TokenAmount { + self.storage_price_per_epoch.clone() * self.duration() as u64 + } + pub fn client_balance_requirement(&self) -> TokenAmount { + &self.client_collateral + self.total_storage_fee() + } + pub fn provider_balance_requirement(&self) -> &TokenAmount { + &self.provider_collateral + } +} + +/// ClientDealProposal is a DealProposal signed by a client +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct ClientDealProposal { + pub proposal: DealProposal, + pub client_signature: Signature, +} + +#[derive(Clone, Debug, PartialEq, Eq, Copy, Serialize_tuple, Deserialize_tuple)] +pub struct DealState { + // 0 if not yet included in proven sector (0 is also a valid sector number) + pub sector_number: SectorNumber, + // -1 if not yet included in proven sector + pub sector_start_epoch: ChainEpoch, + // -1 if deal state never updated + pub last_updated_epoch: ChainEpoch, + // -1 if deal never slashed + pub slash_epoch: ChainEpoch, +} diff --git a/actors/market/src/v16/ext.rs b/actors/market/src/v16/ext.rs new file mode 100644 index 
00000000..8d46f5c9 --- /dev/null +++ b/actors/market/src/v16/ext.rs @@ -0,0 +1,168 @@ +use fvm_ipld_encoding::strict_bytes; +use fvm_ipld_encoding::tuple::*; +use fvm_shared4::address::Address; +use fvm_shared4::bigint::bigint_ser; +use fvm_shared4::econ::TokenAmount; +use fvm_shared4::sector::StoragePower; + +use fil_actors_shared::v16::reward::FilterEstimate; + +pub mod account { + use super::*; + + pub const AUTHENTICATE_MESSAGE_METHOD: u64 = + frc42_dispatch::method_hash!("AuthenticateMessage"); + + #[derive(Serialize_tuple, Deserialize_tuple)] + pub struct AuthenticateMessageParams { + #[serde(with = "strict_bytes")] + pub signature: Vec, + #[serde(with = "strict_bytes")] + pub message: Vec, + } +} + +pub mod miner { + use super::*; + use cid::Cid; + use fvm_ipld_encoding::RawBytes; + use fvm_shared4::clock::ChainEpoch; + use fvm_shared4::piece::PaddedPieceSize; + use fvm_shared4::sector::SectorNumber; + use fvm_shared4::MethodNum; + + pub const CONTROL_ADDRESSES_METHOD: u64 = 2; + pub const IS_CONTROLLING_ADDRESS_EXPORTED: u64 = + frc42_dispatch::method_hash!("IsControllingAddress"); + pub const SECTOR_CONTENT_CHANGED: MethodNum = + frc42_dispatch::method_hash!("SectorContentChanged"); + + #[derive(Serialize_tuple, Deserialize_tuple)] + pub struct GetControlAddressesReturnParams { + pub owner: Address, + pub worker: Address, + pub control_addresses: Vec
, + } + + #[derive(Serialize_tuple, Deserialize_tuple)] + #[serde(transparent)] + pub struct IsControllingAddressReturn { + pub is_controlling: bool, + } + + #[derive(Serialize_tuple, Deserialize_tuple)] + #[serde(transparent)] + pub struct IsControllingAddressParam { + pub address: Address, + } + + #[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] + #[serde(transparent)] + pub struct SectorContentChangedParams { + pub sectors: Vec, + } + + #[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] + pub struct SectorChanges { + pub sector: SectorNumber, + pub minimum_commitment_epoch: ChainEpoch, + pub added: Vec, + } + + #[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] + pub struct PieceChange { + pub data: Cid, + pub size: PaddedPieceSize, + pub payload: RawBytes, + } + + #[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] + #[serde(transparent)] + pub struct SectorContentChangedReturn { + pub sectors: Vec, + } + + #[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] + #[serde(transparent)] + pub struct SectorReturn { + pub added: Vec, + } + + #[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] + #[serde(transparent)] + pub struct PieceReturn { + pub accepted: bool, + } +} + +pub mod verifreg { + use super::*; + use cid::Cid; + use fil_actors_shared::v16::BatchReturn; + use fvm_shared4::clock::ChainEpoch; + use fvm_shared4::piece::PaddedPieceSize; + use fvm_shared4::ActorID; + + pub type AllocationID = u64; + pub type ClaimID = u64; + + #[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] + pub struct AllocationRequest { + pub provider: ActorID, + pub data: Cid, + pub size: PaddedPieceSize, + pub term_min: ChainEpoch, + pub term_max: ChainEpoch, + pub expiration: ChainEpoch, + } + + #[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] + pub struct ClaimExtensionRequest { + pub provider: 
ActorID, + pub claim: ClaimID, + pub term_max: ChainEpoch, + } + + #[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] + pub struct AllocationRequests { + pub allocations: Vec, + pub extensions: Vec, + } + + #[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] + pub struct AllocationsResponse { + // Result for each allocation request. + pub allocation_results: BatchReturn, + // Result for each extension request. + pub extension_results: BatchReturn, + // IDs of new allocations created. + pub new_allocations: Vec, + } +} + +pub mod datacap { + pub const BALANCE_OF_METHOD: u64 = frc42_dispatch::method_hash!("Balance"); + pub const TRANSFER_FROM_METHOD: u64 = frc42_dispatch::method_hash!("TransferFrom"); +} + +pub mod reward { + pub const THIS_EPOCH_REWARD_METHOD: u64 = 3; +} + +pub mod power { + use super::*; + + pub const CURRENT_TOTAL_POWER_METHOD: u64 = 9; + + #[derive(Serialize_tuple, Deserialize_tuple)] + pub struct CurrentTotalPowerReturn { + #[serde(with = "bigint_ser")] + pub raw_byte_power: StoragePower, + #[serde(with = "bigint_ser")] + pub quality_adj_power: StoragePower, + pub pledge_collateral: TokenAmount, + pub quality_adj_power_smoothed: FilterEstimate, + pub ramp_start_epoch: i64, + pub ramp_duration_epochs: u64, + } +} diff --git a/actors/market/src/v16/mod.rs b/actors/market/src/v16/mod.rs new file mode 100644 index 00000000..c57e0e8b --- /dev/null +++ b/actors/market/src/v16/mod.rs @@ -0,0 +1,61 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use fil_actors_shared::v16::FIRST_ACTOR_SPECIFIC_EXIT_CODE; +use fvm_shared4::error::ExitCode; +use fvm_shared4::METHOD_CONSTRUCTOR; +use num_derive::FromPrimitive; + +pub use self::deal::*; +pub use self::state::*; +pub use self::types::*; + +// exports for testing +pub mod balance_table; +#[doc(hidden)] +pub mod ext; +pub mod policy; + +mod deal; +mod state; +mod types; + +pub const NO_ALLOCATION_ID: u64 = 0; + +// 
Indicates that information about a past deal is no longer available. +pub const EX_DEAL_EXPIRED: ExitCode = ExitCode::new(FIRST_ACTOR_SPECIFIC_EXIT_CODE); +// Indicates that information about a deal's activation is not yet available. +pub const EX_DEAL_NOT_ACTIVATED: ExitCode = ExitCode::new(FIRST_ACTOR_SPECIFIC_EXIT_CODE + 1); + +/// Market actor methods available +#[derive(FromPrimitive)] +#[repr(u64)] +pub enum Method { + Constructor = METHOD_CONSTRUCTOR, + AddBalance = 2, + WithdrawBalance = 3, + PublishStorageDeals = 4, + VerifyDealsForActivation = 5, + BatchActivateDeals = 6, + OnMinerSectorsTerminate = 7, + // ComputeDataCommitment = 8, // Deprecated + CronTick = 9, + // Method numbers derived from FRC-0042 standards + AddBalanceExported = frc42_dispatch::method_hash!("AddBalance"), + WithdrawBalanceExported = frc42_dispatch::method_hash!("WithdrawBalance"), + PublishStorageDealsExported = frc42_dispatch::method_hash!("PublishStorageDeals"), + GetBalanceExported = frc42_dispatch::method_hash!("GetBalance"), + GetDealDataCommitmentExported = frc42_dispatch::method_hash!("GetDealDataCommitment"), + GetDealClientExported = frc42_dispatch::method_hash!("GetDealClient"), + GetDealProviderExported = frc42_dispatch::method_hash!("GetDealProvider"), + GetDealLabelExported = frc42_dispatch::method_hash!("GetDealLabel"), + GetDealTermExported = frc42_dispatch::method_hash!("GetDealTerm"), + GetDealTotalPriceExported = frc42_dispatch::method_hash!("GetDealTotalPrice"), + GetDealClientCollateralExported = frc42_dispatch::method_hash!("GetDealClientCollateral"), + GetDealProviderCollateralExported = frc42_dispatch::method_hash!("GetDealProviderCollateral"), + GetDealVerifiedExported = frc42_dispatch::method_hash!("GetDealVerified"), + GetDealActivationExported = frc42_dispatch::method_hash!("GetDealActivation"), + GetDealSectorExported = frc42_dispatch::method_hash!("GetDealSector"), + SettleDealPaymentsExported = frc42_dispatch::method_hash!("SettleDealPayments"), + 
SectorContentChangedExported = ext::miner::SECTOR_CONTENT_CHANGED, +} diff --git a/actors/market/src/v16/policy.rs b/actors/market/src/v16/policy.rs new file mode 100644 index 00000000..46cfc2d4 --- /dev/null +++ b/actors/market/src/v16/policy.rs @@ -0,0 +1,74 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::cmp::max; + +use fil_actors_shared::v16::network::EPOCHS_IN_DAY; +use fil_actors_shared::v16::runtime::Policy; +use fvm_shared4::bigint::{BigInt, Integer}; +use fvm_shared4::clock::ChainEpoch; +use fvm_shared4::econ::TokenAmount; +use fvm_shared4::piece::PaddedPieceSize; +use fvm_shared4::sector::StoragePower; +use lazy_static::lazy_static; +use num_traits::Zero; + +pub mod detail { + /// Maximum length of a deal label. + pub const DEAL_MAX_LABEL_SIZE: usize = 256; +} + +lazy_static! { + /// Total (assumed) Filecoin available to the network. This is only used to bound the maximum + /// deal collateral and price. + pub static ref TOTAL_FILECOIN: TokenAmount = TokenAmount::from_whole(2_000_000_000); +} + +/// Bounds (inclusive) on deal duration. 
+pub(super) fn _deal_duration_bounds(_size: PaddedPieceSize) -> (ChainEpoch, ChainEpoch) { + (180 * EPOCHS_IN_DAY, 1278 * EPOCHS_IN_DAY) +} + +pub(super) fn _deal_price_per_epoch_bounds( + _size: PaddedPieceSize, + _duration: ChainEpoch, +) -> (TokenAmount, &'static TokenAmount) { + (TokenAmount::zero(), &TOTAL_FILECOIN) +} + +pub fn deal_provider_collateral_bounds( + policy: &Policy, + size: PaddedPieceSize, + network_raw_power: &StoragePower, + baseline_power: &StoragePower, + network_circulating_supply: &TokenAmount, +) -> (TokenAmount, TokenAmount) { + // minimumProviderCollateral = ProviderCollateralSupplyTarget * normalizedCirculatingSupply + // normalizedCirculatingSupply = networkCirculatingSupply * dealPowerShare + // dealPowerShare = dealRawPower / max(BaselinePower(t), NetworkRawPower(t), dealRawPower) + + let lock_target_num = network_circulating_supply * policy.prov_collateral_percent_supply_num; + let power_share_num = BigInt::from(size.0); + let power_share_denom = max(max(network_raw_power, baseline_power), &power_share_num).clone(); + + let num: BigInt = power_share_num * lock_target_num.atto(); + let denom: BigInt = power_share_denom * policy.prov_collateral_percent_supply_denom; + ( + TokenAmount::from_atto(num.div_floor(&denom)), + TOTAL_FILECOIN.clone(), + ) +} + +pub(super) fn _deal_client_collateral_bounds( + _: PaddedPieceSize, + _: ChainEpoch, +) -> (TokenAmount, TokenAmount) { + (TokenAmount::zero(), TOTAL_FILECOIN.clone()) // PARAM_FINISH +} + +/// Penalty to provider deal collateral if the deadline expires before sector commitment. 
+pub(super) fn collateral_penalty_for_deal_activation_missed( + provider_collateral: TokenAmount, +) -> TokenAmount { + provider_collateral +} diff --git a/actors/market/src/v16/state.rs b/actors/market/src/v16/state.rs new file mode 100644 index 00000000..5f876379 --- /dev/null +++ b/actors/market/src/v16/state.rs @@ -0,0 +1,1514 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::cmp::{max, min}; +use std::collections::BTreeMap; + +use cid::Cid; +use fvm_ipld_blockstore::Blockstore; +use fvm_ipld_encoding::tuple::*; +use fvm_shared4::address::Address; +use fvm_shared4::bigint::BigInt; +use fvm_shared4::clock::{ChainEpoch, EPOCH_UNDEFINED}; +use fvm_shared4::deal::DealID; +use fvm_shared4::econ::TokenAmount; +use fvm_shared4::error::ExitCode; +use fvm_shared4::sector::SectorNumber; +use fvm_shared4::{ActorID, HAMT_BIT_WIDTH}; +use num_traits::Zero; +use std::collections::BTreeSet; + +use fil_actors_shared::actor_error_v16; +use fil_actors_shared::v16::{ + ActorContext, ActorError, Array, AsActorError, Config, DealWeight, Map2, Set, SetMultimap, + SetMultimapConfig, DEFAULT_HAMT_CONFIG, +}; + +use crate::v16::balance_table::BalanceTable; +use crate::v16::ext::verifreg::AllocationID; + +use super::policy::*; +use super::types::*; +use super::{DealProposal, DealState, EX_DEAL_EXPIRED}; + +pub enum Reason { + ClientCollateral, + ClientStorageFee, + ProviderCollateral, +} + +/// Market actor state +#[derive(Clone, Default, Serialize_tuple, Deserialize_tuple, Debug)] +pub struct State { + /// Proposals are deals that have been proposed and not yet cleaned up after expiry or termination. + /// Array + pub proposals: Cid, + + // States contains state for deals that have been activated and not yet cleaned up after expiry or termination. + // After expiration, the state exists until the proposal is cleaned up too. + // Invariant: keys(States) ⊆ keys(Proposals). 
+ /// Array + pub states: Cid, + + /// PendingProposals tracks dealProposals that have not yet reached their deal start date. + /// We track them here to ensure that miners can't publish the same deal proposal twice + /// Set + pub pending_proposals: Cid, + + /// Total amount held in escrow, indexed by actor address (including both locked and unlocked amounts). + pub escrow_table: Cid, + + /// Amount locked, indexed by actor address. + /// Note: the amounts in this table do not affect the overall amount in escrow: + /// only the _portion_ of the total escrow amount that is locked. + pub locked_table: Cid, + + /// Deal id state sequential incrementer + pub next_id: DealID, + + /// Metadata cached for efficient iteration over deals. + /// SetMultimap
+ pub deal_ops_by_epoch: Cid, + pub last_cron: ChainEpoch, + + /// Total Client Collateral that is locked -> unlocked when deal is terminated + pub total_client_locked_collateral: TokenAmount, + /// Total Provider Collateral that is locked -> unlocked when deal is terminated + pub total_provider_locked_collateral: TokenAmount, + /// Total storage fee that is locked in escrow -> unlocked when payments are made + pub total_client_storage_fee: TokenAmount, + + /// Verified registry allocation IDs for deals that are not yet activated. + // HAMT[DealID]AllocationID + pub pending_deal_allocation_ids: Cid, + + /// Maps providers to their sector IDs to deal IDs. + /// This supports finding affected deals when a sector is terminated early + /// or has data replaced. + /// Grouping by provider limits the cost of operations in the expected use case + /// of multiple sectors all belonging to the same provider. + /// HAMT[ActorID]HAMT[SectorNumber][]DealID + pub provider_sectors: Cid, +} + +pub type PendingProposalsSet = Set; +pub const PENDING_PROPOSALS_CONFIG: Config = DEFAULT_HAMT_CONFIG; + +pub type DealOpsByEpoch = SetMultimap; +pub const DEAL_OPS_BY_EPOCH_CONFIG: SetMultimapConfig = SetMultimapConfig { + outer: DEFAULT_HAMT_CONFIG, + inner: DEFAULT_HAMT_CONFIG, +}; + +pub type PendingDealAllocationsMap = Map2; +pub const PENDING_ALLOCATIONS_CONFIG: Config = Config { + bit_width: HAMT_BIT_WIDTH, + ..DEFAULT_HAMT_CONFIG +}; + +pub type ProviderSectorsMap = Map2; +pub const PROVIDER_SECTORS_CONFIG: Config = Config { + bit_width: HAMT_BIT_WIDTH, + ..DEFAULT_HAMT_CONFIG +}; + +pub type SectorDealsMap = Map2>; +pub const SECTOR_DEALS_CONFIG: Config = Config { + bit_width: HAMT_BIT_WIDTH, + ..DEFAULT_HAMT_CONFIG +}; + +fn get_proposals( + proposal_array: &DealArray, + deal_ids: &[DealID], + next_id: DealID, +) -> Result, ActorError> { + let mut proposals = Vec::new(); + let mut seen_deal_ids = BTreeSet::new(); + for deal_id in deal_ids { + if !seen_deal_ids.insert(deal_id) { + 
return Err(actor_error_v16!( + illegal_argument, + "duplicate deal ID {} in sector", + deal_id + )); + } + let proposal = get_proposal(proposal_array, *deal_id, next_id)?; + proposals.push((*deal_id, proposal)); + } + Ok(proposals) +} + +fn validate_deal_can_activate( + proposal: &DealProposal, + miner_addr: &Address, + sector_expiration: ChainEpoch, + curr_epoch: ChainEpoch, +) -> Result<(), ActorError> { + if &proposal.provider != miner_addr { + return Err(ActorError::forbidden(format!( + "proposal has provider {}, must be {}", + proposal.provider, miner_addr + ))); + }; + + if curr_epoch > proposal.start_epoch { + return Err(ActorError::unchecked( + // Use the same code as if the proposal had already been cleaned up from state. + EX_DEAL_EXPIRED, + format!( + "proposal start epoch {} has already elapsed at {}", + proposal.start_epoch, curr_epoch + ), + )); + }; + + if proposal.end_epoch > sector_expiration { + return Err(ActorError::illegal_argument(format!( + "proposal expiration {} exceeds sector expiration {}", + proposal.end_epoch, sector_expiration + ))); + }; + + Ok(()) +} + +impl State { + pub fn new(store: &BS) -> Result { + let empty_proposals_array = + Array::<(), BS>::new_with_bit_width(store, PROPOSALS_AMT_BITWIDTH) + .flush() + .context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to create empty proposals array", + )?; + + let empty_states_array = Array::<(), BS>::new_with_bit_width(store, STATES_AMT_BITWIDTH) + .flush() + .context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to create empty states array", + )?; + + let empty_pending_proposals = + PendingProposalsSet::empty(store, PENDING_PROPOSALS_CONFIG, "pending proposals") + .flush()?; + let empty_balance_table = BalanceTable::new(store, "balance table").root()?; + let empty_deal_ops = + DealOpsByEpoch::empty(store, DEAL_OPS_BY_EPOCH_CONFIG, "deal ops").flush()?; + + let empty_pending_deal_allocation_map = PendingDealAllocationsMap::empty( + store, + PENDING_ALLOCATIONS_CONFIG, + "pending 
deal allocations", + ) + .flush()?; + + let empty_sector_deals_hamt = + ProviderSectorsMap::empty(store, PROVIDER_SECTORS_CONFIG, "sector deals").flush()?; + + Ok(Self { + proposals: empty_proposals_array, + states: empty_states_array, + pending_proposals: empty_pending_proposals, + escrow_table: empty_balance_table, + locked_table: empty_balance_table, + next_id: 0, + deal_ops_by_epoch: empty_deal_ops, + last_cron: EPOCH_UNDEFINED, + + total_client_locked_collateral: TokenAmount::default(), + total_provider_locked_collateral: TokenAmount::default(), + total_client_storage_fee: TokenAmount::default(), + pending_deal_allocation_ids: empty_pending_deal_allocation_map, + provider_sectors: empty_sector_deals_hamt, + }) + } + + pub fn get_total_locked(&self) -> TokenAmount { + &self.total_client_locked_collateral + + &self.total_provider_locked_collateral + + &self.total_client_storage_fee + } + + pub fn load_deal_states<'bs, BS>( + &self, + store: &'bs BS, + ) -> Result, ActorError> + where + BS: Blockstore, + { + DealMetaArray::load(&self.states, store).context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to load deal state array", + ) + } + + fn save_deal_states(&mut self, states: &mut DealMetaArray) -> Result<(), ActorError> + where + BS: Blockstore, + { + self.states = states + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush deal states")?; + Ok(()) + } + + pub fn find_deal_state( + &self, + store: &BS, + deal_id: DealID, + ) -> Result, ActorError> + where + BS: Blockstore, + { + let states = self.load_deal_states(store)?; + find_deal_state(&states, deal_id) + } + + pub fn put_deal_states( + &mut self, + store: &BS, + new_deal_states: &[(DealID, DealState)], + ) -> Result<(), ActorError> + where + BS: Blockstore, + { + let mut states = self.load_deal_states(store)?; + new_deal_states + .iter() + .try_for_each(|(id, deal_state)| -> Result<(), ActorError> { + states + .set(*id, *deal_state) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed 
to set deal state")?; + Ok(()) + })?; + self.save_deal_states(&mut states) + } + + pub fn remove_deal_state( + &mut self, + store: &BS, + deal_id: DealID, + ) -> Result, ActorError> + where + BS: Blockstore, + { + let mut states = self.load_deal_states(store)?; + let removed = states + .delete(deal_id) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to delete deal state")?; + self.save_deal_states(&mut states)?; + Ok(removed) + } + + pub fn load_proposals<'bs, BS>(&self, store: &'bs BS) -> Result, ActorError> + where + BS: Blockstore, + { + DealArray::load(&self.proposals, store).context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to load deal proposal array", + ) + } + + pub fn get_proposal( + &self, + store: &BS, + id: DealID, + ) -> Result { + get_proposal(&self.load_proposals(store)?, id, self.next_id) + } + + pub fn find_proposal( + &self, + store: &BS, + deal_id: DealID, + ) -> Result, ActorError> + where + BS: Blockstore, + { + find_proposal(&self.load_proposals(store)?, deal_id) + } + + pub fn remove_proposal( + &mut self, + store: &BS, + deal_id: DealID, + ) -> Result, ActorError> + where + BS: Blockstore, + { + let mut deal_proposals = DealArray::load(&self.proposals, store).context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to load deal proposal array", + )?; + + let proposal = deal_proposals + .delete(deal_id) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("no such deal proposal {}", deal_id) + })?; + + self.proposals = deal_proposals.flush().context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to flush deal proposals", + )?; + + Ok(proposal) + } + + pub fn put_deal_proposals( + &mut self, + store: &BS, + new_deal_proposals: &[(DealID, DealProposal)], + ) -> Result<(), ActorError> + where + BS: Blockstore, + { + let mut deal_proposals = DealArray::load(&self.proposals, store).context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to load deal proposal array", + )?; + + new_deal_proposals + .iter() + .try_for_each(|(id, 
proposal)| -> Result<(), ActorError> { + deal_proposals + .set(*id, proposal.clone()) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to set deal proposal")?; + Ok(()) + })?; + + self.proposals = deal_proposals.flush().context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to flush deal proposals", + )?; + + Ok(()) + } + + pub fn load_pending_deal_allocation_ids( + &mut self, + store: BS, + ) -> Result, ActorError> + where + BS: Blockstore, + { + PendingDealAllocationsMap::load( + store, + &self.pending_deal_allocation_ids, + PENDING_ALLOCATIONS_CONFIG, + "pending deal allocations", + ) + } + + pub fn save_pending_deal_allocation_ids( + &mut self, + pending_deal_allocation_ids: &mut PendingDealAllocationsMap, + ) -> Result<(), ActorError> + where + BS: Blockstore, + { + self.pending_deal_allocation_ids = pending_deal_allocation_ids.flush()?; + Ok(()) + } + + pub fn put_pending_deal_allocation_ids( + &mut self, + store: &BS, + new_pending_deal_allocation_ids: &[(DealID, AllocationID)], + ) -> Result<(), ActorError> + where + BS: Blockstore, + { + let mut pending_deal_allocation_ids = self.load_pending_deal_allocation_ids(store)?; + new_pending_deal_allocation_ids.iter().try_for_each( + |(deal_id, allocation_id)| -> Result<(), ActorError> { + pending_deal_allocation_ids.set(deal_id, *allocation_id)?; + Ok(()) + }, + )?; + self.save_pending_deal_allocation_ids(&mut pending_deal_allocation_ids)?; + Ok(()) + } + + pub fn get_pending_deal_allocation_ids( + &mut self, + store: &BS, + deal_id_keys: &[DealID], + ) -> Result, ActorError> + where + BS: Blockstore, + { + let pending_deal_allocation_ids = self.load_pending_deal_allocation_ids(store)?; + + let mut allocation_ids: Vec = vec![]; + deal_id_keys + .iter() + .try_for_each(|deal_id| -> Result<(), ActorError> { + let allocation_id = pending_deal_allocation_ids.get(&deal_id.clone())?; + allocation_ids.push( + *allocation_id + .ok_or(ActorError::not_found("no such deal proposal".to_string()))?, + ); + Ok(()) + })?; 
+ + Ok(allocation_ids) + } + + pub fn remove_pending_deal_allocation_id( + &mut self, + store: &BS, + deal_id: DealID, + ) -> Result, ActorError> + where + BS: Blockstore, + { + let mut pending_deal_allocation_ids = self.load_pending_deal_allocation_ids(store)?; + let maybe_alloc_id = pending_deal_allocation_ids.delete(&deal_id)?; + self.save_pending_deal_allocation_ids(&mut pending_deal_allocation_ids)?; + Ok(maybe_alloc_id) + } + + pub fn load_deal_ops( + &self, + store: BS, + ) -> Result, ActorError> + where + BS: Blockstore, + { + DealOpsByEpoch::load( + store, + &self.deal_ops_by_epoch, + DEAL_OPS_BY_EPOCH_CONFIG, + "deal ops", + ) + } + + pub fn put_deals_by_epoch( + &mut self, + store: &BS, + new_deals_by_epoch: &[(ChainEpoch, DealID)], + ) -> Result<(), ActorError> + where + BS: Blockstore, + { + let mut deals_by_epoch = self.load_deal_ops(store)?; + new_deals_by_epoch + .iter() + .try_for_each(|(epoch, id)| -> Result<(), ActorError> { + deals_by_epoch.put(epoch, *id)?; + Ok(()) + })?; + + self.deal_ops_by_epoch = deals_by_epoch.flush()?; + Ok(()) + } + + pub fn put_batch_deals_by_epoch( + &mut self, + store: &BS, + new_deals_by_epoch: &BTreeMap>, + ) -> Result<(), ActorError> + where + BS: Blockstore, + { + let mut deals_by_epoch = self.load_deal_ops(store)?; + new_deals_by_epoch + .iter() + .try_for_each(|(epoch, deals)| -> Result<(), ActorError> { + deals_by_epoch.put_many(epoch, deals)?; + Ok(()) + })?; + + self.deal_ops_by_epoch = deals_by_epoch.flush()?; + Ok(()) + } + + pub fn get_deals_for_epoch( + &self, + store: &BS, + key: ChainEpoch, + ) -> Result, ActorError> + where + BS: Blockstore, + { + let mut deal_ids = Vec::new(); + let deals_by_epoch = self.load_deal_ops(store)?; + deals_by_epoch.for_each_in(&key, |deal_id| { + deal_ids.push(deal_id); + Ok(()) + })?; + + Ok(deal_ids) + } + + pub fn remove_deals_by_epoch( + &mut self, + store: &BS, + epochs_to_remove: &[ChainEpoch], + ) -> Result<(), ActorError> + where + BS: Blockstore, + { + let mut 
deals_by_epoch = self.load_deal_ops(store)?; + epochs_to_remove + .iter() + .try_for_each(|epoch| -> Result<(), ActorError> { + deals_by_epoch.remove_all(epoch)?; + Ok(()) + })?; + + self.deal_ops_by_epoch = deals_by_epoch.flush()?; + Ok(()) + } + + pub fn add_balance_to_escrow_table( + &mut self, + store: &BS, + addr: &Address, + amount: &TokenAmount, + ) -> Result<(), ActorError> + where + BS: Blockstore, + { + let mut escrow_table = BalanceTable::from_root(store, &self.escrow_table, "escrow table")?; + escrow_table.add(addr, amount)?; + self.escrow_table = escrow_table.root()?; + Ok(()) + } + + pub fn withdraw_balance_from_escrow_table( + &mut self, + store: &BS, + addr: &Address, + amount: &TokenAmount, + ) -> Result + where + BS: Blockstore, + { + let mut escrow_table = BalanceTable::from_root(store, &self.escrow_table, "escrow table")?; + let locked_table = BalanceTable::from_root(store, &self.locked_table, "locked table")?; + + let min_balance = locked_table.get(addr)?; + let ex = escrow_table.subtract_with_minimum(addr, amount, &min_balance)?; + + self.escrow_table = escrow_table.root()?; + Ok(ex) + } + + pub fn load_pending_deals(&self, store: BS) -> Result, ActorError> + where + BS: Blockstore, + { + PendingProposalsSet::load( + store, + &self.pending_proposals, + PENDING_PROPOSALS_CONFIG, + "pending proposals", + ) + } + + fn save_pending_deals( + &mut self, + pending_deals: &mut PendingProposalsSet, + ) -> Result<(), ActorError> + where + BS: Blockstore, + { + self.pending_proposals = pending_deals.flush()?; + Ok(()) + } + + pub fn has_pending_deal(&self, store: &BS, key: &Cid) -> Result + where + BS: Blockstore, + { + let pending_deals = self.load_pending_deals(store)?; + pending_deals.has(key) + } + + pub fn put_pending_deals( + &mut self, + store: &BS, + new_pending_deals: &[Cid], + ) -> Result<(), ActorError> + where + BS: Blockstore, + { + let mut pending_deals = self.load_pending_deals(store)?; + new_pending_deals + .iter() + .try_for_each(|key: 
&Cid| -> Result<(), ActorError> { + pending_deals.put(key)?; + Ok(()) + })?; + + self.save_pending_deals(&mut pending_deals) + } + + pub fn remove_pending_deal( + &mut self, + store: &BS, + pending_deal_key: Cid, + ) -> Result, ActorError> + where + BS: Blockstore, + { + let mut pending_deals = self.load_pending_deals(store)?; + let removed = pending_deals.delete(&pending_deal_key)?; + + self.save_pending_deals(&mut pending_deals)?; + Ok(removed) + } + + pub fn escrow_table<'a, BS: Blockstore>( + &self, + store: &'a BS, + ) -> Result, ActorError> { + BalanceTable::from_root(store, &self.escrow_table, "escrow table") + } + + pub fn locked_table<'a, BS: Blockstore>( + &self, + store: &'a BS, + ) -> Result, ActorError> { + BalanceTable::from_root(store, &self.locked_table, "locked table") + } + + /// Verify that a given set of storage deals is valid for a sector currently being PreCommitted + pub fn verify_deals_for_activation( + &self, + store: &BS, + addr: &Address, + deal_ids: Vec, + curr_epoch: ChainEpoch, + sector_exp: i64, + ) -> Result<(DealWeight, DealWeight), ActorError> + where + BS: Blockstore, + { + let proposal_array = self.load_proposals(store)?; + let mut total_w = BigInt::zero(); + let mut total_vw = BigInt::zero(); + let sector_proposals = get_proposals(&proposal_array, &deal_ids, self.next_id)?; + for (deal_id, proposal) in sector_proposals.into_iter() { + validate_deal_can_activate(&proposal, addr, sector_exp, curr_epoch) + .with_context(|| format!("cannot activate deal {}", deal_id))?; + let deal_duration = sector_exp - curr_epoch; + let deal_size = proposal.piece_size.0; + let deal_spacetime = DealWeight::from(deal_duration as u64 * deal_size); + + if proposal.verified_deal { + total_vw += deal_spacetime; + } else { + total_w += deal_spacetime; + }; + } + + Ok((total_w, total_vw)) + } + + //////////////////////////////////////////////////////////////////////////////// + // Provider sector/deal operations + 
//////////////////////////////////////////////////////////////////////////////// + + // Stores deal IDs associated with sectors for a provider. + // Deal IDs are added to any already stored for the provider and sector. + // Returns the root cid of the sector deals map. + pub fn put_sector_deal_ids( + &mut self, + store: &impl Blockstore, + provider: ActorID, + sector_deal_ids: &[(SectorNumber, Vec)], + ) -> Result<(), ActorError> { + let mut provider_sectors = self.load_provider_sectors(store)?; + let mut sector_deals = load_provider_sector_deals(store, &provider_sectors, provider)?; + + for (sector_number, deals) in sector_deal_ids { + let mut new_deals = deals.clone(); + let existing_deal_ids = sector_deals + .get(sector_number) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to read sector deals")?; + if let Some(existing_deal_ids) = existing_deal_ids { + new_deals.extend(existing_deal_ids.iter()); + } + new_deals.sort(); + new_deals.dedup(); + sector_deals + .set(sector_number, new_deals) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!( + "failed to set sector deals for {} {}", + provider, sector_number + ) + })?; + } + + save_provider_sector_deals(&mut provider_sectors, provider, &mut sector_deals)?; + self.save_provider_sectors(&mut provider_sectors)?; + Ok(()) + } + + // Reads and removes the sector deals mapping for an array of sector numbers, + pub fn pop_sector_deal_ids( + &mut self, + store: &impl Blockstore, + provider: ActorID, + sector_numbers: impl Iterator, + ) -> Result, ActorError> { + let mut provider_sectors = self.load_provider_sectors(store)?; + let mut sector_deals = load_provider_sector_deals(store, &provider_sectors, provider)?; + + let mut popped_sector_deals = Vec::new(); + let mut flush = false; + for sector_number in sector_numbers { + let deals: Option> = sector_deals + .delete(§or_number) + .with_context(|| format!("provider {}", provider))?; + if let Some(deals) = deals { + 
popped_sector_deals.extend(deals.iter()); + flush = true; + } + } + + // Flush if any of the requested sectors were found. + if flush { + save_provider_sector_deals(&mut provider_sectors, provider, &mut sector_deals)?; + self.save_provider_sectors(&mut provider_sectors)?; + } + + Ok(popped_sector_deals) + } + + // Removes specified deals from the sector deals mapping. + // Missing deals are ignored. + pub fn remove_sector_deal_ids( + &mut self, + store: &impl Blockstore, + provider_sector_deal_ids: &BTreeMap>>, + ) -> Result<(), ActorError> { + let mut provider_sectors = self.load_provider_sectors(store)?; + for (provider, sector_deal_ids) in provider_sector_deal_ids { + let mut flush = false; + let mut sector_deals = load_provider_sector_deals(store, &provider_sectors, *provider)?; + for (sector_number, deals_to_remove) in sector_deal_ids { + let existing_deal_ids = sector_deals + .get(sector_number) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to read sector deals")?; + if let Some(existing_deal_ids) = existing_deal_ids { + // The filter below is a linear scan of deals_to_remove. + // This is expected to be a small list, often a singleton, so is usually + // pretty fast. + // Loading into a HashSet could be an improvement for large collections of deals + // in a single sector being removed at one time. 
+ let new_deals: Vec<_> = existing_deal_ids + .iter() + .filter(|deal_id| !deals_to_remove.contains(*deal_id)) + .cloned() + .collect(); + flush = true; + + if new_deals.is_empty() { + sector_deals.delete(sector_number).with_context_code( + ExitCode::USR_ILLEGAL_STATE, + || { + format!( + "failed to delete sector deals for {} {}", + provider, sector_number + ) + }, + )?; + } else { + sector_deals + .set(sector_number, new_deals) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!( + "failed to set sector deals for {} {}", + provider, sector_number + ) + })?; + } + } + } + if flush { + save_provider_sector_deals(&mut provider_sectors, *provider, &mut sector_deals)?; + } + } + self.save_provider_sectors(&mut provider_sectors)?; + Ok(()) + } + + pub fn load_provider_sectors(&self, store: BS) -> Result, ActorError> + where + BS: Blockstore, + { + ProviderSectorsMap::load( + store, + &self.provider_sectors, + PROVIDER_SECTORS_CONFIG, + "provider sectors", + ) + } + + fn save_provider_sectors( + &mut self, + provider_sectors: &mut ProviderSectorsMap, + ) -> Result<(), ActorError> + where + BS: Blockstore, + { + self.provider_sectors = provider_sectors.flush()?; + Ok(()) + } + + /// Delete proposal and state simultaneously. 
+ pub fn remove_completed_deal( + &mut self, + store: &BS, + deal_id: DealID, + ) -> Result<(), ActorError> + where + BS: Blockstore, + { + let state = self.remove_deal_state(store, deal_id)?; + if state.is_none() { + return Err(actor_error_v16!( + illegal_state, + "failed to delete deal state: does not exist" + )); + } + let proposal = self.remove_proposal(store, deal_id)?; + if proposal.is_none() { + return Err(actor_error_v16!( + illegal_state, + "failed to delete deal proposal: does not exist" + )); + } + Ok(()) + } + + /// Given a DealProposal, checks that the corresponding deal has activated + /// If not, checks that the deal is past its activation epoch and performs cleanup + pub fn get_active_deal_or_process_timeout( + &mut self, + store: &BS, + curr_epoch: ChainEpoch, + deal_id: DealID, + deal_proposal: &DealProposal, + dcid: &Cid, + ) -> Result + where + BS: Blockstore, + { + let deal_state = self.find_deal_state(store, deal_id)?; + + match deal_state { + Some(deal_state) => Ok(LoadDealState::Loaded(deal_state)), + None => { + // deal_id called too early + if curr_epoch < deal_proposal.start_epoch { + return Ok(LoadDealState::TooEarly); + } + + // if not activated, the proposal has timed out + let slashed = self.process_deal_init_timed_out(store, deal_proposal)?; + + // delete the proposal (but not state, which doesn't exist) + let deleted = self.remove_proposal(store, deal_id)?; + if deleted.is_none() { + return Err(actor_error_v16!( + illegal_state, + format!( + "failed to delete deal {} proposal {}: does not exist", + deal_id, dcid + ) + )); + } + + // delete pending deal cid + self.remove_pending_deal(store, *dcid)?.ok_or_else(|| { + actor_error_v16!( + illegal_state, + format!( + "failed to delete pending deal {}: cid {} does not exist", + deal_id, dcid + ) + ) + })?; + + // delete pending deal allocation id (if present) + self.remove_pending_deal_allocation_id(store, deal_id)?; + + Ok(LoadDealState::ProposalExpired(slashed)) + } + } + } + + 
//////////////////////////////////////////////////////////////////////////////// + // Deal state operations + //////////////////////////////////////////////////////////////////////////////// + + // TODO: change return value when marked-for-termination sectors are cleared from state + // https://github.com/filecoin-project/builtin-actors/issues/1388 + // drop slash_amount, bool return value indicates a completed deal + pub fn process_deal_update( + &mut self, + store: &BS, + state: &DealState, + deal: &DealProposal, + deal_cid: &Cid, + epoch: ChainEpoch, + ) -> Result< + ( + /* slash_amount */ TokenAmount, + /* payment_amount */ TokenAmount, + /* is_deal_completed */ bool, + /* remove */ bool, + ), + ActorError, + > + where + BS: Blockstore, + { + let ever_updated = state.last_updated_epoch != EPOCH_UNDEFINED; + + // seeing a slashed deal here will eventually be an unreachable state + // during the transition to synchronous deal termination there may be marked-for-termination + // deals that have not been processed in cron yet + // https://github.com/filecoin-project/builtin-actors/issues/1388 + // TODO: remove this and calculations below that assume deals can be slashed + let ever_slashed = state.slash_epoch != EPOCH_UNDEFINED; + + if !ever_updated { + // pending deal might have been removed by manual settlement or cron so we don't care if it's missing + self.remove_pending_deal(store, *deal_cid)?; + } + + // if the deal was ever updated, make sure it didn't happen in the future + if ever_updated && state.last_updated_epoch > epoch { + return Err(actor_error_v16!( + illegal_state, + "deal updated at future epoch {}", + state.last_updated_epoch + )); + } + + // this is a safe no-op but can happen if a storage provider calls settle_deal_payments too early + if deal.start_epoch > epoch { + return Ok((TokenAmount::zero(), TokenAmount::zero(), false, false)); + } + + let payment_end_epoch = if ever_slashed { + if epoch < state.slash_epoch { + return 
Err(actor_error_v16!( + illegal_state, + "current epoch less than deal slash epoch {}", + state.slash_epoch + )); + } + if state.slash_epoch > deal.end_epoch { + return Err(actor_error_v16!( + illegal_state, + "deal slash epoch {} after deal end {}", + state.slash_epoch, + deal.end_epoch + )); + } + state.slash_epoch + } else { + std::cmp::min(deal.end_epoch, epoch) + }; + + let payment_start_epoch = if ever_updated && state.last_updated_epoch > deal.start_epoch { + state.last_updated_epoch + } else { + deal.start_epoch + }; + + let num_epochs_elapsed = payment_end_epoch - payment_start_epoch; + + let elapsed_payment = &deal.storage_price_per_epoch * num_epochs_elapsed; + if elapsed_payment.is_positive() { + self.transfer_balance(store, &deal.client, &deal.provider, &elapsed_payment)?; + } + + // TODO: remove handling of terminated deals *after* transition to synchronous deal termination + // at that point, this function can be modified to return a bool only, indicating whether the deal is completed + // https://github.com/filecoin-project/builtin-actors/issues/1388 + if ever_slashed { + // unlock client collateral and locked storage fee + let payment_remaining = deal_get_payment_remaining(deal, state.slash_epoch)?; + + // Unlock remaining storage fee + self.unlock_balance( + store, + &deal.client, + &payment_remaining, + Reason::ClientStorageFee, + ) + .context("unlocking client storage fee")?; + + // Unlock client collateral + self.unlock_balance( + store, + &deal.client, + &deal.client_collateral, + Reason::ClientCollateral, + ) + .context("unlocking client collateral")?; + + // slash provider collateral + let slashed = deal.provider_collateral.clone(); + self.slash_balance(store, &deal.provider, &slashed, Reason::ProviderCollateral) + .context("slashing balance")?; + + return Ok((slashed, payment_remaining + elapsed_payment, false, true)); + } + + if epoch >= deal.end_epoch { + self.process_deal_expired(store, deal, state)?; + return Ok((TokenAmount::zero(), 
elapsed_payment, true, true)); + } + + Ok((TokenAmount::zero(), elapsed_payment, false, false)) + } + + pub fn process_slashed_deal( + &mut self, + store: &BS, + proposal: &DealProposal, + state: &DealState, + ) -> Result + where + BS: Blockstore, + { + // make payments for epochs until termination + let payment_start_epoch = max(proposal.start_epoch, state.last_updated_epoch); + let payment_end_epoch = min(proposal.end_epoch, state.slash_epoch); + let num_epochs_elapsed = max(0, payment_end_epoch - payment_start_epoch); + let total_payment = &proposal.storage_price_per_epoch * num_epochs_elapsed; + if total_payment.is_positive() { + self.transfer_balance(store, &proposal.client, &proposal.provider, &total_payment)?; + } + + // unlock client collateral and locked storage fee + let payment_remaining = deal_get_payment_remaining(proposal, state.slash_epoch)?; + + // Unlock remaining storage fee + self.unlock_balance( + store, + &proposal.client, + &payment_remaining, + Reason::ClientStorageFee, + ) + .context("unlocking client storage fee")?; + + // Unlock client collateral + self.unlock_balance( + store, + &proposal.client, + &proposal.client_collateral, + Reason::ClientCollateral, + ) + .context("unlocking client collateral")?; + + // slash provider collateral + let slashed = proposal.provider_collateral.clone(); + self.slash_balance( + store, + &proposal.provider, + &slashed, + Reason::ProviderCollateral, + ) + .context("slashing balance")?; + + Ok(slashed) + } + + /// Deal start deadline elapsed without appearing in a proven sector. + /// Slash a portion of provider's collateral, and unlock remaining collaterals + /// for both provider and client. 
+ pub fn process_deal_init_timed_out( + &mut self, + store: &BS, + deal: &DealProposal, + ) -> Result + where + BS: Blockstore, + { + self.unlock_balance( + store, + &deal.client, + &deal.total_storage_fee(), + Reason::ClientStorageFee, + ) + .context("unlocking client storage fee")?; + + self.unlock_balance( + store, + &deal.client, + &deal.client_collateral, + Reason::ClientCollateral, + ) + .context("unlocking client collateral")?; + + let amount_slashed = + collateral_penalty_for_deal_activation_missed(deal.provider_collateral.clone()); + let amount_remaining = deal.provider_balance_requirement() - &amount_slashed; + + self.slash_balance( + store, + &deal.provider, + &amount_slashed, + Reason::ProviderCollateral, + ) + .context("slashing balance")?; + + self.unlock_balance( + store, + &deal.provider, + &amount_remaining, + Reason::ProviderCollateral, + ) + .context("unlocking deal provider balance")?; + + Ok(amount_slashed) + } + + /// Normal expiration. Unlock collaterals for both miner and client. 
+ fn process_deal_expired( + &mut self, + store: &BS, + deal: &DealProposal, + state: &DealState, + ) -> Result<(), ActorError> + where + BS: Blockstore, + { + if state.sector_start_epoch == EPOCH_UNDEFINED { + return Err(actor_error_v16!( + illegal_state, + "start sector epoch undefined" + )); + } + + self.unlock_balance( + store, + &deal.provider, + &deal.provider_collateral, + Reason::ProviderCollateral, + ) + .context("unlocking deal provider balance")?; + + self.unlock_balance( + store, + &deal.client, + &deal.client_collateral, + Reason::ClientCollateral, + ) + .context("unlocking deal client balance")?; + + Ok(()) + } + + pub fn generate_storage_deal_id(&mut self) -> DealID { + let ret = self.next_id; + self.next_id += 1; + ret + } + + // Return true when the funds in escrow for the input address can cover an additional lockup of amountToLock + pub fn balance_covered( + &self, + store: &BS, + addr: Address, + amount_to_lock: &TokenAmount, + ) -> Result + where + BS: Blockstore, + { + let escrow_table = BalanceTable::from_root(store, &self.escrow_table, "escrow table")?; + let locked_table = BalanceTable::from_root(store, &self.locked_table, "locked table")?; + + let escrow_balance = escrow_table.get(&addr)?; + let prev_locked = locked_table.get(&addr)?; + Ok((prev_locked + amount_to_lock) <= escrow_balance) + } + + fn maybe_lock_balance( + &mut self, + store: &BS, + addr: &Address, + amount: &TokenAmount, + ) -> Result<(), ActorError> + where + BS: Blockstore, + { + if amount.is_negative() { + return Err(actor_error_v16!( + illegal_state, + "cannot lock negative amount {}", + amount + )); + } + + let escrow_table = BalanceTable::from_root(store, &self.escrow_table, "escrow table")?; + let mut locked_table = BalanceTable::from_root(store, &self.locked_table, "locked table")?; + + let prev_locked = locked_table.get(addr)?; + let escrow_balance = escrow_table.get(addr)?; + if &prev_locked + amount > escrow_balance { + return 
Err(actor_error_v16!(insufficient_funds; + "not enough balance to lock for addr{}: \ + escrow balance {} < prev locked {} + amount {}", + addr, escrow_balance, prev_locked, amount)); + } + + locked_table.add(addr, amount)?; + self.locked_table = locked_table.root()?; + Ok(()) + } + + pub fn lock_client_and_provider_balances( + &mut self, + store: &BS, + proposal: &DealProposal, + ) -> Result<(), ActorError> + where + BS: Blockstore, + { + self.maybe_lock_balance( + store, + &proposal.client, + &proposal.client_balance_requirement(), + ) + .context("locking client funds")?; + self.maybe_lock_balance(store, &proposal.provider, &proposal.provider_collateral) + .context("locking provider funds")?; + + self.total_client_locked_collateral += &proposal.client_collateral; + self.total_client_storage_fee += proposal.total_storage_fee(); + self.total_provider_locked_collateral += &proposal.provider_collateral; + Ok(()) + } + + fn unlock_balance( + &mut self, + store: &BS, + addr: &Address, + amount: &TokenAmount, + lock_reason: Reason, + ) -> Result<(), ActorError> + where + BS: Blockstore, + { + if amount.is_negative() { + return Err(actor_error_v16!( + illegal_state, + "unlock negative amount: {}", + amount + )); + } + + let mut locked_table = BalanceTable::from_root(store, &self.locked_table, "locked table")?; + locked_table + .must_subtract(addr, amount) + .context("unlocking balance")?; + + match lock_reason { + Reason::ClientCollateral => { + self.total_client_locked_collateral -= amount; + } + Reason::ClientStorageFee => { + self.total_client_storage_fee -= amount; + } + Reason::ProviderCollateral => { + self.total_provider_locked_collateral -= amount; + } + }; + + self.locked_table = locked_table.root()?; + Ok(()) + } + + /// move funds from locked in client to available in provider + fn transfer_balance( + &mut self, + store: &BS, + from_addr: &Address, + to_addr: &Address, + amount: &TokenAmount, + ) -> Result<(), ActorError> + where + BS: Blockstore, + { + if 
amount.is_negative() { + return Err(actor_error_v16!( + illegal_state, + "transfer negative amount: {}", + amount + )); + } + + let mut escrow_table = BalanceTable::from_root(store, &self.escrow_table, "escrow table")?; + + // Subtract from locked and escrow tables + escrow_table.must_subtract(from_addr, amount)?; + self.unlock_balance(store, from_addr, amount, Reason::ClientStorageFee) + .context("unlocking client balance")?; + + // Add subtracted amount to the recipient + escrow_table.add(to_addr, amount)?; + self.escrow_table = escrow_table.root()?; + Ok(()) + } + + fn slash_balance( + &mut self, + store: &BS, + addr: &Address, + amount: &TokenAmount, + lock_reason: Reason, + ) -> Result<(), ActorError> + where + BS: Blockstore, + { + if amount.is_negative() { + return Err(actor_error_v16!( + illegal_state, + "negative amount to slash: {}", + amount + )); + } + + let mut escrow_table = BalanceTable::from_root(store, &self.escrow_table, "escrow table")?; + + // Subtract from locked and escrow tables + escrow_table.must_subtract(addr, amount)?; + self.escrow_table = escrow_table.root()?; + self.unlock_balance(store, addr, amount, lock_reason) + } +} + +pub enum LoadDealState { + TooEarly, + ProposalExpired(/* slashed_amount */ TokenAmount), + Loaded(DealState), +} + +pub fn deal_get_payment_remaining( + deal: &DealProposal, + mut slash_epoch: ChainEpoch, +) -> Result { + if slash_epoch > deal.end_epoch { + return Err(actor_error_v16!( + illegal_state, + "deal slash epoch {} after end epoch {}", + slash_epoch, + deal.end_epoch + )); + } + + // Payments are always for start -> end epoch irrespective of when the deal is slashed. 
+ slash_epoch = std::cmp::max(slash_epoch, deal.start_epoch); + + let duration_remaining = deal.end_epoch - slash_epoch; + if duration_remaining < 0 { + return Err(actor_error_v16!( + illegal_state, + "deal remaining duration negative: {}", + duration_remaining + )); + } + + Ok(&deal.storage_price_per_epoch * duration_remaining as u64) +} + +pub fn get_proposal( + proposals: &DealArray, + id: DealID, + next_id: DealID, +) -> Result { + let found = find_proposal(proposals, id)?.ok_or_else(|| { + if id < next_id { + // If the deal ID has been used, it must have been cleaned up. + ActorError::unchecked(EX_DEAL_EXPIRED, format!("deal {} expired", id)) + } else { + // Never been published. + ActorError::not_found(format!("no such deal {}", id)) + } + })?; + Ok(found) +} + +pub fn find_proposal( + proposals: &DealArray, + deal_id: DealID, +) -> Result, ActorError> +where + BS: Blockstore, +{ + let proposal = proposals + .get(deal_id) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to load deal proposal {}", deal_id) + })?; + Ok(proposal.cloned()) +} + +pub fn find_deal_state( + states: &DealMetaArray, + deal_id: DealID, +) -> Result, ActorError> +where + BS: Blockstore, +{ + let state = states + .get(deal_id) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to load deal state {}", deal_id) + })?; + Ok(state.cloned()) +} + +pub fn load_provider_sector_deals( + store: BS, + provider_sectors: &ProviderSectorsMap, + provider: ActorID, +) -> Result, ActorError> +where + BS: Blockstore, +{ + let sectors_root = (*provider_sectors).get(&provider)?; + let sector_deals = if let Some(sectors_root) = sectors_root { + SectorDealsMap::load(store, sectors_root, SECTOR_DEALS_CONFIG, "sector deals") + .with_context(|| format!("provider {}", provider))? 
+ } else { + SectorDealsMap::empty(store, SECTOR_DEALS_CONFIG, "empty") + }; + Ok(sector_deals) +} + +fn save_provider_sector_deals( + provider_sectors: &mut ProviderSectorsMap, + provider: ActorID, + sector_deals: &mut SectorDealsMap, +) -> Result<(), ActorError> +where + BS: Blockstore, +{ + if sector_deals.is_empty() { + provider_sectors + .delete(&provider) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to delete sector deals for {}", provider) + })?; + } else { + let sectors_root = sector_deals.flush()?; + provider_sectors.set(&provider, sectors_root)?; + } + Ok(()) +} diff --git a/actors/market/src/v16/types.rs b/actors/market/src/v16/types.rs new file mode 100644 index 00000000..43984309 --- /dev/null +++ b/actors/market/src/v16/types.rs @@ -0,0 +1,281 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use super::ext::verifreg::AllocationID; +use cid::Cid; +use fil_actors_shared::v16::Array; +use fil_actors_shared::v16::BatchReturn; +use fvm_ipld_bitfield::BitField; +use fvm_ipld_encoding::strict_bytes; +use fvm_ipld_encoding::tuple::*; +use fvm_shared4::address::Address; +use fvm_shared4::bigint::{bigint_ser, BigInt}; +use fvm_shared4::clock::ChainEpoch; +use fvm_shared4::deal::DealID; +use fvm_shared4::econ::TokenAmount; +use fvm_shared4::piece::PaddedPieceSize; +use fvm_shared4::ActorID; + +use crate::v16::Label; +use fvm_shared4::sector::{RegisteredSealProof, SectorNumber}; + +use super::deal::{ClientDealProposal, DealProposal, DealState}; + +pub const PROPOSALS_AMT_BITWIDTH: u32 = 5; +pub const STATES_AMT_BITWIDTH: u32 = 6; + +#[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone, Eq, PartialEq)] +#[serde(transparent)] +pub struct AddBalanceParams { + pub provider_or_client: Address, +} + +#[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone, Eq, PartialEq)] +pub struct WithdrawBalanceParams { + pub provider_or_client: Address, + pub amount: TokenAmount, +} + 
+#[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone, Eq, PartialEq)] +#[serde(transparent)] +pub struct WithdrawBalanceReturn { + pub amount_withdrawn: TokenAmount, +} + +#[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone, Eq, PartialEq)] +#[serde(transparent)] +pub struct GetBalanceParams { + pub account: Address, +} + +#[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone, Eq, PartialEq)] +pub struct GetBalanceReturn { + pub balance: TokenAmount, + pub locked: TokenAmount, +} + +#[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone, PartialEq)] // Add Eq when BitField does +pub struct OnMinerSectorsTerminateParams { + pub epoch: ChainEpoch, + pub sectors: BitField, +} + +#[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone, Eq, PartialEq)] +pub struct PublishStorageDealsParams { + pub deals: Vec, +} + +#[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone, PartialEq)] // Add Eq when BitField does +pub struct PublishStorageDealsReturn { + pub ids: Vec, + pub valid_deals: BitField, +} + +#[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone, Eq, PartialEq)] +pub struct VerifyDealsForActivationParams { + /// Deals to verify, grouped by sector. + pub sectors: Vec, +} + +#[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone, Eq, PartialEq)] +pub struct SectorDeals { + pub sector_number: SectorNumber, + pub sector_type: RegisteredSealProof, + pub sector_expiry: ChainEpoch, + pub deal_ids: Vec, +} + +#[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone, Eq, PartialEq)] +pub struct VerifyDealsForActivationReturn { + // The unsealed CID computed from the deals specified for each sector. + // A None indicates no deals were specified. + pub unsealed_cids: Vec>, +} + +#[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone, Eq, PartialEq)] +pub struct BatchActivateDealsParams { + /// Deals to activate, grouped by sector. 
+ /// A failed deal activation will cause other deals in the same sector group to also fail, + /// but allow other sectors to proceed. + pub sectors: Vec, + /// Requests computation of an unsealed CID for each sector from the provided deals. + pub compute_cid: bool, +} + +// Information about a deal that has been activated. +#[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone, Eq, PartialEq)] +pub struct ActivatedDeal { + pub client: ActorID, + pub allocation_id: AllocationID, // NO_ALLOCATION_ID for unverified deals. + pub data: Cid, + pub size: PaddedPieceSize, +} + +// Information about a sector-grouping of deals that have been activated. +#[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone, Eq, PartialEq)] +pub struct SectorDealActivation { + /// Information about each deal activated. + pub activated: Vec, + /// Unsealed CID computed from the deals specified for the sector. + /// A None indicates no deals were specified, or the computation was not requested. + pub unsealed_cid: Option, +} + +#[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone, Eq, PartialEq)] +pub struct BatchActivateDealsResult { + /// Status of each sector grouping of deals. + pub activation_results: BatchReturn, + /// Activation information for the sector groups that were activated. + pub activations: Vec, +} + +#[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone, Eq, PartialEq)] +pub struct DealSpaces { + #[serde(with = "bigint_ser")] + pub deal_space: BigInt, + #[serde(with = "bigint_ser")] + pub verified_deal_space: BigInt, +} + +/// A specialization of a array to deals. +pub type DealArray<'bs, BS> = Array<'bs, DealProposal, BS>; + +/// A specialization of a array to deals. 
+pub type DealMetaArray<'bs, BS> = Array<'bs, DealState, BS>; + +#[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone, Eq, PartialEq)] +pub struct SectorDataSpec { + pub deal_ids: Vec, + pub sector_type: RegisteredSealProof, +} + +#[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone, Eq, PartialEq)] +#[serde(transparent)] +pub struct DealQueryParams { + pub id: DealID, +} + +pub type GetDealDataCommitmentParams = DealQueryParams; + +#[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone, Eq, PartialEq)] +pub struct GetDealDataCommitmentReturn { + pub data: Cid, + pub size: PaddedPieceSize, +} + +pub type GetDealClientParams = DealQueryParams; + +#[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone, Eq, PartialEq)] +#[serde(transparent)] +pub struct GetDealClientReturn { + pub client: ActorID, +} + +pub type GetDealProviderParams = DealQueryParams; + +#[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone, Eq, PartialEq)] +#[serde(transparent)] +pub struct GetDealProviderReturn { + pub provider: ActorID, +} + +pub type GetDealLabelParams = DealQueryParams; + +#[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone, Eq, PartialEq)] +#[serde(transparent)] +pub struct GetDealLabelReturn { + pub label: Label, +} + +pub type GetDealTermParams = DealQueryParams; + +#[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone, Eq, PartialEq)] +pub struct GetDealTermReturn { + // First epoch for the deal (inclusive) + pub start: ChainEpoch, + pub duration: ChainEpoch, // Duration of the deal. 
+} + +pub type GetDealTotalPriceParams = DealQueryParams; + +#[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone, Eq, PartialEq)] +#[serde(transparent)] +pub struct GetDealTotalPriceReturn { + pub total_price: TokenAmount, +} + +pub type GetDealClientCollateralParams = DealQueryParams; + +#[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone, Eq, PartialEq)] +#[serde(transparent)] +pub struct GetDealClientCollateralReturn { + pub collateral: TokenAmount, +} + +pub type GetDealProviderCollateralParams = DealQueryParams; + +#[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone, Eq, PartialEq)] +#[serde(transparent)] +pub struct GetDealProviderCollateralReturn { + pub collateral: TokenAmount, +} + +pub type GetDealVerifiedParams = DealQueryParams; + +#[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone, Eq, PartialEq)] +#[serde(transparent)] +pub struct GetDealVerifiedReturn { + pub verified: bool, +} + +pub type GetDealActivationParams = DealQueryParams; + +#[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone, Eq, PartialEq)] +pub struct GetDealActivationReturn { + /// Epoch at which the deal was activated, or -1. + /// This may be before the proposed start epoch. + pub activated: ChainEpoch, + /// Epoch at which the deal was terminated abnormally, or -1. + pub terminated: ChainEpoch, +} + +pub type GetDealSectorParams = DealQueryParams; + +#[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone, Eq, PartialEq)] +#[serde(transparent)] +pub struct GetDealSectorReturn { + /// Sector number with the provider that has committed the deal. 
+ pub sector: SectorNumber, +} + +// Interface market clients can implement to receive notifications from builtin market +pub const MARKET_NOTIFY_DEAL_METHOD: u64 = frc42_dispatch::method_hash!("MarketNotifyDeal"); + +#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct MarketNotifyDealParams { + #[serde(with = "strict_bytes")] + pub proposal: Vec, + pub deal_id: u64, +} + +#[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone)] +#[serde(transparent)] +pub struct SettleDealPaymentsParams { + pub deal_ids: BitField, +} + +#[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone, Eq, PartialEq)] +pub struct SettleDealPaymentsReturn { + /// Indicators of success or failure for each deal + pub results: BatchReturn, + /// Results for the deals that succesfully settled + pub settlements: Vec, +} + +#[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone, Eq, PartialEq)] +pub struct DealSettlementSummary { + /// Incremental amount of funds transferred from client to provider for deal payment + pub payment: TokenAmount, + /// Whether the deal has settled for the final time + pub completed: bool, +} diff --git a/actors/miner/src/lib.rs b/actors/miner/src/lib.rs index f95f7f59..26e0c7c7 100644 --- a/actors/miner/src/lib.rs +++ b/actors/miner/src/lib.rs @@ -7,5 +7,6 @@ pub mod v12; pub mod v13; pub mod v14; pub mod v15; +pub mod v16; pub mod v8; pub mod v9; diff --git a/actors/miner/src/v16/beneficiary.rs b/actors/miner/src/v16/beneficiary.rs new file mode 100644 index 00000000..fa254c55 --- /dev/null +++ b/actors/miner/src/v16/beneficiary.rs @@ -0,0 +1,78 @@ +use fvm_ipld_encoding::tuple::*; +use fvm_shared4::address::Address; + +use fvm_shared4::clock::ChainEpoch; +use fvm_shared4::econ::TokenAmount; +use num_traits::Zero; +use std::ops::Sub; + +#[derive(Debug, PartialEq, Eq, Clone, Serialize_tuple, Deserialize_tuple)] +pub struct BeneficiaryTerm { + /// The total amount the current beneficiary can withdraw. Monotonic, but reset when beneficiary changes. 
+ pub quota: TokenAmount, + /// The amount of quota the current beneficiary has already withdrawn + pub used_quota: TokenAmount, + /// The epoch at which the beneficiary's rights expire and revert to the owner + pub expiration: ChainEpoch, +} + +impl Default for BeneficiaryTerm { + fn default() -> BeneficiaryTerm { + BeneficiaryTerm { + quota: TokenAmount::zero(), + expiration: 0, + used_quota: TokenAmount::zero(), + } + } +} + +impl BeneficiaryTerm { + pub fn new( + quota: TokenAmount, + used_quota: TokenAmount, + expiration: ChainEpoch, + ) -> BeneficiaryTerm { + BeneficiaryTerm { + quota, + expiration, + used_quota, + } + } + + /// Get the amount that the beneficiary has not yet withdrawn + /// return 0 when expired + /// return 0 when the usedQuota >= Quota for safe + /// otherwise Return quota-used_quota + pub fn available(&self, cur: ChainEpoch) -> TokenAmount { + if self.expiration > cur { + (&self.quota).sub(&self.used_quota).max(TokenAmount::zero()) + } else { + TokenAmount::zero() + } + } +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct PendingBeneficiaryChange { + pub new_beneficiary: Address, + pub new_quota: TokenAmount, + pub new_expiration: ChainEpoch, + pub approved_by_beneficiary: bool, + pub approved_by_nominee: bool, +} + +impl PendingBeneficiaryChange { + pub fn new( + new_beneficiary: Address, + new_quota: TokenAmount, + new_expiration: ChainEpoch, + ) -> Self { + PendingBeneficiaryChange { + new_beneficiary, + new_quota, + new_expiration, + approved_by_beneficiary: false, + approved_by_nominee: false, + } + } +} diff --git a/actors/miner/src/v16/bitfield_queue.rs b/actors/miner/src/v16/bitfield_queue.rs new file mode 100644 index 00000000..d3df56ed --- /dev/null +++ b/actors/miner/src/v16/bitfield_queue.rs @@ -0,0 +1,142 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::convert::TryInto; + +use cid::Cid; +use fil_actors_shared::v16::{ActorDowncast, 
Array}; +use fvm_ipld_amt::Error as AmtError; +use fvm_ipld_bitfield::BitField; +use fvm_ipld_blockstore::Blockstore; +use fvm_shared4::clock::ChainEpoch; +use itertools::Itertools; + +use super::QuantSpec; + +/// Wrapper for working with an AMT[ChainEpoch]*Bitfield functioning as a queue, bucketed by epoch. +/// Keys in the queue are quantized (upwards), modulo some offset, to reduce the cardinality of keys. +pub struct BitFieldQueue<'db, BS> { + pub amt: Array<'db, BitField, BS>, + quant: QuantSpec, +} + +impl<'db, BS: Blockstore> BitFieldQueue<'db, BS> { + pub fn new(store: &'db BS, root: &Cid, quant: QuantSpec) -> Result { + Ok(Self { + amt: Array::load(root, store)?, + quant, + }) + } + + /// Adds values to the queue entry for an epoch. + pub fn add_to_queue(&mut self, raw_epoch: ChainEpoch, values: &BitField) -> anyhow::Result<()> { + if values.is_empty() { + // nothing to do. + return Ok(()); + } + + let epoch: u64 = self.quant.quantize_up(raw_epoch).try_into()?; + + let bitfield = self + .amt + .get(epoch) + .map_err(|e| e.downcast_wrap(format!("failed to lookup queue epoch {}", epoch)))? + .cloned() + .unwrap_or_default(); + + self.amt + .set(epoch, &bitfield | values) + .map_err(|e| e.downcast_wrap(format!("failed to set queue epoch {}", epoch)))?; + + Ok(()) + } + + pub fn add_to_queue_values( + &mut self, + epoch: ChainEpoch, + values: impl IntoIterator, + ) -> anyhow::Result<()> { + self.add_to_queue(epoch, &BitField::try_from_bits(values)?) + } + + /// Cut cuts the elements from the bits in the given bitfield out of the queue, + /// shifting other bits down and removing any newly empty entries. + /// + /// See the docs on `BitField::cut` to better understand what it does. 
+ pub fn cut(&mut self, to_cut: &BitField) -> anyhow::Result<()> { + let mut epochs_to_remove = Vec::::new(); + + self.amt + .for_each_mut(|epoch, bitfield| { + let bf = bitfield.cut(to_cut); + + if bf.is_empty() { + epochs_to_remove.push(epoch); + } else { + **bitfield = bf; + } + + Ok(()) + }) + .map_err(|e| e.downcast_wrap("failed to cut from bitfield queue"))?; + + self.amt + .batch_delete(epochs_to_remove, true) + .map_err(|e| e.downcast_wrap("failed to remove empty epochs from bitfield queue"))?; + + Ok(()) + } + + pub fn add_many_to_queue_values( + &mut self, + values: impl IntoIterator, + ) -> anyhow::Result<()> { + // Pre-quantize to reduce the number of updates. + let mut quantized_values: Vec<_> = values + .into_iter() + .map(|(raw_epoch, value)| (self.quant.quantize_up(raw_epoch), value)) + .collect(); + + // Sort and dedup. + quantized_values.sort_unstable(); + quantized_values.dedup(); + + // Add to queue. + let mut iter = quantized_values.into_iter().peekable(); + while let Some(&(epoch, _)) = iter.peek() { + self.add_to_queue_values( + epoch, + iter.peeking_take_while(|&(e, _)| e == epoch) + .map(|(_, v)| v), + )?; + } + + Ok(()) + } + + /// Removes and returns all values with keys less than or equal to until. + /// Modified return value indicates whether this structure has been changed by the call. + pub fn pop_until(&mut self, until: ChainEpoch) -> anyhow::Result<(BitField, bool)> { + let mut popped_values = BitField::new(); + let mut popped_keys = Vec::::new(); + + self.amt.for_each_while(|epoch, bitfield| { + if epoch as ChainEpoch > until { + // break + return Ok(false); + } + + popped_keys.push(epoch); + popped_values |= bitfield; + Ok(true) + })?; + + if popped_keys.is_empty() { + // Nothing expired. 
+ return Ok((BitField::new(), false)); + } + + self.amt.batch_delete(popped_keys, true)?; + Ok((popped_values, true)) + } +} diff --git a/actors/miner/src/v16/commd.rs b/actors/miner/src/v16/commd.rs new file mode 100644 index 00000000..f4751b2d --- /dev/null +++ b/actors/miner/src/v16/commd.rs @@ -0,0 +1,108 @@ +use cid::multihash::Multihash; +use cid::{Cid, Version}; +use fil_actors_shared::actor_error_v16; +use fil_actors_shared::v16::ActorError; +use fvm_shared4::commcid::{FIL_COMMITMENT_UNSEALED, SHA2_256_TRUNC254_PADDED}; +use fvm_shared4::sector::RegisteredSealProof; +use serde::{Deserialize, Serialize}; + +/// CompactCommD represents a Cid with compact representation of context dependant zero value +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone, Default)] +#[serde(transparent)] +pub struct CompactCommD(pub Option); + +impl CompactCommD { + pub fn new(commd: Option) -> Self { + CompactCommD(commd) + } + + // A CompactCommD representing zero data. + pub fn empty() -> Self { + CompactCommD(None) + } + + // A CompactCommD representing some non-zero data. + pub fn of(c: Cid) -> Self { + CompactCommD(Some(c)) + } + + // Whether this represents the zero CID. + pub fn is_zero(&self) -> bool { + self.0.is_none() + } + + // Gets the full, non-compact CID. + pub fn get_cid(&self, seal_proof: RegisteredSealProof) -> Result { + match self.0 { + Some(ref x) => Ok(*x), + None => zero_commd(seal_proof), + } + } + + // Gets the full, non-compact CID, panicking if the CID is zero. + pub fn unwrap_nonzero_cid(&self) -> Cid { + match self.0 { + Some(ref x) => *x, + None => panic!("zero commd"), + } + } +} + +/// Prefix for unsealed sector CIDs (CommD). 
+pub fn is_unsealed_sector(c: &Cid) -> bool { + c.version() == Version::V1 + && c.codec() == FIL_COMMITMENT_UNSEALED + && c.hash().code() == SHA2_256_TRUNC254_PADDED + && c.hash().size() == 32 +} + +const ZERO_COMMD_HASH: [[u8; 32]; 5] = [ + [ + 252, 126, 146, 130, 150, 229, 22, 250, 173, 233, 134, 178, 143, 146, 212, 74, 79, 36, 185, + 53, 72, 82, 35, 55, 106, 121, 144, 39, 188, 24, 248, 51, + ], + [ + 57, 86, 14, 123, 19, 169, 59, 7, 162, 67, 253, 39, 32, 255, 167, 203, 62, 29, 46, 80, 90, + 179, 98, 158, 121, 244, 99, 19, 81, 44, 218, 6, + ], + [ + 101, 242, 158, 93, 152, 210, 70, 195, 139, 56, 140, 252, 6, 219, 31, 107, 2, 19, 3, 197, + 162, 137, 0, 11, 220, 232, 50, 169, 195, 236, 66, 28, + ], + [ + 7, 126, 95, 222, 53, 197, 10, 147, 3, 165, 80, 9, 227, 73, 138, 78, 190, 223, 243, 156, 66, + 183, 16, 183, 48, 216, 236, 122, 199, 175, 166, 62, + ], + [ + 230, 64, 5, 166, 191, 227, 119, 121, 83, 184, 173, 110, 249, 63, 15, 202, 16, 73, 178, 4, + 22, 84, 242, 164, 17, 247, 112, 39, 153, 206, 206, 2, + ], +]; + +fn zero_commd(seal_proof: RegisteredSealProof) -> Result { + let mut seal_proof = seal_proof; + seal_proof.update_to_v1(); + let i = match seal_proof { + RegisteredSealProof::StackedDRG2KiBV1P1 + | RegisteredSealProof::StackedDRG2KiBV1P1_Feat_SyntheticPoRep + | RegisteredSealProof::StackedDRG2KiBV1P2_Feat_NiPoRep => 0, + RegisteredSealProof::StackedDRG512MiBV1P1 + | RegisteredSealProof::StackedDRG512MiBV1P1_Feat_SyntheticPoRep + | RegisteredSealProof::StackedDRG512MiBV1P2_Feat_NiPoRep => 1, + RegisteredSealProof::StackedDRG8MiBV1P1 + | RegisteredSealProof::StackedDRG8MiBV1P1_Feat_SyntheticPoRep + | RegisteredSealProof::StackedDRG8MiBV1P2_Feat_NiPoRep => 2, + RegisteredSealProof::StackedDRG32GiBV1P1 + | RegisteredSealProof::StackedDRG32GiBV1P1_Feat_SyntheticPoRep + | RegisteredSealProof::StackedDRG32GiBV1P2_Feat_NiPoRep => 3, + RegisteredSealProof::StackedDRG64GiBV1P1 + | RegisteredSealProof::StackedDRG64GiBV1P1_Feat_SyntheticPoRep + | 
/// Ceiling integer division: `dividend / divisor`, rounded up.
fn div_rounding_up(dividend: u64, divisor: u64) -> u64 {
    let quotient = dividend / divisor;
    if dividend % divisor == 0 {
        quotient
    } else {
        quotient + 1
    }
}

/// Bookkeeping for one deadline while sectors are being assigned to it.
struct DeadlineAssignmentInfo {
    index: usize,
    live_sectors: u64,
    total_sectors: u64,
}

impl DeadlineAssignmentInfo {
    /// Partition count (counting dead sectors) if one more sector were assigned.
    fn partitions_after_assignment(&self, partition_size: u64) -> u64 {
        let sectors = self.total_sectors + 1; // after assignment
        div_rounding_up(sectors, partition_size)
    }

    /// Partition count after compaction (live sectors only) if one more
    /// sector were assigned.
    fn compact_partitions_after_assignment(&self, partition_size: u64) -> u64 {
        let sectors = self.live_sectors + 1; // after assignment
        div_rounding_up(sectors, partition_size)
    }

    /// True when the newest partition is exactly full, i.e. there is no
    /// open partition to append into.
    fn is_full_now(&self, partition_size: u64) -> bool {
        self.total_sectors % partition_size == 0
    }

    /// True when this deadline cannot take another sector without exceeding
    /// `max_partitions` partitions of `partition_size` sectors each.
    fn max_partitions_reached(&self, partition_size: u64, max_partitions: u64) -> bool {
        self.total_sectors >= partition_size * max_partitions
    }
}

fn cmp(a: &DeadlineAssignmentInfo, b: &DeadlineAssignmentInfo, partition_size: u64) -> Ordering {
    // When assigning partitions to deadlines, we optimize for (in order):
    //
    // 1. Fewest post-compaction partitions after the assignment. This keeps
    //    the maximum number of partitions in any deadline — and hence the
    //    miner's proving-hardware requirements — to a minimum. Technically
    //    this could increase the pre-compaction maximum, but only when the
    //    deadline could save a whole partition by compacting, at which point
    //    the miner should compact.
    // 2. Fewest pre-compaction partitions after the assignment, putting off
    //    forced compaction as long as possible. (A compaction is "forced"
    //    when a deadline's partition count rises above the current maximum
    //    and compacting it would bring that maximum back down.)
    // 3. Prefer a deadline with an open (non-full) partition over opening a
    //    new one; between two open partitions, prefer the fuller one so runs
    //    of sequential sectors land in the same partition (smaller bitfields).
    // 4. Fewest live sectors, balancing assignment over time.
    // 5. Lowest deadline index.

    let ord = a
        .compact_partitions_after_assignment(partition_size)
        .cmp(&b.compact_partitions_after_assignment(partition_size));
    if ord != Ordering::Equal {
        return ord;
    }

    let ord = a
        .partitions_after_assignment(partition_size)
        .cmp(&b.partitions_after_assignment(partition_size));
    if ord != Ordering::Equal {
        return ord;
    }

    // `false < true`, so a deadline with an open partition sorts first.
    let ord = a
        .is_full_now(partition_size)
        .cmp(&b.is_full_now(partition_size));
    if ord != Ordering::Equal {
        return ord;
    }

    if !a.is_full_now(partition_size) && !b.is_full_now(partition_size) {
        // Both have open partitions: prefer the most-full one.
        let ord = a.total_sectors.cmp(&b.total_sectors).reverse();
        if ord != Ordering::Equal {
            return ord;
        }
    }

    let ord = a.live_sectors.cmp(&b.live_sectors);
    if ord != Ordering::Equal {
        return ord;
    }

    a.index.cmp(&b.index)
}
+pub fn assign_deadlines( + policy: &Policy, + max_partitions: u64, + partition_size: u64, + deadlines: &[Option], + sectors: Vec, +) -> anyhow::Result>> { + struct Entry { + partition_size: u64, + info: DeadlineAssignmentInfo, + } + + impl PartialEq for Entry { + fn eq(&self, other: &Self) -> bool { + self.cmp(other) == Ordering::Equal + } + } + + impl Eq for Entry {} + + impl PartialOrd for Entry { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } + } + + impl Ord for Entry { + fn cmp(&self, other: &Self) -> Ordering { + // we're using a max heap instead of a min heap, so we need to reverse the ordering + cmp(&self.info, &other.info, self.partition_size).reverse() + } + } + + let mut heap: BinaryHeap = deadlines + .iter() + .enumerate() + .filter_map(|(index, deadline)| deadline.as_ref().map(|dl| (index, dl))) + .map(|(index, deadline)| Entry { + partition_size, + info: DeadlineAssignmentInfo { + index, + live_sectors: deadline.live_sectors, + total_sectors: deadline.total_sectors, + }, + }) + .collect(); + + assert!(!heap.is_empty()); + + let mut changes = vec![Vec::new(); policy.wpost_period_deadlines as usize]; + + for sector in sectors { + let info = &mut heap.peek_mut().unwrap().info; + + if info.max_partitions_reached(partition_size, max_partitions) { + return Err(anyhow!( + "max partitions limit {} reached for all deadlines", + max_partitions + )); + } + + changes[info.index].push(sector); + info.live_sectors += 1; + info.total_sectors += 1; + } + + Ok(changes) +} diff --git a/actors/miner/src/v16/deadline_info.rs b/actors/miner/src/v16/deadline_info.rs new file mode 100644 index 00000000..4404ada2 --- /dev/null +++ b/actors/miner/src/v16/deadline_info.rs @@ -0,0 +1,163 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use fvm_shared4::clock::ChainEpoch; +use serde::{Deserialize, Serialize}; + +use crate::v16::QuantSpec; + +/// Deadline calculations with respect to a current epoch. 
+/// "Deadline" refers to the window during which proofs may be submitted. +/// Windows are non-overlapping ranges [Open, Close), but the challenge epoch for a window occurs +/// before the window opens. +#[derive(Default, Debug, Serialize, Deserialize, PartialEq, Eq, Copy, Clone)] +#[serde(rename_all = "PascalCase")] +pub struct DeadlineInfo { + /// Epoch at which this info was calculated. + pub current_epoch: ChainEpoch, + /// First epoch of the proving period (<= CurrentEpoch). + pub period_start: ChainEpoch, + /// Current deadline index, in [0..WPoStProvingPeriodDeadlines). + pub index: u64, + /// First epoch from which a proof may be submitted (>= CurrentEpoch). + pub open: ChainEpoch, + /// First epoch from which a proof may no longer be submitted (>= Open). + pub close: ChainEpoch, + /// Epoch at which to sample the chain for challenge (< Open). + pub challenge: ChainEpoch, + /// First epoch at which a fault declaration is rejected (< Open). + pub fault_cutoff: ChainEpoch, + + // Protocol parameters (This is intentionally included in the JSON response for deadlines) + #[serde(rename = "WPoStPeriodDeadlines")] + pub w_post_period_deadlines: u64, + #[serde(rename = "WPoStProvingPeriod")] + pub w_post_proving_period: ChainEpoch, + #[serde(rename = "WPoStChallengeWindow")] + pub w_post_challenge_window: ChainEpoch, + #[serde(rename = "WPoStChallengeLookback")] + pub w_post_challenge_lookback: ChainEpoch, + pub fault_declaration_cutoff: ChainEpoch, +} + +impl DeadlineInfo { + #[allow(clippy::too_many_arguments)] + pub fn new( + period_start: ChainEpoch, + deadline_idx: u64, + current_epoch: ChainEpoch, + w_post_period_deadlines: u64, + w_post_proving_period: ChainEpoch, + w_post_challenge_window: ChainEpoch, + w_post_challenge_lookback: ChainEpoch, + fault_declaration_cutoff: ChainEpoch, + ) -> Self { + if deadline_idx < w_post_period_deadlines { + let deadline_open = period_start + (deadline_idx as i64 * w_post_challenge_window); + Self { + current_epoch, + 
period_start, + index: deadline_idx, + open: deadline_open, + close: deadline_open + w_post_challenge_window, + challenge: deadline_open - w_post_challenge_lookback, + fault_cutoff: deadline_open - fault_declaration_cutoff, + w_post_period_deadlines, + w_post_proving_period, + w_post_challenge_window, + w_post_challenge_lookback, + fault_declaration_cutoff, + } + } else { + let after_last_deadline = period_start + w_post_proving_period; + Self { + current_epoch, + period_start, + index: deadline_idx, + open: after_last_deadline, + close: after_last_deadline, + challenge: after_last_deadline, + fault_cutoff: 0, + w_post_period_deadlines, + w_post_proving_period, + w_post_challenge_window, + w_post_challenge_lookback, + fault_declaration_cutoff, + } + } + } + + /// Whether the proving period has begun. + pub fn period_started(&self) -> bool { + self.current_epoch >= self.period_start + } + + /// Whether the proving period has elapsed. + pub fn period_elapsed(&self) -> bool { + self.current_epoch >= self.next_period_start() + } + + /// The last epoch in the proving period. + pub fn period_end(&self) -> ChainEpoch { + self.period_start + self.w_post_proving_period - 1 + } + + /// The first epoch in the next proving period. + pub fn next_period_start(&self) -> ChainEpoch { + self.period_start + self.w_post_proving_period + } + + /// Whether the current deadline is currently open. + pub fn is_open(&self) -> bool { + self.current_epoch >= self.open && self.current_epoch < self.close + } + + /// Whether the current deadline has already closed. + pub fn has_elapsed(&self) -> bool { + self.current_epoch >= self.close + } + + /// The last epoch during which a proof may be submitted. + pub fn last(&self) -> ChainEpoch { + self.close - 1 + } + + /// Epoch at which the subsequent deadline opens. + pub fn next_open(&self) -> ChainEpoch { + self.close + } + + /// Whether the deadline's fault cutoff has passed. 
+ pub fn fault_cutoff_passed(&self) -> bool { + self.current_epoch >= self.fault_cutoff + } + + /// Returns the next instance of this deadline that has not yet elapsed. + pub fn next_not_elapsed(self) -> Self { + if !self.has_elapsed() { + return self; + } + + // has elapsed, advance by some multiples of w_post_proving_period + let gap = self.current_epoch - self.close; + let delta_periods = 1 + gap / self.w_post_proving_period; + + Self::new( + self.period_start + self.w_post_proving_period * delta_periods, + self.index, + self.current_epoch, + self.w_post_period_deadlines, + self.w_post_proving_period, + self.w_post_challenge_window, + self.w_post_challenge_lookback, + self.fault_declaration_cutoff, + ) + } + + pub fn quant_spec(&self) -> QuantSpec { + QuantSpec { + unit: self.w_post_proving_period, + offset: self.last(), + } + } +} diff --git a/actors/miner/src/v16/deadline_state.rs b/actors/miner/src/v16/deadline_state.rs new file mode 100644 index 00000000..38af40dc --- /dev/null +++ b/actors/miner/src/v16/deadline_state.rs @@ -0,0 +1,1439 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::cmp; +use std::collections::BTreeSet; + +use anyhow::anyhow; +use cid::Cid; +use fvm_ipld_bitfield::BitField; +use fvm_ipld_blockstore::Blockstore; +use fvm_ipld_encoding::tuple::*; +use fvm_ipld_encoding::CborStore; +use fvm_shared4::clock::ChainEpoch; +use fvm_shared4::econ::TokenAmount; +use fvm_shared4::error::ExitCode; +use fvm_shared4::sector::{PoStProof, SectorSize}; +use multihash_codetable::Code; +use num_traits::{Signed, Zero}; + +use fil_actors_shared::actor_error_v16; +use fil_actors_shared::v16::runtime::Policy; +use fil_actors_shared::v16::{ActorDowncast, ActorError, Array, AsActorError}; + +use crate::v16::SECTORS_AMT_BITWIDTH; + +use super::{ + BitFieldQueue, ExpirationSet, Partition, PartitionSectorMap, PoStPartition, PowerPair, + QuantSpec, SectorOnChainInfo, Sectors, TerminationResult, +}; + +// Bitwidth 
of AMTs determined empirically from mutation patterns and projections of mainnet data. +// Usually a small array +pub const DEADLINE_PARTITIONS_AMT_BITWIDTH: u32 = 3; +pub const DEADLINE_EXPIRATIONS_AMT_BITWIDTH: u32 = 5; + +// Given that 4 partitions can be proven in one post, this AMT's height will +// only exceed the partition AMT's height at ~0.75EiB of storage. +pub const DEADLINE_OPTIMISTIC_POST_SUBMISSIONS_AMT_BITWIDTH: u32 = 2; + +/// Deadlines contains Deadline objects, describing the sectors due at the given +/// deadline and their state (faulty, terminated, recovering, etc.). +#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct Deadlines { + // Note: we could inline part of the deadline struct (e.g., active/assigned sectors) + // to make new sector assignment cheaper. At the moment, assigning a sector requires + // loading all deadlines to figure out where best to assign new sectors. + // TODO: change this to an array once the `LengthAtMost32` trait is no more + pub due: Vec, // []Deadline +} + +impl Deadlines { + pub fn new(policy: &Policy, empty_deadline_cid: Cid) -> Self { + Self { + due: vec![empty_deadline_cid; policy.wpost_period_deadlines as usize], + } + } + + pub fn load_deadline( + &self, + store: &BS, + idx: u64, + ) -> Result { + let idx = idx as usize; + if idx >= self.due.len() { + return Err(actor_error_v16!( + illegal_argument, + "invalid deadline index {} of {}", + idx, + self.due.len() + )); + } + + store + .get_cbor(&self.due[idx]) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to load deadline {}", idx) + })? 
+ .ok_or_else(|| actor_error_v16!(illegal_argument, "no deadline {}", idx)) + } + + pub fn for_each( + &self, + store: &BS, + mut f: impl FnMut(u64, Deadline) -> anyhow::Result<()>, + ) -> anyhow::Result<()> { + for i in 0..(self.due.len() as u64) { + let index = i; + let deadline = self.load_deadline(store, index)?; + f(index, deadline)?; + } + Ok(()) + } + + pub fn update_deadline( + &mut self, + policy: &Policy, + store: &BS, + deadline_idx: u64, + deadline: &Deadline, + ) -> anyhow::Result<()> { + if deadline_idx >= policy.wpost_period_deadlines { + return Err(anyhow!("invalid deadline {}", deadline_idx)); + } + + deadline.validate_state()?; + + self.due[deadline_idx as usize] = store.put_cbor(deadline, Code::Blake2b256)?; + Ok(()) + } +} + +/// Deadline holds the state for all sectors due at a specific deadline. +#[derive(Debug, Default, Serialize_tuple, Deserialize_tuple)] +pub struct Deadline { + /// Partitions in this deadline, in order. + /// The keys of this AMT are always sequential integers beginning with zero. + pub partitions: Cid, // AMT[PartitionNumber]Partition + + /// Maps epochs to partitions that _may_ have sectors that expire in or + /// before that epoch, either on-time or early as faults. + /// Keys are quantized to final epochs in each proving deadline. + /// + /// NOTE: Partitions MUST NOT be removed from this queue (until the + /// associated epoch has passed) even if they no longer have sectors + /// expiring at that epoch. Sectors expiring at this epoch may later be + /// recovered, and this queue will not be updated at that time. + pub expirations_epochs: Cid, // AMT[ChainEpoch]BitField + + // Partitions that have been proved by window PoSts so far during the + // current challenge window. + // NOTE: This bitfield includes both partitions whose proofs + // were optimistically accepted and stored in + // OptimisticPoStSubmissions, and those whose proofs were + // verified on-chain. 
+ pub partitions_posted: BitField, + + /// Partitions with sectors that terminated early. + pub early_terminations: BitField, + + /// The number of non-terminated sectors in this deadline (incl faulty). + pub live_sectors: u64, + + /// The total number of sectors in this deadline (incl dead). + pub total_sectors: u64, + + /// Memoized sum of faulty power in partitions. + pub faulty_power: PowerPair, + + // AMT of optimistically accepted WindowPoSt proofs, submitted during + // the current challenge window. At the end of the challenge window, + // this AMT will be moved to PoStSubmissionsSnapshot. WindowPoSt proofs + // verified on-chain do not appear in this AMT + pub optimistic_post_submissions: Cid, + + // Snapshot of the miner's sectors AMT at the end of the previous challenge + // window for this deadline. + pub sectors_snapshot: Cid, + + // Snapshot of partition state at the end of the previous challenge + // window for this deadline. + pub partitions_snapshot: Cid, + + // Snapshot of the proofs submitted by the end of the previous challenge + // window for this deadline. + // + // These proofs may be disputed via DisputeWindowedPoSt. Successfully + // disputed window PoSts are removed from the snapshot. + pub optimistic_post_submissions_snapshot: Cid, +} + +#[derive(Serialize_tuple, Deserialize_tuple, Clone)] +pub struct WindowedPoSt { + // Partitions proved by this WindowedPoSt. + pub partitions: BitField, + + // Array of proofs, one per distinct registered proof type present in + // the sectors being proven. In the usual case of a single proof type, + // this array will always have a single element (independent of number + // of partitions). 
+ pub proofs: Vec, +} + +#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct DisputeInfo { + pub all_sector_nos: BitField, + pub ignored_sector_nos: BitField, + pub disputed_sectors: PartitionSectorMap, + pub disputed_power: PowerPair, +} + +impl Deadline { + pub fn new(store: &BS) -> Result { + let empty_partitions_array = + Array::<(), BS>::new_with_bit_width(store, DEADLINE_PARTITIONS_AMT_BITWIDTH) + .flush() + .map_err(|e| { + e.downcast_default( + ExitCode::USR_ILLEGAL_STATE, + "Failed to create empty states array", + ) + })?; + let empty_deadline_expiration_array = + Array::<(), BS>::new_with_bit_width(store, DEADLINE_EXPIRATIONS_AMT_BITWIDTH) + .flush() + .map_err(|e| { + e.downcast_default( + ExitCode::USR_ILLEGAL_STATE, + "Failed to create empty states array", + ) + })?; + let empty_post_submissions_array = Array::<(), BS>::new_with_bit_width( + store, + DEADLINE_OPTIMISTIC_POST_SUBMISSIONS_AMT_BITWIDTH, + ) + .flush() + .map_err(|e| { + e.downcast_default( + ExitCode::USR_ILLEGAL_STATE, + "Failed to create empty states array", + ) + })?; + let empty_sectors_array = Array::<(), BS>::new_with_bit_width(store, SECTORS_AMT_BITWIDTH) + .flush() + .map_err(|e| { + e.downcast_default( + ExitCode::USR_ILLEGAL_STATE, + "Failed to construct empty sectors snapshot array", + ) + })?; + Ok(Self { + partitions: empty_partitions_array, + expirations_epochs: empty_deadline_expiration_array, + early_terminations: BitField::new(), + live_sectors: 0, + total_sectors: 0, + faulty_power: PowerPair::zero(), + partitions_posted: BitField::new(), + optimistic_post_submissions: empty_post_submissions_array, + partitions_snapshot: empty_partitions_array, + sectors_snapshot: empty_sectors_array, + optimistic_post_submissions_snapshot: empty_post_submissions_array, + }) + } + + pub fn partitions_amt<'db, BS: Blockstore>( + &self, + store: &'db BS, + ) -> anyhow::Result> { + Ok(Array::load(&self.partitions, store)?) 
+ } + + pub fn optimistic_proofs_amt<'db, BS: Blockstore>( + &self, + store: &'db BS, + ) -> anyhow::Result> { + Ok(Array::load(&self.optimistic_post_submissions, store)?) + } + + pub fn partitions_snapshot_amt<'db, BS: Blockstore>( + &self, + store: &'db BS, + ) -> anyhow::Result> { + Ok(Array::load(&self.partitions_snapshot, store)?) + } + + pub fn optimistic_proofs_snapshot_amt<'db, BS: Blockstore>( + &self, + store: &'db BS, + ) -> anyhow::Result> { + Ok(Array::load( + &self.optimistic_post_submissions_snapshot, + store, + )?) + } + + pub fn load_partition( + &self, + store: &BS, + partition_idx: u64, + ) -> Result { + let partitions = Array::::load(&self.partitions, store) + .context_code(ExitCode::USR_ILLEGAL_STATE, "loading partitions array")?; + + let partition = partitions + .get(partition_idx) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to lookup partition {}", partition_idx) + })? + .ok_or_else(|| actor_error_v16!(not_found, "no partition {}", partition_idx))?; + Ok(partition.clone()) + } + + pub fn load_partition_snapshot( + &self, + store: &BS, + partition_idx: u64, + ) -> anyhow::Result { + let partitions = Array::::load(&self.partitions_snapshot, store)?; + + let partition = partitions + .get(partition_idx) + .map_err(|e| { + e.downcast_default( + ExitCode::USR_ILLEGAL_STATE, + format!("failed to lookup partition snapshot {}", partition_idx), + ) + })? + .ok_or_else(|| { + actor_error_v16!(not_found, "no partition snapshot {}", partition_idx) + })?; + + Ok(partition.clone()) + } + + /// Adds some partition numbers to the set expiring at an epoch. + pub fn add_expiration_partitions( + &mut self, + store: &BS, + expiration_epoch: ChainEpoch, + partitions: &[u64], + quant: QuantSpec, + ) -> anyhow::Result<()> { + // Avoid doing any work if there's nothing to reschedule. 
+ if partitions.is_empty() { + return Ok(()); + } + + let mut queue = BitFieldQueue::new(store, &self.expirations_epochs, quant) + .map_err(|e| e.downcast_wrap("failed to load expiration queue"))?; + queue + .add_to_queue_values(expiration_epoch, partitions.iter().copied()) + .map_err(|e| e.downcast_wrap("failed to mutate expiration queue"))?; + self.expirations_epochs = queue + .amt + .flush() + .map_err(|e| e.downcast_wrap("failed to save expiration queue"))?; + + Ok(()) + } + + /// PopExpiredSectors terminates expired sectors from all partitions. + /// Returns the expired sector aggregates. + pub fn pop_expired_sectors( + &mut self, + store: &BS, + until: ChainEpoch, + quant: QuantSpec, + ) -> anyhow::Result { + let (expired_partitions, modified) = self.pop_expired_partitions(store, until, quant)?; + + if !modified { + // nothing to do. + return Ok(ExpirationSet::empty()); + } + + let mut partitions = self.partitions_amt(store)?; + + let mut on_time_sectors = Vec::::new(); + let mut early_sectors = Vec::::new(); + let mut all_on_time_pledge = TokenAmount::zero(); + let mut all_active_power = PowerPair::zero(); + let mut all_faulty_power = PowerPair::zero(); + let mut partitions_with_early_terminations = Vec::::new(); + + // For each partition with an expiry, remove and collect expirations from the partition queue. + for i in expired_partitions.iter() { + let partition_idx = i; + let mut partition = partitions + .get(partition_idx)? 
+ .cloned() + .ok_or_else(|| anyhow!("missing expected partition {}", partition_idx))?; + + let partition_expiration = + partition + .pop_expired_sectors(store, until, quant) + .map_err(|e| { + e.downcast_wrap(format!( + "failed to pop expired sectors from partition {}", + partition_idx + )) + })?; + + if !partition_expiration.early_sectors.is_empty() { + partitions_with_early_terminations.push(partition_idx); + } + + on_time_sectors.push(partition_expiration.on_time_sectors); + early_sectors.push(partition_expiration.early_sectors); + all_active_power += &partition_expiration.active_power; + all_faulty_power += &partition_expiration.faulty_power; + all_on_time_pledge += &partition_expiration.on_time_pledge; + + partitions.set(partition_idx, partition)?; + } + + self.partitions = partitions.flush()?; + + // Update early expiration bitmap. + let new_early_terminations = BitField::try_from_bits(partitions_with_early_terminations) + .map_err( + |_| actor_error_v16!(illegal_state; "partition index out of bitfield range"), + )?; + self.early_terminations |= &new_early_terminations; + + let all_on_time_sectors = BitField::union(&on_time_sectors); + let all_early_sectors = BitField::union(&early_sectors); + + // Update live sector count. + let on_time_count = all_on_time_sectors.len(); + let early_count = all_early_sectors.len(); + self.live_sectors -= on_time_count + early_count; + + self.faulty_power -= &all_faulty_power; + + Ok(ExpirationSet { + on_time_sectors: all_on_time_sectors, + early_sectors: all_early_sectors, + on_time_pledge: all_on_time_pledge, + active_power: all_active_power, + faulty_power: all_faulty_power, + }) + } + + /// Adds sectors to a deadline. It's the caller's responsibility to make sure + /// that this deadline isn't currently "open" (i.e., being proved at this point + /// in time). + /// The sectors are assumed to be non-faulty. 
+ pub fn add_sectors( + &mut self, + store: &BS, + partition_size: u64, + proven: bool, + mut sectors: &[SectorOnChainInfo], + sector_size: SectorSize, + quant: QuantSpec, + ) -> anyhow::Result { + let mut total_power = PowerPair::zero(); + if sectors.is_empty() { + return Ok(total_power); + } + + // First update partitions, consuming the sectors + let mut partition_deadline_updates = Vec::<(ChainEpoch, u64)>::with_capacity(sectors.len()); + self.live_sectors += sectors.len() as u64; + self.total_sectors += sectors.len() as u64; + + let mut partitions = self.partitions_amt(store)?; + + // try filling up the last partition first. + for partition_idx in partitions.count().saturating_sub(1).. { + // Get/create partition to update. + let mut partition = match partitions.get(partition_idx)? { + Some(partition) => partition.clone(), + None => { + // This case will usually happen zero times. + // It would require adding more than a full partition in one go + // to happen more than once. + Partition::new(store)? + } + }; + + // Figure out which (if any) sectors we want to add to this partition. + let sector_count = partition.sectors.len(); + if sector_count >= partition_size { + continue; + } + + let size = cmp::min(partition_size - sector_count, sectors.len() as u64) as usize; + let partition_new_sectors = §ors[..size]; + + // Intentionally ignoring the index at size, split_at returns size inclusively for start + sectors = §ors[size..]; + + // Add sectors to partition. + let partition_power = + partition.add_sectors(store, proven, partition_new_sectors, sector_size, quant)?; + total_power += &partition_power; + + // Save partition back. + partitions.set(partition_idx, partition)?; + + // Record deadline -> partition mapping so we can later update the deadlines. + partition_deadline_updates.extend( + partition_new_sectors + .iter() + .map(|s| (s.expiration, partition_idx)), + ); + + if sectors.is_empty() { + break; + } + } + + // Save partitions back. 
+ self.partitions = partitions.flush()?; + + // Next, update the expiration queue. + let mut deadline_expirations = + BitFieldQueue::new(store, &self.expirations_epochs, quant) + .map_err(|e| e.downcast_wrap("failed to load expiration epochs"))?; + deadline_expirations + .add_many_to_queue_values(partition_deadline_updates.iter().copied()) + .map_err(|e| e.downcast_wrap("failed to add expirations for new deadlines"))?; + self.expirations_epochs = deadline_expirations.amt.flush()?; + + Ok(total_power) + } + + pub fn pop_early_terminations( + &mut self, + store: &BS, + max_partitions: u64, + max_sectors: u64, + ) -> anyhow::Result<(TerminationResult, /* has more */ bool)> { + let mut partitions = self.partitions_amt(store)?; + + let mut partitions_finished = Vec::::new(); + let mut result = TerminationResult::new(); + + for i in self.early_terminations.iter() { + let partition_idx = i; + + let mut partition = match partitions.get(partition_idx).map_err(|e| { + e.downcast_wrap(format!("failed to load partition {}", partition_idx)) + })? { + Some(partition) => partition.clone(), + None => { + partitions_finished.push(partition_idx); + continue; + } + }; + + // Pop early terminations. + let (partition_result, more) = partition + .pop_early_terminations(store, max_sectors - result.sectors_processed) + .map_err(|e| e.downcast_wrap("failed to pop terminations from partition"))?; + + result += partition_result; + + // If we've processed all of them for this partition, unmark it in the deadline. + if !more { + partitions_finished.push(partition_idx); + } + + // Save partition + partitions.set(partition_idx, partition).map_err(|e| { + e.downcast_wrap(format!("failed to store partition {}", partition_idx)) + })?; + + if !result.below_limit(max_partitions, max_sectors) { + break; + } + } + + // Removed finished partitions from the index. 
+ for finished in partitions_finished { + self.early_terminations.unset(finished); + } + + // Save deadline's partitions + self.partitions = partitions + .flush() + .map_err(|e| e.downcast_wrap("failed to update partitions"))?; + + // Update global early terminations bitfield. + let no_early_terminations = self.early_terminations.is_empty(); + Ok((result, !no_early_terminations)) + } + + pub fn pop_expired_partitions( + &mut self, + store: &BS, + until: ChainEpoch, + quant: QuantSpec, + ) -> anyhow::Result<(BitField, bool)> { + let mut expirations = BitFieldQueue::new(store, &self.expirations_epochs, quant)?; + let (popped, modified) = expirations + .pop_until(until) + .map_err(|e| e.downcast_wrap("failed to pop expiring partitions"))?; + + if modified { + self.expirations_epochs = expirations.amt.flush()?; + } + + Ok((popped, modified)) + } + + #[allow(clippy::too_many_arguments)] + pub fn terminate_sectors( + &mut self, + policy: &Policy, + store: &BS, + sectors: &Sectors<'_, BS>, + epoch: ChainEpoch, + partition_sectors: &mut PartitionSectorMap, + sector_size: SectorSize, + quant: QuantSpec, + ) -> anyhow::Result { + let mut partitions = self.partitions_amt(store)?; + + let mut power_lost = PowerPair::zero(); + for (partition_idx, sector_numbers) in partition_sectors.iter() { + let mut partition = partitions + .get(partition_idx) + .map_err(|e| { + e.downcast_wrap(format!("failed to load partition {}", partition_idx)) + })? + .ok_or_else( + || actor_error_v16!(not_found; "failed to find partition {}", partition_idx), + )? 
+ .clone(); + + let removed = partition + .terminate_sectors( + policy, + store, + sectors, + epoch, + sector_numbers, + sector_size, + quant, + ) + .map_err(|e| { + e.downcast_wrap(format!( + "failed to terminate sectors in partition {}", + partition_idx + )) + })?; + + partitions.set(partition_idx, partition).map_err(|e| { + e.downcast_wrap(format!( + "failed to store updated partition {}", + partition_idx + )) + })?; + + if !removed.is_empty() { + // Record that partition now has pending early terminations. + self.early_terminations.set(partition_idx); + + // Record change to sectors and power + self.live_sectors -= removed.len(); + } // note: we should _always_ have early terminations, unless the early termination bitfield is empty. + + self.faulty_power -= &removed.faulty_power; + + // Aggregate power lost from active sectors + power_lost += &removed.active_power; + } + + // save partitions back + self.partitions = partitions + .flush() + .map_err(|e| e.downcast_wrap("failed to persist partitions"))?; + + Ok(power_lost) + } + + /// RemovePartitions removes the specified partitions, shifting the remaining + /// ones to the left, and returning the live and dead sectors they contained. + /// + /// Returns an error if any of the partitions contained faulty sectors or early + /// terminations. + pub fn remove_partitions( + &mut self, + store: &BS, + to_remove: &BitField, + quant: QuantSpec, + ) -> Result< + ( + BitField, // live + BitField, // dead + PowerPair, // removed power + ), + anyhow::Error, + > { + let old_partitions = self + .partitions_amt(store) + .map_err(|e| e.downcast_wrap("failed to load partitions"))?; + + let partition_count = old_partitions.count(); + let to_remove_set: BTreeSet<_> = to_remove + .bounded_iter(partition_count) + .ok_or_else( + || actor_error_v16!(illegal_argument; "partitions to remove exceeds total"), + )? 
+ .collect(); + + if let Some(&max_partition) = to_remove_set.iter().max() { + if max_partition >= partition_count { + return Err( + actor_error_v16!(illegal_argument; "partition index {} out of range [0, {})", max_partition, partition_count).into() + ); + } + } else { + // Nothing to do. + return Ok((BitField::new(), BitField::new(), PowerPair::zero())); + } + + // Should already be checked earlier, but we might as well check again. + if !self.early_terminations.is_empty() { + return Err( + actor_error_v16!(illegal_argument; "cannot remove partitions from deadline with early terminations").into(), + ); + } + + let mut new_partitions = + Array::::new_with_bit_width(store, DEADLINE_PARTITIONS_AMT_BITWIDTH); + let mut all_dead_sectors = Vec::::with_capacity(to_remove_set.len()); + let mut all_live_sectors = Vec::::with_capacity(to_remove_set.len()); + let mut removed_power = PowerPair::zero(); + + // TODO: maybe only unmarshal the partition if `to_remove_set` contains the + // corresponding index, like the Go impl does + + old_partitions + .for_each(|partition_idx, partition| { + // If we're keeping the partition as-is, append it to the new partitions array. + if !to_remove_set.contains(&partition_idx) { + new_partitions.set(new_partitions.count(), partition.clone())?; + return Ok(()); + } + + // Don't allow removing partitions with faulty sectors. + let has_no_faults = partition.faults.is_empty(); + if !has_no_faults { + return Err(actor_error_v16!( + illegal_argument, + "cannot remove partition {}: has faults", + partition_idx + ) + .into()); + } + + // Don't allow removing partitions with unproven sectors + let all_proven = partition.unproven.is_empty(); + if !all_proven { + return Err(actor_error_v16!( + illegal_argument, + "cannot remove partition {}: has unproven sectors", + partition_idx + ) + .into()); + } + + // Get the live sectors. 
+ let live_sectors = partition.live_sectors(); + + all_dead_sectors.push(partition.terminated.clone()); + all_live_sectors.push(live_sectors); + removed_power += &partition.live_power; + + Ok(()) + }) + .map_err(|e| e.downcast_wrap("while removing partitions"))?; + + self.partitions = new_partitions + .flush() + .map_err(|e| e.downcast_wrap("failed to persist new partition table"))?; + + let dead = BitField::union(&all_dead_sectors); + let live = BitField::union(&all_live_sectors); + + // Update sector counts. + let removed_dead_sectors = dead.len(); + let removed_live_sectors = live.len(); + + self.live_sectors -= removed_live_sectors; + self.total_sectors -= removed_live_sectors + removed_dead_sectors; + + // Update expiration bitfields. + let mut expiration_epochs = BitFieldQueue::new(store, &self.expirations_epochs, quant) + .map_err(|e| e.downcast_wrap("failed to load expiration queue"))?; + + expiration_epochs.cut(to_remove).map_err(|e| { + e.downcast_wrap("failed cut removed partitions from deadline expiration queue") + })?; + + self.expirations_epochs = expiration_epochs + .amt + .flush() + .map_err(|e| e.downcast_wrap("failed persist deadline expiration queue"))?; + + Ok((live, dead, removed_power)) + } + + pub fn record_faults( + &mut self, + store: &BS, + sectors: &Sectors<'_, BS>, + sector_size: SectorSize, + quant: QuantSpec, + fault_expiration_epoch: ChainEpoch, + partition_sectors: &mut PartitionSectorMap, + ) -> anyhow::Result { + let mut partitions = self.partitions_amt(store)?; + + // Record partitions with some fault, for subsequently indexing in the deadline. + // Duplicate entries don't matter, they'll be stored in a bitfield (a set). 
+ let mut partitions_with_fault = Vec::::with_capacity(partition_sectors.len()); + let mut power_delta = PowerPair::zero(); + + for (partition_idx, sector_numbers) in partition_sectors.iter() { + let mut partition = partitions + .get(partition_idx) + .map_err(|e| { + e.downcast_default( + ExitCode::USR_ILLEGAL_STATE, + format!("failed to load partition {}", partition_idx), + ) + })? + .ok_or_else(|| actor_error_v16!(not_found; "no such partition {}", partition_idx))? + .clone(); + + let (new_faults, partition_power_delta, partition_new_faulty_power) = partition + .record_faults( + store, + sectors, + sector_numbers, + fault_expiration_epoch, + sector_size, + quant, + ) + .map_err(|e| { + e.downcast_wrap(format!( + "failed to declare faults in partition {}", + partition_idx + )) + })?; + + self.faulty_power += &partition_new_faulty_power; + power_delta += &partition_power_delta; + if !new_faults.is_empty() { + partitions_with_fault.push(partition_idx); + } + + partitions.set(partition_idx, partition).map_err(|e| { + e.downcast_default( + ExitCode::USR_ILLEGAL_STATE, + format!("failed to store partition {}", partition_idx), + ) + })?; + } + + self.partitions = partitions.flush().map_err(|e| { + e.downcast_default( + ExitCode::USR_ILLEGAL_STATE, + "failed to store partitions root", + ) + })?; + + self.add_expiration_partitions( + store, + fault_expiration_epoch, + &partitions_with_fault, + quant, + ) + .map_err(|e| { + e.downcast_default( + ExitCode::USR_ILLEGAL_STATE, + "failed to update expirations for partitions with faults", + ) + })?; + + Ok(power_delta) + } + + pub fn declare_faults_recovered( + &mut self, + store: &BS, + sectors: &Sectors<'_, BS>, + sector_size: SectorSize, + partition_sectors: &mut PartitionSectorMap, + ) -> anyhow::Result<()> { + let mut partitions = self.partitions_amt(store)?; + + for (partition_idx, sector_numbers) in partition_sectors.iter() { + let mut partition = partitions + .get(partition_idx) + .map_err(|e| { + e.downcast_default( + 
ExitCode::USR_ILLEGAL_STATE, + format!("failed to load partition {}", partition_idx), + ) + })? + .ok_or_else(|| actor_error_v16!(not_found; "no such partition {}", partition_idx))? + .clone(); + + partition + .declare_faults_recovered(sectors, sector_size, sector_numbers) + .map_err(|e| e.downcast_wrap("failed to add recoveries"))?; + + partitions.set(partition_idx, partition).map_err(|e| { + e.downcast_default( + ExitCode::USR_ILLEGAL_STATE, + format!("failed to update partition {}", partition_idx), + ) + })?; + } + + // Power is not regained until the deadline end, when the recovery is confirmed. + + self.partitions = partitions.flush().map_err(|e| { + e.downcast_default( + ExitCode::USR_ILLEGAL_STATE, + "failed to store partitions root", + ) + })?; + + Ok(()) + } + + /// Processes all PoSt submissions, marking unproven sectors as + /// faulty and clearing failed recoveries. It returns the power delta, and any + /// power that should be penalized (new faults and failed recoveries). + pub fn process_deadline_end( + &mut self, + store: &BS, + quant: QuantSpec, + fault_expiration_epoch: ChainEpoch, + sectors: Cid, + ) -> Result<(PowerPair, PowerPair), ActorError> { + let mut partitions = self.partitions_amt(store).map_err(|e| { + e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load partitions") + })?; + + let mut detected_any = false; + let mut rescheduled_partitions = Vec::::new(); + let mut power_delta = PowerPair::zero(); + let mut penalized_power = PowerPair::zero(); + for partition_idx in 0..partitions.count() { + let proven = self.partitions_posted.get(partition_idx); + + if proven { + continue; + } + + let mut partition = partitions + .get(partition_idx) + .map_err(|e| { + e.downcast_default( + ExitCode::USR_ILLEGAL_STATE, + format!("failed to load partition {}", partition_idx), + ) + })? + .ok_or_else(|| actor_error_v16!(illegal_state; "no partition {}", partition_idx))? 
+ .clone(); + + // If we have no recovering power/sectors, and all power is faulty, skip + // this. This lets us skip some work if a miner repeatedly fails to PoSt. + if partition.recovering_power.is_zero() + && partition.faulty_power == partition.live_power + { + continue; + } + + // Ok, we actually need to process this partition. Make sure we save the partition state back. + detected_any = true; + + let (part_power_delta, part_penalized_power, part_new_faulty_power) = partition + .record_missed_post(store, fault_expiration_epoch, quant) + .map_err(|e| { + e.downcast_default( + ExitCode::USR_ILLEGAL_STATE, + format!( + "failed to record missed PoSt for partition {}", + partition_idx + ), + ) + })?; + + // We marked some sectors faulty, we need to record the new + // expiration. We don't want to do this if we're just penalizing + // the miner for failing to recover power. + if !part_new_faulty_power.is_zero() { + rescheduled_partitions.push(partition_idx); + } + + // Save new partition state. + partitions.set(partition_idx, partition).map_err(|e| { + e.downcast_default( + ExitCode::USR_ILLEGAL_STATE, + format!("failed to update partition {}", partition_idx), + ) + })?; + + self.faulty_power += &part_new_faulty_power; + + power_delta += &part_power_delta; + penalized_power += &part_penalized_power; + } + + // Save modified deadline state. + if detected_any { + self.partitions = partitions.flush().map_err(|e| { + e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to store partitions") + })?; + } + + self.add_expiration_partitions( + store, + fault_expiration_epoch, + &rescheduled_partitions, + quant, + ) + .map_err(|e| { + e.downcast_default( + ExitCode::USR_ILLEGAL_STATE, + "failed to update deadline expiration queue", + ) + })?; + + // Reset PoSt submissions. 
+ self.partitions_posted = BitField::new(); + self.partitions_snapshot = self.partitions; + self.optimistic_post_submissions_snapshot = self.optimistic_post_submissions; + self.optimistic_post_submissions = Array::<(), BS>::new_with_bit_width( + store, + DEADLINE_OPTIMISTIC_POST_SUBMISSIONS_AMT_BITWIDTH, + ) + .flush() + .map_err(|e| { + e.downcast_default( + ExitCode::USR_ILLEGAL_STATE, + "failed to clear pending proofs array", + ) + })?; + + // only snapshot sectors if there's a proof that might be disputed (this is equivalent to asking if the OptimisticPoStSubmissionsSnapshot is empty) + if self.optimistic_post_submissions != self.optimistic_post_submissions_snapshot { + self.sectors_snapshot = sectors; + } else { + self.sectors_snapshot = + Array::<(), BS>::new_with_bit_width(store, SECTORS_AMT_BITWIDTH) + .flush() + .map_err(|e| { + e.downcast_default( + ExitCode::USR_ILLEGAL_STATE, + "failed to clear sectors snapshot array", + ) + })?; + } + Ok((power_delta, penalized_power)) + } + pub fn for_each( + &self, + store: &BS, + f: impl FnMut(u64, &Partition) -> anyhow::Result<()>, + ) -> anyhow::Result<()> { + let parts = self.partitions_amt(store)?; + parts.for_each(f)?; + Ok(()) + } + + pub fn validate_state(&self) -> anyhow::Result<()> { + if self.live_sectors > self.total_sectors { + return Err(anyhow!("deadline left with more live sectors than total")); + } + + if self.faulty_power.raw.is_negative() || self.faulty_power.qa.is_negative() { + return Err(anyhow!("deadline left with negative faulty power")); + } + + Ok(()) + } + + pub fn load_partitions_for_dispute( + &self, + store: &BS, + partitions: BitField, + ) -> anyhow::Result { + let partitions_snapshot = self + .partitions_snapshot_amt(store) + .map_err(|e| e.downcast_wrap("failed to load partitions {}"))?; + + let mut all_sectors = Vec::new(); + let mut all_ignored = Vec::new(); + let mut disputed_sectors = PartitionSectorMap::default(); + let mut disputed_power = PowerPair::zero(); + for part_idx in 
partitions.iter() { + let partition_snapshot = partitions_snapshot + .get(part_idx)? + .ok_or_else(|| anyhow!("failed to find partition {}", part_idx))?; + + // Record sectors for proof verification + all_sectors.push(partition_snapshot.sectors.clone()); + all_ignored.push(partition_snapshot.faults.clone()); + all_ignored.push(partition_snapshot.terminated.clone()); + all_ignored.push(partition_snapshot.unproven.clone()); + + // Record active sectors for marking faults. + let active = partition_snapshot.active_sectors(); + disputed_sectors.add(part_idx, active)?; + + // Record disputed power for penalties. + // + // NOTE: This also includes power that was + // activated at the end of the last challenge + // window, and power from sectors that have since + // expired. + disputed_power += &partition_snapshot.active_power(); + } + + let all_sector_nos = BitField::union(&all_sectors); + let all_ignored_nos = BitField::union(&all_ignored); + + Ok(DisputeInfo { + all_sector_nos, + disputed_sectors, + disputed_power, + ignored_sector_nos: all_ignored_nos, + }) + } + + pub fn is_live(&self) -> bool { + if self.live_sectors > 0 { + return true; + } + + let has_no_proofs = self.partitions_posted.is_empty(); + if !has_no_proofs { + // _This_ case should be impossible, but there's no good way to log from here. We + // might as well just process the deadline end and move on. + return true; + } + + // If the partitions have changed, we may have work to do. We should at least update the + // partitions snapshot one last time. + if self.partitions != self.partitions_snapshot { + return true; + } + + // If we don't have any proofs, and the proofs snapshot isn't the same as the current proofs + // snapshot (which should be empty), we should update the deadline one last time to empty + // the proofs snapshot. 
+ if self.optimistic_post_submissions != self.optimistic_post_submissions_snapshot { + return true; + } + false + } +} + +pub struct PoStResult { + /// Power activated or deactivated (positive or negative). + pub power_delta: PowerPair, + pub new_faulty_power: PowerPair, + pub retracted_recovery_power: PowerPair, + pub recovered_power: PowerPair, + /// A bitfield of all sectors in the proven partitions. + pub sectors: BitField, + /// A subset of `sectors` that should be ignored. + pub ignored_sectors: BitField, + // Bitfield of partitions that were proven. + pub partitions: BitField, +} + +impl Deadline { + /// Processes a series of posts, recording proven partitions and marking skipped + /// sectors as faulty. + /// + /// It returns a PoStResult containing the list of proven and skipped sectors and + /// changes to power (newly faulty power, power that should have been proven + /// recovered but wasn't, and newly recovered power). + /// + /// NOTE: This function does not actually _verify_ any proofs. The returned + /// `sectors` and `ignored_sectors` must subsequently be validated against the PoSt + /// submitted by the miner. + pub fn record_proven_sectors( + &mut self, + store: &BS, + sectors: &Sectors<'_, BS>, + sector_size: SectorSize, + quant: QuantSpec, + fault_expiration: ChainEpoch, + post_partitions: &mut [PoStPartition], + ) -> anyhow::Result { + let partition_indexes = BitField::try_from_bits(post_partitions.iter().map(|p| p.index)) + .map_err( + |_| actor_error_v16!(illegal_argument; "partition index out of bitfield range"), + )?; + + let num_partitions = partition_indexes.len(); + if num_partitions != post_partitions.len() as u64 { + return Err(anyhow!(actor_error_v16!( + illegal_argument, + "duplicate partitions proven" + ))); + } + + // First check to see if we're proving any already proven partitions. + // This is faster than checking one by one. 
+ let already_proven = &self.partitions_posted & &partition_indexes; + if !already_proven.is_empty() { + return Err(anyhow!(actor_error_v16!( + illegal_argument, + "partition already proven: {:?}", + already_proven + ))); + } + + let mut partitions = self.partitions_amt(store)?; + + let mut all_sectors = Vec::::with_capacity(post_partitions.len()); + let mut all_ignored = Vec::::with_capacity(post_partitions.len()); + let mut new_faulty_power_total = PowerPair::zero(); + let mut retracted_recovery_power_total = PowerPair::zero(); + let mut recovered_power_total = PowerPair::zero(); + let mut rescheduled_partitions = Vec::::new(); + let mut power_delta = PowerPair::zero(); + + // Accumulate sectors info for proof verification. + for post in post_partitions { + let mut partition = partitions + .get(post.index) + .map_err(|e| e.downcast_wrap(format!("failed to load partition {}", post.index)))? + .ok_or_else(|| actor_error_v16!(not_found; "no such partition {}", post.index))? + .clone(); + + // Process new faults and accumulate new faulty power. + // This updates the faults in partition state ahead of calculating the sectors to include for proof. + let (mut new_power_delta, new_fault_power, retracted_recovery_power, has_new_faults) = + partition + .record_skipped_faults( + store, + sectors, + sector_size, + quant, + fault_expiration, + &post.skipped, + ) + .map_err(|e| { + e.downcast_wrap(format!( + "failed to add skipped faults to partition {}", + post.index + )) + })?; + + // If we have new faulty power, we've added some faults. We need + // to record the new expiration in the deadline. 
+ if has_new_faults { + rescheduled_partitions.push(post.index); + } + + let recovered_power = partition + .recover_faults(store, sectors, sector_size, quant) + .map_err(|e| { + e.downcast_wrap(format!( + "failed to recover faulty sectors for partition {}", + post.index + )) + })?; + + new_power_delta += &partition.activate_unproven(); + + // note: we do this first because `partition` is moved in the upcoming `partitions.set` call + // At this point, the partition faults represents the expected faults for the proof, with new skipped + // faults and recoveries taken into account. + all_sectors.push(partition.sectors.clone()); + all_ignored.push(partition.faults.clone()); + all_ignored.push(partition.terminated.clone()); + + // This will be rolled back if the method aborts with a failed proof. + partitions.set(post.index, partition).map_err(|e| { + e.downcast_default( + ExitCode::USR_ILLEGAL_STATE, + format!("failed to update partition {}", post.index), + ) + })?; + + new_faulty_power_total += &new_fault_power; + retracted_recovery_power_total += &retracted_recovery_power; + recovered_power_total += &recovered_power; + power_delta += &new_power_delta; + power_delta += &recovered_power; + + // Record the post. + self.partitions_posted.set(post.index); + } + + self.add_expiration_partitions(store, fault_expiration, &rescheduled_partitions, quant) + .map_err(|e| { + e.downcast_default( + ExitCode::USR_ILLEGAL_STATE, + "failed to update expirations for partitions with faults", + ) + })?; + + // Save everything back. + self.faulty_power -= &recovered_power_total; + self.faulty_power += &new_faulty_power_total; + + self.partitions = partitions.flush().map_err(|e| { + e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to persist partitions") + })?; + + // Collect all sectors, faults, and recoveries for proof verification. 
+ let all_sector_numbers = BitField::union(&all_sectors); + let all_ignored_sector_numbers = BitField::union(&all_ignored); + + Ok(PoStResult { + new_faulty_power: new_faulty_power_total, + retracted_recovery_power: retracted_recovery_power_total, + recovered_power: recovered_power_total, + sectors: all_sector_numbers, + power_delta, + ignored_sectors: all_ignored_sector_numbers, + partitions: partition_indexes, + }) + } + + // RecordPoStProofs records a set of optimistically accepted PoSt proofs + // (usually one), associating them with the given partitions. + pub fn record_post_proofs( + &mut self, + store: &BS, + partitions: &BitField, + proofs: &[PoStProof], + ) -> anyhow::Result<()> { + let mut proof_arr = self + .optimistic_proofs_amt(store) + .map_err(|e| e.downcast_wrap("failed to load post proofs"))?; + proof_arr + .set( + proof_arr.count(), + // TODO: Can we do this with out cloning? + WindowedPoSt { + partitions: partitions.clone(), + proofs: proofs.to_vec(), + }, + ) + .map_err(|e| e.downcast_wrap("failed to store proof"))?; + let root = proof_arr + .flush() + .map_err(|e| e.downcast_wrap("failed to save proofs"))?; + self.optimistic_post_submissions = root; + Ok(()) + } + + // TakePoStProofs removes and returns a PoSt proof by index, along with the + // associated partitions. This method takes the PoSt from the PoSt submissions + // snapshot. + pub fn take_post_proofs( + &mut self, + store: &BS, + idx: u64, + ) -> anyhow::Result<(BitField, Vec)> { + let mut proof_arr = self + .optimistic_proofs_snapshot_amt(store) + .map_err(|e| e.downcast_wrap("failed to load post proofs snapshot amt"))?; + // Extract and remove the proof from the proofs array, leaving a hole. + // This will not affect concurrent attempts to refute other proofs. + let post = proof_arr + .delete(idx) + .map_err(|e| e.downcast_wrap(format!("failed to retrieve proof {}", idx)))? 
+ .ok_or_else(|| actor_error_v16!(illegal_argument, "proof {} not found", idx))?; + + let root = proof_arr + .flush() + .map_err(|e| e.downcast_wrap("failed to save proofs"))?; + self.optimistic_post_submissions_snapshot = root; + Ok((post.partitions, post.proofs)) + } + + /// RescheduleSectorExpirations reschedules the expirations of the given sectors + /// to the target epoch, skipping any sectors it can't find. + /// + /// The power of the rescheduled sectors is assumed to have not changed since + /// initial scheduling. + /// + /// Note: see the docs on State.RescheduleSectorExpirations for details on why we + /// skip sectors/partitions we can't find. + pub fn reschedule_sector_expirations( + &mut self, + store: &BS, + sectors: &Sectors<'_, BS>, + expiration: ChainEpoch, + partition_sectors: &mut PartitionSectorMap, + sector_size: SectorSize, + quant: QuantSpec, + ) -> anyhow::Result> { + let mut partitions = self.partitions_amt(store)?; + + // track partitions with moved expirations. + let mut rescheduled_partitions = Vec::::new(); + + let mut all_replaced = Vec::new(); + for (partition_idx, sector_numbers) in partition_sectors.iter() { + let mut partition = match partitions.get(partition_idx).map_err(|e| { + e.downcast_wrap(format!("failed to load partition {}", partition_idx)) + })? { + Some(partition) => partition.clone(), + None => { + // We failed to find the partition, it could have moved + // due to compaction. This function is only reschedules + // sectors it can find so we'll just skip it. + continue; + } + }; + + let replaced = partition + .reschedule_expirations( + store, + sectors, + expiration, + sector_numbers, + sector_size, + quant, + ) + .map_err(|e| { + e.downcast_wrap(format!( + "failed to reschedule expirations in partition {}", + partition_idx + )) + })?; + + if replaced.is_empty() { + // nothing moved. 
+ continue; + } + all_replaced.extend(replaced); + + rescheduled_partitions.push(partition_idx); + partitions.set(partition_idx, partition).map_err(|e| { + e.downcast_wrap(format!("failed to store partition {}", partition_idx)) + })?; + } + + if !rescheduled_partitions.is_empty() { + self.partitions = partitions + .flush() + .map_err(|e| e.downcast_wrap("failed to save partitions"))?; + + self.add_expiration_partitions(store, expiration, &rescheduled_partitions, quant) + .map_err(|e| e.downcast_wrap("failed to reschedule partition expirations"))?; + } + + Ok(all_replaced) + } +} diff --git a/actors/miner/src/v16/deadlines.rs b/actors/miner/src/v16/deadlines.rs new file mode 100644 index 00000000..a5b3cb64 --- /dev/null +++ b/actors/miner/src/v16/deadlines.rs @@ -0,0 +1,157 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use fil_actors_shared::v16::runtime::Policy; +use fil_actors_shared::v16::Array; + +use fvm_ipld_blockstore::Blockstore; +use fvm_shared4::clock::ChainEpoch; +use fvm_shared4::sector::SectorNumber; + +use super::{DeadlineInfo, Deadlines, Partition, QuantSpec}; + +pub fn new_deadline_info( + policy: &Policy, + proving_period_start: ChainEpoch, + deadline_idx: u64, + current_epoch: ChainEpoch, +) -> DeadlineInfo { + DeadlineInfo::new( + proving_period_start, + deadline_idx, + current_epoch, + policy.wpost_period_deadlines, + policy.wpost_proving_period, + policy.wpost_challenge_window, + policy.wpost_challenge_lookback, + policy.fault_declaration_cutoff, + ) +} + +impl Deadlines { + /// Returns the deadline and partition index for a sector number. + /// Returns an error if the sector number is not tracked by `self`. 
+ pub fn find_sector( + &self, + store: &BS, + sector_number: SectorNumber, + ) -> anyhow::Result<(u64, u64)> { + for i in 0..self.due.len() { + let deadline_idx = i as u64; + let deadline = self.load_deadline(store, deadline_idx)?; + let partitions = Array::::load(&deadline.partitions, store)?; + + let mut partition_idx = None; + + partitions.for_each_while(|i, partition| { + if partition.sectors.get(sector_number) { + partition_idx = Some(i); + Ok(false) + } else { + Ok(true) + } + })?; + + if let Some(partition_idx) = partition_idx { + return Ok((deadline_idx, partition_idx)); + } + } + + Err(anyhow::anyhow!( + "sector {} not due at any deadline", + sector_number + )) + } +} + +/// Returns true if the deadline at the given index is currently mutable. +pub fn deadline_is_mutable( + policy: &Policy, + proving_period_start: ChainEpoch, + deadline_idx: u64, + current_epoch: ChainEpoch, +) -> bool { + // Get the next non-elapsed deadline (i.e., the next time we care about + // mutations to the deadline). + let deadline_info = + new_deadline_info(policy, proving_period_start, deadline_idx, current_epoch) + .next_not_elapsed(); + + // Ensure that the current epoch is at least one challenge window before + // that deadline opens. + current_epoch < deadline_info.open - policy.wpost_challenge_window +} + +pub fn quant_spec_for_deadline(policy: &Policy, di: &DeadlineInfo) -> QuantSpec { + QuantSpec { + unit: policy.wpost_proving_period, + offset: di.last(), + } +} + +// Returns true if optimistically accepted posts submitted to the given deadline +// may be disputed. Specifically, this ensures that: +// +// 1. Optimistic PoSts may not be disputed while the challenge window is open. +// 2. Optimistic PoSts may not be disputed after the miner could have compacted the deadline. 
+pub fn deadline_available_for_optimistic_post_dispute( + policy: &Policy, + proving_period_start: ChainEpoch, + deadline_idx: u64, + current_epoch: ChainEpoch, +) -> bool { + if proving_period_start > current_epoch { + return false; + } + let dl_info = new_deadline_info(policy, proving_period_start, deadline_idx, current_epoch) + .next_not_elapsed(); + + !dl_info.is_open() + && current_epoch + < (dl_info.close - policy.wpost_proving_period) + policy.wpost_dispute_window +} + +// Returns true if the given deadline may compacted in the current epoch. +// Deadlines may not be compacted when: +// +// 1. The deadline is currently being challenged. +// 2. The deadline is to be challenged next. +// 3. Optimistically accepted posts from the deadline's last challenge window +// can currently be disputed. +pub fn deadline_available_for_compaction( + policy: &Policy, + proving_period_start: ChainEpoch, + deadline_idx: u64, + current_epoch: ChainEpoch, +) -> bool { + deadline_is_mutable(policy, proving_period_start, deadline_idx, current_epoch) + && !deadline_available_for_optimistic_post_dispute( + policy, + proving_period_start, + deadline_idx, + current_epoch, + ) +} + +// Determine current period start and deadline index directly from current epoch and +// the offset implied by the proving period. 
This works correctly even for the state +// of a miner actor without an active deadline cron +pub fn new_deadline_info_from_offset_and_epoch( + policy: &Policy, + period_start_seed: ChainEpoch, + current_epoch: ChainEpoch, +) -> DeadlineInfo { + let q = QuantSpec { + unit: policy.wpost_proving_period, + offset: period_start_seed, + }; + let current_period_start = q.quantize_down(current_epoch); + let current_deadline_idx = + ((current_epoch - current_period_start) / policy.wpost_challenge_window) as u64; + new_deadline_info( + policy, + current_period_start, + current_deadline_idx, + current_epoch, + ) +} diff --git a/actors/miner/src/v16/expiration_queue.rs b/actors/miner/src/v16/expiration_queue.rs new file mode 100644 index 00000000..59d9ceb7 --- /dev/null +++ b/actors/miner/src/v16/expiration_queue.rs @@ -0,0 +1,986 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::collections::{BTreeMap, BTreeSet}; +use std::convert::TryInto; + +use anyhow::{anyhow, Context}; +use cid::Cid; +use fil_actors_shared::v16::runtime::Policy; +use fil_actors_shared::v16::{ActorDowncast, Array}; +use fvm_ipld_amt::{Error as AmtError, ValueMut}; +use fvm_ipld_bitfield::BitField; +use fvm_ipld_blockstore::Blockstore; +use fvm_ipld_encoding::tuple::*; + +use fvm_shared4::clock::ChainEpoch; +use fvm_shared4::econ::TokenAmount; +use fvm_shared4::sector::{SectorNumber, SectorSize}; +use num_traits::{Signed, Zero}; + +use super::{power_for_sector, PowerPair, QuantSpec, SectorOnChainInfo}; + +/// An internal limit on the cardinality of a bitfield in a queue entry. +/// This must be at least large enough to support the maximum number of sectors in a partition. +/// It would be a bit better to derive this number from an enumeration over all partition sizes. 
+const ENTRY_SECTORS_MAX: u64 = 10_000; + +/// ExpirationSet is a collection of sector numbers that are expiring, either due to +/// expected "on-time" expiration at the end of their life, or unexpected "early" termination +/// due to being faulty for too long consecutively. +/// Note that there is not a direct correspondence between on-time sectors and active power; +/// a sector may be faulty but expiring on-time if it faults just prior to expected termination. +/// Early sectors are always faulty, and active power always represents on-time sectors. +#[derive(Serialize_tuple, Deserialize_tuple, Clone, Debug, Default)] +pub struct ExpirationSet { + /// Sectors expiring "on time" at the end of their committed life + pub on_time_sectors: BitField, + /// Sectors expiring "early" due to being faulty for too long + pub early_sectors: BitField, + /// Pledge total for the on-time sectors + pub on_time_pledge: TokenAmount, + /// Power that is currently active (not faulty) + pub active_power: PowerPair, + /// Power that is currently faulty + pub faulty_power: PowerPair, +} + +impl ExpirationSet { + pub fn empty() -> Self { + Default::default() + } + + /// Adds sectors and power to the expiration set in place. + pub fn add( + &mut self, + on_time_sectors: &BitField, + early_sectors: &BitField, + on_time_pledge: &TokenAmount, + active_power: &PowerPair, + faulty_power: &PowerPair, + ) -> anyhow::Result<()> { + self.on_time_sectors |= on_time_sectors; + self.early_sectors |= early_sectors; + self.on_time_pledge += on_time_pledge; + self.active_power += active_power; + self.faulty_power += faulty_power; + + self.validate_state()?; + Ok(()) + } + + /// Removes sectors and power from the expiration set in place. + pub fn remove( + &mut self, + on_time_sectors: &BitField, + early_sectors: &BitField, + on_time_pledge: &TokenAmount, + active_power: &PowerPair, + faulty_power: &PowerPair, + ) -> anyhow::Result<()> { + // Check for sector intersection. 
This could be cheaper with a combined intersection/difference method used below. + if !self.on_time_sectors.contains_all(on_time_sectors) { + return Err(anyhow!( + "removing on-time sectors {:?} not contained in {:?}", + on_time_sectors, + self.on_time_sectors + )); + } + if !self.early_sectors.contains_all(early_sectors) { + return Err(anyhow!( + "removing early sectors {:?} not contained in {:?}", + early_sectors, + self.early_sectors + )); + } + + self.on_time_sectors -= on_time_sectors; + self.early_sectors -= early_sectors; + self.on_time_pledge -= on_time_pledge; + self.active_power -= active_power; + self.faulty_power -= faulty_power; + + // Check underflow. + if self.on_time_pledge.is_negative() { + return Err(anyhow!("expiration set pledge underflow: {:?}", self)); + } + if self.active_power.qa.is_negative() || self.faulty_power.qa.is_negative() { + return Err(anyhow!("expiration set power underflow: {:?}", self)); + } + self.validate_state()?; + Ok(()) + } + + /// A set is empty if it has no sectors. + /// The power and pledge are not checked, but are expected to be zero. + pub fn is_empty(&self) -> bool { + self.on_time_sectors.is_empty() && self.early_sectors.is_empty() + } + + /// Counts all sectors in the expiration set. 
+ pub fn len(&self) -> u64 { + self.on_time_sectors.len() + self.early_sectors.len() + } + + /// validates a set of assertions that must hold for expiration sets + pub fn validate_state(&self) -> anyhow::Result<()> { + if self.on_time_pledge.is_negative() { + return Err(anyhow!("ExpirationSet left with negative pledge")); + } + + if self.active_power.raw.is_negative() { + return Err(anyhow!("ExpirationSet left with negative raw active power")); + } + + if self.active_power.qa.is_negative() { + return Err(anyhow!("ExpirationSet left with negative qa active power")); + } + + if self.faulty_power.raw.is_negative() { + return Err(anyhow!("ExpirationSet left with negative raw faulty power")); + } + + if self.faulty_power.qa.is_negative() { + return Err(anyhow!("ExpirationSet left with negative qa faulty power")); + } + + Ok(()) + } +} + +/// A queue of expiration sets by epoch, representing the on-time or early termination epoch for a collection of sectors. +/// Wraps an AMT[ChainEpoch]*ExpirationSet. +/// Keys in the queue are quantized (upwards), modulo some offset, to reduce the cardinality of keys. +pub struct ExpirationQueue<'db, BS> { + pub amt: Array<'db, ExpirationSet, BS>, + pub quant: QuantSpec, +} + +impl<'db, BS: Blockstore> ExpirationQueue<'db, BS> { + /// Loads a queue root. + /// + /// Epochs provided to subsequent method calls will be quantized upwards to quanta mod offsetSeed before being + /// written to/read from queue entries. + pub fn new(store: &'db BS, root: &Cid, quant: QuantSpec) -> Result { + Ok(Self { + amt: Array::load(root, store)?, + quant, + }) + } + + /// Adds a collection of sectors to their on-time target expiration entries (quantized). + /// The sectors are assumed to be active (non-faulty). + /// Returns the sector numbers, power, and pledge added. 
+ pub fn add_active_sectors<'a>( + &mut self, + sectors: impl IntoIterator, + sector_size: SectorSize, + ) -> anyhow::Result<(BitField, PowerPair, TokenAmount)> { + let mut total_power = PowerPair::zero(); + let mut total_pledge = TokenAmount::zero(); + let mut total_sectors = Vec::::new(); + + for group in group_new_sectors_by_declared_expiration(sector_size, sectors, self.quant) { + let sector_numbers = BitField::try_from_bits(group.sectors)?; + + self.add( + group.epoch, + §or_numbers, + &BitField::new(), + &group.power, + &PowerPair::zero(), + &group.pledge, + ) + .map_err(|e| e.downcast_wrap("failed to record new sector expirations"))?; + + total_sectors.push(sector_numbers); + total_power += &group.power; + total_pledge += &group.pledge; + } + + let sector_numbers = BitField::union(total_sectors.iter()); + Ok((sector_numbers, total_power, total_pledge)) + } + + /// Reschedules some sectors to a new (quantized) expiration epoch. + /// The sectors being rescheduled are assumed to be not faulty, and hence are removed from and re-scheduled for on-time + /// rather than early expiration. + /// The sectors' power and pledge are assumed not to change, despite the new expiration. + pub fn reschedule_expirations( + &mut self, + new_expiration: ChainEpoch, + sectors: &[SectorOnChainInfo], + sector_size: SectorSize, + ) -> anyhow::Result<()> { + if sectors.is_empty() { + return Ok(()); + } + + let (sector_numbers, power, pledge) = self + .remove_active_sectors(sectors, sector_size) + .map_err(|e| e.downcast_wrap("failed to remove sector expirations"))?; + + self.add( + new_expiration, + §or_numbers, + &BitField::new(), + &power, + &PowerPair::zero(), + &pledge, + ) + .map_err(|e| e.downcast_wrap("failed to record new sector expirations"))?; + + Ok(()) + } + + /// Re-schedules sectors to expire at an early expiration epoch (quantized), if they wouldn't expire before then anyway. 
+ /// The sectors must not be currently faulty, so must be registered as expiring on-time rather than early. + /// The pledge for the now-early sectors is removed from the queue. + /// Returns the total power represented by the sectors. + pub fn reschedule_as_faults( + &mut self, + new_expiration: ChainEpoch, + sectors: &[SectorOnChainInfo], + sector_size: SectorSize, + ) -> anyhow::Result { + let mut sectors_total = Vec::new(); + let mut expiring_power = PowerPair::zero(); + let mut rescheduled_power = PowerPair::zero(); + + let groups = self.find_sectors_by_expiration(sector_size, sectors)?; + + // Group sectors by their target expiration, then remove from existing queue entries according to those groups. + let new_quantized_expiration = self.quant.quantize_up(new_expiration); + for mut group in groups { + if group.sector_epoch_set.epoch <= new_quantized_expiration { + // Don't reschedule sectors that are already due to expire on-time before the fault-driven expiration, + // but do represent their power as now faulty. + // Their pledge remains as "on-time". + group.expiration_set.active_power -= &group.sector_epoch_set.power; + group.expiration_set.faulty_power += &group.sector_epoch_set.power; + expiring_power += &group.sector_epoch_set.power; + } else { + // Remove sectors from on-time expiry and active power. + let sectors_bitfield = + BitField::try_from_bits(group.sector_epoch_set.sectors.iter().copied())?; + group.expiration_set.on_time_sectors -= §ors_bitfield; + group.expiration_set.on_time_pledge -= &group.sector_epoch_set.pledge; + group.expiration_set.active_power -= &group.sector_epoch_set.power; + + // Accumulate the sectors and power removed. 
+ sectors_total.extend_from_slice(&group.sector_epoch_set.sectors); + rescheduled_power += &group.sector_epoch_set.power; + } + + self.must_update_or_delete(group.sector_epoch_set.epoch, group.expiration_set.clone())?; + + group.expiration_set.validate_state()?; + } + + if !sectors_total.is_empty() { + // Add sectors to new expiration as early-terminating and faulty. + let early_sectors = BitField::try_from_bits(sectors_total)?; + self.add( + new_expiration, + &BitField::new(), + &early_sectors, + &PowerPair::zero(), + &rescheduled_power, + &TokenAmount::zero(), + )?; + } + + Ok(&rescheduled_power + &expiring_power) + } + + /// Re-schedules *all* sectors to expire at an early expiration epoch, if they wouldn't expire before then anyway. + pub fn reschedule_all_as_faults(&mut self, fault_expiration: ChainEpoch) -> anyhow::Result<()> { + let mut rescheduled_epochs = Vec::::new(); + let mut rescheduled_sectors = BitField::new(); + let mut rescheduled_power = PowerPair::zero(); + + let mut mutated_expiration_sets = Vec::<(ChainEpoch, ExpirationSet)>::new(); + + let quantized_fault_expiration = self.quant.quantize_up(fault_expiration); + self.amt.for_each(|e, expiration_set| { + let epoch: ChainEpoch = e.try_into()?; + + if epoch <= quantized_fault_expiration { + let mut expiration_set = expiration_set.clone(); + + // Regardless of whether the sectors were expiring on-time or early, all the power is now faulty. + // Pledge is still on-time. + expiration_set.faulty_power += &expiration_set.active_power; + expiration_set.active_power = PowerPair::zero(); + mutated_expiration_sets.push((epoch, expiration_set)); + } else { + rescheduled_epochs.push(e); + // sanity check to make sure we're not trying to re-schedule already faulty sectors. 
+ if !expiration_set.early_sectors.is_empty() { + return Err(anyhow!( + "attempted to re-schedule early expirations to an earlier epoch" + )); + } + rescheduled_sectors |= &expiration_set.on_time_sectors; + rescheduled_power += &expiration_set.active_power; + rescheduled_power += &expiration_set.faulty_power; + } + + Ok(()) + })?; + + for (epoch, expiration_set) in mutated_expiration_sets { + let res = expiration_set.validate_state(); + self.must_update(epoch, expiration_set)?; + res?; + } + + // If we didn't reschedule anything, we're done. + if rescheduled_epochs.is_empty() { + return Ok(()); + } + + // Add rescheduled sectors to new expiration as early-terminating and faulty. + self.add( + fault_expiration, + &BitField::new(), + &rescheduled_sectors, + &PowerPair::zero(), + &rescheduled_power, + &TokenAmount::zero(), + )?; + + // Trim the rescheduled epochs from the queue. + self.amt.batch_delete(rescheduled_epochs, true)?; + + Ok(()) + } + + /// Removes sectors from any queue entries in which they appear that are earlier then their scheduled expiration epoch, + /// and schedules them at their expected termination epoch. + /// Pledge for the sectors is re-added as on-time. + /// Power for the sectors is changed from faulty to active (whether rescheduled or not). + /// Returns the newly-recovered power. Fails if any sectors are not found in the queue. + pub fn reschedule_recovered( + &mut self, + sectors: Vec, + sector_size: SectorSize, + ) -> anyhow::Result { + let mut remaining: BTreeMap = sectors + .iter() + .map(|sector| (sector.sector_number, sector)) + .collect(); + + // Traverse the expiration queue once to find each recovering sector and remove it from early/faulty there. + // We expect this to find all recovering sectors within the first FaultMaxAge/WPoStProvingPeriod entries + // (i.e. 14 for 14-day faults), but if something has gone wrong it's safer not to fail if that's not met. 
+ let mut sectors_rescheduled = Vec::<&SectorOnChainInfo>::new(); + let mut recovered_power = PowerPair::zero(); + + self.iter_while_mut(|_epoch, expiration_set| { + let mut faulty_power_delta = PowerPair::zero(); + let mut active_power_delta = PowerPair::zero(); + + for sector_number in expiration_set.on_time_sectors.iter() { + let sector = match remaining.remove(§or_number) { + Some(s) => s, + None => continue, + }; + + // If the sector expires on-time at this epoch, leave it here but change faulty power to active. + // The pledge is already part of the on-time pledge at this entry. + let power = power_for_sector(sector_size, sector); + faulty_power_delta -= &power; + active_power_delta += &power; + + recovered_power += &power; + } + let mut early_unset = Vec::new(); + for sector_number in expiration_set.early_sectors.iter() { + let sector = match remaining.remove(§or_number) { + Some(s) => s, + None => continue, + }; + // If the sector expires early at this epoch, remove it for re-scheduling. + // It's not part of the on-time pledge number here. + early_unset.push(sector_number); + let power = power_for_sector(sector_size, sector); + faulty_power_delta -= &power; + sectors_rescheduled.push(sector); + + recovered_power += &power; + } + + // we need to defer the changes as we cannot borrow immutably for iteration + // and mutably for changes at the same time + if !early_unset.is_empty() + || !faulty_power_delta.is_zero() + || !active_power_delta.is_zero() + { + expiration_set.active_power += &active_power_delta; + expiration_set.faulty_power += &faulty_power_delta; + + expiration_set.early_sectors -= BitField::try_from_bits(early_unset)?; + } + + expiration_set.validate_state()?; + + let keep_going = !remaining.is_empty(); + Ok(keep_going) + })?; + + if !remaining.is_empty() { + return Err(anyhow!( + "sectors not found in expiration queue: {:?}", + remaining + )); + } + + // Re-schedule the removed sectors to their target expiration. 
+ self.add_active_sectors(sectors_rescheduled, sector_size)?; + + Ok(recovered_power) + } + + /// Removes some sectors and adds some others. + /// The sectors being replaced must not be faulty, so must be scheduled for on-time rather than early expiration. + /// The sectors added are assumed to be not faulty. + /// Returns the old a new sector number bitfields, and delta to power and pledge, new minus old. + pub fn replace_sectors( + &mut self, + old_sectors: &[SectorOnChainInfo], + new_sectors: &[SectorOnChainInfo], + sector_size: SectorSize, + ) -> anyhow::Result<(BitField, BitField, PowerPair, TokenAmount)> { + let (old_sector_numbers, old_power, old_pledge) = self + .remove_active_sectors(old_sectors, sector_size) + .map_err(|e| e.downcast_wrap("failed to remove replaced sectors"))?; + + let (new_sector_numbers, new_power, new_pledge) = self + .add_active_sectors(new_sectors, sector_size) + .map_err(|e| e.downcast_wrap("failed to add replacement sectors"))?; + + Ok(( + old_sector_numbers, + new_sector_numbers, + &new_power - &old_power, + new_pledge - old_pledge, + )) + } + + /// Remove some sectors from the queue. + /// The sectors may be active or faulty, and scheduled either for on-time or early termination. + /// Returns the aggregate of removed sectors and power, and recovering power. + /// Fails if any sectors are not found in the queue. + pub fn remove_sectors( + &mut self, + policy: &Policy, + sectors: &[SectorOnChainInfo], + faults: &BitField, + recovering: &BitField, + sector_size: SectorSize, + ) -> anyhow::Result<(ExpirationSet, PowerPair)> { + let mut remaining: BTreeSet<_> = + sectors.iter().map(|sector| sector.sector_number).collect(); + + // ADDRESSED_SECTORS_MAX is defined as 25000, so this will not error. + let faults_map: BTreeSet<_> = faults + .bounded_iter(policy.addressed_sectors_max) + .context("too many faults to expand")? 
+ .map(|i| i as SectorNumber) + .collect(); + + let recovering_map: BTreeSet<_> = recovering + .bounded_iter(policy.addressed_sectors_max) + .context("too many recoveries to expand")? + .map(|i| i as SectorNumber) + .collect(); + + // results + let mut removed = ExpirationSet::empty(); + let mut recovering_power = PowerPair::zero(); + + // Split into faulty and non-faulty. We process non-faulty sectors first + // because they always expire on-time so we know where to find them. + // TODO since cloning info, should be RC or find a way for data to be references. + // This might get optimized by the compiler, so not a priority + let mut non_faulty_sectors = Vec::::new(); + let mut faulty_sectors = Vec::<&SectorOnChainInfo>::new(); + + for sector in sectors { + if faults_map.contains(§or.sector_number) { + faulty_sectors.push(sector); + } else { + non_faulty_sectors.push(sector.clone()); + + // remove them from "remaining", we're going to process them below. + remaining.remove(§or.sector_number); + } + } + + // Remove non-faulty sectors. + let (removed_sector_numbers, removed_power, removed_pledge) = self + .remove_active_sectors(&non_faulty_sectors, sector_size) + .map_err(|e| e.downcast_wrap("failed to remove on-time recoveries"))?; + removed.on_time_sectors = removed_sector_numbers; + removed.active_power = removed_power; + removed.on_time_pledge = removed_pledge; + + // Finally, remove faulty sectors (on time and not). These sectors can + // only appear within the first 14 days (fault max age). Given that this + // queue is quantized, we should be able to stop traversing the queue + // after 14 entries. + self.iter_while_mut(|_epoch, expiration_set| { + let on_time_sectors: BTreeSet = expiration_set + .on_time_sectors + .bounded_iter(ENTRY_SECTORS_MAX) + .context("too many on-time sectors to expand")? 
+ .map(|i| i as SectorNumber) + .collect(); + + let early_sectors: BTreeSet = expiration_set + .early_sectors + .bounded_iter(ENTRY_SECTORS_MAX) + .context("too many early sectors to expand")? + .map(|i| i as SectorNumber) + .collect(); + + // This loop could alternatively be done by constructing bitfields and intersecting them, but it's not + // clear that would be much faster (O(max(N, M)) vs O(N+M)). + // The length of sectors has a maximum of one partition size. + for sector in &faulty_sectors { + let sector_number = sector.sector_number; + let mut found = false; + + if on_time_sectors.contains(§or_number) { + found = true; + expiration_set.on_time_sectors.unset(sector_number); + removed.on_time_sectors.set(sector_number); + expiration_set.on_time_pledge -= §or.initial_pledge; + removed.on_time_pledge += §or.initial_pledge; + } else if early_sectors.contains(§or_number) { + found = true; + expiration_set.early_sectors.unset(sector_number); + removed.early_sectors.set(sector_number); + } + + if found { + let power = power_for_sector(sector_size, sector); + + if faults_map.contains(§or_number) { + expiration_set.faulty_power -= &power; + removed.faulty_power += &power; + } else { + expiration_set.active_power -= &power; + removed.active_power += &power; + } + + if recovering_map.contains(§or_number) { + recovering_power += &power; + } + + remaining.remove(§or_number); + } + } + + expiration_set.validate_state()?; + + let keep_going = !remaining.is_empty(); + Ok(keep_going) + })?; + + if !remaining.is_empty() { + return Err(anyhow!( + "sectors not found in expiration queue: {:?}", + remaining + )); + } + + Ok((removed, recovering_power)) + } + + /// Removes and aggregates entries from the queue up to and including some epoch. 
+ pub fn pop_until(&mut self, until: ChainEpoch) -> anyhow::Result { + let mut on_time_sectors = BitField::new(); + let mut early_sectors = BitField::new(); + let mut active_power = PowerPair::zero(); + let mut faulty_power = PowerPair::zero(); + let mut on_time_pledge = TokenAmount::zero(); + let mut popped_keys = Vec::::new(); + + self.amt.for_each_while(|i, this_value| { + if i as ChainEpoch > until { + return Ok(false); + } + + popped_keys.push(i); + on_time_sectors |= &this_value.on_time_sectors; + early_sectors |= &this_value.early_sectors; + active_power += &this_value.active_power; + faulty_power += &this_value.faulty_power; + on_time_pledge += &this_value.on_time_pledge; + + Ok(true) + })?; + + self.amt.batch_delete(popped_keys, true)?; + + Ok(ExpirationSet { + on_time_sectors, + early_sectors, + on_time_pledge, + active_power, + faulty_power, + }) + } + + fn add( + &mut self, + raw_epoch: ChainEpoch, + on_time_sectors: &BitField, + early_sectors: &BitField, + active_power: &PowerPair, + faulty_power: &PowerPair, + pledge: &TokenAmount, + ) -> anyhow::Result<()> { + let epoch = self.quant.quantize_up(raw_epoch); + let mut expiration_set = self.may_get(epoch)?; + + expiration_set + .add( + on_time_sectors, + early_sectors, + pledge, + active_power, + faulty_power, + ) + .map_err(|e| anyhow!("failed to add expiration values for epoch {}: {}", epoch, e))?; + + self.must_update(epoch, expiration_set)?; + Ok(()) + } + + fn remove( + &mut self, + raw_epoch: ChainEpoch, + on_time_sectors: &BitField, + early_sectors: &BitField, + active_power: &PowerPair, + faulty_power: &PowerPair, + pledge: &TokenAmount, + ) -> anyhow::Result<()> { + let epoch = self.quant.quantize_up(raw_epoch); + let mut expiration_set = self + .amt + .get(epoch.try_into()?) + .map_err(|e| e.downcast_wrap(format!("failed to lookup queue epoch {}", epoch)))? + .ok_or_else(|| anyhow!("missing expected expiration set at epoch {}", epoch))? 
+ .clone(); + expiration_set + .remove( + on_time_sectors, + early_sectors, + pledge, + active_power, + faulty_power, + ) + .map_err(|e| { + anyhow!( + "failed to remove expiration values for queue epoch {}: {}", + epoch, + e + ) + })?; + + self.must_update_or_delete(epoch, expiration_set)?; + Ok(()) + } + + fn remove_active_sectors( + &mut self, + sectors: &[SectorOnChainInfo], + sector_size: SectorSize, + ) -> anyhow::Result<(BitField, PowerPair, TokenAmount)> { + let mut removed_sector_numbers = Vec::::new(); + let mut removed_power = PowerPair::zero(); + let mut removed_pledge = TokenAmount::zero(); + + // Group sectors by their expiration, then remove from existing queue entries according to those groups. + let groups = self.find_sectors_by_expiration(sector_size, sectors)?; + for group in groups { + let sectors_bitfield = + BitField::try_from_bits(group.sector_epoch_set.sectors.iter().copied())?; + self.remove( + group.sector_epoch_set.epoch, + §ors_bitfield, + &BitField::new(), + &group.sector_epoch_set.power, + &PowerPair::zero(), + &group.sector_epoch_set.pledge, + )?; + + removed_sector_numbers.extend(&group.sector_epoch_set.sectors); + + removed_power += &group.sector_epoch_set.power; + removed_pledge += &group.sector_epoch_set.pledge; + } + + Ok(( + BitField::try_from_bits(removed_sector_numbers)?, + removed_power, + removed_pledge, + )) + } + + /// Traverses the entire queue with a callback function that may mutate entries. + /// Iff the function returns that it changed an entry, the new entry will be re-written in the + /// queue. Any changed entries that become empty are removed after iteration completes. 
+ fn iter_while_mut( + &mut self, + mut f: impl FnMut( + ChainEpoch, + &mut ValueMut<'_, ExpirationSet>, + ) -> anyhow::Result, + ) -> anyhow::Result<()> { + let mut epochs_emptied = Vec::::new(); + + self.amt.for_each_while_mut(|e, expiration_set| { + let keep_going = f(e.try_into()?, expiration_set)?; + + if expiration_set.is_empty() { + epochs_emptied.push(e); + } + + Ok(keep_going) + })?; + + self.amt.batch_delete(epochs_emptied, true)?; + + Ok(()) + } + + fn may_get(&self, key: ChainEpoch) -> anyhow::Result { + Ok(self + .amt + .get(key.try_into()?) + .map_err(|e| e.downcast_wrap(format!("failed to lookup queue epoch {}", key)))? + .cloned() + .unwrap_or_default()) + } + + fn must_update( + &mut self, + epoch: ChainEpoch, + expiration_set: ExpirationSet, + ) -> anyhow::Result<()> { + self.amt + .set(epoch.try_into()?, expiration_set) + .map_err(|e| e.downcast_wrap(format!("failed to set queue epoch {}", epoch))) + } + + /// Since this might delete the node, it's not safe for use inside an iteration. + fn must_update_or_delete( + &mut self, + epoch: ChainEpoch, + expiration_set: ExpirationSet, + ) -> anyhow::Result<()> { + if expiration_set.is_empty() { + self.amt + .delete(epoch.try_into()?) + .map_err(|e| e.downcast_wrap(format!("failed to delete queue epoch {}", epoch)))?; + } else { + self.amt + .set(epoch.try_into()?, expiration_set) + .map_err(|e| e.downcast_wrap(format!("failed to set queue epoch {}", epoch)))?; + } + + Ok(()) + } + + /// Groups sectors into sets based on their Expiration field. + /// If sectors are not found in the expiration set corresponding to their expiration field + /// (i.e. they have been rescheduled) traverse expiration sets for groups where these + /// sectors actually expire. + /// Groups will be returned in expiration order, earliest first. 
+ fn find_sectors_by_expiration( + &self, + sector_size: SectorSize, + sectors: &[SectorOnChainInfo], + ) -> anyhow::Result> { + let mut declared_expirations = BTreeMap::::new(); + let mut sectors_by_number = BTreeMap::::new(); + let mut all_remaining = BTreeSet::::new(); + + for sector in sectors { + let q_expiration = self.quant.quantize_up(sector.expiration); + declared_expirations.insert(q_expiration, true); + all_remaining.insert(sector.sector_number); + sectors_by_number.insert(sector.sector_number, sector); + } + + let mut expiration_groups = + Vec::::with_capacity(declared_expirations.len()); + + for (&expiration, _) in declared_expirations.iter() { + let es = self.may_get(expiration)?; + + let group = group_expiration_set( + sector_size, + §ors_by_number, + &mut all_remaining, + es, + expiration, + ); + if !group.sector_epoch_set.sectors.is_empty() { + expiration_groups.push(group); + } + } + + // If sectors remain, traverse next in epoch order. Remaining sectors should be + // rescheduled to expire soon, so this traversal should exit early. + if !all_remaining.is_empty() { + self.amt.for_each_while(|epoch, es| { + let epoch = epoch as ChainEpoch; + // If this set's epoch is one of our declared epochs, we've already processed it + // in the loop above, so skip processing here. Sectors rescheduled to this epoch + // would have been included in the earlier processing. + if declared_expirations.contains_key(&epoch) { + return Ok(true); + } + + // Sector should not be found in EarlyExpirations which holds faults. An implicit assumption + // of grouping is that it only returns sectors with active power. ExpirationQueue should not + // provide operations that allow this to happen. 
+ check_no_early_sectors(&all_remaining, es)?; + + let group = group_expiration_set( + sector_size, + §ors_by_number, + &mut all_remaining, + es.clone(), + epoch, + ); + + if !group.sector_epoch_set.sectors.is_empty() { + expiration_groups.push(group); + } + + Ok(!all_remaining.is_empty()) + })?; + } + + if !all_remaining.is_empty() { + return Err(anyhow!("some sectors not found in expiration queue")); + } + + // The built-in stable sort is timsort. It will find the two sorted runs and merge them. + // + // We could also just assume they're sorted and use itertools.merge..., but this is safe and + // the perf shouldn't be much different. + expiration_groups.sort_by_key(|g| g.sector_epoch_set.epoch); + + Ok(expiration_groups) + } +} + +#[derive(Clone)] +struct SectorExpirationSet { + sector_epoch_set: SectorEpochSet, + // TODO try to make expiration set a reference (or Cow) + expiration_set: ExpirationSet, +} + +#[derive(Clone)] +struct SectorEpochSet { + epoch: ChainEpoch, + sectors: Vec, + power: PowerPair, + pledge: TokenAmount, +} + +/// Takes a slice of sector infos and returns sector info sets grouped and +/// sorted by expiration epoch, quantized. +/// +/// Note: While the result is sorted by epoch, the order of per-epoch sectors is maintained. +fn group_new_sectors_by_declared_expiration<'a>( + sector_size: SectorSize, + sectors: impl IntoIterator, + quant: QuantSpec, +) -> Vec { + let mut sectors_by_expiration = BTreeMap::>::new(); + + for sector in sectors { + let q_expiration = quant.quantize_up(sector.expiration); + sectors_by_expiration + .entry(q_expiration) + .or_default() + .push(sector); + } + + // The result is sorted by expiration because the BTreeMap iterates in sorted order. 
+ sectors_by_expiration + .into_iter() + .map(|(expiration, epoch_sectors)| { + let mut sector_numbers = Vec::::with_capacity(epoch_sectors.len()); + let mut total_power = PowerPair::zero(); + let mut total_pledge = TokenAmount::zero(); + + for sector in epoch_sectors { + sector_numbers.push(sector.sector_number); + total_power += &power_for_sector(sector_size, sector); + total_pledge += §or.initial_pledge; + } + + SectorEpochSet { + epoch: expiration, + sectors: sector_numbers, + power: total_power, + pledge: total_pledge, + } + }) + .collect() +} + +fn group_expiration_set( + sector_size: SectorSize, + sectors: &BTreeMap, + include_set: &mut BTreeSet, + es: ExpirationSet, + expiration: ChainEpoch, +) -> SectorExpirationSet { + let mut sector_numbers = Vec::new(); + let mut total_power = PowerPair::zero(); + let mut total_pledge = TokenAmount::default(); + + for u in es.on_time_sectors.iter() { + if include_set.remove(&u) { + let sector = sectors.get(&u).expect("index should exist in sector set"); + sector_numbers.push(u); + total_power += &power_for_sector(sector_size, sector); + total_pledge += §or.initial_pledge; + } + } + + SectorExpirationSet { + sector_epoch_set: SectorEpochSet { + epoch: expiration, + sectors: sector_numbers, + power: total_power, + pledge: total_pledge, + }, + expiration_set: es, + } +} + +/// Checks for invalid overlap between bitfield and a set's early sectors. 
+fn check_no_early_sectors(set: &BTreeSet, es: &ExpirationSet) -> anyhow::Result<()> { + for u in es.early_sectors.iter() { + if set.contains(&u) { + return Err(anyhow!( + "Invalid attempt to group sector {} with an early expiration", + u + )); + } + } + Ok(()) +} diff --git a/actors/miner/src/v16/ext.rs b/actors/miner/src/v16/ext.rs new file mode 100644 index 00000000..b1425b0d --- /dev/null +++ b/actors/miner/src/v16/ext.rs @@ -0,0 +1,201 @@ +use cid::Cid; +use fvm_ipld_encoding::tuple::*; +use fvm_ipld_encoding::RawBytes; +use fvm_shared4::bigint::{bigint_ser, BigInt}; +use fvm_shared4::clock::ChainEpoch; +use fvm_shared4::deal::DealID; +use fvm_shared4::econ::TokenAmount; +use fvm_shared4::piece::PaddedPieceSize; +use fvm_shared4::sector::SectorNumber; +use fvm_shared4::sector::{RegisteredSealProof, StoragePower}; +use fvm_shared4::ActorID; + +use fil_actors_shared::v16::reward::FilterEstimate; +use fil_actors_shared::v16::BatchReturn; + +pub mod account { + pub const PUBKEY_ADDRESS_METHOD: u64 = 2; +} + +pub mod market { + use super::*; + use fvm_ipld_bitfield::BitField; + + pub const VERIFY_DEALS_FOR_ACTIVATION_METHOD: u64 = 5; + pub const BATCH_ACTIVATE_DEALS_METHOD: u64 = 6; + pub const ON_MINER_SECTORS_TERMINATE_METHOD: u64 = 7; + + pub const NO_ALLOCATION_ID: u64 = 0; + + #[derive(Serialize_tuple, Deserialize_tuple)] + pub struct SectorDeals { + pub sector_number: SectorNumber, + pub sector_type: RegisteredSealProof, + pub sector_expiry: ChainEpoch, + pub deal_ids: Vec, + } + + #[derive(Serialize_tuple, Deserialize_tuple)] + pub struct BatchActivateDealsParams { + pub sectors: Vec, + pub compute_cid: bool, + } + + #[derive(Serialize_tuple, Deserialize_tuple, Clone)] + pub struct ActivatedDeal { + pub client: ActorID, + pub allocation_id: u64, + pub data: Cid, + pub size: PaddedPieceSize, + } + + #[derive(Serialize_tuple, Deserialize_tuple, Clone)] + pub struct SectorDealActivation { + pub activated: Vec, + pub unsealed_cid: Option, + } + + 
#[derive(Serialize_tuple, Deserialize_tuple, Clone)] + pub struct BatchActivateDealsResult { + pub activation_results: BatchReturn, + pub activations: Vec, + } + + #[derive(Serialize_tuple, Deserialize_tuple)] + pub struct OnMinerSectorsTerminateParams { + pub epoch: ChainEpoch, + pub sectors: BitField, + } + + #[derive(Serialize_tuple, Deserialize_tuple)] + pub struct SectorDataSpec { + pub deal_ids: Vec, + pub sector_type: RegisteredSealProof, + } + + #[derive(Serialize_tuple)] + pub struct VerifyDealsForActivationParamsRef<'a> { + pub sectors: &'a [SectorDeals], + } + + #[derive(Serialize_tuple, Deserialize_tuple, Default, Clone)] + pub struct VerifyDealsForActivationReturn { + pub unsealed_cids: Vec>, + } +} + +pub mod power { + use super::*; + + pub const UPDATE_CLAIMED_POWER_METHOD: u64 = 3; + pub const ENROLL_CRON_EVENT_METHOD: u64 = 4; + pub const UPDATE_PLEDGE_TOTAL_METHOD: u64 = 6; + pub const SUBMIT_POREP_FOR_BULK_VERIFY_METHOD: u64 = 8; + pub const CURRENT_TOTAL_POWER_METHOD: u64 = 9; + + #[derive(Serialize_tuple, Deserialize_tuple)] + pub struct CurrentTotalPowerReturn { + #[serde(with = "bigint_ser")] + pub raw_byte_power: StoragePower, + #[serde(with = "bigint_ser")] + pub quality_adj_power: StoragePower, + pub pledge_collateral: TokenAmount, + pub quality_adj_power_smoothed: FilterEstimate, + pub ramp_start_epoch: i64, + pub ramp_duration_epochs: u64, + } + #[derive(Serialize_tuple, Deserialize_tuple)] + pub struct EnrollCronEventParams { + pub event_epoch: ChainEpoch, + pub payload: RawBytes, + } + + #[derive(Serialize_tuple, Deserialize_tuple)] + pub struct UpdateClaimedPowerParams { + #[serde(with = "bigint_ser")] + pub raw_byte_delta: StoragePower, + #[serde(with = "bigint_ser")] + pub quality_adjusted_delta: StoragePower, + } + + pub const MAX_MINER_PROVE_COMMITS_PER_EPOCH: usize = 200; +} + +pub mod reward { + pub const THIS_EPOCH_REWARD_METHOD: u64 = 3; +} + +pub mod verifreg { + use super::*; + + pub const GET_CLAIMS_METHOD: u64 = 10; + pub 
const CLAIM_ALLOCATIONS_METHOD: u64 = 9; + + pub type ClaimID = u64; + pub type AllocationID = u64; + + #[derive(Serialize_tuple, Deserialize_tuple, Clone, Debug, PartialEq, Eq)] + pub struct Claim { + // The provider storing the data (from allocation). + pub provider: ActorID, + // The client which allocated the DataCap (from allocation). + pub client: ActorID, + // Identifier of the data committed (from allocation). + pub data: Cid, + // The (padded) size of data (from allocation). + pub size: PaddedPieceSize, + // The min period which the provider must commit to storing data + pub term_min: ChainEpoch, + // The max period for which provider can earn QA-power for the data + pub term_max: ChainEpoch, + // The epoch at which the (first range of the) piece was committed. + pub term_start: ChainEpoch, + // ID of the provider's sector in which the data is committed. + pub sector: SectorNumber, + } + #[derive(Debug, Serialize_tuple, Deserialize_tuple)] + pub struct GetClaimsParams { + pub provider: ActorID, + pub claim_ids: Vec, + } + #[derive(Debug, Serialize_tuple, Deserialize_tuple)] + + pub struct GetClaimsReturn { + pub batch_info: BatchReturn, + pub claims: Vec, + } + + #[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] + pub struct SectorAllocationClaims { + pub sector: SectorNumber, + pub expiry: ChainEpoch, + pub claims: Vec, + } + + #[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] + pub struct AllocationClaim { + pub client: ActorID, + pub allocation_id: AllocationID, + pub data: Cid, + pub size: PaddedPieceSize, + } + + #[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] + pub struct ClaimAllocationsParams { + pub sectors: Vec, + pub all_or_nothing: bool, + } + + #[derive(Clone, Debug, PartialEq, Eq, Default, Serialize_tuple, Deserialize_tuple)] + #[serde(transparent)] + pub struct SectorClaimSummary { + #[serde(with = "bigint_ser")] + pub claimed_space: BigInt, + } + + #[derive(Clone, 
Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] + pub struct ClaimAllocationsReturn { + pub sector_results: BatchReturn, + pub sector_claims: Vec, + } +} diff --git a/actors/miner/src/v16/mod.rs b/actors/miner/src/v16/mod.rs new file mode 100644 index 00000000..7fce8e26 --- /dev/null +++ b/actors/miner/src/v16/mod.rs @@ -0,0 +1,244 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use anyhow::anyhow; +use cid::Cid; +use fvm_ipld_bitfield::BitField; +use fvm_ipld_encoding::RawBytes; +use fvm_shared4::bigint::BigInt; +use fvm_shared4::clock::ChainEpoch; +use fvm_shared4::deal::DealID; +use fvm_shared4::error::*; +use fvm_shared4::sector::{RegisteredSealProof, RegisteredUpdateProof, SectorNumber, SectorSize}; +use fvm_shared4::{MethodNum, METHOD_CONSTRUCTOR}; +use num_derive::FromPrimitive; + +pub use beneficiary::*; +pub use bitfield_queue::*; +pub use commd::*; +pub use deadline_assignment::*; +pub use deadline_info::*; +pub use deadline_state::*; +pub use deadlines::*; +pub use expiration_queue::*; +pub use monies::*; +pub use partition_state::*; +pub use policy::*; +pub use quantize::*; +pub use sector_map::*; +pub use sectors::*; +pub use state::*; +pub use termination::*; +pub use types::*; +pub use vesting_state::*; + +// The following errors are particular cases of illegal state. +// They're not expected to ever happen, but if they do, distinguished codes can help us +// diagnose the problem. 
+ +mod beneficiary; +mod bitfield_queue; +mod commd; +mod deadline_assignment; +mod deadline_info; +mod deadline_state; +mod deadlines; +mod expiration_queue; +#[doc(hidden)] +pub mod ext; +mod monies; +mod partition_state; +mod policy; +mod quantize; +mod sector_map; +mod sectors; +mod state; +mod termination; +mod types; +mod vesting_state; + +/// Storage Miner actor methods available +#[derive(FromPrimitive)] +#[repr(u64)] +pub enum Method { + Constructor = METHOD_CONSTRUCTOR, + ControlAddresses = 2, + ChangeWorkerAddress = 3, + ChangePeerID = 4, + SubmitWindowedPoSt = 5, + //PreCommitSector = 6, // Deprecated + //ProveCommitSector = 7, // Deprecated + ExtendSectorExpiration = 8, + TerminateSectors = 9, + DeclareFaults = 10, + DeclareFaultsRecovered = 11, + OnDeferredCronEvent = 12, + CheckSectorProven = 13, + ApplyRewards = 14, + ReportConsensusFault = 15, + WithdrawBalance = 16, + InternalSectorSetupForPreseal = 17, + ChangeMultiaddrs = 18, + CompactPartitions = 19, + CompactSectorNumbers = 20, + ConfirmChangeWorkerAddress = 21, + RepayDebt = 22, + ChangeOwnerAddress = 23, + DisputeWindowedPoSt = 24, + //PreCommitSectorBatch = 25, // Deprecated + ProveCommitAggregate = 26, + ProveReplicaUpdates = 27, + PreCommitSectorBatch2 = 28, + //ProveReplicaUpdates2 = 29, // Deprecated + ChangeBeneficiary = 30, + GetBeneficiary = 31, + ExtendSectorExpiration2 = 32, + // MovePartitions = 33, + ProveCommitSectors3 = 34, + ProveReplicaUpdates3 = 35, + ProveCommitSectorsNI = 36, + // Method numbers derived from FRC-0042 standards + ChangeWorkerAddressExported = frc42_dispatch::method_hash!("ChangeWorkerAddress"), + ChangePeerIDExported = frc42_dispatch::method_hash!("ChangePeerID"), + WithdrawBalanceExported = frc42_dispatch::method_hash!("WithdrawBalance"), + ChangeMultiaddrsExported = frc42_dispatch::method_hash!("ChangeMultiaddrs"), + ConfirmChangeWorkerAddressExported = frc42_dispatch::method_hash!("ConfirmChangeWorkerAddress"), + RepayDebtExported = 
frc42_dispatch::method_hash!("RepayDebt"), + ChangeOwnerAddressExported = frc42_dispatch::method_hash!("ChangeOwnerAddress"), + ChangeBeneficiaryExported = frc42_dispatch::method_hash!("ChangeBeneficiary"), + GetBeneficiaryExported = frc42_dispatch::method_hash!("GetBeneficiary"), + GetOwnerExported = frc42_dispatch::method_hash!("GetOwner"), + IsControllingAddressExported = frc42_dispatch::method_hash!("IsControllingAddress"), + GetSectorSizeExported = frc42_dispatch::method_hash!("GetSectorSize"), + GetAvailableBalanceExported = frc42_dispatch::method_hash!("GetAvailableBalance"), + GetVestingFundsExported = frc42_dispatch::method_hash!("GetVestingFunds"), + GetPeerIDExported = frc42_dispatch::method_hash!("GetPeerID"), + GetMultiaddrsExported = frc42_dispatch::method_hash!("GetMultiaddrs"), +} + +pub const SECTOR_CONTENT_CHANGED: MethodNum = frc42_dispatch::method_hash!("SectorContentChanged"); + +pub const ERR_BALANCE_INVARIANTS_BROKEN: ExitCode = ExitCode::new(1000); +pub const ERR_NOTIFICATION_SEND_FAILED: ExitCode = ExitCode::new(1001); +pub const ERR_NOTIFICATION_RECEIVER_ABORTED: ExitCode = ExitCode::new(1002); +pub const ERR_NOTIFICATION_RESPONSE_INVALID: ExitCode = ExitCode::new(1003); +pub const ERR_NOTIFICATION_REJECTED: ExitCode = ExitCode::new(1004); + +/// ReplicaUpdate param with Option for CommD +/// None means unknown +#[derive(Debug, Clone)] +pub struct ReplicaUpdateInner { + pub sector_number: SectorNumber, + pub deadline: u64, + pub partition: u64, + pub new_sealed_cid: Cid, + /// None means unknown + pub new_unsealed_cid: Option, + pub deals: Vec, + pub update_proof_type: RegisteredUpdateProof, + pub replica_proof: RawBytes, +} + +#[derive(Clone, Debug, PartialEq)] +pub struct ValidatedExpirationExtension { + pub deadline: u64, + pub partition: u64, + pub sectors: BitField, + pub new_expiration: ChainEpoch, +} + +impl From for ValidatedExpirationExtension { + fn from(e2: ExpirationExtension2) -> Self { + let mut sectors = BitField::new(); + 
for sc in e2.sectors_with_claims { + sectors.set(sc.sector_number) + } + sectors |= &e2.sectors; + + Self { + deadline: e2.deadline, + partition: e2.partition, + sectors, + new_expiration: e2.new_expiration, + } + } +} + +pub fn power_for_sector(sector_size: SectorSize, sector: &SectorOnChainInfo) -> PowerPair { + PowerPair { + raw: BigInt::from(sector_size as u64), + qa: qa_power_for_sector(sector_size, sector), + } +} + +/// Returns the sum of the raw byte and quality-adjusted power for sectors. +pub fn power_for_sectors(sector_size: SectorSize, sectors: &[SectorOnChainInfo]) -> PowerPair { + let qa = sectors + .iter() + .map(|s| qa_power_for_sector(sector_size, s)) + .sum(); + + PowerPair { + raw: BigInt::from(sector_size as u64) * BigInt::from(sectors.len()), + qa, + } +} + +pub struct SectorPiecesActivationInput { + pub piece_manifests: Vec, + pub sector_expiry: ChainEpoch, + pub sector_number: SectorNumber, + pub sector_type: RegisteredSealProof, + pub expected_commd: Option, +} + +// Inputs for activating builtin market deals for one sector +#[derive(Debug, Clone)] +pub struct DealsActivationInput { + pub deal_ids: Vec, + pub sector_expiry: ChainEpoch, + pub sector_number: SectorNumber, + pub sector_type: RegisteredSealProof, +} + +impl From for DealsActivationInput { + fn from(pci: SectorPreCommitOnChainInfo) -> DealsActivationInput { + DealsActivationInput { + deal_ids: pci.info.deal_ids, + sector_expiry: pci.info.expiration, + sector_number: pci.info.sector_number, + sector_type: pci.info.seal_proof, + } + } +} + +impl From<&UpdateAndSectorInfo<'_>> for DealsActivationInput { + fn from(usi: &UpdateAndSectorInfo) -> DealsActivationInput { + DealsActivationInput { + sector_number: usi.sector_info.sector_number, + sector_expiry: usi.sector_info.expiration, + deal_ids: usi.update.deals.clone(), + sector_type: usi.sector_info.seal_proof, + } + } +} + +// Track information needed to update a sector info's data during ProveReplicaUpdate +#[derive(Clone, Debug)] 
+struct UpdateAndSectorInfo<'a> { + update: &'a ReplicaUpdateInner, + sector_info: &'a SectorOnChainInfo, +} + +/// Validates that a partition contains the given sectors. +fn validate_partition_contains_sectors( + partition: &Partition, + sectors: &BitField, +) -> anyhow::Result<()> { + // Check that the declared sectors are actually assigned to the partition. + if partition.sectors.contains_all(sectors) { + Ok(()) + } else { + Err(anyhow!("not all sectors are assigned to the partition")) + } +} diff --git a/actors/miner/src/v16/monies.rs b/actors/miner/src/v16/monies.rs new file mode 100644 index 00000000..4d6bdd23 --- /dev/null +++ b/actors/miner/src/v16/monies.rs @@ -0,0 +1,364 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::cmp::{self, max}; + +use fil_actors_shared::v16::network::EPOCHS_IN_DAY; +use fil_actors_shared::v16::reward::math::PRECISION; +use fil_actors_shared::v16::reward::{smooth, FilterEstimate}; +use fil_actors_shared::v16::EXPECTED_LEADERS_PER_EPOCH; +use fvm_shared4::bigint::{BigInt, Integer}; +use fvm_shared4::clock::ChainEpoch; +use fvm_shared4::econ::TokenAmount; +use fvm_shared4::sector::StoragePower; +use lazy_static::lazy_static; +use num_traits::Zero; + +use super::{VestSpec, REWARD_VESTING_SPEC}; +use crate::v16::detail::*; + +/// Projection period of expected sector block reward for deposit required to pre-commit a sector. +/// This deposit is lost if the pre-commitment is not timely followed up by a commitment proof. +const PRE_COMMIT_DEPOSIT_FACTOR: u64 = 20; + +/// Projection period of expected sector block rewards for storage pledge required to commit a sector. +/// This pledge is lost if a sector is terminated before its full committed lifetime. 
+pub const INITIAL_PLEDGE_FACTOR: u64 = 20; + +pub const PRE_COMMIT_DEPOSIT_PROJECTION_PERIOD: i64 = + (PRE_COMMIT_DEPOSIT_FACTOR as ChainEpoch) * EPOCHS_IN_DAY; +pub const INITIAL_PLEDGE_PROJECTION_PERIOD: i64 = + (INITIAL_PLEDGE_FACTOR as ChainEpoch) * EPOCHS_IN_DAY; + +const LOCK_TARGET_FACTOR_NUM: u32 = 3; +const LOCK_TARGET_FACTOR_DENOM: u32 = 10; + +pub const TERMINATION_REWARD_FACTOR_NUM: u32 = 1; +pub const TERMINATION_REWARD_FACTOR_DENOM: u32 = 2; + +// * go impl has 75/100 but this is just simplified +const LOCKED_REWARD_FACTOR_NUM: u32 = 3; +const LOCKED_REWARD_FACTOR_DENOM: u32 = 4; + +lazy_static! { + /// Cap on initial pledge requirement for sectors during the Space Race network. + /// The target is 1 FIL (10**18 attoFIL) per 32GiB. + /// This does not divide evenly, so the result is fractionally smaller. + static ref INITIAL_PLEDGE_MAX_PER_BYTE: TokenAmount = + TokenAmount::from_whole(1).div_floor(32i64 << 30); + + /// Base reward for successfully disputing a window posts proofs. + pub static ref BASE_REWARD_FOR_DISPUTED_WINDOW_POST: TokenAmount = TokenAmount::from_whole(4); + + /// Base penalty for a successful disputed window post proof. + pub static ref BASE_PENALTY_FOR_DISPUTED_WINDOW_POST: TokenAmount = TokenAmount::from_whole(20); +} +// FF + 2BR +const INVALID_WINDOW_POST_PROJECTION_PERIOD: ChainEpoch = + CONTINUED_FAULT_PROJECTION_PERIOD + 2 * EPOCHS_IN_DAY; + +// Projection period of expected daily sector block reward penalised when a fault is continued after initial detection. +// This guarantees that a miner pays back at least the expected block reward earned since the last successful PoSt. +// The network conservatively assumes the sector was faulty since the last time it was proven. +// This penalty is currently overly punitive for continued faults. 
+// FF = BR(t, ContinuedFaultProjectionPeriod) +const CONTINUED_FAULT_FACTOR_NUM: i64 = 351; +const CONTINUED_FAULT_FACTOR_DENOM: i64 = 100; +pub const CONTINUED_FAULT_PROJECTION_PERIOD: ChainEpoch = + (EPOCHS_IN_DAY * CONTINUED_FAULT_FACTOR_NUM) / CONTINUED_FAULT_FACTOR_DENOM; + +const TERMINATION_PENALTY_LOWER_BOUND_PROJECTIONS_PERIOD: ChainEpoch = (EPOCHS_IN_DAY * 35) / 10; + +// Maximum number of lifetime days penalized when a sector is terminated. +pub const TERMINATION_LIFETIME_CAP: ChainEpoch = 140; + +// Multiplier of whole per-winner rewards for a consensus fault penalty. +const CONSENSUS_FAULT_FACTOR: u64 = 5; + +const GAMMA_FIXED_POINT_FACTOR: u64 = 1000; // 3 decimal places + +/// The projected block reward a sector would earn over some period. +/// Also known as "BR(t)". +/// BR(t) = ProjectedRewardFraction(t) * SectorQualityAdjustedPower +/// ProjectedRewardFraction(t) is the sum of estimated reward over estimated total power +/// over all epochs in the projection period [t t+projectionDuration] +pub fn expected_reward_for_power( + reward_estimate: &FilterEstimate, + network_qa_power_estimate: &FilterEstimate, + qa_sector_power: &StoragePower, + projection_duration: ChainEpoch, +) -> TokenAmount { + let network_qa_power_smoothed = network_qa_power_estimate.estimate(); + + if network_qa_power_smoothed.is_zero() { + return TokenAmount::from_atto(reward_estimate.estimate()); + } + + let expected_reward_for_proving_period = smooth::extrapolated_cum_sum_of_ratio( + projection_duration, + 0, + reward_estimate, + network_qa_power_estimate, + ); + let br128 = qa_sector_power * expected_reward_for_proving_period; // Q.0 * Q.128 => Q.128 + TokenAmount::from_atto(std::cmp::max(br128 >> PRECISION, Default::default())) +} + +pub mod detail { + use super::*; + + lazy_static! 
{ + pub static ref BATCH_BALANCER: TokenAmount = TokenAmount::from_nano(5); + } + + // BR but zero values are clamped at 1 attofil + // Some uses of BR (PCD, IP) require a strictly positive value for BR derived values so + // accounting variables can be used as succinct indicators of miner activity. + pub fn expected_reward_for_power_clamped_at_atto_fil( + reward_estimate: &FilterEstimate, + network_qa_power_estimate: &FilterEstimate, + qa_sector_power: &StoragePower, + projection_duration: ChainEpoch, + ) -> TokenAmount { + let br = expected_reward_for_power( + reward_estimate, + network_qa_power_estimate, + qa_sector_power, + projection_duration, + ); + if br.le(&TokenAmount::zero()) { + TokenAmount::from_atto(1) + } else { + br + } + } +} + +// func ExpectedRewardForPowerClampedAtAttoFIL(rewardEstimate, networkQAPowerEstimate smoothing.FilterEstimate, qaSectorPower abi.StoragePower, projectionDuration abi.ChainEpoch) abi.TokenAmount { +// br := ExpectedRewardForPower(rewardEstimate, networkQAPowerEstimate, qaSectorPower, projectionDuration) +// if br.LessThanEqual(big.Zero()) { +// br = abi.NewTokenAmount(1) +// } +// return br +// } + +/// The penalty for a sector continuing faulty for another proving period. +/// It is a projection of the expected reward earned by the sector. +/// Also known as "FF(t)" +pub fn pledge_penalty_for_continued_fault( + reward_estimate: &FilterEstimate, + network_qa_power_estimate: &FilterEstimate, + qa_sector_power: &StoragePower, +) -> TokenAmount { + expected_reward_for_power( + reward_estimate, + network_qa_power_estimate, + qa_sector_power, + CONTINUED_FAULT_PROJECTION_PERIOD, + ) +} + +/// This is the SP(t) penalty for a newly faulty sector that has not been declared. 
+/// SP(t) = UndeclaredFaultFactor * BR(t) +pub fn pledge_penalty_for_termination_lower_bound( + reward_estimate: &FilterEstimate, + network_qa_power_estimate: &FilterEstimate, + qa_sector_power: &StoragePower, +) -> TokenAmount { + expected_reward_for_power( + reward_estimate, + network_qa_power_estimate, + qa_sector_power, + TERMINATION_PENALTY_LOWER_BOUND_PROJECTIONS_PERIOD, + ) +} + +/// Penalty to locked pledge collateral for the termination of a sector before scheduled expiry. +/// SectorAge is the time between the sector's activation and termination. +#[allow(clippy::too_many_arguments)] +pub fn pledge_penalty_for_termination( + day_reward: &TokenAmount, + sector_age: ChainEpoch, + twenty_day_reward_at_activation: &TokenAmount, + network_qa_power_estimate: &FilterEstimate, + qa_sector_power: &StoragePower, + reward_estimate: &FilterEstimate, + replaced_day_reward: &TokenAmount, + replaced_sector_age: ChainEpoch, +) -> TokenAmount { + // max(SP(t), BR(StartEpoch, 20d) + BR(StartEpoch, 1d) * terminationRewardFactor * min(SectorAgeInDays, 140)) + // and sectorAgeInDays = sectorAge / EpochsInDay + let lifetime_cap = TERMINATION_LIFETIME_CAP * EPOCHS_IN_DAY; + let capped_sector_age = std::cmp::min(sector_age, lifetime_cap); + + let mut expected_reward: TokenAmount = day_reward * capped_sector_age; + + let relevant_replaced_age = + std::cmp::min(replaced_sector_age, lifetime_cap - capped_sector_age); + + expected_reward += replaced_day_reward * relevant_replaced_age; + + let penalized_reward = expected_reward * TERMINATION_REWARD_FACTOR_NUM; + let penalized_reward = penalized_reward.div_floor(TERMINATION_REWARD_FACTOR_DENOM); + + cmp::max( + pledge_penalty_for_termination_lower_bound( + reward_estimate, + network_qa_power_estimate, + qa_sector_power, + ), + twenty_day_reward_at_activation + (penalized_reward.div_floor(EPOCHS_IN_DAY)), + ) +} + +// The penalty for optimistically proving a sector with an invalid window PoSt. 
+pub fn pledge_penalty_for_invalid_windowpost( + reward_estimate: &FilterEstimate, + network_qa_power_estimate: &FilterEstimate, + qa_sector_power: &StoragePower, +) -> TokenAmount { + expected_reward_for_power( + reward_estimate, + network_qa_power_estimate, + qa_sector_power, + INVALID_WINDOW_POST_PROJECTION_PERIOD, + ) + &*BASE_PENALTY_FOR_DISPUTED_WINDOW_POST +} + +/// Computes the PreCommit deposit given sector qa weight and current network conditions. +/// PreCommit Deposit = BR(PreCommitDepositProjectionPeriod) +pub fn pre_commit_deposit_for_power( + reward_estimate: &FilterEstimate, + network_qa_power_estimate: &FilterEstimate, + qa_sector_power: &StoragePower, +) -> TokenAmount { + expected_reward_for_power_clamped_at_atto_fil( + reward_estimate, + network_qa_power_estimate, + qa_sector_power, + PRE_COMMIT_DEPOSIT_PROJECTION_PERIOD, + ) +} + +/// Computes the pledge requirement for committing new quality-adjusted power to the network, given +/// the current network total and baseline power, per-epoch reward, and circulating token supply. 
+/// The pledge comprises two parts: +/// - storage pledge, aka IP base: a multiple of the reward expected to be earned by newly-committed power +/// - consensus pledge, aka additional IP: a pro-rata fraction of the circulating money supply +/// +/// IP = IPBase(t) + AdditionalIP(t) +/// IPBase(t) = BR(t, InitialPledgeProjectionPeriod) +/// AdditionalIP(t) = LockTarget(t)*PledgeShare(t) +/// LockTarget = (LockTargetFactorNum / LockTargetFactorDenom) * FILCirculatingSupply(t) +/// PledgeShare(t) = sectorQAPower / max(BaselinePower(t), NetworkQAPower(t)) +pub fn initial_pledge_for_power( + qa_power: &StoragePower, + baseline_power: &StoragePower, + reward_estimate: &FilterEstimate, + network_qa_power_estimate: &FilterEstimate, + circulating_supply: &TokenAmount, + epochs_since_ramp_start: i64, + ramp_duration_epochs: u64, +) -> TokenAmount { + let ip_base = expected_reward_for_power_clamped_at_atto_fil( + reward_estimate, + network_qa_power_estimate, + qa_power, + INITIAL_PLEDGE_PROJECTION_PERIOD, + ); + + let lock_target_num = circulating_supply.atto() * LOCK_TARGET_FACTOR_NUM; + let lock_target_denom = LOCK_TARGET_FACTOR_DENOM; + let pledge_share_num = qa_power; + let network_qa_power = network_qa_power_estimate.estimate(); + + // Once FIP-0081 has fully activated, additional pledge will be 70% baseline + // pledge + 30% simple pledge. + const FIP_0081_ACTIVATION_PERMILLE: i64 = 300; + // Gamma/GAMMA_FIXED_POINT_FACTOR is the share of pledge coming from the + // baseline formulation, with 1-(gamma/GAMMA_FIXED_POINT_FACTOR) coming from + // simple pledge. 
+ // gamma = 1000 - 300 * (epochs_since_ramp_start / ramp_duration_epochs).max(0).min(1) + let skew = if epochs_since_ramp_start < 0 { + // No skew before ramp start + 0 + } else if ramp_duration_epochs == 0 || epochs_since_ramp_start >= ramp_duration_epochs as i64 { + // 100% skew after ramp end + FIP_0081_ACTIVATION_PERMILLE as u64 + } else { + ((epochs_since_ramp_start * FIP_0081_ACTIVATION_PERMILLE) / ramp_duration_epochs as i64) + as u64 + }; + let gamma = GAMMA_FIXED_POINT_FACTOR - skew; + + let additional_ip_num = lock_target_num * pledge_share_num; + + let pledge_share_denom_baseline = + cmp::max(cmp::max(&network_qa_power, baseline_power), qa_power); + let pledge_share_denom_simple = cmp::max(&network_qa_power, qa_power); + + let additional_ip_denom_baseline = pledge_share_denom_baseline * lock_target_denom; + let additional_ip_baseline = (gamma * &additional_ip_num) + .div_floor(&(additional_ip_denom_baseline * GAMMA_FIXED_POINT_FACTOR)); + let additional_ip_denom_simple = pledge_share_denom_simple * lock_target_denom; + let additional_ip_simple = ((GAMMA_FIXED_POINT_FACTOR - gamma) * &additional_ip_num) + .div_floor(&(additional_ip_denom_simple * GAMMA_FIXED_POINT_FACTOR)); + + // convex combination of simple and baseline pledge + let additional_ip = additional_ip_baseline + additional_ip_simple; + + let nominal_pledge = ip_base + TokenAmount::from_atto(additional_ip); + let pledge_cap = TokenAmount::from_atto(INITIAL_PLEDGE_MAX_PER_BYTE.atto() * qa_power); + + cmp::min(nominal_pledge, pledge_cap) +} + +pub fn consensus_fault_penalty(this_epoch_reward: TokenAmount) -> TokenAmount { + (this_epoch_reward * CONSENSUS_FAULT_FACTOR).div_floor(EXPECTED_LEADERS_PER_EPOCH) +} + +/// Returns the amount of a reward to vest, and the vesting schedule, for a reward amount. 
+pub fn locked_reward_from_reward(reward: TokenAmount) -> (TokenAmount, &'static VestSpec) { + let lock_amount = (reward * LOCKED_REWARD_FACTOR_NUM).div_floor(LOCKED_REWARD_FACTOR_DENOM); + (lock_amount, &REWARD_VESTING_SPEC) +} + +const BATCH_DISCOUNT_NUM: u32 = 1; +const BATCH_DISCOUNT_DENOM: u32 = 20; + +lazy_static! { + static ref ESTIMATED_SINGLE_PROVE_COMMIT_GAS_USAGE: BigInt = BigInt::from(49299973); + static ref ESTIMATED_SINGLE_PRE_COMMIT_GAS_USAGE: BigInt = BigInt::from(16433324); +} + +pub fn aggregate_prove_commit_network_fee( + aggregate_size: usize, + base_fee: &TokenAmount, +) -> TokenAmount { + aggregate_network_fee( + aggregate_size, + &ESTIMATED_SINGLE_PROVE_COMMIT_GAS_USAGE, + base_fee, + ) +} + +pub fn aggregate_pre_commit_network_fee( + aggregate_size: usize, + base_fee: &TokenAmount, +) -> TokenAmount { + aggregate_network_fee( + aggregate_size, + &ESTIMATED_SINGLE_PRE_COMMIT_GAS_USAGE, + base_fee, + ) +} + +pub fn aggregate_network_fee( + aggregate_size: usize, + gas_usage: &BigInt, + base_fee: &TokenAmount, +) -> TokenAmount { + let effective_gas_fee = max(base_fee, &*BATCH_BALANCER); + let network_fee_num = effective_gas_fee * gas_usage * aggregate_size * BATCH_DISCOUNT_NUM; + network_fee_num.div_floor(BATCH_DISCOUNT_DENOM) +} diff --git a/actors/miner/src/v16/partition_state.rs b/actors/miner/src/v16/partition_state.rs new file mode 100644 index 00000000..a3fbc1fe --- /dev/null +++ b/actors/miner/src/v16/partition_state.rs @@ -0,0 +1,951 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::convert::TryInto; +use std::ops::{self, Neg}; + +use anyhow::{anyhow, Context}; +use cid::Cid; +use fil_actors_shared::actor_error_v16; +use fil_actors_shared::v16::runtime::Policy; +use fil_actors_shared::v16::{ActorDowncast, Array}; +use fvm_ipld_bitfield::BitField; +use fvm_ipld_blockstore::Blockstore; +use fvm_ipld_encoding::tuple::*; +use fvm_shared4::bigint::bigint_ser; +use 
fvm_shared4::clock::ChainEpoch; +use fvm_shared4::econ::TokenAmount; +use fvm_shared4::error::ExitCode; +use fvm_shared4::sector::{SectorSize, StoragePower}; +use num_traits::{Signed, Zero}; + +use super::{ + power_for_sectors, select_sectors, validate_partition_contains_sectors, BitFieldQueue, + ExpirationQueue, ExpirationSet, QuantSpec, SectorOnChainInfo, Sectors, TerminationResult, + NO_QUANTIZATION, +}; + +// Bitwidth of AMTs determined empirically from mutation patterns and projections of mainnet data. +const PARTITION_EXPIRATION_AMT_BITWIDTH: u32 = 4; +const PARTITION_EARLY_TERMINATION_ARRAY_AMT_BITWIDTH: u32 = 3; + +#[derive(Serialize_tuple, Deserialize_tuple, Clone)] +pub struct Partition { + /// Sector numbers in this partition, including faulty, unproven and terminated sectors. + pub sectors: BitField, + /// Unproven sectors in this partition. This bitfield will be cleared on + /// a successful window post (or at the end of the partition's next + /// deadline). At that time, any still unproven sectors will be added to + /// the faulty sector bitfield. + pub unproven: BitField, + /// Subset of sectors detected/declared faulty and not yet recovered (excl. from PoSt). + /// Faults ∩ Terminated = ∅ + pub faults: BitField, + /// Subset of faulty sectors expected to recover on next PoSt + /// Recoveries ∩ Terminated = ∅ + pub recoveries: BitField, + /// Subset of sectors terminated but not yet removed from partition (excl. from PoSt) + pub terminated: BitField, + /// Maps epochs sectors that expire in or before that epoch. + /// An expiration may be an "on-time" scheduled expiration, or early "faulty" expiration. + /// Keys are quantized to last-in-deadline epochs. + pub expirations_epochs: Cid, // AMT[ChainEpoch]ExpirationSet + /// Subset of terminated that were before their committed expiration epoch, by termination epoch. 
+ /// Termination fees have not yet been calculated or paid and associated deals have not yet been + /// canceled but effective power has already been adjusted. + /// Not quantized. + pub early_terminated: Cid, // AMT[ChainEpoch]BitField + + /// Power of not-yet-terminated sectors (incl faulty & unproven). + pub live_power: PowerPair, + /// Power of yet-to-be-proved sectors (never faulty). + pub unproven_power: PowerPair, + /// Power of currently-faulty sectors. FaultyPower <= LivePower. + pub faulty_power: PowerPair, + /// Power of expected-to-recover sectors. RecoveringPower <= FaultyPower. + pub recovering_power: PowerPair, +} + +impl Partition { + pub fn new(store: &BS) -> anyhow::Result { + let empty_expiration_array = + Array::::new_with_bit_width(store, PARTITION_EXPIRATION_AMT_BITWIDTH) + .flush()?; + let empty_early_termination_array = Array::::new_with_bit_width( + store, + PARTITION_EARLY_TERMINATION_ARRAY_AMT_BITWIDTH, + ) + .flush()?; + + Ok(Self { + sectors: BitField::new(), + unproven: BitField::new(), + faults: BitField::new(), + recoveries: BitField::new(), + terminated: BitField::new(), + expirations_epochs: empty_expiration_array, + early_terminated: empty_early_termination_array, + live_power: PowerPair::zero(), + unproven_power: PowerPair::zero(), + faulty_power: PowerPair::zero(), + recovering_power: PowerPair::zero(), + }) + } + + /// Live sectors are those that are not terminated (but may be faulty). + pub fn live_sectors(&self) -> BitField { + &self.sectors - &self.terminated + } + + /// Active sectors are those that are neither terminated nor faulty nor unproven, i.e. actively contributing power. + pub fn active_sectors(&self) -> BitField { + let non_faulty = &self.live_sectors() - &self.faults; + &non_faulty - &self.unproven + } + + /// Active power is power of non-faulty sectors. 
+ pub fn active_power(&self) -> PowerPair { + &(&self.live_power - &self.faulty_power) - &self.unproven_power + } + + /// AddSectors adds new sectors to the partition. + /// The sectors are "live", neither faulty, recovering, nor terminated. + /// Each new sector's expiration is scheduled shortly after its target expiration epoch. + pub fn add_sectors<BS: Blockstore>( + &mut self, + store: &BS, + proven: bool, + sectors: &[SectorOnChainInfo], + sector_size: SectorSize, + quant: QuantSpec, + ) -> anyhow::Result<PowerPair> { + let mut expirations = ExpirationQueue::new(store, &self.expirations_epochs, quant) + .map_err(|e| e.downcast_wrap("failed to load sector expirations"))?; + + let (sector_numbers, power, _) = expirations + .add_active_sectors(sectors, sector_size) + .map_err(|e| e.downcast_wrap("failed to record new sector expirations"))?; + + self.expirations_epochs = expirations + .amt + .flush() + .map_err(|e| e.downcast_wrap("failed to store sector expirations"))?; + + if self.sectors.contains_any(&sector_numbers) { + return Err(anyhow!("not all added sectors are new")); + } + + // Update other metadata using the calculated totals. + self.sectors |= &sector_numbers; + self.live_power += &power; + + if !proven { + self.unproven_power += &power; + self.unproven |= &sector_numbers; + } + + // check invariants + self.validate_state()?; + + // No change to faults, recoveries, or terminations. + // No change to faulty or recovering power.
+ Ok(power) + } + + /// marks a set of sectors faulty + pub fn add_faults<BS: Blockstore>( + &mut self, + store: &BS, + sector_numbers: &BitField, + sectors: &[SectorOnChainInfo], + fault_expiration: ChainEpoch, + sector_size: SectorSize, + quant: QuantSpec, + ) -> anyhow::Result<(PowerPair, PowerPair)> { + // Load expiration queue + let mut queue = ExpirationQueue::new(store, &self.expirations_epochs, quant) + .map_err(|e| e.downcast_wrap("failed to load partition queue"))?; + + // Reschedule faults + let new_faulty_power = queue + .reschedule_as_faults(fault_expiration, sectors, sector_size) + .map_err(|e| e.downcast_wrap("failed to add faults to partition queue"))?; + + // Save expiration queue + self.expirations_epochs = queue.amt.flush()?; + + // Update partition metadata + self.faults |= sector_numbers; + + // The sectors must not have been previously faulty or recovering. + // No change to recoveries or terminations. + self.faulty_power += &new_faulty_power; + + // Once marked faulty, sectors are moved out of the unproven set. + let unproven = sector_numbers & &self.unproven; + + self.unproven -= &unproven; + + let mut power_delta = new_faulty_power.clone().neg(); + + let unproven_infos = select_sectors(sectors, &unproven) + .map_err(|e| e.downcast_wrap("failed to select unproven sectors"))?; + if !unproven_infos.is_empty() { + let lost_unproven_power = power_for_sectors(sector_size, &unproven_infos); + self.unproven_power -= &lost_unproven_power; + power_delta += &lost_unproven_power; + } + + // check invariants + self.validate_state()?; + + Ok((power_delta, new_faulty_power)) + } + + /// Declares a set of sectors faulty. Already faulty sectors are ignored, + /// terminated sectors are skipped, and recovering sectors are reverted to + /// faulty. + /// + /// - New faults are added to the Faults bitfield and the FaultyPower is increased. + /// - The sectors' expirations are rescheduled to the fault expiration epoch, as "early" (if not expiring earlier).
+ /// + /// Returns the power of the now-faulty sectors. + pub fn record_faults<BS: Blockstore>( + &mut self, + store: &BS, + sectors: &Sectors<'_, BS>, + sector_numbers: &BitField, + fault_expiration_epoch: ChainEpoch, + sector_size: SectorSize, + quant: QuantSpec, + ) -> anyhow::Result<(BitField, PowerPair, PowerPair)> { + validate_partition_contains_sectors(self, sector_numbers) + .map_err(|e| actor_error_v16!(illegal_argument; "failed fault declaration: {}", e))?; + + // Split declarations into declarations of new faults, and retraction of declared recoveries. + let retracted_recoveries = &self.recoveries & sector_numbers; + let mut new_faults = sector_numbers - &retracted_recoveries; + + // Ignore any terminated sectors and previously declared or detected faults + new_faults -= &self.terminated; + new_faults -= &self.faults; + + // Add new faults to state. + let new_fault_sectors = sectors + .load_sector(&new_faults) + .map_err(|e| e.wrap("failed to load fault sectors"))?; + + let (power_delta, new_faulty_power) = if !new_fault_sectors.is_empty() { + self.add_faults( + store, + &new_faults, + &new_fault_sectors, + fault_expiration_epoch, + sector_size, + quant, + ) + .map_err(|e| e.downcast_wrap("failed to add faults"))? + } else { + Default::default() + }; + + // remove faulty recoveries from state + let retracted_recovery_sectors = sectors + .load_sector(&retracted_recoveries) + .map_err(|e| e.wrap("failed to load recovery sectors"))?; + if !retracted_recovery_sectors.is_empty() { + let retracted_recovery_power = + power_for_sectors(sector_size, &retracted_recovery_sectors); + self.remove_recoveries(&retracted_recoveries, &retracted_recovery_power); + } + + // check invariants + self.validate_state()?; + + Ok((new_faults, power_delta, new_faulty_power)) + } + + /// Removes sector numbers from faults and thus from recoveries. + /// The sectors are removed from the Faults and Recovering bitfields, and FaultyPower and RecoveringPower reduced.
+ /// The sectors are re-scheduled for expiration shortly after their target expiration epoch. + /// Returns the power of the now-recovered sectors. + pub fn recover_faults<BS: Blockstore>( + &mut self, + store: &BS, + sectors: &Sectors<'_, BS>, + sector_size: SectorSize, + quant: QuantSpec, + ) -> anyhow::Result<PowerPair> { + // Process recoveries, assuming the proof will be successful. + // This similarly updates state. + let recovered_sectors = sectors + .load_sector(&self.recoveries) + .map_err(|e| e.wrap("failed to load recovered sectors"))?; + + // Load expiration queue + let mut queue = ExpirationQueue::new(store, &self.expirations_epochs, quant) + .map_err(|e| anyhow!("failed to load partition queue: {:?}", e))?; + + // Reschedule recovered + let power = queue + .reschedule_recovered(recovered_sectors, sector_size) + .map_err(|e| e.downcast_wrap("failed to reschedule faults in partition queue"))?; + + // Save expiration queue + self.expirations_epochs = queue.amt.flush()?; + + // Update partition metadata + self.faults -= &self.recoveries; + self.recoveries = BitField::new(); + + // No change to live power. + // No change to unproven sectors. + self.faulty_power -= &power; + self.recovering_power -= &power; + + // check invariants + self.validate_state()?; + + Ok(power) + } + + /// Activates unproven sectors, returning the activated power. + pub fn activate_unproven(&mut self) -> PowerPair { + self.unproven = BitField::default(); + std::mem::take(&mut self.unproven_power) + } + + /// Declares sectors as recovering. Non-faulty and already recovering sectors will be skipped. + pub fn declare_faults_recovered<BS: Blockstore>( + &mut self, + sectors: &Sectors<'_, BS>, + sector_size: SectorSize, + sector_numbers: &BitField, + ) -> anyhow::Result<()> { + // Check that the declared sectors are actually assigned to the partition.
+ validate_partition_contains_sectors(self, sector_numbers) + .map_err(|e| actor_error_v16!(illegal_argument; "failed fault declaration: {}", e))?; + + // Ignore sectors not faulty or already declared recovered + let mut recoveries = sector_numbers & &self.faults; + recoveries -= &self.recoveries; + + // Record the new recoveries for processing at Window PoSt or deadline cron. + let recovery_sectors = sectors + .load_sector(&recoveries) + .map_err(|e| e.wrap("failed to load recovery sectors"))?; + + self.recoveries |= &recoveries; + + let power = power_for_sectors(sector_size, &recovery_sectors); + self.recovering_power += &power; + + // check invariants + self.validate_state()?; + + // No change to faults, or terminations. + // No change to faulty power. + // No change to unproven power/sectors. + Ok(()) + } + + /// Removes sectors from recoveries and recovering power. Assumes sectors are currently faulty and recovering. + pub fn remove_recoveries(&mut self, sector_numbers: &BitField, power: &PowerPair) { + if sector_numbers.is_empty() { + return; + } + + self.recoveries -= sector_numbers; + self.recovering_power -= power; + + // No change to faults, or terminations. + // No change to faulty power. + // No change to unproven power. + } + + /// RescheduleExpirations moves expiring sectors to the target expiration, + /// skipping any sectors it can't find. + /// + /// The power of the rescheduled sectors is assumed to have not changed since + /// initial scheduling. + /// + /// Note: see the docs on State.RescheduleSectorExpirations for details on why we + /// skip sectors/partitions we can't find. + pub fn reschedule_expirations<BS: Blockstore>( + &mut self, + store: &BS, + sectors: &Sectors<'_, BS>, + new_expiration: ChainEpoch, + sector_numbers: &BitField, + sector_size: SectorSize, + quant: QuantSpec, + ) -> anyhow::Result<Vec<SectorOnChainInfo>> { + // Ensure these sectors actually belong to this partition. + let present = sector_numbers & &self.sectors; + + // Filter out terminated sectors.
+ let live = &present - &self.terminated; + + // Filter out faulty sectors. + let active = &live - &self.faults; + + let sector_infos = sectors.load_sector(&active)?; + let mut expirations = ExpirationQueue::new(store, &self.expirations_epochs, quant) + .map_err(|e| e.downcast_wrap("failed to load sector expirations"))?; + expirations.reschedule_expirations(new_expiration, &sector_infos, sector_size)?; + self.expirations_epochs = expirations.amt.flush()?; + + // check invariants + self.validate_state()?; + + Ok(sector_infos) + } + + /// Replaces a number of "old" sectors with new ones. + /// The old sectors must not be faulty or terminated. + /// If the same sector is both removed and added, this permits rescheduling *with a change in power*, + /// unlike RescheduleExpirations. + /// Returns the delta to power and pledge requirement. + pub fn replace_sectors<BS: Blockstore>( + &mut self, + store: &BS, + old_sectors: &[SectorOnChainInfo], + new_sectors: &[SectorOnChainInfo], + sector_size: SectorSize, + quant: QuantSpec, + ) -> anyhow::Result<(PowerPair, TokenAmount)> { + let mut expirations = ExpirationQueue::new(store, &self.expirations_epochs, quant) + .map_err(|e| e.downcast_wrap("failed to load sector expirations"))?; + + let (old_sector_numbers, new_sector_numbers, power_delta, pledge_delta) = expirations + .replace_sectors(old_sectors, new_sectors, sector_size) + .map_err(|e| e.downcast_wrap("failed to replace sector expirations"))?; + + self.expirations_epochs = expirations + .amt + .flush() + .map_err(|e| e.downcast_wrap("failed to save sector expirations"))?; + + // Check the sectors being removed are active (alive, not faulty). + let active = self.active_sectors(); + let all_active = active.contains_all(&old_sector_numbers); + + if !all_active { + return Err(anyhow!( + "refusing to replace inactive sectors in {:?} (active: {:?})", + old_sector_numbers, + active + )); + } + + // Update partition metadata.
+ self.sectors -= &old_sector_numbers; + self.sectors |= &new_sector_numbers; + self.live_power += &power_delta; + + // check invariants + self.validate_state()?; + + // No change to faults, recoveries, or terminations. + // No change to faulty or recovering power. + Ok((power_delta, pledge_delta)) + } + + /// Record the epoch of any sectors expiring early, for termination fee calculation later. + pub fn record_early_termination<BS: Blockstore>( + &mut self, + store: &BS, + epoch: ChainEpoch, + sectors: &BitField, + ) -> anyhow::Result<()> { + let mut early_termination_queue = + BitFieldQueue::new(store, &self.early_terminated, NO_QUANTIZATION) + .map_err(|e| e.downcast_wrap("failed to load early termination queue"))?; + + early_termination_queue + .add_to_queue(epoch, sectors) + .map_err(|e| e.downcast_wrap("failed to add to early termination queue"))?; + + self.early_terminated = early_termination_queue + .amt + .flush() + .map_err(|e| e.downcast_wrap("failed to save early termination queue"))?; + + Ok(()) + } + + /// Marks a collection of sectors as terminated. + /// The sectors are removed from Faults and Recoveries. + /// The epoch of termination is recorded for future termination fee calculation.
+ #[allow(clippy::too_many_arguments)] + pub fn terminate_sectors<BS: Blockstore>( + &mut self, + policy: &Policy, + store: &BS, + sectors: &Sectors<'_, BS>, + epoch: ChainEpoch, + sector_numbers: &BitField, + sector_size: SectorSize, + quant: QuantSpec, + ) -> anyhow::Result<ExpirationSet> { + let live_sectors = self.live_sectors(); + + if !live_sectors.contains_all(sector_numbers) { + return Err( + actor_error_v16!(illegal_argument, "can only terminate live sectors").into(), + ); + } + + let sector_infos = sectors.load_sector(sector_numbers)?; + let mut expirations = ExpirationQueue::new(store, &self.expirations_epochs, quant) + .map_err(|e| e.downcast_wrap("failed to load sector expirations"))?; + let (mut removed, removed_recovering) = expirations + .remove_sectors( + policy, + &sector_infos, + &self.faults, + &self.recoveries, + sector_size, + ) + .map_err(|e| e.downcast_wrap("failed to remove sector expirations"))?; + + self.expirations_epochs = expirations + .amt + .flush() + .map_err(|e| e.downcast_wrap("failed to save sector expirations"))?; + + let removed_sectors = &removed.on_time_sectors | &removed.early_sectors; + + // Record early termination. + self.record_early_termination(store, epoch, &removed_sectors) + .map_err(|e| e.downcast_wrap("failed to record early sector termination"))?; + + let unproven_nos = &removed_sectors & &self.unproven; + + // Update partition metadata.
+ self.faults -= &removed_sectors; + self.recoveries -= &removed_sectors; + self.terminated |= &removed_sectors; + self.live_power -= &removed.active_power; + self.live_power -= &removed.faulty_power; + self.faulty_power -= &removed.faulty_power; + self.recovering_power -= &removed_recovering; + self.unproven -= &unproven_nos; + + let unproven_infos = select_sectors(&sector_infos, &unproven_nos)?; + let removed_unproven_power = power_for_sectors(sector_size, &unproven_infos); + self.unproven_power -= &removed_unproven_power; + removed.active_power -= &removed_unproven_power; + + // check invariants + self.validate_state()?; + + Ok(removed) + } + + /// PopExpiredSectors traverses the expiration queue up to and including some epoch, and marks all expiring + /// sectors as terminated. + /// Returns the expired sector aggregates. + pub fn pop_expired_sectors<BS: Blockstore>( + &mut self, + store: &BS, + until: ChainEpoch, + quant: QuantSpec, + ) -> anyhow::Result<ExpirationSet> { + // This is a sanity check to make sure we handle proofs _before_ + // handling sector expirations. + if !self.unproven.is_empty() { + return Err(anyhow!( + "cannot pop expired sectors from a partition with unproven sectors" + )); + } + + let mut expirations = ExpirationQueue::new(store, &self.expirations_epochs, quant) + .map_err(|e| e.downcast_wrap("failed to load expiration queue"))?; + let popped = expirations.pop_until(until).map_err(|e| { + e.downcast_wrap(format!("failed to pop expiration queue until {}", until)) + })?; + self.expirations_epochs = expirations.amt.flush()?; + + let expired_sectors = &popped.on_time_sectors | &popped.early_sectors; + + // There shouldn't be any recovering sectors or power if this is invoked at deadline end. + // Either the partition was PoSted and the recovering became recovered, or the partition was not PoSted + // and all recoveries retracted. + // No recoveries may be posted until the deadline is closed.
+ if !self.recoveries.is_empty() { + return Err(anyhow!( + "unexpected recoveries while processing expirations" + )); + } + if !self.recovering_power.is_zero() { + return Err(anyhow!( + "unexpected recovering power while processing expirations" + )); + } + + // Nothing expiring now should have already terminated. + if self.terminated.contains_any(&expired_sectors) { + return Err(anyhow!("expiring sectors already terminated")); + } + + // Mark the sectors as terminated and subtract sector power. + self.terminated |= &expired_sectors; + self.faults -= &expired_sectors; + self.live_power -= &(&popped.active_power + &popped.faulty_power); + self.faulty_power -= &popped.faulty_power; + + // Record the epoch of any sectors expiring early, for termination fee calculation later. + self.record_early_termination(store, until, &popped.early_sectors) + .map_err(|e| e.downcast_wrap("failed to record early terminations"))?; + + // check invariants + self.validate_state()?; + + Ok(popped) + } + + /// Marks all non-faulty sectors in the partition as faulty and clears recoveries, updating power memos appropriately. + /// All sectors' expirations are rescheduled to the fault expiration, as "early" (if not expiring earlier) + /// Returns the power of the newly faulty and failed recovery sectors. + pub fn record_missed_post<BS: Blockstore>( + &mut self, + store: &BS, + fault_expiration: ChainEpoch, + quant: QuantSpec, + ) -> anyhow::Result<(PowerPair, PowerPair, PowerPair)> { + // Collapse tail of queue into the last entry, and mark all power faulty. + // Load expiration queue + let mut queue = ExpirationQueue::new(store, &self.expirations_epochs, quant) + .map_err(|e| e.downcast_wrap("failed to load partition queue"))?; + + queue + .reschedule_all_as_faults(fault_expiration) + .map_err(|e| e.downcast_wrap("failed to reschedule all as faults"))?; + + // Save expiration queue + self.expirations_epochs = queue.amt.flush()?; + + // Compute faulty power for penalization.
New faulty power is the total power minus already faulty. + let new_faulty_power = &self.live_power - &self.faulty_power; + // Penalized power is the newly faulty power, plus the failed recovery power. + let penalized_power = &self.recovering_power + &new_faulty_power; + + // The power delta is -(newFaultyPower-unproven), because unproven power + // was never activated in the first place. + let power_delta = &self.unproven_power - &new_faulty_power; + + // Update partition metadata + let all_faults = self.live_sectors(); + self.faults = all_faults; + self.recoveries = BitField::new(); + self.unproven = BitField::new(); + self.faulty_power = self.live_power.clone(); + self.recovering_power = PowerPair::zero(); + self.unproven_power = PowerPair::zero(); + + // check invariants + self.validate_state()?; + + Ok((power_delta, penalized_power, new_faulty_power)) + } + + pub fn pop_early_terminations<BS: Blockstore>( + &mut self, + store: &BS, + max_sectors: u64, + ) -> anyhow::Result<(TerminationResult, /* has more */ bool)> { + // Load early terminations.
+ let mut early_terminated_queue = + BitFieldQueue::new(store, &self.early_terminated, NO_QUANTIZATION)?; + + let mut processed = Vec::<u64>::new(); + let mut remaining: Option<(BitField, ChainEpoch)> = None; + let mut result = TerminationResult::new(); + result.partitions_processed = 1; + + early_terminated_queue + .amt + .for_each_while(|i, sectors| { + let epoch: ChainEpoch = i.try_into()?; + let count = sectors.len(); + let limit = max_sectors - result.sectors_processed; + + let to_process = if limit < count { + let to_process = sectors + .slice(0, limit) + .context("expected more sectors in bitfield")?; + let rest = sectors - &to_process; + remaining = Some((rest, epoch)); + result.sectors_processed += limit; + to_process + } else { + processed.push(i); + result.sectors_processed += count; + sectors.clone() + }; + + result.sectors.insert(epoch, to_process); + + let keep_going = result.sectors_processed < max_sectors; + Ok(keep_going) + }) + .map_err(|e| e.downcast_wrap("failed to walk early terminations queue"))?; + + // Update early terminations + early_terminated_queue + .amt + .batch_delete(processed, true) + .map_err(|e| { + e.downcast_wrap("failed to remove entries from early terminations queue") + })?; + + if let Some((remaining_sectors, remaining_epoch)) = remaining.take() { + early_terminated_queue + .amt + .set(remaining_epoch as u64, remaining_sectors) + .map_err(|e| { + e.downcast_wrap("failed to update remaining entry early terminations queue") + })?; + } + + // Save early terminations. + self.early_terminated = early_terminated_queue + .amt + .flush() + .map_err(|e| e.downcast_wrap("failed to store early terminations queue"))?; + + // check invariants + self.validate_state()?; + + let has_more = early_terminated_queue.amt.count() > 0; + Ok((result, has_more)) + } + + /// Discovers how skipped faults declared during post intersect with existing faults and recoveries, records the + /// new faults in state.
+ /// Returns the amount of power newly faulty, or declared recovered but faulty again. + /// + /// - Skipped faults that are not in the provided partition triggers an error. + /// - Skipped faults that are already declared (but not declared recovered) are ignored. + pub fn record_skipped_faults<BS: Blockstore>( + &mut self, + store: &BS, + sectors: &Sectors<'_, BS>, + sector_size: SectorSize, + quant: QuantSpec, + fault_expiration: ChainEpoch, + skipped: &BitField, + ) -> anyhow::Result<(PowerPair, PowerPair, PowerPair, bool)> { + if skipped.is_empty() { + return Ok(( + PowerPair::zero(), + PowerPair::zero(), + PowerPair::zero(), + false, + )); + } + + // Check that the declared sectors are actually in the partition. + if !self.sectors.contains_all(skipped) { + return Err(actor_error_v16!( + illegal_argument, + "skipped faults contains sectors outside partition" + ) + .into()); + } + + // Find all skipped faults that have been labeled recovered + let retracted_recoveries = &self.recoveries & skipped; + let retracted_recovery_sectors = sectors + .load_sector(&retracted_recoveries) + .map_err(|e| e.wrap("failed to load sectors"))?; + let retracted_recovery_power = power_for_sectors(sector_size, &retracted_recovery_sectors); + + // Ignore skipped faults that are already faults or terminated.
+ let new_faults = &(skipped - &self.terminated) - &self.faults; + let new_fault_sectors = sectors + .load_sector(&new_faults) + .map_err(|e| e.wrap("failed to load sectors"))?; + + // Record new faults + let (power_delta, new_fault_power) = self + .add_faults( + store, + &new_faults, + &new_fault_sectors, + fault_expiration, + sector_size, + quant, + ) + .map_err(|e| { + e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to add skipped faults") + })?; + + // Remove faulty recoveries + self.remove_recoveries(&retracted_recoveries, &retracted_recovery_power); + + // check invariants + self.validate_state()?; + + Ok(( + power_delta, + new_fault_power, + retracted_recovery_power, + !new_fault_sectors.is_empty(), + )) + } + + /// Test invariants about the partition power are valid. + pub fn validate_power_state(&self) -> anyhow::Result<()> { + if self.live_power.raw.is_negative() || self.live_power.qa.is_negative() { + return Err(anyhow!("Partition left with negative live power")); + } + if self.unproven_power.raw.is_negative() || self.unproven_power.qa.is_negative() { + return Err(anyhow!("Partition left with negative unproven power")); + } + if self.faulty_power.raw.is_negative() || self.faulty_power.qa.is_negative() { + return Err(anyhow!("Partition left with negative faulty power")); + } + if self.recovering_power.raw.is_negative() || self.recovering_power.qa.is_negative() { + return Err(anyhow!("Partition left with negative recovering power")); + } + if self.unproven_power.raw > self.live_power.raw { + return Err(anyhow!("Partition left with invalid unproven power")); + } + if self.faulty_power.raw > self.live_power.raw { + return Err(anyhow!("Partition left with invalid faulty power")); + } + // The first half of this conditional shouldn't matter, keeping for readability + if self.recovering_power.raw > self.live_power.raw + || self.recovering_power.raw > self.faulty_power.raw + { + return Err(anyhow!("Partition left with invalid recovering power")); + } + + 
Ok(()) + } + + pub fn validate_bf_state(&self) -> anyhow::Result<()> { + let mut merge = &self.unproven | &self.faults; + + // Unproven or faulty sectors should not be in terminated + if self.terminated.contains_any(&merge) { + return Err(anyhow!( + "Partition left with terminated sectors in multiple states" + )); + } + + merge |= &self.terminated; + + // All merged sectors should exist in partition sectors + if !self.sectors.contains_all(&merge) { + return Err(anyhow!("Partition left with invalid sector state")); + } + + // All recoveries should exist in partition faults + if !self.faults.contains_all(&self.recoveries) { + return Err(anyhow!("Partition left with invalid recovery state")); + } + + Ok(()) + } + + pub fn validate_state(&self) -> anyhow::Result<()> { + self.validate_power_state()?; + self.validate_bf_state()?; + Ok(()) + } +} + +#[derive(Serialize_tuple, Deserialize_tuple, Eq, PartialEq, Clone, Debug, Default)] +// Value type for a pair of raw and QA power. +pub struct PowerPair { + #[serde(with = "bigint_ser")] + pub raw: StoragePower, + #[serde(with = "bigint_ser")] + pub qa: StoragePower, +} + +impl PowerPair { + pub fn new(raw: StoragePower, qa: StoragePower) -> Self { + Self { raw, qa } + } + + pub fn zero() -> Self { + Default::default() + } + + pub fn is_zero(&self) -> bool { + self.raw.is_zero() && self.qa.is_zero() + } +} + +impl ops::Add for &PowerPair { + type Output = PowerPair; + + fn add(self, rhs: Self) -> Self::Output { + PowerPair { + raw: &self.raw + &rhs.raw, + qa: &self.qa + &rhs.qa, + } + } +} + +impl ops::Add for PowerPair { + type Output = Self; + + fn add(self, rhs: Self) -> Self::Output { + &self + &rhs + } +} + +impl ops::AddAssign<&Self> for PowerPair { + fn add_assign(&mut self, rhs: &Self) { + *self = &*self + rhs; + } +} + +impl ops::Sub for &PowerPair { + type Output = PowerPair; + + fn sub(self, rhs: Self) -> Self::Output { + PowerPair { + raw: &self.raw - &rhs.raw, + qa: &self.qa - &rhs.qa, + } + } +} + +impl ops::Sub 
for PowerPair { + type Output = Self; + + fn sub(self, rhs: Self) -> Self::Output { + &self - &rhs + } +} + +impl ops::SubAssign<&Self> for PowerPair { + fn sub_assign(&mut self, rhs: &Self) { + *self = &*self - rhs; + } +} + +impl ops::Neg for PowerPair { + type Output = PowerPair; + + fn neg(self) -> Self::Output { + PowerPair { + raw: -self.raw, + qa: -self.qa, + } + } +} + +impl ops::Neg for &PowerPair { + type Output = PowerPair; + + fn neg(self) -> Self::Output { + -self.clone() + } +} diff --git a/actors/miner/src/v16/policy.rs b/actors/miner/src/v16/policy.rs new file mode 100644 index 00000000..6a01b919 --- /dev/null +++ b/actors/miner/src/v16/policy.rs @@ -0,0 +1,211 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::cmp; + +use cid::{Cid, Version}; +use fil_actors_shared::v16::network::*; +use fil_actors_shared::v16::runtime::Policy; +use fil_actors_shared::v16::{DealWeight, EXPECTED_LEADERS_PER_EPOCH}; +use fvm_shared4::bigint::{BigInt, Integer}; +use fvm_shared4::clock::ChainEpoch; +use fvm_shared4::commcid::{FIL_COMMITMENT_SEALED, POSEIDON_BLS12_381_A1_FC1}; +use fvm_shared4::econ::TokenAmount; +use fvm_shared4::sector::{RegisteredPoStProof, RegisteredSealProof, SectorSize, StoragePower}; +use lazy_static::lazy_static; + +use super::types::SectorOnChainInfo; +use super::{PowerPair, BASE_REWARD_FOR_DISPUTED_WINDOW_POST}; + +/// Precision used for making QA power calculations +pub const SECTOR_QUALITY_PRECISION: i64 = 20; + +/// Base number of sectors before imposing the additional aggregate fee in ProveCommitSectorsNI +pub const NI_AGGREGATE_FEE_BASE_SECTOR_COUNT: usize = 5; + +lazy_static! 
{ + /// Quality multiplier for committed capacity (no deals) in a sector + pub static ref QUALITY_BASE_MULTIPLIER: BigInt = BigInt::from(10); + + /// Quality multiplier for verified deals in a sector + pub static ref VERIFIED_DEAL_WEIGHT_MULTIPLIER: BigInt = BigInt::from(100); + +} + +/// The maximum number of partitions that may be required to be loaded in a single invocation, +/// when all the sector infos for the partitions will be loaded. +pub fn load_partitions_sectors_max(policy: &Policy, partition_sector_count: u64) -> u64 { + cmp::min( + policy.addressed_sectors_max / partition_sector_count, + policy.addressed_partitions_max, + ) +} + +/// Prefix for sealed sector CIDs (CommR). +pub fn is_sealed_sector(c: &Cid) -> bool { + // TODO: Move FIL_COMMITMENT etc, into a better place + c.version() == Version::V1 + && c.codec() == FIL_COMMITMENT_SEALED + && c.hash().code() == POSEIDON_BLS12_381_A1_FC1 + && c.hash().size() == 32 +} + +/// List of proof types which can be used when creating new miner actors +pub fn can_pre_commit_seal_proof(policy: &Policy, proof: RegisteredSealProof) -> bool { + policy.valid_pre_commit_proof_type.contains(proof) +} + +pub fn can_prove_commit_ni_seal_proof(policy: &Policy, proof: RegisteredSealProof) -> bool { + policy.valid_prove_commit_ni_proof_type.contains(proof) +} + +/// Checks whether a seal proof type is supported for new miners and sectors. +pub fn can_extend_seal_proof_type(_proof: RegisteredSealProof) -> bool { + true +} + +/// Maximum duration to allow for the sealing process for seal algorithms. 
+/// Dependent on algorithm and sector size +pub fn max_prove_commit_duration( + policy: &Policy, + proof: RegisteredSealProof, +) -> Option<ChainEpoch> { + use RegisteredSealProof::*; + match proof { + StackedDRG32GiBV1 | StackedDRG2KiBV1 | StackedDRG8MiBV1 | StackedDRG512MiBV1 + | StackedDRG64GiBV1 => Some(EPOCHS_IN_DAY + policy.pre_commit_challenge_delay), + StackedDRG32GiBV1P1 + | StackedDRG64GiBV1P1 + | StackedDRG512MiBV1P1 + | StackedDRG8MiBV1P1 + | StackedDRG2KiBV1P1 + | StackedDRG32GiBV1P1_Feat_SyntheticPoRep + | StackedDRG64GiBV1P1_Feat_SyntheticPoRep + | StackedDRG512MiBV1P1_Feat_SyntheticPoRep + | StackedDRG8MiBV1P1_Feat_SyntheticPoRep + | StackedDRG2KiBV1P1_Feat_SyntheticPoRep => { + Some(30 * EPOCHS_IN_DAY + policy.pre_commit_challenge_delay) + } + _ => None, + } +} + +/// Maximum duration to allow for the sealing process for seal algorithms. +/// Dependent on algorithm and sector size +pub fn seal_proof_sector_maximum_lifetime(proof: RegisteredSealProof) -> Option<ChainEpoch> { + use RegisteredSealProof::*; + match proof { + StackedDRG32GiBV1 | StackedDRG2KiBV1 | StackedDRG8MiBV1 | StackedDRG512MiBV1 + | StackedDRG64GiBV1 => Some(EPOCHS_IN_DAY * 540), + StackedDRG32GiBV1P1 + | StackedDRG2KiBV1P1 + | StackedDRG8MiBV1P1 + | StackedDRG512MiBV1P1 + | StackedDRG64GiBV1P1 + | StackedDRG32GiBV1P1_Feat_SyntheticPoRep + | StackedDRG2KiBV1P1_Feat_SyntheticPoRep + | StackedDRG8MiBV1P1_Feat_SyntheticPoRep + | StackedDRG512MiBV1P1_Feat_SyntheticPoRep + | StackedDRG64GiBV1P1_Feat_SyntheticPoRep + | StackedDRG32GiBV1P2_Feat_NiPoRep + | StackedDRG2KiBV1P2_Feat_NiPoRep + | StackedDRG8MiBV1P2_Feat_NiPoRep + | StackedDRG512MiBV1P2_Feat_NiPoRep + | StackedDRG64GiBV1P2_Feat_NiPoRep => Some(EPOCHS_IN_YEAR * 5), + _ => None, + } +} + +/// minimum number of epochs past the current epoch a sector may be set to expire +pub const MIN_SECTOR_EXPIRATION: i64 = 180 * EPOCHS_IN_DAY; + +/// VerifiedDealWeight is spacetime occupied by verified pieces in a sector.
+/// VerifiedDealWeight should be less than or equal to total SpaceTime of a sector. +/// Sectors full of VerifiedDeals will have a BigInt of VerifiedDealWeightMultiplier/QualityBaseMultiplier. +/// Sectors without VerifiedDeals will have a BigInt of QualityBaseMultiplier/QualityBaseMultiplier. +/// BigInt of a sector is a weighted average of multipliers based on their proportions. +pub fn quality_for_weight( + size: SectorSize, + duration: ChainEpoch, + verified_weight: &DealWeight, +) -> BigInt { + let sector_space_time = BigInt::from(size as u64) * BigInt::from(duration); + + let weighted_base_space_time = + (&sector_space_time - verified_weight) * &*QUALITY_BASE_MULTIPLIER; + let weighted_verified_space_time = verified_weight * &*VERIFIED_DEAL_WEIGHT_MULTIPLIER; + let weighted_sum_space_time = weighted_base_space_time + weighted_verified_space_time; + let scaled_up_weighted_sum_space_time: BigInt = + weighted_sum_space_time << SECTOR_QUALITY_PRECISION; + + scaled_up_weighted_sum_space_time + .div_floor(&sector_space_time) + .div_floor(&QUALITY_BASE_MULTIPLIER) +} + +/// Returns maximum achievable QA power. +pub fn qa_power_max(size: SectorSize) -> StoragePower { + (BigInt::from(size as u64) * &*VERIFIED_DEAL_WEIGHT_MULTIPLIER) + .div_floor(&QUALITY_BASE_MULTIPLIER) +} + +/// Returns the power for a sector size and weight. +pub fn qa_power_for_weight( + size: SectorSize, + duration: ChainEpoch, + verified_weight: &DealWeight, +) -> StoragePower { + let quality = quality_for_weight(size, duration, verified_weight); + (BigInt::from(size as u64) * quality) >> SECTOR_QUALITY_PRECISION +} + +/// Returns the quality-adjusted power for a sector.
+pub fn qa_power_for_sector(size: SectorSize, sector: &SectorOnChainInfo) -> StoragePower { + let duration = sector.expiration - sector.power_base_epoch; + qa_power_for_weight(size, duration, §or.verified_deal_weight) +} + +pub fn raw_power_for_sector(size: SectorSize) -> StoragePower { + BigInt::from(size as u64) +} + +/// Determine maximum number of deal miner's sector can hold +pub fn sector_deals_max(policy: &Policy, size: SectorSize) -> u64 { + cmp::max(256, size as u64 / policy.deal_limit_denominator) +} + +/// Specification for a linear vesting schedule. +pub struct VestSpec { + /// Delay before any amount starts vesting. + pub initial_delay: ChainEpoch, + /// Period over which the total should vest, after the initial delay. + pub vest_period: ChainEpoch, + /// Duration between successive incremental vests (independent of vesting period). + pub step_duration: ChainEpoch, + /// Maximum precision of vesting table (limits cardinality of table). + pub quantization: ChainEpoch, +} + +pub const REWARD_VESTING_SPEC: VestSpec = VestSpec { + initial_delay: 0, // PARAM_FINISH + vest_period: 180 * EPOCHS_IN_DAY, // PARAM_FINISH + step_duration: EPOCHS_IN_DAY, // PARAM_FINISH + quantization: 12 * EPOCHS_IN_HOUR, // PARAM_FINISH +}; + +// Default share of block reward allocated as reward to the consensus fault reporter. +// Applied as epochReward / (expectedLeadersPerEpoch * consensusFaultReporterDefaultShare) +pub const CONSENSUS_FAULT_REPORTER_DEFAULT_SHARE: u64 = 4; + +pub fn reward_for_consensus_slash_report(epoch_reward: &TokenAmount) -> TokenAmount { + epoch_reward.div_floor(EXPECTED_LEADERS_PER_EPOCH * CONSENSUS_FAULT_REPORTER_DEFAULT_SHARE) +} + +// The reward given for successfully disputing a window post. +pub fn reward_for_disputed_window_post( + _proof_type: RegisteredPoStProof, + _disputed_power: PowerPair, +) -> TokenAmount { + // This is currently just the base. In the future, the fee may scale based on the disputed power. 
+ BASE_REWARD_FOR_DISPUTED_WINDOW_POST.clone() +} diff --git a/actors/miner/src/v16/quantize.rs b/actors/miner/src/v16/quantize.rs new file mode 100644 index 00000000..532eb1ca --- /dev/null +++ b/actors/miner/src/v16/quantize.rs @@ -0,0 +1,53 @@ +// Copyright 2021-2023 Protocol Labs +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use fvm_shared4::clock::ChainEpoch; + +/// Constant defining the [QuantSpec] which performs no quantization. +pub const NO_QUANTIZATION: QuantSpec = QuantSpec { unit: 1, offset: 0 }; + +/// A spec for epoch quantization. +#[derive(Copy, Clone)] +pub struct QuantSpec { + /// The unit of quantization + pub unit: ChainEpoch, + /// The offset from zero from which to base the modulus + pub offset: ChainEpoch, +} + +impl QuantSpec { + /// Rounds `epoch` to the nearest exact multiple of the quantization unit offset by + /// `offset % unit`, rounding up. + /// + /// This function is equivalent to `unit * ceil(epoch - (offset % unit) / unit) + (offsetSeed % unit)` + /// with the variables/operations over real numbers instead of ints. 
+ /// + /// Precondition: `unit >= 0` + pub fn quantize_up(&self, epoch: ChainEpoch) -> ChainEpoch { + let offset = self.offset % self.unit; + + let remainder = (epoch - offset) % self.unit; + let quotient = (epoch - offset) / self.unit; + + // Don't round if epoch falls on a quantization epoch + if remainder == 0 + // Negative truncating division rounds up + || epoch - offset < 0 + { + self.unit * quotient + offset + } else { + self.unit * (quotient + 1) + offset + } + } + + pub fn quantize_down(&self, epoch: ChainEpoch) -> ChainEpoch { + let next = self.quantize_up(epoch); + // QuantizeDown == QuantizeUp iff epoch is a fixed point of QuantizeUp + if epoch == next { + next + } else { + next - self.unit + } + } +} diff --git a/actors/miner/src/v16/sector_map.rs b/actors/miner/src/v16/sector_map.rs new file mode 100644 index 00000000..c448c31d --- /dev/null +++ b/actors/miner/src/v16/sector_map.rs @@ -0,0 +1,177 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::collections::BTreeMap; + +use anyhow::anyhow; +use fvm_ipld_bitfield::{BitField, Validate}; +use serde::{Deserialize, Serialize}; + +use fil_actors_shared::v16::runtime::Policy; + +/// Maps deadlines to partition maps. +#[derive(Default)] +pub struct DeadlineSectorMap(BTreeMap); + +impl DeadlineSectorMap { + pub fn new() -> Self { + Default::default() + } + + /// Check validates all bitfields and counts the number of partitions & sectors + /// contained within the map, and returns an error if they exceed the given + /// maximums. 
    pub fn check(&mut self, max_partitions: u64, max_sectors: u64) -> anyhow::Result<()> {
        let (partition_count, sector_count) = self
            .count()
            .map_err(|e| anyhow!("failed to count sectors: {:?}", e))?;

        if partition_count > max_partitions {
            return Err(anyhow!(
                "too many partitions {}, max {}",
                partition_count,
                max_partitions
            ));
        }

        if sector_count > max_sectors {
            return Err(anyhow!(
                "too many sectors {}, max {}",
                sector_count,
                max_sectors
            ));
        }

        Ok(())
    }

    /// Counts the number of partitions & sectors within the map.
    pub fn count(&mut self) -> anyhow::Result<(/* partitions */ u64, /* sectors */ u64)> {
        // Sum partition/sector counts across all deadlines, guarding against overflow.
        self.0.iter_mut().try_fold(
            (0_u64, 0_u64),
            |(partitions, sectors), (deadline_idx, pm)| {
                let (partition_count, sector_count) = pm
                    .count()
                    .map_err(|e| anyhow!("when counting deadline {}: {:?}", deadline_idx, e))?;
                Ok((
                    partitions
                        .checked_add(partition_count)
                        .ok_or_else(|| anyhow!("integer overflow when counting partitions"))?,
                    sectors
                        .checked_add(sector_count)
                        .ok_or_else(|| anyhow!("integer overflow when counting sectors"))?,
                ))
            },
        )
    }

    /// Records the given sector bitfield at the given deadline/partition index.
    pub fn add(
        &mut self,
        policy: &Policy,
        deadline_idx: u64,
        partition_idx: u64,
        sector_numbers: BitField,
    ) -> anyhow::Result<()> {
        // Deadline index must be within the proving period's deadline count.
        if deadline_idx >= policy.wpost_period_deadlines {
            return Err(anyhow!("invalid deadline {}", deadline_idx));
        }

        self.0
            .entry(deadline_idx)
            .or_default()
            .add(partition_idx, sector_numbers)
    }

    /// Records the given sectors at the given deadline/partition index.
    pub fn add_values(
        &mut self,
        policy: &Policy,
        deadline_idx: u64,
        partition_idx: u64,
        sector_numbers: &[u64],
    ) -> anyhow::Result<()> {
        self.add(
            policy,
            deadline_idx,
            partition_idx,
            BitField::try_from_bits(sector_numbers.iter().copied())?,
        )
    }

    /// Returns an iterator over the deadline indexes in the map, in increasing order.
+ pub fn deadlines(&self) -> impl Iterator + '_ { + self.0.keys().copied() + } + + /// Walks the deadlines in deadline order. + pub fn iter(&mut self) -> impl Iterator + '_ { + self.0.iter_mut().map(|(&i, x)| (i, x)) + } +} + +/// Maps partitions to sector bitfields. +#[derive(Default, Serialize, Deserialize)] +pub struct PartitionSectorMap(BTreeMap); + +impl PartitionSectorMap { + /// Records the given sectors at the given partition. + pub fn add_values( + &mut self, + partition_idx: u64, + sector_numbers: Vec, + ) -> anyhow::Result<()> { + self.add(partition_idx, BitField::try_from_bits(sector_numbers)?) + } + /// Records the given sector bitfield at the given partition index, merging + /// it with any existing bitfields if necessary. + pub fn add(&mut self, partition_idx: u64, sector_numbers: BitField) -> anyhow::Result<()> { + match self.0.get_mut(&partition_idx) { + Some(old_sector_numbers) => { + *old_sector_numbers |= §or_numbers; + } + None => { + self.0.insert(partition_idx, sector_numbers); + } + } + Ok(()) + } + + /// Counts the number of partitions & sectors within the map. + pub fn count(&mut self) -> anyhow::Result<(/* partitions */ u64, /* sectors */ u64)> { + let sectors = self + .0 + .iter_mut() + .try_fold(0_u64, |sectors, (partition_idx, bf)| { + let validated = bf.validate().map_err(|e| { + anyhow!( + "failed to parse bitmap for partition {}: {}", + partition_idx, + e + ) + })?; + sectors + .checked_add(validated.len()) + .ok_or_else(|| anyhow!("integer overflow when counting sectors")) + })?; + Ok((self.0.len() as u64, sectors)) + } + + /// Returns a sorted vec of partitions in the map. + pub fn partitions(&self) -> impl Iterator + '_ { + self.0.keys().copied() + } + + /// Walks the partitions in the map, in order of increasing index. 
+ pub fn iter(&mut self) -> impl Iterator + '_ { + self.0.iter_mut().map(|(&i, x)| (i, x)) + } + + pub fn len(&self) -> usize { + self.0.len() + } + + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } +} diff --git a/actors/miner/src/v16/sectors.rs b/actors/miner/src/v16/sectors.rs new file mode 100644 index 00000000..23422d75 --- /dev/null +++ b/actors/miner/src/v16/sectors.rs @@ -0,0 +1,161 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::collections::BTreeSet; + +use anyhow::anyhow; +use cid::Cid; +use fil_actors_shared::actor_error_v16; +use fil_actors_shared::v16::runtime::policy_constants::MAX_SECTOR_NUMBER; +use fil_actors_shared::v16::{ActorDowncast, ActorError, Array, AsActorError}; +use fvm_ipld_amt::Error as AmtError; +use fvm_ipld_bitfield::BitField; +use fvm_ipld_blockstore::Blockstore; +use fvm_shared4::error::ExitCode; +use fvm_shared4::sector::SectorNumber; + +use super::SectorOnChainInfo; + +pub struct Sectors<'db, BS> { + pub amt: Array<'db, SectorOnChainInfo, BS>, +} + +impl<'db, BS: Blockstore> Sectors<'db, BS> { + pub fn load(store: &'db BS, root: &Cid) -> Result { + Ok(Self { + amt: Array::load(root, store)?, + }) + } + + pub fn load_sector( + &self, + sector_numbers: &BitField, + ) -> Result, ActorError> { + let mut sector_infos: Vec = Vec::new(); + for sector_number in sector_numbers.iter() { + let sector_on_chain = self + .amt + .get(sector_number) + .map_err(|e| { + e.downcast_default( + ExitCode::USR_ILLEGAL_STATE, + format!("failed to load sector {}", sector_number), + ) + })? + .cloned() + .ok_or_else( + || actor_error_v16!(not_found; "sector not found: {}", sector_number), + )?; + sector_infos.push(sector_on_chain); + } + Ok(sector_infos) + } + + pub fn get( + &self, + sector_number: SectorNumber, + ) -> Result, ActorError> { + Ok(self + .amt + .get(sector_number) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to get sector {}", sector_number) + })? 
+ .cloned()) + } + + pub fn store(&mut self, infos: Vec) -> anyhow::Result<()> { + for info in infos { + let sector_number = info.sector_number; + + if sector_number > MAX_SECTOR_NUMBER { + return Err(anyhow!("sector number {} out of range", info.sector_number)); + } + + self.amt.set(sector_number, info).map_err(|e| { + e.downcast_wrap(format!("failed to store sector {}", sector_number)) + })?; + } + + Ok(()) + } + + pub fn must_get(&self, sector_number: SectorNumber) -> Result { + self.get(sector_number)? + .ok_or_else(|| actor_error_v16!(not_found, "sector {} not found", sector_number)) + } + + /// Loads info for a set of sectors to be proven. + /// If any of the sectors are declared faulty and not to be recovered, info for the first non-faulty sector is substituted instead. + /// If any of the sectors are declared recovered, they are returned from this method. + pub fn load_for_proof( + &self, + proven_sectors: &BitField, + expected_faults: &BitField, + ) -> anyhow::Result> { + let non_faults = proven_sectors - expected_faults; + + if non_faults.is_empty() { + return Ok(Vec::new()); + } + + let good_sector_number = non_faults.first().expect("faults are not empty"); + + let sector_infos = self.load_with_fault_max( + proven_sectors, + expected_faults, + good_sector_number as SectorNumber, + )?; + + Ok(sector_infos) + } + /// Loads sector info for a sequence of sectors, substituting info for a stand-in sector for any that are faulty. + pub fn load_with_fault_max( + &self, + sectors: &BitField, + faults: &BitField, + fault_stand_in: SectorNumber, + ) -> anyhow::Result> { + let stand_in_info = self.must_get(fault_stand_in)?; + + // Expand faults into a map for quick lookups. + // The faults bitfield should already be a subset of the sectors bitfield. 
+ let sector_count = sectors.len(); + + let fault_set: BTreeSet = faults.iter().collect(); + + let mut sector_infos = Vec::with_capacity(sector_count as usize); + for i in sectors.iter() { + let faulty = fault_set.contains(&i); + let sector = if !faulty { + self.must_get(i)? + } else { + stand_in_info.clone() + }; + sector_infos.push(sector); + } + + Ok(sector_infos) + } +} + +pub fn select_sectors( + sectors: &[SectorOnChainInfo], + field: &BitField, +) -> anyhow::Result> { + let mut to_include: BTreeSet<_> = field.iter().collect(); + let included = sectors + .iter() + .filter(|si| to_include.remove(&si.sector_number)) + .cloned() + .collect(); + + if !to_include.is_empty() { + return Err(anyhow!( + "failed to find {} expected sectors", + to_include.len() + )); + } + + Ok(included) +} diff --git a/actors/miner/src/v16/state.rs b/actors/miner/src/v16/state.rs new file mode 100644 index 00000000..036440ad --- /dev/null +++ b/actors/miner/src/v16/state.rs @@ -0,0 +1,1414 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::borrow::Borrow; +use std::cmp; +use std::ops::Neg; + +use anyhow::{anyhow, Error}; +use cid::Cid; +use fvm_ipld_amt::Error as AmtError; +use fvm_ipld_bitfield::BitField; +use fvm_ipld_blockstore::Blockstore; +use fvm_ipld_encoding::tuple::*; +use fvm_ipld_encoding::{strict_bytes, BytesDe, CborStore}; +use fvm_shared4::address::Address; +use fvm_shared4::clock::{ChainEpoch, EPOCH_UNDEFINED}; +use fvm_shared4::econ::TokenAmount; +use fvm_shared4::error::ExitCode; +use fvm_shared4::sector::{RegisteredPoStProof, SectorNumber, SectorSize}; +use fvm_shared4::{ActorID, HAMT_BIT_WIDTH}; +use itertools::Itertools; +use multihash_codetable::Code; +use num_traits::Zero; + +use fil_actors_shared::actor_error_v16; +use fil_actors_shared::v16::runtime::policy_constants::MAX_SECTOR_NUMBER; +use fil_actors_shared::v16::runtime::Policy; +use fil_actors_shared::v16::{ + ActorContext, ActorDowncast, ActorError, Array, 
AsActorError, Config, Map2, DEFAULT_HAMT_CONFIG, +}; + +use super::beneficiary::*; +use super::deadlines::new_deadline_info; +use super::policy::*; +use super::types::*; +use super::{ + assign_deadlines, deadline_is_mutable, new_deadline_info_from_offset_and_epoch, + quant_spec_for_deadline, BitFieldQueue, Deadline, DeadlineInfo, DeadlineSectorMap, Deadlines, + PowerPair, QuantSpec, Sectors, TerminationResult, VestingFunds, +}; + +pub type PreCommitMap = Map2; +pub const PRECOMMIT_CONFIG: Config = Config { + bit_width: HAMT_BIT_WIDTH, + ..DEFAULT_HAMT_CONFIG +}; + +const PRECOMMIT_EXPIRY_AMT_BITWIDTH: u32 = 6; +pub const SECTORS_AMT_BITWIDTH: u32 = 5; + +/// Balance of Miner Actor should be greater than or equal to +/// the sum of PreCommitDeposits and LockedFunds. +/// It is possible for balance to fall below the sum of PCD, LF and +/// InitialPledgeRequirements, and this is a bad state (IP Debt) +/// that limits a miner actor's behavior (i.e. no balance withdrawals) +/// Excess balance as computed by st.GetAvailableBalance will be +/// withdrawable or usable for pre-commit deposit or pledge lock-up. +#[derive(Serialize_tuple, Deserialize_tuple, Clone, Debug)] +pub struct State { + /// Contains static info about this miner + pub info: Cid, + + /// Total funds locked as pre_commit_deposit + pub pre_commit_deposits: TokenAmount, + + /// Total rewards and added funds locked in vesting table + pub locked_funds: TokenAmount, + + /// VestingFunds (Vesting Funds schedule for the miner). + pub vesting_funds: Cid, + + /// Absolute value of debt this miner owes from unpaid fees. + pub fee_debt: TokenAmount, + + /// Sum of initial pledge requirements of all active sectors. + pub initial_pledge: TokenAmount, + + /// Sectors that have been pre-committed but not yet proven. + /// Map, HAMT + pub pre_committed_sectors: Cid, + + // PreCommittedSectorsCleanUp maintains the state required to cleanup expired PreCommittedSectors. 
+ pub pre_committed_sectors_cleanup: Cid, // BitFieldQueue (AMT[Epoch]*BitField) + + /// Allocated sector IDs. Sector IDs can never be reused once allocated. + pub allocated_sectors: Cid, // BitField + + /// Information for all proven and not-yet-garbage-collected sectors. + /// + /// Sectors are removed from this AMT when the partition to which the + /// sector belongs is compacted. + pub sectors: Cid, // Array, AMT[SectorNumber]SectorOnChainInfo (sparse) + + /// The first epoch in this miner's current proving period. This is the first epoch in which a PoSt for a + /// partition at the miner's first deadline may arrive. Alternatively, it is after the last epoch at which + /// a PoSt for the previous window is valid. + /// Always greater than zero, this may be greater than the current epoch for genesis miners in the first + /// WPoStProvingPeriod epochs of the chain; the epochs before the first proving period starts are exempt from Window + /// PoSt requirements. + /// Updated at the end of every period by a cron callback. + pub proving_period_start: ChainEpoch, + + /// Index of the deadline within the proving period beginning at ProvingPeriodStart that has not yet been + /// finalized. + /// Updated at the end of each deadline window by a cron callback. + pub current_deadline: u64, + + /// The sector numbers due for PoSt at each deadline in the current proving period, frozen at period start. + /// New sectors are added and expired ones removed at proving period boundary. + /// Faults are not subtracted from this in state, but on the fly. + pub deadlines: Cid, + + /// Deadlines with outstanding fees for early sector termination. 
+ pub early_terminations: BitField, + + // True when miner cron is active, false otherwise + pub deadline_cron_active: bool, +} + +#[derive(PartialEq, Eq)] +pub enum CollisionPolicy { + AllowCollisions, + DenyCollisions, +} + +impl State { + #[allow(clippy::too_many_arguments)] + pub fn new( + policy: &Policy, + store: &BS, + info_cid: Cid, + period_start: ChainEpoch, + deadline_idx: u64, + ) -> Result { + let empty_precommit_map = + PreCommitMap::empty(store, PRECOMMIT_CONFIG, "precommits").flush()?; + + let empty_precommits_cleanup_array = + Array::::new_with_bit_width(store, PRECOMMIT_EXPIRY_AMT_BITWIDTH) + .flush() + .map_err(|e| { + e.downcast_default( + ExitCode::USR_ILLEGAL_STATE, + "failed to construct empty precommits array", + ) + })?; + let empty_sectors_array = + Array::::new_with_bit_width(store, SECTORS_AMT_BITWIDTH) + .flush() + .map_err(|e| { + e.downcast_default( + ExitCode::USR_ILLEGAL_STATE, + "failed to construct sectors array", + ) + })?; + let empty_bitfield = store + .put_cbor(&BitField::new(), Code::Blake2b256) + .map_err(|e| { + e.downcast_default( + ExitCode::USR_ILLEGAL_STATE, + "failed to construct empty bitfield", + ) + })?; + let deadline = Deadline::new(store)?; + let empty_deadline = store.put_cbor(&deadline, Code::Blake2b256).map_err(|e| { + e.downcast_default( + ExitCode::USR_ILLEGAL_STATE, + "failed to construct illegal state", + ) + })?; + + let empty_deadlines = store + .put_cbor(&Deadlines::new(policy, empty_deadline), Code::Blake2b256) + .map_err(|e| { + e.downcast_default( + ExitCode::USR_ILLEGAL_STATE, + "failed to construct illegal state", + ) + })?; + + let empty_vesting_funds_cid = store + .put_cbor(&VestingFunds::new(), Code::Blake2b256) + .map_err(|e| { + e.downcast_default( + ExitCode::USR_ILLEGAL_STATE, + "failed to construct illegal state", + ) + })?; + + Ok(Self { + info: info_cid, + + pre_commit_deposits: TokenAmount::default(), + locked_funds: TokenAmount::default(), + + vesting_funds: empty_vesting_funds_cid, + + 
initial_pledge: TokenAmount::default(), + fee_debt: TokenAmount::default(), + + pre_committed_sectors: empty_precommit_map, + allocated_sectors: empty_bitfield, + sectors: empty_sectors_array, + proving_period_start: period_start, + current_deadline: deadline_idx, + deadlines: empty_deadlines, + early_terminations: BitField::new(), + deadline_cron_active: false, + pre_committed_sectors_cleanup: empty_precommits_cleanup_array, + }) + } + + pub fn get_info(&self, store: &BS) -> anyhow::Result { + match store.get_cbor(&self.info) { + Ok(Some(info)) => Ok(info), + Ok(None) => Err(actor_error_v16!(not_found, "failed to get miner info").into()), + Err(e) => Err(e.downcast_wrap("failed to get miner info")), + } + } + + pub fn save_info( + &mut self, + store: &BS, + info: &MinerInfo, + ) -> anyhow::Result<()> { + let cid = store.put_cbor(&info, Code::Blake2b256)?; + self.info = cid; + Ok(()) + } + + /// Returns deadline calculations for the current (according to state) proving period. + pub fn deadline_info(&self, policy: &Policy, current_epoch: ChainEpoch) -> DeadlineInfo { + new_deadline_info_from_offset_and_epoch(policy, self.proving_period_start, current_epoch) + } + // Returns deadline calculations for the state recorded proving period and deadline. + // This is out of date if the a miner does not have an active miner cron + pub fn recorded_deadline_info( + &self, + policy: &Policy, + current_epoch: ChainEpoch, + ) -> DeadlineInfo { + new_deadline_info( + policy, + self.proving_period_start, + self.current_deadline, + current_epoch, + ) + } + + // Returns current proving period start for the current epoch according to the current epoch and constant state offset + pub fn current_proving_period_start( + &self, + policy: &Policy, + current_epoch: ChainEpoch, + ) -> ChainEpoch { + let dl_info = self.deadline_info(policy, current_epoch); + dl_info.period_start + } + + /// Returns deadline calculations for the current (according to state) proving period. 
+ pub fn quant_spec_for_deadline(&self, policy: &Policy, deadline_idx: u64) -> QuantSpec { + new_deadline_info(policy, self.proving_period_start, deadline_idx, 0).quant_spec() + } + + /// Marks a set of sector numbers as having been allocated. + /// If policy is `DenyCollisions`, fails if the set intersects with the sector numbers already allocated. + pub fn allocate_sector_numbers( + &mut self, + store: &BS, + sector_numbers: &BitField, + policy: CollisionPolicy, + ) -> Result<(), ActorError> { + let prior_allocation = store + .get_cbor(&self.allocated_sectors) + .map_err(|e| { + e.downcast_default( + ExitCode::USR_ILLEGAL_STATE, + "failed to load allocated sectors bitfield", + ) + })? + .ok_or_else(|| { + actor_error_v16!(illegal_state, "allocated sectors bitfield not found") + })?; + + if policy != CollisionPolicy::AllowCollisions { + // NOTE: A fancy merge algorithm could extract this intersection while merging, below, saving + // one iteration of the runs + let collisions = &prior_allocation & sector_numbers; + if !collisions.is_empty() { + return Err(actor_error_v16!( + illegal_argument, + "sector numbers {:?} already allocated", + collisions + )); + } + } + let new_allocation = &prior_allocation | sector_numbers; + self.allocated_sectors = + store + .put_cbor(&new_allocation, Code::Blake2b256) + .map_err(|e| { + e.downcast_default( + ExitCode::USR_ILLEGAL_ARGUMENT, + format!( + "failed to store allocated sectors bitfield after adding {:?}", + sector_numbers, + ), + ) + })?; + Ok(()) + } + + /// Stores a pre-committed sector info, failing if the sector number is already present. 
+ pub fn put_precommitted_sectors( + &mut self, + store: &BS, + precommits: Vec, + ) -> anyhow::Result<()> { + let mut precommitted = PreCommitMap::load( + store, + &self.pre_committed_sectors, + PRECOMMIT_CONFIG, + "precommits", + )?; + for precommit in precommits.into_iter() { + let sector_no = precommit.info.sector_number; + let modified = precommitted + .set_if_absent(§or_no, precommit) + .with_context(|| format!("storing precommit for {}", sector_no))?; + if !modified { + return Err(anyhow!("sector {} already pre-commited", sector_no)); + } + } + + self.pre_committed_sectors = precommitted.flush()?; + Ok(()) + } + + pub fn get_precommitted_sector( + &self, + store: &BS, + sector_num: SectorNumber, + ) -> Result, ActorError> { + let precommitted = PreCommitMap::load( + store, + &self.pre_committed_sectors, + PRECOMMIT_CONFIG, + "precommits", + )?; + Ok(precommitted.get(§or_num)?.cloned()) + } + + /// Gets and returns the requested pre-committed sectors, skipping missing sectors. + pub fn find_precommitted_sectors( + &self, + store: &BS, + sector_numbers: &[SectorNumber], + ) -> anyhow::Result> { + let precommitted = PreCommitMap::load( + store, + &self.pre_committed_sectors, + PRECOMMIT_CONFIG, + "precommits", + )?; + let mut result = Vec::with_capacity(sector_numbers.len()); + + for §or_number in sector_numbers { + let info = match precommitted + .get(§or_number) + .with_context(|| format!("loading precommit {}", sector_number))? 
+ { + Some(info) => info.clone(), + None => continue, + }; + + result.push(info); + } + + Ok(result) + } + + pub fn delete_precommitted_sectors( + &mut self, + store: &BS, + sector_nums: &[SectorNumber], + ) -> Result<(), ActorError> { + let mut precommitted = PreCommitMap::load( + store, + &self.pre_committed_sectors, + PRECOMMIT_CONFIG, + "precommits", + )?; + for §or_num in sector_nums { + let prev_entry = precommitted.delete(§or_num)?; + if prev_entry.is_none() { + return Err(actor_error_v16!( + illegal_state, + "sector {} not pre-committed", + sector_num + )); + } + } + + self.pre_committed_sectors = precommitted.flush()?; + Ok(()) + } + + pub fn has_sector_number( + &self, + store: &BS, + sector_num: SectorNumber, + ) -> anyhow::Result { + let sectors = Sectors::load(store, &self.sectors)?; + Ok(sectors.get(sector_num)?.is_some()) + } + + pub fn put_sectors( + &mut self, + store: &BS, + new_sectors: Vec, + ) -> anyhow::Result<()> { + let mut sectors = Sectors::load(store, &self.sectors) + .map_err(|e| e.downcast_wrap("failed to load sectors"))?; + + sectors.store(new_sectors)?; + + self.sectors = sectors + .amt + .flush() + .map_err(|e| e.downcast_wrap("failed to persist sectors"))?; + + Ok(()) + } + + pub fn get_sector( + &self, + store: &BS, + sector_num: SectorNumber, + ) -> Result, ActorError> { + let sectors = Sectors::load(store, &self.sectors) + .context_code(ExitCode::USR_ILLEGAL_STATE, "loading sectors")?; + sectors.get(sector_num) + } + + pub fn delete_sectors( + &mut self, + store: &BS, + sector_nos: &BitField, + ) -> Result<(), AmtError> { + let mut sectors = Sectors::load(store, &self.sectors)?; + + for sector_num in sector_nos.iter() { + let deleted_sector = sectors + .amt + .delete(sector_num) + .map_err(|e| e.downcast_wrap("could not delete sector number"))?; + if deleted_sector.is_none() { + return Err(AmtError::Dynamic(Error::msg(format!( + "sector {} doesn't exist, failed to delete", + sector_num + )))); + } + } + + self.sectors = 
sectors.amt.flush()?; + Ok(()) + } + + pub fn for_each_sector(&self, store: &BS, mut f: F) -> anyhow::Result<()> + where + F: FnMut(&SectorOnChainInfo) -> anyhow::Result<()>, + { + let sectors = Sectors::load(store, &self.sectors)?; + sectors.amt.for_each(|_, v| f(v))?; + Ok(()) + } + + /// Returns the deadline and partition index for a sector number. + pub fn find_sector( + &self, + store: &BS, + sector_number: SectorNumber, + ) -> anyhow::Result<(u64, u64)> { + let deadlines = self.load_deadlines(store)?; + deadlines.find_sector(store, sector_number) + } + + /// Schedules each sector to expire at its next deadline end. If it can't find + /// any given sector, it skips it. + /// + /// This method assumes that each sector's power has not changed, despite the rescheduling. + /// + /// Note: this method is used to "upgrade" sectors, rescheduling the now-replaced + /// sectors to expire at the end of the next deadline. Given the expense of + /// sealing a sector, this function skips missing/faulty/terminated "upgraded" + /// sectors instead of failing. That way, the new sectors can still be proved. 
+ pub fn reschedule_sector_expirations( + &mut self, + policy: &Policy, + store: &BS, + current_epoch: ChainEpoch, + sector_size: SectorSize, + mut deadline_sectors: DeadlineSectorMap, + ) -> anyhow::Result> { + let mut deadlines = self.load_deadlines(store)?; + let sectors = Sectors::load(store, &self.sectors)?; + + let mut all_replaced = Vec::new(); + for (deadline_idx, partition_sectors) in deadline_sectors.iter() { + let deadline_info = new_deadline_info( + policy, + self.current_proving_period_start(policy, current_epoch), + deadline_idx, + current_epoch, + ) + .next_not_elapsed(); + let new_expiration = deadline_info.last(); + let mut deadline = deadlines.load_deadline(store, deadline_idx)?; + + let replaced = deadline.reschedule_sector_expirations( + store, + §ors, + new_expiration, + partition_sectors, + sector_size, + deadline_info.quant_spec(), + )?; + all_replaced.extend(replaced); + + deadlines.update_deadline(policy, store, deadline_idx, &deadline)?; + } + + self.save_deadlines(store, deadlines)?; + + Ok(all_replaced) + } + + /// Assign new sectors to deadlines. + pub fn assign_sectors_to_deadlines( + &mut self, + policy: &Policy, + store: &BS, + current_epoch: ChainEpoch, + mut sectors: Vec, + partition_size: u64, + sector_size: SectorSize, + ) -> anyhow::Result<()> { + let mut deadlines = self.load_deadlines(store)?; + + // Sort sectors by number to get better runs in partition bitfields. + sectors.sort_by_key(|info| info.sector_number); + + let mut deadline_vec: Vec> = + (0..policy.wpost_period_deadlines).map(|_| None).collect(); + + deadlines.for_each(store, |deadline_idx, deadline| { + // Skip deadlines that aren't currently mutable. 
+ if deadline_is_mutable( + policy, + self.current_proving_period_start(policy, current_epoch), + deadline_idx, + current_epoch, + ) { + deadline_vec[deadline_idx as usize] = Some(deadline); + } + + Ok(()) + })?; + + let deadline_to_sectors = assign_deadlines( + policy, + policy.max_partitions_per_deadline, + partition_size, + &deadline_vec, + sectors, + )?; + + for (deadline_idx, deadline_sectors) in deadline_to_sectors.into_iter().enumerate() { + if deadline_sectors.is_empty() { + continue; + } + + let quant = self.quant_spec_for_deadline(policy, deadline_idx as u64); + let deadline = deadline_vec[deadline_idx].as_mut().unwrap(); + + // The power returned from AddSectors is ignored because it's not activated (proven) yet. + let proven = false; + deadline.add_sectors( + store, + partition_size, + proven, + &deadline_sectors, + sector_size, + quant, + )?; + + deadlines.update_deadline(policy, store, deadline_idx as u64, deadline)?; + } + + self.save_deadlines(store, deadlines)?; + + Ok(()) + } + + #[allow(clippy::too_many_arguments)] + pub fn assign_sectors_to_deadline( + &mut self, + policy: &Policy, + store: &BS, + current_epoch: ChainEpoch, + mut sectors: Vec, + partition_size: u64, + sector_size: SectorSize, + deadline_idx: u64, + ) -> Result<(), ActorError> { + let mut deadlines = self.load_deadlines(store)?; + let mut deadline = deadlines.load_deadline(store, deadline_idx)?; + + // Sort sectors by number to get better runs in partition bitfields. 
+ sectors.sort_by_key(|info| info.sector_number); + + if !deadline_is_mutable( + policy, + self.current_proving_period_start(policy, current_epoch), + deadline_idx, + current_epoch, + ) { + return Err(actor_error_v16!( + illegal_argument, + "proving deadline {} must not be the current or next deadline ", + deadline_idx + )); + } + + let quant = self.quant_spec_for_deadline(policy, deadline_idx); + let proven = false; + deadline + .add_sectors(store, partition_size, proven, §ors, sector_size, quant) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to add sectors to deadline {}", deadline_idx) + })?; + + deadlines + .update_deadline(policy, store, deadline_idx, &deadline) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to update deadline {}", deadline_idx) + })?; + self.save_deadlines(store, deadlines) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || "failed to save deadlines")?; + + Ok(()) + } + + /// Pops up to `max_sectors` early terminated sectors from all deadlines. + /// + /// Returns `true` if we still have more early terminations to process. + pub fn pop_early_terminations( + &mut self, + policy: &Policy, + store: &BS, + max_partitions: u64, + max_sectors: u64, + ) -> anyhow::Result<(TerminationResult, /* has more */ bool)> { + // Anything to do? This lets us avoid loading the deadlines if there's nothing to do. + if self.early_terminations.is_empty() { + return Ok((Default::default(), false)); + } + + // Load deadlines + let mut deadlines = self.load_deadlines(store)?; + + let mut result = TerminationResult::new(); + let mut to_unset = Vec::new(); + + // Process early terminations. + for i in self.early_terminations.iter() { + let deadline_idx = i; + + // Load deadline + partitions. 
+ let mut deadline = deadlines.load_deadline(store, deadline_idx)?; + + let (deadline_result, more) = deadline + .pop_early_terminations( + store, + max_partitions - result.partitions_processed, + max_sectors - result.sectors_processed, + ) + .map_err(|e| { + e.downcast_wrap(format!( + "failed to pop early terminations for deadline {}", + deadline_idx + )) + })?; + + result += deadline_result; + + if !more { + to_unset.push(i); + } + + // Save the deadline + deadlines.update_deadline(policy, store, deadline_idx, &deadline)?; + + if !result.below_limit(max_partitions, max_sectors) { + break; + } + } + + for deadline_idx in to_unset { + self.early_terminations.unset(deadline_idx); + } + + // Save back the deadlines. + self.save_deadlines(store, deadlines)?; + + // Ok, check to see if we've handled all early terminations. + let no_early_terminations = self.early_terminations.is_empty(); + + Ok((result, !no_early_terminations)) + } + + /// Returns an error if the target sector cannot be found, or some other bad state is reached. 
+ /// Returns Ok(false) if the target sector is faulty, terminated, or unproven + /// Returns Ok(true) otherwise + pub fn check_sector_active( + &self, + store: &BS, + deadline_idx: u64, + partition_idx: u64, + sector_number: SectorNumber, + require_proven: bool, + ) -> Result { + let dls = self.load_deadlines(store)?; + let dl = dls.load_deadline(store, deadline_idx)?; + let partition = dl.load_partition(store, partition_idx)?; + + let exists = partition.sectors.get(sector_number); + if !exists { + return Err(actor_error_v16!( + not_found; + "sector {} not a member of partition {}, deadline {}", + sector_number, partition_idx, deadline_idx + )); + } + + let faulty = partition.faults.get(sector_number); + if faulty { + return Ok(false); + } + + let terminated = partition.terminated.get(sector_number); + if terminated { + return Ok(false); + } + + let unproven = partition.unproven.get(sector_number); + if unproven && require_proven { + return Ok(false); + } + + Ok(true) + } + + /// Returns an error if the target sector cannot be found and/or is faulty/terminated. 
+ pub fn check_sector_health( + &self, + store: &BS, + deadline_idx: u64, + partition_idx: u64, + sector_number: SectorNumber, + ) -> anyhow::Result<()> { + let deadlines = self.load_deadlines(store)?; + let deadline = deadlines.load_deadline(store, deadline_idx)?; + let partition = deadline.load_partition(store, partition_idx)?; + + if !partition.sectors.get(sector_number) { + return Err(actor_error_v16!( + not_found; + "sector {} not a member of partition {}, deadline {}", + sector_number, partition_idx, deadline_idx + ) + .into()); + } + + if partition.faults.get(sector_number) { + return Err(actor_error_v16!( + forbidden; + "sector {} not a member of partition {}, deadline {}", + sector_number, partition_idx, deadline_idx + ) + .into()); + } + + if partition.terminated.get(sector_number) { + return Err(actor_error_v16!( + not_found; + "sector {} not of partition {}, deadline {} is terminated", + sector_number, partition_idx, deadline_idx + ) + .into()); + } + + Ok(()) + } + + /// Loads sector info for a sequence of sectors. + pub fn load_sector_infos( + &self, + store: &BS, + sectors: &BitField, + ) -> anyhow::Result> { + Ok(Sectors::load(store, &self.sectors)?.load_sector(sectors)?) + } + + pub fn load_deadlines(&self, store: &BS) -> Result { + store + .get_cbor::(&self.deadlines) + .map_err(|e| { + e.downcast_default(ExitCode::USR_ILLEGAL_STATE, "failed to load deadlines") + })? + .ok_or_else( + || actor_error_v16!(illegal_state; "failed to load deadlines {}", self.deadlines), + ) + } + + pub fn save_deadlines( + &mut self, + store: &BS, + deadlines: Deadlines, + ) -> anyhow::Result<()> { + self.deadlines = store.put_cbor(&deadlines, Code::Blake2b256)?; + Ok(()) + } + + /// Loads the vesting funds table from the store. + pub fn load_vesting_funds(&self, store: &BS) -> anyhow::Result { + Ok(store + .get_cbor(&self.vesting_funds) + .map_err(|e| { + e.downcast_wrap(format!("failed to load vesting funds {}", self.vesting_funds)) + })? 
+ .ok_or_else( + || actor_error_v16!(not_found; "failed to load vesting funds {:?}", self.vesting_funds), + )?) + } + + /// Saves the vesting table to the store. + pub fn save_vesting_funds( + &mut self, + store: &BS, + funds: &VestingFunds, + ) -> anyhow::Result<()> { + self.vesting_funds = store.put_cbor(funds, Code::Blake2b256)?; + Ok(()) + } + + // Return true when the miner actor needs to continue scheduling deadline crons + pub fn continue_deadline_cron(&self) -> bool { + !self.pre_commit_deposits.is_zero() + || !self.initial_pledge.is_zero() + || !self.locked_funds.is_zero() + } + + // + // Funds and vesting + // + + pub fn add_pre_commit_deposit(&mut self, amount: &TokenAmount) -> anyhow::Result<()> { + let new_total = &self.pre_commit_deposits + amount; + if new_total.is_negative() { + return Err(anyhow!( + "negative pre-commit deposit {} after adding {} to prior {}", + new_total, + amount, + self.pre_commit_deposits + )); + } + self.pre_commit_deposits = new_total; + Ok(()) + } + + pub fn add_initial_pledge(&mut self, amount: &TokenAmount) -> anyhow::Result<()> { + let new_total = &self.initial_pledge + amount; + if new_total.is_negative() { + return Err(anyhow!( + "negative initial pledge requirement {} after adding {} to prior {}", + new_total, + amount, + self.initial_pledge + )); + } + self.initial_pledge = new_total; + Ok(()) + } + + pub fn apply_penalty(&mut self, penalty: &TokenAmount) -> anyhow::Result<()> { + if penalty.is_negative() { + Err(anyhow!("applying negative penalty {} not allowed", penalty)) + } else { + self.fee_debt += penalty; + Ok(()) + } + } + + /// First vests and unlocks the vested funds AND then locks the given funds in the vesting table. 
+ pub fn add_locked_funds( + &mut self, + store: &BS, + current_epoch: ChainEpoch, + vesting_sum: &TokenAmount, + spec: &VestSpec, + ) -> anyhow::Result { + if vesting_sum.is_negative() { + return Err(anyhow!("negative vesting sum {}", vesting_sum)); + } + + let mut vesting_funds = self.load_vesting_funds(store)?; + + // unlock vested funds first + let amount_unlocked = vesting_funds.unlock_vested_funds(current_epoch); + self.locked_funds -= &amount_unlocked; + if self.locked_funds.is_negative() { + return Err(anyhow!( + "negative locked funds {} after unlocking {}", + self.locked_funds, + amount_unlocked + )); + } + // add locked funds now + vesting_funds.add_locked_funds(current_epoch, vesting_sum, self.proving_period_start, spec); + self.locked_funds += vesting_sum; + + // save the updated vesting table state + self.save_vesting_funds(store, &vesting_funds)?; + + Ok(amount_unlocked) + } + + /// Draws from vesting table and unlocked funds to repay up to the fee debt. + /// Returns the amount unlocked from the vesting table and the amount taken from + /// current balance. If the fee debt exceeds the total amount available for repayment + /// the fee debt field is updated to track the remaining debt. Otherwise it is set to zero. 
+ pub fn repay_partial_debt_in_priority_order( + &mut self, + store: &BS, + current_epoch: ChainEpoch, + curr_balance: &TokenAmount, + ) -> Result< + ( + TokenAmount, // from vesting + TokenAmount, // from balance + ), + anyhow::Error, + > { + let unlocked_balance = self.get_unlocked_balance(curr_balance)?; + + let fee_debt = self.fee_debt.clone(); + let from_vesting = self.unlock_unvested_funds(store, current_epoch, &fee_debt)?; + + if from_vesting > self.fee_debt { + return Err(anyhow!( + "should never unlock more than the debt we need to repay" + )); + } + self.fee_debt -= &from_vesting; + + let from_balance = cmp::min(&unlocked_balance, &self.fee_debt).clone(); + self.fee_debt -= &from_balance; + + Ok((from_vesting, from_balance)) + } + + /// Repays the full miner actor fee debt. Returns the amount that must be + /// burnt and an error if there are not sufficient funds to cover repayment. + /// Miner state repays from unlocked funds and fails if unlocked funds are insufficient to cover fee debt. + /// FeeDebt will be zero after a successful call. + pub fn repay_debts(&mut self, curr_balance: &TokenAmount) -> anyhow::Result { + let unlocked_balance = self.get_unlocked_balance(curr_balance)?; + if unlocked_balance < self.fee_debt { + return Err(actor_error_v16!( + insufficient_funds, + "unlocked balance can not repay fee debt ({} < {})", + unlocked_balance, + self.fee_debt + ) + .into()); + } + + Ok(std::mem::take(&mut self.fee_debt)) + } + /// Unlocks an amount of funds that have *not yet vested*, if possible. + /// The soonest-vesting entries are unlocked first. + /// Returns the amount actually unlocked. 
+ pub fn unlock_unvested_funds( + &mut self, + store: &BS, + current_epoch: ChainEpoch, + target: &TokenAmount, + ) -> anyhow::Result { + if target.is_zero() || self.locked_funds.is_zero() { + return Ok(TokenAmount::zero()); + } + + let mut vesting_funds = self.load_vesting_funds(store)?; + let amount_unlocked = vesting_funds.unlock_unvested_funds(current_epoch, target); + self.locked_funds -= &amount_unlocked; + if self.locked_funds.is_negative() { + return Err(anyhow!( + "negative locked funds {} after unlocking {}", + self.locked_funds, + amount_unlocked + )); + } + + self.save_vesting_funds(store, &vesting_funds)?; + Ok(amount_unlocked) + } + + /// Unlocks all vesting funds that have vested before the provided epoch. + /// Returns the amount unlocked. + pub fn unlock_vested_funds( + &mut self, + store: &BS, + current_epoch: ChainEpoch, + ) -> anyhow::Result { + if self.locked_funds.is_zero() { + return Ok(TokenAmount::zero()); + } + + let mut vesting_funds = self.load_vesting_funds(store)?; + let amount_unlocked = vesting_funds.unlock_vested_funds(current_epoch); + self.locked_funds -= &amount_unlocked; + if self.locked_funds.is_negative() { + return Err(anyhow!( + "vesting cause locked funds to become negative: {}", + self.locked_funds, + )); + } + + self.save_vesting_funds(store, &vesting_funds)?; + Ok(amount_unlocked) + } + + /// CheckVestedFunds returns the amount of vested funds that have vested before the provided epoch. + pub fn check_vested_funds( + &self, + store: &BS, + current_epoch: ChainEpoch, + ) -> anyhow::Result { + let vesting_funds = self.load_vesting_funds(store)?; + Ok(vesting_funds + .funds + .iter() + .take_while(|fund| fund.epoch < current_epoch) + .fold(TokenAmount::zero(), |acc, fund| acc + &fund.amount)) + } + + /// Unclaimed funds that are not locked -- includes funds used to cover initial pledge requirement. 
+ pub fn get_unlocked_balance(&self, actor_balance: &TokenAmount) -> anyhow::Result { + let unlocked_balance = + actor_balance - &self.locked_funds - &self.pre_commit_deposits - &self.initial_pledge; + if unlocked_balance.is_negative() { + return Err(anyhow!("negative unlocked balance {}", unlocked_balance)); + } + Ok(unlocked_balance) + } + + /// Unclaimed funds. Actor balance - (locked funds, precommit deposit, ip requirement) + /// Can go negative if the miner is in IP debt. + pub fn get_available_balance( + &self, + actor_balance: &TokenAmount, + ) -> anyhow::Result { + // (actor_balance - &self.locked_funds) - &self.pre_commit_deposit - &self.initial_pledge + Ok(self.get_unlocked_balance(actor_balance)? - &self.fee_debt) + } + + pub fn check_balance_invariants(&self, balance: &TokenAmount) -> anyhow::Result<()> { + if self.pre_commit_deposits.is_negative() { + return Err(anyhow!( + "pre-commit deposit is negative: {}", + self.pre_commit_deposits + )); + } + if self.locked_funds.is_negative() { + return Err(anyhow!("locked funds is negative: {}", self.locked_funds)); + } + if self.initial_pledge.is_negative() { + return Err(anyhow!( + "initial pledge is negative: {}", + self.initial_pledge + )); + } + if self.fee_debt.is_negative() { + return Err(anyhow!("fee debt is negative: {}", self.fee_debt)); + } + + let min_balance = &self.pre_commit_deposits + &self.locked_funds + &self.initial_pledge; + if balance < &min_balance { + return Err(anyhow!("balance {} below minimum {}", balance, min_balance)); + } + + Ok(()) + } + + /// pre-commit expiry + pub fn quant_spec_every_deadline(&self, policy: &Policy) -> QuantSpec { + QuantSpec { + unit: policy.wpost_challenge_window, + offset: self.proving_period_start, + } + } + + pub fn add_pre_commit_clean_ups( + &mut self, + policy: &Policy, + store: &BS, + cleanup_events: Vec<(ChainEpoch, u64)>, + ) -> anyhow::Result<()> { + // Load BitField Queue for sector expiry + let quant = self.quant_spec_every_deadline(policy); + let mut queue
= + super::BitFieldQueue::new(store, &self.pre_committed_sectors_cleanup, quant) + .map_err(|e| e.downcast_wrap("failed to load pre-commit clean up queue"))?; + + queue.add_many_to_queue_values(cleanup_events.into_iter())?; + self.pre_committed_sectors_cleanup = queue.amt.flush()?; + Ok(()) + } + + pub fn cleanup_expired_pre_commits( + &mut self, + policy: &Policy, + store: &BS, + current_epoch: ChainEpoch, + ) -> anyhow::Result { + let mut deposit_to_burn = TokenAmount::zero(); + + // cleanup expired pre-committed sectors + let mut cleanup_queue = BitFieldQueue::new( + store, + &self.pre_committed_sectors_cleanup, + self.quant_spec_every_deadline(policy), + )?; + + let (sectors, modified) = cleanup_queue.pop_until(current_epoch)?; + + if modified { + self.pre_committed_sectors_cleanup = cleanup_queue.amt.flush()?; + } + + let mut precommits_to_delete = Vec::new(); + let precommitted = PreCommitMap::load( + store, + &self.pre_committed_sectors, + PRECOMMIT_CONFIG, + "precommits", + )?; + + for i in sectors.iter() { + let sector_number = i as SectorNumber; + let sector: SectorPreCommitOnChainInfo = + match precommitted.get(&sector_number)?.cloned() { + Some(sector) => sector, + // already committed/deleted + None => continue, + }; + + // mark it for deletion + precommits_to_delete.push(sector_number); + + // increment deposit to burn + deposit_to_burn += sector.pre_commit_deposit; + } + + // Actually delete it.
+ if !precommits_to_delete.is_empty() { + self.delete_precommitted_sectors(store, &precommits_to_delete)?; + } + + self.pre_commit_deposits -= &deposit_to_burn; + if self.pre_commit_deposits.is_negative() { + return Err(anyhow!( + "pre-commit clean up caused negative deposits: {}", + self.pre_commit_deposits + )); + } + + Ok(deposit_to_burn) + } + + pub fn advance_deadline( + &mut self, + policy: &Policy, + store: &BS, + current_epoch: ChainEpoch, + ) -> anyhow::Result { + let mut pledge_delta = TokenAmount::zero(); + + let dl_info = self.deadline_info(policy, current_epoch); + + if !dl_info.period_started() { + return Ok(AdvanceDeadlineResult { + pledge_delta, + power_delta: PowerPair::zero(), + previously_faulty_power: PowerPair::zero(), + detected_faulty_power: PowerPair::zero(), + total_faulty_power: PowerPair::zero(), + }); + } + + self.current_deadline = (dl_info.index + 1) % policy.wpost_period_deadlines; + if self.current_deadline == 0 { + self.proving_period_start = dl_info.period_start + policy.wpost_proving_period; + } + + let mut deadlines = self.load_deadlines(store)?; + + let mut deadline = deadlines.load_deadline(store, dl_info.index)?; + + let previously_faulty_power = deadline.faulty_power.clone(); + + if !deadline.is_live() { + return Ok(AdvanceDeadlineResult { + pledge_delta, + power_delta: PowerPair::zero(), + previously_faulty_power, + detected_faulty_power: PowerPair::zero(), + total_faulty_power: deadline.faulty_power, + }); + } + + let quant = quant_spec_for_deadline(policy, &dl_info); + + // Detect and penalize missing proofs. + let fault_expiration = dl_info.last() + policy.fault_max_age; + + let (mut power_delta, detected_faulty_power) = + deadline.process_deadline_end(store, quant, fault_expiration, self.sectors)?; + + // Capture deadline's faulty power after new faults have been detected, but before it is + // dropped along with faulty sectors expiring this round. 
+ let total_faulty_power = deadline.faulty_power.clone(); + + // Expire sectors that are due, either for on-time expiration or "early" faulty-for-too-long. + let expired = deadline.pop_expired_sectors(store, dl_info.last(), quant)?; + + // Release pledge requirements for the sectors expiring on-time. + // Pledge for the sectors expiring early is retained to support the termination fee that + // will be assessed when the early termination is processed. + pledge_delta -= &expired.on_time_pledge; + self.add_initial_pledge(&expired.on_time_pledge.neg())?; + + // Record reduction in power of the amount of expiring active power. + // Faulty power has already been lost, so the amount expiring can be excluded from the delta. + power_delta -= &expired.active_power; + + let no_early_terminations = expired.early_sectors.is_empty(); + if !no_early_terminations { + self.early_terminations.set(dl_info.index); + } + + deadlines.update_deadline(policy, store, dl_info.index, &deadline)?; + + self.save_deadlines(store, deadlines)?; + + Ok(AdvanceDeadlineResult { + pledge_delta, + power_delta, + previously_faulty_power, + detected_faulty_power, + total_faulty_power, + }) + } + + // Loads sectors precommit information from store, requiring it to exist. + pub fn get_precommitted_sectors( + &self, + store: &BS, + sector_nos: impl IntoIterator>, + ) -> Result, ActorError> { + let mut precommits = Vec::new(); + let precommitted = PreCommitMap::load( + store, + &self.pre_committed_sectors, + PRECOMMIT_CONFIG, + "precommits", + )?; + for sector_no in sector_nos.into_iter() { + let sector_no = *sector_no.borrow(); + if sector_no > MAX_SECTOR_NUMBER { + return Err( + actor_error_v16!(illegal_argument; "sector number greater than maximum"), + ); + } + let info: &SectorPreCommitOnChainInfo = precommitted + .get(&sector_no) + .exit_code(ExitCode::USR_ILLEGAL_STATE)?
+ .ok_or_else(|| actor_error_v16!(not_found, "sector {} not found", sector_no))?; + precommits.push(info.clone()); + } + Ok(precommits) + } +} + +pub struct AdvanceDeadlineResult { + pub pledge_delta: TokenAmount, + pub power_delta: PowerPair, + /// Power that was faulty before this advance (including recovering) + pub previously_faulty_power: PowerPair, + /// Power of new faults and failed recoveries + pub detected_faulty_power: PowerPair, + /// Total faulty power after detecting faults (before expiring sectors) + /// Note that failed recovery power is included in both PreviouslyFaultyPower and + /// DetectedFaultyPower, so TotalFaultyPower is not simply their sum. + pub total_faulty_power: PowerPair, +} + +/// Static information about miner +#[derive(Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct MinerInfo { + /// Account that owns this miner + /// - Income and returned collateral are paid to this address + /// - This address is also allowed to change the worker address for the miner + pub owner: Address, + + /// Worker account for this miner + /// This will be the key that is used to sign blocks created by this miner, and + /// sign messages sent on behalf of this miner to commit sectors, submit PoSts, and + /// other day to day miner activities + pub worker: Address, + + /// Additional addresses that are permitted to submit messages controlling this actor (optional). + pub control_addresses: Vec
, // Must all be ID addresses. + + /// Optional worker key to update at an epoch + pub pending_worker_key: Option, + + /// Libp2p identity that should be used when connecting to this miner + #[serde(with = "strict_bytes")] + pub peer_id: Vec, + + /// Vector of byte arrays representing Libp2p multi-addresses used for establishing a connection with this miner. + pub multi_address: Vec, + + /// The proof type used by this miner for sealing sectors. + pub window_post_proof_type: RegisteredPoStProof, + + /// Amount of space in each sector committed to the network by this miner + pub sector_size: SectorSize, + + /// The number of sectors in each Window PoSt partition (proof). + /// This is computed from the proof type and represented here redundantly. + pub window_post_partition_sectors: u64, + + /// The next epoch this miner is eligible for certain permissioned actor methods + /// and winning block elections as a result of being reported for a consensus fault. + pub consensus_fault_elapsed: ChainEpoch, + + /// A proposed new owner account for this miner. + /// Must be confirmed by a message from the pending address itself. + pub pending_owner_address: Option
, + + /// Account for receive miner benefits, withdraw on miner must send to this address, + /// set owner address by default when create miner + pub beneficiary: Address, + + /// beneficiary's total quota, how much quota has been withdraw, + /// and when this beneficiary expired + pub beneficiary_term: BeneficiaryTerm, + + /// A proposal new beneficiary message for this miner + pub pending_beneficiary_term: Option, +} + +impl MinerInfo { + pub fn new( + owner: ActorID, + worker: ActorID, + control_addresses: Vec, + peer_id: Vec, + multi_address: Vec, + window_post_proof_type: RegisteredPoStProof, + ) -> Result { + let sector_size = window_post_proof_type + .sector_size() + .map_err(|e| actor_error_v16!(illegal_argument, "invalid sector size: {}", e))?; + + let window_post_partition_sectors = window_post_proof_type + .window_post_partitions_sector() + .map_err(|e| actor_error_v16!(illegal_argument, "invalid partition sectors: {}", e))?; + + Ok(Self { + owner: Address::new_id(owner), + worker: Address::new_id(worker), + control_addresses: control_addresses + .into_iter() + .map(Address::new_id) + .collect_vec(), + + pending_worker_key: None, + beneficiary: Address::new_id(owner), + beneficiary_term: BeneficiaryTerm::default(), + pending_beneficiary_term: None, + peer_id, + multi_address, + window_post_proof_type, + sector_size, + window_post_partition_sectors, + consensus_fault_elapsed: EPOCH_UNDEFINED, + pending_owner_address: None, + }) + } +} diff --git a/actors/miner/src/v16/termination.rs b/actors/miner/src/v16/termination.rs new file mode 100644 index 00000000..aeb1c568 --- /dev/null +++ b/actors/miner/src/v16/termination.rs @@ -0,0 +1,52 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::collections::BTreeMap; +use std::ops::AddAssign; + +use fvm_ipld_bitfield::BitField; +use fvm_shared4::clock::ChainEpoch; + +#[derive(Default)] +pub struct TerminationResult { + /// Sectors maps epochs at which sectors 
expired, to bitfields of sector numbers. + pub sectors: BTreeMap, + pub partitions_processed: u64, + pub sectors_processed: u64, +} + +impl AddAssign for TerminationResult { + #[allow(clippy::suspicious_op_assign_impl)] + fn add_assign(&mut self, rhs: Self) { + self.partitions_processed += rhs.partitions_processed; + self.sectors_processed += rhs.sectors_processed; + + for (epoch, new_sectors) in rhs.sectors { + self.sectors + .entry(epoch) + .and_modify(|sectors| *sectors |= &new_sectors) + .or_insert(new_sectors); + } + } +} + +impl TerminationResult { + pub fn new() -> Self { + Default::default() + } + + /// Returns true if we're below the partition/sector limit. Returns false if + /// we're at (or above) the limit. + pub fn below_limit(&self, partition_limit: u64, sector_limit: u64) -> bool { + self.partitions_processed < partition_limit && self.sectors_processed < sector_limit + } + + pub fn is_empty(&self) -> bool { + self.sectors_processed == 0 + } + + pub fn iter(&self) -> impl Iterator { + // The btreemap is already sorted. 
+ self.sectors.iter().map(|(&epoch, bf)| (epoch, bf)) + } +} diff --git a/actors/miner/src/v16/types.rs b/actors/miner/src/v16/types.rs new file mode 100644 index 00000000..47284d9e --- /dev/null +++ b/actors/miner/src/v16/types.rs @@ -0,0 +1,681 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use cid::Cid; +use fvm_ipld_bitfield::BitField; +use fvm_ipld_encoding::{strict_bytes, BytesDe}; +use fvm_ipld_encoding::{tuple::*, RawBytes}; +use fvm_shared4::address::Address; +use fvm_shared4::bigint::bigint_ser; +use fvm_shared4::clock::ChainEpoch; +use fvm_shared4::deal::DealID; +use fvm_shared4::econ::TokenAmount; +use fvm_shared4::piece::PaddedPieceSize; +use fvm_shared4::randomness::Randomness; +use fvm_shared4::sector::{ + PoStProof, RegisteredAggregateProof, RegisteredPoStProof, RegisteredSealProof, + RegisteredUpdateProof, SectorNumber, SectorSize, StoragePower, +}; +use fvm_shared4::ActorID; +use serde::{Deserialize, Serialize}; + +use fil_actors_shared::v16::reward::FilterEstimate; +use fil_actors_shared::v16::{BatchReturn, DealWeight}; + +use crate::v16::commd::CompactCommD; +use crate::v16::ext::verifreg::AllocationID; +use crate::v16::ext::verifreg::ClaimID; + +use super::beneficiary::*; + +pub type CronEvent = i64; + +pub const CRON_EVENT_WORKER_KEY_CHANGE: CronEvent = 0; +pub const CRON_EVENT_PROVING_DEADLINE: CronEvent = 1; +pub const CRON_EVENT_PROCESS_EARLY_TERMINATIONS: CronEvent = 2; + +/// Storage miner actor constructor params are defined here so the power actor can send them to the init actor +/// to instantiate miners. +#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct MinerConstructorParams { + pub owner: Address, + pub worker: Address, + pub control_addresses: Vec
, + pub window_post_proof_type: RegisteredPoStProof, + #[serde(with = "strict_bytes")] + pub peer_id: Vec, + pub multi_addresses: Vec, +} + +#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct CronEventPayload { + pub event_type: i64, +} + +#[derive(Debug)] +pub struct PartitionKey { + pub deadline: u64, + pub partition: u64, +} + +#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct GetControlAddressesReturn { + pub owner: Address, + pub worker: Address, + pub control_addresses: Vec
, +} + +#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct ChangeWorkerAddressParams { + pub new_worker: Address, + pub new_control_addresses: Vec
, +} + +#[derive(Serialize_tuple, Deserialize_tuple)] +#[serde(transparent)] +pub struct ChangeOwnerAddressParams { + pub new_owner: Address, +} + +#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct ChangePeerIDParams { + #[serde(with = "strict_bytes")] + pub new_id: Vec, +} + +#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct ChangeMultiaddrsParams { + pub new_multi_addrs: Vec, +} + +#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct InternalSectorSetupForPresealParams { + pub sectors: Vec, + pub reward_smoothed: FilterEstimate, + #[serde(with = "bigint_ser")] + pub reward_baseline_power: StoragePower, + pub quality_adj_power_smoothed: FilterEstimate, +} + +#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct DeferredCronEventParams { + #[serde(with = "strict_bytes")] + pub event_payload: Vec, + pub reward_smoothed: FilterEstimate, + pub quality_adj_power_smoothed: FilterEstimate, +} + +#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct PoStPartition { + /// Partitions are numbered per-deadline, from zero. + pub index: u64, + /// Sectors skipped while proving that weren't already declared faulty. + pub skipped: BitField, +} + +/// Information submitted by a miner to provide a Window PoSt. +#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct SubmitWindowedPoStParams { + /// The deadline index which the submission targets. + pub deadline: u64, + /// The partitions being proven. + pub partitions: Vec, + /// Array of proofs, one per distinct registered proof type present in the sectors being proven. + /// In the usual case of a single proof type, this array will always have a single element (independent of number of partitions). + pub proofs: Vec, + /// The epoch at which these proofs is being committed to a particular chain. + pub chain_commit_epoch: ChainEpoch, + /// The ticket randomness on the chain at the `chain_commit_epoch` on the chain this post is committed to. 
+ pub chain_commit_rand: Randomness, +} + +// Deprecated as of FIP 0084 -- kept for legacy testing +#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct ProveCommitSectorParams { + pub sector_number: SectorNumber, + pub proof: RawBytes, +} + +// Note no UnsealedCID because it must be "zero" data. +#[derive(Clone, Debug, Eq, PartialEq, Serialize_tuple, Deserialize_tuple)] +pub struct SectorNIActivationInfo { + pub sealing_number: SectorNumber, // Sector number used to generate replica id + pub sealer_id: ActorID, // Must be set to ID of receiving actor for now + pub sealed_cid: Cid, // CommR + pub sector_number: SectorNumber, // Unique id of sector in actor state + pub seal_rand_epoch: ChainEpoch, + pub expiration: ChainEpoch, +} + +#[derive(Clone, Debug, Eq, PartialEq, Serialize_tuple, Deserialize_tuple)] +pub struct ProveCommitSectorsNIParams { + pub sectors: Vec, // Information about sealing of each sector + pub aggregate_proof: RawBytes, // Aggregate proof for all sectors + pub seal_proof_type: RegisteredSealProof, // Proof type for each seal (must be an NI-PoRep variant) + pub aggregate_proof_type: RegisteredAggregateProof, // Proof type for aggregation + pub proving_deadline: u64, // The Window PoST deadline index at which to schedule the new sectors + pub require_activation_success: bool, // Whether to abort if any sector activation fails +} + +#[derive(Clone, Debug, Eq, PartialEq, Serialize_tuple, Deserialize_tuple)] +pub struct ProveCommitSectors3Params { + // Activation manifest for each sector being proven. + pub sector_activations: Vec, + // Proofs for each sector, parallel to activation manifests. + // Exactly one of sector_proofs or aggregate_proof must be non-empty. + pub sector_proofs: Vec, + // Aggregate proof for all sectors. + // Exactly one of sector_proofs or aggregate_proof must be non-empty. + pub aggregate_proof: RawBytes, + // The proof type for the aggregate proof (must be None if no aggregate proof). 
+ pub aggregate_proof_type: Option, + // Whether to abort if any sector activation fails. + pub require_activation_success: bool, + // Whether to abort if any notification returns a non-zero exit code. + pub require_notification_success: bool, +} + +// Data to activate a commitment to one sector and its data. +// All pieces of data must be specified, whether or not not claiming a FIL+ activation or being +// notified to a data consumer. +// An implicit zero piece fills any remaining sector capacity. +// Note: we should consider fast tracking the special case where there is only +// one piece not claiming or notifying other actors to allow an empty piece vector. +// We could interpret this as a single piece, size == sector size, cid == commD, empty allocation empty notify vector +#[derive(Clone, Debug, Eq, PartialEq, Serialize_tuple, Deserialize_tuple)] +pub struct SectorActivationManifest { + // Sector to be activated. + pub sector_number: SectorNumber, + // Pieces comprising the sector content, in order. + pub pieces: Vec, +} + +#[derive(Clone, Debug, Eq, PartialEq, Serialize_tuple, Deserialize_tuple)] +pub struct PieceActivationManifest { + // Piece data commitment. + pub cid: Cid, + // Piece size. + pub size: PaddedPieceSize, + // Identifies a verified allocation to be claimed. + pub verified_allocation_key: Option, + // Synchronous notifications to be sent to other actors after activation. + pub notify: Vec, +} + +#[derive(Clone, Debug, Eq, PartialEq, Serialize_tuple, Deserialize_tuple)] +pub struct VerifiedAllocationKey { + pub client: ActorID, + pub id: AllocationID, +} + +#[derive(Clone, Debug, Eq, PartialEq, Serialize_tuple, Deserialize_tuple)] +pub struct DataActivationNotification { + // Actor to be notified. + pub address: Address, + // Data to send in the notification. 
+ pub payload: RawBytes, +} + +#[derive(Clone, Debug, Eq, PartialEq, Serialize_tuple, Deserialize_tuple)] +#[serde(transparent)] +pub struct ProveCommitSectors3Return { + pub activation_results: BatchReturn, +} + +#[derive(Clone, Debug, Eq, PartialEq, Serialize_tuple, Deserialize_tuple)] +#[serde(transparent)] +pub struct ProveCommitSectorsNIReturn { + pub activation_results: BatchReturn, +} + +#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct CheckSectorProvenParams { + pub sector_number: SectorNumber, +} + +#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct ExtendSectorExpirationParams { + pub extensions: Vec, +} + +#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct ExpirationExtension { + pub deadline: u64, + pub partition: u64, + pub sectors: BitField, + pub new_expiration: ChainEpoch, +} + +#[derive(Clone, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct ExtendSectorExpiration2Params { + pub extensions: Vec, +} + +#[derive(Clone, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct SectorClaim { + pub sector_number: SectorNumber, + pub maintain_claims: Vec, + pub drop_claims: Vec, +} + +#[derive(Clone, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct ExpirationExtension2 { + pub deadline: u64, + pub partition: u64, + // IDs of sectors without FIL+ claims + pub sectors: BitField, + pub sectors_with_claims: Vec, + pub new_expiration: ChainEpoch, +} + +// From is straightforward when there are no claim bearing sectors +impl From<&ExpirationExtension> for ExpirationExtension2 { + fn from(e: &ExpirationExtension) -> Self { + ExpirationExtension2 { + deadline: e.deadline, + partition: e.partition, + sectors: e.sectors.clone(), + sectors_with_claims: vec![], + new_expiration: e.new_expiration, + } + } +} + +#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct TerminateSectorsParams { + pub terminations: Vec, +} + +#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct TerminationDeclaration { + pub deadline: u64, + pub 
partition: u64, + pub sectors: BitField, +} + +#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct TerminateSectorsReturn { + // Set to true if all early termination work has been completed. When + // false, the miner may choose to repeatedly invoke TerminateSectors + // with no new sectors to process the remainder of the pending + // terminations. While pending terminations are outstanding, the miner + // will not be able to withdraw funds. + pub done: bool, +} + +#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct DeclareFaultsParams { + pub faults: Vec, +} + +#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct FaultDeclaration { + /// The deadline to which the faulty sectors are assigned, in range [0..WPoStPeriodDeadlines) + pub deadline: u64, + /// Partition index within the deadline containing the faulty sectors. + pub partition: u64, + /// Sectors in the partition being declared faulty. + pub sectors: BitField, +} + +#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct DeclareFaultsRecoveredParams { + pub recoveries: Vec, +} + +#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct RecoveryDeclaration { + /// The deadline to which the recovered sectors are assigned, in range [0..WPoStPeriodDeadlines) + pub deadline: u64, + /// Partition index within the deadline containing the recovered sectors. + pub partition: u64, + /// Sectors in the partition being declared recovered. 
+ pub sectors: BitField, +} + +#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct CompactPartitionsParams { + pub deadline: u64, + pub partitions: BitField, +} + +#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct CompactSectorNumbersParams { + pub mask_sector_numbers: BitField, +} + +#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct ReportConsensusFaultParams { + #[serde(with = "strict_bytes")] + pub header1: Vec, + #[serde(with = "strict_bytes")] + pub header2: Vec, + #[serde(with = "strict_bytes")] + pub header_extra: Vec, +} + +#[derive(Clone, Serialize_tuple, Deserialize_tuple)] +pub struct WithdrawBalanceParams { + pub amount_requested: TokenAmount, +} + +#[derive(Serialize_tuple, Deserialize_tuple)] +#[serde(transparent)] +pub struct WithdrawBalanceReturn { + pub amount_withdrawn: TokenAmount, +} + +#[derive(Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct WorkerKeyChange { + /// Must be an ID address + pub new_worker: Address, + pub effective_at: ChainEpoch, +} + +#[derive(Debug, Default, PartialEq, Eq, Clone, Serialize_tuple, Deserialize_tuple)] +pub struct PreCommitSectorParams { + pub seal_proof: RegisteredSealProof, + pub sector_number: SectorNumber, + /// CommR + pub sealed_cid: Cid, + pub seal_rand_epoch: ChainEpoch, + pub deal_ids: Vec, + pub expiration: ChainEpoch, + /// Deprecated: + /// Whether to replace a "committed capacity" no-deal sector (requires non-empty DealIDs) + pub replace_capacity: bool, + /// Deprecated: + /// The committed capacity sector to replace, and its deadline/partition location + pub replace_sector_deadline: u64, + pub replace_sector_partition: u64, + pub replace_sector_number: SectorNumber, +} + +#[derive(Debug, PartialEq, Eq, Clone, Serialize_tuple, Deserialize_tuple)] +pub struct PreCommitSectorBatchParams { + pub sectors: Vec, +} + +#[derive(Debug, PartialEq, Eq, Clone, Serialize_tuple, Deserialize_tuple)] +pub struct PreCommitSectorBatchParams2 { + pub sectors: Vec, +} + 
+#[derive(Debug, Default, PartialEq, Eq, Clone, Serialize_tuple, Deserialize_tuple)] +pub struct SectorPreCommitInfo { + pub seal_proof: RegisteredSealProof, + pub sector_number: SectorNumber, + /// CommR + pub sealed_cid: Cid, + pub seal_rand_epoch: ChainEpoch, + pub deal_ids: Vec, + pub expiration: ChainEpoch, + /// CommD + pub unsealed_cid: CompactCommD, +} + +/// Information stored on-chain for a pre-committed sector. +#[derive(Debug, PartialEq, Eq, Clone, Serialize_tuple, Deserialize_tuple)] +pub struct SectorPreCommitOnChainInfo { + pub info: SectorPreCommitInfo, + pub pre_commit_deposit: TokenAmount, + pub pre_commit_epoch: ChainEpoch, +} + +/// Information stored on-chain for a proven sector. +#[derive(Debug, Default, PartialEq, Eq, Clone, Serialize_tuple, Deserialize_tuple)] +pub struct SectorOnChainInfo { + pub sector_number: SectorNumber, + /// The seal proof type implies the PoSt proofs + pub seal_proof: RegisteredSealProof, + /// CommR + pub sealed_cid: Cid, + pub deprecated_deal_ids: Vec, + /// Epoch during which the sector proof was accepted + pub activation: ChainEpoch, + /// Epoch during which the sector expires + pub expiration: ChainEpoch, + /// Integral of active deals over sector lifetime + #[serde(with = "bigint_ser")] + pub deal_weight: DealWeight, + /// Integral of active verified deals over sector lifetime + #[serde(with = "bigint_ser")] + pub verified_deal_weight: DealWeight, + /// Pledge collected to commit this sector + pub initial_pledge: TokenAmount, + /// Expected one day projection of reward for sector computed at activation / update / extension time + pub expected_day_reward: TokenAmount, + /// Expected twenty day projection of reward for sector computed at activation / update / extension time + pub expected_storage_pledge: TokenAmount, + /// Epoch at which this sector's power was most recently updated + pub power_base_epoch: ChainEpoch, + /// Maximum day reward this sector has had in previous iterations (zero for brand new sectors) 
+ pub replaced_day_reward: TokenAmount, + /// The original SealedSectorCID, only gets set on the first ReplicaUpdate + pub sector_key_cid: Option, + /// Additional flags, see [`SectorOnChainInfoFlags`] + pub flags: SectorOnChainInfoFlags, +} + +bitflags::bitflags! { + #[derive(Serialize, Deserialize, Clone, Copy, PartialEq, Eq, Default, Debug)] + #[serde(transparent)] + pub struct SectorOnChainInfoFlags: u32 { + /// QA power mechanism introduced in FIP-0045 + const SIMPLE_QA_POWER = 0x1; + } +} + +#[derive(Debug, PartialEq, Eq, Copy, Clone, Serialize_tuple, Deserialize_tuple)] +pub struct Fault { + pub miner: Address, + pub fault: ChainEpoch, +} + +// * Added in v2 -- param was previously a big int. +#[derive(Debug, Serialize_tuple, Deserialize_tuple)] +pub struct ApplyRewardParams { + pub reward: TokenAmount, + pub penalty: TokenAmount, +} + +#[derive(Debug, Clone, PartialEq, Eq, Copy, Serialize_tuple, Deserialize_tuple)] +pub struct DisputeWindowedPoStParams { + pub deadline: u64, + pub post_index: u64, // only one is allowed at a time to avoid loading too many sector infos. +} + +#[derive(Debug, Clone, Serialize_tuple, Deserialize_tuple)] +pub struct ProveCommitAggregateParams { + pub sector_numbers: BitField, + pub aggregate_proof: RawBytes, +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct ReplicaUpdate { + pub sector_number: SectorNumber, + pub deadline: u64, + pub partition: u64, + pub new_sealed_cid: Cid, + pub deals: Vec, + pub update_proof_type: RegisteredUpdateProof, + pub replica_proof: RawBytes, +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct ProveReplicaUpdatesParams { + pub updates: Vec, +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct ProveReplicaUpdates3Params { + pub sector_updates: Vec, + // Proofs for each sector, parallel to activation manifests. 
+ // Exactly one of sector_proofs or aggregate_proof must be non-empty. + pub sector_proofs: Vec, + // Aggregate proof for all sectors. + // Exactly one of sector_proofs or aggregate_proof must be non-empty. + pub aggregate_proof: RawBytes, + // The proof type for all sector update proofs, individually or before aggregation. + pub update_proofs_type: RegisteredUpdateProof, + // The proof type for the aggregate proof (must be None if no aggregate proof). + pub aggregate_proof_type: Option, + // Whether to abort if any sector update activation fails. + pub require_activation_success: bool, + // Whether to abort if any notification returns a non-zero exit code. + pub require_notification_success: bool, +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct SectorUpdateManifest { + pub sector: SectorNumber, + pub deadline: u64, + pub partition: u64, + pub new_sealed_cid: Cid, // CommR + // Declaration of all pieces that make up the new sector data, in order. + // Until we support re-snap, pieces must all be new because the sector was previously empty. + // Implicit "zero" piece fills any remaining capacity. + // These pieces imply the new unsealed sector CID. 
+ pub pieces: Vec, +} + +#[derive(Clone, Debug, Eq, PartialEq, Serialize_tuple, Deserialize_tuple)] +#[serde(transparent)] +pub struct ProveReplicaUpdates3Return { + pub activation_results: BatchReturn, +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct ChangeBeneficiaryParams { + pub new_beneficiary: Address, + pub new_quota: TokenAmount, + pub new_expiration: ChainEpoch, +} + +impl ChangeBeneficiaryParams { + pub fn new(beneficiary: Address, quota: TokenAmount, expiration: ChainEpoch) -> Self { + ChangeBeneficiaryParams { + new_beneficiary: beneficiary, + new_quota: quota, + new_expiration: expiration, + } + } +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct ActiveBeneficiary { + pub beneficiary: Address, + pub term: BeneficiaryTerm, +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct GetBeneficiaryReturn { + pub active: ActiveBeneficiary, + pub proposed: Option, +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct GetOwnerReturn { + pub owner: Address, + pub proposed: Option
, +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +#[serde(transparent)] +pub struct IsControllingAddressParam { + pub address: Address, +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +#[serde(transparent)] +pub struct IsControllingAddressReturn { + pub is_controlling: bool, +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +#[serde(transparent)] +pub struct GetSectorSizeReturn { + pub sector_size: SectorSize, +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +#[serde(transparent)] +pub struct GetAvailableBalanceReturn { + pub available_balance: TokenAmount, +} + +#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct GetVestingFundsReturn { + pub vesting_funds: Vec<(ChainEpoch, TokenAmount)>, +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct GetPeerIDReturn { + #[serde(with = "strict_bytes")] + pub peer_id: Vec, +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct GetMultiaddrsReturn { + pub multi_addrs: Vec, +} + +// Notification of change committed to one or more sectors. +// The relevant state must be already committed so the receiver can observe any impacts +// at the sending miner actor. +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +#[serde(transparent)] +pub struct SectorContentChangedParams { + // Distinct sectors with changed content. + pub sectors: Vec, +} + +// Description of changes to one sector's content. +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct SectorChanges { + // Identifier of sector being updated. + pub sector: SectorNumber, + // Minimum epoch until which the data is committed to the sector. + // Note the sector may later be extended without necessarily another notification. 
+ pub minimum_commitment_epoch: ChainEpoch, + // Information about some pieces added to (or retained in) the sector. + // This may be only a subset of sector content. + // Inclusion here does not mean the piece was definitely absent previously. + // Exclusion here does not mean a piece has been removed since a prior notification. + pub added: Vec, +} + +// Description of a piece of data committed to a sector. +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct PieceChange { + pub data: Cid, + pub size: PaddedPieceSize, + // A receiver-specific identifier. + // E.g. an encoded deal ID which the provider claims this piece satisfies. + pub payload: RawBytes, +} + +// For each piece in each sector, the notifee returns an exit code and +// (possibly-empty) result data. +// The miner actor will pass through results to its caller. +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +#[serde(transparent)] +pub struct SectorContentChangedReturn { + // A result for each sector that was notified, in the same order. + pub sectors: Vec, +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +#[serde(transparent)] +pub struct SectorReturn { + // A result for each piece for the sector that was notified, in the same order. + pub added: Vec, +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +#[serde(transparent)] +pub struct PieceReturn { + // Indicates whether the receiver accepted the notification. + // The caller is free to ignore this, but may chose to abort and roll back. 
+ pub accepted: bool, +} diff --git a/actors/miner/src/v16/vesting_state.rs b/actors/miner/src/v16/vesting_state.rs new file mode 100644 index 00000000..f770f849 --- /dev/null +++ b/actors/miner/src/v16/vesting_state.rs @@ -0,0 +1,144 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::{iter, mem}; + +use fvm_ipld_encoding::tuple::*; +use fvm_shared4::clock::ChainEpoch; +use fvm_shared4::econ::TokenAmount; +use itertools::{EitherOrBoth, Itertools}; +use num_traits::Zero; + +use super::{QuantSpec, VestSpec}; + +// Represents miner funds that will vest at the given epoch. +#[derive(Debug, Serialize_tuple, Deserialize_tuple)] +pub struct VestingFund { + pub epoch: ChainEpoch, + pub amount: TokenAmount, +} + +/// Represents the vesting table state for the miner. +/// It is a slice of (VestingEpoch, VestingAmount). +/// The slice will always be sorted by the VestingEpoch. +#[derive(Serialize_tuple, Deserialize_tuple, Default)] +pub struct VestingFunds { + pub funds: Vec, +} + +impl VestingFunds { + pub fn new() -> Self { + Default::default() + } + + pub fn unlock_vested_funds(&mut self, current_epoch: ChainEpoch) -> TokenAmount { + // TODO: the funds are sorted by epoch, so we could do a binary search here + let i = self + .funds + .iter() + .position(|fund| fund.epoch >= current_epoch) + .unwrap_or(self.funds.len()); + + self.funds.drain(..i).map(|f| f.amount).sum() + } + + pub fn add_locked_funds( + &mut self, + current_epoch: ChainEpoch, + vesting_sum: &TokenAmount, + proving_period_start: ChainEpoch, + spec: &VestSpec, + ) { + // Quantization is aligned with when regular cron will be invoked, in the last epoch of deadlines. + let vest_begin = current_epoch + spec.initial_delay; // Nothing unlocks here, this is just the start of the clock. 
+ let mut vested_so_far = TokenAmount::zero(); + + let mut epoch = vest_begin; + + // Create an iterator for the vesting schedule we're going to "join" with the current + // vesting schedule. + let new_funds = iter::from_fn(|| { + if vested_so_far >= *vesting_sum { + return None; + } + + epoch += spec.step_duration; + + let vest_epoch = QuantSpec { + unit: spec.quantization, + offset: proving_period_start, + } + .quantize_up(epoch); + + let elapsed = vest_epoch - vest_begin; + let target_vest = if elapsed < spec.vest_period { + // Linear vesting + (vesting_sum * elapsed).div_floor(spec.vest_period) + } else { + vesting_sum.clone() + }; + + let vest_this_time = &target_vest - &vested_so_far; + vested_so_far = target_vest; + + Some(VestingFund { + epoch: vest_epoch, + amount: vest_this_time, + }) + }); + + // Take the old funds array and replace it with a new one. + let funds_len = self.funds.len(); + let old_funds = mem::replace(&mut self.funds, Vec::with_capacity(funds_len)); + + // Fill back in the funds array, merging existing and new schedule. 
+ self.funds.extend( + old_funds + .into_iter() + .merge_join_by(new_funds, |a, b| a.epoch.cmp(&b.epoch)) + .map(|item| match item { + EitherOrBoth::Left(a) => a, + EitherOrBoth::Right(b) => b, + EitherOrBoth::Both(a, b) => VestingFund { + epoch: a.epoch, + amount: a.amount + b.amount, + }, + }), + ); + } + + pub fn unlock_unvested_funds( + &mut self, + current_epoch: ChainEpoch, + target: &TokenAmount, + ) -> TokenAmount { + let mut amount_unlocked = TokenAmount::zero(); + let mut last = None; + let mut start = 0; + for (i, vf) in self.funds.iter_mut().enumerate() { + if &amount_unlocked >= target { + break; + } + + if vf.epoch >= current_epoch { + let unlock_amount = std::cmp::min(target - &amount_unlocked, vf.amount.clone()); + amount_unlocked += &unlock_amount; + let new_amount = &vf.amount - &unlock_amount; + + if new_amount.is_zero() { + last = Some(i); + } else { + vf.amount = new_amount; + } + } else { + start = i + 1; + } + } + + if let Some(end) = last { + self.funds.drain(start..=end); + } + + amount_unlocked + } +} diff --git a/actors/multisig/src/lib.rs b/actors/multisig/src/lib.rs index f95f7f59..26e0c7c7 100644 --- a/actors/multisig/src/lib.rs +++ b/actors/multisig/src/lib.rs @@ -7,5 +7,6 @@ pub mod v12; pub mod v13; pub mod v14; pub mod v15; +pub mod v16; pub mod v8; pub mod v9; diff --git a/actors/multisig/src/v16/mod.rs b/actors/multisig/src/v16/mod.rs new file mode 100644 index 00000000..b2800997 --- /dev/null +++ b/actors/multisig/src/v16/mod.rs @@ -0,0 +1,28 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use fvm_shared4::METHOD_CONSTRUCTOR; +use num_derive::FromPrimitive; + +pub use self::state::*; +pub use self::types::*; + +mod state; +mod types; + +/// Multisig actor methods available +#[derive(FromPrimitive)] +#[repr(u64)] +pub enum Method { + Constructor = METHOD_CONSTRUCTOR, + Propose = 2, + Approve = 3, + Cancel = 4, + AddSigner = 5, + RemoveSigner = 6, + SwapSigner = 7, + 
ChangeNumApprovalsThreshold = 8, + LockBalance = 9, + // Method numbers derived from FRC-0042 standards + UniversalReceiverHook = frc42_dispatch::method_hash!("Receive"), +} diff --git a/actors/multisig/src/v16/state.rs b/actors/multisig/src/v16/state.rs new file mode 100644 index 00000000..7b8b6b8a --- /dev/null +++ b/actors/multisig/src/v16/state.rs @@ -0,0 +1,152 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use cid::Cid; +use fvm_ipld_blockstore::Blockstore; +use fvm_ipld_encoding::tuple::*; +use fvm_shared4::address::Address; +use fvm_shared4::bigint::BigInt; +use fvm_shared4::bigint::Integer; +use fvm_shared4::clock::ChainEpoch; +use fvm_shared4::econ::TokenAmount; +use indexmap::IndexMap; +use num_traits::Zero; + +use fil_actors_shared::actor_error_v16; +use fil_actors_shared::v16::{ActorError, Config, Map2, DEFAULT_HAMT_CONFIG}; + +use super::types::Transaction; +use super::TxnID; + +pub type PendingTxnMap = Map2; +pub const PENDING_TXN_CONFIG: Config = DEFAULT_HAMT_CONFIG; + +/// Multisig actor state +#[derive(Serialize_tuple, Deserialize_tuple, Clone, Debug)] +pub struct State { + pub signers: Vec
, + pub num_approvals_threshold: u64, + pub next_tx_id: TxnID, + + // Linear unlock + pub initial_balance: TokenAmount, + pub start_epoch: ChainEpoch, + pub unlock_duration: ChainEpoch, + + pub pending_txs: Cid, +} + +impl State { + /// Checks if `address` is in the list of signers + pub fn is_signer(&self, address: &Address) -> bool { + self.signers.contains(address) + } + + /// Set locked amount in multisig state. + pub fn set_locked( + &mut self, + start_epoch: ChainEpoch, + unlock_duration: ChainEpoch, + locked_amount: TokenAmount, + ) { + self.start_epoch = start_epoch; + self.unlock_duration = unlock_duration; + self.initial_balance = locked_amount; + } + + /// Returns amount locked in multisig contract + pub fn amount_locked(&self, elapsed_epoch: ChainEpoch) -> TokenAmount { + if elapsed_epoch >= self.unlock_duration { + return TokenAmount::zero(); + } + if elapsed_epoch <= 0 { + return self.initial_balance.clone(); + } + + let remaining_lock_duration = self.unlock_duration - elapsed_epoch; + + // locked = ceil(InitialBalance * remainingLockDuration / UnlockDuration) + let numerator: TokenAmount = &self.initial_balance * remaining_lock_duration; + let denominator = BigInt::from(self.unlock_duration); + + TokenAmount::from_atto(numerator.atto().div_ceil(&denominator)) + } + + /// Iterates all pending transactions and removes an address from each list of approvals, + /// if present. If an approval list becomes empty, the pending transaction is deleted. 
+ pub fn purge_approvals( + &mut self, + store: &BS, + addr: &Address, + ) -> Result<(), ActorError> { + let mut txns = + PendingTxnMap::load(store, &self.pending_txs, PENDING_TXN_CONFIG, "pending txns")?; + + // Identify transactions that need updating + let mut txn_ids_to_purge = IndexMap::new(); + txns.for_each(|tx_id, txn: &Transaction| { + for approver in txn.approved.iter() { + if approver == addr { + txn_ids_to_purge.insert(tx_id, txn.clone()); + } + } + Ok(()) + })?; + + // Update or remove those transactions. + for (tx_id, mut txn) in txn_ids_to_purge { + txn.approved.retain(|approver| approver != addr); + + if !txn.approved.is_empty() { + txns.set(&tx_id, txn)?; + } else { + txns.delete(&tx_id)?; + } + } + + self.pending_txs = txns.flush()?; + Ok(()) + } + + pub(crate) fn _check_available( + &self, + balance: TokenAmount, + amount_to_spend: &TokenAmount, + curr_epoch: ChainEpoch, + ) -> Result<(), ActorError> { + if amount_to_spend.is_negative() { + return Err(actor_error_v16!( + illegal_argument, + "amount to spend {} less than zero", + amount_to_spend + )); + } + if &balance < amount_to_spend { + return Err(actor_error_v16!( + insufficient_funds, + "current balance {} less than amount to spend {}", + balance, + amount_to_spend + )); + } + + if amount_to_spend.is_zero() { + // Always permit a transaction that sends no value, + // even if the lockup exceeds the current balance. 
+ return Ok(()); + } + + let remaining_balance = balance - amount_to_spend; + let amount_locked = self.amount_locked(curr_epoch - self.start_epoch); + if remaining_balance < amount_locked { + return Err(actor_error_v16!( + insufficient_funds, + "actor balance {} if spent {} would be less than required locked amount {}", + remaining_balance, + amount_to_spend, + amount_locked + )); + } + Ok(()) + } +} diff --git a/actors/multisig/src/v16/types.rs b/actors/multisig/src/v16/types.rs new file mode 100644 index 00000000..e81f1409 --- /dev/null +++ b/actors/multisig/src/v16/types.rs @@ -0,0 +1,158 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::fmt::Display; + +use fvm_ipld_encoding::tuple::*; +use fvm_ipld_encoding::{strict_bytes, RawBytes}; +use fvm_shared4::address::Address; +use fvm_shared4::clock::ChainEpoch; +use fvm_shared4::econ::TokenAmount; +use fvm_shared4::error::ExitCode; +use fvm_shared4::MethodNum; +use serde::{Deserialize, Serialize}; + +use fil_actors_shared::v16::MapKey; + +/// SignersMax is the maximum number of signers allowed in a multisig. If more +/// are required, please use a combining tree of multisigs. +pub const SIGNERS_MAX: usize = 256; + +/// Transaction ID type +#[derive(Debug, Clone, Copy, Default, Serialize, Deserialize, Hash, Eq, PartialEq, PartialOrd)] +#[serde(transparent)] +pub struct TxnID(pub i64); + +impl MapKey for TxnID { + fn from_bytes(b: &[u8]) -> Result { + i64::from_bytes(b).map(TxnID) + } + + fn to_bytes(&self) -> Result, String> { + self.0.to_bytes() + } +} + +impl Display for TxnID { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} + +/// Transaction type used in multisig actor +#[derive(Clone, PartialEq, Eq, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct Transaction { + pub to: Address, + pub value: TokenAmount, + pub method: MethodNum, + pub params: RawBytes, + + pub approved: Vec
, +} + +/// Data for a BLAKE2B-256 to be attached to methods referencing proposals via TXIDs. +/// Ensures the existence of a cryptographic reference to the original proposal. Useful +/// for offline signers and for protection when reorgs change a multisig TXID. +/// +/// Requester - The requesting multisig wallet member. +/// All other fields - From the "Transaction" struct. +#[derive(Serialize_tuple, Debug)] +pub struct ProposalHashData<'a> { + pub requester: Option<&'a Address>, + pub to: &'a Address, + pub value: &'a TokenAmount, + pub method: &'a MethodNum, + pub params: &'a RawBytes, +} + +/// Constructor parameters for multisig actor. +#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct ConstructorParams { + pub signers: Vec
, + pub num_approvals_threshold: u64, + pub unlock_duration: ChainEpoch, + // * Added in v2 + pub start_epoch: ChainEpoch, +} + +/// Propose method call parameters. +#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct ProposeParams { + pub to: Address, + pub value: TokenAmount, + pub method: MethodNum, + pub params: RawBytes, +} + +/// Propose method call return. +#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct ProposeReturn { + /// TxnID is the ID of the proposed transaction. + pub txn_id: TxnID, + /// Applied indicates if the transaction was applied as opposed to proposed but not applied + /// due to lack of approvals. + pub applied: bool, + /// Code is the exitcode of the transaction, if Applied is false this field should be ignored. + pub code: ExitCode, + /// Ret is the return value of the transaction, if Applied is false this field should + /// be ignored. + pub ret: RawBytes, +} + +/// Parameters for approve and cancel multisig functions. +#[derive(Clone, PartialEq, Eq, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct TxnIDParams { + pub id: TxnID, + /// Optional hash of proposal to ensure an operation can only apply to a + /// specific proposal. + #[serde(with = "strict_bytes")] + pub proposal_hash: Vec, +} + +/// Parameters for approve and cancel multisig functions. +#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct ApproveReturn { + /// Applied indicates if the transaction was applied as opposed to proposed but not applied + /// due to lack of approvals + pub applied: bool, + /// Code is the exitcode of the transaction, if Applied is false this field should be ignored. + pub code: ExitCode, + /// Ret is the return value of the transaction, if Applied is false this field should + /// be ignored. + pub ret: RawBytes, +} + +/// Add signer params. +#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct AddSignerParams { + pub signer: Address, + pub increase: bool, +} + +/// Remove signer params. 
+#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct RemoveSignerParams { + pub signer: Address, + pub decrease: bool, +} + +/// Swap signer multisig method params +#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct SwapSignerParams { + pub from: Address, + pub to: Address, +} + +/// Propose method call parameters +#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct ChangeNumApprovalsThresholdParams { + pub new_threshold: u64, +} + +/// Lock balance call params. +#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct LockBalanceParams { + pub start_epoch: ChainEpoch, + pub unlock_duration: ChainEpoch, + pub amount: TokenAmount, +} diff --git a/actors/paych/src/lib.rs b/actors/paych/src/lib.rs index f95f7f59..26e0c7c7 100644 --- a/actors/paych/src/lib.rs +++ b/actors/paych/src/lib.rs @@ -7,5 +7,6 @@ pub mod v12; pub mod v13; pub mod v14; pub mod v15; +pub mod v16; pub mod v8; pub mod v9; diff --git a/actors/paych/src/v16/ext.rs b/actors/paych/src/v16/ext.rs new file mode 100644 index 00000000..9f2a14e6 --- /dev/null +++ b/actors/paych/src/v16/ext.rs @@ -0,0 +1,17 @@ +use fvm_ipld_encoding::strict_bytes; +use fvm_ipld_encoding::tuple::*; + +pub mod account { + use super::*; + + pub const AUTHENTICATE_MESSAGE_METHOD: u64 = + frc42_dispatch::method_hash!("AuthenticateMessage"); + + #[derive(Serialize_tuple, Deserialize_tuple)] + pub struct AuthenticateMessageParams { + #[serde(with = "strict_bytes")] + pub signature: Vec, + #[serde(with = "strict_bytes")] + pub message: Vec, + } +} diff --git a/actors/paych/src/v16/mod.rs b/actors/paych/src/v16/mod.rs new file mode 100644 index 00000000..1e42050f --- /dev/null +++ b/actors/paych/src/v16/mod.rs @@ -0,0 +1,25 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use fvm_shared4::error::ExitCode; +use fvm_shared4::METHOD_CONSTRUCTOR; +use num_derive::FromPrimitive; + +pub use self::state::*; +pub use self::types::*; + +pub mod ext; +mod state; +mod types; + +/// 
Payment Channel actor methods available +#[derive(FromPrimitive)] +#[repr(u64)] +pub enum Method { + Constructor = METHOD_CONSTRUCTOR, + UpdateChannelState = 2, + Settle = 3, + Collect = 4, +} + +pub const ERR_CHANNEL_STATE_UPDATE_AFTER_SETTLED: ExitCode = ExitCode::new(32); diff --git a/actors/paych/src/v16/state.rs b/actors/paych/src/v16/state.rs new file mode 100644 index 00000000..42cf64ad --- /dev/null +++ b/actors/paych/src/v16/state.rs @@ -0,0 +1,56 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use cid::Cid; +use fvm_ipld_encoding::tuple::*; +use fvm_shared4::address::Address; + +use fvm_shared4::clock::ChainEpoch; +use fvm_shared4::econ::TokenAmount; + +/// A given payment channel actor is established by `from` +/// to enable off-chain microtransactions to `to` address +/// to be reconciled and tallied on chain. +#[derive(Debug, Serialize_tuple, Deserialize_tuple, Clone)] +pub struct State { + /// Channel owner, who has funded the actor. + pub from: Address, + /// Recipient of payouts from channel. + pub to: Address, + /// Amount successfully redeemed through the payment channel, paid out on `Collect`. + pub to_send: TokenAmount, + /// Height at which the channel can be collected. + pub settling_at: ChainEpoch, + /// Height before which the channel `ToSend` cannot be collected. + pub min_settle_height: ChainEpoch, + /// Collections of lane states for the channel, maintained in ID order. + pub lane_states: Cid, // AMT +} + +impl State { + pub fn new(from: Address, to: Address, empty_arr_cid: Cid) -> Self { + Self { + from, + to, + to_send: Default::default(), + settling_at: 0, + min_settle_height: 0, + lane_states: empty_arr_cid, + } + } +} + +/// The Lane state tracks the latest (highest) voucher nonce used to merge the lane +/// as well as the amount it has already redeemed. 
+#[derive(Default, Clone, PartialEq, Eq, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct LaneState { + pub redeemed: TokenAmount, + pub nonce: u64, +} + +/// Specifies which `lane`s to be merged with what `nonce` on `channel_update` +#[derive(Default, Clone, Copy, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct Merge { + pub lane: u64, + pub nonce: u64, +} diff --git a/actors/paych/src/v16/types.rs b/actors/paych/src/v16/types.rs new file mode 100644 index 00000000..825ac776 --- /dev/null +++ b/actors/paych/src/v16/types.rs @@ -0,0 +1,128 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use fil_actors_shared::v16::network::EPOCHS_IN_HOUR; +use fvm_ipld_encoding::tuple::*; +use fvm_ipld_encoding::{strict_bytes, to_vec, Error, RawBytes}; +use fvm_shared4::address::Address; +use fvm_shared4::clock::ChainEpoch; +use fvm_shared4::crypto::signature::Signature; +use fvm_shared4::econ::TokenAmount; +use fvm_shared4::MethodNum; + +use super::Merge; + +/// Maximum number of lanes in a channel +pub const MAX_LANE: u64 = i64::MAX as u64; + +pub const SETTLE_DELAY: ChainEpoch = EPOCHS_IN_HOUR * 12; + +// Maximum byte length of a secret that can be submitted with a payment channel update. 
+pub const MAX_SECRET_SIZE: usize = 256; + +pub const LANE_STATES_AMT_BITWIDTH: u32 = 3; + +/// Constructor parameters for payment channel actor +#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct ConstructorParams { + pub from: Address, + pub to: Address, +} + +/// A voucher is sent by `from` to `to` off-chain in order to enable +/// `to` to redeem payments on-chain in the future +#[derive(Debug, Clone, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct SignedVoucher { + /// ChannelAddr is the address of the payment channel this signed voucher is valid for + pub channel_addr: Address, + /// Min epoch before which the voucher cannot be redeemed + pub time_lock_min: ChainEpoch, + /// Max epoch beyond which the voucher cannot be redeemed + /// set to 0 means no timeout + pub time_lock_max: ChainEpoch, + /// (optional) Used by `to` to validate + #[serde(with = "strict_bytes")] + pub secret_pre_image: Vec<u8>, + /// (optional) Specified by `from` to add a verification method to the voucher + pub extra: Option<ModVerifyParams>, + /// Specifies which lane the Voucher merges into (will be created if does not exist) + pub lane: u64, + /// Set by `from` to prevent redemption of stale vouchers on a lane + pub nonce: u64, + /// Amount voucher can be redeemed for + pub amount: TokenAmount, + /// (optional) Can extend channel min_settle_height if needed + pub min_settle_height: ChainEpoch, + + /// (optional) Set of lanes to be merged into `lane` + pub merges: Vec<Merge>, + + /// Sender's signature over the voucher (sign on none) + pub signature: Option<Signature>, +} + +impl SignedVoucher { + pub fn signing_bytes(&self) -> Result<Vec<u8>, Error> { + /// Helper struct to avoid cloning for serializing structure.
+ #[derive(Serialize_tuple)] + struct SignedVoucherSer<'a> { + pub channel_addr: &'a Address, + pub time_lock_min: ChainEpoch, + pub time_lock_max: ChainEpoch, + #[serde(with = "strict_bytes")] + pub secret_pre_image: &'a [u8], + pub extra: &'a Option, + pub lane: u64, + pub nonce: u64, + pub amount: &'a TokenAmount, + pub min_settle_height: ChainEpoch, + pub merges: &'a [Merge], + pub signature: (), + } + let osv = SignedVoucherSer { + channel_addr: &self.channel_addr, + time_lock_min: self.time_lock_min, + time_lock_max: self.time_lock_max, + secret_pre_image: &self.secret_pre_image, + extra: &self.extra, + lane: self.lane, + nonce: self.nonce, + amount: &self.amount, + min_settle_height: self.min_settle_height, + merges: &self.merges, + signature: (), + }; + // Cbor serialize struct + to_vec(&osv) + } +} + +/// Modular Verification method +#[derive(Debug, Clone, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct ModVerifyParams { + pub actor: Address, + pub method: MethodNum, + pub data: RawBytes, +} + +/// Payment Verification parameters +#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct PaymentVerifyParams { + pub extra: RawBytes, + #[serde(with = "strict_bytes")] + pub proof: Vec, +} + +#[derive(Serialize_tuple, Deserialize_tuple)] +pub struct UpdateChannelStateParams { + pub sv: SignedVoucher, + #[serde(with = "strict_bytes")] + pub secret: Vec, + // * proof removed in v2 +} + +impl From for UpdateChannelStateParams { + fn from(sv: SignedVoucher) -> Self { + UpdateChannelStateParams { secret: vec![], sv } + } +} diff --git a/actors/power/src/lib.rs b/actors/power/src/lib.rs index f95f7f59..26e0c7c7 100644 --- a/actors/power/src/lib.rs +++ b/actors/power/src/lib.rs @@ -7,5 +7,6 @@ pub mod v12; pub mod v13; pub mod v14; pub mod v15; +pub mod v16; pub mod v8; pub mod v9; diff --git a/actors/power/src/v16/ext.rs b/actors/power/src/v16/ext.rs new file mode 100644 index 00000000..86802af5 --- /dev/null +++ b/actors/power/src/v16/ext.rs @@ -0,0 
+1,73 @@ +use cid::Cid; +use fvm_ipld_encoding::tuple::*; +use fvm_ipld_encoding::{strict_bytes, BytesDe}; + +use fvm_shared4::address::Address; +use fvm_shared4::sector::RegisteredPoStProof; +use fvm_shared4::METHOD_CONSTRUCTOR; +use num_derive::FromPrimitive; + +use fil_actors_shared::v16::reward::FilterEstimate; + +pub mod init { + use super::*; + use fvm_ipld_encoding::RawBytes; + + pub const EXEC_METHOD: u64 = 2; + + /// Init actor Exec Params + #[derive(Serialize_tuple, Deserialize_tuple)] + pub struct ExecParams { + pub code_cid: Cid, + pub constructor_params: RawBytes, + } + + /// Init actor Exec Return value + #[derive(Serialize_tuple, Deserialize_tuple)] + pub struct ExecReturn { + /// ID based address for created actor + pub id_address: Address, + /// Reorg safe address for actor + pub robust_address: Address, + } +} + +pub mod miner { + use super::*; + + pub const ON_DEFERRED_CRON_EVENT_METHOD: u64 = 12; + + #[derive(Serialize_tuple, Deserialize_tuple)] + pub struct MinerConstructorParams { + pub owner: Address, + pub worker: Address, + pub control_addresses: Vec
, + pub window_post_proof_type: RegisteredPoStProof, + #[serde(with = "strict_bytes")] + pub peer_id: Vec, + pub multi_addresses: Vec, + } + + #[derive(Serialize_tuple, Deserialize_tuple)] + pub struct DeferredCronEventParams { + #[serde(with = "strict_bytes")] + pub event_payload: Vec, + pub reward_smoothed: FilterEstimate, + pub quality_adj_power_smoothed: FilterEstimate, + } +} + +pub mod reward { + use super::*; + + pub const UPDATE_NETWORK_KPI: u64 = 4; + + #[derive(FromPrimitive)] + #[repr(u64)] + pub enum Method { + Constructor = METHOD_CONSTRUCTOR, + AwardBlockReward = 2, + ThisEpochReward = 3, + UpdateNetworkKPI = 4, + } +} diff --git a/actors/power/src/v16/mod.rs b/actors/power/src/v16/mod.rs new file mode 100644 index 00000000..97b9defd --- /dev/null +++ b/actors/power/src/v16/mod.rs @@ -0,0 +1,40 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use fvm_shared4::error::ExitCode; +use fvm_shared4::METHOD_CONSTRUCTOR; +use num_derive::FromPrimitive; + +pub use self::policy::*; +pub use self::state::*; +pub use self::types::*; + +#[doc(hidden)] +pub mod ext; +mod policy; +mod state; +mod types; + +/// Storage power actor methods available +#[derive(FromPrimitive)] +#[repr(u64)] +pub enum Method { + /// Constructor for Storage Power Actor + Constructor = METHOD_CONSTRUCTOR, + CreateMiner = 2, + UpdateClaimedPower = 3, + EnrollCronEvent = 4, + OnEpochTickEnd = 5, + UpdatePledgeTotal = 6, + // OnConsensusFault = 7, // Deprecated v2 + // SubmitPoRepForBulkVerify = 8, // Deprecated + CurrentTotalPower = 9, + // Method numbers derived from FRC-0042 standards + CreateMinerExported = frc42_dispatch::method_hash!("CreateMiner"), + NetworkRawPowerExported = frc42_dispatch::method_hash!("NetworkRawPower"), + MinerRawPowerExported = frc42_dispatch::method_hash!("MinerRawPower"), + MinerCountExported = frc42_dispatch::method_hash!("MinerCount"), + MinerConsensusCountExported = 
frc42_dispatch::method_hash!("MinerConsensusCount"), +} + +pub const ERR_TOO_MANY_PROVE_COMMITS: ExitCode = ExitCode::new(32); diff --git a/actors/power/src/v16/policy.rs b/actors/power/src/v16/policy.rs new file mode 100644 index 00000000..dc0c0b46 --- /dev/null +++ b/actors/power/src/v16/policy.rs @@ -0,0 +1,13 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +/// Minimum power of an individual miner to meet the threshold for leader election. +pub const CONSENSUS_MINER_MIN_MINERS: i64 = 4; + +/// Maximum number of prove commits a miner can submit in one epoch +/// +/// We bound this to 200 to limit the number of prove partitions we may need to update in a +/// given epoch to 200. +/// +/// To support onboarding 1EiB/year, we need to allow at least 32 prove commits per epoch. +pub const MAX_MINER_PROVE_COMMITS_PER_EPOCH: u64 = 200; diff --git a/actors/power/src/v16/state.rs b/actors/power/src/v16/state.rs new file mode 100644 index 00000000..bf362cc4 --- /dev/null +++ b/actors/power/src/v16/state.rs @@ -0,0 +1,471 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::ops::Neg; + +use anyhow::anyhow; +use cid::Cid; +use fvm_ipld_blockstore::Blockstore; +use fvm_ipld_encoding::tuple::*; +use fvm_ipld_encoding::RawBytes; +use fvm_ipld_hamt::BytesKey; +use fvm_shared4::address::Address; +use fvm_shared4::bigint::bigint_ser; +use fvm_shared4::clock::ChainEpoch; +use fvm_shared4::econ::TokenAmount; +use fvm_shared4::error::ExitCode; +use fvm_shared4::sector::{RegisteredPoStProof, StoragePower}; +use fvm_shared4::ActorID; +use integer_encoding::VarInt; +use lazy_static::lazy_static; +use num_traits::Signed; + +use fil_actors_shared::actor_error_v16; +use fil_actors_shared::v16::builtin::reward::smooth::{ + AlphaBetaFilter, FilterEstimate, DEFAULT_ALPHA, DEFAULT_BETA, +}; +use fil_actors_shared::v16::runtime::Policy; +use fil_actors_shared::v16::{ + ActorContext, ActorDowncast, 
ActorError, AsActorError, Config, Map2, Multimap, + DEFAULT_HAMT_CONFIG, +}; + +use super::CONSENSUS_MINER_MIN_MINERS; + +lazy_static! { + /// genesis power in bytes = 750,000 GiB + pub static ref INITIAL_QA_POWER_ESTIMATE_POSITION: StoragePower = StoragePower::from(750_000) * (1 << 30); + /// max chain throughput in bytes per epoch = 120 ProveCommits / epoch = 3,840 GiB + pub static ref INITIAL_QA_POWER_ESTIMATE_VELOCITY: StoragePower = StoragePower::from(3_840) * (1 << 30); +} + +pub const CRON_QUEUE_HAMT_BITWIDTH: u32 = 6; +pub const CRON_QUEUE_AMT_BITWIDTH: u32 = 6; + +pub type ClaimsMap = Map2; +pub const CLAIMS_CONFIG: Config = DEFAULT_HAMT_CONFIG; + +/// Storage power actor state +#[derive(Default, Serialize_tuple, Deserialize_tuple, Clone, Debug)] +pub struct State { + #[serde(with = "bigint_ser")] + pub total_raw_byte_power: StoragePower, + #[serde(with = "bigint_ser")] + pub total_bytes_committed: StoragePower, + #[serde(with = "bigint_ser")] + pub total_quality_adj_power: StoragePower, + #[serde(with = "bigint_ser")] + pub total_qa_bytes_committed: StoragePower, + pub total_pledge_collateral: TokenAmount, + + #[serde(with = "bigint_ser")] + pub this_epoch_raw_byte_power: StoragePower, + #[serde(with = "bigint_ser")] + pub this_epoch_quality_adj_power: StoragePower, + pub this_epoch_pledge_collateral: TokenAmount, + pub this_epoch_qa_power_smoothed: FilterEstimate, + + pub miner_count: i64, + /// Number of miners having proven the minimum consensus power. + pub miner_above_min_power_count: i64, + + /// FIP0081 changed pledge calculations, moving from ruleset A to ruleset B. + /// This change is spread over several epochs to avoid sharp jumps in pledge + /// amounts. At `ramp_start_epoch`, we use the old ruleset. At + /// `ramp_start_epoch + ramp_duration_epochs`, we use 70% old rules + 30% + /// new rules. See FIP0081 for more details. + pub ramp_start_epoch: i64, + /// Number of epochs over which the new pledge calculation is ramped up. 
+ pub ramp_duration_epochs: u64, + + /// A queue of events to be triggered by cron, indexed by epoch. + pub cron_event_queue: Cid, // Multimap, (HAMT[ChainEpoch]AMT[CronEvent]) + + /// First epoch in which a cron task may be stored. Cron will iterate every epoch between this + /// and the current epoch inclusively to find tasks to execute. + pub first_cron_epoch: ChainEpoch, + + /// Claimed power for each miner. + pub claims: Cid, // Map, HAMT[address]Claim + + // Deprecated as of FIP 0084 + pub proof_validation_batch: Option<Cid>, +} + +impl State { + pub fn new<BS: Blockstore>(store: &BS) -> anyhow::Result<State> { + let empty_claims = ClaimsMap::empty(store, CLAIMS_CONFIG, "empty").flush()?; + let empty_mmap = Multimap::new(store, CRON_QUEUE_HAMT_BITWIDTH, CRON_QUEUE_AMT_BITWIDTH) + .root() + .context_code( + ExitCode::USR_ILLEGAL_STATE, + "Failed to get empty multimap cid", + )?; + Ok(State { + cron_event_queue: empty_mmap, + claims: empty_claims, + this_epoch_qa_power_smoothed: FilterEstimate::new( + INITIAL_QA_POWER_ESTIMATE_POSITION.clone(), + INITIAL_QA_POWER_ESTIMATE_VELOCITY.clone(), + ), + ..Default::default() + }) + } + + pub fn into_total_locked(self) -> TokenAmount { + self.total_pledge_collateral + } + + /// Checks power actor state for whether a miner meets minimum consensus power. + pub fn miner_nominal_power_meets_consensus_minimum<BS: Blockstore>( + &self, + policy: &Policy, + s: &BS, + miner: ActorID, + ) -> Result<(StoragePower, bool), ActorError> { + let claims = self.load_claims(s)?; + let a = &Address::new_id(miner); + let claim = claims + .get(a)?
+ .with_context_code(ExitCode::USR_ILLEGAL_ARGUMENT, || { + format!("no claim for actor: {}", miner) + })?; + + let miner_nominal_power = claim.raw_byte_power.clone(); + let miner_min_power = consensus_miner_min_power(policy, claim.window_post_proof_type) + .context_code( + ExitCode::USR_ILLEGAL_STATE, + "could not get miner min power from proof type: {}", + )?; + + if miner_nominal_power >= miner_min_power { + // If miner is larger than min power requirement, valid + Ok((miner_nominal_power, true)) + } else if self.miner_above_min_power_count >= CONSENSUS_MINER_MIN_MINERS { + // if min consensus miners requirement met, return false + Ok((miner_nominal_power, false)) + } else { + // if fewer miners than consensus minimum, return true if non-zero power + Ok(( + miner_nominal_power.clone(), + miner_nominal_power.is_positive(), + )) + } + } + + pub fn miner_power<BS: Blockstore>( + &self, + s: &BS, + miner: &Address, + ) -> Result<Option<Claim>, ActorError> { + let claims = self.load_claims(s)?; + claims.get(miner).map(|s| s.cloned()) + } + + pub(super) fn _add_to_claim( + &mut self, + policy: &Policy, + claims: &mut ClaimsMap, + miner: &Address, + power: &StoragePower, + qa_power: &StoragePower, + ) -> Result<(), ActorError> { + let old_claim = claims + .get(miner)?
+ .ok_or_else(|| actor_error_v16!(not_found, "no claim for actor {}", miner))?; + + self.total_qa_bytes_committed += qa_power; + self.total_bytes_committed += power; + + let new_claim = Claim { + raw_byte_power: old_claim.raw_byte_power.clone() + power, + quality_adj_power: old_claim.quality_adj_power.clone() + qa_power, + window_post_proof_type: old_claim.window_post_proof_type, + }; + + let min_power: StoragePower = + consensus_miner_min_power(policy, old_claim.window_post_proof_type) + .exit_code(ExitCode::USR_ILLEGAL_STATE)?; + let prev_below: bool = old_claim.raw_byte_power < min_power; + let still_below: bool = new_claim.raw_byte_power < min_power; + + if prev_below && !still_below { + // Just passed min miner size + self.miner_above_min_power_count += 1; + self.total_quality_adj_power += &new_claim.quality_adj_power; + self.total_raw_byte_power += &new_claim.raw_byte_power; + } else if !prev_below && still_below { + // just went below min miner size + self.miner_above_min_power_count -= 1; + self.total_quality_adj_power = self + .total_quality_adj_power + .checked_sub(&old_claim.quality_adj_power) + .expect("Negative nominal power"); + self.total_raw_byte_power = self + .total_raw_byte_power + .checked_sub(&old_claim.raw_byte_power) + .expect("Negative raw byte power"); + } else if !prev_below && !still_below { + // Was above the threshold, still above + self.total_quality_adj_power += qa_power; + self.total_raw_byte_power += power; + } + + if new_claim.raw_byte_power.is_negative() { + return Err(actor_error_v16!( + illegal_state, + "negative claimed raw byte power: {}", + new_claim.raw_byte_power + )); + } + if new_claim.quality_adj_power.is_negative() { + return Err(actor_error_v16!( + illegal_state, + "negative claimed quality adjusted power: {}", + new_claim.quality_adj_power + )); + } + if self.miner_above_min_power_count < 0 { + return Err(actor_error_v16!( + illegal_state, + "negative amount of miners lather than min: {}", + 
self.miner_above_min_power_count + )); + } + + set_claim(claims, miner, new_claim) + } + + pub fn load_claims(&self, s: BS) -> Result, ActorError> { + ClaimsMap::load(s, &self.claims, CLAIMS_CONFIG, "claims") + } + + pub fn save_claims( + &mut self, + claims: &mut ClaimsMap, + ) -> Result<(), ActorError> { + self.claims = claims.flush()?; + Ok(()) + } + + pub(super) fn _add_pledge_total(&mut self, amount: TokenAmount) { + self.total_pledge_collateral += amount; + } + + pub(super) fn _append_cron_event( + &mut self, + events: &mut Multimap, + epoch: ChainEpoch, + event: CronEvent, + ) -> anyhow::Result<()> { + if epoch < self.first_cron_epoch { + self.first_cron_epoch = epoch; + } + + events.add(epoch_key(epoch), event).map_err(|e| { + e.downcast_wrap(format!("failed to store cron event at epoch {}", epoch)) + })?; + Ok(()) + } + + pub fn current_total_power(&self) -> (StoragePower, StoragePower) { + if self.miner_above_min_power_count < CONSENSUS_MINER_MIN_MINERS { + ( + self.total_bytes_committed.clone(), + self.total_qa_bytes_committed.clone(), + ) + } else { + ( + self.total_raw_byte_power.clone(), + self.total_quality_adj_power.clone(), + ) + } + } + + pub(super) fn _update_smoothed_estimate(&mut self, delta: ChainEpoch) { + let filter_qa_power = AlphaBetaFilter::load( + &self.this_epoch_qa_power_smoothed, + &DEFAULT_ALPHA, + &DEFAULT_BETA, + ); + self.this_epoch_qa_power_smoothed = + filter_qa_power.next_estimate(&self.this_epoch_quality_adj_power, delta); + } + + /// Update stats on new miner creation. This is currently just used to update the miner count + /// when new added miner starts above the minimum. 
+ pub(super) fn _update_stats_for_new_miner( + &mut self, + policy: &Policy, + window_post_proof: RegisteredPoStProof, + ) -> anyhow::Result<()> { + let min_power = consensus_miner_min_power(policy, window_post_proof)?; + + if !min_power.is_positive() { + self.miner_above_min_power_count += 1; + } + Ok(()) + } + + /// Validates that miner has + pub(super) fn _validate_miner_has_claim( + &self, + store: &BS, + miner_addr: &Address, + ) -> Result<(), ActorError> + where + BS: Blockstore, + { + let claims = self.load_claims(store)?; + if !claims.contains_key(miner_addr)? { + return Err(actor_error_v16!( + forbidden, + "unknown miner {} forbidden to interact with power actor", + miner_addr + )); + } + Ok(()) + } + + pub fn get_claim( + &self, + store: &BS, + miner: &Address, + ) -> anyhow::Result> { + let claims = self.load_claims(store)?; + let claim = claims.get(miner)?; + Ok(claim.cloned()) + } + + pub(super) fn _delete_claim( + &mut self, + policy: &Policy, + claims: &mut ClaimsMap, + miner: &Address, + ) -> anyhow::Result<()> { + let (rbp, qap) = match claims.get(miner)? { + None => { + return Ok(()); + } + Some(claim) => ( + claim.raw_byte_power.clone(), + claim.quality_adj_power.clone(), + ), + }; + + // Subtract from stats to remove power + self._add_to_claim(policy, claims, miner, &rbp.neg(), &qap.neg()) + .context("subtract miner power before deleting claim")?; + claims + .delete(miner)? 
+ .ok_or_else(|| anyhow!("failed to delete claim for {miner}: doesn't exist"))?; + Ok(()) + } +} + +pub(super) fn _load_cron_events( + mmap: &Multimap, + epoch: ChainEpoch, +) -> anyhow::Result> { + let mut events = Vec::new(); + + mmap.for_each(&epoch_key(epoch), |_, v: &CronEvent| { + events.push(v.clone()); + Ok(()) + })?; + + Ok(events) +} + +pub fn set_claim( + claims: &mut ClaimsMap, + a: &Address, + claim: Claim, +) -> Result<(), ActorError> { + if claim.raw_byte_power.is_negative() { + return Err(actor_error_v16!( + illegal_state, + "negative claim raw power {}", + claim.raw_byte_power + )); + } + if claim.quality_adj_power.is_negative() { + return Err(actor_error_v16!( + illegal_state, + "negative claim quality-adjusted power {}", + claim.quality_adj_power + )); + } + + claims.set(a, claim)?; + Ok(()) +} + +pub fn epoch_key(e: ChainEpoch) -> BytesKey { + let bz = e.encode_var_vec(); + bz.into() +} + +#[derive(Debug, Serialize_tuple, Deserialize_tuple, Clone, PartialEq, Eq)] +pub struct Claim { + /// Miner's proof type used to determine minimum miner size + pub window_post_proof_type: RegisteredPoStProof, + /// Sum of raw byte power for a miner's sectors. + #[serde(with = "bigint_ser")] + pub raw_byte_power: StoragePower, + /// Sum of quality adjusted power for a miner's sectors. + #[serde(with = "bigint_ser")] + pub quality_adj_power: StoragePower, +} + +#[derive(Clone, Debug, Serialize_tuple, Deserialize_tuple)] +pub struct CronEvent { + pub miner_addr: Address, + pub callback_payload: RawBytes, +} + +/// Returns the minimum storage power required for each PoSt proof type. 
+pub fn consensus_miner_min_power( + policy: &Policy, + p: RegisteredPoStProof, +) -> anyhow::Result { + use RegisteredPoStProof::*; + match p { + StackedDRGWinning2KiBV1 + | StackedDRGWinning8MiBV1 + | StackedDRGWinning512MiBV1 + | StackedDRGWinning32GiBV1 + | StackedDRGWinning64GiBV1 + | StackedDRGWindow2KiBV1P1 + | StackedDRGWindow8MiBV1P1 + | StackedDRGWindow512MiBV1P1 + | StackedDRGWindow32GiBV1P1 + | StackedDRGWindow64GiBV1P1 => Ok(policy.minimum_consensus_power.clone()), + Invalid(i) => Err(anyhow::anyhow!("unsupported proof type: {}", i)), + } +} + +#[cfg(test)] +mod test { + use fvm_shared4::clock::ChainEpoch; + + use super::*; + + #[test] + fn epoch_key_test() { + let e1: ChainEpoch = 101; + let e2: ChainEpoch = 102; + let e3: ChainEpoch = 103; + let e4: ChainEpoch = -1; + + let b1: BytesKey = [0xca, 0x1].to_vec().into(); + let b2: BytesKey = [0xcc, 0x1].to_vec().into(); + let b3: BytesKey = [0xce, 0x1].to_vec().into(); + let b4: BytesKey = [0x1].to_vec().into(); + + assert_eq!(b1, epoch_key(e1)); + assert_eq!(b2, epoch_key(e2)); + assert_eq!(b3, epoch_key(e3)); + assert_eq!(b4, epoch_key(e4)); + } +} diff --git a/actors/power/src/v16/types.rs b/actors/power/src/v16/types.rs new file mode 100644 index 00000000..d3b851f1 --- /dev/null +++ b/actors/power/src/v16/types.rs @@ -0,0 +1,105 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use fil_actors_shared::v16::reward::FilterEstimate; +use fvm_ipld_encoding::tuple::*; +use fvm_ipld_encoding::{strict_bytes, BytesDe, RawBytes}; +use fvm_shared4::address::Address; +use fvm_shared4::bigint::bigint_ser; +use fvm_shared4::clock::ChainEpoch; +use fvm_shared4::econ::TokenAmount; +use fvm_shared4::sector::{RegisteredPoStProof, StoragePower}; +use fvm_shared4::ActorID; + +use serde::{Deserialize, Serialize}; + +pub type SectorTermination = i64; + +/// Implicit termination after all deals expire +pub const SECTOR_TERMINATION_EXPIRED: SectorTermination = 0; +/// Unscheduled 
explicit termination by the miner +pub const SECTOR_TERMINATION_MANUAL: SectorTermination = 1; +/// Implicit termination due to unrecovered fault +pub const SECTOR_TERMINATION_FAULTY: SectorTermination = 3; + +#[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone, Eq, PartialEq)] +pub struct CreateMinerParams { + pub owner: Address, + pub worker: Address, + pub window_post_proof_type: RegisteredPoStProof, + #[serde(with = "strict_bytes")] + pub peer: Vec, + pub multiaddrs: Vec, +} + +#[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone, Eq, PartialEq)] +pub struct CreateMinerReturn { + /// Canonical ID-based address for the actor. + pub id_address: Address, + /// Re-org safe address for created actor. + pub robust_address: Address, +} + +#[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone, Eq, PartialEq)] +pub struct UpdateClaimedPowerParams { + #[serde(with = "bigint_ser")] + pub raw_byte_delta: StoragePower, + #[serde(with = "bigint_ser")] + pub quality_adjusted_delta: StoragePower, +} + +#[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone, Eq, PartialEq)] +pub struct EnrollCronEventParams { + pub event_epoch: ChainEpoch, + pub payload: RawBytes, +} + +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone, Default)] +#[serde(transparent)] +pub struct UpdatePledgeTotalParams { + pub pledge_delta: TokenAmount, +} + +#[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone, Eq, PartialEq)] +pub struct CurrentTotalPowerReturn { + #[serde(with = "bigint_ser")] + pub raw_byte_power: StoragePower, + #[serde(with = "bigint_ser")] + pub quality_adj_power: StoragePower, + pub pledge_collateral: TokenAmount, + pub quality_adj_power_smoothed: FilterEstimate, + pub ramp_start_epoch: i64, + pub ramp_duration_epochs: u64, +} + +#[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone, Eq, PartialEq)] +#[serde(transparent)] +pub struct NetworkRawPowerReturn { + #[serde(with = "bigint_ser")] + pub raw_byte_power: StoragePower, +} + 
+#[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone, Eq, PartialEq)] +#[serde(transparent)] +pub struct MinerRawPowerParams { + pub miner: ActorID, +} + +#[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone, Eq, PartialEq)] +pub struct MinerRawPowerReturn { + #[serde(with = "bigint_ser")] + pub raw_byte_power: StoragePower, + pub meets_consensus_minimum: bool, +} + +#[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone, Eq, PartialEq)] +#[serde(transparent)] +pub struct MinerCountReturn { + pub miner_count: i64, +} + +#[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone, Eq, PartialEq)] +#[serde(transparent)] +pub struct MinerConsensusCountReturn { + pub miner_consensus_count: i64, +} diff --git a/actors/reward/src/lib.rs b/actors/reward/src/lib.rs index f95f7f59..26e0c7c7 100644 --- a/actors/reward/src/lib.rs +++ b/actors/reward/src/lib.rs @@ -7,5 +7,6 @@ pub mod v12; pub mod v13; pub mod v14; pub mod v15; +pub mod v16; pub mod v8; pub mod v9; diff --git a/actors/reward/src/v16/expneg.rs b/actors/reward/src/v16/expneg.rs new file mode 100644 index 00000000..a26f46dd --- /dev/null +++ b/actors/reward/src/v16/expneg.rs @@ -0,0 +1,49 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use fvm_shared4::bigint::{BigInt, Integer}; +use lazy_static::lazy_static; + +use fil_actors_shared::v16::builtin::reward::math::{poly_parse, poly_val, PRECISION}; + +lazy_static! 
{ + static ref EXP_NUM_COEF: Vec = poly_parse(&[ + "-648770010757830093818553637600", + "67469480939593786226847644286976", + "-3197587544499098424029388939001856", + "89244641121992890118377641805348864", + "-1579656163641440567800982336819953664", + "17685496037279256458459817590917169152", + "-115682590513835356866803355398940131328", + "340282366920938463463374607431768211456", + ]) + .unwrap(); + static ref EXP_DENO_COEF: Vec = poly_parse(&[ + "1225524182432722209606361", + "114095592300906098243859450", + "5665570424063336070530214243", + "194450132448609991765137938448", + "5068267641632683791026134915072", + "104716890604972796896895427629056", + "1748338658439454459487681798864896", + "23704654329841312470660182937960448", + "259380097567996910282699886670381056", + "2250336698853390384720606936038375424", + "14978272436876548034486263159246028800", + "72144088983913131323343765784380833792", + "224599776407103106596571252037123047424", + "340282366920938463463374607431768211456", + ]) + .unwrap(); +} + +/// expneg accepts x in Q.128 format and computes e^-x. +/// It is most precise within [0, 1.725) range, where error is less than 3.4e-30. +/// Over the [0, 5) range its error is less than 4.6e-15. +/// Output is in Q.128 format. 
+pub(crate) fn expneg(x: &BigInt) -> BigInt { + let num = poly_val(&EXP_NUM_COEF, x); + let deno = poly_val(&EXP_DENO_COEF, x); + + (num << PRECISION).div_floor(&deno) +} diff --git a/actors/reward/src/v16/ext.rs b/actors/reward/src/v16/ext.rs new file mode 100644 index 00000000..097a102e --- /dev/null +++ b/actors/reward/src/v16/ext.rs @@ -0,0 +1,14 @@ +use fvm_ipld_encoding::tuple::*; +use fvm_shared4::econ::TokenAmount; + +pub mod miner { + use super::*; + + pub const APPLY_REWARDS_METHOD: u64 = 14; + + #[derive(Debug, Serialize_tuple, Deserialize_tuple)] + pub struct ApplyRewardParams { + pub reward: TokenAmount, + pub penalty: TokenAmount, + } +} diff --git a/actors/reward/src/v16/logic.rs b/actors/reward/src/v16/logic.rs new file mode 100644 index 00000000..521f53e0 --- /dev/null +++ b/actors/reward/src/v16/logic.rs @@ -0,0 +1,318 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::str::FromStr; + +use fvm_shared4::bigint::{BigInt, Integer}; +use fvm_shared4::clock::ChainEpoch; +use fvm_shared4::econ::TokenAmount; +use fvm_shared4::sector::StoragePower; +use lazy_static::lazy_static; + +use super::expneg::expneg; +use fil_actors_shared::v16::builtin::reward::math::PRECISION; + +lazy_static! { + /// Floor(e^(ln[1 + 100%] / epochsInYear) * 2^128 + /// Q.128 formatted number such that f(epoch) = baseExponent^epoch grows 100% in one + /// year of epochs. 
+ /// Calculation here: https://www.wolframalpha.com/input?i=IntegerPart%5BExp%5BLog%5B1%2B100%25%5D%2F%28%28365+days%29%2F%2830+seconds%29%29%5D*2%5E128%5D + pub static ref BASELINE_EXPONENT: StoragePower = + StoragePower::from_str("340282591298641078465964189926313473653").unwrap(); + + // 2.5057116798121726 EiB + pub static ref BASELINE_INITIAL_VALUE: StoragePower = StoragePower::from(2_888_888_880_000_000_000u128); + + /// 1EiB + pub static ref INIT_BASELINE_POWER: StoragePower = + ((BASELINE_INITIAL_VALUE.clone() << (2*PRECISION)) / &*BASELINE_EXPONENT) >> PRECISION; + + /// 330M for mainnet + pub(super) static ref SIMPLE_TOTAL: TokenAmount = TokenAmount::from_whole(330_000_000); + /// 770M for mainnet + pub(super) static ref BASELINE_TOTAL: TokenAmount = TokenAmount::from_whole(770_000_000); + /// expLamSubOne = e^lambda - 1 + /// for Q.128: int(expLamSubOne * 2^128) + static ref EXP_LAM_SUB_ONE: BigInt = BigInt::from(37396273494747879394193016954629u128); + /// lambda = ln(2) / (6 * epochsInYear) + /// for Q.128: int(lambda * 2^128) + static ref LAMBDA: BigInt = BigInt::from(37396271439864487274534522888786u128); + +} + +/// Compute BaselinePower(t) from BaselinePower(t-1) with an additional multiplication +/// of the base exponent. +pub(crate) fn baseline_power_from_prev(prev_power: &StoragePower) -> StoragePower { + (prev_power * &*BASELINE_EXPONENT) >> PRECISION +} + +/// Computes RewardTheta which is the precise fractional value of effectiveNetworkTime. +/// The effectiveNetworkTime is defined by CumsumBaselinePower(theta) == CumsumRealizedPower +/// As baseline power is defined over integers and the RewardTheta is required to be fractional, +/// we perform linear interpolation between CumsumBaseline(⌊theta⌋) and CumsumBaseline(⌈theta⌉). +/// The effectiveNetworkTime argument is ceiling of theta. +/// The result is a fractional effectiveNetworkTime (theta) in Q.128 format.
+pub(crate) fn compute_r_theta( + effective_network_time: ChainEpoch, + baseline_power_at_effective_network_time: &BigInt, + cumsum_realized: &BigInt, + cumsum_baseline: &BigInt, +) -> BigInt { + if effective_network_time != 0 { + let reward_theta = BigInt::from(effective_network_time) << PRECISION; + let diff = ((cumsum_baseline - cumsum_realized) << PRECISION) + .div_floor(baseline_power_at_effective_network_time); + + reward_theta - diff + } else { + Default::default() + } +} + +/// Computes a reward for all expected leaders when effective network time changes +/// from prevTheta to currTheta. Inputs are in Q.128 format +pub(crate) fn compute_reward( + epoch: ChainEpoch, + prev_theta: BigInt, + curr_theta: BigInt, + simple_total: &TokenAmount, + baseline_total: &TokenAmount, +) -> TokenAmount { + let mut simple_reward = simple_total.atto() * &*EXP_LAM_SUB_ONE; + let epoch_lam = &*LAMBDA * epoch; + + simple_reward *= expneg(&epoch_lam); + simple_reward >>= PRECISION; + + let baseline_reward = compute_baseline_supply(curr_theta, baseline_total.atto()) + - compute_baseline_supply(prev_theta, baseline_total.atto()); + + TokenAmount::from_atto((simple_reward + baseline_reward) >> PRECISION) +} + +/// Computes baseline supply based on theta in Q.128 format. 
+/// Return is in Q.128 format +fn compute_baseline_supply(theta: BigInt, baseline_total: &BigInt) -> BigInt { + let theta_lam = (theta * &*LAMBDA) >> PRECISION; + + let etl = expneg(&theta_lam); + + let one = BigInt::from(1) << PRECISION; + let one_sub = one - etl; + + one_sub * baseline_total +} + +#[cfg(test)] +mod tests { + const SECONDS_IN_HOUR: i64 = 60 * 60; + const EPOCH_DURATION_IN_SECONDS: i64 = 30; + const EPOCHS_IN_HOUR: i64 = SECONDS_IN_HOUR / EPOCH_DURATION_IN_SECONDS; + const EPOCHS_IN_DAY: i64 = 24 * EPOCHS_IN_HOUR; + const EPOCHS_IN_YEAR: i64 = 365 * EPOCHS_IN_DAY; + + use super::*; + use num::BigRational; + use num::ToPrimitive; + use std::fs; + use std::ops::Shl; + + // Converted from: https://github.com/filecoin-project/specs-actors/blob/d56b240af24517443ce1f8abfbdab7cb22d331f1/actors/builtin/reward/reward_logic_test.go#L18 + // x => x/(2^128) + fn q128_to_f64(x: BigInt) -> f64 { + let denom = BigInt::from(1u64).shl(u128::BITS); + BigRational::new(x, denom) + .to_f64() + .expect("BigInt cannot be expressed as a 64bit float") + } + + // Converted from: https://github.com/filecoin-project/specs-actors/blob/d56b240af24517443ce1f8abfbdab7cb22d331f1/actors/builtin/reward/reward_logic_test.go#L25 + #[test] + fn test_compute_r_theta() { + fn baseline_power_at(epoch: ChainEpoch) -> BigInt { + (BigInt::from(epoch) + BigInt::from(1i64)) * BigInt::from(2048) + } + + assert_eq!( + q128_to_f64(compute_r_theta( + 1, + &baseline_power_at(1), + &BigInt::from(2048 + 2 * 2048 / 2), + &BigInt::from(2048 + 2 * 2048), + )), + 0.5 + ); + + assert_eq!( + q128_to_f64(compute_r_theta( + 1, + &baseline_power_at(1), + &BigInt::from(2048 + 2 * 2048 / 4), + &BigInt::from(2048 + 2 * 2048), + )), + 0.25 + ); + + let cumsum15 = (0..16).map(baseline_power_at).sum::(); + assert_eq!( + q128_to_f64(compute_r_theta( + 16, + &baseline_power_at(16), + &(&cumsum15 + baseline_power_at(16) / BigInt::from(4)), + &(&cumsum15 + baseline_power_at(16)), + )), + 15.25 + ); + } + + // 
Converted from: https://github.com/filecoin-project/specs-actors/blob/d56b240af24517443ce1f8abfbdab7cb22d331f1/actors/builtin/reward/reward_logic_test.go#L43 + #[test] + fn test_baseline_reward() { + let step = BigInt::from(5000_i64).shl(u128::BITS) - BigInt::from(77_777_777_777_i64); // offset from full integers + let delta = BigInt::from(1_i64).shl(u128::BITS) - BigInt::from(33_333_333_333_i64); // offset from full integers + + let mut prev_theta = BigInt::from(0i64); + let mut theta = delta; + + let mut b = String::from("t0, t1, y\n"); + let simple = compute_reward( + 0, + BigInt::from(0i64), + BigInt::from(0i64), + &SIMPLE_TOTAL, + &BASELINE_TOTAL, + ); + + for _ in 0..512 { + let mut reward = compute_reward( + 0, + prev_theta.clone(), + theta.clone(), + &SIMPLE_TOTAL, + &BASELINE_TOTAL, + ); + reward -= &simple; + + let prev_theta_str = &prev_theta.to_string(); + let theta_str = &theta.to_string(); + let reward_str = &reward.atto().to_string(); + b.push_str(prev_theta_str); + b.push(','); + b.push_str(theta_str); + b.push(','); + b.push_str(reward_str); + b.push('\n'); + + prev_theta += &step; + theta += &step; + } + + // compare test output to golden file used for golang tests; file originally located at filecoin-project/specs-actors/actors/builtin/reward/testdata/TestBaselineReward.golden (current link: https://github.com/filecoin-project/specs-actors/blob/d56b240af24517443ce1f8abfbdab7cb22d331f1/actors/builtin/reward/testdata/TestBaselineReward.golden) + let filename = "src/v16/testdata/TestBaselineReward.golden"; + let golden_contents = + fs::read_to_string(filename).expect("Something went wrong reading the file"); + + assert_eq!(golden_contents, b); + } + + // Converted from: https://github.com/filecoin-project/specs-actors/blob/d56b240af24517443ce1f8abfbdab7cb22d331f1/actors/builtin/reward/reward_logic_test.go#L70 + #[test] + fn test_simple_reward() { + let mut b = String::from("x, y\n"); + for i in 0..512 { + let x: i64 = i * 5000; + let reward = 
compute_reward( + x, + BigInt::from(0i64), + BigInt::from(0i64), + &SIMPLE_TOTAL, + &BASELINE_TOTAL, + ); + + let x_str = &x.to_string(); + let reward_str = &reward.atto().to_string(); + b.push_str(x_str); + b.push(','); + b.push_str(reward_str); + b.push('\n'); + } + + // compare test output to golden file used for golang tests; file originally located at filecoin-project/specs-actors/actors/builtin/reward/testdata/TestSimpleReward.golden (current link: https://github.com/filecoin-project/specs-actors/blob/d56b240af24517443ce1f8abfbdab7cb22d331f1/actors/builtin/reward/testdata/TestSimpleReward.golden) + let filename = "src/v16/testdata/TestSimpleReward.golden"; + let golden_contents = + fs::read_to_string(filename).expect("Something went wrong reading the file"); + + assert_eq!(golden_contents, b); + } + + // Converted from: https://github.com/filecoin-project/specs-actors/blob/d56b240af24517443ce1f8abfbdab7cb22d331f1/actors/builtin/reward/reward_logic_test.go#L82 + #[test] + fn test_baseline_reward_growth() { + fn baseline_in_years(start: StoragePower, x: ChainEpoch) -> StoragePower { + let mut baseline = start; + for _ in 0..(x * EPOCHS_IN_YEAR) { + baseline = baseline_power_from_prev(&baseline); + } + baseline + } + + struct GrowthTestCase { + start_val: StoragePower, + err_bound: f64, + } + + let cases: [GrowthTestCase; 7] = [ + // 1 byte + GrowthTestCase { + start_val: StoragePower::from(1i64), + err_bound: 1.0, + }, + // GiB + GrowthTestCase { + start_val: StoragePower::from(1i64 << 30), + err_bound: 1e-3, + }, + // TiB + GrowthTestCase { + start_val: StoragePower::from(1i64 << 40), + err_bound: 1e-6, + }, + // PiB + GrowthTestCase { + start_val: StoragePower::from(1i64 << 50), + err_bound: 1e-8, + }, + // EiB + GrowthTestCase { + start_val: BASELINE_INITIAL_VALUE.clone(), + err_bound: 1e-8, + }, + // ZiB + GrowthTestCase { + start_val: StoragePower::from(1u128 << 70), + err_bound: 1e-8, + }, + // non power of 2 ~ 1 EiB + GrowthTestCase { + start_val: 
StoragePower::from(513_633_559_722_596_517_u128), + err_bound: 1e-8, + }, + ]; + + for case in cases { + let years = 1u32; + let end = baseline_in_years(case.start_val.clone(), 1); + + // logic from golang test was preserved to enable future testing of more than one year + let multiplier = BigInt::pow(&BigInt::from(2u32), years); + let expected = case.start_val * multiplier; + let diff = &expected - end; + + let perr = BigRational::new(diff, expected) + .to_f64() + .expect("BigInt cannot be expressed as a 64bit float"); + + assert!(perr < case.err_bound); + } + } +} diff --git a/actors/reward/src/v16/mod.rs b/actors/reward/src/v16/mod.rs new file mode 100644 index 00000000..c3d38fc5 --- /dev/null +++ b/actors/reward/src/v16/mod.rs @@ -0,0 +1,33 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use fvm_shared4::METHOD_CONSTRUCTOR; +use num_derive::FromPrimitive; + +pub use self::logic::*; +pub use self::state::*; +pub use self::types::*; + +pub(crate) mod expneg; +mod logic; +mod state; +mod types; + +// only exported for tests +#[doc(hidden)] +pub mod ext; + +// * Updated to specs-actors commit: 999e57a151cc7ada020ca2844b651499ab8c0dec (v3.0.1) + +/// PenaltyMultiplier is the factor miner penalties are scaled up by +pub const PENALTY_MULTIPLIER: u64 = 3; + +/// Reward actor methods available +#[derive(FromPrimitive)] +#[repr(u64)] +pub enum Method { + Constructor = METHOD_CONSTRUCTOR, + AwardBlockReward = 2, + ThisEpochReward = 3, + UpdateNetworkKPI = 4, +} diff --git a/actors/reward/src/v16/state.rs b/actors/reward/src/v16/state.rs new file mode 100644 index 00000000..bd7bf958 --- /dev/null +++ b/actors/reward/src/v16/state.rs @@ -0,0 +1,201 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use fvm_ipld_encoding::repr::*; +use fvm_ipld_encoding::tuple::*; +use fvm_shared4::bigint::bigint_ser; +use fvm_shared4::bigint::BigInt; +use fvm_shared4::clock::{ChainEpoch, 
EPOCH_UNDEFINED}; +use fvm_shared4::econ::TokenAmount; +use fvm_shared4::sector::StoragePower; +use lazy_static::lazy_static; +use num_derive::FromPrimitive; + +use fil_actors_shared::v16::builtin::reward::smooth::{ + AlphaBetaFilter, FilterEstimate, DEFAULT_ALPHA, DEFAULT_BETA, +}; + +/// The unit of spacetime committed to the network +pub type Spacetime = BigInt; + +use super::logic::*; + +lazy_static! { + /// 36.266260308195979333 FIL + pub static ref INITIAL_REWARD_POSITION_ESTIMATE: TokenAmount = TokenAmount::from_atto(36266260308195979333u128); + /// -1.09897758509*10^-7 FIL per epoch. Change of simple minted tokens between epochs 0 and 1. + pub static ref INITIAL_REWARD_VELOCITY_ESTIMATE: TokenAmount = TokenAmount::from_atto(-109897758509i64); +} + +/// Reward actor state +#[derive(Serialize_tuple, Deserialize_tuple, Default, Debug, Clone)] +pub struct State { + /// Target CumsumRealized needs to reach for EffectiveNetworkTime to increase + /// Expressed in byte-epochs. + #[serde(with = "bigint_ser")] + pub cumsum_baseline: Spacetime, + + /// CumsumRealized is cumulative sum of network power capped by BaselinePower(epoch). + /// Expressed in byte-epochs. + #[serde(with = "bigint_ser")] + pub cumsum_realized: Spacetime, + + /// Ceiling of real effective network time `theta` based on + /// CumsumBaselinePower(theta) == CumsumRealizedPower + /// Theta captures the notion of how much the network has progressed in its baseline + /// and in advancing network time. + pub effective_network_time: ChainEpoch, + + /// EffectiveBaselinePower is the baseline power at the EffectiveNetworkTime epoch. + #[serde(with = "bigint_ser")] + pub effective_baseline_power: StoragePower, + + /// The reward to be paid in per WinCount to block producers. + /// The actual reward total paid out depends on the number of winners in any round. + /// This value is recomputed every non-null epoch and used in the next non-null epoch.
+ pub this_epoch_reward: TokenAmount, + /// Smoothed `this_epoch_reward`. + pub this_epoch_reward_smoothed: FilterEstimate, + + /// The baseline power the network is targeting at st.Epoch. + #[serde(with = "bigint_ser")] + pub this_epoch_baseline_power: StoragePower, + + /// Epoch tracks for which epoch the Reward was computed. + pub epoch: ChainEpoch, + + // TotalStoragePowerReward tracks the total FIL awarded to block miners + pub total_storage_power_reward: TokenAmount, + + // Simple and Baseline totals are constants used for computing rewards. + // They are on chain because of a historical fix resetting baseline value + // in a way that depended on the history leading immediately up to the + // migration fixing the value. These values can be moved from state back + // into a code constant in a subsequent upgrade. + pub simple_total: TokenAmount, + pub baseline_total: TokenAmount, +} + +impl State { + pub fn new(curr_realized_power: StoragePower) -> Self { + let mut st = Self { + effective_baseline_power: BASELINE_INITIAL_VALUE.clone(), + this_epoch_baseline_power: INIT_BASELINE_POWER.clone(), + epoch: EPOCH_UNDEFINED, + this_epoch_reward_smoothed: FilterEstimate::new( + INITIAL_REWARD_POSITION_ESTIMATE.atto().clone(), + INITIAL_REWARD_VELOCITY_ESTIMATE.atto().clone(), + ), + simple_total: SIMPLE_TOTAL.clone(), + baseline_total: BASELINE_TOTAL.clone(), + ..Default::default() + }; + st.update_to_next_epoch_with_reward(&curr_realized_power); + + st + } + + /// Takes in current realized power and updates internal state + /// Used for update of internal state during null rounds + pub(super) fn update_to_next_epoch(&mut self, curr_realized_power: &StoragePower) { + self.epoch += 1; + self.this_epoch_baseline_power = baseline_power_from_prev(&self.this_epoch_baseline_power); + let capped_realized_power = + std::cmp::min(&self.this_epoch_baseline_power, curr_realized_power); + self.cumsum_realized += capped_realized_power; + + while self.cumsum_realized > 
self.cumsum_baseline { + self.effective_network_time += 1; + self.effective_baseline_power = + baseline_power_from_prev(&self.effective_baseline_power); + self.cumsum_baseline += &self.effective_baseline_power; + } + } + + /// Takes in a current realized power for a reward epoch and computes + /// and updates reward state to track reward for the next epoch + pub(super) fn update_to_next_epoch_with_reward(&mut self, curr_realized_power: &StoragePower) { + let prev_reward_theta = compute_r_theta( + self.effective_network_time, + &self.effective_baseline_power, + &self.cumsum_realized, + &self.cumsum_baseline, + ); + self.update_to_next_epoch(curr_realized_power); + let curr_reward_theta = compute_r_theta( + self.effective_network_time, + &self.effective_baseline_power, + &self.cumsum_realized, + &self.cumsum_baseline, + ); + + self.this_epoch_reward = compute_reward( + self.epoch, + prev_reward_theta, + curr_reward_theta, + &self.simple_total, + &self.baseline_total, + ); + } + + pub(super) fn _update_smoothed_estimates(&mut self, delta: ChainEpoch) { + let filter_reward = AlphaBetaFilter::load( + &self.this_epoch_reward_smoothed, + &DEFAULT_ALPHA, + &DEFAULT_BETA, + ); + self.this_epoch_reward_smoothed = + filter_reward.next_estimate(self.this_epoch_reward.atto(), delta); + } + + pub fn into_total_storage_power_reward(self) -> TokenAmount { + self.total_storage_power_reward + } + + pub fn pre_commit_deposit_for_power( + &self, + reward_estimate: &FilterEstimate, + network_qa_power_estimate: &FilterEstimate, + qa_sector_power: &StoragePower, + ) -> TokenAmount { + fil_actor_miner_state::v16::pre_commit_deposit_for_power( + reward_estimate, + network_qa_power_estimate, + qa_sector_power, + ) + } +} + +/// Defines vesting function type for reward actor.
+#[derive(Clone, Debug, PartialEq, Eq, Copy, FromPrimitive, Serialize_repr, Deserialize_repr)] +#[repr(u8)] +pub enum VestingFunction { + None = 0, + Linear = 1, +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct Reward { + pub vesting_function: VestingFunction, + pub start_epoch: ChainEpoch, + pub end_epoch: ChainEpoch, + pub value: TokenAmount, + pub amount_withdrawn: TokenAmount, +} + +impl Reward { + pub fn amount_vested(&self, curr_epoch: ChainEpoch) -> TokenAmount { + match self.vesting_function { + VestingFunction::None => self.value.clone(), + VestingFunction::Linear => { + let elapsed = curr_epoch - self.start_epoch; + let vest_duration = self.end_epoch - self.start_epoch; + if elapsed >= vest_duration { + self.value.clone() + } else { + (self.value.clone() * elapsed as u64).div_floor(vest_duration) + } + } + } + } +} diff --git a/actors/reward/src/v16/testdata/TestBaselineReward.golden b/actors/reward/src/v16/testdata/TestBaselineReward.golden new file mode 100644 index 00000000..1a114ad8 --- /dev/null +++ b/actors/reward/src/v16/testdata/TestBaselineReward.golden @@ -0,0 +1,513 @@ +t0, t1, y +0,340282366920938463463374607398434878123,84621274052457285111 +1701411834604692317316873037158763279502223,1701752116971613255780336411766161714380346,84574788381047331439 +3402823669209384634633746074317526559004446,3403163951576305573097209448924924993882569,84528328245977625901 +5104235503814076951950619111476289838506669,5104575786180997890414082486083688273384792,84481893633220089447 +6805647338418769269267492148635053118008892,6805987620785690207730955523242451552887015,84435484528754349181 +8507059173023461586584365185793816397511115,8507399455390382525047828560401214832389238,84389100918567734129 +10208471007628153903901238222952579677013338,10208811289995074842364701597559978111891461,84342742788655271010 +11909882842232846221218111260111342956515561,11910223124599767159681574634718741391393684,84296410125019680001 
+13611294676837538538534984297270106236017784,13611634959204459476998447671877504670895907,84250102913671370520 +15312706511442230855851857334428869515520007,15313046793809151794315320709036267950398130,84203821140628436995 +17014118346046923173168730371587632795022230,17014458628413844111632193746195031229900353,84157564791916654642 +18715530180651615490485603408746396074524453,18715870463018536428949066783353794509402576,84111333853569475252 +20416942015256307807802476445905159354026676,20417282297623228746265939820512557788904799,84065128311628022968 +22118353849861000125119349483063922633528899,22118694132227921063582812857671321068407022,84018948152141090071 +23819765684465692442436222520222685913031122,23820105966832613380899685894830084347909245,83972793361165132773 +25521177519070384759753095557381449192533345,25521517801437305698216558931988847627411468,83926663924764266998 +27222589353675077077069968594540212472035568,27222929636041998015533431969147610906913691,83880559829010264183 +28924001188279769394386841631698975751537791,28924341470646690332850305006306374186415914,83834481059982547065 +30625413022884461711703714668857739031040014,30625753305251382650167178043465137465918137,83788427603768185484 +32326824857489154029020587706016502310542237,32327165139856074967484051080623900745420360,83742399446461892180 +34028236692093846346337460743175265590044460,34028576974460767284800924117782664024922583,83696396574166018591 +35729648526698538663654333780334028869546683,35729988809065459602117797154941427304424806,83650418972990550662 +37431060361303230980971206817492792149048906,37431400643670151919434670192100190583927029,83604466629053104650 +39132472195907923298288079854651555428551129,39132812478274844236751543229258953863429252,83558539528478922929 +40833884030512615615604952891810318708053352,40834224312879536554068416266417717142931475,83512637657400869803 
+42535295865117307932921825928969081987555575,42535636147484228871385289303576480422433698,83466761001959427321 +44236707699722000250238698966127845267057798,44237047982088921188702162340735243701935921,83420909548302691088 +45938119534326692567555572003286608546560021,45938459816693613506019035377894006981438144,83375083282586366086 +47639531368931384884872445040445371826062244,47639871651298305823335908415052770260940367,83329282190973762493 +49340943203536077202189318077604135105564467,49341283485902998140652781452211533540442590,83283506259635791504 +51042355038140769519506191114762898385066690,51042695320507690457969654489370296819944813,83237755474750961157 +52743766872745461836823064151921661664568913,52744107155112382775286527526529060099447036,83192029822505372158 +54445178707350154154139937189080424944071136,54445518989717075092603400563687823378949259,83146329289092713713 +56146590541954846471456810226239188223573359,56146930824321767409920273600846586658451482,83100653860714259356 +57848002376559538788773683263397951503075582,57848342658926459727237146638005349937953705,83055003523578862785 +59549414211164231106090556300556714782577805,59549754493531152044554019675164113217455928,83009378263902953696 +61250826045768923423407429337715478062080028,61251166328135844361870892712322876496958151,82963778067910533624 +62952237880373615740724302374874241341582251,62952578162740536679187765749481639776460374,82918202921833171780 +64653649714978308058041175412033004621084474,64653989997345228996504638786640403055962597,82872652811910000897 +66355061549583000375358048449191767900586697,66355401831949921313821511823799166335464820,82827127724387713073 +68056473384187692692674921486350531180088920,68056813666554613631138384860957929614967043,82781627645520555620 +69757885218792385009991794523509294459591143,69758225501159305948455257898116692894469266,82736152561570326910 
+71459297053397077327308667560668057739093366,71459637335763998265772130935275456173971489,82690702458806372233 +73160708888001769644625540597826821018595589,73161049170368690583089003972434219453473712,82645277323505579646 +74862120722606461961942413634985584298097812,74862461004973382900405877009592982732975935,82599877141952375831 +76563532557211154279259286672144347577600035,76563872839578075217722750046751746012478158,82554501900438721954 +78264944391815846596576159709303110857102258,78265284674182767535039623083910509291980381,82509151585264109528 +79966356226420538913893032746461874136604481,79966696508787459852356496121069272571482604,82463826182735556271 +81667768061025231231209905783620637416106704,81668108343392152169673369158228035850984827,82418525679167601977 +83369179895629923548526778820779400695608927,83369520177996844486990242195386799130487050,82373250060882304382 +85070591730234615865843651857938163975111150,85070932012601536804307115232545562409989273,82327999314209235033 +86772003564839308183160524895096927254613373,86772343847206229121623988269704325689491496,82282773425485475161 +88473415399444000500477397932255690534115596,88473755681810921438940861306863088968993719,82237572381055611557 +90174827234048692817794270969414453813617819,90175167516415613756257734344021852248495942,82192396167271732445 +91876239068653385135111144006573217093120042,91876579351020306073574607381180615527998165,82147244770493423368 +93577650903258077452428017043731980372622265,93577991185624998390891480418339378807500388,82102118177087763062 +95279062737862769769744890080890743652124488,95279403020229690708208353455498142087002611,82057016373429319346 +96980474572467462087061763118049506931626711,96980814854834383025525226492656905366504834,82011939345900145001 +98681886407072154404378636155208270211128934,98682226689439075342842099529815668646007057,81966887080889773666 
+100383298241676846721695509192367033490631157,100383638524043767660158972566974431925509280,81921859564795215724 +102084710076281539039012382229525796770133380,102085050358648459977475845604133195205011503,81876856784020954195 +103786121910886231356329255266684560049635603,103786462193253152294792718641291958484513726,81831878724978940630 +105487533745490923673646128303843323329137826,105487874027857844612109591678450721764015949,81786925374088591012 +107188945580095615990963001341002086608640049,107189285862462536929426464715609485043518172,81741996717776781652 +108890357414700308308279874378160849888142272,108890697697067229246743337752768248323020395,81697092742477845094 +110591769249305000625596747415319613167644495,110592109531671921564060210789927011602522618,81652213434633566014 +112293181083909692942913620452478376447146718,112293521366276613881377083827085774882024841,81607358780693177131 +113994592918514385260230493489637139726648941,113994933200881306198693956864244538161527064,81562528767113355113 +115696004753119077577547366526795903006151164,115696345035485998516010829901403301441029287,81517723380358216491 +117397416587723769894864239563954666285653387,117397756870090690833327702938562064720531510,81472942606899313566 +119098828422328462212181112601113429565155610,119099168704695383150644575975720828000033733,81428186433215630329 +120800240256933154529497985638272192844657833,120800580539300075467961449012879591279535956,81383454845793578379 +122501652091537846846814858675430956124160056,122501992373904767785278322050038354559038179,81338747831126992840 +124203063926142539164131731712589719403662279,124203404208509460102595195087197117838540402,81294065375717128282 +125904475760747231481448604749748482683164502,125904816043114152419912068124355881118042625,81249407466072654651 +127605887595351923798765477786907245962666725,127606227877718844737228941161514644397544848,81204774088709653188 
+129307299429956616116082350824066009242168948,129307639712323537054545814198673407677047071,81160165230151612364 +131008711264561308433399223861224772521671171,131009051546928229371862687235832170956549294,81115580876929423806 +132710123099166000750716096898383535801173394,132710463381532921689179560272990934236051517,81071021015581378236 +134411534933770693068032969935542299080675617,134411875216137614006496433310149697515553740,81026485632653161398 +136112946768375385385349842972701062360177840,136113287050742306323813306347308460795055963,80981974714697850005 +137814358602980077702666716009859825639680063,137814698885346998641130179384467224074558186,80937488248275907672 +139515770437584770019983589047018588919182286,139516110719951690958447052421625987354060409,80893026219955180860 +141217182272189462337300462084177352198684509,141217522554556383275763925458784750633562632,80848588616310894823 +142918594106794154654617335121336115478186732,142918934389161075593080798495943513913064855,80804175423925649550 +144620005941398846971934208158494878757688955,144620346223765767910397671533102277192567078,80759786629389415718 +146321417776003539289251081195653642037191178,146321758058370460227714544570261040472069301,80715422219299530638 +148022829610608231606567954232812405316693401,148023169892975152545031417607419803751571524,80671082180260694216 +149724241445212923923884827269971168596195624,149724581727579844862348290644578567031073747,80626766498884964900 +151425653279817616241201700307129931875697847,151425993562184537179665163681737330310575970,80582475161791755644 +153127065114422308558518573344288695155200070,153127405396789229496982036718896093590078193,80538208155607829862 +154828476949027000875835446381447458434702293,154828817231393921814298909756054856869580416,80493965466967297397 +156529888783631693193152319418606221714204516,156530229065998614131615782793213620149082639,80449747082511610479 
+158231300618236385510469192455764984993706739,158231640900603306448932655830372383428584862,80405552988889559697 +159932712452841077827786065492923748273208962,159933052735207998766249528867531146708087085,80361383172757269963 +161634124287445770145102938530082511552711185,161634464569812691083566401904689909987589308,80317237620778196486 +163335536122050462462419811567241274832213408,163335876404417383400883274941848673267091531,80273116319623120744 +165036947956655154779736684604400038111715631,165037288239022075718200147979007436546593754,80229019255970146460 +166738359791259847097053557641558801391217854,166738700073626768035517021016166199826095977,80184946416504695579 +168439771625864539414370430678717564670720077,168440111908231460352833894053324963105598200,80140897787919504247 +170141183460469231731687303715876327950222300,170141523742836152670150767090483726385100423,80096873356914618796 +171842595295073924049004176753035091229724523,171842935577440844987467640127642489664602646,80052873110197391726 +173544007129678616366321049790193854509226746,173544347412045537304784513164801252944104869,80008897034482477690 +175245418964283308683637922827352617788728969,175245759246650229622101386201960016223607092,79964945116491829488 +176946830798888001000954795864511381068231192,176947171081254921939418259239118779503109315,79921017342954694050 +178648242633492693318271668901670144347733415,178648582915859614256735132276277542782611538,79877113700607608438 +180349654468097385635588541938828907627235638,180349994750464306574052005313436306062113761,79833234176194395831 +182051066302702077952905414975987670906737861,182051406585068998891368878350595069341615984,79789378756466161534 +183752478137306770270222288013146434186240084,183752818419673691208685751387753832621118207,79745547428181288968 +185453889971911462587539161050305197465742307,185454230254278383526002624424912595900620430,79701740178105435677 
+187155301806516154904856034087463960745244530,187155642088883075843319497462071359180122653,79657956993011529330 +188856713641120847222172907124622724024746753,188857053923487768160636370499230122459624876,79614197859679763728 +190558125475725539539489780161781487304248976,190558465758092460477953243536388885739127099,79570462764897594812 +192259537310330231856806653198940250583751199,192259877592697152795270116573547649018629322,79526751695459736676 +193960949144934924174123526236099013863253422,193961289427301845112586989610706412298131545,79483064638168157575 +195662360979539616491440399273257777142755645,195662701261906537429903862647865175577633768,79439401579832075944 +197363772814144308808757272310416540422257868,197364113096511229747220735685023938857135991,79395762507267956415 +199065184648749001126074145347575303701760091,199065524931115922064537608722182702136638214,79352147407299505834 +200766596483353693443391018384734066981262314,200766936765720614381854481759341465416140437,79308556266757669284 +202468008317958385760707891421892830260764537,202468348600325306699171354796500228695642660,79264989072480626109 +204169420152563078078024764459051593540266760,204169760434929999016488227833658991975144883,79221445811313785940 +205870831987167770395341637496210356819768983,205871172269534691333805100870817755254647106,79177926470109784723 +207572243821772462712658510533369120099271206,207572584104139383651121973907976518534149329,79134431035728480748 +209273655656377155029975383570527883378773429,209273995938744075968438846945135281813651552,79090959495036950685 +210975067490981847347292256607686646658275652,210975407773348768285755719982294045093153775,79047511834909485612 +212676479325586539664609129644845409937777875,212676819607953460603072593019452808372655998,79004088042227587061 +214377891160191231981926002682004173217280098,214378231442558152920389466056611571652158221,78960688103879963048 
+216079302994795924299242875719162936496782321,216079643277162845237706339093770334931660444,78917312006762524122 +217780714829400616616559748756321699776284544,217781055111767537555023212130929098211162667,78873959737778379402 +219482126664005308933876621793480463055786767,219482466946372229872340085168087861490664890,78830631283837832627 +221183538498610001251193494830639226335288990,221183878780976922189656958205246624770167113,78787326631858378200 +222884950333214693568510367867797989614791213,222885290615581614506973831242405388049669336,78744045768764697242 +224586362167819385885827240904956752894293436,224586702450186306824290704279564151329171559,78700788681488653641 +226287774002424078203144113942115516173795659,226288114284790999141607577316722914608673782,78657555356969290108 +227989185837028770520460986979274279453297882,227989526119395691458924450353881677888176005,78614345782152824231 +229690597671633462837777860016433042732800105,229690937954000383776241323391040441167678228,78571159943992644537 +231392009506238155155094733053591806012302328,231392349788605076093558196428199204447180451,78527997829449306550 +233093421340842847472411606090750569291804551,233093761623209768410875069465357967726682674,78484859425490528854 +234794833175447539789728479127909332571306774,234795173457814460728191942502516731006184897,78441744719091189160 +236496245010052232107045352165068095850808997,236496585292419153045508815539675494285687120,78398653697233320373 +238197656844656924424362225202226859130311220,238197997127023845362825688576834257565189343,78355586346906106657 +239899068679261616741679098239385622409813443,239899408961628537680142561613993020844691566,78312542655105879515 +241600480513866309058995971276544385689315666,241600820796233229997459434651151784124193789,78269522608836113853 +243301892348471001376312844313703148968817889,243302232630837922314776307688310547403696012,78226526195107424063 
+245003304183075693693629717350861912248320112,245003644465442614632093180725469310683198235,78183553400937560098 +246704716017680386010946590388020675527822335,246705056300047306949410053762628073962700458,78140604213351403553 +248406127852285078328263463425179438807324558,248406468134651999266726926799786837242202681,78097678619380963748 +250107539686889770645580336462338202086826781,250107879969256691584043799836945600521704904,78054776606065373809 +251808951521494462962897209499496965366329004,251809291803861383901360672874104363801207127,78011898160450886759 +253510363356099155280214082536655728645831227,253510703638466076218677545911263127080709350,77969043269590871604 +255211775190703847597530955573814491925333450,255212115473070768535994418948421890360211573,77926211920545809426 +256913187025308539914847828610973255204835673,256913527307675460853311291985580653639713796,77883404100383289474 +258614598859913232232164701648132018484337896,258614939142280153170628165022739416919216019,77840619796178005261 +260316010694517924549481574685290781763840119,260316350976884845487945038059898180198718242,77797858995011750659 +262017422529122616866798447722449545043342342,262017762811489537805261911097056943478220465,77755121683973416002 +263718834363727309184115320759608308322844565,263719174646094230122578784134215706757722688,77712407850158984183 +265420246198332001501432193796767071602346788,265420586480698922439895657171374470037224911,77669717480671526762 +267121658032936693818749066833925834881849011,267121998315303614757212530208533233316727134,77627050562621200072 +268823069867541386136065939871084598161351234,268823410149908307074529403245691996596229357,77584407083125241322 +270524481702146078453382812908243361440853457,270524821984512999391846276282850759875731580,77541787029307964712 +272225893536750770770699685945402124720355680,272226233819117691709163149320009523155233803,77499190388300757545 
+273927305371355463088016558982560887999857903,273927645653722384026480022357168286434736026,77456617147242076341 +275628717205960155405333432019719651279360126,275629057488327076343796895394327049714238249,77414067293277442949 +277330129040564847722650305056878414558862349,277330469322931768661113768431485812993740472,77371540813559440674 +279031540875169540039967178094037177838364572,279031881157536460978430641468644576273242695,77329037695247710392 +280732952709774232357284051131195941117866795,280733292992141153295747514505803339552744918,77286557925508946675 +282434364544378924674600924168354704397369018,282434704826745845613064387542962102832247141,77244101491516893916 +284135776378983616991917797205513467676871241,284136116661350537930381260580120866111749364,77201668380452342454 +285837188213588309309234670242672230956373464,285837528495955230247698133617279629391251587,77159258579503124710 +287538600048193001626551543279830994235875687,287538940330559922565015006654438392670753810,77116872075864111309 +289240011882797693943868416316989757515377910,289240352165164614882331879691597155950256033,77074508856737207221 +290941423717402386261185289354148520794880133,290941763999769307199648752728755919229758256,77032168909331347895 +292642835552007078578502162391307284074382356,292643175834373999516965625765914682509260479,76989852220862495396 +294344247386611770895819035428466047353884579,294344587668978691834282498803073445788762702,76947558778553634544 +296045659221216463213135908465624810633386802,296045999503583384151599371840232209068264925,76905288569634769059 +297747071055821155530452781502783573912889025,297747411338188076468916244877390972347767148,76863041581342917702 +299448482890425847847769654539942337192391248,299448823172792768786233117914549735627269371,76820817800922110425 +301149894725030540165086527577101100471893471,301150235007397461103549990951708498906771594,76778617215623384516 
+302851306559635232482403400614259863751395694,302851646842002153420866863988867262186273817,76736439812704780753 +304552718394239924799720273651418627030897917,304553058676606845738183737026026025465776040,76694285579431339553 +306254130228844617117037146688577390310400140,306254470511211538055500610063184788745278263,76652154503075097129 +307955542063449309434354019725736153589902363,307955882345816230372817483100343552024780486,76610046570915081648 +309656953898054001751670892762894916869404586,309657294180420922690134356137502315304282709,76567961770237309387 +311358365732658694068987765800053680148906809,311358706015025615007451229174661078583784932,76525900088334780897 +313059777567263386386304638837212443428409032,313060117849630307324768102211819841863287155,76483861512507477165 +314761189401868078703621511874371206707911255,314761529684234999642084975248978605142789378,76441846030062355780 +316462601236472771020938384911529969987413478,316462941518839691959401848286137368422291601,76399853628313347098 +318164013071077463338255257948688733266915701,318164353353444384276718721323296131701793824,76357884294581350416 +319865424905682155655572130985847496546417924,319865765188049076594035594360454894981296047,76315938016194230140 +321566836740286847972889004023006259825920147,321567177022653768911352467397613658260798270,76274014780486811961 +323268248574891540290205877060165023105422370,323268588857258461228669340434772421540300493,76232114574800879029 +324969660409496232607522750097323786384924593,324970000691863153545986213471931184819802716,76190237386485168132 +326671072244100924924839623134482549664426816,326671412526467845863303086509089948099304939,76148383202895365878 +328372484078705617242156496171641312943929039,328372824361072538180619959546248711378807162,76106552011394104874 +330073895913310309559473369208800076223431262,330074236195677230497936832583407474658309385,76064743799350959910 
+331775307747915001876790242245958839502933485,331775648030281922815253705620566237937811608,76022958554142444150 +333476719582519694194107115283117602782435708,333477059864886615132570578657725001217313831,75981196263152005315 +335178131417124386511423988320276366061937931,335178471699491307449887451694883764496816054,75939456913770021879 +336879543251729078828740861357435129341440154,336879883534095999767204324732042527776318277,75897740493393799256 +338580955086333771146057734394593892620942377,338581295368700692084521197769201291055820500,75856046989427565998 +340282366920938463463374607431752655900444600,340282707203305384401838070806360054335322723,75814376389282469993 +341983778755543155780691480468911419179946823,341984119037910076719154943843518817614824946,75772728680376574661 +343685190590147848098008353506070182459449046,343685530872514769036471816880677580894327169,75731103850134855157 +345386602424752540415325226543228945738951269,345386942707119461353788689917836344173829392,75689501885989194573 +347088014259357232732642099580387709018453492,347088354541724153671105562954995107453331615,75647922775378380143 +348789426093961925049958972617546472297955715,348789766376328845988422435992153870732833838,75606366505748099453 +350490837928566617367275845654705235577457938,350491178210933538305739309029312634012336061,75564833064550936648 +352192249763171309684592718691863998856960161,352192590045538230623056182066471397291838284,75523322439246368641 +353893661597776002001909591729022762136462384,353894001880142922940373055103630160571340507,75481834617300761334 +355595073432380694319226464766181525415964607,355595413714747615257689928140788923850842730,75440369586187365825 +357296485266985386636543337803340288695466830,357296825549352307575006801177947687130344953,75398927333386314632 +358997897101590078953860210840499051974969053,358998237383956999892323674215106450409847176,75357507846384617909 
+360699308936194771271177083877657815254471276,360699649218561692209640547252265213689349399,75316111112676159670 +362400720770799463588493956914816578533973499,362401061053166384526957420289423976968851622,75274737119761694014 +364102132605404155905810829951975341813475722,364102472887771076844274293326582740248353845,75233385855148841346 +365803544440008848223127702989134105092977945,365803884722375769161591166363741503527856068,75192057306352084610 +367504956274613540540444576026292868372480168,367505296556980461478908039400900266807358291,75150751460892765518 +369206368109218232857761449063451631651982391,369206708391585153796224912438059030086860514,75109468306299080781 +370907779943822925175078322100610394931484614,370908120226189846113541785475217793366362737,75068207830106078345 +372609191778427617492395195137769158210986837,372609532060794538430858658512376556645864960,75026970019855653623 +374310603613032309809712068174927921490489060,374310943895399230748175531549535319925367183,74985754863096545742 +376012015447637002127028941212086684769991283,376012355730003923065492404586694083204869406,74944562347384333773 +377713427282241694444345814249245448049493506,377713767564608615382809277623852846484371629,74903392460281432984 +379414839116846386761662687286404211328995729,379415179399213307700126150661011609763873852,74862245189357091075 +381116250951451079078979560323562974608497952,381116591233818000017443023698170373043376075,74821120522187384431 +382817662786055771396296433360721737888000175,382818003068422692334759896735329136322878298,74780018446355214370 +384519074620660463713613306397880501167502398,384519414903027384652076769772487899602380521,74738938949450303391 +386220486455265156030930179435039264447004621,386220826737632076969393642809646662881882744,74697882019069191428 +387921898289869848348247052472198027726506844,387922238572236769286710515846805426161384967,74656847642815232108 
+389623310124474540665563925509356791006009067,389623650406841461604027388883964189440887190,74615835808298589003 +391324721959079232982880798546515554285511290,391325062241446153921344261921122952720389413,74574846503136231892 +393026133793683925300197671583674317565013513,393026474076050846238661134958281715999891636,74533879714951933024 +394727545628288617617514544620833080844515736,394727885910655538555978007995440479279393859,74492935431376263375 +396428957462893309934831417657991844124017959,396429297745260230873294881032599242558896082,74452013640046588920 +398130369297498002252148290695150607403520182,398130709579864923190611754069758005838398305,74411114328607066894 +399831781132102694569465163732309370683022405,399832121414469615507928627106916769117900528,74370237484708642069 +401533192966707386886782036769468133962524628,401533533249074307825245500144075532397402751,74329383096009043016 +403234604801312079204098909806626897242026851,403234945083679000142562373181234295676904974,74288551150172778387 +404936016635916771521415782843785660521529074,404936356918283692459879246218393058956407197,74247741634871133184 +406637428470521463838732655880944423801031297,406637768752888384777196119255551822235909420,74206954537782165041 +408338840305126156156049528918103187080533520,408339180587493077094512992292710585515411643,74166189846590700502 +410040252139730848473366401955261950360035743,410040592422097769411829865329869348794913866,74125447548988331299 +411741663974335540790683274992420713639537966,411742004256702461729146738367028112074416089,74084727632673410643 +413443075808940233108000148029579476919040189,413443416091307154046463611404186875353918312,74044030085351049503 +415144487643544925425317021066738240198542412,415144827925911846363780484441345638633420535,74003354894733112898 +416845899478149617742633894103897003478044635,416846239760516538681097357478504401912922758,73962702048538216183 
+418547311312754310059950767141055766757546858,418547651595121230998414230515663165192424981,73922071534491721346 +420248723147359002377267640178214530037049081,420249063429725923315731103552821928471927204,73881463340325733294 +421950134981963694694584513215373293316551304,421950475264330615633047976589980691751429427,73840877453779096158 +423651546816568387011901386252532056596053527,423651887098935307950364849627139455030931650,73800313862597389584 +425352958651173079329218259289690819875555750,425353298933540000267681722664298218310433873,73759772554532925036 +427054370485777771646535132326849583155057973,427054710768144692584998595701456981589936096,73719253517344742097 +428755782320382463963852005364008346434560196,428756122602749384902315468738615744869438319,73678756738798604773 +430457194154987156281168878401167109714062419,430457534437354077219632341775774508148940542,73638282206666997800 +432158605989591848598485751438325872993564642,432158946271958769536949214812933271428442765,73597829908729122951 +433860017824196540915802624475484636273066865,433860358106563461854266087850092034707944988,73557399832770895347 +435561429658801233233119497512643399552569088,435561769941168154171582960887250797987447211,73516991966584939767 +437262841493405925550436370549802162832071311,437263181775772846488899833924409561266949434,73476606297970586965 +438964253328010617867753243586960926111573534,438964593610377538806216706961568324546451657,73436242814733869984 +440665665162615310185070116624119689391075757,440666005444982231123533579998727087825953880,73395901504687520475 +442367076997220002502386989661278452670577980,442367417279586923440850453035885851105456103,73355582355650965019 +444068488831824694819703862698437215950080203,444068829114191615758167326073044614384958326,73315285355450321444 +445769900666429387137020735735595979229582426,445770240948796308075484199110203377664460549,73275010491918395156 
+447471312501034079454337608772754742509084649,447471652783401000392801072147362140943962772,73234757752894675460 +449172724335638771771654481809913505788586872,449173064618005692710117945184520904223464995,73194527126225331890 +450874136170243464088971354847072269068089095,450874476452610385027434818221679667502967218,73154318599763210541 +452575548004848156406288227884231032347591318,452575888287215077344751691258838430782469441,73114132161367830399 +454276959839452848723605100921389795627093541,454277300121819769662068564295997194061971664,73073967798905379675 +455978371674057541040921973958548558906595764,455978711956424461979385437333155957341473887,73033825500248712143 +457679783508662233358238846995707322186097987,457680123791029154296702310370314720620976110,72993705253277343480 +459381195343266925675555720032866085465600210,459381535625633846614019183407473483900478333,72953607045877447602 +461082607177871617992872593070024848745102433,461082947460238538931336056444632247179980556,72913530865941853008 +462784019012476310310189466107183612024604656,462784359294843231248652929481791010459482779,72873476701370039127 +464485430847081002627506339144342375304106879,464485771129447923565969802518949773738985002,72833444540068132661 +466186842681685694944823212181501138583609102,466187182964052615883286675556108537018487225,72793434369948903935 +467888254516290387262140085218659901863111325,467888594798657308200603548593267300297989448,72753446178931763248 +469589666350895079579456958255818665142613548,469590006633262000517920421630426063577491671,72713479954942757222 +471291078185499771896773831292977428422115771,471291418467866692835237294667584826856993894,72673535685914565161 +472992490020104464214090704330136191701617994,472992830302471385152554167704743590136496117,72633613359786495407 +474693901854709156531407577367294954981120217,474694242137076077469871040741902353415998340,72593712964504481693 
+476395313689313848848724450404453718260622440,476395653971680769787187913779061116695500563,72553834488021079509 +478096725523918541166041323441612481540124663,478097065806285462104504786816219879975002786,72513977918295462464 +479798137358523233483358196478771244819626886,479798477640890154421821659853378643254505009,72474143243293418647 +481499549193127925800675069515930008099129109,481499889475494846739138532890537406534007232,72434330450987346997 +483200961027732618117991942553088771378631332,483201301310099539056455405927696169813509455,72394539529356253668 +484902372862337310435308815590247534658133555,484902713144704231373772278964854933093011678,72354770466385748403 +486603784696942002752625688627406297937635778,486604124979308923691089152002013696372513901,72315023250068040905 +488305196531546695069942561664565061217138001,488305536813913616008406025039172459652016124,72275297868401937209 +490006608366151387387259434701723824496640224,490006948648518308325722898076331222931518347,72235594309392836063 +491708020200756079704576307738882587776142447,491708360483123000643039771113489986211020570,72195912561052725301 +493409432035360772021893180776041351055644670,493409772317727692960356644150648749490522793,72156252611400178229 +495110843869965464339210053813200114335146893,495111184152332385277673517187807512770025016,72116614448460350002 +496812255704570156656526926850358877614649116,496812595986937077594990390224966276049527239,72076998060264974014 +498513667539174848973843799887517640894151339,498514007821541769912307263262125039329029462,72037403434852358278 +500215079373779541291160672924676404173653562,500215419656146462229624136299283802608531685,71997830560267381819 +501916491208384233608477545961835167453155785,501916831490751154546941009336442565888033908,71958279424561491064 +503617903042988925925794418998993930732658008,503618243325355846864257882373601329167536131,71918750015792696231 
+505319314877593618243111292036152694012160231,505319655159960539181574755410760092447038354,71879242322025567726 +507020726712198310560428165073311457291662454,507021066994565231498891628447918855726540577,71839756331331232539 +508722138546803002877745038110470220571164677,508722478829169923816208501485077619006042800,71800292031787370643 +510423550381407695195061911147628983850666900,510423890663774616133525374522236382285545023,71760849411478211389 +512124962216012387512378784184787747130169123,512125302498379308450842247559395145565047246,71721428458494529918 +513826374050617079829695657221946510409671346,513826714332984000768159120596553908844549469,71682029160933643554 +515527785885221772147012530259105273689173569,515528126167588693085475993633712672124051692,71642651506899408219 +517229197719826464464329403296264036968675792,517229538002193385402792866670871435403553915,71603295484502214836 +518930609554431156781646276333422800248178015,518930949836798077720109739708030198683056138,71563961081858985740 +520632021389035849098963149370581563527680238,520632361671402770037426612745188961962558361,71524648287093171094 +522333433223640541416280022407740326807182461,522333773506007462354743485782347725242060584,71485357088334745295 +524034845058245233733596895444899090086684684,524035185340612154672060358819506488521562807,71446087473720203399 +525736256892849926050913768482057853366186907,525736597175216846989377231856665251801065030,71406839431392557531 +527437668727454618368230641519216616645689130,527438009009821539306694104893824015080567253,71367612949501333313 +529139080562059310685547514556375379925191353,529139420844426231624010977930982778360069476,71328408016202566276 +530840492396664003002864387593534143204693576,530840832679030923941327850968141541639571699,71289224619658798295 +532541904231268695320181260630692906484195799,532542244513635616258644724005300304919073922,71250062748039074004 
+534243316065873387637498133667851669763698022,534243656348240308575961597042459068198576145,71210922389518937233 +535944727900478079954815006705010433043200245,535945068182845000893278470079617831478078368,71171803532280427431 +537646139735082772272131879742169196322702468,537646480017449693210595343116776594757580591,71132706164512076103 +539347551569687464589448752779327959602204691,539347891852054385527912216153935358037082814,71093630274408903238 +541048963404292156906765625816486722881706914,541049303686659077845229089191094121316585037,71054575850172413751 +542750375238896849224082498853645486161209137,542750715521263770162545962228252884596087260,71015542880010593913 +544451787073501541541399371890804249440711360,544452127355868462479862835265411647875589483,70976531352137907800 +546153198908106233858716244927963012720213583,546153539190473154797179708302570411155091706,70937541254775293726 +547854610742710926176033117965121775999715806,547854951025077847114496581339729174434593929,70898572576150160690 +549556022577315618493349991002280539279218029,549556362859682539431813454376887937714096152,70859625304496384821 +551257434411920310810666864039439302558720252,551257774694287231749130327414046700993598375,70820699428054305829 +552958846246525003127983737076598065838222475,552959186528891924066447200451205464273100598,70781794935070723445 +554660258081129695445300610113756829117724698,554660598363496616383764073488364227552602821,70742911813798893884 +556361669915734387762617483150915592397226921,556362010198101308701080946525522990832105044,70704050052498526288 +558063081750339080079934356188074355676729144,558063422032706001018397819562681754111607267,70665209639435779188 +559764493584943772397251229225233118956231367,559764833867310693335714692599840517391109490,70626390562883256957 +561465905419548464714568102262391882235733590,561466245701915385653031565636999280670611713,70587592811120006272 
+563167317254153157031884975299550645515235813,563167657536520077970348438674158043950113936,70548816372431512573 +564868729088757849349201848336709408794738036,564869069371124770287665311711316807229616159,70510061235109696526 +566570140923362541666518721373868172074240259,566570481205729462604982184748475570509118382,70471327387452910491 +568271552757967233983835594411026935353742482,568271893040334154922299057785634333788620605,70432614817765934983 +569972964592571926301152467448185698633244705,569973304874938847239615930822793097068122828,70393923514359975147 +571674376427176618618469340485344461912746928,571674716709543539556932803859951860347625051,70355253465552657223 +573375788261781310935786213522503225192249151,573376128544148231874249676897110623627127274,70316604659668025023 +575077200096386003253103086559661988471751374,575077540378752924191566549934269386906629497,70277977085036536405 +576778611930990695570419959596820751751253597,576778952213357616508883422971428150186131720,70239370729995059746 +578480023765595387887736832633979515030755820,578480364047962308826200296008586913465633943,70200785582886870426 +580181435600200080205053705671138278310258043,580181775882567001143517169045745676745136166,70162221632061647304 +581882847434804772522370578708297041589760266,581883187717171693460834042082904440024638389,70123678865875469200 +583584259269409464839687451745455804869262489,583584599551776385778150915120063203304140612,70085157272690811384 +585285671104014157157004324782614568148764712,585286011386381078095467788157221966583642835,70046656840876542057 +586987082938618849474321197819773331428266935,586987423220985770412784661194380729863145058,70008177558807918842 +588688494773223541791638070856932094707769158,588688835055590462730101534231539493142647281,69969719414866585275 +590389906607828234108954943894090857987271381,590390246890195155047418407268698256422149504,69931282397440567292 
+592091318442432926426271816931249621266773604,592091658724799847364735280305857019701651727,69892866494924269730 +593792730277037618743588689968408384546275827,593793070559404539682052153343015782981153950,69854471695718472815 +595494142111642311060905563005567147825778050,595494482394009231999369026380174546260656173,69816097988230328666 +597195553946247003378222436042725911105280273,597195894228613924316685899417333309540158396,69777745360873357794 +598896965780851695695539309079884674384782496,598897306063218616634002772454492072819660619,69739413802067445600 +600598377615456388012856182117043437664284719,600598717897823308951319645491650836099162842,69701103300238838880 +602299789450061080330173055154202200943786942,602300129732428001268636518528809599378665065,69662813843820142333 +604001201284665772647489928191360964223289165,604001541567032693585953391565968362658167288,69624545421250315066 +605702613119270464964806801228519727502791388,605702953401637385903270264603127125937669511,69586298020974667102 +607404024953875157282123674265678490782293611,607404365236242078220587137640285889217171734,69548071631444855895 +609105436788479849599440547302837254061795834,609105777070846770537904010677444652496673957,69509866241118882841 +610806848623084541916757420339996017341298057,610807188905451462855220883714603415776176180,69471681838461089791 +612508260457689234234074293377154780620800280,612508600740056155172537756751762179055678403,69433518411942155573 +614209672292293926551391166414313543900302503,614210012574660847489854629788920942335180626,69395375950039092504 +615911084126898618868708039451472307179804726,615911424409265539807171502826079705614682849,69357254441235242918 +617612495961503311186024912488631070459306949,617612836243870232124488375863238468894185072,69319153874020275685 +619313907796108003503341785525789833738809172,619314248078474924441805248900397232173687295,69281074236890182733 
+621015319630712695820658658562948597018311395,621015659913079616759122121937555995453189518,69243015518347275581 +622716731465317388137975531600107360297813618,622717071747684309076438994974714758732691741,69204977706900181862 +624418143299922080455292404637266123577315841,624418483582289001393755868011873522012193964,69166960791063841856 +626119555134526772772609277674424886856818064,626119895416893693711072741049032285291696187,69128964759359505020 +627820966969131465089926150711583650136320287,627821307251498386028389614086191048571198410,69090989600314726524 +629522378803736157407243023748742413415822510,629522719086103078345706487123349811850700633,69053035302463363788 +631223790638340849724559896785901176695324733,631224130920707770663023360160508575130202856,69015101854345573017 +632925202472945542041876769823059939974826956,632925542755312462980340233197667338409705079,68977189244507805742 +634626614307550234359193642860218703254329179,634626954589917155297657106234826101689207302,68939297461502805364 +636328026142154926676510515897377466533831402,636328366424521847614973979271984864968709525,68901426493889603693 +638029437976759618993827388934536229813333625,638029778259126539932290852309143628248211748,68863576330233517499 +639730849811364311311144261971694993092835848,639731190093731232249607725346302391527713971,68825746959106145054 +641432261645969003628461135008853756372338071,641432601928335924566924598383461154807216194,68787938369085362686 +643133673480573695945778008046012519651840294,643134013762940616884241471420619918086718417,68750150548755321327 +644835085315178388263094881083171282931342517,644835425597545309201558344457778681366220640,68712383486706443069 +646536497149783080580411754120330046210844740,646536837432150001518875217494937444645722863,68674637171535417716 +648237908984387772897728627157488809490346963,648238249266754693836192090532096207925225086,68636911591845199341 
+649939320818992465215045500194647572769849186,649939661101359386153508963569254971204727309,68599206736245002849 +651640732653597157532362373231806336049351409,651641072935964078470825836606413734484229532,68561522593350300532 +653342144488201849849679246268965099328853632,653342484770568770788142709643572497763731755,68523859151782818637 +655043556322806542166996119306123862608355855,655043896605173463105459582680731261043233978,68486216400170533926 +656744968157411234484312992343282625887858078,656745308439778155422776455717890024322736201,68448594327147670244 +658446379992015926801629865380441389167360301,658446720274382847740093328755048787602238424,68410992921354695088 +660147791826620619118946738417600152446862524,660148132108987540057410201792207550881740647,68373412171438316178 +661849203661225311436263611454758915726364747,661849543943592232374727074829366314161242870,68335852066051478024 +663550615495830003753580484491917679005866970,663550955778196924692043947866525077440745093,68298312593853358506 +665252027330434696070897357529076442285369193,665252367612801617009360820903683840720247316,68260793743509365447 +666953439165039388388214230566235205564871416,666953779447406309326677693940842603999749539,68223295503691133191 +668654850999644080705531103603393968844373639,668655191282011001643994566978001367279251762,68185817863076519181 +670356262834248773022847976640552732123875862,670356603116615693961311440015160130558753985,68148360810349600542 +672057674668853465340164849677711495403378085,672058014951220386278628313052318893838256208,68110924334200670667 +673759086503458157657481722714870258682880308,673759426785825078595945186089477657117758431,68073508423326235795 +675460498338062849974798595752029021962382531,675460838620429770913262059126636420397260654,68036113066429011606 +677161910172667542292115468789187785241884754,677162250455034463230578932163795183676762877,67998738252217919803 
+678863322007272234609432341826346548521386977,678863662289639155547895805200953946956265100,67961383969408084708 +680564733841876926926749214863505311800889200,680565074124243847865212678238112710235767323,67924050206720829853 +682266145676481619244066087900664075080391423,682266485958848540182529551275271473515269546,67886736952883674573 +683967557511086311561382960937822838359893646,683967897793453232499846424312430236794771769,67849444196630330605 +685668969345691003878699833974981601639395869,685669309628057924817163297349589000074273992,67812171926700698683 +687370381180295696196016707012140364918898092,687370721462662617134480170386747763353776215,67774920131840865142 +689071793014900388513333580049299128198400315,689072133297267309451797043423906526633278438,67737688800803098516 +690773204849505080830650453086457891477902538,690773545131872001769113916461065289912780661,67700477922345846148 +692474616684109773147967326123616654757404761,692474956966476694086430789498224053192282884,67663287485233730786 +694176028518714465465284199160775418036906984,694176368801081386403747662535382816471785107,67626117478237547201 +695877440353319157782601072197934181316409207,695877780635686078721064535572541579751287330,67588967890134258790 +697578852187923850099917945235092944595911430,697579192470290771038381408609700343030789553,67551838709706994188 +699280264022528542417234818272251707875413653,699280604304895463355698281646859106310291776,67514729925745043886 +700981675857133234734551691309410471154915876,700982016139500155673015154684017869589793999,67477641527043856838 +702683087691737927051868564346569234434418099,702683427974104847990332027721176632869296222,67440573502405037085 +704384499526342619369185437383727997713920322,704384839808709540307648900758335396148798445,67403525840636340371 +706085911360947311686502310420886760993422545,706086251643314232624965773795494159428300668,67366498530551670762 
+707787323195552004003819183458045524272924768,707787663477918924942282646832652922707802891,67329491560971077273 +709488735030156696321136056495204287552426991,709489075312523617259599519869811685987305114,67292504920720750488 +711190146864761388638452929532363050831929214,711190487147128309576916392906970449266807337,67255538598633019186 +712891558699366080955769802569521814111431437,712891898981733001894233265944129212546309560,67218592583546346974 +714592970533970773273086675606680577390933660,714593310816337694211550138981287975825811783,67181666864305328913 +716294382368575465590403548643839340670435883,716294722650942386528867012018446739105314006,67144761429760688149 +717995794203180157907720421680998103949938106,717996134485547078846183885055605502384816229,67107876268769272550 +719697206037784850225037294718156867229440329,719697546320151771163500758092764265664318452,67071011370194051338 +721398617872389542542354167755315630508942552,721398958154756463480817631129923028943820675,67034166722904111729 +723100029706994234859671040792474393788444775,723100369989361155798134504167081792223322898,66997342315774655570 +724801441541598927176987913829633157067946998,724801781823965848115451377204240555502825121,66960538137686995982 +726502853376203619494304786866791920347449221,726503193658570540432768250241399318782327344,66923754177528554001 +728204265210808311811621659903950683626951444,728204605493175232750085123278558082061829567,66886990424192855226 +729905677045413004128938532941109446906453667,729906017327779925067401996315716845341331790,66850246866579526460 +731607088880017696446255405978268210185955890,731607429162384617384718869352875608620834013,66813523493594292362 +733308500714622388763572279015426973465458113,733308840996989309702035742390034371900336236,66776820294148972099 +735009912549227081080889152052585736744960336,735010252831594002019352615427193135179838459,66740137257161475994 
+736711324383831773398206025089744500024462559,736711664666198694336669488464351898459340682,66703474371555802181 +738412736218436465715522898126903263303964782,738413076500803386653986361501510661738842905,66666831626262033262 +740114148053041158032839771164062026583467005,740114488335408078971303234538669425018345128,66630209010216332966 +741815559887645850350156644201220789862969228,741815900170012771288620107575828188297847351,66593606512360942804 +743516971722250542667473517238379553142471451,743517312004617463605936980612986951577349574,66557024121644178733 +745218383556855234984790390275538316421973674,745218723839222155923253853650145714856851797,66520461827020427819 +746919795391459927302107263312697079701475897,746920135673826848240570726687304478136354020,66483919617450144901 +748621207226064619619424136349855842980978120,748621547508431540557887599724463241415856243,66447397481899849260 +750322619060669311936741009387014606260480343,750322959343036232875204472761622004695358466,66410895409342121284 +752024030895274004254057882424173369539982566,752024371177640925192521345798780767974860689,66374413388755599143 +753725442729878696571374755461332132819484789,753725783012245617509838218835939531254362912,66337951409124975454 +755426854564483388888691628498490896098987012,755427194846850309827155091873098294533865135,66301509459440993964 +757128266399088081206008501535649659378489235,757128606681455002144471964910257057813367358,66265087528700446220 +758829678233692773523325374572808422657991458,758830018516059694461788837947415821092869581,66228685605906168248 +760531090068297465840642247609967185937493681,760531430350664386779105710984574584372371804,66192303680067037232 +762232501902902158157959120647125949216995904,762232842185269079096422584021733347651874027,66155941740197968197 +763933913737506850475275993684284712496498127,763934254019873771413739457058892110931376250,66119599775319910693 
+765635325572111542792592866721443475776000350,765635665854478463731056330096050874210878473,66083277774459845475 +767336737406716235109909739758602239055502573,767337077689083156048373203133209637490380696,66046975726650781195 +769038149241320927427226612795761002335004796,769038489523687848365690076170368400769882919,66010693620931751089 +770739561075925619744543485832919765614507019,770739901358292540683006949207527164049385142,65974431446347809666 +772440972910530312061860358870078528894009242,772441313192897233000323822244685927328887365,65938189191950029403 +774142384745135004379177231907237292173511465,774142725027501925317640695281844690608389588,65901966846795497437 +775843796579739696696494104944396055453013688,775844136862106617634957568319003453887891811,65865764399947312262 +777545208414344389013810977981554818732515911,777545548696711309952274441356162217167394034,65829581840474580424 +779246620248949081331127851018713582012018134,779246960531316002269591314393320980446896257,65793419157452413227 +780948032083553773648444724055872345291520357,780948372365920694586908187430479743726398480,65757276339961923427 +782649443918158465965761597093031108571022580,782649784200525386904225060467638507005900703,65721153377090221938 +784350855752763158283078470130189871850524803,784351196035130079221541933504797270285402926,65685050257930414541 +786052267587367850600395343167348635130027026,786052607869734771538858806541956033564905149,65648966971581598583 +787753679421972542917712216204507398409529249,787754019704339463856175679579114796844407372,65612903507148859693 +789455091256577235235029089241666161689031472,789455431538944156173492552616273560123909595,65576859853743268487 +791156503091181927552345962278824924968533695,791156843373548848490809425653432323403411818,65540836000481877283 +792857914925786619869662835315983688248035918,792858255208153540808126298690591086682914041,65504831936487716816 
+794559326760391312186979708353142451527538141,794559667042758233125443171727749849962416264,65468847650889792949 +796260738594996004504296581390301214807040364,796261078877362925442760044764908613241918487,65432883132823083396 +797962150429600696821613454427459978086542587,797962490711967617760076917802067376521420710,65396938371428534440 +799663562264205389138930327464618741366044810,799663902546572310077393790839226139800922933,65361013355853057653 +801364974098810081456247200501777504645547033,801365314381177002394710663876384903080425156,65325108075249526618 +803066385933414773773564073538936267925049256,803066726215781694712027536913543666359927379,65289222518776773657 +804767797768019466090880946576095031204551479,804768138050386387029344409950702429639429602,65253356675599586557 +806469209602624158408197819613253794484053702,806469549884991079346661282987861192918931825,65217510534888705294 +808170621437228850725514692650412557763555925,808170961719595771663978156025019956198434048,65181684085820818771 +809872033271833543042831565687571321043058148,809872373554200463981295029062178719477936271,65145877317578561543 +811573445106438235360148438724730084322560371,811573785388805156298611902099337482757438494,65110090219350510555 +813274856941042927677465311761888847602062594,813275197223409848615928775136496246036940717,65074322780331181875 +814976268775647619994782184799047610881564817,814976609058014540933245648173655009316442940,65038574989721027434 +816677680610252312312099057836206374161067040,816678020892619233250562521210813772595945163,65002846836726431763 +818379092444857004629415930873365137440569263,818379432727223925567879394247972535875447386,64967138310559708735 +820080504279461696946732803910523900720071486,820080844561828617885196267285131299154949609,64931449400439098307 +821781916114066389264049676947682663999573709,821782256396433310202513140322290062434451832,64895780095588763267 
+823483327948671081581366549984841427279075932,823483668231038002519830013359448825713954055,64860130385238785977 +825184739783275773898683423022000190558578155,825185080065642694837146886396607588993456278,64824500258625165123 +826886151617880466216000296059158953838080378,826886491900247387154463759433766352272958501,64788889704989812466 +828587563452485158533317169096317717117582601,828587903734852079471780632470925115552460724,64753298713580549590 +830288975287089850850634042133476480397084824,830289315569456771789097505508083878831962947,64717727273651104660 +831990387121694543167950915170635243676587047,831990727404061464106414378545242642111465170,64682175374461109174 +833691798956299235485267788207794006956089270,833692139238666156423731251582401405390967393,64646643005276094722 +835393210790903927802584661244952770235591493,835393551073270848741048124619560168670469616,64611130155367489744 +837094622625508620119901534282111533515093716,837094962907875541058364997656718931949971839,64575636814012616290 +838796034460113312437218407319270296794595939,838796374742480233375681870693877695229474062,64540162970494686784 +840497446294718004754535280356429060074098162,840497786577084925692998743731036458508976285,64504708614102800788 +842198858129322697071852153393587823353600385,842199198411689618010315616768195221788478508,64469273734131941766 +843900269963927389389169026430746586633102608,843900610246294310327632489805353985067980731,64433858319882973853 +845601681798532081706485899467905349912604831,845602022080899002644949362842512748347482954,64398462360662638626 +847303093633136774023802772505064113192107054,847303433915503694962266235879671511626985177,64363085845783551874 +849004505467741466341119645542222876471609277,849004845750108387279583108916830274906487400,64327728764564200368 +850705917302346158658436518579381639751111500,850706257584713079596899981953989038185989623,64292391106328938643 
+852407329136950850975753391616540403030613723,852407669419317771914216854991147801465491846,64257072860407985768 +854108740971555543293070264653699166310115946,854109081253922464231533728028306564744994069,64221774016137422127 +855810152806160235610387137690857929589618169,855810493088527156548850601065465328024496292,64186494562859186199 +857511564640764927927704010728016692869120392,857511904923131848866167474102624091303998515,64151234489921071342 +859212976475369620245020883765175456148622615,859213316757736541183484347139782854583500738,64115993786676722571 +860914388309974312562337756802334219428124838,860914728592341233500801220176941617863002961,64080772442485633350 +862615800144579004879654629839492982707627061,862616140426945925818118093214100381142505184,64045570446713142375 +864317211979183697196971502876651745987129284,864317552261550618135434966251259144422007407,64010387788730430365 +866018623813788389514288375913810509266631507,866018964096155310452751839288417907701509630,63975224457914516852 +867720035648393081831605248950969272546133730,867720375930760002770068712325576670981011853,63940080443648256973 +869421447482997774148922121988128035825635953,869421787765364695087385585362735434260514076,63904955735320338264 diff --git a/actors/reward/src/v16/testdata/TestSimpleReward.golden b/actors/reward/src/v16/testdata/TestSimpleReward.golden new file mode 100644 index 00000000..6117a314 --- /dev/null +++ b/actors/reward/src/v16/testdata/TestSimpleReward.golden @@ -0,0 +1,513 @@ +x, y +0,36266264293777134739 +5000,36246341860983438171 +10000,36226430372336764970 +15000,36206529821825080595 +20000,36186640203439653146 +25000,36166761511175051545 +30000,36146893739029143725 +35000,36127036881003094819 +40000,36107190931101365346 +45000,36087355883331709402 +50000,36067531731705172852 +55000,36047718470236091517 +60000,36027916092942089375 +65000,36008124593844076745 +70000,35988343966966248490 +75000,35968574206336082207 +80000,35948815305984336427 
+85000,35929067259945048810 +90000,35909330062255534348 +95000,35889603706956383558 +100000,35869888188091460688 +105000,35850183499707901919 +110000,35830489635856113563 +115000,35810806590589770270 +120000,35791134357965813232 +125000,35771472932044448387 +130000,35751822306889144629 +135000,35732182476566632013 +140000,35712553435146899962 +145000,35692935176703195481 +150000,35673327695312021363 +155000,35653730985053134403 +160000,35634145040009543611 +165000,35614569854267508423 +170000,35595005421916536916 +175000,35575451737049384026 +180000,35555908793762049762 +185000,35536376586153777422 +190000,35516855108327051817 +195000,35497344354387597482 +200000,35477844318444376903 +205000,35458354994609588738 +210000,35438876376998666034 +215000,35419408459730274454 +220000,35399951236926310503 +225000,35380504702711899749 +230000,35361068851215395052 +235000,35341643676568374790 +240000,35322229172905641086 +245000,35302825334365218041 +250000,35283432155088349958 +255000,35264049629219499580 +260000,35244677750906346316 +265000,35225316514299784477 +270000,35205965913553921511 +275000,35186625942826076233 +280000,35167296596276777067 +285000,35147977868069760277 +290000,35128669752371968212 +295000,35109372243353547537 +300000,35090085335187847477 +305000,35070809022051418057 +310000,35051543298124008345 +315000,35032288157588564692 +320000,35013043594631228977 +325000,34993809603441336853 +330000,34974586178211415988 +335000,34955373313137184317 +340000,34936171002417548287 +345000,34916979240254601104 +350000,34897798020853620985 +355000,34878627338423069407 +360000,34859467187174589358 +365000,34840317561323003590 +370000,34821178455086312872 +375000,34802049862685694246 +380000,34782931778345499277 +385000,34763824196293252314 +390000,34744727110759648747 +395000,34725640515978553261 +400000,34706564406186998099 +405000,34687498775625181322 +410000,34668443618536465065 +415000,34649398929167373804 +420000,34630364701767592618 +425000,34611340930589965451 
+430000,34592327609890493376 +435000,34573324733928332864 +440000,34554332296965794049 +445000,34535350293268338995 +450000,34516378717104579964 +455000,34497417562746277688 +460000,34478466824468339636 +465000,34459526496548818288 +470000,34440596573268909408 +475000,34421677048912950313 +480000,34402767917768418153 +485000,34383869174125928181 +490000,34364980812279232031 +495000,34346102826525215998 +500000,34327235211163899311 +505000,34308377960498432415 +510000,34289531068835095251 +515000,34270694530483295536 +520000,34251868339755567043 +525000,34233052490967567889 +530000,34214246978438078812 +535000,34195451796489001460 +540000,34176666939445356677 +545000,34157892401635282787 +550000,34139128177390033882 +555000,34120374261043978112 +560000,34101630646934595971 +565000,34082897329402478592 +570000,34064174302791326035 +575000,34045461561447945578 +580000,34026759099722250015 +585000,34008066911967255944 +590000,33989384992539082067 +595000,33970713335796947482 +600000,33952051936103169985 +605000,33933400787823164359 +610000,33914759885325440683 +615000,33896129222981602625 +620000,33877508795166345743 +625000,33858898596257455790 +630000,33840298620635807012 +635000,33821708862685360457 +640000,33803129316793162272 +645000,33784559977349342015 +650000,33766000838747110957 +655000,33747451895382760391 +660000,33728913141655659939 +665000,33710384571968255862 +670000,33691866180726069369 +675000,33673357962337694928 +680000,33654859911214798578 +685000,33636372021772116242 +690000,33617894288427452039 +695000,33599426705601676601 +700000,33580969267718725386 +705000,33562521969205596996 +710000,33544084804492351495 +715000,33525657768012108722 +720000,33507240854201046620 +725000,33488834057498399544 +730000,33470437372346456594 +735000,33452050793190559927 +740000,33433674314479103086 +745000,33415307930663529321 +750000,33396951636198329914 +755000,33378605425541042506 +760000,33360269293152249423 +765000,33341943233495576000 
+770000,33323627241037688917 +775000,33305321310248294519 +780000,33287025435600137154 +785000,33268739611568997498 +790000,33250463832633690894 +795000,33232198093276065677 +800000,33213942387981001514 +805000,33195696711236407735 +810000,33177461057533221671 +815000,33159235421365406991 +820000,33141019797229952037 +825000,33122814179626868163 +830000,33104618563059188076 +835000,33086432942032964176 +840000,33068257311057266896 +845000,33050091664644183044 +850000,33031935997308814148 +855000,33013790303569274797 +860000,32995654577946690988 +865000,32977528814965198472 +870000,32959413009151941097 +875000,32941307155037069161 +880000,32923211247153737756 +885000,32905125280038105120 +890000,32887049248229330986 +895000,32868983146269574933 +900000,32850926968703994738 +905000,32832880710080744731 +910000,32814844364950974146 +915000,32796817927868825478 +920000,32778801393391432838 +925000,32760794756078920308 +930000,32742798010494400302 +935000,32724811151203971919 +940000,32706834172776719310 +945000,32688867069784710030 +950000,32670909836802993406 +955000,32652962468409598893 +960000,32635024959185534441 +965000,32617097303714784859 +970000,32599179496584310176 +975000,32581271532384044011 +980000,32563373405706891936 +985000,32545485111148729845 +990000,32527606643308402322 +995000,32509737996787721011 +1000000,32491879166191462986 +1005000,32474030146127369118 +1010000,32456190931206142454 +1015000,32438361516041446585 +1020000,32420541895249904018 +1025000,32402732063451094557 +1030000,32384932015267553672 +1035000,32367141745324770879 +1040000,32349361248251188117 +1045000,32331590518678198123 +1050000,32313829551240142816 +1055000,32296078340574311671 +1060000,32278336881320940107 +1065000,32260605168123207863 +1070000,32242883195627237380 +1075000,32225170958482092190 +1080000,32207468451339775295 +1085000,32189775668855227557 +1090000,32172092605686326078 +1095000,32154419256493882593 +1100000,32136755615941641854 +1105000,32119101678696280022 
+1110000,32101457439427403051 +1115000,32083822892807545087 +1120000,32066198033512166852 +1125000,32048582856219654040 +1130000,32030977355611315709 +1135000,32013381526371382675 +1140000,31995795363187005909 +1145000,31978218860748254929 +1150000,31960652013748116201 +1155000,31943094816882491533 +1160000,31925547264850196477 +1165000,31908009352352958723 +1170000,31890481074095416507 +1175000,31872962424785117005 +1180000,31855453399132514738 +1185000,31837953991850969976 +1190000,31820464197656747139 +1195000,31802984011269013204 +1200000,31785513427409836109 +1205000,31768052440804183162 +1210000,31750601046179919443 +1215000,31733159238267806217 +1220000,31715727011801499344 +1225000,31698304361517547681 +1230000,31680891282155391503 +1235000,31663487768457360906 +1240000,31646093815168674227 +1245000,31628709417037436451 +1250000,31611334568814637627 +1255000,31593969265254151288 +1260000,31576613501112732858 +1265000,31559267271150018079 +1270000,31541930570128521418 +1275000,31524603392813634497 +1280000,31507285733973624502 +1285000,31489977588379632610 +1290000,31472678950805672410 +1295000,31455389816028628319 +1300000,31438110178828254013 +1305000,31420840033987170845 +1310000,31403579376290866273 +1315000,31386328200527692284 +1320000,31369086501488863820 +1325000,31351854273968457206 +1330000,31334631512763408579 +1335000,31317418212673512315 +1340000,31300214368501419460 +1345000,31283019975052636163 +1350000,31265835027135522102 +1355000,31248659519561288921 +1360000,31231493447143998662 +1365000,31214336804700562200 +1370000,31197189587050737676 +1375000,31180051789017128935 +1380000,31162923405425183960 +1385000,31145804431103193314 +1390000,31128694860882288574 +1395000,31111594689596440773 +1400000,31094503912082458840 +1405000,31077422523179988040 +1410000,31060350517731508415 +1415000,31043287890582333230 +1420000,31026234636580607413 +1425000,31009190750577306003 +1430000,30992156227426232592 +1435000,30975131061984017774 
+1440000,30958115249110117588 +1445000,30941108783666811972 +1450000,30924111660519203206 +1455000,30907123874535214365 +1460000,30890145420585587766 +1465000,30873176293543883425 +1470000,30856216488286477503 +1475000,30839265999692560763 +1480000,30822324822644137022 +1485000,30805392952026021606 +1490000,30788470382725839805 +1495000,30771557109634025334 +1500000,30754653127643818780 +1505000,30737758431651266072 +1510000,30720873016555216931 +1515000,30703996877257323335 +1520000,30687130008662037976 +1525000,30670272405676612725 +1530000,30653424063211097091 +1535000,30636584976178336685 +1540000,30619755139493971688 +1545000,30602934548076435308 +1550000,30586123196846952254 +1555000,30569321080729537196 +1560000,30552528194650993236 +1565000,30535744533540910376 +1570000,30518970092331663985 +1575000,30502204865958413271 +1580000,30485448849359099750 +1585000,30468702037474445720 +1590000,30451964425247952730 +1595000,30435236007625900057 +1600000,30418516779557343177 +1605000,30401806735994112241 +1610000,30385105871890810552 +1615000,30368414182204813041 +1620000,30351731661896264742 +1625000,30335058305928079272 +1630000,30318394109265937314 +1635000,30301739066878285090 +1640000,30285093173736332845 +1645000,30268456424814053330 +1650000,30251828815088180284 +1655000,30235210339538206915 +1660000,30218600993146384385 +1665000,30202000770897720297 +1670000,30185409667779977178 +1675000,30168827678783670969 +1680000,30152254798902069507 +1685000,30135691023131191020 +1690000,30119136346469802612 +1695000,30102590763919418751 +1700000,30086054270484299766 +1705000,30069526861171450333 +1710000,30053008530990617971 +1715000,30036499274954291531 +1720000,30019999088077699695 +1725000,30003507965378809469 +1730000,29987025901878324677 +1735000,29970552892599684460 +1740000,29954088932569061773 +1745000,29937634016815361880 +1750000,29921188140370220858 +1755000,29904751298268004095 +1760000,29888323485545804788 +1765000,29871904697243442447 
+1770000,29855494928403461400 +1775000,29839094174071129289 +1780000,29822702429294435581 +1785000,29806319689124090070 +1790000,29789945948613521380 +1795000,29773581202818875477 +1800000,29757225446799014173 +1805000,29740878675615513631 +1810000,29724540884332662882 +1815000,29708212068017462327 +1820000,29691892221739622251 +1825000,29675581340571561335 +1830000,29659279419588405166 +1835000,29642986453867984751 +1840000,29626702438490835031 +1845000,29610427368540193398 +1850000,29594161239101998203 +1855000,29577904045264887282 +1860000,29561655782120196465 +1865000,29545416444761958098 +1870000,29529186028286899564 +1875000,29512964527794441794 +1880000,29496751938386697796 +1885000,29480548255168471173 +1890000,29464353473247254644 +1895000,29448167587733228567 +1900000,29431990593739259465 +1905000,29415822486380898546 +1910000,29399663260776380234 +1915000,29383512912046620688 +1920000,29367371435315216335 +1925000,29351238825708442396 +1930000,29335115078355251410 +1935000,29319000188387271772 +1940000,29302894150938806253 +1945000,29286796961146830539 +1950000,29270708614150991759 +1955000,29254629105093607016 +1960000,29238558429119661925 +1965000,29222496581376809142 +1970000,29206443557015366902 +1975000,29190399351188317555 +1980000,29174363959051306100 +1985000,29158337375762638725 +1990000,29142319596483281341 +1995000,29126310616376858128 +2000000,29110310430609650067 +2005000,29094319034350593486 +2010000,29078336422771278598 +2015000,29062362591045948044 +2020000,29046397534351495438 +2025000,29030441247867463909 +2030000,29014493726776044643 +2035000,28998554966262075434 +2040000,28982624961513039226 +2045000,28966703707719062659 +2050000,28950791200072914622 +2055000,28934887433770004795 +2060000,28918992404008382204 +2065000,28903106105988733766 +2070000,28887228534914382845 +2075000,28871359685991287801 +2080000,28855499554428040540 +2085000,28839648135435865072 +2090000,28823805424228616064 +2095000,28807971416022777392 
+2100000,28792146106037460699 +2105000,28776329489494403952 +2110000,28760521561617969998 +2115000,28744722317635145121 +2120000,28728931752775537605 +2125000,28713149862271376289 +2130000,28697376641357509128 +2135000,28681612085271401759 +2140000,28665856189253136056 +2145000,28650108948545408700 +2150000,28634370358393529736 +2155000,28618640414045421140 +2160000,28602919110751615388 +2165000,28587206443765254015 +2170000,28571502408342086186 +2175000,28555806999740467263 +2180000,28540120213221357374 +2185000,28524442044048319980 +2190000,28508772487487520445 +2195000,28493111538807724610 +2200000,28477459193280297361 +2205000,28461815446179201201 +2210000,28446180292780994828 +2215000,28430553728364831702 +2220000,28414935748212458624 +2225000,28399326347608214310 +2230000,28383725521839027967 +2235000,28368133266194417872 +2240000,28352549575966489946 +2245000,28336974446449936336 +2250000,28321407872942033992 +2255000,28305849850742643249 +2260000,28290300375154206404 +2265000,28274759441481746304 +2270000,28259227045032864923 +2275000,28243703181117741948 +2280000,28228187845049133359 +2285000,28212681032142370020 +2290000,28197182737715356260 +2295000,28181692957088568460 +2300000,28166211685585053643 +2305000,28150738918530428055 +2310000,28135274651252875762 +2315000,28119818879083147235 +2320000,28104371597354557938 +2325000,28088932801402986924 +2330000,28073502486566875425 +2335000,28058080648187225440 +2340000,28042667281607598337 +2345000,28027262382174113438 +2350000,28011865945235446620 +2355000,27996477966142828909 +2360000,27981098440250045075 +2365000,27965727362913432230 +2370000,27950364729491878425 +2375000,27935010535346821252 +2380000,27919664775842246439 +2385000,27904327446344686454 +2390000,27888998542223219102 +2395000,27873678058849466131 +2400000,27858365991597591831 +2405000,27843062335844301640 +2410000,27827767086968840747 +2415000,27812480240352992695 +2420000,27797201791381077989 +2425000,27781931735439952703 
+2430000,27766670067919007084 +2435000,27751416784210164162 +2440000,27736171879707878359 +2445000,27720935349809134096 +2450000,27705707189913444406 +2455000,27690487395422849544 +2460000,27675275961741915599 +2465000,27660072884277733104 +2470000,27644878158439915652 +2475000,27629691779640598509 +2480000,27614513743294437231 +2485000,27599344044818606274 +2490000,27584182679632797616 +2495000,27569029643159219370 +2500000,27553884930822594406 +2505000,27538748538050158965 +2510000,27523620460271661280 +2515000,27508500692919360199 +2520000,27493389231428023800 +2525000,27478286071234928018 +2530000,27463191207779855265 +2535000,27448104636505093052 +2540000,27433026352855432615 +2545000,27417956352278167539 +2550000,27402894630223092384 +2555000,27387841182142501307 diff --git a/actors/reward/src/v16/types.rs b/actors/reward/src/v16/types.rs new file mode 100644 index 00000000..188b1599 --- /dev/null +++ b/actors/reward/src/v16/types.rs @@ -0,0 +1,29 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use fvm_ipld_encoding::tuple::*; +use fvm_shared4::address::Address; +use fvm_shared4::bigint::bigint_ser::BigIntDe; +use fvm_shared4::econ::TokenAmount; + +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +#[serde(transparent)] +pub struct ConstructorParams { + pub power: Option, +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct AwardBlockRewardParams { + pub miner: Address, + pub penalty: TokenAmount, + pub gas_reward: TokenAmount, + pub win_count: i64, +} + +pub use fil_actors_shared::v16::builtin::reward::ThisEpochRewardReturn; + +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +#[serde(transparent)] +pub struct UpdateNetworkKPIParams { + pub curr_realized_power: Option, +} diff --git a/actors/system/src/lib.rs b/actors/system/src/lib.rs index f95f7f59..26e0c7c7 100644 --- a/actors/system/src/lib.rs +++ 
b/actors/system/src/lib.rs @@ -7,5 +7,6 @@ pub mod v12; pub mod v13; pub mod v14; pub mod v15; +pub mod v16; pub mod v8; pub mod v9; diff --git a/actors/system/src/v16/mod.rs b/actors/system/src/v16/mod.rs new file mode 100644 index 00000000..cfcbaca6 --- /dev/null +++ b/actors/system/src/v16/mod.rs @@ -0,0 +1,48 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use cid::Cid; +use fil_actors_shared::v16::{ActorError, AsActorError}; +use fvm_ipld_blockstore::Blockstore; +use fvm_ipld_encoding::tuple::*; +use fvm_ipld_encoding::CborStore; +use fvm_shared4::error::ExitCode; +use fvm_shared4::METHOD_CONSTRUCTOR; +use num_derive::FromPrimitive; + +/// System actor methods. +#[derive(FromPrimitive)] +#[repr(u64)] +pub enum Method { + Constructor = METHOD_CONSTRUCTOR, +} + +/// System actor state. +#[derive(Default, Deserialize_tuple, Serialize_tuple, Debug, Clone)] +pub struct State { + // builtin actor registry: Vec<(String, Cid)> + pub builtin_actors: Cid, +} + +impl State { + pub fn new(store: &BS) -> Result { + let c = store + .put_cbor( + &Vec::<(String, Cid)>::new(), + multihash_codetable::Code::Blake2b256, + ) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to store system state")?; + Ok(Self { builtin_actors: c }) + } + + pub fn get_builtin_actors( + &self, + store: &B, + ) -> Result, String> { + match store.get_cbor(&self.builtin_actors) { + Ok(Some(obj)) => Ok(obj), + Ok(None) => Err("failed to load builtin actor registry; not found".to_string()), + Err(e) => Err(e.to_string()), + } + } +} diff --git a/actors/verifreg/src/lib.rs b/actors/verifreg/src/lib.rs index f95f7f59..26e0c7c7 100644 --- a/actors/verifreg/src/lib.rs +++ b/actors/verifreg/src/lib.rs @@ -7,5 +7,6 @@ pub mod v12; pub mod v13; pub mod v14; pub mod v15; +pub mod v16; pub mod v8; pub mod v9; diff --git a/actors/verifreg/src/v16/expiration.rs b/actors/verifreg/src/v16/expiration.rs new file mode 100644 index 00000000..712775a5 --- /dev/null +++ 
b/actors/verifreg/src/v16/expiration.rs @@ -0,0 +1,94 @@ +use crate::v16::{Allocation, Claim}; +use fil_actors_shared::v16::{ + parse_uint_key, ActorError, AsActorError, BatchReturn, BatchReturnGen, MapMap, +}; +use fvm_ipld_blockstore::Blockstore; +use fvm_shared4::clock::ChainEpoch; +use fvm_shared4::error::ExitCode; +use fvm_shared4::ActorID; +use log::info; +use serde::de::DeserializeOwned; +use serde::Serialize; + +// Something with an expiration epoch. +pub trait Expires { + fn expiration(&self) -> ChainEpoch; +} + +impl Expires for Allocation { + fn expiration(&self) -> ChainEpoch { + self.expiration + } +} + +impl Expires for Claim { + fn expiration(&self) -> ChainEpoch { + self.term_start + self.term_max + } +} + +// Finds all items in a collection for some owner that have expired. +// Returns those items' keys. +pub fn find_expired( + collection: &mut MapMap, + owner: ActorID, + curr_epoch: ChainEpoch, +) -> Result, ActorError> +where + T: Expires + Serialize + DeserializeOwned + Clone + PartialEq, + BS: Blockstore, +{ + let mut found_ids = Vec::::new(); + collection + .for_each_in(owner, |key, record| { + if curr_epoch >= record.expiration() { + let id = parse_uint_key(key) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to parse uint key")?; + found_ids.push(id); + } + Ok(()) + }) + .context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to iterate over allocations/claims", + )?; + Ok(found_ids) +} + +// Checks each candidate item from the collection for expiration. +// Returns a batch return with OK for expired items, and FORBIDDEN for non-expired. +pub fn check_expired( + collection: &mut MapMap, + candidates: &Vec, + owner: ActorID, + curr_epoch: ChainEpoch, +) -> Result +where + T: Expires + Serialize + DeserializeOwned + Clone + PartialEq, + BS: Blockstore, +{ + let mut ret_gen = BatchReturnGen::new(candidates.len()); + for id in candidates { + // Check each specified claim is expired. 
+ let maybe_record = collection.get(owner, *id).context_code( + ExitCode::USR_ILLEGAL_STATE, + "HAMT lookup failure getting allocation/claim", + )?; + + if let Some(record) = maybe_record { + if curr_epoch >= record.expiration() { + ret_gen.add_success(); + } else { + ret_gen.add_fail(ExitCode::USR_FORBIDDEN); + info!("cannot remove allocation/claim {} that has not expired", id); + } + } else { + ret_gen.add_fail(ExitCode::USR_NOT_FOUND); + info!( + "allocation/claim references id {} that does not belong to {}", + id, owner, + ); + } + } + Ok(ret_gen.gen()) +} diff --git a/actors/verifreg/src/v16/ext.rs b/actors/verifreg/src/v16/ext.rs new file mode 100644 index 00000000..26866a63 --- /dev/null +++ b/actors/verifreg/src/v16/ext.rs @@ -0,0 +1,46 @@ +use fvm_ipld_encoding::strict_bytes; +use fvm_ipld_encoding::tuple::*; +use fvm_ipld_encoding::tuple::{Deserialize_tuple, Serialize_tuple}; +use fvm_shared4::address::Address; + +pub mod account { + use super::*; + + pub const AUTHENTICATE_MESSAGE_METHOD: u64 = + frc42_dispatch::method_hash!("AuthenticateMessage"); + + #[derive(Serialize_tuple, Deserialize_tuple)] + pub struct AuthenticateMessageParams { + #[serde(with = "strict_bytes")] + pub signature: Vec, + #[serde(with = "strict_bytes")] + pub message: Vec, + } +} + +pub mod datacap { + use super::*; + use fvm_shared4::econ::TokenAmount; + + #[repr(u64)] + pub enum Method { + Mint = frc42_dispatch::method_hash!("Mint"), + Destroy = frc42_dispatch::method_hash!("Destroy"), + Balance = frc42_dispatch::method_hash!("Balance"), + Transfer = frc42_dispatch::method_hash!("Transfer"), + Burn = frc42_dispatch::method_hash!("Burn"), + } + + #[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] + pub struct MintParams { + pub to: Address, + pub amount: TokenAmount, + pub operators: Vec
, + } + + #[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] + pub struct DestroyParams { + pub owner: Address, + pub amount: TokenAmount, + } +} diff --git a/actors/verifreg/src/v16/mod.rs b/actors/verifreg/src/v16/mod.rs new file mode 100644 index 00000000..7967279c --- /dev/null +++ b/actors/verifreg/src/v16/mod.rs @@ -0,0 +1,38 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use fvm_shared4::METHOD_CONSTRUCTOR; +use num_derive::FromPrimitive; + +pub use self::state::*; +pub use self::types::*; + +pub mod expiration; +pub mod ext; +pub mod state; +pub mod types; + +/// Account actor methods available +#[derive(FromPrimitive)] +#[repr(u64)] +pub enum Method { + Constructor = METHOD_CONSTRUCTOR, + AddVerifier = 2, + RemoveVerifier = 3, + AddVerifiedClient = 4, + // UseBytes = 5, // Deprecated + // RestoreBytes = 6, // Deprecated + RemoveVerifiedClientDataCap = 7, + RemoveExpiredAllocations = 8, + ClaimAllocations = 9, + GetClaims = 10, + ExtendClaimTerms = 11, + RemoveExpiredClaims = 12, + // Method numbers derived from FRC-0042 standards + AddVerifiedClientExported = frc42_dispatch::method_hash!("AddVerifiedClient"), + RemoveExpiredAllocationsExported = frc42_dispatch::method_hash!("RemoveExpiredAllocations"), + GetClaimsExported = frc42_dispatch::method_hash!("GetClaims"), + ExtendClaimTermsExported = frc42_dispatch::method_hash!("ExtendClaimTerms"), + RemoveExpiredClaimsExported = frc42_dispatch::method_hash!("RemoveExpiredClaims"), + UniversalReceiverHook = frc42_dispatch::method_hash!("Receive"), +} diff --git a/actors/verifreg/src/v16/state.rs b/actors/verifreg/src/v16/state.rs new file mode 100644 index 00000000..dd68a5a0 --- /dev/null +++ b/actors/verifreg/src/v16/state.rs @@ -0,0 +1,269 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use cid::Cid; +use fvm_ipld_blockstore::Blockstore; +use fvm_ipld_encoding::tuple::*; +use 
fvm_shared4::address::Address; +use fvm_shared4::bigint::bigint_ser::BigIntDe; +use fvm_shared4::clock::ChainEpoch; +use fvm_shared4::error::ExitCode; +use fvm_shared4::piece::PaddedPieceSize; +use fvm_shared4::sector::SectorNumber; +use fvm_shared4::{ActorID, HAMT_BIT_WIDTH}; + +use fil_actors_shared::actor_error_v16; +use fil_actors_shared::v16::{ActorError, AsActorError, Config, Map2, MapMap, DEFAULT_HAMT_CONFIG}; + +use crate::v16::{AddrPairKey, AllocationID, ClaimID}; +use crate::v16::{DataCap, RemoveDataCapProposalID}; + +pub type DataCapMap = Map2; +pub const DATACAP_MAP_CONFIG: Config = DEFAULT_HAMT_CONFIG; + +pub type RemoveDataCapProposalMap = Map2; +pub const REMOVE_DATACAP_PROPOSALS_CONFIG: Config = DEFAULT_HAMT_CONFIG; + +#[derive(Serialize_tuple, Deserialize_tuple, Debug, Clone)] +pub struct State { + pub root_key: Address, + // Maps verifier addresses to data cap minting allowance (in bytes). + pub verifiers: Cid, // HAMT[Address]DataCap + pub remove_data_cap_proposal_ids: Cid, + // Maps client IDs to allocations made by that client. + pub allocations: Cid, // HAMT[ActorID]HAMT[AllocationID]Allocation + // Next allocation identifier to use. + // The value 0 is reserved to mean "no allocation". + pub next_allocation_id: u64, + // Maps provider IDs to allocations claimed by that provider. 
+ pub claims: Cid, // HAMT[ActorID]HAMT[ClaimID]Claim +} + +impl State { + pub fn new(store: &BS, root_key: Address) -> Result { + let empty_dcap = DataCapMap::empty(store, DATACAP_MAP_CONFIG, "empty").flush()?; + let empty_allocs_claims = + MapMap::<_, (), ActorID, u64>::new(store, HAMT_BIT_WIDTH, HAMT_BIT_WIDTH) + .flush() + .map_err(|e| { + actor_error_v16!(illegal_state, "failed to create empty multi map: {}", e) + })?; + + Ok(State { + root_key, + verifiers: empty_dcap, + remove_data_cap_proposal_ids: empty_dcap, + allocations: empty_allocs_claims, + next_allocation_id: 1, + claims: empty_allocs_claims, + }) + } + + // Adds a verifier and cap, overwriting any existing cap for that verifier. + pub fn put_verifier( + &mut self, + store: &impl Blockstore, + verifier: &Address, + cap: &DataCap, + ) -> Result<(), ActorError> { + let mut verifiers = self.load_verifiers(store)?; + verifiers.set(verifier, BigIntDe(cap.clone()))?; + self.verifiers = verifiers.flush()?; + Ok(()) + } + + pub fn remove_verifier( + &mut self, + store: &impl Blockstore, + verifier: &Address, + ) -> Result<(), ActorError> { + let mut verifiers = self.load_verifiers(store)?; + verifiers + .delete(verifier)? 
+ .context_code(ExitCode::USR_ILLEGAL_ARGUMENT, "verifier not found")?; + self.verifiers = verifiers.flush()?; + Ok(()) + } + + pub fn get_verifier_cap( + &self, + store: &impl Blockstore, + verifier: &Address, + ) -> Result, ActorError> { + let verifiers = self.load_verifiers(store)?; + let allowance = verifiers.get(verifier)?; + Ok(allowance.map(|a| a.clone().0)) + } + + pub fn load_verifiers(&self, store: BS) -> Result, ActorError> { + DataCapMap::load(store, &self.verifiers, DATACAP_MAP_CONFIG, "verifiers") + } + + pub fn load_allocs<'a, BS: Blockstore>( + &self, + store: &'a BS, + ) -> Result, ActorError> { + MapMap::::from_root( + store, + &self.allocations, + HAMT_BIT_WIDTH, + HAMT_BIT_WIDTH, + ) + .context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to load allocations table", + ) + } + + pub fn save_allocs( + &mut self, + allocs: &mut MapMap<'_, BS, Allocation, ActorID, AllocationID>, + ) -> Result<(), ActorError> { + self.allocations = allocs.flush().context_code( + ExitCode::USR_ILLEGAL_STATE, + "failed to flush allocations table", + )?; + Ok(()) + } + + /// Inserts a batch of allocations under a single client address. + /// The allocations are assigned sequential IDs starting from the next available. + pub fn insert_allocations( + &mut self, + store: &BS, + client: ActorID, + new_allocs: Vec, + ) -> Result, ActorError> { + if new_allocs.is_empty() { + return Ok(vec![]); + } + let mut allocs = self.load_allocs(store)?; + // These local variables allow the id-associating map closure to move the allocations + // from the iterator rather than clone, without moving self. 
+ let first_id = self.next_allocation_id; + let mut count = 0; + let count_ref = &mut count; + allocs + .put_many( + client, + new_allocs.into_iter().map(move |a| { + let id = first_id + *count_ref; + *count_ref += 1; + (id, a) + }), + ) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to put allocations")?; + self.save_allocs(&mut allocs)?; + self.next_allocation_id += count; + let allocated_ids = (first_id..first_id + count).collect(); + Ok(allocated_ids) + } + + pub fn load_claims<'a, BS: Blockstore>( + &self, + store: &'a BS, + ) -> Result, ActorError> { + MapMap::::from_root( + store, + &self.claims, + HAMT_BIT_WIDTH, + HAMT_BIT_WIDTH, + ) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to load claims table") + } + + pub fn save_claims( + &mut self, + claims: &mut MapMap<'_, BS, Claim, ActorID, ClaimID>, + ) -> Result<(), ActorError> { + self.claims = claims + .flush() + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to flush claims table")?; + Ok(()) + } + + pub fn put_claims( + &mut self, + store: &BS, + claims: Vec<(ClaimID, Claim)>, + ) -> Result<(), ActorError> { + if claims.is_empty() { + return Ok(()); + } + let mut st_claims = self.load_claims(store)?; + for (id, claim) in claims.into_iter() { + st_claims + .put(claim.provider, id, claim) + .context_code(ExitCode::USR_ILLEGAL_STATE, "failed to put claim")?; + } + self.save_claims(&mut st_claims)?; + Ok(()) + } +} +#[derive(Serialize_tuple, Deserialize_tuple, Clone, Debug, PartialEq, Eq)] +pub struct Claim { + // The provider storing the data (from allocation). + pub provider: ActorID, + // The client which allocated the DataCap (from allocation). + pub client: ActorID, + // Identifier of the data committed (from allocation). + pub data: Cid, + // The (padded) size of data (from allocation). 
+ pub size: PaddedPieceSize, + // The min period after term_start which the provider must commit to storing data + pub term_min: ChainEpoch, + // The max period after term_start for which provider can earn QA-power for the data + pub term_max: ChainEpoch, + // The epoch at which the (first range of the) piece was committed. + pub term_start: ChainEpoch, + // ID of the provider's sector in which the data is committed. + pub sector: SectorNumber, +} + +#[derive(Serialize_tuple, Deserialize_tuple, Clone, Debug, PartialEq, Eq)] +pub struct Allocation { + // The verified client which allocated the DataCap. + pub client: ActorID, + // The provider (miner actor) which may claim the allocation. + pub provider: ActorID, + // Identifier of the data to be committed. + pub data: Cid, + // The (padded) size of data. + pub size: PaddedPieceSize, + // The minimum duration which the provider must commit to storing the piece to avoid + // early-termination penalties (epochs). + pub term_min: ChainEpoch, + // The maximum period for which a provider can earn quality-adjusted power + // for the piece (epochs). + pub term_max: ChainEpoch, + // The latest epoch by which a provider must commit data before the allocation expires. 
+ pub expiration: ChainEpoch, +} + +pub fn get_allocation<'a, BS>( + allocations: &'a mut MapMap, + client: ActorID, + id: AllocationID, +) -> Result, ActorError> +where + BS: Blockstore, +{ + allocations.get(client, id).context_code( + ExitCode::USR_ILLEGAL_STATE, + "HAMT lookup failure getting allocation", + ) +} + +pub fn get_claim<'a, BS>( + claims: &'a mut MapMap, + provider: ActorID, + id: ClaimID, +) -> Result, ActorError> +where + BS: Blockstore, +{ + claims.get(provider, id).context_code( + ExitCode::USR_ILLEGAL_STATE, + "HAMT lookup failure getting claim", + ) +} diff --git a/actors/verifreg/src/v16/types.rs b/actors/verifreg/src/v16/types.rs new file mode 100644 index 00000000..a6a40b20 --- /dev/null +++ b/actors/verifreg/src/v16/types.rs @@ -0,0 +1,260 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use cid::Cid; +use fil_actors_shared::v16::{BatchReturn, MapKey}; +use fvm_ipld_encoding::tuple::*; +use fvm_shared4::address::Address; +use fvm_shared4::bigint::{bigint_ser, BigInt}; +use fvm_shared4::clock::ChainEpoch; +use fvm_shared4::crypto::signature::Signature; +use fvm_shared4::piece::PaddedPieceSize; +use fvm_shared4::sector::SectorNumber; +use fvm_shared4::sector::StoragePower; +use fvm_shared4::ActorID; +use std::fmt::{Debug, Formatter}; + +use crate::v16::Claim; + +pub type AllocationID = u64; +pub type ClaimID = u64; + +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +#[serde(transparent)] +pub struct ConstructorParams { + pub root_key: Address, +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct VerifierParams { + pub address: Address, + #[serde(with = "bigint_ser")] + pub allowance: DataCap, +} + +pub type AddVerifierParams = VerifierParams; + +pub type AddVerifiedClientParams = VerifierParams; + +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +#[serde(transparent)] +pub struct RemoveVerifierParams { + 
pub verifier: Address, +} + +/// DataCap is an integer number of bytes. +/// We can introduce policy changes and replace this in the future. +pub type DataCap = StoragePower; + +pub const SIGNATURE_DOMAIN_SEPARATION_REMOVE_DATA_CAP: &[u8] = b"fil_removedatacap:"; + +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct RemoveDataCapParams { + pub verified_client_to_remove: Address, + #[serde(with = "bigint_ser")] + pub data_cap_amount_to_remove: DataCap, + pub verifier_request_1: RemoveDataCapRequest, + pub verifier_request_2: RemoveDataCapRequest, +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct RemoveDataCapRequest { + pub verifier: Address, + pub signature: Signature, +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct RemoveDataCapReturn { + pub verified_client: Address, + #[serde(with = "bigint_ser")] + pub data_cap_removed: DataCap, +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct RemoveDataCapProposalID { + pub id: u64, +} + +#[derive(Debug, Serialize_tuple, Deserialize_tuple)] +pub struct RemoveDataCapProposal { + pub verified_client: Address, + #[serde(with = "bigint_ser")] + pub data_cap_amount: DataCap, + pub removal_proposal_id: RemoveDataCapProposalID, +} + +pub struct AddrPairKey { + pub first: Address, + pub second: Address, +} + +impl AddrPairKey { + pub fn new(first: Address, second: Address) -> Self { + AddrPairKey { first, second } + } +} + +impl Debug for AddrPairKey { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + (self.first, self.second).fmt(f) + } +} + +impl MapKey for AddrPairKey { + fn from_bytes(_b: &[u8]) -> Result { + unimplemented!() + } + + fn to_bytes(&self) -> Result, String> { + let mut first = self.first.to_bytes(); + let mut second = self.second.to_bytes(); + first.append(&mut second); + Ok(first) + } +} + +#[derive(Clone, Debug, PartialEq, Eq, 
Serialize_tuple, Deserialize_tuple)] +pub struct RemoveExpiredAllocationsParams { + // Client for which to remove expired allocations. + pub client: ActorID, + // Optional list of allocation IDs to attempt to remove. + // Empty means remove all eligible expired allocations. + pub allocation_ids: Vec, +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct RemoveExpiredAllocationsReturn { + // Ids of the allocations that were either specified by the caller or discovered to be expired. + pub considered: Vec, + // Results for each processed allocation. + pub results: BatchReturn, + // The amount of datacap reclaimed for the client. + #[serde(with = "bigint_ser")] + pub datacap_recovered: DataCap, +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct SectorAllocationClaims { + pub sector: SectorNumber, + pub expiry: ChainEpoch, + pub claims: Vec, +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct AllocationClaim { + pub client: ActorID, + pub allocation_id: AllocationID, + pub data: Cid, + pub size: PaddedPieceSize, +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct ClaimAllocationsParams { + /// Allocations to claim, grouped by sector. + pub sectors: Vec, + /// Whether to abort entirely if any claim fails. + /// If false, a failed claim will cause other claims in the same sector group to also fail, + /// but allow other sectors to proceed. + pub all_or_nothing: bool, +} + +#[derive(Clone, Debug, PartialEq, Eq, Default, Serialize_tuple, Deserialize_tuple)] +#[serde(transparent)] +pub struct SectorClaimSummary { + #[serde(with = "bigint_ser")] + pub claimed_space: BigInt, +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct ClaimAllocationsReturn { + /// Status of each sector grouping of claims. 
+ pub sector_results: BatchReturn, + /// The claimed space for each successful sector group. + pub sector_claims: Vec, +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct ClaimTerm { + pub provider: ActorID, + pub claim_id: ClaimID, + pub term_max: ChainEpoch, +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct ExtendClaimTermsParams { + pub terms: Vec, +} + +pub type ExtendClaimTermsReturn = BatchReturn; + +// +// Receiver hook payload +// + +// A request to create an allocation with datacap tokens. +// See Allocation state for description of field semantics. +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct AllocationRequest { + pub provider: ActorID, + pub data: Cid, + pub size: PaddedPieceSize, + pub term_min: ChainEpoch, + pub term_max: ChainEpoch, + pub expiration: ChainEpoch, +} + +// A request to extend the term of an existing claim with datacap tokens. +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct ClaimExtensionRequest { + pub provider: ActorID, + pub claim: ClaimID, + pub term_max: ChainEpoch, +} + +/// Operator-data payload for a datacap token transfer receiver hook specifying an allocation. +/// The implied client is the sender of the datacap. +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct AllocationRequests { + pub allocations: Vec, + pub extensions: Vec, +} + +/// Recipient data payload in response to a datacap token transfer. +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct AllocationsResponse { + // Result for each allocation request. + pub allocation_results: BatchReturn, + // Result for each extension request. + pub extension_results: BatchReturn, + // IDs of new allocations created. 
+ pub new_allocations: Vec, +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct GetClaimsParams { + pub provider: ActorID, + pub claim_ids: Vec, +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct GetClaimsReturn { + pub batch_info: BatchReturn, + pub claims: Vec, +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct RemoveExpiredClaimsParams { + // Provider to clean up (need not be the caller) + pub provider: ActorID, + // Optional list of claim IDs to attempt to remove. + // Empty means remove all eligible expired claims. + pub claim_ids: Vec, +} + +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct RemoveExpiredClaimsReturn { + // Ids of the claims that were either specified by the caller or discovered to be expired. + pub considered: Vec, + // Results for each processed claim. + pub results: BatchReturn, +} diff --git a/fil_actors_shared/src/abi/commp.rs b/fil_actors_shared/src/abi/commp.rs index 7b4706ff..37f0a6e3 100644 --- a/fil_actors_shared/src/abi/commp.rs +++ b/fil_actors_shared/src/abi/commp.rs @@ -3,9 +3,9 @@ use cid::Cid; use fvm_shared::{ - piece::PieceInfo as PieceInfoV2, sector::RegisteredSealProof as RegisteredSealProofV2, + commcid::data_commitment_v1_to_cid, piece::PieceInfo as PieceInfoV2, + sector::RegisteredSealProof as RegisteredSealProofV2, }; -use fvm_shared4::commcid::data_commitment_v1_to_cid; /// Computes an unsealed sector CID (`CommD`) from its constituent piece CIDs (`CommPs`) and sizes. 
/// diff --git a/fil_actors_shared/src/lib.rs b/fil_actors_shared/src/lib.rs index ddbc2e51..549785a2 100644 --- a/fil_actors_shared/src/lib.rs +++ b/fil_actors_shared/src/lib.rs @@ -8,6 +8,7 @@ pub mod v12; pub mod v13; pub mod v14; pub mod v15; +pub mod v16; pub mod v8; pub mod v9; diff --git a/fil_actors_shared/src/v16/actor_error.rs b/fil_actors_shared/src/v16/actor_error.rs new file mode 100644 index 00000000..b3197d36 --- /dev/null +++ b/fil_actors_shared/src/v16/actor_error.rs @@ -0,0 +1,317 @@ +use fvm_ipld_encoding::de::DeserializeOwned; +use fvm_ipld_encoding::ipld_block::IpldBlock; +use std::fmt::Display; + +use fvm_shared4::error::ExitCode; +use thiserror::Error; + +/// The error type returned by actor method calls. +#[derive(Error, Debug, Clone, PartialEq, Eq)] +#[error("ActorError(exit_code: {exit_code:?}, msg: {msg})")] +pub struct ActorError { + /// The exit code for this invocation. + /// Codes less than `FIRST_USER_EXIT_CODE` are prohibited and will be overwritten by the VM. + exit_code: ExitCode, + /// Optional exit data + data: Option, + /// Message for debugging purposes, + msg: String, +} + +impl ActorError { + /// Creates a new ActorError. This method does not check that the code is in the + /// range of valid actor abort codes. + pub fn unchecked(code: ExitCode, msg: String) -> Self { + Self { + exit_code: code, + msg, + data: None, + } + } + + pub fn unchecked_with_data(code: ExitCode, msg: String, data: Option) -> Self { + Self { + exit_code: code, + msg, + data, + } + } + + /// Creates a new ActorError. This method checks if the exit code is within the allowed range, + /// and automatically converts it into a user code. + pub fn checked(code: ExitCode, msg: String, data: Option) -> Self { + let exit_code = match code { + // This means the called actor did something wrong. We can't "make up" a + // reasonable exit code. 
+ ExitCode::SYS_MISSING_RETURN + | ExitCode::SYS_ILLEGAL_INSTRUCTION + | ExitCode::SYS_ILLEGAL_EXIT_CODE => ExitCode::USR_UNSPECIFIED, + // We don't expect any other system errors. + code if code.is_system_error() => ExitCode::USR_ASSERTION_FAILED, + // Otherwise, pass it through. + code => code, + }; + Self { + exit_code, + msg, + data, + } + } + + pub fn illegal_argument(msg: String) -> Self { + Self { + exit_code: ExitCode::USR_ILLEGAL_ARGUMENT, + msg, + data: None, + } + } + pub fn not_found(msg: String) -> Self { + Self { + exit_code: ExitCode::USR_NOT_FOUND, + msg, + data: None, + } + } + pub fn forbidden(msg: String) -> Self { + Self { + exit_code: ExitCode::USR_FORBIDDEN, + msg, + data: None, + } + } + pub fn insufficient_funds(msg: String) -> Self { + Self { + exit_code: ExitCode::USR_INSUFFICIENT_FUNDS, + msg, + data: None, + } + } + pub fn illegal_state(msg: String) -> Self { + Self { + exit_code: ExitCode::USR_ILLEGAL_STATE, + msg, + data: None, + } + } + pub fn serialization(msg: String) -> Self { + Self { + exit_code: ExitCode::USR_SERIALIZATION, + msg, + data: None, + } + } + pub fn unhandled_message(msg: String) -> Self { + Self { + exit_code: ExitCode::USR_UNHANDLED_MESSAGE, + msg, + data: None, + } + } + pub fn unspecified(msg: String) -> Self { + Self { + exit_code: ExitCode::USR_UNSPECIFIED, + msg, + data: None, + } + } + pub fn assertion_failed(msg: String) -> Self { + Self { + exit_code: ExitCode::USR_ASSERTION_FAILED, + msg, + data: None, + } + } + + pub fn read_only(msg: String) -> Self { + Self { + exit_code: ExitCode::USR_READ_ONLY, + msg, + data: None, + } + } + + /// Returns the exit code of the error. + pub fn exit_code(&self) -> ExitCode { + self.exit_code + } + + /// Error message of the actor error. + pub fn msg(&self) -> &str { + &self.msg + } + + /// Extracts the optional associated data without copying. 
+ pub fn take_data(&mut self) -> Option { + std::mem::take(&mut self.data) + } + + /// Prefix error message with a string message. + pub fn wrap(mut self, msg: impl AsRef) -> Self { + self.msg = format!("{}: {}", msg.as_ref(), self.msg); + self + } +} + +/// Converts a raw encoding error into an ErrSerialization. +impl From for ActorError { + fn from(e: fvm_ipld_encoding::Error) -> Self { + Self { + exit_code: ExitCode::USR_SERIALIZATION, + msg: e.to_string(), + data: None, + } + } +} + +/// Convenience macro for generating Actor Errors +#[macro_export] +macro_rules! actor_error_v16 { + // Error with only one stringable expression + ( $code:ident; $msg:expr ) => { $crate::v16::ActorError::$code($msg.to_string()) }; + + // String with positional arguments + ( $code:ident; $msg:literal $(, $ex:expr)+ ) => { + $crate::v16::ActorError::$code(format!($msg, $($ex,)*)) + }; + + // Error with only one stringable expression, with comma separator + ( $code:ident, $msg:expr ) => { $crate::actor_error_v16!($code; $msg) }; + + // String with positional arguments, with comma separator + ( $code:ident, $msg:literal $(, $ex:expr)+ ) => { + $crate::actor_error_v16!($code; $msg $(, $ex)*) + }; +} + +// Adds context to an actor error's descriptive message. +pub trait ActorContext { + fn context(self, context: C) -> Result + where + C: Display + 'static; + + fn with_context(self, f: F) -> Result + where + C: Display + 'static, + F: FnOnce() -> C; +} + +impl ActorContext for Result +where + E: Into, +{ + fn context(self, context: C) -> Result + where + C: Display + 'static, + { + self.map_err(|err| { + let mut err = err.into(); + err.msg = format!("{}: {}", context, err.msg); + err + }) + } + + fn with_context(self, f: F) -> Result + where + C: Display + 'static, + F: FnOnce() -> C, + { + self.map_err(|err| { + let mut err = err.into(); + err.msg = format!("{}: {}", f(), err.msg); + err + }) + } +} + +// Adapts a target into an actor error. 
+pub trait AsActorError: Sized { + fn exit_code(self, code: ExitCode) -> Result; + + fn context_code(self, code: ExitCode, context: C) -> Result + where + C: Display + 'static; + + fn with_context_code(self, code: ExitCode, f: F) -> Result + where + C: Display + 'static, + F: FnOnce() -> C; +} + +// Note: E should be std::error::Error, revert to this after anyhow:Error is no longer used. +impl AsActorError for Result { + fn exit_code(self, code: ExitCode) -> Result { + self.map_err(|err| ActorError { + exit_code: code, + msg: err.to_string(), + data: None, + }) + } + + fn context_code(self, code: ExitCode, context: C) -> Result + where + C: Display + 'static, + { + self.map_err(|err| ActorError { + exit_code: code, + msg: format!("{}: {}", context, err), + data: None, + }) + } + + fn with_context_code(self, code: ExitCode, f: F) -> Result + where + C: Display + 'static, + F: FnOnce() -> C, + { + self.map_err(|err| ActorError { + exit_code: code, + msg: format!("{}: {}", f(), err), + data: None, + }) + } +} + +impl AsActorError for Option { + fn exit_code(self, code: ExitCode) -> Result { + self.ok_or_else(|| ActorError { + exit_code: code, + msg: "None".to_string(), + data: None, + }) + } + + fn context_code(self, code: ExitCode, context: C) -> Result + where + C: Display + 'static, + { + self.ok_or_else(|| ActorError { + exit_code: code, + msg: context.to_string(), + data: None, + }) + } + + fn with_context_code(self, code: ExitCode, f: F) -> Result + where + C: Display + 'static, + F: FnOnce() -> C, + { + self.ok_or_else(|| ActorError { + exit_code: code, + msg: f().to_string(), + data: None, + }) + } +} + +pub fn deserialize_block(ret: Option) -> Result +where + T: DeserializeOwned, +{ + ret.context_code( + ExitCode::USR_ASSERTION_FAILED, + "return expected".to_string(), + )? 
+ .deserialize() + .exit_code(ExitCode::USR_SERIALIZATION) +} diff --git a/fil_actors_shared/src/v16/builtin/mod.rs b/fil_actors_shared/src/v16/builtin/mod.rs new file mode 100644 index 00000000..91747b51 --- /dev/null +++ b/fil_actors_shared/src/v16/builtin/mod.rs @@ -0,0 +1,11 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +pub use self::network::*; +pub use self::shared::*; +pub use self::singletons::*; + +pub mod network; +pub mod reward; +pub mod shared; +pub mod singletons; diff --git a/fil_actors_shared/src/v16/builtin/network.rs b/fil_actors_shared/src/v16/builtin/network.rs new file mode 100644 index 00000000..b57c91c2 --- /dev/null +++ b/fil_actors_shared/src/v16/builtin/network.rs @@ -0,0 +1,18 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +/// Assumed epoch duration. If this changes, a large state-migration will need to be run to update +/// expirations, etc. +pub const EPOCH_DURATION_SECONDS: i64 = 30; + +pub const SECONDS_IN_HOUR: i64 = 3600; +pub const SECONDS_IN_DAY: i64 = 86400; +pub const SECONDS_IN_YEAR: i64 = 31556925; +pub const EPOCHS_IN_HOUR: i64 = SECONDS_IN_HOUR / EPOCH_DURATION_SECONDS; +pub const EPOCHS_IN_DAY: i64 = SECONDS_IN_DAY / EPOCH_DURATION_SECONDS; +pub const EPOCHS_IN_YEAR: i64 = SECONDS_IN_YEAR / EPOCH_DURATION_SECONDS; + +/// This is a protocol constant from Filecoin and depends on expected consensus. Here it is used to +/// determine expected rewards, fault penalties, etc. This will need to be changed if expected +/// consensus ever changes (and, likely, so will pledge, etc.). 
+pub const EXPECTED_LEADERS_PER_EPOCH: u64 = 5; diff --git a/fil_actors_shared/src/v16/builtin/reward/math.rs b/fil_actors_shared/src/v16/builtin/reward/math.rs new file mode 100644 index 00000000..0d170988 --- /dev/null +++ b/fil_actors_shared/src/v16/builtin/reward/math.rs @@ -0,0 +1,23 @@ +// Copyright 2021-2023 Protocol Labs +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use fvm_shared4::bigint::{BigInt, ParseBigIntError}; + +pub const PRECISION: u64 = 128; + +/// polyval evaluates a polynomial given by coefficients `p` in Q.128 format +/// at point `x` in Q.128 format. Output is in Q.128. +/// Coefficients should be ordered from the highest order coefficient to the lowest. +pub fn poly_val(poly: &[BigInt], x: &BigInt) -> BigInt { + let mut res = BigInt::default(); + + for coeff in poly { + res = ((res * x) >> PRECISION) + coeff; + } + res +} + +pub fn poly_parse(coefs: &[&str]) -> Result, ParseBigIntError> { + coefs.iter().map(|c| c.parse()).collect() +} diff --git a/fil_actors_shared/src/v16/builtin/reward/mod.rs b/fil_actors_shared/src/v16/builtin/reward/mod.rs new file mode 100644 index 00000000..268d255f --- /dev/null +++ b/fil_actors_shared/src/v16/builtin/reward/mod.rs @@ -0,0 +1,18 @@ +// Copyright 2021-2023 Protocol Labs +// SPDX-License-Identifier: Apache-2.0, MIT +use fvm_ipld_encoding::tuple::*; +use fvm_shared4::bigint::bigint_ser; +use fvm_shared4::sector::StoragePower; + +pub mod math; +pub mod smooth; + +pub use smooth::FilterEstimate; + +#[derive(Clone, Debug, PartialEq, Eq, Serialize_tuple, Deserialize_tuple)] +pub struct ThisEpochRewardReturn { + // * Removed this_epoch_reward in v2 + pub this_epoch_reward_smoothed: FilterEstimate, + #[serde(with = "bigint_ser")] + pub this_epoch_baseline_power: StoragePower, +} diff --git a/fil_actors_shared/src/v16/builtin/reward/smooth/alpha_beta_filter.rs b/fil_actors_shared/src/v16/builtin/reward/smooth/alpha_beta_filter.rs new file mode 100644 index 
00000000..2fc88058 --- /dev/null +++ b/fil_actors_shared/src/v16/builtin/reward/smooth/alpha_beta_filter.rs @@ -0,0 +1,311 @@ +// Copyright 2021-2023 Protocol Labs +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use fvm_ipld_encoding::tuple::*; +use fvm_shared4::bigint::{bigint_ser, BigInt, Integer}; +use fvm_shared4::clock::ChainEpoch; + +use crate::v16::reward::math::PRECISION; + +#[derive(Default, Serialize_tuple, Deserialize_tuple, Clone, Debug, PartialEq, Eq)] +pub struct FilterEstimate { + #[serde(with = "bigint_ser")] + pub position: BigInt, + #[serde(with = "bigint_ser")] + pub velocity: BigInt, +} + +impl FilterEstimate { + /// Create a new filter estimate given two Q.0 format ints. + pub fn new(position: BigInt, velocity: BigInt) -> Self { + FilterEstimate { + position: position << PRECISION, + velocity: velocity << PRECISION, + } + } + + /// Returns the Q.0 position estimate of the filter + pub fn estimate(&self) -> BigInt { + &self.position >> PRECISION + } + + /// Extrapolate filter "position" delta epochs in the future. 
+ pub fn extrapolate(&self, delta: ChainEpoch) -> BigInt { + let delta_t = BigInt::from(delta) << PRECISION; + let position = &self.position << PRECISION; + (&self.velocity * delta_t) + position + } +} + +pub struct AlphaBetaFilter<'a, 'b, 'f> { + alpha: &'a BigInt, + beta: &'b BigInt, + prev_est: &'f FilterEstimate, +} + +impl<'a, 'b, 'f> AlphaBetaFilter<'a, 'b, 'f> { + pub fn load(prev_est: &'f FilterEstimate, alpha: &'a BigInt, beta: &'b BigInt) -> Self { + Self { + alpha, + beta, + prev_est, + } + } + + pub fn next_estimate(&self, obs: &BigInt, epoch_delta: ChainEpoch) -> FilterEstimate { + let delta_t = BigInt::from(epoch_delta) << PRECISION; + let delta_x = (&delta_t * &self.prev_est.velocity) >> PRECISION; + let mut position = delta_x + &self.prev_est.position; + + let obs = obs << PRECISION; + let residual = obs - &position; + let revision_x = (self.alpha * &residual) >> PRECISION; + position += &revision_x; + + let revision_v = residual * self.beta; + let revision_v = revision_v.div_floor(&delta_t); + let velocity = revision_v + &self.prev_est.velocity; + FilterEstimate { position, velocity } + } +} + +#[cfg(test)] +mod tests { + use fvm_shared4::econ::TokenAmount; + use fvm_shared4::sector::StoragePower; + use num_traits::Signed; + + use crate::v16::reward::math; + use crate::v16::EPOCHS_IN_DAY; + + use super::super::smooth_func::{ + extrapolated_cum_sum_of_ratio as ecsor, ln, DEFAULT_ALPHA, DEFAULT_BETA, + }; + use super::*; + + const ERR_BOUND: u64 = 350; + + // Millionths of difference between val1 and val2 + // (val1 - val2) / val1 * 1e6 + // all inputs Q.128, output Q.0 + fn per_million_error(val_1: &BigInt, val_2: &BigInt) -> BigInt { + let diff = (val_1 - val_2) << PRECISION; + + let ratio = diff.div_floor(val_1); + let million = BigInt::from(1_000_000) << PRECISION; + + let diff_per_million = (ratio * million).abs(); + + diff_per_million >> (2 * PRECISION) + } + + fn iterative_cum_sum_of_ratio( + num: &FilterEstimate, + denom: &FilterEstimate, + 
t0: ChainEpoch, + delta: ChainEpoch, + ) -> BigInt { + let mut ratio = BigInt::from(0u8); + + for i in 0..delta { + let num_epsilon = num.extrapolate(t0 + i); // Q.256 + let denom_epsilon = denom.extrapolate(t0 + i) >> PRECISION; // Q.256 + let mut epsilon = num_epsilon.div_floor(&denom_epsilon); // Q.256 / Q.128 => Q.128 + + if i != 0 && i != delta - 1 { + epsilon *= 2; // Q.128 * Q.0 => Q.128 + } + ratio += epsilon; + } + + ratio.div_floor(&BigInt::from(2)) + } + + fn assert_err_bound( + num: &FilterEstimate, + denom: &FilterEstimate, + delta: ChainEpoch, + t0: ChainEpoch, + err_bound: BigInt, + ) { + let analytic = ecsor(delta, t0, num, denom); + let iterative = iterative_cum_sum_of_ratio(num, denom, t0, delta); + let actual_err = per_million_error(&analytic, &iterative); + assert!( + actual_err < err_bound, + "Values are {} and {}", + actual_err, + err_bound + ); + } + + // Returns an estimate with position val and velocity 0 + fn testing_constant_estimate(val: BigInt) -> FilterEstimate { + FilterEstimate::new(val, BigInt::from(0u8)) + } + + // Returns and estimate with postion x and velocity v + fn testing_estimate(x: BigInt, v: BigInt) -> FilterEstimate { + FilterEstimate::new(x, v) + } + + #[test] + fn test_natural_log() { + let ln_inputs: Vec = math::poly_parse(&[ + "340282366920938463463374607431768211456", // Q.128 format of 1 + "924990000000000000000000000000000000000", // Q.128 format of e (rounded up in 5th decimal place to handle truncation) + "34028236692093846346337460743176821145600000000000000000000", // Q.128 format of 100e18 + "6805647338418769269267492148635364229120000000000000000000000", // Q.128 format of 2e22 + "204169000000000000000000000000000000", // Q.128 format of 0.0006 + "34028236692093846346337460743", // Q.128 format of 1e-10 + ]) + .unwrap(); + + let expected_ln_outputs: Vec = math::poly_parse(&[ + "0", // Q.128 format of 0 = ln(1) + "340282366920938463463374607431768211456", // Q.128 format of 1 = ln(e) + 
"15670582109617661336106769654068947397831", // Q.128 format of 46.051... = ln(100e18) + "17473506083804940763855390762239996622013", // Q.128 format of 51.35... = ln(2e22) + "-2524410000000000000000000000000000000000", // Q.128 format of -7.41.. = ln(0.0006) + "-7835291054808830668053384827034473698915", // Q.128 format of -23.02.. = ln(1e-10) + ]) + .unwrap(); + + assert_eq!(ln_inputs.len(), expected_ln_outputs.len()); + let num_inputs = ln_inputs.len(); + + for i in 0..num_inputs { + let z = &ln_inputs[i]; + let ln_of_z = ln(z); + let expected_z = &expected_ln_outputs[i]; + assert_eq!(expected_z >> PRECISION, ln_of_z >> PRECISION); + } + } + + #[test] + fn constant_estimate() { + let num_estimate = testing_constant_estimate(BigInt::from(4_000_000)); + let denom_estimate = testing_constant_estimate(BigInt::from(1)); + + // 4e6/1 over 1000 epochs should give us 4e9 + let csr_1 = ecsor(1000, 0, &num_estimate, &denom_estimate) >> PRECISION; + assert_eq!(BigInt::from(4 * 10_i64.pow(9)), csr_1); + + // if we change t0 nothing should change because velocity is 0 + let csr_2 = ecsor(1000, 10_i64.pow(15), &num_estimate, &denom_estimate) >> PRECISION; + + assert_eq!(csr_1, csr_2); + + // 1e12 / 200e12 for 100 epochs should give ratio of 1/2 + let num_estimate = testing_constant_estimate(BigInt::from(10_i64.pow(12))); + let denom_estimate = testing_constant_estimate(BigInt::from(200 * 10_i64.pow(12))); + let csr_frac = ecsor(100, 0, &num_estimate, &denom_estimate); + + // If we didn't return Q.128 we'd just get zero + assert_eq!(BigInt::from(0u8), &csr_frac >> PRECISION); + + // multiply by 10k and we'll get 5k + // note: this is a bit sensative to input, lots of numbers approach from below + // (...99999) and so truncating division takes us off by one + let product = csr_frac * (BigInt::from(10_000) << PRECISION); // Q.256 + assert_eq!(BigInt::from(5000), product >> (2 * PRECISION)); + } + + #[test] + fn both_positive_velocity() { + let num_estimate = 
testing_estimate(BigInt::from(111), BigInt::from(12)); + let denom_estimate = testing_estimate(BigInt::from(3456), BigInt::from(8)); + assert_err_bound( + &num_estimate, + &denom_estimate, + 10_000, + 0, + BigInt::from(ERR_BOUND), + ); + } + + #[test] + fn flipped_signs() { + let num_estimate = testing_estimate(BigInt::from(1_000_000), BigInt::from(-100)); + let denom_estimate = testing_estimate(BigInt::from(70_000), BigInt::from(1000)); + assert_err_bound( + &num_estimate, + &denom_estimate, + 100_000, + 0, + BigInt::from(ERR_BOUND), + ); + } + + #[test] + fn values_in_range() { + let tens_of_fil = TokenAmount::from_whole(50).atto().clone(); + let one_fil_per_sec = BigInt::from(25); + let four_fil_per_second = BigInt::from(100); + + let slow_money = testing_estimate(tens_of_fil.clone(), one_fil_per_sec); + let fast_money = testing_estimate(tens_of_fil, four_fil_per_second); + + let tens_of_ei_bs = StoragePower::from(10_i128.pow(19)); + let thousands_of_ei_bs = StoragePower::from(2 * 10_i128.pow(22)); + + let one_byte_per_epoch_velocity = BigInt::from(1); + let ten_pi_bs_per_day_velocity = + BigInt::from(10 * 2_i128.pow(50)) / BigInt::from(EPOCHS_IN_DAY); + let one_ei_bs_per_day_velocity = BigInt::from(2_i128.pow(60)) / BigInt::from(EPOCHS_IN_DAY); + + let delta = EPOCHS_IN_DAY; + let t0 = 0; + let err_bound = BigInt::from(ERR_BOUND); + + let test_cases: Vec<(StoragePower, BigInt)> = vec![ + (tens_of_ei_bs.clone(), one_byte_per_epoch_velocity.clone()), + (tens_of_ei_bs.clone(), ten_pi_bs_per_day_velocity.clone()), + (tens_of_ei_bs, one_ei_bs_per_day_velocity.clone()), + (thousands_of_ei_bs.clone(), one_byte_per_epoch_velocity), + (thousands_of_ei_bs.clone(), ten_pi_bs_per_day_velocity), + (thousands_of_ei_bs, one_ei_bs_per_day_velocity), + ]; + + for test_case in test_cases { + let power = testing_estimate(test_case.0, test_case.1); + assert_err_bound(&slow_money, &power, delta, t0, err_bound.clone()); + assert_err_bound(&fast_money, &power, delta, t0, 
err_bound.clone()); + } + } + + #[test] + fn rounding() { + // Calculations in this mod are under the assumption division is euclidean and not truncated + let dd: BigInt = BigInt::from(-100); + let dv: BigInt = BigInt::from(3); + assert_eq!(dd.div_floor(&dv), BigInt::from(-34)); + + let dd: BigInt = BigInt::from(200); + let dv: BigInt = BigInt::from(3); + assert_eq!(dd.div_floor(&dv), BigInt::from(66)); + } + + #[test] + fn rounding_issue() { + let fe = FilterEstimate { + position: "12340768897043811082913117521041414330876498465539749838848" + .parse() + .unwrap(), + velocity: "-37396269384748225153347462373739139597454335279104" + .parse() + .unwrap(), + }; + let filter_reward = AlphaBetaFilter::load(&fe, &DEFAULT_ALPHA, &DEFAULT_BETA); + let next = filter_reward.next_estimate(&36266252337034982540u128.into(), 3); + assert_eq!( + next.position.to_string(), + "12340768782449774548722755900999027209659079673176744001536" + ); + assert_eq!( + next.velocity.to_string(), + "-37396515542149801792802995707072472930787668612438" + ); + } +} diff --git a/fil_actors_shared/src/v16/builtin/reward/smooth/mod.rs b/fil_actors_shared/src/v16/builtin/reward/smooth/mod.rs new file mode 100644 index 00000000..3218fba6 --- /dev/null +++ b/fil_actors_shared/src/v16/builtin/reward/smooth/mod.rs @@ -0,0 +1,9 @@ +// Copyright 2021-2023 Protocol Labs +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +mod alpha_beta_filter; +mod smooth_func; + +pub use alpha_beta_filter::*; +pub use smooth_func::*; diff --git a/fil_actors_shared/src/v16/builtin/reward/smooth/smooth_func.rs b/fil_actors_shared/src/v16/builtin/reward/smooth/smooth_func.rs new file mode 100644 index 00000000..105c464e --- /dev/null +++ b/fil_actors_shared/src/v16/builtin/reward/smooth/smooth_func.rs @@ -0,0 +1,99 @@ +// Copyright 2021-2023 Protocol Labs +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use fvm_shared4::bigint::{BigInt, Integer}; 
+use fvm_shared4::clock::ChainEpoch; +use lazy_static::lazy_static; + +use super::super::math::{poly_parse, poly_val, PRECISION}; +use super::alpha_beta_filter::*; + +lazy_static! { + static ref NUM: Vec = poly_parse(&[ + "261417938209272870992496419296200268025", + "7266615505142943436908456158054846846897", + "32458783941900493142649393804518050491988", + "17078670566130897220338060387082146864806", + "-35150353308172866634071793531642638290419", + "-20351202052858059355702509232125230498980", + "-1563932590352680681114104005183375350999", + ]) + .unwrap(); + static ref DENOM: Vec = poly_parse(&[ + "49928077726659937662124949977867279384", + "2508163877009111928787629628566491583994", + "21757751789594546643737445330202599887121", + "53400635271583923415775576342898617051826", + "41248834748603606604000911015235164348839", + "9015227820322455780436733526367238305537", + "340282366920938463463374607431768211456", + ]) + .unwrap(); + pub static ref DEFAULT_ALPHA: BigInt = "314760000000000000000000000000000000".parse().unwrap(); + pub static ref DEFAULT_BETA: BigInt = "96640100000000000000000000000000".parse().unwrap(); + static ref LN_2: BigInt = "235865763225513294137944142764154484399".parse().unwrap(); + static ref EPSILON: BigInt = "302231454903657293676544".parse().unwrap(); +} + +fn get_bit_len(z: &BigInt) -> u64 { + z.bits() +} + +/// Extrapolate the CumSumRatio given two filters. 
+pub fn extrapolated_cum_sum_of_ratio( + delta: ChainEpoch, + relative_start: ChainEpoch, + est_num: &FilterEstimate, + est_denom: &FilterEstimate, +) -> BigInt { + let delta_t = BigInt::from(delta) << PRECISION; + let t0 = BigInt::from(relative_start) << PRECISION; + + let pos_1 = &est_num.position; + let pos_2 = &est_denom.position; + let velo_1 = &est_num.velocity; + let velo_2 = &est_denom.velocity; + + let squared_velo_2 = (velo_2 * velo_2) >> PRECISION; + + if squared_velo_2 > *EPSILON { + let mut x2a = ((velo_2 * t0) >> PRECISION) + pos_2; + let mut x2b = ((velo_2 * &delta_t) >> PRECISION) + &x2a; + x2a = ln(&x2a); + x2b = ln(&x2b); + + let m1 = ((&x2b - &x2a) * pos_1 * velo_2) >> PRECISION; + + let m2_l = (&x2a - &x2b) * pos_2; + let m2_r = velo_2 * &delta_t; + let m2: BigInt = ((m2_l + m2_r) * velo_1) >> PRECISION; + + return (m2 + m1).div_floor(&squared_velo_2); + } + + let half_delta = &delta_t >> 1; + let mut x1m: BigInt = velo_1 * (t0 + half_delta); + x1m = (x1m >> PRECISION) + pos_1; + + (x1m * delta_t).div_floor(pos_2) +} + +/// The natural log of Q.128 x. +// public to parent for testing. +pub(super) fn ln(z: &BigInt) -> BigInt { + let k: i64 = get_bit_len(z) as i64 - 1 - PRECISION as i64; + + let x: BigInt = if k > 0 { z >> k } else { z << k.abs() }; + + (BigInt::from(k) * &*LN_2) + ln_between_one_and_two(x) +} + +/// The natural log of x, specified in Q.128 format +/// Should only use with 1 <= x <= 2 +/// Output is in Q.128 format. 
+fn ln_between_one_and_two(x: BigInt) -> BigInt { + let num = poly_val(&NUM, &x) << PRECISION; + let denom = poly_val(&DENOM, &x); + num.div_floor(&denom) +} diff --git a/fil_actors_shared/src/v16/builtin/shared.rs b/fil_actors_shared/src/v16/builtin/shared.rs new file mode 100644 index 00000000..f790fa57 --- /dev/null +++ b/fil_actors_shared/src/v16/builtin/shared.rs @@ -0,0 +1,162 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use crate::actor_error_v16; +use crate::v16::runtime::builtins::Type; +use crate::v16::{ActorContext, ActorError}; +use fvm_ipld_encoding::ipld_block::IpldBlock; +use fvm_shared4::address::Address; +use fvm_shared4::METHOD_SEND; +use fvm_shared4::{ActorID, MethodNum}; +use std::fmt::{Display, Formatter}; + +use crate::v16::runtime::Runtime; + +pub const HAMT_BIT_WIDTH: u32 = 5; + +pub const FIRST_ACTOR_SPECIFIC_EXIT_CODE: u32 = 32; + +/// ResolveToActorID resolves the given address to its actor ID. +/// If an actor ID for the given address doesn't exist yet, it tries to create one by sending +/// a zero balance to the given address. 
+pub fn resolve_to_actor_id( + rt: &impl Runtime, + address: &Address, + check_existence: bool, +) -> Result { + let mut actor_id = None; + // if we are able to resolve it to an ID address, return the resolved address + if let Some(id) = rt.resolve_address(address) { + actor_id = Some(id) + } else { + // send 0 balance to the account so an ID address for it is created and then try to resolve + extract_send_result(rt.send_simple( + address, + METHOD_SEND, + Default::default(), + Default::default(), + )) + .with_context(|| format!("failed to send zero balance to address {}", address))?; + + if let Some(id) = rt.resolve_address(address) { + actor_id = Some(id) + } + } + + if let Some(id) = actor_id { + // check for actor existence + if check_existence { + rt.get_actor_code_cid(&id) + .ok_or_else(|| actor_error_v16!(not_found, "no code for address {}", address))?; + } + return Ok(id); + } + + Err(actor_error_v16!( + illegal_argument, + "failed to resolve or initialize address {}", + address + )) +} + +// The lowest FRC-42 method number. +pub const FIRST_EXPORTED_METHOD_NUMBER: MethodNum = 1 << 24; + +// Checks whether the caller is allowed to invoke some method number. +// All method numbers below the FRC-42 range are restricted to built-in actors +// (including the account and multisig actors). +// Methods may subsequently enforce tighter restrictions. 
+pub fn restrict_internal_api(rt: &RT, method: MethodNum) -> Result<(), ActorError> +where + RT: Runtime, +{ + if method >= FIRST_EXPORTED_METHOD_NUMBER { + return Ok(()); + } + let caller = rt.message().caller(); + let code_cid = rt.get_actor_code_cid(&caller.id().unwrap()); + match code_cid { + None => { + return Err( + actor_error_v16!(forbidden; "no code for caller {} of method {}", caller, method), + ); + } + Some(code_cid) => { + let builtin_type = rt.resolve_builtin_actor_type(&code_cid); + match builtin_type { + None | Some(Type::EVM) => { + return Err( + actor_error_v16!(forbidden; "caller {} of method {} must be built-in", caller, method), + ); + } + + // Anything else is a valid built-in caller of the internal API + Some(_) => {} + } + } + } + Ok(()) +} + +/// An error returned on a failed send. Can be automatically converted into an [`ActorError`] with +/// the question-mark operator. +#[derive(Debug, Eq, PartialEq, Copy, Clone)] +pub struct SendError(pub fvm_shared4::error::ErrorNumber); + +impl From for fvm_shared4::error::ErrorNumber { + fn from(s: SendError) -> fvm_shared4::error::ErrorNumber { + s.0 + } +} + +impl From for ActorError { + fn from(s: SendError) -> ActorError { + match s.0 { + // Some of these errors are from operations in the Runtime or SDK layer + // before or after the underlying VM send syscall. + fvm_shared4::error::ErrorNumber::NotFound => { + // This means that the receiving actor doesn't exist. + actor_error_v16!(unspecified; "receiver not found") + } + fvm_shared4::error::ErrorNumber::InsufficientFunds => { + // This means that the send failed because we have insufficient funds. We will + // get a _syscall error_, not an exit code, because the target actor will not + // run (and therefore will not exit). + actor_error_v16!(insufficient_funds; "not enough funds") + } + fvm_shared4::error::ErrorNumber::LimitExceeded => { + // This means we've exceeded the recursion limit. 
+ actor_error_v16!(assertion_failed; "recursion limit exceeded") + } + fvm_shared4::error::ErrorNumber::ReadOnly => ActorError::unchecked( + fvm_shared4::error::ExitCode::USR_READ_ONLY, + "attempted to mutate state while in readonly mode".into(), + ), + err => { + // We don't expect any other syscall exit codes. + actor_error_v16!(assertion_failed; "unexpected error: {}", err) + } + } + } +} + +impl Display for SendError { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + write!(f, "send failed with error number {}", self.0) + } +} + +pub fn extract_send_result( + res: Result, +) -> Result, ActorError> { + let ret = res?; + if ret.exit_code.is_success() { + Ok(ret.return_data) + } else { + Err(ActorError::checked( + ret.exit_code, + format!("send aborted with code {}", ret.exit_code), + ret.return_data, + )) + } +} diff --git a/fil_actors_shared/src/v16/builtin/singletons.rs b/fil_actors_shared/src/v16/builtin/singletons.rs new file mode 100644 index 00000000..0c34d6b9 --- /dev/null +++ b/fil_actors_shared/src/v16/builtin/singletons.rs @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: Apache-2.0, MIT + +use fvm_shared4::address::Address; +use fvm_shared4::ActorID; + +use paste::paste; + +macro_rules! define_singletons { + ($($name:ident = $id:literal,)*) => { + $( + paste! { + pub const [<$name _ID>]: ActorID = $id; + pub const [<$name _ADDR>]: Address = Address::new_id([<$name _ID>]); + } + )* + } +} + +define_singletons! 
{ + SYSTEM_ACTOR = 0, + INIT_ACTOR = 1, + REWARD_ACTOR = 2, + CRON_ACTOR = 3, + STORAGE_POWER_ACTOR = 4, + STORAGE_MARKET_ACTOR = 5, + VERIFIED_REGISTRY_ACTOR = 6, + DATACAP_TOKEN_ACTOR = 7, + EAM_ACTOR = 10, + BURNT_FUNDS_ACTOR = 99, +} + +/// Defines first available ID address after builtin actors +pub const FIRST_NON_SINGLETON_ADDR: ActorID = 100; diff --git a/fil_actors_shared/src/v16/mod.rs b/fil_actors_shared/src/v16/mod.rs new file mode 100644 index 00000000..93b8e921 --- /dev/null +++ b/fil_actors_shared/src/v16/mod.rs @@ -0,0 +1,92 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use cid::Cid; +use fvm_ipld_amt::Amt; +use fvm_ipld_blockstore::Blockstore; +use fvm_ipld_hamt::Sha256; +use fvm_ipld_hamt::{BytesKey, Error as HamtError, Hamt}; +use fvm_shared4::bigint::BigInt; +use serde::de::DeserializeOwned; +use serde::Serialize; +use unsigned_varint::decode::Error as UVarintError; + +pub use {fvm_ipld_amt, fvm_ipld_hamt}; + +pub use self::actor_error::*; +pub use self::builtin::*; +pub use self::util::*; + +pub mod actor_error; +pub mod builtin; +pub mod runtime; +pub mod util; +pub mod vm_api; + +type Hasher = Sha256; + +/// Map type to be used within actors. The underlying type is a HAMT. +pub type Map<'bs, BS, V> = Hamt<&'bs BS, V, BytesKey, Hasher>; + +/// Array type used within actors. The underlying type is an AMT. +pub type Array<'bs, V, BS> = Amt; + +/// Deal weight +pub type DealWeight = BigInt; + +/// Create a hamt with a custom bitwidth. +#[inline] +pub fn make_empty_map(store: &'_ BS, bitwidth: u32) -> Map<'_, BS, V> +where + BS: Blockstore, + V: DeserializeOwned + Serialize, +{ + Map::<_, V>::new_with_bit_width(store, bitwidth) +} + +/// Create a map with a root cid. 
+#[inline] +pub fn make_map_with_root_and_bitwidth<'bs, BS, V>( + root: &Cid, + store: &'bs BS, + bitwidth: u32, +) -> Result, HamtError> +where + BS: Blockstore, + V: DeserializeOwned + Serialize, +{ + Map::<_, V>::load_with_bit_width(root, store, bitwidth) +} + +pub fn u64_key(k: u64) -> BytesKey { + let mut bz = unsigned_varint::encode::u64_buffer(); + let slice = unsigned_varint::encode::u64(k, &mut bz); + slice.into() +} + +pub fn parse_uint_key(s: &[u8]) -> Result { + let (v, _) = unsigned_varint::decode::u64(s)?; + Ok(v) +} + +pub trait Keyer { + fn key(&self) -> BytesKey; +} + +impl Keyer for u64 { + fn key(&self) -> BytesKey { + u64_key(*self) + } +} + +impl Keyer for String { + fn key(&self) -> BytesKey { + BytesKey(self.as_bytes().to_owned()) + } +} + +impl Keyer for &str { + fn key(&self) -> BytesKey { + BytesKey(self.as_bytes().to_owned()) + } +} diff --git a/fil_actors_shared/src/v16/runtime/builtins.rs b/fil_actors_shared/src/v16/runtime/builtins.rs new file mode 100644 index 00000000..2ecf75b2 --- /dev/null +++ b/fil_actors_shared/src/v16/runtime/builtins.rs @@ -0,0 +1 @@ +pub use crate::v16::vm_api::builtin::Type; diff --git a/fil_actors_shared/src/v16/runtime/empty.rs b/fil_actors_shared/src/v16/runtime/empty.rs new file mode 100644 index 00000000..d5075c14 --- /dev/null +++ b/fil_actors_shared/src/v16/runtime/empty.rs @@ -0,0 +1,40 @@ +use std::mem; + +use cid::multihash::Multihash; +use cid::Cid; +use fvm_ipld_encoding::DAG_CBOR; +use fvm_shared4::crypto::hash::SupportedHashes; + +const fn const_unwrap(r: Result) -> T { + let v = match r { + Ok(r) => r, + Err(_) => panic!(), + }; + mem::forget(r); + v +} + +// 45b0cfc220ceec5b7c1c62c4d4193d38e4eba48e8815729ce75f9c0ab0e4c1c0 +const EMPTY_ARR_HASH_DIGEST: &[u8] = &[ + 0x45, 0xb0, 0xcf, 0xc2, 0x20, 0xce, 0xec, 0x5b, 0x7c, 0x1c, 0x62, 0xc4, 0xd4, 0x19, 0x3d, 0x38, + 0xe4, 0xeb, 0xa4, 0x8e, 0x88, 0x15, 0x72, 0x9c, 0xe7, 0x5f, 0x9c, 0x0a, 0xb0, 0xe4, 0xc1, 0xc0, +]; + +// 
bafy2bzacebc3bt6cedhoyw34drrmjvazhu4oj25er2ebk4u445pzycvq4ta4a +pub const EMPTY_ARR_CID: Cid = Cid::new_v1( + DAG_CBOR, + const_unwrap(Multihash::wrap( + SupportedHashes::Blake2b256 as u64, + EMPTY_ARR_HASH_DIGEST, + )), +); + +#[test] +fn test_empty_arr_cid() { + use fvm_ipld_encoding::to_vec; + use multihash_codetable::{Code, MultihashDigest}; + + let empty = to_vec::<[(); 0]>(&[]).unwrap(); + let expected = Cid::new_v1(DAG_CBOR, Code::Blake2b256.digest(&empty)); + assert_eq!(EMPTY_ARR_CID, expected); +} diff --git a/fil_actors_shared/src/v16/runtime/mod.rs b/fil_actors_shared/src/v16/runtime/mod.rs new file mode 100644 index 00000000..9f2e08f9 --- /dev/null +++ b/fil_actors_shared/src/v16/runtime/mod.rs @@ -0,0 +1,276 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use cid::Cid; +use fvm_ipld_blockstore::Blockstore; +use fvm_ipld_encoding::CborStore; +use fvm_shared4::address::Address; +use fvm_shared4::clock::ChainEpoch; +use fvm_shared4::econ::TokenAmount; +use fvm_shared4::randomness::RANDOMNESS_LENGTH; +use fvm_shared4::version::NetworkVersion; +use fvm_shared4::{ActorID, MethodNum, Response}; +use serde::de::DeserializeOwned; +use serde::Serialize; + +pub use self::policy::*; +pub use self::randomness::DomainSeparationTag; +use crate::actor_error_v16; +use crate::v16::runtime::builtins::Type; +use crate::v16::{ActorError, SendError}; + +pub mod builtins; +pub mod policy; +mod randomness; + +pub(crate) mod empty; + +pub use crate::v16::vm_api::Primitives; +pub use empty::EMPTY_ARR_CID; +use fvm_ipld_encoding::ipld_block::IpldBlock; +use fvm_shared4::chainid::ChainID; +use fvm_shared4::event::ActorEvent; +use fvm_shared4::sys::SendFlags; +use multihash_codetable::Code; + +/// Runtime is the VM's internal runtime object. +/// this is everything that is accessible to actors, beyond parameters. 
+pub trait Runtime: Primitives + RuntimePolicy { + type Blockstore: Blockstore; + + /// The network protocol version number at the current epoch. + fn network_version(&self) -> NetworkVersion; + + /// Information related to the current message being executed. + fn message(&self) -> &dyn MessageInfo; + + /// The current chain epoch number, corresponding to the epoch in which the message is executed. + /// The genesis block has epoch zero. + fn curr_epoch(&self) -> ChainEpoch; + + /// The ID for this chain. + /// Filecoin chain IDs are usually in the Ethereum namespace, see: https://github.com/ethereum-lists/chains. + fn chain_id(&self) -> ChainID; + + /// Validates the caller against some predicate. + /// Exported actor methods must invoke at least one caller validation before returning. + fn validate_immediate_caller_accept_any(&self) -> Result<(), ActorError>; + fn validate_immediate_caller_is<'a, I>(&self, addresses: I) -> Result<(), ActorError> + where + I: IntoIterator; + /// Validates that the caller has a delegated address that is a member of + /// one of the provided namespaces. + /// Addresses must be of Protocol ID. + fn validate_immediate_caller_namespace( + &self, + namespace_manager_addresses: I, + ) -> Result<(), ActorError> + where + I: IntoIterator; + fn validate_immediate_caller_type<'a, I>(&self, types: I) -> Result<(), ActorError> + where + I: IntoIterator; + + /// The balance of the receiver. + fn current_balance(&self) -> TokenAmount; + + /// The balance of an actor. + fn actor_balance(&self, id: ActorID) -> Option; + + /// Resolves an address of any protocol to an ID address (via the Init actor's table). + /// This allows resolution of externally-provided SECP, BLS, or actor addresses to the canonical form. + /// If the argument is an ID address it is returned directly. + fn resolve_address(&self, address: &Address) -> Option; + + /// Looks up the "delegated" address of an actor by ID, if any. 
Returns None if either the + /// target actor doesn't exist, or doesn't have an f4 address. + fn lookup_delegated_address(&self, id: ActorID) -> Option
; + + /// Look up the code ID at an actor address. + fn get_actor_code_cid(&self, id: &ActorID) -> Option; + + /// Randomness returns a (pseudo)random byte array drawing from the latest + /// ticket chain from a given epoch and incorporating requisite entropy. + /// This randomness is fork dependant but also biasable because of this. + fn get_randomness_from_tickets( + &self, + personalization: DomainSeparationTag, + rand_epoch: ChainEpoch, + entropy: &[u8], + ) -> Result<[u8; RANDOMNESS_LENGTH], ActorError>; + + /// Randomness returns a (pseudo)random byte array drawing from the latest + /// beacon from a given epoch and incorporating requisite entropy. + /// This randomness is not tied to any fork of the chain, and is unbiasable. + fn get_randomness_from_beacon( + &self, + personalization: DomainSeparationTag, + rand_epoch: ChainEpoch, + entropy: &[u8], + ) -> Result<[u8; RANDOMNESS_LENGTH], ActorError>; + + /// Returns a (pseudo)random byte array drawing from the latest + /// beacon from a given epoch. + /// This randomness is not tied to any fork of the chain, and is unbiasable. + fn get_beacon_randomness( + &self, + rand_epoch: ChainEpoch, + ) -> Result<[u8; RANDOMNESS_LENGTH], ActorError>; + + /// Initializes the state object. + /// This is only valid when the state has not yet been initialized. + /// NOTE: we should also limit this to being invoked during the constructor method + fn create(&self, obj: &T) -> Result<(), ActorError> { + let root = self.get_state_root()?; + if root != EMPTY_ARR_CID { + return Err( + actor_error_v16!(illegal_state; "failed to create state; expected empty array CID, got: {}", root), + ); + } + let new_root = self.store().put_cbor(obj, Code::Blake2b256) + .map_err(|e| actor_error_v16!(illegal_argument; "failed to write actor state during creation: {}", e.to_string()))?; + self.set_state_root(&new_root)?; + Ok(()) + } + + /// Loads a readonly copy of the state of the receiver into the argument. 
+ fn state(&self) -> Result { + Ok(self + .store() + .get_cbor(&self.get_state_root()?) + .map_err( + |_| actor_error_v16!(illegal_argument; "failed to get actor for Readonly state"), + )? + .expect("State does not exist for actor state root")) + } + + /// Gets the state-root. + fn get_state_root(&self) -> Result; + + /// Sets the state-root. + fn set_state_root(&self, root: &Cid) -> Result<(), ActorError>; + + /// Loads a mutable copy of the state of the receiver, passes it to `f`, + /// and after `f` completes puts the state object back to the store and sets it as + /// the receiver's state root. + /// + /// During the call to `f`, execution is protected from side-effects, (including message send). + /// + /// Returns the result of `f`. + fn transaction(&self, f: F) -> Result + where + S: Serialize + DeserializeOwned, + F: FnOnce(&mut S, &Self) -> Result; + + /// Returns reference to blockstore + fn store(&self) -> &Self::Blockstore; + + /// Sends a message to another actor, returning the exit code and return value envelope. + /// If the invoked method does not return successfully, its state changes + /// (and that of any messages it sent in turn) will be rolled back. + fn send( + &self, + to: &Address, + method: MethodNum, + params: Option, + value: TokenAmount, + gas_limit: Option, + flags: SendFlags, + ) -> Result; + + /// Simplified version of [`Runtime::send`] that does not specify a gas limit, nor any send flags. + fn send_simple( + &self, + to: &Address, + method: MethodNum, + params: Option, + value: TokenAmount, + ) -> Result { + self.send(to, method, params, value, None, SendFlags::empty()) + } + + /// Computes an address for a new actor. The returned address is intended to uniquely refer to + /// the actor even in the event of a chain re-org (whereas an ID-address might refer to a + /// different actor after messages are re-ordered). + /// Always an ActorExec address. 
+ fn new_actor_address(&self) -> Result; + + /// Creates an actor with code `codeID`, an empty state, id `actor_id`, and an optional predictable address. + /// May only be called by Init actor. + fn create_actor( + &self, + code_id: Cid, + actor_id: ActorID, + predictable_address: Option
, + ) -> Result<(), ActorError>; + + /// Deletes the executing actor from the state tree. Fails if there is any unspent balance in + /// the actor. + /// + /// May only be called by the actor itself. + fn delete_actor(&self) -> Result<(), ActorError>; + + /// Returns whether the specified CodeCID belongs to a built-in actor. + fn resolve_builtin_actor_type(&self, code_id: &Cid) -> Option; + + /// Returns the CodeCID for a built-in actor type. The kernel will abort + /// if the supplied type is invalid. + fn get_code_cid_for_type(&self, typ: Type) -> Cid; + + /// Returns the total token supply in circulation at the beginning of the current epoch. + /// The circulating supply is the sum of: + /// - rewards emitted by the reward actor, + /// - funds vested from lock-ups in the genesis state, + /// + /// less the sum of: + /// - funds burnt, + /// - pledge collateral locked in storage miner actors (recorded in the storage power actor) + /// - deal collateral locked by the storage market actor + fn total_fil_circ_supply(&self) -> TokenAmount; + + /// ChargeGas charges specified amount of `gas` for execution. + /// `name` provides information about gas charging point + fn charge_gas(&self, name: &'static str, compute: i64); + + /// Returns the gas base fee (cost per unit) for the current epoch. + fn base_fee(&self) -> TokenAmount; + + /// The gas still available for computation + fn gas_available(&self) -> u64; + + /// The timestamp of the tipset at the current epoch (see curr_epoch), as UNIX seconds. + fn tipset_timestamp(&self) -> u64; + + /// The CID of the tipset at the specified epoch. + /// The epoch must satisfy: (curr_epoch - FINALITY) < epoch <= curr_epoch + fn tipset_cid(&self, epoch: i64) -> Result; + + /// Emits an event denoting that something externally noteworthy has ocurred. + fn emit_event(&self, event: &ActorEvent) -> Result<(), ActorError>; + + /// Returns true if the call is read_only. 
+ /// All state updates, including actor creation and balance transfers, are rejected in read_only calls. + fn read_only(&self) -> bool; +} + +/// Message information available to the actor about executing message. +pub trait MessageInfo { + /// The nonce of the currently executing message. + fn nonce(&self) -> u64; + + /// The address of the immediate calling actor. Always an ID-address. + fn caller(&self) -> Address; + + /// The address of the origin of the current invocation. Always an ID-address + fn origin(&self) -> Address; + + /// The address of the actor receiving the message. Always an ID-address. + fn receiver(&self) -> Address; + + /// The value attached to the message being processed, implicitly + /// added to current_balance() before method invocation. + fn value_received(&self) -> TokenAmount; + + /// The message gas premium + fn gas_premium(&self) -> TokenAmount; +} diff --git a/fil_actors_shared/src/v16/runtime/policy.rs b/fil_actors_shared/src/v16/runtime/policy.rs new file mode 100644 index 00000000..bb218d40 --- /dev/null +++ b/fil_actors_shared/src/v16/runtime/policy.rs @@ -0,0 +1,432 @@ +use fvm_shared4::clock::ChainEpoch; +use fvm_shared4::sector::{RegisteredPoStProof, RegisteredSealProof, StoragePower}; +use num_traits::FromPrimitive; +use serde::{Deserialize, Serialize}; + +// A trait for runtime policy configuration +pub trait RuntimePolicy { + fn policy(&self) -> &Policy; +} + +// The policy itself +#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] +pub struct Policy { + /// Maximum amount of sectors that can be aggregated. + pub max_aggregated_sectors: u64, + /// Minimum amount of sectors that can be aggregated. + pub min_aggregated_sectors: u64, + /// Maximum total aggregated proof size. + pub max_aggregated_proof_size: usize, + /// Maximum total replica update proof size. + pub max_replica_update_proof_size: usize, + + /// The maximum number of sector pre-commitments in a single batch. 
+ pub pre_commit_sector_batch_max_size: usize, + /// The maximum number of sector replica updates in a single batch. + pub prove_replica_updates_max_size: usize, + + /// The delay between pre commit expiration and clean up from state. This enforces that expired pre-commits + /// stay in state for a period of time creating a grace period during which a late-running aggregated prove-commit + /// can still prove its non-expired precommits without resubmitting a message + pub expired_pre_commit_clean_up_delay: i64, + + /// The period over which all a miner's active sectors will be challenged. + pub wpost_proving_period: ChainEpoch, + /// The duration of a deadline's challenge window, the period before a deadline when the challenge is available. + /// Notice that the challenge window is assumed to have the same duration as a deadline itself both in FIP and implementation. + pub wpost_challenge_window: ChainEpoch, + /// The number of non-overlapping PoSt deadlines in each proving period. + pub wpost_period_deadlines: u64, + /// The maximum distance back that a valid Window PoSt must commit to the current chain. + pub wpost_max_chain_commit_age: ChainEpoch, + /// WPoStDisputeWindow is the period after a Window PoSt window ends during which + /// PoSts submitted during that period may be disputed. + pub wpost_dispute_window: ChainEpoch, + + /// The maximum number of sectors that a miner can have simultaneously active. + /// This also bounds the number of faults that can be declared, etc. + pub sectors_max: usize, + + /// Maximum number of partitions that will be assigned to a deadline. + pub max_partitions_per_deadline: u64, + + /// Maximum number of control addresses a miner may register. + pub max_control_addresses: usize, + + /// MaxPeerIDLength is the maximum length allowed for any on-chain peer ID. + pub max_peer_id_length: usize, + + /// MaxMultiaddrData is the maximum amount of data that can be stored in multiaddrs. 
+ pub max_multiaddr_data: usize, + + /// The maximum number of partitions that may be required to be loaded in a single invocation. + /// This limits the number of simultaneous fault, recovery, or sector-extension declarations. + pub addressed_partitions_max: u64, + + /// Maximum number of unique "declarations" in batch operations. + pub declarations_max: u64, + + /// The maximum number of sector numbers addressable in a single invocation + /// (which implies also the max infos that may be loaded at once). + /// One upper bound on this is the max size of a storage block: 1MiB supports 130k at 8 bytes each, + /// though bitfields can compress this. + pub addressed_sectors_max: u64, + + /// The maximum number of partitions that can be proven in a single PoSt message. + pub posted_partitions_max: u64, + + pub max_pre_commit_randomness_lookback: ChainEpoch, + + /// Number of epochs between publishing the precommit and when the challenge for interactive PoRep is drawn + /// used to ensure it is not predictable by miner. + pub pre_commit_challenge_delay: ChainEpoch, + + /// Maximum amount of sectors that can be aggregated in NI PoRep. + pub max_aggregated_sectors_ni: u64, + + /// Minimum amount of sectors that can be aggregated. + pub min_aggregated_sectors_ni: u64, + + /// Number of epochs between publishing the commit and when the randomness for non interactive PoRep is drawn + pub max_prove_commit_ni_randomness_lookback: ChainEpoch, + + /// Allowed non interactive proof types for new miners + pub valid_prove_commit_ni_proof_type: ProofSet, + + /// Lookback from the deadline's challenge window opening from which to sample chain randomness for the challenge seed. + pub wpost_challenge_lookback: ChainEpoch, + + /// Minimum period before a deadline's challenge window opens that a fault must be declared for that deadline. + pub fault_declaration_cutoff: ChainEpoch, + + /// The maximum age of a fault before the sector is terminated. 
+ pub fault_max_age: ChainEpoch, + + /// Staging period for a miner worker key change. + pub worker_key_change_delay: ChainEpoch, + + /// Minimum number of epochs past the current epoch a sector may be set to expire. + pub min_sector_expiration: i64, + + /// Maximum number of epochs past the current epoch a sector may be set to expire. + /// The actual maximum extension will be the minimum of CurrEpoch + MaximumSectorExpirationExtension + /// and sector.ActivationEpoch+sealProof.SectorMaximumLifetime() + pub max_sector_expiration_extension: i64, + + /// Ratio of sector size to maximum deals per sector. + /// The maximum number of deals is the sector size divided by this number. + pub deal_limit_denominator: u64, + + /// Number of epochs after a consensus fault for which a miner is ineligible + /// for permissioned actor methods and winning block elections. + pub consensus_fault_ineligibility_duration: ChainEpoch, + + /// The maximum number of new sectors that may be staged by a miner during a single proving period. + pub new_sectors_per_period_max: usize, + + /// Epochs after which chain state is final with overwhelming probability + /// (hence the likelihood of two fork of this size is negligible). + pub chain_finality: ChainEpoch, + + /// Allowed post proof types for new miners + pub valid_post_proof_type: ProofSet, + + /// Allowed pre commit proof types for new miners + pub valid_pre_commit_proof_type: ProofSet, + + // --- verifreg policy + /// Minimum verified deal size + pub minimum_verified_allocation_size: StoragePower, + /// Minimum term for a verified data allocation (epochs) + pub minimum_verified_allocation_term: i64, + /// Maximum term for a verified data allocaion (epochs) + pub maximum_verified_allocation_term: i64, + /// Maximum time a verified allocation can be active without being claimed (epochs). + /// Supports recovery of erroneous allocations and prevents indefinite squatting on datacap. 
/// The default `Policy` mirrors the mainnet values declared in `policy_constants` below.
impl Default for Policy {
    fn default() -> Policy {
        Policy {
            max_aggregated_sectors: policy_constants::MAX_AGGREGATED_SECTORS,
            min_aggregated_sectors: policy_constants::MIN_AGGREGATED_SECTORS,
            max_aggregated_proof_size: policy_constants::MAX_AGGREGATED_PROOF_SIZE,
            max_replica_update_proof_size: policy_constants::MAX_REPLICA_UPDATE_PROOF_SIZE,
            pre_commit_sector_batch_max_size: policy_constants::PRE_COMMIT_SECTOR_BATCH_MAX_SIZE,
            prove_replica_updates_max_size: policy_constants::PROVE_REPLICA_UPDATES_MAX_SIZE,
            expired_pre_commit_clean_up_delay: policy_constants::EXPIRED_PRE_COMMIT_CLEAN_UP_DELAY,
            wpost_proving_period: policy_constants::WPOST_PROVING_PERIOD,
            wpost_challenge_window: policy_constants::WPOST_CHALLENGE_WINDOW,
            wpost_period_deadlines: policy_constants::WPOST_PERIOD_DEADLINES,
            wpost_max_chain_commit_age: policy_constants::WPOST_MAX_CHAIN_COMMIT_AGE,
            wpost_dispute_window: policy_constants::WPOST_DISPUTE_WINDOW,
            sectors_max: policy_constants::SECTORS_MAX,
            max_partitions_per_deadline: policy_constants::MAX_PARTITIONS_PER_DEADLINE,
            max_control_addresses: policy_constants::MAX_CONTROL_ADDRESSES,
            max_peer_id_length: policy_constants::MAX_PEER_ID_LENGTH,
            max_multiaddr_data: policy_constants::MAX_MULTIADDR_DATA,
            addressed_partitions_max: policy_constants::ADDRESSED_PARTITIONS_MAX,
            declarations_max: policy_constants::DECLARATIONS_MAX,
            addressed_sectors_max: policy_constants::ADDRESSED_SECTORS_MAX,
            posted_partitions_max: policy_constants::POSTED_PARTITIONS_MAX,
            max_pre_commit_randomness_lookback:
                policy_constants::MAX_PRE_COMMIT_RANDOMNESS_LOOKBACK,
            valid_prove_commit_ni_proof_type: ProofSet::default_seal_ni_proofs(),
            max_aggregated_sectors_ni: policy_constants::MAX_AGGREGATED_SECTORS_NI,
            min_aggregated_sectors_ni: policy_constants::MIN_AGGREGATED_SECTORS_NI,
            max_prove_commit_ni_randomness_lookback: policy_constants::MAX_PROVE_COMMIT_NI_LOOKBACK,
            pre_commit_challenge_delay: policy_constants::PRE_COMMIT_CHALLENGE_DELAY,
            wpost_challenge_lookback: policy_constants::WPOST_CHALLENGE_LOOKBACK,
            fault_declaration_cutoff: policy_constants::FAULT_DECLARATION_CUTOFF,
            fault_max_age: policy_constants::FAULT_MAX_AGE,
            worker_key_change_delay: policy_constants::WORKER_KEY_CHANGE_DELAY,
            min_sector_expiration: policy_constants::MIN_SECTOR_EXPIRATION,
            max_sector_expiration_extension: policy_constants::MAX_SECTOR_EXPIRATION_EXTENSION,
            deal_limit_denominator: policy_constants::DEAL_LIMIT_DENOMINATOR,
            consensus_fault_ineligibility_duration:
                policy_constants::CONSENSUS_FAULT_INELIGIBILITY_DURATION,
            new_sectors_per_period_max: policy_constants::NEW_SECTORS_PER_PERIOD_MAX,
            chain_finality: policy_constants::CHAIN_FINALITY,

            valid_post_proof_type: ProofSet::default_post_proofs(),
            valid_pre_commit_proof_type: ProofSet::default_precommit_seal_proofs(),
            // NOTE(review): assumes `StoragePower::from_i32` (BigInt) is infallible for any
            // i32, making this `unwrap` safe — confirm against the num-bigint API.
            minimum_verified_allocation_size: StoragePower::from_i32(
                policy_constants::MINIMUM_VERIFIED_ALLOCATION_SIZE,
            )
            .unwrap(),
            minimum_verified_allocation_term: policy_constants::MINIMUM_VERIFIED_ALLOCATION_TERM,
            maximum_verified_allocation_term: policy_constants::MAXIMUM_VERIFIED_ALLOCATION_TERM,
            maximum_verified_allocation_expiration:
                policy_constants::MAXIMUM_VERIFIED_ALLOCATION_EXPIRATION,
            end_of_life_claim_drop_period: policy_constants::END_OF_LIFE_CLAIM_DROP_PERIOD,
            deal_updates_interval: policy_constants::DEAL_UPDATES_INTERVAL,
            prov_collateral_percent_supply_num:
                policy_constants::PROV_COLLATERAL_PERCENT_SUPPLY_NUM,
            prov_collateral_percent_supply_denom:
                policy_constants::PROV_COLLATERAL_PERCENT_SUPPLY_DENOM,
            market_default_allocation_term_buffer:
                policy_constants::MARKET_DEFAULT_ALLOCATION_TERM_BUFFER,

            minimum_consensus_power: StoragePower::from(policy_constants::MINIMUM_CONSENSUS_POWER),
        }
    }
}
    // Half an hour (=48 per day).
    // This must be consistent with WPOST_PERIOD_DEADLINES.
    pub const WPOST_CHALLENGE_WINDOW: ChainEpoch = 30 * 60 / EPOCH_DURATION_SECONDS;

    // This must be consistent with WPOST_CHALLENGE_WINDOW.
    pub const WPOST_PERIOD_DEADLINES: u64 = 48;

    pub const WPOST_MAX_CHAIN_COMMIT_AGE: ChainEpoch = WPOST_CHALLENGE_WINDOW;

    pub const WPOST_DISPUTE_WINDOW: ChainEpoch = 2 * CHAIN_FINALITY;

    pub const SECTORS_MAX: usize = 32 << 20;

    // For a minimum storage of up to 1 EiB, we need 300 partitions per deadline.
    // 48 * 32GiB * 2349 * 300 = 1.00808144 EiB
    // So, to support up to 10 EiB of storage, we set this to 3000.
    pub const MAX_PARTITIONS_PER_DEADLINE: u64 = 3000;

    pub const MAX_CONTROL_ADDRESSES: usize = 10;

    // Most Peer IDs are expected to be less than 50 bytes.
    pub const MAX_PEER_ID_LENGTH: usize = 128;

    pub const MAX_MULTIADDR_DATA: usize = 1024;

    // With 48 deadlines (half-hour), 300 partitions per declaration permits addressing a full EiB
    // of partitions of 32GiB sectors with 1 message per epoch within a single half-hour deadline.
    // A miner can of course submit more messages.
    pub const ADDRESSED_PARTITIONS_MAX: u64 = MAX_PARTITIONS_PER_DEADLINE;

    pub const DECLARATIONS_MAX: u64 = ADDRESSED_PARTITIONS_MAX;

    pub const ADDRESSED_SECTORS_MAX: u64 = 25_000;

    pub const POSTED_PARTITIONS_MAX: u64 = 3;

    pub const MAX_PRE_COMMIT_RANDOMNESS_LOOKBACK: ChainEpoch = EPOCHS_IN_DAY + CHAIN_FINALITY;

    pub const PRE_COMMIT_CHALLENGE_DELAY: ChainEpoch = 150;

    // Maximum number of epochs within which to fetch a valid seal randomness from the chain for
    // a non-interactive PoRep proof. This balances the need to tie the seal to a particular
    // chain, but makes allowance for service providers to offer pre-sealed sectors within a
    // larger window of time.
    pub const MAX_PROVE_COMMIT_NI_LOOKBACK: ChainEpoch = 180 * EPOCHS_IN_DAY;

    pub const MAX_AGGREGATED_SECTORS_NI: u64 = 65;

    pub const MIN_AGGREGATED_SECTORS_NI: u64 = 1;

    // This lookback exists so that deadline windows can be non-overlapping (which makes the
    // programming simpler) but without making the miner wait for chain stability before being
    // able to start on PoSt computation.
    // The challenge is available this many epochs before the window is actually open to
    // receiving a PoSt.
    pub const WPOST_CHALLENGE_LOOKBACK: ChainEpoch = 20;

    // This lookback must not be less than WPoStChallengeLookback lest a malicious miner be able
    // to selectively declare faults after learning the challenge value.
    pub const FAULT_DECLARATION_CUTOFF: ChainEpoch = WPOST_CHALLENGE_LOOKBACK + 50;

    pub const FAULT_MAX_AGE: ChainEpoch = WPOST_PROVING_PERIOD * 42;

    // Finality is a harsh delay for a miner who has lost their worker key, as the miner will
    // miss Window PoSts until it can be changed. It's the only safe value, though. We may
    // implement a mitigation mechanism such as a second key or allowing the owner account to
    // submit PoSts while a key change is pending.
    pub const WORKER_KEY_CHANGE_DELAY: ChainEpoch = CHAIN_FINALITY;

    pub const MIN_SECTOR_EXPIRATION: i64 = 180 * EPOCHS_IN_DAY;

    pub const MAX_SECTOR_EXPIRATION_EXTENSION: i64 = 1278 * EPOCHS_IN_DAY;

    /// A value (2^27) limits 32GiB sectors to 256 deals and 64GiB sectors to 512.
    pub const DEAL_LIMIT_DENOMINATOR: u64 = 134217728;

    pub const CONSENSUS_FAULT_INELIGIBILITY_DURATION: ChainEpoch = CHAIN_FINALITY;

    pub const NEW_SECTORS_PER_PERIOD_MAX: usize = 128 << 10;

    /// This is a conservative value that is chosen via simulations of all known attacks.
+ pub const CHAIN_FINALITY: ChainEpoch = 900; + + pub const MINIMUM_VERIFIED_ALLOCATION_SIZE: i32 = 1 << 20; + pub const MINIMUM_VERIFIED_ALLOCATION_TERM: i64 = 180 * EPOCHS_IN_DAY; + pub const MAXIMUM_VERIFIED_ALLOCATION_TERM: i64 = 5 * EPOCHS_IN_YEAR; + pub const MAXIMUM_VERIFIED_ALLOCATION_EXPIRATION: i64 = 60 * EPOCHS_IN_DAY; + pub const END_OF_LIFE_CLAIM_DROP_PERIOD: ChainEpoch = 30 * EPOCHS_IN_DAY; + + pub const DEAL_UPDATES_INTERVAL: i64 = 30 * EPOCHS_IN_DAY; + + pub const PROV_COLLATERAL_PERCENT_SUPPLY_NUM: i64 = 1; + + pub const PROV_COLLATERAL_PERCENT_SUPPLY_DENOM: i64 = 100; + + pub const MARKET_DEFAULT_ALLOCATION_TERM_BUFFER: i64 = 90 * EPOCHS_IN_DAY; + + pub const MINIMUM_CONSENSUS_POWER: i64 = 10 << 40; +} + +/// A set indicating which proofs are considered valid, optimised for lookup of a small number of +/// sequential enum variants. Backed by an array of booleans where each index indicates if that +/// proof type is valid +#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize)] +pub struct ProofSet(Vec); + +/// The number of total possible types (enum variants) of RegisteredPoStProof +const REGISTERED_POST_PROOF_VARIANTS: usize = 15; + +/// The number of total possible types (enum variants) of RegisteredSealProof +const REGISTERED_SEAL_PROOF_VARIANTS: usize = 20; + +impl Default for ProofSet { + fn default() -> Self { + ProofSet(vec![ + false; + REGISTERED_POST_PROOF_VARIANTS + .max(REGISTERED_SEAL_PROOF_VARIANTS) + ]) + } +} + +impl ProofSet { + /// Create a `ProofSet` for enabled `RegisteredPoStProof`s + pub fn default_post_proofs() -> Self { + let mut proofs = vec![false; REGISTERED_POST_PROOF_VARIANTS]; + + proofs[i64::from(RegisteredPoStProof::StackedDRGWindow32GiBV1P1) as usize] = true; + proofs[i64::from(RegisteredPoStProof::StackedDRGWindow64GiBV1P1) as usize] = true; + + ProofSet(proofs) + } + + /// Create a `ProofSet` for enabled `RegisteredSealProof`s + pub fn default_precommit_seal_proofs() -> Self { + let mut proofs = 
vec![false; REGISTERED_SEAL_PROOF_VARIANTS]; + + proofs[i64::from(RegisteredSealProof::StackedDRG32GiBV1P1) as usize] = true; + proofs[i64::from(RegisteredSealProof::StackedDRG32GiBV1P1_Feat_SyntheticPoRep) as usize] = + true; + + proofs[i64::from(RegisteredSealProof::StackedDRG64GiBV1P1) as usize] = true; + proofs[i64::from(RegisteredSealProof::StackedDRG64GiBV1P1_Feat_SyntheticPoRep) as usize] = + true; + + ProofSet(proofs) + } + + pub fn default_seal_ni_proofs() -> Self { + let mut proofs = vec![false; REGISTERED_SEAL_PROOF_VARIANTS]; + + proofs[i64::from(RegisteredSealProof::StackedDRG32GiBV1P2_Feat_NiPoRep) as usize] = true; + proofs[i64::from(RegisteredSealProof::StackedDRG64GiBV1P2_Feat_NiPoRep) as usize] = true; + + ProofSet(proofs) + } + + /// Checks if the requested proof type exists in the set + pub fn contains>(&self, proof: P) -> bool { + let index: i64 = proof.into(); + *self.0.get(index as usize).unwrap_or(&false) + } + + /// Adds the requested proof type to the set of valid proofs + pub fn insert>(&mut self, proof: P) { + let index: i64 = proof.into(); + self.0[index as usize] = true; + } +} diff --git a/fil_actors_shared/src/v16/runtime/randomness.rs b/fil_actors_shared/src/v16/runtime/randomness.rs new file mode 100644 index 00000000..17769b3e --- /dev/null +++ b/fil_actors_shared/src/v16/runtime/randomness.rs @@ -0,0 +1,52 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use fvm_shared4::clock::ChainEpoch; +use fvm_shared4::randomness::RANDOMNESS_LENGTH; +use num_derive::FromPrimitive; +use serde_repr::*; + +/// Specifies a domain for randomness generation. 
+#[derive(PartialEq, Eq, Copy, Clone, FromPrimitive, Debug, Hash, Deserialize_repr)] +#[repr(i64)] +pub enum DomainSeparationTag { + TicketProduction = 1, + ElectionProofProduction = 2, + WinningPoStChallengeSeed = 3, + WindowedPoStChallengeSeed = 4, + SealRandomness = 5, + InteractiveSealChallengeSeed = 6, + WindowPoStDeadlineAssignment = 7, + MarketDealCronSeed = 8, + PoStChainCommit = 9, + EvmPrevRandao = 10, +} + +#[allow(unused)] +pub fn draw_randomness( + hasher: impl FnOnce(&[u8]) -> [u8; 32], + rbase: &[u8; RANDOMNESS_LENGTH], + pers: DomainSeparationTag, + round: ChainEpoch, + entropy: &[u8], +) -> [u8; RANDOMNESS_LENGTH] { + let mut data = Vec::with_capacity(RANDOMNESS_LENGTH + 8 + 8 + entropy.len()); + + // Append the personalization value + let i64_bytes = (pers as i64).to_be_bytes(); + data.extend_from_slice(&i64_bytes); + + // Append the randomness + data.extend_from_slice(rbase); + + // Append the round + let i64_bytes = round.to_be_bytes(); + data.extend_from_slice(&i64_bytes); + + // Append the entropy + data.extend_from_slice(entropy); + + hasher(&data) + // + // fvm::crypto::hash_blake2b(&data) +} diff --git a/fil_actors_shared/src/v16/util/batch_return.rs b/fil_actors_shared/src/v16/util/batch_return.rs new file mode 100644 index 00000000..aa9bc2da --- /dev/null +++ b/fil_actors_shared/src/v16/util/batch_return.rs @@ -0,0 +1,291 @@ +use fvm_ipld_encoding::tuple::*; +use fvm_shared4::error::ExitCode; +use std::fmt; + +#[derive(Serialize_tuple, Deserialize_tuple, Clone, Debug, PartialEq, Eq)] +pub struct FailCode { + pub idx: u32, + pub code: ExitCode, +} + +#[derive(Serialize_tuple, Deserialize_tuple, Clone, PartialEq, Eq, Debug)] +pub struct BatchReturn { + // Total successes in batch + pub success_count: u32, + // Failure code and index for each failure in batch + pub fail_codes: Vec, +} + +impl BatchReturn { + pub const fn empty() -> Self { + Self { + success_count: 0, + fail_codes: Vec::new(), + } + } + + pub const fn ok(n: u32) -> Self { + 
Self { + success_count: n, + fail_codes: Vec::new(), + } + } + + pub fn of(codes: &[ExitCode]) -> Self { + let mut gen = BatchReturnGen::new(codes.len()); + for code in codes { + gen.add(*code); + } + gen.gen() + } + + pub fn size(&self) -> usize { + self.success_count as usize + self.fail_codes.len() + } + + pub fn all_ok(&self) -> bool { + self.fail_codes.is_empty() + } + + /// Returns a vector of exit codes for each item (including successes). + pub fn codes(&self) -> Vec { + let mut ret = Vec::new(); + + for fail in &self.fail_codes { + for _ in ret.len()..fail.idx as usize { + ret.push(ExitCode::OK) + } + ret.push(fail.code) + } + for _ in ret.len()..self.size() { + ret.push(ExitCode::OK) + } + ret + } + + /// Returns a subset of items corresponding to the successful indices. + /// Panics if `items` is not the same length as this batch return. + pub fn successes<'i, T>(&self, items: &'i [T]) -> Vec<&'i T> { + if items.len() != self.size() { + panic!( + "items length {} does not match batch size {}", + items.len(), + self.size() + ); + } + let mut ret = Vec::new(); + let mut fail_idx = 0; + for (idx, item) in items.iter().enumerate() { + if fail_idx < self.fail_codes.len() && idx == self.fail_codes[fail_idx].idx as usize { + fail_idx += 1; + } else { + ret.push(item) + } + } + ret + } +} + +impl fmt::Display for BatchReturn { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let succ_str = format!("Batch successes {} / {}", self.success_count, self.size()); + if self.all_ok() { + return f.write_str(&succ_str); + } + let mut ret = format!("{}, Batch failing: [", succ_str); + let mut strs = Vec::new(); + for fail in &self.fail_codes { + strs.push(format!("code={} at idx={}", fail.code, fail.idx)) + } + let fail_str = strs.join(", "); + ret.push_str(&fail_str); + ret.push(']'); + f.write_str(&ret) + } +} + +/// Computes a batch return that is the result of a sequence of batch returns +/// applied to the previous successful results. 
+/// Each batch's size() must be equal to the previous batch's success_count. +/// Any fail codes then override the prior stack's successful items, +/// indexed against only those successful items. +/// E.g. stack([OK, E1, OK, E2], [OK, E3], [E4]) => [E4, E1, E3, E2] +pub fn stack(batch_returns: &[BatchReturn]) -> BatchReturn { + if batch_returns.is_empty() { + return BatchReturn::empty(); + } + let mut base = batch_returns[0].clone(); + for nxt in &batch_returns[1..] { + assert_eq!( + base.success_count as usize, + nxt.size(), + "can't stack batch of {} on batch with {} successes", + nxt.size(), + base.success_count + ); + let mut base_fail = base.fail_codes.iter().peekable(); + let mut offset = 0; + let new_fail_codes: Vec<_> = nxt + .fail_codes + .iter() + .map(|nxt_fail| { + while base_fail + .next_if(|f| f.idx <= nxt_fail.idx + offset) + .is_some() + { + offset += 1; + } + FailCode { + idx: nxt_fail.idx + offset, + code: nxt_fail.code, + } + }) + .collect(); + base.fail_codes.extend(new_fail_codes); + base.fail_codes.sort_by(|a, b| a.idx.cmp(&b.idx)); + base.success_count = nxt.success_count; + } + assert_eq!(base.size(), batch_returns[0].size()); + assert_eq!( + base.success_count, + batch_returns[batch_returns.len() - 1].success_count + ); + base +} + +pub struct BatchReturnGen { + success_count: usize, + fail_codes: Vec, + + // gen will only work if it has processed all of the expected batch + expect_count: usize, +} + +impl BatchReturnGen { + pub fn new(expect_count: usize) -> Self { + BatchReturnGen { + success_count: 0, + fail_codes: Vec::new(), + expect_count, + } + } + + pub fn add_success(&mut self) -> &mut Self { + self.add_successes(1) + } + + pub fn add_successes(&mut self, count: usize) -> &mut Self { + self.success_count += count; + self + } + + pub fn add_fail(&mut self, code: ExitCode) -> &mut Self { + self.fail_codes.push(FailCode { + idx: (self.success_count + self.fail_codes.len()) as u32, + code, + }); + self + } + + pub fn add(&mut self, 
code: ExitCode) -> &mut Self { + if code.is_success() { + self.add_success() + } else { + self.add_fail(code) + } + } + + pub fn gen(self) -> BatchReturn { + assert_eq!(self.expect_count, self.success_count + self.fail_codes.len(), "programmer error, mismatched batch size {} and processed count {} batch return must include success/fail for all inputs", self.expect_count, self.success_count + self.fail_codes.len()); + BatchReturn { + success_count: self.success_count as u32, + fail_codes: self.fail_codes, + } + } +} + +// Unit tests +#[cfg(test)] +mod test { + use crate::v16::util::batch_return::stack; + use crate::v16::{BatchReturn, FailCode}; + use fvm_shared4::error::ExitCode; + + const OK: ExitCode = ExitCode::OK; + const ERR1: ExitCode = ExitCode::USR_ILLEGAL_ARGUMENT; + const ERR2: ExitCode = ExitCode::USR_NOT_FOUND; + const ERR3: ExitCode = ExitCode::USR_FORBIDDEN; + + ///// Tests for stacking batch returns. ///// + + #[test] + fn test_stack_empty() { + let batch_returns = vec![]; + let stacked = stack(&batch_returns); + assert_eq!(stacked.success_count, 0); + assert_eq!(Vec::::new(), stacked.fail_codes); + } + + #[test] + fn test_stack_single() { + assert_stack(&[], &[]); + assert_stack(&[OK], &[&[OK]]); + assert_stack(&[ERR1], &[&[ERR1]]); + assert_stack(&[ERR1, OK, ERR2], &[&[ERR1, OK, ERR2]]); + } + + #[test] + fn test_stack_overwrites() { + assert_stack(&[OK], &[&[OK], &[OK]]); + assert_stack(&[ERR1], &[&[OK], &[ERR1]]); + + assert_stack(&[OK, ERR1], &[&[OK, OK], &[OK, ERR1]]); + assert_stack(&[ERR1, ERR2], &[&[OK, OK], &[ERR1, ERR2]]); + } + + #[test] + fn test_stack_offsets() { + assert_stack(&[ERR1], &[&[ERR1], &[]]); + assert_stack(&[ERR1, ERR2], &[&[ERR1, ERR2], &[]]); + + assert_stack(&[ERR2, ERR1], &[&[OK, ERR1], &[ERR2]]); + assert_stack(&[ERR1, ERR2], &[&[ERR1, OK], &[ERR2]]); + + assert_stack(&[ERR2, ERR1], &[&[OK, OK], &[OK, ERR1], &[ERR2]]); + assert_stack(&[ERR1, ERR2], &[&[OK, OK], &[ERR1, OK], &[ERR2]]); + + assert_stack(&[OK, ERR1, OK], 
&[&[OK, ERR1, OK], &[OK, OK]]); + assert_stack(&[ERR2, ERR1, OK], &[&[OK, ERR1, OK], &[ERR2, OK]]); + assert_stack(&[OK, ERR1, ERR2], &[&[OK, ERR1, OK], &[OK, ERR2]]); + assert_stack(&[ERR1, ERR2, OK], &[&[ERR1, OK, OK], &[ERR2, OK]]); + assert_stack(&[ERR1, OK, ERR2], &[&[ERR1, OK, OK], &[OK, ERR2]]); + assert_stack(&[ERR3, ERR1, ERR2], &[&[OK, ERR1, OK], &[ERR3, ERR2]]); + + assert_stack( + &[ERR1, ERR1, ERR1, ERR3, ERR2, ERR3], + &[&[ERR1, ERR1, ERR1, OK, ERR2, OK], &[ERR3, ERR3]], + ); + + assert_stack( + &[ERR1, ERR3, ERR2, OK, ERR1, ERR3], + &[&[ERR1, OK, ERR2, OK, ERR1, OK], &[ERR3, OK, ERR3]], + ); + + assert_stack( + &[ERR2, ERR1, OK, ERR3, ERR2], + &[ + &[OK; 5], + &[OK, ERR1, OK, OK, OK], + &[ERR2, OK, OK, ERR2], + &[OK, ERR3], + ], + ); + } + + fn assert_stack(expected: &[ExitCode], stacked: &[&[ExitCode]]) { + let expected = BatchReturn::of(expected); + let batches: Vec = stacked.iter().map(|b| BatchReturn::of(b)).collect(); + let stacked = stack(&batches); + assert_eq!(expected, stacked); + } +} diff --git a/fil_actors_shared/src/v16/util/cbor.rs b/fil_actors_shared/src/v16/util/cbor.rs new file mode 100644 index 00000000..9e7b728f --- /dev/null +++ b/fil_actors_shared/src/v16/util/cbor.rs @@ -0,0 +1,35 @@ +use fvm_ipld_encoding::{to_vec, RawBytes}; +use serde::{de, ser}; + +use crate::v16::ActorError; + +/// Serializes a structure as a CBOR vector of bytes, returning a serialization error on failure. +/// `desc` is a noun phrase for the object being serialized, included in any error message. +pub fn serialize_vec(value: &T, desc: &str) -> Result, ActorError> +where + T: ser::Serialize + ?Sized, +{ + to_vec(value) + .map_err(|e| ActorError::serialization(format!("failed to serialize {}: {}", desc, e))) +} + +/// Serializes a structure as CBOR bytes, returning a serialization error on failure. +/// `desc` is a noun phrase for the object being serialized, included in any error message. 
+pub fn serialize(value: &T, desc: &str) -> Result +where + T: ser::Serialize + ?Sized, +{ + Ok(RawBytes::new(serialize_vec(value, desc)?)) +} + +/// Deserialises CBOR-encoded bytes as a structure, returning a serialization error on failure. +/// `desc` is a noun phrase for the object being deserialized, included in any error message. +pub fn deserialize(v: &RawBytes, desc: &str) -> Result { + v.deserialize() + .map_err(|e| ActorError::serialization(format!("failed to deserialize {}: {}", desc, e))) +} + +/// Deserialises CBOR-encoded bytes as a method parameters object. +pub fn deserialize_params(params: &RawBytes) -> Result { + deserialize(params, "method parameters") +} diff --git a/fil_actors_shared/src/v16/util/downcast.rs b/fil_actors_shared/src/v16/util/downcast.rs new file mode 100644 index 00000000..c9b22308 --- /dev/null +++ b/fil_actors_shared/src/v16/util/downcast.rs @@ -0,0 +1,118 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use anyhow::anyhow; +use fvm_ipld_amt::Error as AmtError; +use fvm_ipld_encoding::Error as EncodingError; +use fvm_ipld_hamt::Error as HamtError; +use fvm_shared4::error::ExitCode; + +use crate::v16::ActorError; + +/// Trait to allow multiple error types to be able to be downcasted into an `ActorError`. +pub trait ActorDowncast { + /// Downcast a dynamic std Error into an `ActorError`. If the error cannot be downcasted + /// into an ActorError automatically, use the provided `ExitCode` to generate a new error. + fn downcast_default(self, default_exit_code: ExitCode, msg: impl AsRef) -> ActorError; + + /// Wrap the error with a message, without overwriting an exit code. 
+ fn downcast_wrap(self, msg: impl AsRef) -> anyhow::Error; +} + +impl ActorDowncast for anyhow::Error { + fn downcast_default(self, default_exit_code: ExitCode, msg: impl AsRef) -> ActorError { + match downcast_util(self) { + Ok(actor_error) => actor_error.wrap(msg), + Err(other) => { + ActorError::unchecked(default_exit_code, format!("{}: {}", msg.as_ref(), other)) + } + } + } + fn downcast_wrap(self, msg: impl AsRef) -> anyhow::Error { + match downcast_util(self) { + Ok(actor_error) => anyhow!(actor_error.wrap(msg)), + Err(other) => anyhow!("{}: {}", msg.as_ref(), other), + } + } +} + +impl ActorDowncast for AmtError { + fn downcast_default(self, default_exit_code: ExitCode, msg: impl AsRef) -> ActorError { + match self { + AmtError::Dynamic(e) => e.downcast_default(default_exit_code, msg), + other => { + ActorError::unchecked(default_exit_code, format!("{}: {}", msg.as_ref(), other)) + } + } + } + fn downcast_wrap(self, msg: impl AsRef) -> anyhow::Error { + match self { + AmtError::Dynamic(e) => e.downcast_wrap(msg), + other => anyhow!("{}: {}", msg.as_ref(), other), + } + } +} + +impl ActorDowncast for HamtError { + fn downcast_default(self, default_exit_code: ExitCode, msg: impl AsRef) -> ActorError { + match self { + HamtError::Dynamic(e) => e.downcast_default(default_exit_code, msg), + other => { + ActorError::unchecked(default_exit_code, format!("{}: {}", msg.as_ref(), other)) + } + } + } + fn downcast_wrap(self, msg: impl AsRef) -> anyhow::Error { + match self { + HamtError::Dynamic(e) => e.downcast_wrap(msg), + other => anyhow!("{}: {}", msg.as_ref(), other), + } + } +} + +/// Attempts to downcast a `Box` into an actor error. +/// Returns `Ok` with the actor error if it can be downcasted automatically +/// and returns `Err` with the original error if it cannot. 
+fn downcast_util(error: anyhow::Error) -> anyhow::Result { + // Check if error is ActorError, return as such + let error = match error.downcast::() { + Ok(actor_err) => return Ok(actor_err), + Err(other) => other, + }; + + // Check if error is Encoding error, if so return `ErrSerialization` + let error = match error.downcast::() { + Ok(enc_error) => { + return Ok(ActorError::unchecked( + ExitCode::USR_SERIALIZATION, + enc_error.to_string(), + )) + } + Err(other) => other, + }; + + // Dynamic errors can come from Array and Hamt through blockstore usages, check them. + let error = match error.downcast::() { + Ok(amt_err) => match amt_err { + AmtError::Dynamic(de) => match downcast_util(de) { + Ok(a) => return Ok(a), + Err(other) => other, + }, + other => anyhow!(other), + }, + Err(other) => other, + }; + let error = match error.downcast::() { + Ok(amt_err) => match amt_err { + HamtError::Dynamic(de) => match downcast_util(de) { + Ok(a) => return Ok(a), + Err(other) => other, + }, + other => anyhow!(other), + }, + Err(other) => other, + }; + + // Could not be downcasted automatically to actor error, return initial dynamic error. + Err(error) +} diff --git a/fil_actors_shared/src/v16/util/events.rs b/fil_actors_shared/src/v16/util/events.rs new file mode 100644 index 00000000..5f686b25 --- /dev/null +++ b/fil_actors_shared/src/v16/util/events.rs @@ -0,0 +1,142 @@ +use crate::v16::cbor::serialize_vec; +use crate::v16::ActorError; +use fvm_shared4::event::{ActorEvent, Entry, Flags}; +use serde::ser; + +// Codec identifier for CBOR-encoded data. +const IPLD_CBOR: u64 = 0x51; + +const EVENT_TYPE_KEY: &str = "$type"; + +/// Builder for ActorEvent objects, accumulating key/value pairs. +pub struct EventBuilder { + entries: Result, ActorError>, +} + +impl EventBuilder { + /// Creates a new builder with no values. + pub fn new() -> Self { + Self { + entries: Ok(Vec::new()), + } + } + + /// Initialise the "type" of the event i.e. Actor event type. 
+ pub fn typ(self, _type: &str) -> Self { + self.push_entry(EVENT_TYPE_KEY, _type, Flags::FLAG_INDEXED_ALL) + } + + /// Pushes an entry with an indexed key and an un-indexed, IPLD-CBOR-serialized value. + pub fn field(self, name: &str, value: &T) -> Self { + self.push_entry(name, value, Flags::FLAG_INDEXED_KEY) + } + + /// Pushes an entry with an indexed key and indexed, IPLD-CBOR-serialized value. + pub fn field_indexed(self, name: &str, value: &T) -> Self { + self.push_entry(name, value, Flags::FLAG_INDEXED_ALL) + } + + /// Returns an actor event ready to emit (consuming self). + pub fn build(self) -> Result { + Ok(ActorEvent { + entries: self.entries?, + }) + } + + /// Pushes an entry with an IPLD-CBOR-serialized value. + fn push_entry( + mut self, + key: &str, + value: &T, + flags: Flags, + ) -> Self { + if let Ok(ref mut entries) = self.entries { + match serialize_vec(&value, "event value") { + Ok(value) => entries.push(Entry { + flags, + key: key.to_string(), + codec: IPLD_CBOR, + value, + }), + Err(e) => { + self.entries = Err(e); + } + } + } + self + } +} + +impl Default for EventBuilder { + fn default() -> Self { + Self::new() + } +} + +#[cfg(test)] +mod test { + use crate::v16::cbor::serialize_vec; + use crate::v16::util::events::{EVENT_TYPE_KEY, IPLD_CBOR}; + use crate::v16::EventBuilder; + use fvm_shared4::event::{ActorEvent, Entry, Flags}; + + #[test] + fn event_type() { + let e = EventBuilder::new() + .typ("l1") + .field_indexed("v1", "abc") + .build() + .unwrap(); + + let l1_cbor = serialize_vec("l1", "event value").unwrap(); + let v_cbor = serialize_vec("abc", "event value").unwrap(); + + assert_eq!( + ActorEvent { + entries: vec![ + Entry { + flags: Flags::FLAG_INDEXED_ALL, + key: EVENT_TYPE_KEY.to_string(), + codec: IPLD_CBOR, + value: l1_cbor, // CBOR for "l1" + }, + Entry { + flags: Flags::FLAG_INDEXED_ALL, + key: "v1".to_string(), + codec: IPLD_CBOR, + value: v_cbor, // CBOR for "abc" + }, + ] + }, + e + ) + } + + #[test] + fn values() { + let 
e = EventBuilder::new() + .field("v1", &3) + .field_indexed("v2", "abc") + .build() + .unwrap(); + assert_eq!( + ActorEvent { + entries: vec![ + Entry { + flags: Flags::FLAG_INDEXED_KEY, + key: "v1".to_string(), + codec: IPLD_CBOR, + value: vec![0x03], + }, + Entry { + flags: Flags::FLAG_INDEXED_ALL, + key: "v2".to_string(), + codec: IPLD_CBOR, + value: vec![0x63, 0x61, 0x62, 0x63], // CBOR for "abc" + }, + ] + }, + e + ); + } +} diff --git a/fil_actors_shared/src/v16/util/map.rs b/fil_actors_shared/src/v16/util/map.rs new file mode 100644 index 00000000..598a232b --- /dev/null +++ b/fil_actors_shared/src/v16/util/map.rs @@ -0,0 +1,253 @@ +use crate::v16::builtin::HAMT_BIT_WIDTH; +use crate::v16::{ActorError, AsActorError, Hasher}; +use cid::Cid; +use fvm_ipld_blockstore::Blockstore; +use fvm_ipld_hamt as hamt; +use fvm_shared4::address::Address; +use fvm_shared4::error::ExitCode; +use integer_encoding::VarInt; +use serde::de::DeserializeOwned; +use serde::Serialize; +use std::fmt::Debug; +use std::marker::PhantomData; + +/// Wraps a HAMT to provide a convenient map API. +/// Any errors are returned with exit code indicating illegal state. +/// The name is not persisted in state, but adorns any error messages. +pub struct Map2 +where + BS: Blockstore, + K: MapKey, + V: DeserializeOwned + Serialize, +{ + hamt: hamt::Hamt, + name: &'static str, + key_type: PhantomData, +} + +pub trait MapKey: Sized + Debug { + fn from_bytes(b: &[u8]) -> Result; + fn to_bytes(&self) -> Result, String>; +} + +pub type Config = hamt::Config; + +pub const DEFAULT_HAMT_CONFIG: Config = Config { + bit_width: HAMT_BIT_WIDTH, + min_data_depth: 0, + max_array_width: 3, +}; + +impl Map2 +where + BS: Blockstore, + K: MapKey, + V: DeserializeOwned + Serialize, +{ + /// Creates a new, empty map. 
+ pub fn empty(store: BS, config: Config, name: &'static str) -> Self { + Self { + hamt: hamt::Hamt::new_with_config(store, config), + name, + key_type: Default::default(), + } + } + + /// Creates a new empty map and flushes it to the store. + /// Returns the CID of the empty map root. + pub fn flush_empty(store: BS, config: Config) -> Result { + // This CID is constant regardless of the HAMT's configuration, so as an optimisation + // we could hard-code it and merely check it is already stored. + Self::empty(store, config, "empty").flush() + } + + /// Loads a map from the store. + // There is no version of this method that doesn't take an explicit config parameter. + // The caller must know the configuration to interpret the HAMT correctly. + // Forcing them to provide it makes it harder to accidentally use an incorrect default. + pub fn load( + store: BS, + root: &Cid, + config: Config, + name: &'static str, + ) -> Result { + Ok(Self { + hamt: hamt::Hamt::load_with_config(root, store, config) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to load HAMT '{}'", name) + })?, + name, + key_type: Default::default(), + }) + } + + /// Flushes the map's contents to the store. + /// Returns the root node CID. + pub fn flush(&mut self) -> Result { + self.hamt + .flush() + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to flush HAMT '{}'", self.name) + }) + } + + /// Returns a reference to the underlying blockstore. + pub fn store(&self) -> &BS { + self.hamt.store() + } + + /// Returns whether the map is empty. + pub fn is_empty(&self) -> bool { + self.hamt.is_empty() + } + + /// Returns a reference to the value associated with a key, if present. 
+ pub fn get(&self, key: &K) -> Result, ActorError> { + let k = key + .to_bytes() + .context_code(ExitCode::USR_ASSERTION_FAILED, "invalid key")?; + self.hamt + .get(&k) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to get key {key:?} from HAMT '{}'", self.name) + }) + } + + pub fn contains_key(&self, key: &K) -> Result { + let k = key + .to_bytes() + .context_code(ExitCode::USR_ASSERTION_FAILED, "invalid key")?; + self.hamt + .contains_key(&k) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to check key {key:?} in HAMT '{}'", self.name) + }) + } + + /// Inserts a key-value pair into the map. + /// Returns any value previously associated with the key. + pub fn set(&mut self, key: &K, value: V) -> Result, ActorError> + where + V: PartialEq, + { + let k = key + .to_bytes() + .context_code(ExitCode::USR_ASSERTION_FAILED, "invalid key")?; + self.hamt + .set(k.into(), value) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to set key {key:?} in HAMT '{}'", self.name) + }) + } + + /// Inserts a key-value pair only if the key does not already exist. + /// Returns whether the map was modified (i.e. key was absent). 
+ pub fn set_if_absent(&mut self, key: &K, value: V) -> Result + where + V: PartialEq, + { + let k = key + .to_bytes() + .context_code(ExitCode::USR_ASSERTION_FAILED, "invalid key")?; + self.hamt + .set_if_absent(k.into(), value) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to set key {key:?} in HAMT '{}'", self.name) + }) + } + + pub fn delete(&mut self, key: &K) -> Result, ActorError> { + let k = key + .to_bytes() + .with_context_code(ExitCode::USR_ASSERTION_FAILED, || { + format!("invalid key {key:?}") + })?; + self.hamt + .delete(&k) + .map(|delete_result| delete_result.map(|(_k, v)| v)) + .with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("failed to delete key {key:?} from HAMT '{}'", self.name) + }) + } + + /// Iterates over all key-value pairs in the map. + pub fn for_each(&self, mut f: F) -> Result<(), ActorError> + where + F: FnMut(K, &V) -> Result<(), ActorError>, + { + for kv in &self.hamt { + let (k, v) = kv.with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("error traversing HAMT {}", self.name) + })?; + let k = K::from_bytes(k).with_context_code(ExitCode::USR_ILLEGAL_STATE, || { + format!("invalid key in HAMT {}", self.name) + })?; + f(k, v)?; + } + Ok(()) + } +} + +impl MapKey for Vec { + fn from_bytes(b: &[u8]) -> Result { + Ok(b.to_vec()) + } + + fn to_bytes(&self) -> Result, String> { + Ok(self.clone()) + } +} + +impl MapKey for u64 { + fn from_bytes(b: &[u8]) -> Result { + if let Some((result, size)) = VarInt::decode_var(b) { + if size != b.len() { + return Err(format!("trailing bytes after varint in {:?}", b)); + } + Ok(result) + } else { + Err(format!("failed to decode varint in {:?}", b)) + } + } + + fn to_bytes(&self) -> Result, String> { + Ok(self.encode_var_vec()) + } +} + +impl MapKey for i64 { + fn from_bytes(b: &[u8]) -> Result { + if let Some((result, size)) = VarInt::decode_var(b) { + if size != b.len() { + return Err(format!("trailing bytes after varint in {:?}", b)); + } + Ok(result) 
+ } else { + Err(format!("failed to decode varint in {:?}", b)) + } + } + + fn to_bytes(&self) -> Result, String> { + Ok(self.encode_var_vec()) + } +} + +impl MapKey for Address { + fn from_bytes(b: &[u8]) -> Result { + Address::from_bytes(b).map_err(|e| e.to_string()) + } + + fn to_bytes(&self) -> Result, String> { + Ok(Address::to_bytes(*self)) + } +} + +impl MapKey for Cid { + fn from_bytes(b: &[u8]) -> Result { + Cid::try_from(b).map_err(|e| e.to_string()) + } + + fn to_bytes(&self) -> Result, String> { + Ok(self.to_bytes()) + } +} diff --git a/fil_actors_shared/src/v16/util/mapmap.rs b/fil_actors_shared/src/v16/util/mapmap.rs new file mode 100644 index 00000000..eddb1446 --- /dev/null +++ b/fil_actors_shared/src/v16/util/mapmap.rs @@ -0,0 +1,170 @@ +use crate::v16::{make_empty_map, make_map_with_root_and_bitwidth, Keyer, Map}; +use cid::Cid; +use fvm_ipld_blockstore::Blockstore; +use fvm_ipld_hamt::{BytesKey, Error}; +use serde::de::DeserializeOwned; +use serde::Serialize; +use serde::__private::PhantomData; +use std::collections::btree_map::Entry::{Occupied, Vacant}; +use std::collections::BTreeMap; + +// MapMap stores multiple values per key in a Hamt of Hamts +// Every element stored has a primary and secondary key +pub struct MapMap<'a, BS, V, K1, K2> { + outer: Map<'a, BS, Cid>, + inner_bitwidth: u32, + // cache all inner maps loaded since last load/flush + // get/put/remove operations load the inner map into the cache first and modify in memory + // flush writes all inner maps in the cache to the outer map before flushing the outer map + cache: BTreeMap, Map<'a, BS, V>>, + key_types: PhantomData<(K1, K2)>, +} +impl<'a, BS, V, K1, K2> MapMap<'a, BS, V, K1, K2> +where + BS: Blockstore, + V: Serialize + DeserializeOwned + Clone + std::cmp::PartialEq, + K1: Keyer + std::fmt::Debug + std::fmt::Display, + K2: Keyer + std::fmt::Debug + std::fmt::Display, +{ + pub fn new(bs: &'a BS, outer_bitwidth: u32, inner_bitwidth: u32) -> Self { + MapMap { + outer: 
make_empty_map(bs, outer_bitwidth), + inner_bitwidth, + cache: BTreeMap::, Map>::new(), + key_types: PhantomData, + } + } + + pub fn from_root( + bs: &'a BS, + cid: &Cid, + outer_bitwidth: u32, + inner_bitwidth: u32, + ) -> Result { + Ok(MapMap { + outer: make_map_with_root_and_bitwidth(cid, bs, outer_bitwidth)?, + inner_bitwidth, + cache: BTreeMap::, Map>::new(), + key_types: PhantomData, + }) + } + + pub fn flush(&mut self) -> Result { + for (k, in_map) in self.cache.iter_mut() { + if in_map.is_empty() { + self.outer.delete(&BytesKey(k.to_vec()))?; + } else { + let new_in_root = in_map.flush()?; + self.outer.set(BytesKey(k.to_vec()), new_in_root)?; + } + } + self.outer.flush() + } + + // load inner map while memoizing + // 1. ensure inner map is loaded into cache + // 2. return (inner map is empty, inner map) + fn load_inner_map(&mut self, k: K1) -> Result<(bool, &mut Map<'a, BS, V>), Error> { + let in_map_thunk = || -> Result<(bool, Map), Error> { + // lazy to avoid ipld operations in case of cache hit + match self.outer.get(&k.key())? { + // flush semantics guarantee all written inner maps are non empty + Some(root) => Ok(( + false, + make_map_with_root_and_bitwidth::( + root, + *self.outer.store(), + self.inner_bitwidth, + )?, + )), + None => Ok(( + true, + make_empty_map(*self.outer.store(), self.inner_bitwidth), + )), + } + }; + let raw_k = k.key().0; + match self.cache.entry(raw_k) { + Occupied(entry) => { + let in_map = entry.into_mut(); + // cached map could be empty + Ok((in_map.is_empty(), in_map)) + } + Vacant(entry) => { + let (empty, in_map) = in_map_thunk()?; + Ok((empty, entry.insert(in_map))) + } + } + } + + pub fn get(&mut self, outside_k: K1, inside_k: K2) -> Result, Error> { + let (is_empty, in_map) = self.load_inner_map(outside_k)?; + if is_empty { + return Ok(None); + } + in_map.get(&inside_k.key()) + } + + // Iterates over all outer keys. 
+ pub fn for_each(&self, f: F) -> Result<(), Error> + where + F: FnMut(&BytesKey, &Cid) -> anyhow::Result<()>, + { + self.outer.for_each(f) + } + + // Runs a function over all values for one outer key. + pub fn for_each_in(&mut self, outside_k: K1, f: F) -> Result<(), Error> + where + F: FnMut(&BytesKey, &V) -> anyhow::Result<()>, + { + let (is_empty, in_map) = self.load_inner_map(outside_k)?; + if is_empty { + return Ok(()); + } + in_map.for_each(f) + } + + // Puts a key value pair in the MapMap, overwriting any existing value. + // Returns the previous value, if any. + pub fn put(&mut self, outside_k: K1, inside_k: K2, value: V) -> Result, Error> { + let in_map = self.load_inner_map(outside_k)?.1; + // defer flushing cached inner map until flush call + in_map.set(inside_k.key(), value) + } + + // Puts a key value pair in the MapMap if it is not already set. Returns true + // if key is newly set, false if it was already set. + pub fn put_if_absent(&mut self, outside_k: K1, inside_k: K2, value: V) -> Result { + let in_map = self.load_inner_map(outside_k)?.1; + + // defer flushing cached inner map until flush call + in_map.set_if_absent(inside_k.key(), value) + } + + // Puts many values in the MapMap under a single outside key. + // Overwrites any existing values. + pub fn put_many(&mut self, outside_k: K1, values: I) -> Result<(), Error> + where + I: Iterator, + { + let in_map = self.load_inner_map(outside_k)?.1; + for (k, v) in values { + in_map.set(k.key(), v)?; + } + // defer flushing cached inner map until flush call + Ok(()) + } + + /// Removes a key from the MapMap, returning the value at the key if the key + /// was previously set. 
+ pub fn remove(&mut self, outside_k: K1, inside_k: K2) -> Result, Error> { + let (is_empty, in_map) = self.load_inner_map(outside_k)?; + if is_empty { + return Ok(None); + } + in_map + .delete(&inside_k.key()) + .map(|o: Option<(BytesKey, V)>| -> Option { o.map(|p: (BytesKey, V)| -> V { p.1 }) }) + } +} diff --git a/fil_actors_shared/src/v16/util/message_accumulator.rs b/fil_actors_shared/src/v16/util/message_accumulator.rs new file mode 100644 index 00000000..c1bb44c1 --- /dev/null +++ b/fil_actors_shared/src/v16/util/message_accumulator.rs @@ -0,0 +1,175 @@ +use itertools::Itertools; +use std::{cell::RefCell, fmt::Display, rc::Rc}; + +use regex::Regex; + +/// Accumulates a sequence of messages (e.g. validation failures). +#[derive(Debug, Default)] +pub struct MessageAccumulator { + /// Accumulated messages. + /// This is a `Rc` to support accumulators derived from `with_prefix()` accumulating to + /// the same underlying collection. + msgs: Rc>>, + /// Optional prefix to all new messages, e.g. describing higher level context. + prefix: String, +} + +impl MessageAccumulator { + /// Returns a new accumulator backed by the same collection, that will prefix each new message with + /// a formatted string. 
+ pub fn with_prefix>(&self, prefix: S) -> Self { + MessageAccumulator { + msgs: self.msgs.clone(), + prefix: self.prefix.to_owned() + prefix.as_ref(), + } + } + + pub fn is_empty(&self) -> bool { + self.msgs.borrow().is_empty() + } + + pub fn messages(&self) -> Vec { + self.msgs.borrow().to_owned() + } + + /// Returns the number of accumulated messages + pub fn len(&self) -> usize { + self.msgs.borrow().len() + } + + /// Adds a message to the accumulator + pub fn add>(&self, msg: S) { + self.msgs + .borrow_mut() + .push(format!("{}{}", self.prefix, msg.as_ref())); + } + + /// Adds messages from another accumulator to this one + pub fn add_all(&self, other: &Self) { + self.msgs + .borrow_mut() + .extend_from_slice(&other.msgs.borrow()); + } + + /// Adds a message if predicate is false + pub fn require>(&self, predicate: bool, msg: S) { + if !predicate { + self.add(msg); + } + } + + /// Adds a message if result is `Err`. Underlying error must be `Display`. + pub fn require_no_error>(&self, result: Result, msg: S) { + if let Err(e) = result { + self.add(format!("{}: {e}", msg.as_ref())); + } + } + + /// Panic if the accumulator isn't empty. The accumulated messages are included in the panic message. + #[track_caller] + pub fn assert_empty(&self) { + assert!(self.is_empty(), "{}", self.messages().join("\n")) + } + + /// Asserts the accumulator contains messages matching provided pattern *in the given order*. + #[track_caller] + pub fn assert_expected(&self, expected_patterns: &[Regex]) { + let messages = self.messages(); + assert!( + messages.len() == expected_patterns.len(), + "Incorrect number of accumulator messages.\nActual: {}.\nExpected: {}", + messages.join("\n"), + expected_patterns + .iter() + .map(|regex| regex.as_str()) + .join("\n") + ); + + messages + .iter() + .zip(expected_patterns) + .for_each(|(message, pattern)| { + assert!( + pattern.is_match(message), + "message does not match.
Actual: {}, expected: {}", + message, + pattern.as_str() + ); + }); + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn adds_messages() { + let acc = MessageAccumulator::default(); + acc.add("Cthulhu"); + assert_eq!(acc.len(), 1); + + let msgs = acc.messages(); + assert_eq!(msgs, vec!["Cthulhu"]); + + acc.add("Azathoth"); + assert_eq!(acc.len(), 2); + + let msgs = acc.messages(); + assert_eq!(msgs, vec!["Cthulhu", "Azathoth"]); + } + + #[test] + fn adds_on_predicate() { + let acc = MessageAccumulator::default(); + acc.require(true, "Cthulhu"); + + assert_eq!(acc.len(), 0); + assert!(acc.is_empty()); + + acc.require(false, "Azathoth"); + let msgs = acc.messages(); + assert_eq!(acc.len(), 1); + assert_eq!(msgs, vec!["Azathoth"]); + assert!(!acc.is_empty()); + } + + #[test] + fn require_no_error() { + let fiasco: Result<(), String> = Err("fiasco".to_owned()); + let acc = MessageAccumulator::default(); + acc.require_no_error(fiasco, "Cthulhu says"); + + let msgs = acc.messages(); + assert_eq!(acc.len(), 1); + assert_eq!(msgs, vec!["Cthulhu says: fiasco"]); + } + + #[test] + fn prefixes() { + let acc = MessageAccumulator::default(); + acc.add("peasant"); + + let gods_acc = acc.with_prefix("elder god -> "); + gods_acc.add("Cthulhu"); + + assert_eq!(acc.messages(), vec!["peasant", "elder god -> Cthulhu"]); + assert_eq!(gods_acc.messages(), vec!["peasant", "elder god -> Cthulhu"]); + } + + #[test] + fn add_all() { + let acc1 = MessageAccumulator::default(); + acc1.add("Cthulhu"); + + let acc2 = MessageAccumulator::default(); + acc2.add("Azathoth"); + + let acc3 = MessageAccumulator::default(); + acc3.add_all(&acc1); + acc3.add_all(&acc2); + + assert_eq!(2, acc3.len()); + assert_eq!(acc3.messages(), vec!["Cthulhu", "Azathoth"]); + } +} diff --git a/fil_actors_shared/src/v16/util/mod.rs b/fil_actors_shared/src/v16/util/mod.rs new file mode 100644 index 00000000..2857acf9 --- /dev/null +++ b/fil_actors_shared/src/v16/util/mod.rs @@ -0,0 +1,24 @@ +// Copyright 
2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +pub use self::batch_return::*; +pub use self::downcast::*; +pub use self::events::*; +pub use self::map::*; +pub use self::mapmap::MapMap; +pub use self::message_accumulator::MessageAccumulator; +pub use self::multimap::*; +pub use self::set::Set; +pub use self::set_multimap::SetMultimap; +pub use self::set_multimap::SetMultimapConfig; + +mod batch_return; +pub mod cbor; +mod downcast; +mod events; +mod map; +mod mapmap; +mod message_accumulator; +mod multimap; +mod set; +mod set_multimap; diff --git a/fil_actors_shared/src/v16/util/multimap.rs b/fil_actors_shared/src/v16/util/multimap.rs new file mode 100644 index 00000000..5be2c911 --- /dev/null +++ b/fil_actors_shared/src/v16/util/multimap.rs @@ -0,0 +1,118 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use cid::Cid; +use fvm_ipld_blockstore::Blockstore; +use fvm_ipld_hamt::Error; +use serde::de::DeserializeOwned; +use serde::Serialize; + +use crate::v16::{make_empty_map, make_map_with_root_and_bitwidth, Array, BytesKey, Map}; + +/// Multimap stores multiple values per key in a Hamt of Amts. +/// The order of insertion of values for each key is retained. +pub struct Multimap<'a, BS>(Map<'a, BS, Cid>, u32); +impl<'a, BS> Multimap<'a, BS> +where + BS: Blockstore, +{ + /// Initializes a new empty multimap. + /// The outer_bitwidth is the width of the HAMT and the + /// inner_bitwidth is the width of the AMTs inside of it. + pub fn new(bs: &'a BS, outer_bitwidth: u32, inner_bitwidth: u32) -> Self { + Self(make_empty_map(bs, outer_bitwidth), inner_bitwidth) + } + + /// Initializes a multimap from a root Cid + pub fn from_root( + bs: &'a BS, + cid: &Cid, + outer_bitwidth: u32, + inner_bitwidth: u32, + ) -> Result { + Ok(Self( + make_map_with_root_and_bitwidth(cid, bs, outer_bitwidth)?, + inner_bitwidth, + )) + } + + /// Retrieve root from the multimap. 
+ #[inline] + pub fn root(&mut self) -> Result { + self.0.flush() + } + + /// Adds a value for a key. + pub fn add(&mut self, key: BytesKey, value: V) -> Result<(), Error> + where + V: Serialize + DeserializeOwned, + { + // Get construct amt from retrieved cid or create new + let mut arr = self + .get::(&key)? + .unwrap_or_else(|| Array::new_with_bit_width(self.0.store(), self.1)); + + // Set value at next index + arr.set(arr.count(), value) + .map_err(|e| anyhow::anyhow!(e))?; + + // flush to get new array root to put in hamt + let new_root = arr.flush().map_err(|e| anyhow::anyhow!(e))?; + + // Set hamt node to array root + self.0.set(key, new_root)?; + Ok(()) + } + + /// Gets the Array of value type `V` using the multimap store. + #[inline] + pub fn get(&self, key: &[u8]) -> Result>, Error> + where + V: DeserializeOwned + Serialize, + { + match self.0.get(key)? { + Some(cid) => Ok(Some( + Array::load(cid, *self.0.store()).map_err(|e| anyhow::anyhow!(e))?, + )), + None => Ok(None), + } + } + + /// Removes all values for a key. + #[inline] + pub fn remove_all(&mut self, key: &[u8]) -> Result<(), Error> { + // Remove entry from table + self.0 + .delete(key)? + .ok_or("failed to delete from multimap")?; + + Ok(()) + } + + /// Iterates through all values in the array at a given key. + pub fn for_each(&self, key: &[u8], f: F) -> Result<(), Error> + where + V: Serialize + DeserializeOwned, + F: FnMut(u64, &V) -> anyhow::Result<()>, + { + if let Some(amt) = self.get::(key)? 
{ + amt.for_each(f).map_err(|e| anyhow::anyhow!(e))?; + } + + Ok(()) + } + + /// Iterates through all arrays in the multimap + pub fn for_all(&self, mut f: F) -> Result<(), Error> + where + V: Serialize + DeserializeOwned, + F: FnMut(&BytesKey, &Array) -> anyhow::Result<()>, + { + self.0.for_each::<_>(|key, arr_root| { + let arr = Array::load(arr_root, *self.0.store())?; + f(key, &arr) + })?; + + Ok(()) + } +} diff --git a/fil_actors_shared/src/v16/util/set.rs b/fil_actors_shared/src/v16/util/set.rs new file mode 100644 index 00000000..8a1e456f --- /dev/null +++ b/fil_actors_shared/src/v16/util/set.rs @@ -0,0 +1,76 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use cid::Cid; +use fvm_ipld_blockstore::Blockstore; + +use crate::v16::{ActorError, Config, Map2, MapKey}; + +/// Set is a HAMT with empty values. +pub struct Set(Map2) +where + BS: Blockstore, + K: MapKey; + +impl Set +where + BS: Blockstore, + K: MapKey, +{ + /// Initializes a new empty Set with the default bitwidth. + pub fn empty(bs: BS, config: Config, name: &'static str) -> Self { + Self(Map2::empty(bs, config, name)) + } + + /// Initializes a Set from a root Cid. + pub fn load( + bs: BS, + root: &Cid, + config: Config, + name: &'static str, + ) -> Result { + Ok(Self(Map2::load(bs, root, config, name)?)) + } + + /// Retrieve root from the Set. + #[inline] + pub fn flush(&mut self) -> Result { + self.0.flush() + } + + /// Adds key to the set. + #[inline] + pub fn put(&mut self, key: &K) -> Result, ActorError> { + self.0.set(key, ()) + } + + /// Checks if key exists in the set. + #[inline] + pub fn has(&self, key: &K) -> Result { + self.0.contains_key(key) + } + + /// Deletes key from set. + #[inline] + pub fn delete(&mut self, key: &K) -> Result, ActorError> { + self.0.delete(key) + } + + /// Iterates through all keys in the set. 
+ pub fn for_each(&self, mut f: F) -> Result<(), ActorError> + where + F: FnMut(K) -> Result<(), ActorError>, + { + self.0.for_each(|s, _| f(s)) + } + + /// Collects all keys from the set into a vector. + pub fn collect_keys(&self) -> Result, ActorError> { + let mut ret_keys = Vec::new(); + self.for_each(|k| { + ret_keys.push(k); + Ok(()) + })?; + Ok(ret_keys) + } +} diff --git a/fil_actors_shared/src/v16/util/set_multimap.rs b/fil_actors_shared/src/v16/util/set_multimap.rs new file mode 100644 index 00000000..537bdb03 --- /dev/null +++ b/fil_actors_shared/src/v16/util/set_multimap.rs @@ -0,0 +1,156 @@ +// Copyright 2019-2022 ChainSafe Systems +// SPDX-License-Identifier: Apache-2.0, MIT + +use std::marker::PhantomData; + +use cid::Cid; +use fvm_ipld_blockstore::Blockstore; + +use crate::v16::{ActorError, Config, Map2, MapKey}; + +use super::Set; + +pub struct SetMultimapConfig { + pub outer: Config, + pub inner: Config, +} + +/// SetMultimap is a HAMT with values that are also a HAMT treated as a set of keys. +pub struct SetMultimap +where + BS: Blockstore, + K: MapKey, + V: MapKey, +{ + outer: Map2, + inner_config: Config, + value_type: PhantomData, +} + +impl SetMultimap +where + BS: Blockstore, + K: MapKey, + V: MapKey, +{ + /// Initializes a new empty SetMultimap. + pub fn empty(bs: BS, config: SetMultimapConfig, name: &'static str) -> Self { + Self { + outer: Map2::empty(bs, config.outer, name), + inner_config: config.inner, + value_type: Default::default(), + } + } + + /// Initializes a SetMultimap from a root Cid. + pub fn load( + bs: BS, + root: &Cid, + config: SetMultimapConfig, + name: &'static str, + ) -> Result { + Ok(Self { + outer: Map2::load(bs, root, config.outer, name)?, + inner_config: config.inner, + value_type: Default::default(), + }) + } + + /// Retrieve root from the SetMultimap. + #[inline] + pub fn flush(&mut self) -> Result { + self.outer.flush() + } + + /// Puts a value in the set associated with a key. 
+ pub fn put(&mut self, key: &K, value: V) -> Result<(), ActorError> { + // Load HAMT from retrieved cid or create a new empty one. + let mut inner = self.get(key)?.unwrap_or_else(|| { + Set::empty( + self.outer.store(), + self.inner_config.clone(), + "multimap inner", + ) + }); + + inner.put(&value)?; + let new_root = inner.flush()?; + self.outer.set(key, new_root)?; + Ok(()) + } + + /// Puts slice of values in the hash set associated with a key. + pub fn put_many(&mut self, key: &K, values: &[V]) -> Result<(), ActorError> { + let mut inner = self.get(key)?.unwrap_or_else(|| { + Set::empty( + self.outer.store(), + self.inner_config.clone(), + "multimap inner", + ) + }); + + for v in values { + inner.put(v)?; + } + let new_root = inner.flush()?; + self.outer.set(key, new_root)?; + Ok(()) + } + + /// Gets the set of values for a key. + #[inline] + pub fn get(&self, key: &K) -> Result>, ActorError> { + match self.outer.get(key)? { + Some(cid) => Ok(Some(Set::load( + self.outer.store(), + cid, + self.inner_config.clone(), + "multimap inner", + )?)), + None => Ok(None), + } + } + + /// Removes a value from the set associated with a key, if it was present. + #[inline] + pub fn remove(&mut self, key: &K, value: V) -> Result<(), ActorError> { + let mut set = match self.get(key)? { + Some(s) => s, + None => return Ok(()), + }; + + set.delete(&value)?; + let new_root = set.flush()?; + self.outer.set(key, new_root)?; + Ok(()) + } + + /// Removes set at index. + #[inline] + pub fn remove_all(&mut self, key: &K) -> Result<(), ActorError> { + self.outer.delete(key)?; + Ok(()) + } + + /// Iterates over all keys. + pub fn for_each(&self, mut f: F) -> Result<(), ActorError> + where + F: FnMut(K, &Cid) -> Result<(), ActorError>, + { + self.outer.for_each(|k, v| f(k, v)) + } + + /// Iterates values for a key. 
+ pub fn for_each_in(&self, key: &K, f: F) -> Result<(), ActorError> + where + F: FnMut(V) -> Result<(), ActorError>, + { + // Get construct amt from retrieved cid and return if no set exists + let set = match self.get(key)? { + Some(s) => s, + None => return Ok(()), + }; + + set.for_each(f) + } +} diff --git a/fil_actors_shared/src/v16/vm_api/builtin.rs b/fil_actors_shared/src/v16/vm_api/builtin.rs new file mode 100644 index 00000000..0279349e --- /dev/null +++ b/fil_actors_shared/src/v16/vm_api/builtin.rs @@ -0,0 +1,50 @@ +use num_derive::FromPrimitive; + +/// Identifies the builtin actor types for usage with the +/// actor::resolve_builtin_actor_type syscall. +/// Note that there is a mirror of this enum in the FVM SDK src/actors/builtins.rs. +/// These must be kept in sync for the syscall to work correctly, without either side +/// importing the other. +#[derive(PartialEq, Eq, Clone, Copy, PartialOrd, Ord, FromPrimitive, Debug)] +#[repr(i32)] +pub enum Type { + System = 1, + Init = 2, + Cron = 3, + Account = 4, + Power = 5, + Miner = 6, + Market = 7, + PaymentChannel = 8, + Multisig = 9, + Reward = 10, + VerifiedRegistry = 11, + DataCap = 12, + Placeholder = 13, + EVM = 14, + EAM = 15, + EthAccount = 16, +} + +impl Type { + pub fn name(&self) -> &'static str { + match *self { + Type::System => "system", + Type::Init => "init", + Type::Cron => "cron", + Type::Account => "account", + Type::Power => "storagepower", + Type::Miner => "storageminer", + Type::Market => "storagemarket", + Type::PaymentChannel => "paymentchannel", + Type::Multisig => "multisig", + Type::Reward => "reward", + Type::VerifiedRegistry => "verifiedregistry", + Type::DataCap => "datacap", + Type::Placeholder => "placeholder", + Type::EVM => "evm", + Type::EAM => "eam", + Type::EthAccount => "ethaccount", + } + } +} diff --git a/fil_actors_shared/src/v16/vm_api/error.rs b/fil_actors_shared/src/v16/vm_api/error.rs new file mode 100644 index 00000000..0869244f --- /dev/null +++ 
b/fil_actors_shared/src/v16/vm_api/error.rs @@ -0,0 +1,30 @@ +use std::{error::Error, fmt}; + +#[derive(Debug)] +pub struct VMError { + msg: String, +} + +impl fmt::Display for VMError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.msg) + } +} + +impl Error for VMError { + fn description(&self) -> &str { + &self.msg + } +} + +impl From for VMError { + fn from(h_err: fvm_ipld_hamt::Error) -> Self { + vm_err(h_err.to_string().as_str()) + } +} + +pub fn vm_err(msg: &str) -> VMError { + VMError { + msg: msg.to_string(), + } +} diff --git a/fil_actors_shared/src/v16/vm_api/mod.rs b/fil_actors_shared/src/v16/vm_api/mod.rs new file mode 100644 index 00000000..dab0d68e --- /dev/null +++ b/fil_actors_shared/src/v16/vm_api/mod.rs @@ -0,0 +1,269 @@ +use std::collections::BTreeMap; + +use anyhow::Error; +use cid::Cid; +use fvm_ipld_blockstore::Blockstore; +use fvm_ipld_encoding::{ + ipld_block::IpldBlock, + tuple::{serde_tuple, Deserialize_tuple, Serialize_tuple}, +}; +use fvm_shared4::{ + address::Address, + clock::ChainEpoch, + consensus::ConsensusFault, + crypto::{ + hash::SupportedHashes, + signature::{Signature, SECP_PUB_LEN, SECP_SIG_LEN, SECP_SIG_MESSAGE_HASH_SIZE}, + }, + econ::TokenAmount, + error::ExitCode, + piece::PieceInfo, + sector::{ + AggregateSealVerifyProofAndInfos, RegisteredSealProof, ReplicaUpdateInfo, SealVerifyInfo, + WindowPoStVerifyInfo, + }, + MethodNum, +}; + +use builtin::*; +pub use error::*; +use trace::*; + +pub mod builtin; +mod error; +pub mod trace; + +/// An abstract VM that is injected into integration tests +#[allow(clippy::type_complexity)] +pub trait VM { + /// Returns the underlying blockstore of the VM + fn blockstore(&self) -> &dyn Blockstore; + + /// Get information about an actor + fn actor(&self, address: &Address) -> Option; + + /// Upsert an actor into the state tree + fn set_actor(&self, key: &Address, a: ActorState); + + /// Get the balance of the specified actor + fn balance(&self, address: 
&Address) -> TokenAmount; + + /// Get the ID for the specified address + fn resolve_id_address(&self, address: &Address) -> Option
;

    /// Send a message between the two specified actors
    // NOTE(review): generic parameters on Option/Result below were lost in extraction;
    // restored as Option<IpldBlock> / Result<MessageResult, VMError> to match the
    // upstream builtin-actors vm_api trait — confirm against the original patch.
    fn execute_message(
        &self,
        from: &Address,
        to: &Address,
        value: &TokenAmount,
        method: MethodNum,
        params: Option<IpldBlock>,
    ) -> Result<MessageResult, VMError>;

    /// Send a message without charging gas
    fn execute_message_implicit(
        &self,
        from: &Address,
        to: &Address,
        value: &TokenAmount,
        method: MethodNum,
        params: Option<IpldBlock>,
    ) -> Result<MessageResult, VMError>;

    /// Take all the invocations that have been made since the last call to this method
    fn take_invocations(&self) -> Vec<InvocationTrace>;

    /// Provides access to VM primitives
    fn primitives(&self) -> &dyn Primitives;

    /// Provides access to VM primitives that can be mocked
    fn mut_primitives(&self) -> &dyn MockPrimitives;

    /// Return a map of actor code CIDs to their corresponding types
    // NOTE(review): value type restored as the builtin-actors `Type` enum — confirm.
    fn actor_manifest(&self) -> BTreeMap<Cid, Type>;

    /// Returns a map of all actor addresses to their corresponding states
    fn actor_states(&self) -> BTreeMap<Address, ActorState>;

    // Overridable constants and extern behaviour

    /// Get the current chain epoch
    fn epoch(&self) -> ChainEpoch;

    /// Sets the epoch to the specified value
    fn set_epoch(&self, epoch: ChainEpoch);

    /// Get the circulating supply constant for the network
    fn circulating_supply(&self) -> TokenAmount;

    /// Set the circulating supply constant for the network
    fn set_circulating_supply(&self, supply: TokenAmount);

    /// Get the current base fee
    fn base_fee(&self) -> TokenAmount;

    /// Set the current base fee
    fn set_base_fee(&self, amount: TokenAmount);

    /// Get the current timestamp
    fn timestamp(&self) -> u64;

    /// Set the current timestamp
    fn set_timestamp(&self, timestamp: u64);
}

/// Outcome of a message execution: the exit code, a human-readable message,
/// and the optional return payload.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct MessageResult {
    pub code: ExitCode,
    pub message: String,
    pub ret: Option<IpldBlock>,
}

// Duplicates an internal FVM type (fvm::state_tree::ActorState) that cannot be depended on here
#[derive(Serialize_tuple, Deserialize_tuple, Clone, PartialEq, Eq, Debug)]
pub struct ActorState {
    /// Link to code for the actor.
    pub code: Cid,
    /// Link to the state of the actor.
    pub state: Cid,
    /// Sequence of the actor.
    pub sequence: u64,
    /// Tokens available to the actor.
    pub balance: TokenAmount,
    /// The actor's "delegated" address, if assigned.
    ///
    /// This field is set on actor creation and never modified.
    pub delegated_address: Option<Address>,
}

/// Convenience constructor for an [`ActorState`]; simply moves each argument
/// into the corresponding field.
pub fn new_actor(
    code: Cid,
    state: Cid,
    sequence: u64,
    balance: TokenAmount,
    delegated_address: Option<Address>,
) -> ActorState {
    ActorState {
        code,
        state,
        sequence,
        balance,
        delegated_address,
    }
}

/// Pure functions implemented as primitives by the runtime.
pub trait Primitives {
    /// Hashes input data using blake2b with 256 bit output.
    fn hash_blake2b(&self, data: &[u8]) -> [u8; 32];

    /// Hashes input data using a supported hash function.
    fn hash(&self, hasher: SupportedHashes, data: &[u8]) -> Vec<u8>;

    /// Hashes input into a 64 byte buffer
    fn hash_64(&self, hasher: SupportedHashes, data: &[u8]) -> ([u8; 64], usize);

    /// Computes an unsealed sector CID (CommD) from its constituent piece CIDs (CommPs) and sizes.
    fn compute_unsealed_sector_cid(
        &self,
        proof_type: RegisteredSealProof,
        pieces: &[PieceInfo],
    ) -> Result<Cid, anyhow::Error>;

    /// Verifies that a signature is valid for an address and plaintext.
    fn verify_signature(
        &self,
        signature: &Signature,
        signer: &Address,
        plaintext: &[u8],
    ) -> Result<(), Error>;

    fn recover_secp_public_key(
        &self,
        hash: &[u8; SECP_SIG_MESSAGE_HASH_SIZE],
        signature: &[u8; SECP_SIG_LEN],
    ) -> Result<[u8; SECP_PUB_LEN], Error>;

    /// Verifies a window proof of spacetime.
    fn verify_post(&self, verify_info: &WindowPoStVerifyInfo) -> Result<(), anyhow::Error>;

    /// Verifies that two block headers provide proof of a consensus fault:
    /// - both headers mined by the same actor
    /// - headers are different
    /// - first header is of the same or lower epoch as the second
    /// - at least one of the headers appears in the current chain at or after epoch `earliest`
    /// - the headers provide evidence of a fault (see the spec for the different fault types).
    ///
    /// The parameters are all serialized block headers. The third "extra" parameter is consulted only for
    /// the "parent grinding fault", in which case it must be the sibling of h1 (same parent tipset) and one of the
    /// blocks in the parent of h2 (i.e. h2's grandparent).
    /// Returns nil and an error if the headers don't prove a fault.
    fn verify_consensus_fault(
        &self,
        h1: &[u8],
        h2: &[u8],
        extra: &[u8],
    ) -> Result<Option<ConsensusFault>, anyhow::Error>;

    fn batch_verify_seals(&self, batch: &[SealVerifyInfo]) -> anyhow::Result<Vec<bool>>;

    fn verify_aggregate_seals(
        &self,
        aggregate: &AggregateSealVerifyProofAndInfos,
    ) -> Result<(), anyhow::Error>;

    fn verify_replica_update(&self, replica: &ReplicaUpdateInfo) -> Result<(), anyhow::Error>;
}

/// Extension of [`Primitives`] whose implementations can be swapped out at
/// runtime, for mocking in tests. Each `override_*` method replaces the
/// corresponding primitive with the supplied function pointer.
#[allow(clippy::type_complexity)]
pub trait MockPrimitives: Primitives {
    /// Override the primitive hash_blake2b function
    fn override_hash_blake2b(&self, f: fn(&[u8]) -> [u8; 32]);

    /// Override the primitive hash function
    fn override_hash(&self, f: fn(SupportedHashes, &[u8]) -> Vec<u8>);

    /// Override the primitive hash_64 function
    fn override_hash_64(&self, f: fn(SupportedHashes, &[u8]) -> ([u8; 64], usize));

    /// Override the primitive compute_unsealed_sector_cid function
    fn override_compute_unsealed_sector_cid(
        &self,
        f: fn(RegisteredSealProof, &[PieceInfo]) -> Result<Cid, Error>,
    );

    /// Override the primitive recover_secp_public_key function
    fn override_recover_secp_public_key(
        &self,
        f: fn(
            &[u8; SECP_SIG_MESSAGE_HASH_SIZE],
            &[u8; SECP_SIG_LEN],
        ) -> Result<[u8; SECP_PUB_LEN], Error>,
    );

    /// Override the primitive verify_post function
    fn override_verify_post(&self, f: fn(&WindowPoStVerifyInfo) -> Result<(), Error>);

    /// Override the primitive verify_consensus_fault function
    fn override_verify_consensus_fault(
        &self,
        f: fn(&[u8], &[u8], &[u8]) -> Result<Option<ConsensusFault>, Error>,
    );

    /// Override the primitive batch_verify_seals function
    fn override_batch_verify_seals(&self, f: fn(&[SealVerifyInfo]) -> Result<Vec<bool>, Error>);

    /// Override the primitive verify_aggregate_seals function
    fn override_verify_aggregate_seals(
        &self,
        f: fn(&AggregateSealVerifyProofAndInfos) -> Result<(), Error>,
    );

    /// Override the primitive verify_signature function
    fn override_verify_signature(&self, f: fn(&Signature, &Address, &[u8]) -> Result<(), Error>);

    /// Override the primitive verify_replica_update function
    fn override_verify_replica_update(&self, f: fn(&ReplicaUpdateInfo) -> Result<(), Error>);

    /// Upcast to the plain [`Primitives`] trait object.
    fn as_primitives(&self) -> &dyn Primitives;
}
diff --git a/fil_actors_shared/src/v16/vm_api/trace.rs b/fil_actors_shared/src/v16/vm_api/trace.rs
new file mode 100644
index 00000000..6b50e5af
--- /dev/null
+++ b/fil_actors_shared/src/v16/vm_api/trace.rs
@@ -0,0 +1,225 @@
use fvm_ipld_encoding::ipld_block::IpldBlock;
use fvm_shared4::address::Address;
use fvm_shared4::econ::TokenAmount;
use fvm_shared4::error::{ErrorNumber, ExitCode};
use fvm_shared4::event::ActorEvent;
use fvm_shared4::{ActorID, MethodNum};

type ReturnValue = Option<IpldBlock>;

/// An event emitted by an actor, paired with the emitter's ID.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct EmittedEvent {
    pub emitter: ActorID,
    pub event: ActorEvent,
}

/// A trace of an actor method invocation.
#[derive(Clone, Debug)]
pub struct InvocationTrace {
    pub from: ActorID,
    pub to: Address,
    pub value: TokenAmount,
    pub method: MethodNum,
    pub params: Option<IpldBlock>,
    /// error_number is set when an unexpected syscall error occurs
    pub error_number: Option<ErrorNumber>,
    // no need to check return_value or exit_code if error_number is set
    pub exit_code: ExitCode,
    pub return_value: ReturnValue,
    pub subinvocations: Vec<InvocationTrace>,
    pub events: Vec<EmittedEvent>,
}

/// An expectation for a method invocation trace.
/// Non-optional fields must always be specified, and are always checked against any trace.
/// Optional fields are ignored when checking the expectation against a trace.
// Future work:
// - Add mutator or factory methods to allow builder-style customisation of expectations.
// - Add a capture() option on value, params, ret etc to enable extraction of internal values
//   while matching with an invocation trace.
// - Make value mandatory (requires specifying the currently unknown ones).
+// - Return a top-level ExpectInvocation from helpers like util::apply_ok to save caller +// constructing it. +#[derive(Clone, Debug)] +pub struct ExpectInvocation { + pub from: ActorID, + pub to: Address, + pub method: MethodNum, + pub value: Option, + pub params: Option>, + /// If error_number is set, exit_code and return_value are not checked + pub error_number: Option, + pub exit_code: ExitCode, + pub return_value: Option, + pub subinvocs: Option>, + pub events: Option>, +} + +impl ExpectInvocation { + /// Asserts that a trace matches this expectation, including subinvocations. + pub fn matches(&self, invoc: &InvocationTrace) { + let id = format!("[{}→{}:{}]", invoc.from, invoc.to, invoc.method); + self.quick_match(invoc, String::new()); + + if self.error_number.is_some() && self.return_value.is_some() { + panic!( + "{} malformed expectation: expected error_number {} but also expected return_value", + id, + self.error_number.unwrap() + ); + } + + if let Some(error_number) = &self.error_number { + assert!( + invoc.error_number.is_some(), + "{} expected error_number: {}, was: None", + id, + error_number + ); + assert_eq!( + error_number, + &invoc.error_number.unwrap(), + "{} unexpected error_number: expected: {}, was: {}", + id, + error_number, + invoc.error_number.unwrap() + ); + } else { + assert_eq!( + self.exit_code, invoc.exit_code, + "{} unexpected exit_code: expected: {}, was: {}", + id, self.exit_code, invoc.exit_code + ); + + if let Some(v) = &self.return_value { + assert_eq!( + v, &invoc.return_value, + "{} unexpected return_value: expected: {:?}, was: {:?}", + id, v, invoc.return_value + ); + } + } + + if let Some(v) = &self.value { + assert_eq!( + v, &invoc.value, + "{} unexpected value: expected: {}, was: {} ", + id, v, invoc.value + ); + } + if let Some(p) = &self.params { + assert_eq!( + p, &invoc.params, + "{} unexpected params: expected: {:x?}, was: {:x?}", + id, p, invoc.params + ); + } + + // match emitted events + if let Some(expected_events) 
= &self.events { + let emitted_events = &invoc.events; + assert_eq!( + emitted_events.len(), + expected_events.len(), + "{} {} emitted={}, expected={}, {:?}, {:?}", + id, + "length of expected and emitted events do not match", + emitted_events.len(), + expected_events.len(), + emitted_events, + expected_events + ); + + // use the zip method to iterate over the emitted events and expected_events + // vectors at the same time + for (emitted, expected) in emitted_events.iter().zip(expected_events.iter()) { + // only try to match if required fields match + assert_eq!(*emitted, *expected); + } + } + + if let Some(expect_subinvocs) = &self.subinvocs { + let subinvocs = &invoc.subinvocations; + + let panic_str = format!( + "unexpected subinvocs:\n expected: \n[\n{}]\n was:\n[\n{}]\n", + self.fmt_expect_invocs(expect_subinvocs), + self.fmt_invocs(subinvocs) + ); + assert_eq!( + subinvocs.len(), + expect_subinvocs.len(), + "{} {}", + id, + panic_str + ); + + for (i, invoc) in subinvocs.iter().enumerate() { + let expect_invoc = expect_subinvocs.get(i).unwrap(); + // only try to match if required fields match + expect_invoc.quick_match(invoc, panic_str.clone()); + expect_invoc.matches(invoc); + } + } + } + + pub fn fmt_invocs(&self, invocs: &[InvocationTrace]) -> String { + invocs + .iter() + .enumerate() + .fold(String::new(), |mut s, (i, invoc)| { + use std::fmt::Write; + let _ = writeln!(s, "{}: [{}:{}],", i, invoc.to, invoc.method); + s + }) + } + + pub fn fmt_expect_invocs(&self, exs: &[ExpectInvocation]) -> String { + exs.iter() + .enumerate() + .fold(String::new(), |mut s, (i, ex)| { + use std::fmt::Write; + let _ = writeln!(s, "{}: [{}:{}],", i, ex.to, ex.method); + s + }) + } + + pub fn quick_match(&self, invoc: &InvocationTrace, extra_msg: String) { + let id = format!("[{}→{}:{}]", invoc.from, invoc.to, invoc.method); + assert_eq!( + self.from, invoc.from, + "{} unexpected from addr: expected: {}, was: {} \n{}", + id, self.from, invoc.from, extra_msg + ); + 
assert_eq!( + self.to, invoc.to, + "{} unexpected to addr: expected: {}, was: {} \n{}", + id, self.to, invoc.to, extra_msg + ); + assert_eq!( + self.method, invoc.method, + "{} unexpected method: expected: {}, was: {} \n{}", + id, self.method, invoc.method, extra_msg + ); + } +} + +impl Default for ExpectInvocation { + // Defaults are mainly useful for ignoring optional fields with a ..Default::default() clause. + // The addresses must generally be provided explicitly. + // Defaults include successful exit code. + fn default() -> Self { + Self { + from: 0, + to: Address::new_id(0), + method: 0, + value: None, + params: None, + error_number: None, + exit_code: ExitCode::OK, + return_value: None, + subinvocs: None, + events: None, + } + } +}