diff --git a/.changelog/unreleased/improvements/3088-emit-generic-events-wasm.md b/.changelog/unreleased/improvements/3088-emit-generic-events-wasm.md new file mode 100644 index 0000000000..9dd1b538a4 --- /dev/null +++ b/.changelog/unreleased/improvements/3088-emit-generic-events-wasm.md @@ -0,0 +1,2 @@ +- Emit core events (i.e. `namada_core::event::Event`) from tx wasms. + ([\#3088](https://github.com/anoma/namada/pull/3088)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/3102-move-events-to-submodules.md b/.changelog/unreleased/improvements/3102-move-events-to-submodules.md new file mode 100644 index 0000000000..284926e891 --- /dev/null +++ b/.changelog/unreleased/improvements/3102-move-events-to-submodules.md @@ -0,0 +1,2 @@ +- Move event types to their appropriate crates. + ([\#3102](https://github.com/anoma/namada/pull/3102)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/3141-balance-change-events.md b/.changelog/unreleased/improvements/3141-balance-change-events.md new file mode 100644 index 0000000000..0fc29ce8ab --- /dev/null +++ b/.changelog/unreleased/improvements/3141-balance-change-events.md @@ -0,0 +1,2 @@ +- Emit balance change events for various protocol actions. + ([\#3141](https://github.com/anoma/namada/pull/3141)) \ No newline at end of file diff --git a/.changelog/unreleased/improvements/3174-extra-event-attrs.md b/.changelog/unreleased/improvements/3174-extra-event-attrs.md new file mode 100644 index 0000000000..24fe2e4392 --- /dev/null +++ b/.changelog/unreleased/improvements/3174-extra-event-attrs.md @@ -0,0 +1,2 @@ +- Add the ability to read or write to event attributes with dynamically allocated + attribute keys. ([\#3174](https://github.com/anoma/namada/pull/3174)) \ No newline at end of file diff --git a/.changelog/v0.33.0/improvements/3032-generalize-events.md b/.changelog/v0.33.0/improvements/3032-generalize-events.md new file mode 100644 index 0000000000..4f72df6ab2 --- /dev/null +++ b/.changelog/v0.33.0/improvements/3032-generalize-events.md @@ -0,0 +1,2 @@ +- Erase protocol specific details from the core API of events in Namada. 
+ ([\#3032](https://github.com/anoma/namada/pull/3032)) \ No newline at end of file diff --git a/.github/workflows/scripts/hermes.txt b/.github/workflows/scripts/hermes.txt index fc44e0ce9b..1d800b3113 100644 --- a/.github/workflows/scripts/hermes.txt +++ b/.github/workflows/scripts/hermes.txt @@ -1 +1 @@ -1.7.4-namada-beta8-rc2 +1.7.4-namada-beta9-rc diff --git a/Cargo.lock b/Cargo.lock index d1669b6096..11de4da55b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1171,6 +1171,12 @@ version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28c122c3980598d243d63d9a704629a2d748d101f278052ff068be5a4423ab6f" +[[package]] +name = "const_panic" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6051f239ecec86fde3410901ab7860d458d160371533842974fc61f96d15879b" + [[package]] name = "constant_time_eq" version = "0.3.0" @@ -3776,6 +3782,26 @@ dependencies = [ "cpufeatures", ] +[[package]] +name = "konst" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d712a8c49d4274f8d8a5cf61368cb5f3c143d149882b1a2918129e53395fdb0" +dependencies = [ + "const_panic", + "konst_kernel", + "typewit", +] + +[[package]] +name = "konst_kernel" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dac6ea8c376b6e208a81cf39b8e82bebf49652454d98a4829e907dac16ef1790" +dependencies = [ + "typewit", +] + [[package]] name = "lazy_static" version = "1.4.0" @@ -4305,6 +4331,7 @@ dependencies = [ "ibc-testkit", "itertools 0.10.5", "k256", + "konst", "linkme", "loupe", "masp_primitives", @@ -4312,6 +4339,7 @@ dependencies = [ "namada_account", "namada_core", "namada_ethereum_bridge", + "namada_events", "namada_gas", "namada_governance", "namada_ibc", @@ -4581,9 +4609,11 @@ dependencies = [ "ethers", "eyre", "itertools 0.10.5", + "konst", "linkme", "namada_account", "namada_core", + "namada_events", "namada_macros", "namada_migrations", "namada_parameters", @@ -4604,6 +4634,19 @@ dependencies = [ "tracing", ] +[[package]] +name = "namada_events" +version = "0.34.0" +dependencies = [ + "borsh 1.2.1", + "linkme", + "namada_core", + "namada_macros", + "namada_migrations", + "serde 1.0.193", + "thiserror", +] + [[package]] name = "namada_examples" version = "0.34.0" @@ -4632,6 +4675,7 @@ dependencies = [ "borsh 1.2.1", "linkme", "namada_core", + "namada_events", "namada_macros", "namada_migrations", "proptest", @@ -4645,8 +4689,10 @@ version = "0.34.0" dependencies = [ "borsh 1.2.1", "itertools 0.10.5", + "konst", "linkme", "namada_core", + "namada_events", "namada_macros", "namada_migrations", "namada_parameters", @@ -4668,10 +4714,15 @@ dependencies = [ "ibc-derive", "ibc-testkit", "ics23", + "konst", + "linkme", "masp_primitives", "namada_core", + "namada_events", "namada_gas", "namada_governance", + "namada_macros", + "namada_migrations", "namada_parameters", "namada_state", "namada_storage", @@ -4679,6 +4730,7 @@ dependencies = [ "primitive-types", "proptest", "prost 0.12.3", + "serde 1.0.193", "serde_json", "sha2 0.9.9", "thiserror", @@ -4764,10 +4816,12 @@ dependencies = [ "data-encoding", "derivative", "itertools 0.10.5", + "konst", "linkme", "namada_account", "namada_controller", "namada_core", + "namada_events", "namada_governance", "namada_macros", "namada_migrations", @@ -4823,6 +4877,7 @@ dependencies = [ "namada_account", "namada_core", "namada_ethereum_bridge", + "namada_events", "namada_gas", "namada_governance", "namada_ibc", @@ -4842,6 +4897,7 @@ 
dependencies = [ "owo-colors", "parse_duration", "paste", + "patricia_tree", "proptest", "prost 0.12.3", "rand 0.8.5", @@ -4894,6 +4950,7 @@ dependencies = [ "itertools 0.10.5", "linkme", "namada_core", + "namada_events", "namada_gas", "namada_macros", "namada_merkle_tree", @@ -4903,6 +4960,7 @@ dependencies = [ "namada_storage", "namada_trans_token", "namada_tx", + "patricia_tree", "pretty_assertions", "proptest", "sha2 0.9.9", @@ -5000,6 +5058,7 @@ name = "namada_token" version = "0.34.0" dependencies = [ "namada_core", + "namada_events", "namada_shielded_token", "namada_storage", "namada_trans_token", @@ -5009,8 +5068,10 @@ dependencies = [ name = "namada_trans_token" version = "0.34.0" dependencies = [ + "konst", "linkme", "namada_core", + "namada_events", "namada_storage", ] @@ -5023,9 +5084,11 @@ dependencies = [ "bitflags 2.5.0", "borsh 1.2.1", "data-encoding", + "konst", "linkme", "masp_primitives", "namada_core", + "namada_events", "namada_gas", "namada_macros", "namada_migrations", @@ -5047,6 +5110,7 @@ name = "namada_tx_env" version = "0.34.0" dependencies = [ "namada_core", + "namada_events", "namada_storage", ] @@ -5058,6 +5122,7 @@ dependencies = [ "masp_primitives", "namada_account", "namada_core", + "namada_events", "namada_governance", "namada_ibc", "namada_macros", @@ -5102,6 +5167,7 @@ dependencies = [ "derivative", "masp_primitives", "namada_core", + "namada_events", "namada_ibc", "namada_storage", "namada_tx", @@ -5115,6 +5181,7 @@ dependencies = [ "borsh 1.2.1", "namada_account", "namada_core", + "namada_events", "namada_governance", "namada_ibc", "namada_macros", @@ -5644,6 +5711,15 @@ version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" +[[package]] +name = "patricia_tree" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31f2f4539bffe53fc4b4da301df49d114b845b077bd5727b7fe2bd9d8df2ae68" +dependencies = [ + "bitflags 2.5.0", +] + [[package]] name = "pbkdf2" version = "0.4.0" @@ -8191,6 +8267,21 @@ version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" +[[package]] +name = "typewit" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6fb9ae6a3cafaf0a5d14c2302ca525f9ae8e07a0f0e6949de88d882c37a6e24" +dependencies = [ + "typewit_proc_macros", +] + +[[package]] +name = "typewit_proc_macros" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e36a83ea2b3c704935a01b4642946aadd445cea40b10935e3f8bd8052b8193d6" + [[package]] name = "ucd-trie" version = "0.1.6" diff --git a/Cargo.toml b/Cargo.toml index 24b09c5dab..4ca0dc057d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,6 +9,7 @@ members = [ "crates/core", "crates/encoding_spec", "crates/ethereum_bridge", + "crates/events", "crates/gas", "crates/governance", "crates/ibc", @@ -113,6 +114,7 @@ indexmap = { git = "https://github.com/heliaxdev/indexmap", tag = "2.2.4-heliax- itertools = "0.10.0" jubjub = "0.10" k256 = { version = "0.13.0", default-features = false, features = ["ecdsa", "pkcs8", "precomputed-tables", "serde", "std"]} +konst = { version = "0.3.8", default-features = false } lazy_static = "1.4.0" ledger-namada-rs = { git = "https://github.com/Zondax/ledger-namada", tag = "v0.0.12" } ledger-transport-hid = "0.10.0" @@ -130,6 +132,7 @@ num-traits = "0.2.14" once_cell 
= "1.8.0" orion = "0.16.0" paste = "1.0.9" +patricia_tree = "0.8.0" pretty_assertions = "1.4.0" primitive-types = "0.12.1" proptest = "1.4.0" diff --git a/Makefile b/Makefile index a53126b616..6deb843a3b 100644 --- a/Makefile +++ b/Makefile @@ -37,6 +37,7 @@ crates += namada_benchmarks crates += namada_core crates += namada_encoding_spec crates += namada_ethereum_bridge +crates += namada_events crates += namada_gas crates += namada_governance crates += namada_ibc diff --git a/crates/apps/src/lib/bench_utils.rs b/crates/apps/src/lib/bench_utils.rs index cc72bddb48..88dc06873a 100644 --- a/crates/apps/src/lib/bench_utils.rs +++ b/crates/apps/src/lib/bench_utils.rs @@ -25,6 +25,8 @@ use namada::core::masp::{ use namada::core::storage::{BlockHeight, Epoch, Key, KeySeg, TxIndex}; use namada::core::time::DateTimeUtc; use namada::core::token::{Amount, DenominatedAmount, Transfer}; +use namada::events::extend::{ComposeEvent, ValidMaspTx}; +use namada::events::Event; use namada::governance::storage::proposal::ProposalType; use namada::governance::InitProposalData; use namada::ibc::apps::transfer::types::msgs::transfer::MsgTransfer as IbcMsgTransfer; @@ -73,6 +75,7 @@ use namada::ledger::queries::{ use namada::state::StorageRead; use namada::tx::data::pos::Bond; use namada::tx::data::{Fee, TxResult, VpsResult}; +use namada::tx::event::{new_tx_event, InnerTx}; use namada::tx::{Authorization, Code, Data, Section, Tx}; use namada::vm::wasm::run; use namada::{proof_of_stake, tendermint}; @@ -882,32 +885,20 @@ impl Client for BenchShell { self.last_block_masp_txs .iter() .enumerate() - .map(|(idx, (_tx, changed_keys))| { + .map(|(idx, (tx, changed_keys))| { let tx_result = TxResult { gas_used: 0.into(), wrapper_changed_keys: Default::default(), changed_keys: changed_keys.to_owned(), vps_result: VpsResult::default(), initialized_accounts: vec![], - ibc_events: BTreeSet::default(), - eth_bridge_events: BTreeSet::default(), + events: BTreeSet::default(), }; - namada::tendermint::abci::Event { - kind: "applied".to_string(), - // Mock the masp and tx attributes - attributes: vec![ - namada::tendermint::abci::EventAttribute { - key: "is_valid_masp_tx".to_string(), - value: format!("{}", idx), - index: true, - }, - namada::tendermint::abci::EventAttribute { - key: "inner_tx".to_string(), - value: tx_result.to_string(), - index: true, - }, - ], - } + let event: Event = new_tx_event(tx, height.value()) + .with(InnerTx(&tx_result)) + .with(ValidMaspTx(TxIndex::must_from_usize(idx))) + .into(); + namada::tendermint::abci::Event::from(event) }) .collect(), ) diff --git a/crates/apps/src/lib/client/rpc.rs b/crates/apps/src/lib/client/rpc.rs index 7364e5a8c2..592567a246 100644 --- a/crates/apps/src/lib/client/rpc.rs +++ b/crates/apps/src/lib/client/rpc.rs @@ -38,6 +38,7 @@ use namada::proof_of_stake::types::{ ValidatorState, ValidatorStateInfo, WeightedValidator, }; use namada::{state as storage, token}; +use namada_sdk::control_flow::time::{Duration, Instant}; use namada_sdk::error::{ is_pinned_error, Error, PinnedBalanceError, QueryError, }; @@ -51,11 +52,9 @@ use namada_sdk::tendermint_rpc::endpoint::status; use namada_sdk::tx::display_inner_resp; use namada_sdk::wallet::AddressVpType; use namada_sdk::{display, display_line, edisplay_line, error, prompt, Namada}; -use tokio::time::Instant; use crate::cli::{self, args}; use crate::facade::tendermint::merkle::proof::ProofOps; -use crate::facade::tendermint_rpc::error::Error as TError; /// Query the status of a given transaction. 
/// @@ -2571,26 +2570,30 @@ pub async fn query_tx_events( namada_sdk::rpc::query_tx_events(client, tx_event_query).await } -/// Lookup the full response accompanying the specified transaction event -// TODO: maybe remove this in favor of `query_tx_status` -pub async fn query_tx_response( - client: &C, - tx_query: namada_sdk::rpc::TxEventQuery<'_>, -) -> Result { - namada_sdk::rpc::query_tx_response(client, tx_query).await -} - /// Lookup the results of applying the specified transaction to the /// blockchain. pub async fn query_result(context: &impl Namada, args: args::QueryResult) { - // First try looking up application event pertaining to given hash. - let inner_resp = query_tx_response( - context.client(), + display_line!( + context.io(), + "Checking if tx {} is applied...", + args.tx_hash + ); + + match rpc::query_tx_status( + context, namada_sdk::rpc::TxEventQuery::Applied(&args.tx_hash), + Instant::now() + Duration::from_secs(10), ) - .await; - match inner_resp { + .await + { Ok(resp) => { + let resp = match TxResponse::try_from(resp) { + Ok(resp) => resp, + Err(err) => { + edisplay_line!(context.io(), "{err}"); + cli::safe_exit(1) + } + }; display_inner_resp(context, &resp); } Err(err) => { diff --git a/crates/apps/src/lib/node/ledger/shell/finalize_block.rs b/crates/apps/src/lib/node/ledger/shell/finalize_block.rs index 4c6b6c419a..09b644ad68 100644 --- a/crates/apps/src/lib/node/ledger/shell/finalize_block.rs +++ b/crates/apps/src/lib/node/ledger/shell/finalize_block.rs @@ -4,11 +4,12 @@ use data_encoding::HEXUPPER; use masp_primitives::merkle_tree::CommitmentTree; use masp_primitives::sapling::Node; use namada::core::storage::{BlockResults, Epoch, Header}; -use namada::gas::event::WithGasUsed; +use namada::gas::event::GasUsed; use namada::governance::pgf::inflation as pgf_inflation; use namada::hash::Hash; -use namada::ledger::events::extend::{ComposeEvent, Height, Info, ValidMaspTx}; -use namada::ledger::events::EmitEvents; +use namada::ledger::events::extend::{ + ComposeEvent, Height, Info, TxHash, ValidMaspTx, +}; use namada::ledger::gas::GasMetering; use namada::ledger::ibc; use namada::ledger::pos::namada_proof_of_stake; @@ -17,6 +18,7 @@ use namada::proof_of_stake; use namada::proof_of_stake::storage::{ find_validator_by_raw_hash, write_last_block_proposer_address, }; +use namada::sdk::events::EmitEvents; use namada::state::write_log::StorageModification; use namada::state::{ResultExt, StorageWrite, EPOCH_SWITCH_BLOCKS_DELAY}; use namada::token::utils::is_masp_tx; @@ -179,7 +181,7 @@ where "Tx rejected: {}", &processed_tx.result.info ))) - .with(WithGasUsed(0.into())), + .with(GasUsed(0.into())), ); continue; } @@ -202,7 +204,7 @@ where "Tx rejected: {}", &processed_tx.result.info ))) - .with(WithGasUsed(0.into())), + .with(GasUsed(0.into())), ); continue; } @@ -311,11 +313,7 @@ where let tx_result = protocol::dispatch_tx( tx.clone(), processed_tx.tx.as_ref(), - TxIndex( - tx_index - .try_into() - .expect("transaction index out of bounds"), - ), + TxIndex::must_from_usize(tx_index), &tx_gas_meter, &mut self.state, &mut self.vp_wasm_cache, @@ -340,12 +338,16 @@ where .unwrap_or_default() || is_masp_tx(&result.changed_keys) { - tx_event.extend(ValidMaspTx(tx_index)); + tx_event.extend(ValidMaspTx( + TxIndex::must_from_usize(tx_index), + )); } tracing::trace!( "all VPs accepted transaction {} storage \ modification {:#?}", - tx_event["hash"], + tx_event + .raw_read_attribute::() + .unwrap_or(""), result ); @@ -358,7 +360,7 @@ where 
self.commit_inner_tx_hash(replay_protection_hashes); self.state.commit_tx(); - if !tx_event.contains_key("code") { + if !tx_event.has_attribute::() { tx_event.extend(Code(ResultCode::Ok)); self.state .in_mem_mut() @@ -367,29 +369,19 @@ where .accept(tx_index); } // events from other sources - response.events.extend( - // ibc events - result - .ibc_events - .iter() - .cloned() - .map(|ibc_event| { - ibc_event.with(Height(height)).into() - }) - // eth bridge events - .chain( - result - .eth_bridge_events - .iter() - .map(Event::from), - ), + response.events.emit_many( + result.events.iter().map(|event| { + event.clone().with(Height(height)) + }), ); } else { // this branch can only be reached by inner txs tracing::trace!( "some VPs rejected transaction {} storage \ modification {:#?}", - tx_event["hash"], + tx_event + .raw_read_attribute::() + .unwrap_or(""), result.vps_result.rejected_vps ); // The fee unshield operation could still have been @@ -398,7 +390,9 @@ where .map(|args| args.is_committed_fee_unshield) .unwrap_or_default() { - tx_event.extend(ValidMaspTx(tx_index)); + tx_event.extend(ValidMaspTx( + TxIndex::must_from_usize(tx_index), + )); } // If an inner tx failed for any reason but invalid @@ -417,7 +411,7 @@ where tx_event.extend(Code(ResultCode::InvalidTx)); } tx_event - .extend(WithGasUsed(result.gas_used)) + .extend(GasUsed(result.gas_used)) .extend(Info("Check inner_tx for result.".to_string())) .extend(InnerTx(&result)); } @@ -426,18 +420,22 @@ where ))) => { tracing::info!( "Wrapper transaction {} failed with: {}", - tx_event["hash"], + tx_event + .raw_read_attribute::() + .unwrap_or(""), msg, ); tx_event - .extend(WithGasUsed(tx_gas_meter.get_tx_consumed_gas())) + .extend(GasUsed(tx_gas_meter.get_tx_consumed_gas())) .extend(Info(msg.to_string())) .extend(Code(ResultCode::InvalidTx)); } Err(msg) => { tracing::info!( "Transaction {} failed with: {}", - tx_event["hash"], + tx_event + .raw_read_attribute::() + .unwrap_or(""), msg ); @@ -478,7 +476,7 @@ where self.state.drop_tx(); tx_event - .extend(WithGasUsed(tx_gas_meter.get_tx_consumed_gas())) + .extend(GasUsed(tx_gas_meter.get_tx_consumed_gas())) .extend(Info(msg.to_string())); // If wrapper, invalid tx error code @@ -489,7 +487,9 @@ where .map(|args| args.is_committed_fee_unshield) .unwrap_or_default() { - tx_event.extend(ValidMaspTx(tx_index)); + tx_event.extend(ValidMaspTx(TxIndex::must_from_usize( + tx_index, + ))); } tx_event.extend(Code(ResultCode::WasmRuntimeError)); } @@ -620,11 +620,10 @@ where namada::ibc::transfer_over_ibc, )?; - // Take IBC events that may be emitted from PGF - for ibc_event in self.state.write_log_mut().take_ibc_events() { - // Add the height for IBC event query + // Take events that may be emitted from PGF + for event in self.state.write_log_mut().take_events() { events.emit( - ibc_event.with(Height( + event.with(Height( self.state.in_mem().get_last_block_height() + 1, )), ); @@ -765,9 +764,12 @@ mod test_finalize_block { }; use namada::proof_of_stake::{unjail_validator, ADDRESS as pos_address}; use namada::replay_protection; + use namada::sdk::events::Event; use namada::tendermint::abci::types::{Misbehavior, MisbehaviorKind}; use namada::token::{Amount, DenominatedAmount, NATIVE_MAX_DECIMAL_PLACES}; use namada::tx::data::Fee; + use namada::tx::event::types::APPLIED as APPLIED_TX; + use namada::tx::event::Code as CodeAttr; use namada::tx::{Authorization, Code, Data}; use namada::vote_ext::ethereum_events; use namada_sdk::eth_bridge::storage::vote_tallies::BridgePoolRoot; @@ -868,9 +870,12 
@@ mod test_finalize_block { .iter() .enumerate() { - assert_eq!(event.event_type.to_string(), String::from("applied")); - let code = event.attributes.get("code").expect("Test failed"); - assert_eq!(code, &index.rem_euclid(2).to_string()); + assert_eq!(*event.kind(), APPLIED_TX); + let code = event + .read_attribute::() + .expect("Test failed") + .to_usize(); + assert_eq!(code, index.rem_euclid(2)); } } @@ -903,9 +908,9 @@ mod test_finalize_block { let mut resp = shell.finalize_block(req).expect("Test failed"); assert_eq!(resp.len(), 1); let event = resp.remove(0); - assert_eq!(event.event_type.to_string(), String::from("applied")); - let code = event.attributes.get("code").expect("Test failed"); - assert_eq!(code, &String::from(ResultCode::InvalidTx)); + assert_eq!(*event.kind(), APPLIED_TX); + let code = event.read_attribute::().expect("Test failed"); + assert_eq!(code, ResultCode::InvalidTx); } /// Test that once a validator's vote for an Ethereum event lands @@ -979,9 +984,9 @@ mod test_finalize_block { .expect("Test failed") .try_into() .expect("Test failed"); - assert_eq!(result.event_type.to_string(), String::from("applied")); - let code = result.attributes.get("code").expect("Test failed").as_str(); - assert_eq!(code, String::from(ResultCode::Ok).as_str()); + assert_eq!(*result.kind(), APPLIED_TX); + let code = result.read_attribute::().expect("Test failed"); + assert_eq!(code, ResultCode::Ok); // --- The event is removed from the queue assert!(shell.new_ethereum_events().is_empty()); @@ -1038,9 +1043,9 @@ mod test_finalize_block { .expect("Test failed") .try_into() .expect("Test failed"); - assert_eq!(result.event_type.to_string(), String::from("applied")); - let code = result.attributes.get("code").expect("Test failed").as_str(); - assert_eq!(code, String::from(ResultCode::Ok).as_str()); + assert_eq!(*result.kind(), APPLIED_TX); + let code = result.read_attribute::().expect("Test failed"); + assert_eq!(code, ResultCode::Ok); // --- The event is removed from the queue assert!(shell.new_ethereum_events().is_empty()); @@ -2438,9 +2443,9 @@ mod test_finalize_block { ..Default::default() }) .expect("Test failed")[0]; - assert_eq!(event.event_type.to_string(), String::from("applied")); - let code = event.attributes.get("code").expect("Test failed").as_str(); - assert_eq!(code, String::from(ResultCode::Ok).as_str()); + assert_eq!(*event.kind(), APPLIED_TX); + let code = event.read_attribute::().expect("Test failed"); + assert_eq!(code, ResultCode::Ok); // the merkle tree root should not change after finalize_block let root_post = shell.shell.state.in_mem().block.tree.root(); @@ -2596,12 +2601,12 @@ mod test_finalize_block { let root_post = shell.shell.state.in_mem().block.tree.root(); assert_eq!(root_pre.0, root_post.0); - assert_eq!(event[0].event_type.to_string(), String::from("applied")); - let code = event[0].attributes.get("code").unwrap().as_str(); - assert_eq!(code, String::from(ResultCode::Ok).as_str()); - assert_eq!(event[1].event_type.to_string(), String::from("applied")); - let code = event[1].attributes.get("code").unwrap().as_str(); - assert_eq!(code, String::from(ResultCode::WasmRuntimeError).as_str()); + assert_eq!(*event[0].kind(), APPLIED_TX); + let code = event[0].read_attribute::().expect("Test failed"); + assert_eq!(code, ResultCode::Ok); + assert_eq!(*event[1].kind(), APPLIED_TX); + let code = event[1].read_attribute::().expect("Test failed"); + assert_eq!(code, ResultCode::WasmRuntimeError); for wrapper in [&wrapper, &new_wrapper] { assert!( @@ -2728,12 +2733,12 @@ 
mod test_finalize_block { let root_post = shell.shell.state.in_mem().block.tree.root(); assert_eq!(root_pre.0, root_post.0); - assert_eq!(event[0].event_type.to_string(), String::from("applied")); - let code = event[0].attributes.get("code").unwrap().as_str(); - assert_eq!(code, String::from(ResultCode::InvalidTx).as_str()); - assert_eq!(event[1].event_type.to_string(), String::from("applied")); - let code = event[1].attributes.get("code").unwrap().as_str(); - assert_eq!(code, String::from(ResultCode::Ok).as_str()); + assert_eq!(*event[0].kind(), APPLIED_TX); + let code = event[0].read_attribute::().expect("Test failed"); + assert_eq!(code, ResultCode::InvalidTx); + assert_eq!(*event[1].kind(), APPLIED_TX); + let code = event[1].read_attribute::().expect("Test failed"); + assert_eq!(code, ResultCode::Ok); // This hash must be present as succesfully added by the second // transaction @@ -2871,18 +2876,18 @@ mod test_finalize_block { let root_post = shell.shell.state.in_mem().block.tree.root(); assert_eq!(root_pre.0, root_post.0); - assert_eq!(event[0].event_type.to_string(), String::from("applied")); - let code = event[0].attributes.get("code").unwrap().as_str(); - assert_eq!(code, String::from(ResultCode::InvalidTx).as_str()); - assert_eq!(event[1].event_type.to_string(), String::from("applied")); - let code = event[1].attributes.get("code").unwrap().as_str(); - assert_eq!(code, String::from(ResultCode::InvalidTx).as_str()); - assert_eq!(event[2].event_type.to_string(), String::from("applied")); - let code = event[2].attributes.get("code").unwrap().as_str(); - assert_eq!(code, String::from(ResultCode::WasmRuntimeError).as_str()); - assert_eq!(event[3].event_type.to_string(), String::from("applied")); - let code = event[3].attributes.get("code").unwrap().as_str(); - assert_eq!(code, String::from(ResultCode::WasmRuntimeError).as_str()); + assert_eq!(*event[0].kind(), APPLIED_TX); + let code = event[0].read_attribute::().expect("Test failed"); + assert_eq!(code, ResultCode::InvalidTx); + assert_eq!(*event[1].kind(), APPLIED_TX); + let code = event[1].read_attribute::().expect("Test failed"); + assert_eq!(code, ResultCode::InvalidTx); + assert_eq!(*event[2].kind(), APPLIED_TX); + let code = event[2].read_attribute::().expect("Test failed"); + assert_eq!(code, ResultCode::WasmRuntimeError); + assert_eq!(*event[3].kind(), APPLIED_TX); + let code = event[3].read_attribute::().expect("Test failed"); + assert_eq!(code, ResultCode::WasmRuntimeError); for valid_wrapper in [ &out_of_gas_wrapper, @@ -3004,13 +3009,9 @@ mod test_finalize_block { let root_post = shell.shell.state.in_mem().block.tree.root(); assert_eq!(root_pre.0, root_post.0); - assert_eq!(event[0].event_type.to_string(), String::from("applied")); - let code = event[0] - .attributes - .get("code") - .expect("Test failed") - .as_str(); - assert_eq!(code, String::from(ResultCode::InvalidTx).as_str()); + assert_eq!(*event[0].kind(), APPLIED_TX); + let code = event[0].read_attribute::().expect("Test failed"); + assert_eq!(code, ResultCode::InvalidTx); assert!( shell @@ -3083,9 +3084,9 @@ mod test_finalize_block { .expect("Test failed")[0]; // Check balance of fee payer - assert_eq!(event.event_type.to_string(), String::from("applied")); - let code = event.attributes.get("code").expect("Test failed").as_str(); - assert_eq!(code, String::from(ResultCode::WasmRuntimeError).as_str()); + assert_eq!(*event.kind(), APPLIED_TX); + let code = event.read_attribute::().expect("Test failed"); + assert_eq!(code, ResultCode::WasmRuntimeError); let 
new_signer_balance = namada::token::read_balance( &shell.state, @@ -3175,9 +3176,9 @@ mod test_finalize_block { .expect("Test failed")[0]; // Check balance of fee payer is 0 - assert_eq!(event.event_type.to_string(), String::from("applied")); - let code = event.attributes.get("code").expect("Test failed").as_str(); - assert_eq!(code, String::from(ResultCode::InvalidTx).as_str()); + assert_eq!(*event.kind(), APPLIED_TX); + let code = event.read_attribute::().expect("Test failed"); + assert_eq!(code, ResultCode::InvalidTx); let balance: Amount = shell.state.read(&balance_key).unwrap().unwrap_or_default(); @@ -3270,9 +3271,9 @@ mod test_finalize_block { .expect("Test failed")[0]; // Check fee payment - assert_eq!(event.event_type.to_string(), String::from("applied")); - let code = event.attributes.get("code").expect("Test failed").as_str(); - assert_eq!(code, String::from(ResultCode::Ok).as_str()); + assert_eq!(*event.kind(), APPLIED_TX); + let code = event.read_attribute::().expect("Test failed"); + assert_eq!(code, ResultCode::Ok); let new_proposer_balance = namada::token::read_balance( &shell.state, diff --git a/crates/apps/src/lib/node/ledger/shell/governance.rs b/crates/apps/src/lib/node/ledger/shell/governance.rs index 3381cd4f8c..594b2b346e 100644 --- a/crates/apps/src/lib/node/ledger/shell/governance.rs +++ b/crates/apps/src/lib/node/ledger/shell/governance.rs @@ -1,7 +1,7 @@ use namada::core::collections::HashMap; use namada::core::encode; -use namada::core::event::EmitEvents; use namada::core::storage::Epoch; +use namada::governance::event::ProposalEvent; use namada::governance::pgf::storage::keys as pgf_storage; use namada::governance::pgf::storage::steward::StewardDetail; use namada::governance::pgf::{storage as pgf, ADDRESS}; @@ -17,14 +17,16 @@ use namada::governance::{ }; use namada::ibc; use namada::ledger::events::extend::{ComposeEvent, Height}; -use namada::ledger::governance::utils::ProposalEvent; use namada::proof_of_stake::bond_amount; use namada::proof_of_stake::parameters::PosParams; use namada::proof_of_stake::storage::{ read_total_active_stake, validator_state_handle, }; use namada::proof_of_stake::types::{BondId, ValidatorState}; +use namada::sdk::events::{EmitEvents, EventLevel}; use namada::state::StorageWrite; +use namada::token::event::{TokenEvent, TokenOperation, UserAccount}; +use namada::token::read_balance; use namada::tx::{Code, Data}; use namada_sdk::proof_of_stake::storage::read_validator_stake; @@ -163,6 +165,7 @@ where let native_token = &shell.state.get_native_token()?; let result = execute_pgf_funding_proposal( &mut shell.state, + events, native_token, payments, id, @@ -181,11 +184,17 @@ where // Take events that could have been emitted by PGF // over IBC, governance proposal execution, etc - for event in shell.state.write_log_mut().take_ibc_events() { - events.emit(event.with(Height( - shell.state.in_mem().get_last_block_height() + 1, - ))); - } + let current_height = + shell.state.in_mem().get_last_block_height() + 1; + + events.emit_many( + shell + .state + .write_log_mut() + .take_events() + .into_iter() + .map(|event| event.with(Height(current_height))), + ); gov_api::get_proposal_author(&shell.state, id)? 
} @@ -229,6 +238,26 @@ where &address, funds, )?; + + const DESCRIPTOR: &str = "governance-locked-funds-refund"; + + let final_gov_balance = + read_balance(&shell.state, &native_token, &gov_address)?.into(); + let final_target_balance = + read_balance(&shell.state, &native_token, &address)?.into(); + + events.emit(TokenEvent { + descriptor: DESCRIPTOR.into(), + level: EventLevel::Block, + token: native_token.clone(), + operation: TokenOperation::Transfer { + amount: funds.into(), + source: UserAccount::Internal(gov_address), + target: UserAccount::Internal(address), + source_post_balance: final_gov_balance, + target_post_balance: Some(final_target_balance), + }, + }); } else { token::burn_tokens( &mut shell.state, @@ -236,6 +265,22 @@ where &gov_address, funds, )?; + + const DESCRIPTOR: &str = "governance-locked-funds-burn"; + + let final_gov_balance = + read_balance(&shell.state, &native_token, &gov_address)?.into(); + + events.emit(TokenEvent { + descriptor: DESCRIPTOR.into(), + level: EventLevel::Block, + token: native_token.clone(), + operation: TokenOperation::Burn { + amount: funds.into(), + target_account: UserAccount::Internal(gov_address), + post_balance: final_gov_balance, + }, + }); } } @@ -393,6 +438,7 @@ where fn execute_pgf_funding_proposal( state: &mut WlState, + events: &mut impl EmitEvents, token: &Address, fundings: BTreeSet, proposal_id: u64, @@ -431,25 +477,68 @@ where } }, PGFAction::Retro(target) => { - let result = match &target { - PGFTarget::Internal(target) => token::transfer( - state, - token, - &ADDRESS, - &target.target, - target.amount, + let (result, event) = match &target { + PGFTarget::Internal(target) => ( + token::transfer( + state, + token, + &ADDRESS, + &target.target, + target.amount, + ), + TokenEvent { + descriptor: "pgf-payments".into(), + level: EventLevel::Block, + token: token.clone(), + operation: TokenOperation::Transfer { + amount: target.amount.into(), + source: UserAccount::Internal(ADDRESS), + target: UserAccount::Internal( + target.target.clone(), + ), + source_post_balance: read_balance( + state, token, &ADDRESS, + )? + .into(), + target_post_balance: Some( + read_balance(state, token, &target.target)? + .into(), + ), + }, + }, + ), + PGFTarget::Ibc(target) => ( + ibc::transfer_over_ibc(state, token, &ADDRESS, target), + TokenEvent { + descriptor: "pgf-payments-over-ibc".into(), + level: EventLevel::Block, + token: token.clone(), + operation: TokenOperation::Transfer { + amount: target.amount.into(), + source: UserAccount::Internal(ADDRESS), + target: UserAccount::External( + target.target.clone(), + ), + source_post_balance: read_balance( + state, token, &ADDRESS, + )? 
+ .into(), + target_post_balance: None, + }, + }, ), - PGFTarget::Ibc(target) => { - ibc::transfer_over_ibc(state, token, &ADDRESS, target) - } }; match result { - Ok(()) => tracing::info!( - "Execute RetroPgf from proposal id {}: sent {} to {}.", - proposal_id, - target.amount().to_string_native(), - target.target() - ), + Ok(()) => { + tracing::info!( + "Execute RetroPgf from proposal id {}: sent {} to \ + {}.", + proposal_id, + target.amount().to_string_native(), + target.target() + ); + events.emit(event); + } Err(e) => tracing::warn!( "Error in RetroPgf transfer from proposal id {}, \ amount {} to {}: {}", diff --git a/crates/apps/src/lib/node/ledger/shell/init_chain.rs b/crates/apps/src/lib/node/ledger/shell/init_chain.rs index c75e72615f..0042ef924c 100644 --- a/crates/apps/src/lib/node/ledger/shell/init_chain.rs +++ b/crates/apps/src/lib/node/ledger/shell/init_chain.rs @@ -1202,7 +1202,7 @@ mod test { token::Amount::from_uint(1, 6).unwrap(), 6.into(), ), - "Insufficient source balance".to_string(), + format!("{albert_address_str} has insufficient balance"), )]; assert_eq!(expected, initializer.warnings); initializer.warnings.clear(); diff --git a/crates/apps/src/lib/node/ledger/shell/mod.rs b/crates/apps/src/lib/node/ledger/shell/mod.rs index a225598b5b..efba79b924 100644 --- a/crates/apps/src/lib/node/ledger/shell/mod.rs +++ b/crates/apps/src/lib/node/ledger/shell/mod.rs @@ -43,7 +43,6 @@ use namada::ethereum_bridge::protocol::validation::bridge_pool_roots::validate_b use namada::ethereum_bridge::protocol::validation::ethereum_events::validate_eth_events_vext; use namada::ethereum_bridge::protocol::validation::validator_set_update::validate_valset_upd_vext; use namada::ledger::events::log::EventLog; -use namada::ledger::events::Event; use namada::ledger::gas::{Gas, TxGasMeter}; use namada::ledger::pos::namada_proof_of_stake::types::{ ConsensusValidator, ValidatorSetUpdate, @@ -1431,6 +1430,7 @@ mod test_utils { use namada::core::keccak::KeccakHash; use namada::core::key::*; use namada::core::storage::{Epoch, Header}; + use namada::ledger::events::Event; use namada::proof_of_stake::parameters::PosParams; use namada::proof_of_stake::storage::validator_consensus_key_handle; use namada::state::mockdb::MockDB; diff --git a/crates/apps/src/lib/node/ledger/shell/prepare_proposal.rs b/crates/apps/src/lib/node/ledger/shell/prepare_proposal.rs index 75e0dfafe2..fa98273fcd 100644 --- a/crates/apps/src/lib/node/ledger/shell/prepare_proposal.rs +++ b/crates/apps/src/lib/node/ledger/shell/prepare_proposal.rs @@ -6,6 +6,7 @@ use masp_primitives::transaction::Transaction; use namada::core::address::Address; use namada::core::key::tm_raw_hash_to_string; use namada::gas::TxGasMeter; +use namada::hash::Hash; use namada::ledger::protocol::{self, ShellParams}; use namada::proof_of_stake::storage::find_validator_by_raw_hash; use namada::state::{DBIter, StorageHasher, TempWlState, DB}; @@ -295,6 +296,7 @@ where // Check fees and extract the gas limit of this transaction match prepare_proposal_fee_check( &wrapper, + tx.header_hash(), protocol::get_fee_unshielding_transaction(&tx, &wrapper), block_proposer, proposer_local_config, @@ -313,8 +315,10 @@ where } } +#[allow(clippy::too_many_arguments)] fn prepare_proposal_fee_check( wrapper: &WrapperTx, + wrapper_tx_hash: Hash, masp_transaction: Option, proposer: &Address, proposer_local_config: Option<&ValidatorLocalConfig>, @@ -357,8 +361,13 @@ where shell_params, )?; - protocol::transfer_fee(shell_params.state, proposer, wrapper) - .map_err(Error::TxApply) 
+ protocol::transfer_fee( + shell_params.state, + proposer, + wrapper, + wrapper_tx_hash, + ) + .map_err(Error::TxApply) } #[cfg(test)] diff --git a/crates/apps/src/lib/node/ledger/shell/process_proposal.rs b/crates/apps/src/lib/node/ledger/shell/process_proposal.rs index 4d3ed0b063..f73375a8d5 100644 --- a/crates/apps/src/lib/node/ledger/shell/process_proposal.rs +++ b/crates/apps/src/lib/node/ledger/shell/process_proposal.rs @@ -2,6 +2,7 @@ //! and [`RevertProposal`] ABCI++ methods for the Shell use data_encoding::HEXUPPER; +use namada::hash::Hash; use namada::ledger::pos::PosQueries; use namada::proof_of_stake::storage::find_validator_by_raw_hash; use namada::tx::data::protocol::ProtocolTxType; @@ -465,6 +466,7 @@ where // Check that the fee payer has sufficient balance. match process_proposal_fee_check( &wrapper, + tx.header_hash(), get_fee_unshielding_transaction(&tx, &wrapper), block_proposer, &mut ShellParams::new( @@ -498,6 +500,7 @@ where fn process_proposal_fee_check( wrapper: &WrapperTx, + wrapper_tx_hash: Hash, masp_transaction: Option, proposer: &Address, shell_params: &mut ShellParams<'_, TempWlState, D, H, CA>, @@ -524,8 +527,13 @@ where shell_params, )?; - protocol::transfer_fee(shell_params.state, proposer, wrapper) - .map_err(Error::TxApply) + protocol::transfer_fee( + shell_params.state, + proposer, + wrapper, + wrapper_tx_hash, + ) + .map_err(Error::TxApply) } /// We test the failure cases of [`process_proposal`]. The happy flows diff --git a/crates/apps/src/lib/node/ledger/shell/testing/node.rs b/crates/apps/src/lib/node/ledger/shell/testing/node.rs index 21887b98de..224515a1f1 100644 --- a/crates/apps/src/lib/node/ledger/shell/testing/node.rs +++ b/crates/apps/src/lib/node/ledger/shell/testing/node.rs @@ -2,7 +2,6 @@ use std::fmt::{Debug, Formatter}; use std::future::poll_fn; use std::mem::ManuallyDrop; use std::path::PathBuf; -use std::str::FromStr; use std::sync::{Arc, Mutex}; use std::task::Poll; @@ -31,11 +30,14 @@ use namada::proof_of_stake::storage::{ validator_consensus_key_handle, }; use namada::proof_of_stake::types::WeightedValidator; +use namada::sdk::events::extend::Height as HeightAttr; +use namada::sdk::events::Event; use namada::state::{ LastBlock, Sha256Hasher, StorageRead, EPOCH_SWITCH_BLOCKS_DELAY, }; use namada::tendermint::abci::response::Info; use namada::tendermint::abci::types::VoteInfo; +use namada::tx::event::Code as CodeAttr; use namada_sdk::queries::Client; use namada_sdk::tendermint_proto::google::protobuf::Timestamp; use namada_sdk::tx::data::ResultCode; @@ -459,13 +461,10 @@ impl MockNode { .events .into_iter() .map(|e| { - let code = ResultCode::from_u32( - e.attributes - .get("code") - .map(|e| u32::from_str(e).unwrap()) - .unwrap_or_default(), - ) - .unwrap(); + let code = e + .read_attribute_opt::() + .unwrap() + .unwrap_or_default(); if code == ResultCode::Ok { NodeResults::Ok } else { @@ -585,13 +584,10 @@ impl MockNode { .events .into_iter() .map(|e| { - let code = ResultCode::from_u32( - e.attributes - .get("code") - .map(|e| u32::from_str(e).unwrap()) - .unwrap_or_default(), - ) - .unwrap(); + let code = e + .read_attribute_opt::() + .unwrap() + .unwrap_or_default(); if code == ResultCode::Ok { NodeResults::Ok } else { @@ -799,57 +795,18 @@ impl<'a> Client for &'a MockNode { self.drive_mock_services_bg().await; let matcher = parse_tm_query(query); let borrowed = self.shell.lock().unwrap(); - // we store an index into the event log as a block + + // we store a hash of some event in the log as a block // height in the response of 
the query... VERY NAISSSE - let matching_events = borrowed.event_log().iter().enumerate().flat_map( - |(index, event)| { - if matcher.matches(event) { - Some(EncodedEvent(index as u64)) - } else { - None - } - }, - ); + let matching_events = borrowed.event_log().iter().flat_map(|event| { + if matcher.matches(event) { + Some(EncodedEvent::encode(event)) + } else { + None + } + }); let blocks = matching_events - .map(|encoded_event| namada::tendermint_rpc::endpoint::block::Response { - block_id: Default::default(), - block: namada::tendermint_proto::types::Block { - header: Some(namada::tendermint_proto::types::Header { - version: Some(namada::tendermint_proto::version::Consensus { - block: 0, - app: 0, - }), - chain_id: "Namada".into(), - height: encoded_event.0 as i64, - time: None, - last_block_id: None, - last_commit_hash: vec![], - data_hash: vec![], - validators_hash: vec![], - next_validators_hash: vec![], - consensus_hash: vec![], - app_hash: vec![], - last_results_hash: vec![], - evidence_hash: vec![], - proposer_address: vec![] - - }), - data: Default::default(), - evidence: Default::default(), - last_commit: Some(namada::tendermint_proto::types::Commit { - height: encoded_event.0 as i64, - round: 0, - block_id: Some(namada::tendermint_proto::types::BlockId { - hash: vec![0u8; 32], - part_set_header: Some(namada::tendermint_proto::types::PartSetHeader { - total: 1, - hash: vec![1; 32], - }), - }), - signatures: vec![], - }), - }.try_into().unwrap(), - }) + .map(block_search_response) .collect::>(); Ok(namada::tendermint_rpc::endpoint::block_search::Response { @@ -868,33 +825,27 @@ impl<'a> Client for &'a MockNode { { self.drive_mock_services_bg().await; let height = height.into(); - let encoded_event = EncodedEvent(height.value()); let locked = self.shell.lock().unwrap(); let events: Vec<_> = locked .event_log() .iter() .flat_map(|event| { - if usize::from_str(event.attributes.get("height").unwrap()) - .unwrap() - == encoded_event.log_index() - { + let same_block_height = event + .read_attribute::() + .map(|event_height| { + BlockHeight(height.value()) == event_height + }) + .unwrap_or(false); + let same_encoded_event = + EncodedEvent::encode(event) == EncodedEvent(height.value()); + + if same_block_height || same_encoded_event { Some(event) } else { None } }) - .map(|event| namada::tendermint::abci::Event { - kind: event.event_type.to_string(), - attributes: event - .attributes - .iter() - .map(|(k, v)| namada::tendermint::abci::EventAttribute { - key: k.parse().unwrap(), - value: v.parse().unwrap(), - index: true, - }) - .collect(), - }) + .map(|event| namada::tendermint::abci::Event::from(event.clone())) .collect(); let has_events = !events.is_empty(); Ok(tendermint_rpc::endpoint::block_results::Response { @@ -980,14 +931,87 @@ fn parse_tm_query( } } -/// A Namada event log index and event type encoded as -/// a Tendermint block height. +/// A Namada event hash encoded as a Tendermint block height. #[derive(Copy, Clone, Eq, PartialEq, Debug)] struct EncodedEvent(u64); impl EncodedEvent { - /// Get the encoded event log index. - const fn log_index(self) -> usize { - self.0 as usize + /// Encode an event. + fn encode(event: &Event) -> Self { + use std::hash::{DefaultHasher, Hasher}; + + let mut hasher = DefaultHasher::default(); + borsh::to_writer(HasherWriter(&mut hasher), event).unwrap(); + + Self(hasher.finish()) + } +} + +/// Hasher that implements [`std::io::Write`]. 
+struct HasherWriter(H); + +impl std::io::Write for HasherWriter +where + H: std::hash::Hasher, +{ + #[inline] + fn write(&mut self, buf: &[u8]) -> std::io::Result { + std::hash::Hasher::write(&mut self.0, buf); + Ok(buf.len()) + } + + #[inline] + fn flush(&mut self) -> std::io::Result<()> { + Ok(()) + } +} + +#[inline] +fn block_search_response( + encoded_event: EncodedEvent, +) -> namada::tendermint_rpc::endpoint::block::Response { + namada::tendermint_rpc::endpoint::block::Response { + block_id: Default::default(), + block: namada::tendermint_proto::types::Block { + header: Some(namada::tendermint_proto::types::Header { + version: Some(namada::tendermint_proto::version::Consensus { + block: 0, + app: 0, + }), + chain_id: String::new(), + // NB: this is the only field that matters to us, + // everything else is junk + height: encoded_event.0 as i64, + time: None, + last_block_id: None, + last_commit_hash: vec![], + data_hash: vec![], + validators_hash: vec![], + next_validators_hash: vec![], + consensus_hash: vec![], + app_hash: vec![], + last_results_hash: vec![], + evidence_hash: vec![], + proposer_address: vec![], + }), + data: Default::default(), + evidence: Default::default(), + last_commit: Some(namada::tendermint_proto::types::Commit { + height: encoded_event.0 as i64, + round: 0, + block_id: Some(namada::tendermint_proto::types::BlockId { + hash: vec![0u8; 32], + part_set_header: Some( + namada::tendermint_proto::types::PartSetHeader { + total: 1, + hash: vec![1; 32], + }, + ), + }), + signatures: vec![], + }), + } + .try_into() + .unwrap(), } } diff --git a/crates/core/src/ethereum_structs.rs b/crates/core/src/ethereum_structs.rs index 3236dfc95d..a3e055a1ac 100644 --- a/crates/core/src/ethereum_structs.rs +++ b/crates/core/src/ethereum_structs.rs @@ -1,4 +1,5 @@ //! Ethereum bridge struct re-exports and types to do with ethereum. + use std::fmt; use std::io::Read; use std::num::NonZeroU64; @@ -6,79 +7,9 @@ use std::ops::{Add, AddAssign, Deref}; use borsh::{BorshDeserialize, BorshSerialize}; pub use ethbridge_structs::*; -use namada_macros::BorshDeserializer; -#[cfg(feature = "migrations")] -use namada_migrations::*; use num256::Uint256; use serde::{Deserialize, Serialize}; -use crate::keccak::KeccakHash; - -/// Status of some Bridge pool transfer. -#[derive( - Hash, - Clone, - Debug, - Eq, - PartialEq, - Ord, - PartialOrd, - BorshSerialize, - BorshDeserialize, - BorshDeserializer, - Serialize, - Deserialize, -)] -pub enum BpTransferStatus { - /// The transfer has been relayed. - Relayed, - /// The transfer has expired. - Expired, -} - -/// Ethereum bridge events on Namada's event log. -#[derive( - Hash, - Clone, - Debug, - Eq, - PartialEq, - Ord, - PartialOrd, - BorshSerialize, - BorshDeserialize, - BorshDeserializer, - Serialize, - Deserialize, -)] -pub enum EthBridgeEvent { - /// Bridge pool transfer status update event. - BridgePool { - /// Hash of the Bridge pool transfer. - tx_hash: KeccakHash, - /// Status of the Bridge pool transfer. - status: BpTransferStatus, - }, -} - -impl EthBridgeEvent { - /// Return a new Bridge pool expired transfer event. - pub const fn new_bridge_pool_expired(tx_hash: KeccakHash) -> Self { - Self::BridgePool { - tx_hash, - status: BpTransferStatus::Expired, - } - } - - /// Return a new Bridge pool relayed transfer event. - pub const fn new_bridge_pool_relayed(tx_hash: KeccakHash) -> Self { - Self::BridgePool { - tx_hash, - status: BpTransferStatus::Relayed, - } - } -} - /// This type must be able to represent any valid Ethereum block height. 
It must /// also be Borsh serializeable, so that it can be stored in blockchain storage. /// diff --git a/crates/core/src/event.rs b/crates/core/src/event.rs deleted file mode 100644 index a49cd0f491..0000000000 --- a/crates/core/src/event.rs +++ /dev/null @@ -1,292 +0,0 @@ -//! Ledger events - -pub mod extend; - -use std::fmt::{self, Display}; -use std::ops::{Index, IndexMut}; -use std::str::FromStr; - -use namada_macros::BorshDeserializer; -#[cfg(feature = "migrations")] -use namada_migrations::*; -use thiserror::Error; - -use crate::borsh::{BorshDeserialize, BorshSerialize}; -use crate::collections::HashMap; -use crate::ethereum_structs::{BpTransferStatus, EthBridgeEvent}; -use crate::ibc::IbcEvent; - -/// Used in sub-systems that may emit events. -pub trait EmitEvents { - /// Emit a single [event](Event). - fn emit(&mut self, event: E) - where - E: Into; - - /// Emit a batch of [events](Event). - fn emit_many(&mut self, event_batch: B) - where - B: IntoIterator, - E: Into; -} - -impl EmitEvents for Vec { - #[inline] - fn emit(&mut self, event: E) - where - E: Into, - { - self.push(event.into()); - } - - /// Emit a batch of [events](Event). - fn emit_many(&mut self, event_batch: B) - where - B: IntoIterator, - E: Into, - { - self.extend(event_batch.into_iter().map(Into::into)); - } -} - -/// Indicates if an event is emitted do to -/// an individual Tx or the nature of a finalized block -#[derive( - Clone, - Debug, - Eq, - PartialEq, - BorshSerialize, - BorshDeserialize, - BorshDeserializer, -)] -pub enum EventLevel { - /// Indicates an event is to do with a finalized block. - Block, - /// Indicates an event is to do with an individual transaction. - Tx, -} - -impl Display for EventLevel { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - f, - "{}", - match self { - EventLevel::Block => "block", - EventLevel::Tx => "tx", - } - ) - } -} - -/// Custom events that can be queried from Tendermint -/// using a websocket client -#[derive( - Clone, - Debug, - Eq, - PartialEq, - BorshSerialize, - BorshDeserialize, - BorshDeserializer, -)] -pub struct Event { - /// The type of event. - pub event_type: EventType, - /// The level of the event - whether it relates to a block or an individual - /// transaction. - pub level: EventLevel, - /// Key-value attributes of the event. 
- pub attributes: HashMap, -} - -impl Display for Event { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - // TODO: print attributes, too - write!(f, "{} in {}", self.event_type, self.level) - } -} - -/// The two types of custom events we currently use -#[derive( - Clone, - Debug, - Eq, - PartialEq, - BorshSerialize, - BorshDeserialize, - BorshDeserializer, -)] -pub enum EventType { - /// The transaction was applied during block finalization - Applied, - /// The IBC transaction was applied during block finalization - // TODO: create type-safe wrapper for all ibc event kinds - Ibc(String), - /// The proposal that has been executed - Proposal, - /// The pgf payment - PgfPayment, - /// Ethereum Bridge event - EthereumBridge, -} - -impl Display for EventType { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - EventType::Applied => write!(f, "applied"), - EventType::Ibc(t) => write!(f, "{}", t), - EventType::Proposal => write!(f, "proposal"), - EventType::PgfPayment => write!(f, "pgf_payment"), - EventType::EthereumBridge => write!(f, "ethereum_bridge"), - }?; - Ok(()) - } -} - -impl FromStr for EventType { - type Err = EventError; - - fn from_str(s: &str) -> Result { - match s { - "applied" => Ok(EventType::Applied), - "proposal" => Ok(EventType::Proposal), - "pgf_payments" => Ok(EventType::PgfPayment), - // - "update_client" => Ok(EventType::Ibc("update_client".to_string())), - "send_packet" => Ok(EventType::Ibc("send_packet".to_string())), - "write_acknowledgement" => { - Ok(EventType::Ibc("write_acknowledgement".to_string())) - } - // - "ethereum_bridge" => Ok(EventType::EthereumBridge), - _ => Err(EventError::InvalidEventType), - } - } -} - -/// Errors to do with emitting events. -#[derive(Error, Debug, Clone)] -pub enum EventError { - /// Error when parsing an event type - #[error("Invalid event type")] - InvalidEventType, - /// Error when parsing attributes from an event JSON. - #[error("Json missing `attributes` field")] - MissingAttributes, - /// Missing key in attributes. - #[error("Attributes missing key: {0}")] - MissingKey(String), - /// Missing value in attributes. - #[error("Attributes missing value: {0}")] - MissingValue(String), -} - -impl Event { - /// Create an applied tx event with empty attributes. - pub fn applied_tx() -> Self { - Self { - event_type: EventType::Applied, - level: EventLevel::Tx, - attributes: HashMap::new(), - } - } - - /// Check if the events keys contains a given string - pub fn contains_key(&self, key: &str) -> bool { - self.attributes.contains_key(key) - } - - /// Get the value corresponding to a given key, if it exists. - /// Else return None. - pub fn get(&self, key: &str) -> Option<&String> { - self.attributes.get(key) - } - - /// Extend this [`Event`] with additional data. 
- #[inline] - pub fn extend(&mut self, data: DATA) -> &mut Self - where - DATA: extend::ExtendEvent, - { - data.extend_event(self); - self - } -} - -impl From for Event { - #[inline] - fn from(event: EthBridgeEvent) -> Event { - Self::from(&event) - } -} - -impl From<&EthBridgeEvent> for Event { - fn from(event: &EthBridgeEvent) -> Event { - match event { - EthBridgeEvent::BridgePool { tx_hash, status } => Event { - event_type: EventType::EthereumBridge, - level: EventLevel::Tx, - attributes: { - let mut attrs = HashMap::new(); - attrs.insert( - "kind".into(), - match status { - BpTransferStatus::Relayed => "bridge_pool_relayed", - BpTransferStatus::Expired => "bridge_pool_expired", - } - .into(), - ); - attrs.insert("tx_hash".into(), tx_hash.to_string()); - attrs - }, - }, - } - } -} - -impl Index<&str> for Event { - type Output = String; - - fn index(&self, index: &str) -> &Self::Output { - &self.attributes[index] - } -} - -impl IndexMut<&str> for Event { - fn index_mut(&mut self, index: &str) -> &mut Self::Output { - let entry = self.attributes.entry(index.into()).or_default(); - &mut *entry - } -} - -impl From for Event { - fn from(ibc_event: IbcEvent) -> Self { - Self { - event_type: EventType::Ibc(ibc_event.event_type), - level: EventLevel::Tx, - attributes: ibc_event.attributes, - } - } -} - -/// Convert our custom event into the necessary tendermint proto type -impl From for crate::tendermint_proto::v0_37::abci::Event { - fn from(event: Event) -> Self { - Self { - r#type: event.event_type.to_string(), - attributes: event - .attributes - .into_iter() - .map(|(key, value)| { - crate::tendermint_proto::v0_37::abci::EventAttribute { - key, - value, - index: true, - } - }) - .collect(), - } - } -} diff --git a/crates/core/src/event/extend.rs b/crates/core/src/event/extend.rs deleted file mode 100644 index 768090eb56..0000000000 --- a/crates/core/src/event/extend.rs +++ /dev/null @@ -1,180 +0,0 @@ -//! Extend [events](Event) with additional fields. - -use super::*; -use crate::hash::Hash; -use crate::storage::BlockHeight; - -/// Provides event composition routines. -pub trait ComposeEvent { - /// Compose an [event](Event) with new data. - fn with(self, data: NEW) -> CompositeEvent - where - Self: Sized; -} - -impl ComposeEvent for E -where - E: Into, -{ - #[inline(always)] - fn with(self, data: NEW) -> CompositeEvent { - CompositeEvent::new(self, data) - } -} - -/// Event composed of various other event extensions. -#[derive(Clone, Debug)] -pub struct CompositeEvent { - base_event: E, - data: DATA, -} - -impl CompositeEvent { - /// Create a new composed event. - pub const fn new(base_event: E, data: DATA) -> Self { - Self { base_event, data } - } -} - -impl From> for Event -where - E: Into, - DATA: ExtendEvent, -{ - #[inline] - fn from(composite: CompositeEvent) -> Event { - let CompositeEvent { base_event, data } = composite; - - let mut base_event = base_event.into(); - data.extend_event(&mut base_event); - - base_event - } -} - -/// Extend an [event](Event) with additional fields. -pub trait ExtendEvent { - /// Add additional fields to the specified `event`. - fn extend_event(self, event: &mut Event); -} - -/// Leaves an [`Event`] as is. -pub struct WithNoOp; - -impl ExtendEvent for WithNoOp { - #[inline] - fn extend_event(self, _: &mut Event) {} -} - -/// Extend an [`Event`] with block height information. 
-pub struct Height(pub BlockHeight); - -impl ExtendEvent for Height { - #[inline] - fn extend_event(self, event: &mut Event) { - let Self(height) = self; - event["height"] = height.to_string(); - } -} - -/// Extend an [`Event`] with transaction hash information. -pub struct TxHash(pub Hash); - -impl ExtendEvent for TxHash { - #[inline] - fn extend_event(self, event: &mut Event) { - let Self(hash) = self; - event["hash"] = hash.to_string(); - } -} - -/// Extend an [`Event`] with log data. -pub struct Log(pub String); - -impl ExtendEvent for Log { - #[inline] - fn extend_event(self, event: &mut Event) { - let Self(log) = self; - event["log"] = log; - } -} - -/// Extend an [`Event`] with info data. -pub struct Info(pub String); - -impl ExtendEvent for Info { - #[inline] - fn extend_event(self, event: &mut Event) { - let Self(info) = self; - event["info"] = info; - } -} - -/// Extend an [`Event`] with `is_valid_masp_tx` data. -pub struct ValidMaspTx(pub usize); - -impl ExtendEvent for ValidMaspTx { - #[inline] - fn extend_event(self, event: &mut Event) { - let Self(masp_tx_index) = self; - event["is_valid_masp_tx"] = masp_tx_index.to_string(); - } -} - -#[cfg(test)] -mod event_composition_tests { - use super::*; - - #[test] - fn test_event_compose_basic() { - let expected_attrs = { - let mut attrs = HashMap::new(); - attrs.insert("log".to_string(), "this is sparta!".to_string()); - attrs.insert("height".to_string(), "300".to_string()); - attrs.insert("hash".to_string(), Hash::default().to_string()); - attrs - }; - - let base_event: Event = Event::applied_tx() - .with(Log("this is sparta!".to_string())) - .with(Height(300.into())) - .with(TxHash(Hash::default())) - .into(); - - assert_eq!(base_event.attributes, expected_attrs); - } - - #[test] - fn test_event_compose_repeated() { - let expected_attrs = { - let mut attrs = HashMap::new(); - attrs.insert("log".to_string(), "dejavu".to_string()); - attrs - }; - - let base_event: Event = Event::applied_tx() - .with(Log("dejavu".to_string())) - .with(Log("dejavu".to_string())) - .with(Log("dejavu".to_string())) - .into(); - - assert_eq!(base_event.attributes, expected_attrs); - } - - #[test] - fn test_event_compose_last_one_kept() { - let expected_attrs = { - let mut attrs = HashMap::new(); - attrs.insert("log".to_string(), "last".to_string()); - attrs - }; - - let base_event: Event = Event::applied_tx() - .with(Log("fist".to_string())) - .with(Log("second".to_string())) - .with(Log("last".to_string())) - .into(); - - assert_eq!(base_event.attributes, expected_attrs); - } -} diff --git a/crates/core/src/ibc.rs b/crates/core/src/ibc.rs index 1b880b779d..e0d65cdaa8 100644 --- a/crates/core/src/ibc.rs +++ b/crates/core/src/ibc.rs @@ -1,6 +1,5 @@ //! 
IBC-related data types -use std::cmp::Ordering; use std::str::FromStr; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; @@ -13,7 +12,6 @@ use serde::{Deserialize, Serialize}; use thiserror::Error; use super::address::{Address, InternalAddress, HASH_LEN}; -use crate::collections::HashMap; use crate::ibc::apps::nft_transfer::context::{NftClassContext, NftContext}; use crate::ibc::apps::nft_transfer::types::error::NftTransferError; use crate::ibc::apps::nft_transfer::types::msgs::transfer::MsgTransfer as IbcMsgNftTransfer; @@ -27,22 +25,14 @@ use crate::ibc::core::channel::types::msgs::{ MsgAcknowledgement as IbcMsgAcknowledgement, MsgRecvPacket as IbcMsgRecvPacket, MsgTimeout as IbcMsgTimeout, }; -use crate::ibc::core::handler::types::events::{ - Error as IbcEventError, IbcEvent as RawIbcEvent, -}; use crate::ibc::core::handler::types::msgs::MsgEnvelope; use crate::ibc::primitives::proto::Protobuf; -use crate::tendermint::abci::Event as AbciEvent; use crate::token::Transfer; /// The event type defined in ibc-rs for receiving a token pub const EVENT_TYPE_PACKET: &str = "fungible_token_packet"; /// The event type defined in ibc-rs for receiving an NFT pub const EVENT_TYPE_NFT_PACKET: &str = "non_fungible_token_packet"; -/// The event attribute key defined in ibc-rs for receiving result -pub const EVENT_ATTRIBUTE_SUCCESS: &str = "success"; -/// The event attribute value defined in ibc-rs for receiving success -pub const EVENT_VALUE_SUCCESS: &str = "true"; /// The escrow address for IBC transfer pub const IBC_ESCROW_ADDRESS: Address = Address::Internal(InternalAddress::Ibc); @@ -75,62 +65,13 @@ impl std::fmt::Display for IbcTokenHash { impl FromStr for IbcTokenHash { type Err = DecodePartial; - fn from_str(h: &str) -> std::result::Result { + fn from_str(h: &str) -> Result { let mut output = [0u8; HASH_LEN]; HEXLOWER_PERMISSIVE.decode_mut(h.as_ref(), &mut output)?; Ok(IbcTokenHash(output)) } } -/// Wrapped IbcEvent -#[derive( - Debug, - Clone, - BorshSerialize, - BorshDeserialize, - BorshDeserializer, - BorshSchema, - PartialEq, - Eq, - Serialize, - Deserialize, -)] -pub struct IbcEvent { - /// The IBC event type - pub event_type: String, - /// The attributes of the IBC event - pub attributes: HashMap, -} - -impl std::cmp::PartialOrd for IbcEvent { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -impl std::cmp::Ord for IbcEvent { - fn cmp(&self, other: &Self) -> Ordering { - // should not compare the same event type - self.event_type.cmp(&other.event_type) - } -} - -impl std::fmt::Display for IbcEvent { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let attributes = self - .attributes - .iter() - .map(|(k, v)| format!("{}: {};", k, v)) - .collect::>() - .join(", "); - write!( - f, - "Event type: {}, Attributes: {}", - self.event_type, attributes - ) - } -} - /// The different variants of an Ibc message pub enum IbcMessage { /// Ibc Envelop @@ -326,35 +267,12 @@ impl BorshDeserialize for MsgTimeout { #[allow(missing_docs)] #[derive(Error, Debug)] pub enum Error { - #[error("IBC event error: {0}")] - IbcEvent(IbcEventError), #[error("IBC transfer memo HEX decoding error: {0}")] DecodingHex(data_encoding::DecodeError), #[error("IBC transfer memo decoding error: {0}")] DecodingShieldedTransfer(std::io::Error), } -/// Conversion functions result -type Result = std::result::Result; - -impl TryFrom for IbcEvent { - type Error = Error; - - fn try_from(e: RawIbcEvent) -> Result { - let event_type = e.event_type().to_string(); - let 
abci_event = AbciEvent::try_from(e).map_err(Error::IbcEvent)?; - let attributes: HashMap<_, _> = abci_event - .attributes - .iter() - .map(|tag| (tag.key.to_string(), tag.value.to_string())) - .collect(); - Ok(Self { - event_type, - attributes, - }) - } -} - /// Returns the trace path and the token string if the denom is an IBC /// denom. pub fn is_ibc_denom(denom: impl AsRef) -> Option<(TracePath, String)> { diff --git a/crates/core/src/keccak.rs b/crates/core/src/keccak.rs index 70ee83799b..ad53bc9b6a 100644 --- a/crates/core/src/keccak.rs +++ b/crates/core/src/keccak.rs @@ -1,7 +1,9 @@ //! This module is for hashing Namada types using the keccak256 //! hash function in a way that is compatible with smart contracts //! on Ethereum. + use std::fmt; +use std::str::FromStr; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; use data_encoding::HEXUPPER; @@ -106,6 +108,14 @@ impl TryFrom<&str> for KeccakHash { } } +impl FromStr for KeccakHash { + type Err = TryFromError; + + fn from_str(s: &str) -> Result { + s.try_into() + } +} + impl AsRef<[u8]> for KeccakHash { fn as_ref(&self) -> &[u8] { &self.0 diff --git a/crates/core/src/lib.rs b/crates/core/src/lib.rs index 0af456c454..7f084790cb 100644 --- a/crates/core/src/lib.rs +++ b/crates/core/src/lib.rs @@ -7,7 +7,6 @@ #![deny(rustdoc::private_intra_doc_links)] pub mod bytes; -pub mod event; pub mod hints; pub use {masp_primitives, tendermint, tendermint_proto}; diff --git a/crates/core/src/storage.rs b/crates/core/src/storage.rs index 5023ff9f4a..a90a3942aa 100644 --- a/crates/core/src/storage.rs +++ b/crates/core/src/storage.rs @@ -42,6 +42,8 @@ pub enum Error { ParseKeySeg(String), #[error("Error parsing block hash: {0}")] ParseBlockHash(String), + #[error("Error parsing tx index: {0}")] + ParseTxIndex(String), #[error("The key is empty")] EmptyKey, #[error("The key is missing sub-key segments: {0}")] @@ -162,6 +164,27 @@ impl FromStr for DbColFam { )] pub struct TxIndex(pub u32); +impl TxIndex { + /// Convert from a [`usize`] or panic. + pub fn must_from_usize(tx_index: usize) -> Self { + Self( + tx_index + .try_into() + .expect("Transaction index out of bounds"), + ) + } +} + +impl FromStr for TxIndex { + type Err = Error; + + fn from_str(s: &str) -> std::result::Result { + let tx_index = u32::from_str(s) + .map_err(|err| Error::ParseTxIndex(err.to_string()))?; + Ok(TxIndex(tx_index)) + } +} + impl Display for TxIndex { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{}", self.0) diff --git a/crates/core/src/token.rs b/crates/core/src/token.rs index 9765a017bc..7eaca47a4e 100644 --- a/crates/core/src/token.rs +++ b/crates/core/src/token.rs @@ -211,7 +211,7 @@ impl Amount { let denom = denom.into(); let uint = uint.into(); if denom == 0 { - return Ok(Self { raw: uint }); + return Ok(uint.into()); } match Uint::from(10) .checked_pow(Uint::from(denom)) @@ -907,6 +907,12 @@ impl From for Uint { } } +impl From for Amount { + fn from(raw: Uint) -> Self { + Self { raw } + } +} + /// The four possible u64 words in a [`Uint`]. /// Used for converting to MASP amounts. 
#[derive( diff --git a/crates/core/src/uint.rs b/crates/core/src/uint.rs index 60cfd14952..284018296e 100644 --- a/crates/core/src/uint.rs +++ b/crates/core/src/uint.rs @@ -4,6 +4,7 @@ use std::cmp::Ordering; use std::fmt; use std::ops::{Add, AddAssign, BitAnd, Div, Mul, Neg, Rem, Sub, SubAssign}; +use std::str::FromStr; use borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; use impl_num_traits::impl_uint_num_traits; @@ -507,7 +508,26 @@ impl fmt::Display for I256 { } } +impl FromStr for I256 { + type Err = Box; + + fn from_str(num: &str) -> Result { + if let Some(("", neg_num)) = num.split_once('-') { + let uint = neg_num.parse::()?.negate(); + Ok(I256(uint)) + } else { + let uint = num.parse::()?; + Ok(I256(uint)) + } + } +} + impl I256 { + /// Compute the two's complement of a number. + pub fn negate(&self) -> Self { + Self(self.0.negate()) + } + /// Check if the amount is not negative (greater /// than or equal to zero) pub fn non_negative(&self) -> bool { @@ -1064,4 +1084,14 @@ mod test_uint { assert_eq!(e.checked_mul_div(c, b), Some((Uint::zero(), c))); assert_eq!(d.checked_mul_div(a, e), None); } + + #[test] + fn test_i256_str_roundtrip() { + let minus_one = I256::one().negate(); + let minus_one_str = minus_one.to_string(); + assert_eq!(minus_one_str, "-1"); + + let parsed: I256 = minus_one_str.parse().unwrap(); + assert_eq!(minus_one, parsed); + } } diff --git a/crates/ethereum_bridge/Cargo.toml b/crates/ethereum_bridge/Cargo.toml index 281113b984..e0ff01cbf1 100644 --- a/crates/ethereum_bridge/Cargo.toml +++ b/crates/ethereum_bridge/Cargo.toml @@ -28,6 +28,7 @@ migrations = [ [dependencies] namada_account = {path = "../account", optional = true} namada_core = {path = "../core", default-features = false, features = ["ethers-derive"]} +namada_events = { path = "../events", default-features = false } namada_macros = {path = "../macros"} namada_migrations = {path = "../migrations", optional = true} namada_parameters = {path = "../parameters"} @@ -43,6 +44,7 @@ ethabi.workspace = true ethers.workspace = true eyre.workspace = true itertools.workspace = true +konst.workspace = true linkme = {workspace = true, optional = true} serde.workspace = true serde_json.workspace = true diff --git a/crates/ethereum_bridge/src/event.rs b/crates/ethereum_bridge/src/event.rs new file mode 100644 index 0000000000..b7e2d87e0b --- /dev/null +++ b/crates/ethereum_bridge/src/event.rs @@ -0,0 +1,164 @@ +//! Ethereum Bridge transaction events. + +use namada_core::borsh::{BorshDeserialize, BorshSerialize}; +use namada_core::keccak::KeccakHash; +use namada_events::extend::{ComposeEvent, EventAttributeEntry}; +use namada_events::{Event, EventError, EventLevel, EventToEmit, EventType}; +use namada_macros::BorshDeserializer; +#[cfg(feature = "migrations")] +use namada_migrations::*; +use serde::{Deserialize, Serialize}; + +pub mod types { + //! Ethereum bridge event types. + + use namada_events::{event_type, EventType}; + + use super::EthBridgeEvent; + + /// Bridge pool relay event. + pub const BRIDGE_POOL_RELAYED: EventType = + event_type!(EthBridgeEvent, "bridge-pool", "relayed"); + + /// Bridge pool expiration event. + pub const BRIDGE_POOL_EXPIRED: EventType = + event_type!(EthBridgeEvent, "bridge-pool", "expired"); +} + +/// Status of some Bridge pool transfer. +#[derive( + Hash, + Clone, + Debug, + Eq, + PartialEq, + Ord, + PartialOrd, + BorshSerialize, + BorshDeserialize, + BorshDeserializer, + Serialize, + Deserialize, +)] +pub enum BpTransferStatus { + /// The transfer has been relayed. 
+ Relayed, + /// The transfer has expired. + Expired, +} + +impl From for EventType { + fn from(transfer_status: BpTransferStatus) -> Self { + (&transfer_status).into() + } +} + +impl From<&BpTransferStatus> for EventType { + fn from(transfer_status: &BpTransferStatus) -> Self { + match transfer_status { + BpTransferStatus::Relayed => types::BRIDGE_POOL_RELAYED, + BpTransferStatus::Expired => types::BRIDGE_POOL_EXPIRED, + } + } +} + +impl TryFrom for BpTransferStatus { + type Error = EventError; + + fn try_from(event_type: EventType) -> Result { + (&event_type).try_into() + } +} + +impl TryFrom<&EventType> for BpTransferStatus { + type Error = EventError; + + fn try_from(event_type: &EventType) -> Result { + if *event_type == types::BRIDGE_POOL_RELAYED { + Ok(BpTransferStatus::Relayed) + } else if *event_type == types::BRIDGE_POOL_EXPIRED { + Ok(BpTransferStatus::Expired) + } else { + Err(EventError::InvalidEventType) + } + } +} + +/// Ethereum bridge events on Namada's event log. +#[derive( + Hash, + Clone, + Debug, + Eq, + PartialEq, + Ord, + PartialOrd, + BorshSerialize, + BorshDeserialize, + BorshDeserializer, + Serialize, + Deserialize, +)] +pub enum EthBridgeEvent { + /// Bridge pool transfer status update event. + BridgePool { + /// Hash of the Bridge pool transfer. + tx_hash: KeccakHash, + /// Status of the Bridge pool transfer. + status: BpTransferStatus, + }, +} + +impl EthBridgeEvent { + /// Return a new Bridge pool expired transfer event. + pub const fn new_bridge_pool_expired(tx_hash: KeccakHash) -> Self { + Self::BridgePool { + tx_hash, + status: BpTransferStatus::Expired, + } + } + + /// Return a new Bridge pool relayed transfer event. + pub const fn new_bridge_pool_relayed(tx_hash: KeccakHash) -> Self { + Self::BridgePool { + tx_hash, + status: BpTransferStatus::Relayed, + } + } +} + +impl From for Event { + #[inline] + fn from(event: EthBridgeEvent) -> Event { + Self::from(&event) + } +} + +impl From<&EthBridgeEvent> for Event { + fn from(event: &EthBridgeEvent) -> Event { + match event { + EthBridgeEvent::BridgePool { tx_hash, status } => { + Event::new(status.into(), EventLevel::Tx) + .with(BridgePoolTxHash(tx_hash)) + .into() + } + } + } +} + +impl EventToEmit for EthBridgeEvent { + const DOMAIN: &'static str = "eth-bridge"; +} + +pub struct BridgePoolTxHash<'tx>(pub &'tx KeccakHash); + +impl<'tx> EventAttributeEntry<'tx> for BridgePoolTxHash<'tx> { + type Value = &'tx KeccakHash; + type ValueOwned = KeccakHash; + + const KEY: &'static str = "bridge_pool_tx_hash"; + + fn into_value(self) -> Self::Value { + self.0 + } +} diff --git a/crates/ethereum_bridge/src/lib.rs b/crates/ethereum_bridge/src/lib.rs index eef7126bab..7bf152bb77 100644 --- a/crates/ethereum_bridge/src/lib.rs +++ b/crates/ethereum_bridge/src/lib.rs @@ -1,5 +1,6 @@ extern crate core; +pub mod event; pub mod oracle; pub mod protocol; pub mod storage; diff --git a/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/events.rs b/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/events.rs index 12055b4e44..5b97c1d226 100644 --- a/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/events.rs +++ b/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/events.rs @@ -16,7 +16,6 @@ use namada_core::ethereum_events::{ EthAddress, EthereumEvent, TransferToEthereum, TransferToNamada, TransfersToNamada, }; -use namada_core::ethereum_structs::EthBridgeEvent; use namada_core::hints; use namada_core::storage::{BlockHeight, Key, KeySeg}; use 
namada_parameters::read_epoch_duration_parameter; @@ -24,6 +23,7 @@ use namada_state::{DBIter, StorageHasher, WlState, DB}; use namada_storage::{StorageRead, StorageWrite}; use namada_trans_token::storage_key::{balance_key, minted_balance_key}; +use crate::event::EthBridgeEvent; use crate::protocol::transactions::update; use crate::storage::bridge_pool::{ get_nonce_key, is_pending_transfer_key, BRIDGE_POOL_ADDRESS, diff --git a/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/mod.rs b/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/mod.rs index 47a7738749..8bf67c2205 100644 --- a/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/mod.rs +++ b/crates/ethereum_bridge/src/protocol/transactions/ethereum_events/mod.rs @@ -11,7 +11,6 @@ use eyre::Result; use namada_core::address::Address; use namada_core::collections::{HashMap, HashSet}; use namada_core::ethereum_events::EthereumEvent; -use namada_core::ethereum_structs::EthBridgeEvent; use namada_core::key::common; use namada_core::storage::{BlockHeight, Epoch, Key}; use namada_core::token::Amount; @@ -22,6 +21,7 @@ use namada_tx::data::TxResult; use namada_vote_ext::ethereum_events::{MultiSignedEthEvent, SignedVext, Vext}; use super::ChangedKeys; +use crate::event::EthBridgeEvent; use crate::protocol::transactions::utils; use crate::protocol::transactions::votes::update::NewVotes; use crate::protocol::transactions::votes::{self, calculate_new}; @@ -121,7 +121,10 @@ where Ok(TxResult { changed_keys, - eth_bridge_events, + events: eth_bridge_events + .into_iter() + .map(|event| event.into()) + .collect(), ..Default::default() }) } @@ -520,7 +523,6 @@ mod tests { assert!(tx_result.vps_result.rejected_vps.is_empty()); assert!(tx_result.vps_result.errors.is_empty()); assert!(tx_result.initialized_accounts.is_empty()); - assert!(tx_result.ibc_events.is_empty()); } /// Test calling apply_derived_tx for an event that isn't backed by enough diff --git a/crates/events/Cargo.toml b/crates/events/Cargo.toml new file mode 100644 index 0000000000..0e6972db4e --- /dev/null +++ b/crates/events/Cargo.toml @@ -0,0 +1,31 @@ +[package] +name = "namada_events" +description = "Namada events" +resolver = "2" +authors.workspace = true +edition.workspace = true +documentation.workspace = true +homepage.workspace = true +keywords.workspace = true +license.workspace = true +readme.workspace = true +repository.workspace = true +version.workspace = true + +[features] +default = [] +mainnet = [] +migrations = [ + "namada_migrations", + "linkme", +] +testing = [] + +[dependencies] +namada_core = {path = "../core"} +namada_macros = {path = "../macros"} +namada_migrations = {path = "../migrations", optional = true} +borsh.workspace = true +linkme = {workspace = true, optional = true} +serde.workspace = true +thiserror.workspace = true diff --git a/crates/events/src/extend.rs b/crates/events/src/extend.rs new file mode 100644 index 0000000000..061b33ea3c --- /dev/null +++ b/crates/events/src/extend.rs @@ -0,0 +1,764 @@ +//! Extend [events](Event) with additional fields. + +pub mod dynamic; + +use std::fmt::Display; +use std::marker::PhantomData; +use std::ops::ControlFlow; +use std::str::FromStr; + +use namada_core::collections::HashMap; +use namada_core::hash::Hash; +use namada_core::storage::{BlockHeight, TxIndex}; + +use super::*; + +impl Event { + /// Check if this [`Event`] has a subset of the keys and values + /// in `attrs`. 
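+    ///
+    /// Illustrative usage sketch (not part of this patch); assumes a
+    /// `BTreeMap<String, String>` holding the expected attributes:
+    ///
+    /// ```ignore
+    /// let mut expected = BTreeMap::new();
+    /// expected.insert("log".to_string(), "ok".to_string());
+    /// assert!(event.has_subset_of_attrs(&expected));
+    /// ```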
+ #[inline] + pub fn has_subset_of_attrs(&self, attrs: &A) -> bool { + attrs.iter_attributes().all(|(key, value)| { + match self.attributes.get(key) { + Some(v) => v == value, + None => false, + } + }) + } + + /// Get the raw string value corresponding to a given attribute, if it + /// exists. + #[inline] + pub fn raw_read_attribute<'value, DATA>(&self) -> Option<&str> + where + DATA: RawReadFromEventAttributes<'value>, + { + DATA::raw_read_opt_from_event_attributes(&self.attributes) + } + + /// Get the value corresponding to a given attribute. + #[inline] + pub fn read_attribute<'value, DATA>( + &self, + ) -> Result<>::Value, EventError> + where + DATA: ReadFromEventAttributes<'value>, + { + DATA::read_from_event_attributes(&self.attributes) + } + + /// Get the value corresponding to a given attribute, if it exists. + #[inline] + pub fn read_attribute_opt<'value, DATA>( + &self, + ) -> Result< + Option<>::Value>, + EventError, + > + where + DATA: ReadFromEventAttributes<'value>, + { + DATA::read_opt_from_event_attributes(&self.attributes) + } + + /// Check if a certain attribute is present in the event. + #[inline] + pub fn has_attribute<'value, DATA>(&self) -> bool + where + DATA: RawReadFromEventAttributes<'value>, + { + DATA::check_if_present_in(&self.attributes) + } + + /// Extend this [`Event`] with additional data. + #[inline] + pub fn extend(&mut self, data: DATA) -> &mut Self + where + DATA: ExtendEvent, + { + data.extend_event(self); + self + } +} + +/// Map of event attributes. +pub trait AttributesMap { + /// Insert a new attribute. + fn insert_attribute(&mut self, key: K, value: V) + where + K: Into, + V: Into; + + /// Retrieve an attribute. + fn retrieve_attribute(&self, key: &str) -> Option<&str>; + + /// Check for the existence of an attribute. + fn is_attribute(&self, key: &str) -> bool; + + /// Iterate over all the key value pairs. 
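+    ///
+    /// Illustrative sketch (not part of this patch):
+    ///
+    /// ```ignore
+    /// for (key, value) in attrs.iter_attributes() {
+    ///     println!("{key}: {value}");
+    /// }
+    /// ```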
+ fn iter_attributes(&self) -> impl Iterator; +} + +impl AttributesMap for HashMap { + #[inline] + fn insert_attribute(&mut self, key: K, value: V) + where + K: Into, + V: Into, + { + self.insert(key.into(), value.into()); + } + + #[inline] + fn retrieve_attribute(&self, key: &str) -> Option<&str> { + self.get(key).map(String::as_ref) + } + + #[inline] + fn is_attribute(&self, key: &str) -> bool { + self.contains_key(key) + } + + #[inline] + fn iter_attributes(&self) -> impl Iterator { + self.iter().map(|(k, v)| (k.as_str(), v.as_str())) + } +} + +impl AttributesMap for BTreeMap { + #[inline] + fn insert_attribute(&mut self, key: K, value: V) + where + K: Into, + V: Into, + { + self.insert(key.into(), value.into()); + } + + #[inline] + fn retrieve_attribute(&self, key: &str) -> Option<&str> { + self.get(key).map(String::as_ref) + } + + #[inline] + fn is_attribute(&self, key: &str) -> bool { + self.contains_key(key) + } + + #[inline] + fn iter_attributes(&self) -> impl Iterator { + self.iter().map(|(k, v)| (k.as_str(), v.as_str())) + } +} + +impl AttributesMap for Vec { + #[inline] + fn insert_attribute(&mut self, key: K, value: V) + where + K: Into, + V: Into, + { + self.push(namada_core::tendermint::abci::EventAttribute { + key: key.into(), + value: value.into(), + index: true, + }); + } + + #[inline] + fn retrieve_attribute(&self, key: &str) -> Option<&str> { + self.iter().find_map(|attr| { + if attr.key == key { + Some(attr.value.as_str()) + } else { + None + } + }) + } + + #[inline] + fn is_attribute(&self, key: &str) -> bool { + self.iter().any(|attr| attr.key == key) + } + + #[inline] + fn iter_attributes(&self) -> impl Iterator { + self.iter() + .map(|attr| (attr.key.as_str(), attr.value.as_str())) + } +} + +impl AttributesMap + for Vec +{ + #[inline] + fn insert_attribute(&mut self, key: K, value: V) + where + K: Into, + V: Into, + { + self.push(namada_core::tendermint_proto::v0_37::abci::EventAttribute { + key: key.into(), + value: value.into(), + index: true, + }); + } + + #[inline] + fn retrieve_attribute(&self, key: &str) -> Option<&str> { + self.iter().find_map(|attr| { + if attr.key == key { + Some(attr.value.as_str()) + } else { + None + } + }) + } + + #[inline] + fn is_attribute(&self, key: &str) -> bool { + self.iter().any(|attr| attr.key == key) + } + + #[inline] + fn iter_attributes(&self) -> impl Iterator { + self.iter() + .map(|attr| (attr.key.as_str(), attr.value.as_str())) + } +} + +/// Provides event composition routines. +pub trait ComposeEvent { + /// Compose an [event](Event) with new data. + fn with(self, data: NEW) -> CompositeEvent + where + Self: Sized; +} + +impl ComposeEvent for E +where + E: Into, +{ + #[inline(always)] + fn with(self, data: NEW) -> CompositeEvent { + CompositeEvent::new(self, data) + } +} + +/// Event composed of various other event extensions. +#[derive(Clone, Debug)] +pub struct CompositeEvent { + base_event: E, + data: DATA, +} + +impl CompositeEvent { + /// Create a new composed event. 
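+    ///
+    /// This is what [`ComposeEvent::with`] calls under the hood. An
+    /// illustrative sketch (not part of this patch), for some
+    /// `base_event` convertible into an [`Event`]:
+    ///
+    /// ```ignore
+    /// let event: Event = base_event
+    ///     .with(Log("applied".to_string()))
+    ///     .with(Height(300.into()))
+    ///     .into();
+    /// ```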
+ pub const fn new(base_event: E, data: DATA) -> Self { + Self { base_event, data } + } +} + +impl From> for Event +where + E: Into, + DATA: ExtendEvent, +{ + #[inline] + fn from(composite: CompositeEvent) -> Event { + let CompositeEvent { base_event, data } = composite; + + let mut base_event = base_event.into(); + data.extend_event(&mut base_event); + + base_event + } +} + +impl EventToEmit for CompositeEvent +where + E: EventToEmit, + DATA: ExtendEvent, +{ + const DOMAIN: &'static str = E::DOMAIN; +} + +/// Extend an [`AttributesMap`] implementation with the ability +/// to add new attributes from domain types. +pub trait ExtendAttributesMap: Sized { + /// Insert a new attribute into a map of event attributes. + fn with_attribute(&mut self, data: DATA) -> &mut Self + where + DATA: ExtendEventAttributes; +} + +impl ExtendAttributesMap for A { + #[inline(always)] + fn with_attribute(&mut self, data: DATA) -> &mut Self + where + DATA: ExtendEventAttributes, + { + data.extend_event_attributes(self); + self + } +} + +/// Represents an entry in the attributes of an [`Event`]. +pub trait EventAttributeEntry<'a> { + /// Key to read or write and event attribute to. + const KEY: &'static str; + + /// Data to be stored in the given `KEY`. + type Value; + + /// Identical to [`Self::Value`], with the exception that this + /// should be an owned variant of that type. + type ValueOwned; + + /// Return the data to be stored in the given `KEY`. + fn into_value(self) -> Self::Value; +} + +/// Extend an [event](Event) with additional attributes. +pub trait ExtendEventAttributes { + /// Add additional attributes to some `event`. + fn extend_event_attributes(self, attributes: &mut A) + where + A: AttributesMap; +} + +impl<'value, DATA> ExtendEventAttributes for DATA +where + DATA: EventAttributeEntry<'value>, + DATA::Value: ToString, +{ + #[inline] + fn extend_event_attributes(self, attributes: &mut A) + where + A: AttributesMap, + { + attributes.insert_attribute( + DATA::KEY.to_string(), + self.into_value().to_string(), + ); + } +} + +/// Read an attribute from an [event](Event)'s attributes. +pub trait ReadFromEventAttributes<'value> { + /// The attribute to be read. + type Value; + + /// Read an attribute from the provided event attributes. + fn read_opt_from_event_attributes( + attributes: &A, + ) -> Result, EventError> + where + A: AttributesMap; + + /// Read an attribute from the provided event attributes. + fn read_from_event_attributes( + attributes: &A, + ) -> Result + where + A: AttributesMap; +} + +// NB: some domain specific types take references instead of owned +// values as arguments, so we must decode into the owned counterparts +// of these types... 
hence the trait spaghetti +impl<'value, DATA> ReadFromEventAttributes<'value> for DATA +where + DATA: EventAttributeEntry<'value>, + >::ValueOwned: FromStr, + <>::ValueOwned as FromStr>::Err: + Display, +{ + type Value = >::ValueOwned; + + #[inline] + fn read_opt_from_event_attributes( + attributes: &A, + ) -> Result, EventError> + where + A: AttributesMap, + { + attributes + .retrieve_attribute(DATA::KEY) + .map(|encoded_value| { + encoded_value.parse().map_err( + |err: ::Err| { + EventError::AttributeEncoding(err.to_string()) + }, + ) + }) + .transpose() + } + + #[inline] + fn read_from_event_attributes( + attributes: &A, + ) -> Result + where + A: AttributesMap, + { + Self::read_opt_from_event_attributes(attributes)?.ok_or_else(|| { + EventError::MissingAttribute( + >::KEY.to_string(), + ) + }) + } +} + +/// Read a raw (string encoded) attribute from an [event](Event)'s attributes. +pub trait RawReadFromEventAttributes<'value> { + /// Check if the associated attribute is present in the provided event + /// attributes. + fn check_if_present_in(attributes: &A) -> bool + where + A: AttributesMap; + + /// Read a string encoded attribute from the provided event attributes. + fn raw_read_opt_from_event_attributes(attributes: &A) -> Option<&str> + where + A: AttributesMap; + + /// Read a string encoded attribute from the provided event attributes. + fn raw_read_from_event_attributes( + attributes: &A, + ) -> Result<&str, EventError> + where + A: AttributesMap; +} + +impl<'value, DATA> RawReadFromEventAttributes<'value> for DATA +where + DATA: EventAttributeEntry<'value>, +{ + #[inline] + fn check_if_present_in(attributes: &A) -> bool + where + A: AttributesMap, + { + attributes.is_attribute(DATA::KEY) + } + + #[inline] + fn raw_read_opt_from_event_attributes(attributes: &A) -> Option<&str> + where + A: AttributesMap, + { + attributes.retrieve_attribute(DATA::KEY) + } + + #[inline] + fn raw_read_from_event_attributes( + attributes: &A, + ) -> Result<&str, EventError> + where + A: AttributesMap, + { + Self::raw_read_opt_from_event_attributes(attributes).ok_or_else(|| { + EventError::MissingAttribute( + >::KEY.to_string(), + ) + }) + } +} + +/// Extend an [event](Event) with additional data. +pub trait ExtendEvent { + /// Add additional data to the specified `event`. + fn extend_event(self, event: &mut Event); +} + +impl ExtendEvent for E { + #[inline] + fn extend_event(self, event: &mut Event) { + self.extend_event_attributes(&mut event.attributes); + } +} + +/// Extend an [`Event`] with block height information. +pub struct Height(pub BlockHeight); + +impl EventAttributeEntry<'static> for Height { + type Value = BlockHeight; + type ValueOwned = Self::Value; + + const KEY: &'static str = "height"; + + fn into_value(self) -> Self::Value { + self.0 + } +} + +/// Extend an [`Event`] with transaction hash information. +pub struct TxHash(pub Hash); + +impl EventAttributeEntry<'static> for TxHash { + type Value = Hash; + type ValueOwned = Self::Value; + + const KEY: &'static str = "hash"; + + fn into_value(self) -> Self::Value { + self.0 + } +} + +/// Extend an [`Event`] with log data. +pub struct Log(pub String); + +impl EventAttributeEntry<'static> for Log { + type Value = String; + type ValueOwned = Self::Value; + + const KEY: &'static str = "log"; + + fn into_value(self) -> Self::Value { + self.0 + } +} + +/// Extend an [`Event`] with info data. 
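+///
+/// Illustrative usage sketch (not part of this patch), for some event
+/// convertible into an [`Event`]:
+///
+/// ```ignore
+/// let event: Event = base_event
+///     .with(Info("transaction accepted".to_string()))
+///     .into();
+/// ```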
+pub struct Info(pub String); + +impl EventAttributeEntry<'static> for Info { + type Value = String; + type ValueOwned = Self::Value; + + const KEY: &'static str = "info"; + + fn into_value(self) -> Self::Value { + self.0 + } +} + +/// Extend an [`Event`] with `is_valid_masp_tx` data. +pub struct ValidMaspTx(pub TxIndex); + +impl EventAttributeEntry<'static> for ValidMaspTx { + type Value = TxIndex; + type ValueOwned = Self::Value; + + const KEY: &'static str = "is_valid_masp_tx"; + + fn into_value(self) -> Self::Value { + self.0 + } +} + +/// Extend an [`Event`] with success data. +pub struct Success(pub bool); + +impl EventAttributeEntry<'static> for Success { + type Value = bool; + type ValueOwned = Self::Value; + + const KEY: &'static str = "success"; + + fn into_value(self) -> Self::Value { + self.0 + } +} + +/// Extend an [`Event`] with a new domain. +pub struct Domain(PhantomData); + +/// Build a new [`Domain`] to extend an [event](Event) with. +pub const fn event_domain_of() -> Domain { + Domain(PhantomData) +} + +/// Parsed domain of some [event](Event). +pub struct ParsedDomain { + domain: String, + _marker: PhantomData, +} + +impl ParsedDomain { + /// Return the inner domain as a [`String`]. + #[inline] + pub fn into_inner(self) -> String { + self.domain + } +} + +impl From> for String { + #[inline] + fn from(parsed_domain: ParsedDomain) -> String { + parsed_domain.into_inner() + } +} + +impl FromStr for ParsedDomain +where + E: EventToEmit, +{ + type Err = EventError; + + fn from_str(s: &str) -> Result { + if s == E::DOMAIN { + Ok(Self { + domain: s.to_owned(), + _marker: PhantomData, + }) + } else { + Err(EventError::InvalidDomain(format!( + "Expected {:?}, but found {s:?}", + E::DOMAIN + ))) + } + } +} + +impl EventAttributeEntry<'static> for Domain +where + E: EventToEmit, +{ + type Value = &'static str; + type ValueOwned = ParsedDomain; + + const KEY: &'static str = "event-domain"; + + fn into_value(self) -> Self::Value { + E::DOMAIN + } +} + +/// Extend an [`Event`] with metadata pertaining to its origin +/// in the source code. +pub struct Origin { + #[doc(hidden)] + pub __origin: &'static str, +} + +#[macro_export] +macro_rules! event_origin { + () => { + $crate::extend::Origin { + __origin: ::konst::string::str_concat!(&[ + ::core::env!("CARGO_CRATE_NAME"), + "-", + ::core::env!("CARGO_PKG_VERSION"), + ":", + ::core::file!(), + ":", + ::core::line!() + ]), + } + }; +} + +impl EventAttributeEntry<'static> for Origin { + type Value = &'static str; + type ValueOwned = String; + + const KEY: &'static str = "event-origin"; + + fn into_value(self) -> Self::Value { + self.__origin + } +} + +/// Extend an [`Event`] with the given closure. 
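+///
+/// Illustrative sketch (not part of this patch):
+///
+/// ```ignore
+/// event.extend(Closure(|event: &mut Event| {
+///     // mutate the event in place here
+/// }));
+/// ```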
+pub struct Closure(pub F); + +impl ExtendEvent for Closure +where + F: FnOnce(&mut Event), +{ + #[inline] + fn extend_event(self, event: &mut Event) { + let Self(closure) = self; + closure(event); + } +} + +#[cfg(test)] +mod event_composition_tests { + use super::*; + + struct DummyEvent; + + impl From for Event { + fn from(_: DummyEvent) -> Event { + Event::new( + EventTypeBuilder::new_of::() + .with_segment("event") + .build(), + EventLevel::Tx, + ) + } + } + + impl EventToEmit for DummyEvent { + const DOMAIN: &'static str = "dummy"; + } + + #[test] + fn test_event_height_parse() { + let event: Event = DummyEvent.with(Height(BlockHeight(300))).into(); + + let height = event.raw_read_attribute::().unwrap(); + assert_eq!(height, "300"); + assert_eq!(height.parse::().unwrap(), 300u64); + + let height = event.read_attribute::().unwrap(); + assert_eq!(height, BlockHeight(300)); + } + + #[test] + fn test_event_compose_basic() { + let expected_attrs = { + let mut attrs = BTreeMap::new(); + attrs.insert("log".to_string(), "this is sparta!".to_string()); + attrs.insert("height".to_string(), "300".to_string()); + attrs.insert("hash".to_string(), Hash::default().to_string()); + attrs + }; + + let base_event: Event = DummyEvent + .with(Log("this is sparta!".to_string())) + .with(Height(300.into())) + .with(TxHash(Hash::default())) + .into(); + + assert_eq!(base_event.attributes, expected_attrs); + } + + #[test] + fn test_event_compose_repeated() { + let expected_attrs = { + let mut attrs = BTreeMap::new(); + attrs.insert("log".to_string(), "dejavu".to_string()); + attrs + }; + + let base_event: Event = DummyEvent + .with(Log("dejavu".to_string())) + .with(Log("dejavu".to_string())) + .with(Log("dejavu".to_string())) + .into(); + + assert_eq!(base_event.attributes, expected_attrs); + } + + #[test] + fn test_event_compose_last_one_kept() { + let expected_attrs = { + let mut attrs = BTreeMap::new(); + attrs.insert("log".to_string(), "last".to_string()); + attrs + }; + + let base_event: Event = DummyEvent + .with(Log("fist".to_string())) + .with(Log("second".to_string())) + .with(Log("last".to_string())) + .into(); + + assert_eq!(base_event.attributes, expected_attrs); + } +} diff --git a/crates/events/src/extend/dynamic.rs b/crates/events/src/extend/dynamic.rs new file mode 100644 index 0000000000..7e051f7554 --- /dev/null +++ b/crates/events/src/extend/dynamic.rs @@ -0,0 +1,196 @@ +//! Extend [events](Event) with additional fields, +//! whose attributes are determined dynamically at +//! runtime. + +use super::*; + +impl Event { + /// Get the value corresponding to a given attribute. + #[inline] + pub fn dyn_read_attribute<'value, DATA>( + &self, + reader: &DATA, + ) -> Result<>::Value, EventError> + where + DATA: DynReadFromEventAttributes<'value>, + { + reader.dyn_read_from_event_attributes(&self.attributes) + } + + /// Get the value corresponding to a given attribute, if it exists. + #[inline] + pub fn dyn_read_attribute_opt<'value, DATA>( + &self, + reader: &DATA, + ) -> Result< + Option<>::Value>, + EventError, + > + where + DATA: DynReadFromEventAttributes<'value>, + { + reader.dyn_read_opt_from_event_attributes(&self.attributes) + } + + /// Check if a certain attribute is present in the event. + #[inline] + pub fn dyn_has_attribute<'value, DATA>(&self, reader: &DATA) -> bool + where + DATA: DynRawReadFromEventAttributes<'value>, + { + reader.dyn_check_if_present_in(&self.attributes) + } + + /// Get the raw string value corresponding to a given attribute, if it + /// exists. 
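+    ///
+    /// Illustrative sketch (not part of this patch), assuming `reader`
+    /// implements [`DynRawReadFromEventAttributes`]:
+    ///
+    /// ```ignore
+    /// if let Some(raw) = event.dyn_raw_read_attribute(&reader) {
+    ///     println!("raw value: {raw}");
+    /// }
+    /// ```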
+ #[inline] + pub fn dyn_raw_read_attribute<'this, 'reader: 'this, 'value, DATA>( + &'this self, + reader: &'reader DATA, + ) -> Option<&'this str> + where + DATA: DynRawReadFromEventAttributes<'value>, + { + reader.dyn_raw_read_opt_from_event_attributes(&self.attributes) + } +} + +/// Checks for the presence of an attribute in the +/// provided attributes map. +pub trait EventAttributeChecker<'value, A: AttributesMap> { + /// Check if the associated attribute is present in the provided event + /// attributes. + fn is_present(&self, attributes: &A) -> bool; +} + +/// Return a new implementation of [`EventAttributeChecker`]. +pub fn attribute_checker<'value, DATA, ATTR>() +-> Box> +where + DATA: EventAttributeEntry<'value> + 'static, + ATTR: AttributesMap, +{ + Box::new(EventAttributeCheckerImpl(PhantomData::)) +} + +/// Dispatch a callback on a list of attribute kinds. +pub fn dispatch_attribute<'value, I, K, A, F>( + attributes: &A, + dispatch_list: I, + mut dispatch: F, +) where + A: AttributesMap, + I: IntoIterator>)>, + F: FnMut(K) -> ControlFlow<()>, +{ + for (kind, checker) in dispatch_list { + if !checker.is_present(attributes) { + continue; + } + if let ControlFlow::Break(_) = dispatch(kind) { + break; + } + } +} + +struct EventAttributeCheckerImpl(PhantomData); + +impl<'value, DATA, A> EventAttributeChecker<'value, A> + for EventAttributeCheckerImpl +where + DATA: EventAttributeEntry<'value>, + A: AttributesMap, +{ + fn is_present(&self, attributes: &A) -> bool { + attributes.is_attribute(DATA::KEY) + } +} + +/// Read an attribute from an [event](Event)'s attributes. +pub trait DynReadFromEventAttributes<'value> { + /// The attribute to be read. + type Value; + + /// Read an attribute from the provided event attributes. + fn dyn_read_opt_from_event_attributes( + &self, + attributes: &A, + ) -> Result, EventError> + where + A: AttributesMap; + + /// Read an attribute from the provided event attributes. + fn dyn_read_from_event_attributes( + &self, + attributes: &A, + ) -> Result + where + A: AttributesMap; +} + +/// Read a raw (string encoded) attribute from an [event](Event)'s attributes. +pub trait DynRawReadFromEventAttributes<'value> { + /// Check if the associated attribute is present in the provided event + /// attributes. + fn dyn_check_if_present_in(&self, attributes: &A) -> bool + where + A: AttributesMap; + + /// Read a string encoded attribute from the provided event attributes. + fn dyn_raw_read_opt_from_event_attributes( + &self, + attributes: &A, + ) -> Option<&str> + where + A: AttributesMap; + + /// Read a string encoded attribute from the provided event attributes. 
+ fn dyn_raw_read_from_event_attributes( + &self, + attributes: &A, + ) -> Result<&str, EventError> + where + A: AttributesMap; +} + +#[cfg(test)] +mod dyn_event_composition_tests { + use super::*; + + #[test] + fn test_event_attribute_dispatching() { + enum AttrKind { + Log, + Info, + } + + let attributes = { + let mut attrs = BTreeMap::new(); + attrs.with_attribute(Info(String::new())); + attrs + }; + + let log_attribute = attribute_checker::(); + let info_attribute = attribute_checker::(); + + let mut found_info = false; + let mut found_log = false; + + dispatch_attribute( + &attributes, + [ + (AttrKind::Info, info_attribute), + (AttrKind::Log, log_attribute), + ], + |kind| { + match kind { + AttrKind::Info => found_info = true, + AttrKind::Log => found_log = true, + } + ControlFlow::Continue(()) + }, + ); + + assert!(found_info && !found_log); + } +} diff --git a/crates/events/src/lib.rs b/crates/events/src/lib.rs new file mode 100644 index 0000000000..03681d9649 --- /dev/null +++ b/crates/events/src/lib.rs @@ -0,0 +1,451 @@ +//! Events emitted by the Namada ledger. + +pub mod extend; +#[cfg(any(test, feature = "testing"))] +pub mod testing; + +use std::borrow::Cow; +use std::collections::BTreeMap; +use std::fmt::{self, Display}; +use std::ops::Deref; +use std::str::FromStr; + +use namada_core::borsh::{BorshDeserialize, BorshSerialize}; +use namada_macros::BorshDeserializer; +#[cfg(feature = "migrations")] +use namada_migrations::*; +use serde::{Deserialize, Serialize}; +use thiserror::Error; + +#[doc(hidden)] +#[macro_export] +macro_rules! __event_type_impl { + ($domain:ty) => { + <$domain as $crate::EventToEmit>::DOMAIN + }; + ($domain:ty, $($subdomain:expr),*) => { + ::konst::string::str_join!( + "/", + &[ + $crate::__event_type_impl!($domain), + $($subdomain),* + ], + ) + }; +} + +/// Instantiate a new [`EventType`] in const contexts. Mostly +/// useful to define new event types in the protocol. +/// +/// # Example +/// +/// ```ignore +/// const RELAYED: EventType = event_type!(EthBridgeEvent, "bridge-pool", "relayed"); +/// ``` +#[macro_export] +macro_rules! event_type { + ($($tt:tt)*) => { + $crate::EventType::new($crate::__event_type_impl!($($tt)*)) + }; +} + +/// An event to be emitted in Namada. +pub trait EventToEmit: Into { + /// The domain of the event to emit. + /// + /// This may be used to group events of a certain kind. + const DOMAIN: &'static str; +} + +impl EventToEmit for Event { + const DOMAIN: &'static str = "unknown"; +} + +/// Used in sub-systems that may emit events. +pub trait EmitEvents { + /// Emit a single [event](Event). + fn emit(&mut self, event: E) + where + E: EventToEmit; + + /// Emit a batch of [events](Event). + fn emit_many(&mut self, event_batch: B) + where + B: IntoIterator, + E: EventToEmit; +} + +impl EmitEvents for Vec { + #[inline] + fn emit(&mut self, event: E) + where + E: Into, + { + self.push(event.into()); + } + + /// Emit a batch of [events](Event). + fn emit_many(&mut self, event_batch: B) + where + B: IntoIterator, + E: Into, + { + self.extend(event_batch.into_iter().map(Into::into)); + } +} + +/// Indicates if an event is emitted do to +/// an individual Tx or the nature of a finalized block +#[derive( + Clone, + Debug, + Eq, + PartialEq, + Ord, + PartialOrd, + Hash, + BorshSerialize, + BorshDeserialize, + BorshDeserializer, + Serialize, + Deserialize, +)] +pub enum EventLevel { + /// Indicates an event is to do with a finalized block. + Block, + /// Indicates an event is to do with an individual transaction. 
+ Tx, +} + +impl Display for EventLevel { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "{}", + match self { + EventLevel::Block => "block", + EventLevel::Tx => "tx", + } + ) + } +} + +/// ABCI event type. +/// +/// It is comprised of an event domain and sub-domain, plus any other +/// specifiers. +#[derive( + Clone, + Debug, + Eq, + PartialEq, + Ord, + PartialOrd, + Hash, + BorshSerialize, + BorshDeserialize, + BorshDeserializer, + Serialize, + Deserialize, +)] +#[repr(transparent)] +pub struct EventType { + inner: Cow<'static, str>, +} + +impl Deref for EventType { + type Target = str; + + #[inline(always)] + fn deref(&self) -> &str { + &self.inner + } +} + +impl EventType { + /// Create a new event type. + pub const fn new(event_type: &'static str) -> Self { + Self { + inner: Cow::Borrowed(event_type), + } + } + + /// Retrieve the domain of some event. + #[inline] + pub fn domain(&self) -> &str { + self.inner + .split_once('/') + .map(|(domain, _sub_domain)| domain) + .unwrap_or("unknown") + } + + /// Retrieve the sub-domain of some event. + #[inline] + pub fn sub_domain(&self) -> &str { + self.inner + .split_once('/') + .map(|(_domain, sub_domain)| sub_domain) + .unwrap_or("") + } +} + +impl Display for EventType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.inner) + } +} + +impl FromStr for EventType { + type Err = EventError; + + fn from_str(s: &str) -> Result { + s.split_once('/').ok_or(EventError::MissingDomain)?; + Ok(Self { + inner: Cow::Owned(s.into()), + }) + } +} + +/// Build an [`EventType`] segment by segment. +pub struct EventTypeBuilder { + inner: String, +} + +impl EventTypeBuilder { + /// Create a new [`EventTypeBuilder`] with the given type. + #[inline] + pub fn new_with_type(ty: impl Into) -> Self { + Self { inner: ty.into() } + } + + /// Create a new [`EventTypeBuilder`] with the domain of the + /// given event type. + #[inline] + pub fn new_of() -> Self { + Self::new_with_type(E::DOMAIN) + } + + /// Append a new segment to the final [`EventType`] and return + /// a mutable reference to the builder. + #[inline] + pub fn append_segment(&mut self, segment: impl AsRef) -> &mut Self { + let segment = segment.as_ref(); + + if !segment.is_empty() { + self.inner.push('/'); + self.inner.push_str(segment.as_ref()); + } + + self + } + + /// Append a new segment to the final [`EventType`] and return + /// the builder. + #[inline] + pub fn with_segment(mut self, segment: impl AsRef) -> Self { + self.append_segment(segment); + self + } + + /// Build the final [`EventType`]. + #[inline] + pub fn build(self) -> EventType { + EventType { + inner: Cow::Owned(self.inner), + } + } +} + +/// Custom events that can be queried from Tendermint +/// using a websocket client +#[derive( + Clone, + Debug, + Eq, + PartialEq, + Ord, + PartialOrd, + Hash, + BorshSerialize, + BorshDeserialize, + BorshDeserializer, + Serialize, + Deserialize, +)] +pub struct Event { + /// The level of the event - whether it relates to a block or an individual + /// transaction. + level: EventLevel, + /// The type of event. + event_type: EventType, + /// Key-value attributes of the event. + attributes: BTreeMap, +} + +impl Display for Event { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + // TODO: print attributes, too + write!(f, "{} in {}", self.event_type, self.level) + } +} + +/// Errors to do with emitting events. +#[derive(Error, Debug, Clone)] +pub enum EventError { + /// Invalid event domain. 
+ #[error("Invalid event domain: {0}")] + InvalidDomain(String), + /// Missing event domain. + #[error("Missing the domain of the event")] + MissingDomain, + /// Error resulting from a missing event attribute. + #[error("Missing event attribute {0:?}")] + MissingAttribute(String), + /// Error resulting from an invalid encoding of an event attribute. + #[error("Failed to parse event attribute: {0}")] + AttributeEncoding(String), + /// Error when parsing an event type + #[error("Invalid event type")] + InvalidEventType, + /// Error when parsing attributes from an event JSON. + #[error("Json missing `attributes` field")] + MissingAttributes, + /// Missing key in attributes. + #[error("Attributes missing key: {0}")] + MissingKey(String), + /// Missing value in attributes. + #[error("Attributes missing value: {0}")] + MissingValue(String), +} + +impl Event { + /// Create a new event with no attributes and the given parameters. + pub fn new(event_type: EventType, level: EventLevel) -> Self { + Self { + event_type, + level, + attributes: BTreeMap::new(), + } + } + + /// Return the level of the event. + #[inline] + pub fn level(&self) -> &EventLevel { + &self.level + } + + /// Return the type of the event. + #[inline] + pub fn kind(&self) -> &EventType { + &self.event_type + } + + /// Return a reference to the event's attributes. + #[deprecated = "Accessing the event attributes directly is deprecated. \ + Consider using domain types to compose events with \ + attributes."] + #[inline] + pub fn attributes(&self) -> &BTreeMap { + &self.attributes + } + + /// Return a mutable reference to the event's attributes. + #[deprecated = "Accessing the event attributes directly is deprecated. \ + Consider using domain types to compose events with \ + attributes."] + #[inline] + pub fn attributes_mut(&mut self) -> &mut BTreeMap { + &mut self.attributes + } + + /// Return the attributes of the event, destroying + /// it in the process. + #[inline] + pub fn into_attributes(self) -> BTreeMap { + self.attributes + } + + /// Compute the gas cost of emitting this event. + #[inline] + pub fn emission_gas_cost(&self, cost_per_byte: u64) -> u64 { + let len = self + .attributes + .iter() + .fold(0, |acc, (k, v)| acc + k.len() + v.len()); + len as u64 * cost_per_byte + } +} + +impl From for namada_core::tendermint_proto::v0_37::abci::Event { + fn from(event: Event) -> Self { + Self { + r#type: { + use extend::{Domain, RawReadFromEventAttributes}; + + if Domain::::check_if_present_in(&event.attributes) { + // NB: encode the domain of the event in the attributes. + // this is necessary for ibc events, as hermes is not + // compatible with our event type format. + event.event_type.sub_domain().to_string() + } else { + event.event_type.to_string() + } + }, + attributes: event + .attributes + .into_iter() + .map(|(key, value)| { + namada_core::tendermint_proto::v0_37::abci::EventAttribute { + key, + value, + index: true, + } + }) + .chain(std::iter::once_with(|| { + namada_core::tendermint_proto::v0_37::abci::EventAttribute { + key: "event-level".to_string(), + value: event.level.to_string(), + index: true, + } + })) + .collect(), + } + } +} + +impl From for namada_core::tendermint::abci::Event { + fn from(event: Event) -> Self { + Self { + kind: { + use extend::{Domain, RawReadFromEventAttributes}; + + if Domain::::check_if_present_in(&event.attributes) { + // NB: encode the domain of the event in the attributes. + // this is necessary for ibc events, as hermes is not + // compatible with our event type format. 
+ event.event_type.sub_domain().to_string() + } else { + event.event_type.to_string() + } + }, + attributes: event + .attributes + .into_iter() + .map(|(key, value)| { + namada_core::tendermint::abci::EventAttribute { + key, + value, + index: true, + } + }) + .chain(std::iter::once_with(|| { + namada_core::tendermint::abci::EventAttribute { + key: "event-level".to_string(), + value: event.level.to_string(), + index: true, + } + })) + .collect(), + } + } +} diff --git a/crates/events/src/testing.rs b/crates/events/src/testing.rs new file mode 100644 index 0000000000..e02a566b18 --- /dev/null +++ b/crates/events/src/testing.rs @@ -0,0 +1,21 @@ +//! Events testing utilities. + +use super::{EmitEvents, Event}; + +/// Event sink that drops any emitted events. +pub struct VoidEventSink; + +impl EmitEvents for VoidEventSink { + fn emit(&mut self, _: E) + where + E: Into, + { + } + + fn emit_many(&mut self, _: B) + where + B: IntoIterator, + E: Into, + { + } +} diff --git a/crates/gas/Cargo.toml b/crates/gas/Cargo.toml index 70e4799370..02f849df8d 100644 --- a/crates/gas/Cargo.toml +++ b/crates/gas/Cargo.toml @@ -20,6 +20,7 @@ migrations = [ [dependencies] namada_core = { path = "../core" } +namada_events = { path = "../events", default-features = false } namada_macros = {path = "../macros"} namada_migrations = {path = "../migrations", optional = true} borsh.workspace = true diff --git a/crates/gas/src/event.rs b/crates/gas/src/event.rs index 25a7fb06e7..a586298c8f 100644 --- a/crates/gas/src/event.rs +++ b/crates/gas/src/event.rs @@ -1,17 +1,19 @@ //! Gas related events. -use namada_core::event::extend::ExtendEvent; -use namada_core::event::Event; +use namada_events::extend::EventAttributeEntry; use super::Gas; /// Extend an [`Event`] with gas used data. -pub struct WithGasUsed(pub Gas); +pub struct GasUsed(pub Gas); -impl ExtendEvent for WithGasUsed { - #[inline] - fn extend_event(self, event: &mut Event) { - let Self(gas_used) = self; - event["gas_used"] = gas_used.to_string(); +impl EventAttributeEntry<'static> for GasUsed { + type Value = Gas; + type ValueOwned = Self::Value; + + const KEY: &'static str = "gas_used"; + + fn into_value(self) -> Self::Value { + self.0 } } diff --git a/crates/gas/src/lib.rs b/crates/gas/src/lib.rs index 34ac3675cd..124d4c14cd 100644 --- a/crates/gas/src/lib.rs +++ b/crates/gas/src/lib.rs @@ -5,7 +5,9 @@ pub mod event; pub mod storage; use std::fmt::Display; +use std::num::ParseIntError; use std::ops::Div; +use std::str::FromStr; use namada_core::borsh::{BorshDeserialize, BorshSchema, BorshSerialize}; use namada_core::hints; @@ -26,6 +28,11 @@ pub enum Error { GasOverflow, } +#[allow(missing_docs)] +#[derive(Error, Debug, Clone, PartialEq, Eq)] +#[error("Failed to parse gas: {0}")] +pub struct GasParseError(pub ParseIntError); + const COMPILE_GAS_PER_BYTE: u64 = 1_955; const PARALLEL_GAS_DIVIDER: u64 = 10; const WASM_CODE_VALIDATION_GAS_PER_BYTE: u64 = 67; @@ -155,6 +162,15 @@ impl Display for Gas { } } +impl FromStr for Gas { + type Err = GasParseError; + + fn from_str(s: &str) -> std::result::Result { + let gas: u64 = s.parse().map_err(GasParseError)?; + Ok(Gas::from_whole_units(gas)) + } +} + /// Trait to share gas operations for transactions and validity predicates pub trait GasMetering { /// Add gas cost. 
It will return error when the diff --git a/crates/governance/Cargo.toml b/crates/governance/Cargo.toml index 3f48aef3cd..27b87a2941 100644 --- a/crates/governance/Cargo.toml +++ b/crates/governance/Cargo.toml @@ -21,6 +21,7 @@ migrations = [ [dependencies] namada_core = { path = "../core" } +namada_events = { path = "../events", default-features = false } namada_macros = {path = "../macros"} namada_migrations = { path= "../migrations", optional = true } namada_parameters = {path = "../parameters"} @@ -30,6 +31,7 @@ namada_trans_token = {path = "../trans_token"} borsh.workspace = true itertools.workspace = true linkme = {workspace = true, optional = true} +konst.workspace = true proptest = { workspace = true, optional = true } serde_json.workspace = true serde.workspace = true diff --git a/crates/governance/src/event.rs b/crates/governance/src/event.rs new file mode 100644 index 0000000000..75d0c3027f --- /dev/null +++ b/crates/governance/src/event.rs @@ -0,0 +1,265 @@ +//! Governance transaction events. + +use std::collections::BTreeMap; + +use namada_events::extend::{EventAttributeEntry, ExtendAttributesMap}; +use namada_events::{Event, EventLevel, EventToEmit}; + +use crate::utils::TallyResult as GovTallyResult; + +pub mod types { + //! Governance event types. + + use namada_events::EventType; + + use super::ProposalEvent; + + /// Sub-domain of governance proposals. + const PROPOSAL_SUBDOMAIN: &str = "proposal"; + + /// Proposal rejected. + pub const PROPOSAL_REJECTED: EventType = namada_events::event_type!( + ProposalEvent, + PROPOSAL_SUBDOMAIN, + "rejected" + ); + + /// Proposal passed. + pub const PROPOSAL_PASSED: EventType = + namada_events::event_type!(ProposalEvent, PROPOSAL_SUBDOMAIN, "passed"); + + /// PGF steward proposal. + pub const PROPOSAL_PGF_STEWARD: EventType = namada_events::event_type!( + ProposalEvent, + PROPOSAL_SUBDOMAIN, + "pgf-steward" + ); + + /// PGF payments proposal. + pub const PROPOSAL_PGF_PAYMENTS: EventType = namada_events::event_type!( + ProposalEvent, + PROPOSAL_SUBDOMAIN, + "pgf-payments" + ); + + #[cfg(test)] + mod tests { + use super::*; + + #[test] + fn test_gov_event_type_as_str() { + assert_eq!(&*PROPOSAL_PASSED, "governance/proposal/passed"); + } + } +} + +/// Governance proposal event. +#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] +pub struct ProposalEvent { + /// ID of the governance proposal. + pub id: u64, + /// Governance proposal kind. + pub kind: ProposalEventKind, +} + +impl ProposalEvent { + /// Create a new proposal event for rejected proposal + pub fn rejected_proposal_event(proposal_id: u64) -> Self { + Self { + id: proposal_id, + kind: ProposalEventKind::Rejected, + } + } + + /// Create a new proposal event for default proposal + pub fn default_proposal_event( + proposal_id: u64, + has_code: bool, + execution_status: bool, + ) -> Self { + Self { + id: proposal_id, + kind: ProposalEventKind::Passed { + has_code, + execution_status, + }, + } + } + + /// Create a new proposal event for pgf stewards proposal + pub fn pgf_steward_proposal_event(proposal_id: u64, result: bool) -> Self { + Self { + id: proposal_id, + kind: ProposalEventKind::PgfSteward { result }, + } + } + + /// Create a new proposal event for pgf payments proposal + pub fn pgf_payments_proposal_event(proposal_id: u64, result: bool) -> Self { + Self { + id: proposal_id, + kind: ProposalEventKind::PgfPayments { result }, + } + } +} + +/// Proposal event kinds. 
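+///
+/// Usually constructed through the [`ProposalEvent`] helpers; an
+/// illustrative sketch (not part of this patch):
+///
+/// ```ignore
+/// let event: Event =
+///     ProposalEvent::rejected_proposal_event(42).into();
+/// ```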
+#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] +pub enum ProposalEventKind { + /// Governance proposal that has passed. + Passed { + /// Whether the proposal has WASM code to be executed or not. + has_code: bool, + /// The execution status of the proposal. + execution_status: bool, + }, + /// Governance proposal that has been rejected. + Rejected, + /// PGF steward governance proposal. + PgfSteward { + /// The outcome of the proposal. + result: bool, + }, + /// PGF payments governance proposal. + PgfPayments { + /// The outcome of the proposal. + result: bool, + }, +} + +impl From for Event { + fn from(proposal_event: ProposalEvent) -> Self { + let ProposalEvent { + id: proposal_id, + kind, + } = proposal_event; + + let (event_type, attributes) = match kind { + ProposalEventKind::Passed { + has_code, + execution_status, + } => ( + types::PROPOSAL_PASSED, + governance_proposal_attributes( + GovTallyResult::Passed, + proposal_id, + has_code, + execution_status, + ), + ), + ProposalEventKind::Rejected => ( + types::PROPOSAL_REJECTED, + governance_proposal_attributes( + GovTallyResult::Rejected, + proposal_id, + false, + false, + ), + ), + ProposalEventKind::PgfSteward { result } => ( + types::PROPOSAL_PGF_STEWARD, + governance_proposal_attributes( + GovTallyResult::Passed, + proposal_id, + false, + result, + ), + ), + ProposalEventKind::PgfPayments { result } => ( + types::PROPOSAL_PGF_PAYMENTS, + governance_proposal_attributes( + GovTallyResult::Passed, + proposal_id, + false, + result, + ), + ), + }; + + let mut event = Self::new(event_type, EventLevel::Block); + + #[allow(deprecated)] + { + *event.attributes_mut() = attributes; + } + + event + } +} + +/// Return the attributes of a governance proposal. +#[inline] +fn governance_proposal_attributes( + tally: GovTallyResult, + id: u64, + has_proposal_code: bool, + proposal_code_exit_status: bool, +) -> BTreeMap { + let mut attrs = BTreeMap::new(); + attrs + .with_attribute(TallyResult(tally)) + .with_attribute(ProposalId(id)) + .with_attribute(HasProposalCode(has_proposal_code)) + .with_attribute(ProposalCodeExitStatus(proposal_code_exit_status)); + attrs +} + +impl EventToEmit for ProposalEvent { + const DOMAIN: &'static str = "governance"; +} + +/// Extend an [`Event`] with tally result data. +pub struct TallyResult(pub GovTallyResult); + +impl EventAttributeEntry<'static> for TallyResult { + type Value = GovTallyResult; + type ValueOwned = Self::Value; + + const KEY: &'static str = "tally_result"; + + fn into_value(self) -> Self::Value { + self.0 + } +} + +/// Extend an [`Event`] with proposal id data. +pub struct ProposalId(pub u64); + +impl EventAttributeEntry<'static> for ProposalId { + type Value = u64; + type ValueOwned = Self::Value; + + const KEY: &'static str = "proposal_id"; + + fn into_value(self) -> Self::Value { + self.0 + } +} + +/// Extend an [`Event`] with has proposal code data. +pub struct HasProposalCode(pub bool); + +impl EventAttributeEntry<'static> for HasProposalCode { + type Value = bool; + type ValueOwned = Self::Value; + + const KEY: &'static str = "has_proposal_code"; + + fn into_value(self) -> Self::Value { + self.0 + } +} + +/// Extend an [`Event`] with proposal code exit status data. 
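+///
+/// Illustrative sketch (not part of this patch), mirroring how
+/// `governance_proposal_attributes` uses it above:
+///
+/// ```ignore
+/// attrs.with_attribute(ProposalCodeExitStatus(true));
+/// ```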
+pub struct ProposalCodeExitStatus(pub bool); + +impl EventAttributeEntry<'static> for ProposalCodeExitStatus { + type Value = bool; + type ValueOwned = Self::Value; + + const KEY: &'static str = "proposal_code_exit_status"; + + fn into_value(self) -> Self::Value { + self.0 + } +} diff --git a/crates/governance/src/lib.rs b/crates/governance/src/lib.rs index f163ffc295..925195bb41 100644 --- a/crates/governance/src/lib.rs +++ b/crates/governance/src/lib.rs @@ -4,6 +4,7 @@ use namada_core::address::{self, Address}; /// governance CLI structures pub mod cli; +pub mod event; /// governance parameters pub mod parameters; /// governance public good fundings diff --git a/crates/governance/src/utils.rs b/crates/governance/src/utils.rs index 1602d532be..d20a72298e 100644 --- a/crates/governance/src/utils.rs +++ b/crates/governance/src/utils.rs @@ -1,4 +1,5 @@ use std::fmt::Display; +use std::str::FromStr; use namada_core::address::Address; use namada_core::borsh::{BorshDeserialize, BorshSerialize}; @@ -114,6 +115,21 @@ impl Display for TallyResult { } } +impl FromStr for TallyResult { + type Err = String; + + fn from_str(s: &str) -> Result { + match s { + "passed" => Ok(Self::Passed), + "rejected" => Ok(Self::Rejected), + t => Err(format!( + "Tally result value of {t:?} does not match \"passed\" nor \ + \"rejected\"" + )), + } + } +} + impl TallyResult { /// Create a new tally result pub fn new( diff --git a/crates/ibc/Cargo.toml b/crates/ibc/Cargo.toml index 3d37da69fb..431d1d143e 100644 --- a/crates/ibc/Cargo.toml +++ b/crates/ibc/Cargo.toml @@ -14,18 +14,27 @@ version.workspace = true [features] default = [] +migrations = [ + "namada_migrations", + "linkme", +] testing = ["namada_core/testing", "ibc-testkit", "proptest"] [dependencies] namada_core = { path = "../core" } +namada_events = { path = "../events", default-features = false } namada_gas = { path = "../gas" } namada_governance = { path = "../governance" } +namada_macros = {path = "../macros"} +namada_migrations = {path = "../migrations", optional = true} namada_parameters = { path = "../parameters" } namada_state = { path = "../state" } namada_storage = { path = "../storage" } namada_token = { path = "../token" } borsh.workspace = true +konst.workspace = true +linkme = {workspace = true, optional = true} ibc.workspace = true ibc-derive.workspace = true ibc-testkit = {workspace = true, optional = true} @@ -34,6 +43,7 @@ masp_primitives.workspace = true primitive-types.workspace = true proptest = { workspace = true, optional = true } prost.workspace = true +serde.workspace = true serde_json.workspace = true sha2.workspace = true thiserror.workspace = true diff --git a/crates/ibc/src/actions.rs b/crates/ibc/src/actions.rs index 9ff0d7e323..eb71c9e940 100644 --- a/crates/ibc/src/actions.rs +++ b/crates/ibc/src/actions.rs @@ -4,15 +4,16 @@ use std::cell::RefCell; use std::collections::BTreeSet; use std::rc::Rc; -use namada_core::address::{Address, InternalAddress}; +use namada_core::address::Address; use namada_core::borsh::BorshSerializeExt; use namada_core::ibc::apps::transfer::types::msgs::transfer::MsgTransfer as IbcMsgTransfer; use namada_core::ibc::apps::transfer::types::packet::PacketData; use namada_core::ibc::apps::transfer::types::PrefixedCoin; use namada_core::ibc::core::channel::types::timeout::TimeoutHeight; -use namada_core::ibc::{IbcEvent, MsgTransfer}; +use namada_core::ibc::MsgTransfer; use namada_core::tendermint::Time as TmTime; use namada_core::token::Amount; +use namada_events::{EmitEvents, EventTypeBuilder}; use 
namada_governance::storage::proposal::PGFIbcTarget; use namada_parameters::read_epoch_duration_parameter; use namada_state::{ @@ -22,14 +23,14 @@ use namada_state::{ use namada_token as token; use token::DenominatedAmount; -use crate::{IbcActions, IbcCommonContext, IbcStorageContext}; +use crate::event::IbcEvent; +use crate::{ + storage as ibc_storage, IbcActions, IbcCommonContext, IbcStorageContext, +}; /// IBC protocol context #[derive(Debug)] -pub struct IbcProtocolContext<'a, S> -where - S: State, -{ +pub struct IbcProtocolContext<'a, S> { state: &'a mut S, } @@ -119,7 +120,7 @@ where H: 'static + StorageHasher, { fn emit_ibc_event(&mut self, event: IbcEvent) -> Result<(), StorageError> { - let gas = self.write_log_mut().emit_ibc_event(event); + let gas = self.write_log_mut().emit_event(event); self.charge_gas(gas).into_storage_result()?; Ok(()) } @@ -128,12 +129,14 @@ where &self, event_type: impl AsRef, ) -> Result, StorageError> { + let event_type = EventTypeBuilder::new_of::() + .with_segment(event_type) + .build(); + Ok(self .write_log() - .get_ibc_events() - .iter() - .filter(|event| event.event_type == event_type.as_ref()) - .cloned() + .lookup_events_with_prefix(&event_type) + .filter_map(|event| IbcEvent::try_from(event).ok()) .collect()) } @@ -152,8 +155,8 @@ where shielded: &masp_primitives::transaction::Transaction, pin_key: Option<&str>, ) -> Result<(), StorageError> { - namada_token::utils::handle_masp_tx(self, shielded, pin_key)?; - namada_token::utils::update_note_commitment_tree(self, shielded) + token::utils::handle_masp_tx(self, shielded, pin_key)?; + token::utils::update_note_commitment_tree(self, shielded) } fn mint_token( @@ -162,9 +165,7 @@ where token: &Address, amount: Amount, ) -> Result<(), StorageError> { - token::credit_tokens(self, token, target, amount)?; - let minter_key = token::storage_key::minter_key(token); - self.write(&minter_key, Address::Internal(InternalAddress::Ibc)) + ibc_storage::mint_tokens(self, target, token, amount) } fn burn_token( @@ -173,7 +174,7 @@ where token: &Address, amount: Amount, ) -> Result<(), StorageError> { - token::burn_tokens(self, token, target, amount) + ibc_storage::burn_tokens(self, target, token, amount) } fn log_string(&self, message: String) { @@ -190,10 +191,10 @@ where impl IbcStorageContext for IbcProtocolContext<'_, S> where - S: State, + S: State + EmitEvents, { fn emit_ibc_event(&mut self, event: IbcEvent) -> Result<(), StorageError> { - self.state.write_log_mut().emit_ibc_event(event); + self.state.write_log_mut().emit_event(event); Ok(()) } @@ -202,13 +203,15 @@ where &self, event_type: impl AsRef, ) -> Result, StorageError> { + let event_type = EventTypeBuilder::new_of::() + .with_segment(event_type) + .build(); + Ok(self .state .write_log() - .get_ibc_events() - .iter() - .filter(|event| event.event_type == event_type.as_ref()) - .cloned() + .lookup_events_with_prefix(&event_type) + .filter_map(|event| IbcEvent::try_from(event).ok()) .collect()) } @@ -239,10 +242,7 @@ where token: &Address, amount: Amount, ) -> Result<(), StorageError> { - token::credit_tokens(self.state, token, target, amount)?; - let minter_key = token::storage_key::minter_key(token); - self.state - .write(&minter_key, Address::Internal(InternalAddress::Ibc)) + ibc_storage::mint_tokens(self.state, target, token, amount) } /// Burn token @@ -252,7 +252,7 @@ where token: &Address, amount: Amount, ) -> Result<(), StorageError> { - token::burn_tokens(self.state, token, target, amount) + ibc_storage::burn_tokens(self.state, target, token, 
amount) } fn log_string(&self, message: String) { @@ -260,7 +260,10 @@ where } } -impl IbcCommonContext for IbcProtocolContext<'_, S> where S: State {} +impl IbcCommonContext for IbcProtocolContext<'_, S> where + S: State + EmitEvents +{ +} /// Transfer tokens over IBC pub fn transfer_over_ibc( diff --git a/crates/ibc/src/context/storage.rs b/crates/ibc/src/context/storage.rs index 21c0dc4b1d..138042d723 100644 --- a/crates/ibc/src/context/storage.rs +++ b/crates/ibc/src/context/storage.rs @@ -2,10 +2,11 @@ pub use ics23::ProofSpec; use namada_core::address::Address; -use namada_core::ibc::IbcEvent; use namada_core::token::Amount; use namada_storage::{Error, StorageRead, StorageWrite}; +use crate::event::IbcEvent; + /// IBC context trait to be implemented in integration that can read and write pub trait IbcStorageContext: StorageRead + StorageWrite { /// Emit an IBC event diff --git a/crates/ibc/src/event.rs b/crates/ibc/src/event.rs new file mode 100644 index 0000000000..98e514de6a --- /dev/null +++ b/crates/ibc/src/event.rs @@ -0,0 +1,524 @@ +//! IBC event related types + +use std::cmp::Ordering; +use std::str::FromStr; + +use namada_core::borsh::*; +use namada_core::collections::HashMap; +use namada_core::ibc::core::channel::types::packet::Packet; +use namada_core::ibc::core::channel::types::timeout::TimeoutHeight as IbcTimeoutHeight; +use namada_core::ibc::core::client::types::events::{ + CLIENT_ID_ATTRIBUTE_KEY, CONSENSUS_HEIGHTS_ATTRIBUTE_KEY, +}; +use namada_core::ibc::core::client::types::{Height as IbcHeight, HeightError}; +use namada_core::ibc::core::handler::types::events::IbcEvent as RawIbcEvent; +use namada_core::ibc::core::host::types::identifiers::{ + ChannelId as IbcChannelId, ClientId as IbcClientId, + ConnectionId as IbcConnectionId, PortId, Sequence, +}; +use namada_core::ibc::primitives::Timestamp; +use namada_core::tendermint::abci::Event as AbciEvent; +use namada_events::extend::{ + event_domain_of, AttributesMap, EventAttributeEntry, ExtendAttributesMap, + ReadFromEventAttributes as _, +}; +use namada_events::{ + Event, EventError, EventLevel, EventToEmit, EventTypeBuilder, +}; +use namada_macros::BorshDeserializer; +#[cfg(feature = "migrations")] +use namada_migrations::*; +use serde::{Deserialize, Serialize}; + +/// Describes a token event within IBC. +pub const TOKEN_EVENT_DESCRIPTOR: &str = IbcEvent::DOMAIN; + +pub mod types { + //! IBC event types. + + use namada_core::ibc::core::client::types::events::UPDATE_CLIENT_EVENT; + use namada_events::{event_type, EventType}; + + use super::IbcEvent; + + /// Update client. + pub const UPDATE_CLIENT: EventType = + event_type!(IbcEvent, UPDATE_CLIENT_EVENT); +} + +/// IBC event kind. 
+#[derive( + Debug, + Clone, + BorshSerialize, + BorshDeserialize, + BorshDeserializer, + BorshSchema, + PartialEq, + Eq, + PartialOrd, + Ord, + Serialize, + Deserialize, +)] +#[repr(transparent)] +pub struct IbcEventType(pub String); + +impl EventToEmit for IbcEvent { + const DOMAIN: &'static str = "ibc"; +} + +impl From for Event { + fn from(ibc_event: IbcEvent) -> Self { + let mut event = Self::new( + EventTypeBuilder::new_of::() + .with_segment(ibc_event.event_type.0) + .build(), + EventLevel::Tx, + ); + #[allow(deprecated)] + { + *event.attributes_mut() = + ibc_event.attributes.into_iter().collect(); + } + event.extend(event_domain_of::()); + event + } +} + +impl std::fmt::Display for IbcEventType { + #[inline] + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} + +impl FromStr for IbcEventType { + type Err = std::convert::Infallible; + + #[inline(always)] + fn from_str(s: &str) -> Result { + Ok(IbcEventType(s.to_owned())) + } +} + +impl std::cmp::PartialEq for IbcEventType { + fn eq(&self, other: &String) -> bool { + self.0.eq(other) + } +} + +impl std::cmp::PartialEq for IbcEventType { + fn eq(&self, other: &str) -> bool { + self.0.eq(other) + } +} + +impl std::cmp::PartialEq<&str> for IbcEventType { + fn eq(&self, other: &&str) -> bool { + self.0.eq(other) + } +} + +/// Wrapped IbcEvent +#[derive( + Debug, + Clone, + BorshSerialize, + BorshDeserialize, + BorshDeserializer, + BorshSchema, + PartialEq, + Eq, + Serialize, + Deserialize, +)] +pub struct IbcEvent { + /// The IBC event type + pub event_type: IbcEventType, + /// The attributes of the IBC event + pub attributes: HashMap, +} + +fn validate_ibc_event_type( + namada_event: &Event, +) -> Result { + if namada_event.kind().domain() != IbcEvent::DOMAIN { + return Err(EventError::InvalidEventType); + } + + let event_type = namada_event.kind().sub_domain(); + + // if !matches!( + // event_type, + // // TODO: add other ibc event types that we use in namada + // "update_client" | "send_packet" | "write_acknowledgement" + //) { + // return Err(EventError::InvalidEventType); + //} + + Ok(IbcEventType(event_type.to_owned())) +} + +impl TryFrom<&Event> for IbcEvent { + type Error = EventError; + + fn try_from( + namada_event: &Event, + ) -> std::result::Result { + Ok(Self { + event_type: validate_ibc_event_type(namada_event)?, + #[allow(deprecated)] + attributes: namada_event + .attributes() + .iter() + .map(|(k, v)| (k.clone(), v.clone())) + .collect(), + }) + } +} + +impl TryFrom for IbcEvent { + type Error = EventError; + + fn try_from(namada_event: Event) -> std::result::Result { + Ok(Self { + event_type: validate_ibc_event_type(&namada_event)?, + attributes: { + let mut attrs: HashMap<_, _> = + namada_event.into_attributes().into_iter().collect(); + attrs.with_attribute(event_domain_of::()); + attrs + }, + }) + } +} + +impl std::cmp::PartialOrd for IbcEvent { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl std::cmp::Ord for IbcEvent { + fn cmp(&self, other: &Self) -> Ordering { + // should not compare the same event type + self.event_type.cmp(&other.event_type) + } +} + +impl std::fmt::Display for IbcEvent { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let attributes = self + .attributes + .iter() + .map(|(k, v)| format!("{}: {};", k, v)) + .collect::>() + .join(", "); + write!( + f, + "Event type: {}, Attributes: {}", + self.event_type, attributes + ) + } +} + +impl TryFrom for IbcEvent { + type Error = 
super::Error; + + fn try_from(e: RawIbcEvent) -> Result { + let event_type = IbcEventType(e.event_type().to_string()); + let abci_event = + AbciEvent::try_from(e).map_err(super::Error::IbcEvent)?; + let attributes: HashMap<_, _> = abci_event + .attributes + .iter() + .map(|tag| (tag.key.to_string(), tag.value.to_string())) + .collect(); + Ok(Self { + event_type, + attributes, + }) + } +} + +/// Extend an [`Event`] with packet sequence data. +pub struct PacketSequence(pub Sequence); + +impl EventAttributeEntry<'static> for PacketSequence { + type Value = Sequence; + type ValueOwned = Self::Value; + + const KEY: &'static str = "packet_sequence"; + + fn into_value(self) -> Self::Value { + self.0 + } +} + +/// Extend an [`Event`] with packet source port data. +pub struct PacketSrcPort(pub PortId); + +impl EventAttributeEntry<'static> for PacketSrcPort { + type Value = PortId; + type ValueOwned = Self::Value; + + const KEY: &'static str = "packet_src_port"; + + fn into_value(self) -> Self::Value { + self.0 + } +} + +/// Extend an [`Event`] with packet source channel data. +pub struct PacketSrcChannel(pub IbcChannelId); + +impl EventAttributeEntry<'static> for PacketSrcChannel { + type Value = IbcChannelId; + type ValueOwned = Self::Value; + + const KEY: &'static str = "packet_src_channel"; + + fn into_value(self) -> Self::Value { + self.0 + } +} + +/// Extend an [`Event`] with packet destination port data. +pub struct PacketDstPort(pub PortId); + +impl EventAttributeEntry<'static> for PacketDstPort { + type Value = PortId; + type ValueOwned = Self::Value; + + const KEY: &'static str = "packet_dst_port"; + + fn into_value(self) -> Self::Value { + self.0 + } +} + +/// Extend an [`Event`] with packet destination channel data. +pub struct PacketDstChannel(pub IbcChannelId); + +impl EventAttributeEntry<'static> for PacketDstChannel { + type Value = IbcChannelId; + type ValueOwned = Self::Value; + + const KEY: &'static str = "packet_dst_channel"; + + fn into_value(self) -> Self::Value { + self.0 + } +} + +/// Extend an [`Event`] with client id data. +pub struct ClientId(pub IbcClientId); + +impl EventAttributeEntry<'static> for ClientId { + type Value = IbcClientId; + type ValueOwned = Self::Value; + + const KEY: &'static str = CLIENT_ID_ATTRIBUTE_KEY; + + fn into_value(self) -> Self::Value { + self.0 + } +} + +/// Extend an [`Event`] with consensus heights data. +pub struct ConsensusHeights(pub IbcHeight); + +impl EventAttributeEntry<'static> for ConsensusHeights { + type Value = IbcHeight; + type ValueOwned = Self::Value; + + const KEY: &'static str = CONSENSUS_HEIGHTS_ATTRIBUTE_KEY; + + fn into_value(self) -> Self::Value { + self.0 + } +} + +/// Extend an [`Event`] with connection id data. +pub struct ConnectionId(pub IbcConnectionId); + +impl EventAttributeEntry<'static> for ConnectionId { + type Value = IbcConnectionId; + type ValueOwned = Self::Value; + + const KEY: &'static str = "connection_id"; + + fn into_value(self) -> Self::Value { + self.0 + } +} + +/// Extend an [`Event`] with packet data. +pub struct PacketData<'data>(pub &'data str); + +impl<'data> EventAttributeEntry<'data> for PacketData<'data> { + type Value = &'data str; + type ValueOwned = String; + + const KEY: &'static str = "packet_data"; + + fn into_value(self) -> Self::Value { + self.0 + } +} + +/// Represents an IBC timeout height. 
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct TimeoutHeight(pub IbcTimeoutHeight); + +impl FromStr for TimeoutHeight { + type Err = HeightError; + + fn from_str(s: &str) -> Result { + namada_core::ibc::core::client::types::Height::from_str(s).map_or_else( + |err| match err { + HeightError::ZeroHeight => { + Ok(TimeoutHeight(IbcTimeoutHeight::Never)) + } + err => Err(err), + }, + |height| Ok(TimeoutHeight(IbcTimeoutHeight::At(height))), + ) + } +} + +impl std::fmt::Display for TimeoutHeight { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match &self.0 { + IbcTimeoutHeight::Never => write!(f, "0-0"), + IbcTimeoutHeight::At(h) => write!(f, "{h}"), + } + } +} + +/// Extend an [`Event`] with packet timeout height data. +pub struct PacketTimeoutHeight(pub TimeoutHeight); + +impl EventAttributeEntry<'static> for PacketTimeoutHeight { + type Value = TimeoutHeight; + type ValueOwned = Self::Value; + + const KEY: &'static str = "packet_timeout_height"; + + fn into_value(self) -> Self::Value { + self.0 + } +} + +/// Extend an [`Event`] with packet timeout timestamp data. +pub struct PacketTimeoutTimestamp(pub Timestamp); + +impl EventAttributeEntry<'static> for PacketTimeoutTimestamp { + type Value = Timestamp; + type ValueOwned = Self::Value; + + const KEY: &'static str = "packet_timeout_timestamp"; + + fn into_value(self) -> Self::Value { + self.0 + } +} + +/// Extend an [`Event`] with channel id data. +pub struct ChannelId(pub IbcChannelId); + +impl EventAttributeEntry<'static> for ChannelId { + type Value = IbcChannelId; + type ValueOwned = Self::Value; + + const KEY: &'static str = "channel_id"; + + fn into_value(self) -> Self::Value { + self.0 + } +} + +/// Extend an [`Event`] with packet ack data. +pub struct PacketAck<'ack>(pub &'ack str); + +impl<'ack> EventAttributeEntry<'ack> for PacketAck<'ack> { + type Value = &'ack str; + type ValueOwned = String; + + const KEY: &'static str = "packet_ack"; + + fn into_value(self) -> Self::Value { + self.0 + } +} + +/// Attempt to parse an IBC [`Packet`] from a set of event attributes. +pub fn packet_from_event_attributes( + attributes: &A, +) -> Result { + Ok(Packet { + seq_on_a: PacketSequence::read_from_event_attributes(attributes)?, + port_id_on_a: PacketSrcPort::read_from_event_attributes(attributes)?, + chan_id_on_a: PacketSrcChannel::read_from_event_attributes(attributes)?, + port_id_on_b: PacketDstPort::read_from_event_attributes(attributes)?, + chan_id_on_b: PacketDstChannel::read_from_event_attributes(attributes)?, + data: PacketData::read_from_event_attributes(attributes)?.into_bytes(), + timeout_height_on_b: PacketTimeoutHeight::read_from_event_attributes( + attributes, + )? 
+ .0, + timeout_timestamp_on_b: + PacketTimeoutTimestamp::read_from_event_attributes(attributes)?, + }) +} + +#[cfg(test)] +mod tests { + use namada_core::hash::Hash; + use namada_core::tendermint_proto::v0_37::abci::Event as AbciEventV037; + use namada_events::extend::{ + ComposeEvent as _, Domain, Height, Log, + RawReadFromEventAttributes as _, TxHash, + }; + + use super::*; + + #[test] + fn test_ibc_domain_encoded_in_abci_event_attrs() { + const EVENT_TYPE: &str = "update_account"; + + let event: Event = IbcEvent { + event_type: IbcEventType(EVENT_TYPE.into()), + attributes: Default::default(), + } + .into(); + + let event: AbciEventV037 = event.into(); + + assert_eq!(event.r#type, EVENT_TYPE); + assert_eq!( + Some(IbcEvent::DOMAIN), + Domain::::raw_read_opt_from_event_attributes( + &event.attributes + ) + ); + } + + #[test] + fn test_domain_of_composed_ibc_event() { + let composite_event = IbcEvent { + event_type: IbcEventType("update_account".into()), + attributes: Default::default(), + } + .with(Log("this is sparta!".to_string())) + .with(Height(300.into())) + .with(TxHash(Hash::default())); + + fn event_domain(_: &E) -> &'static str { + E::DOMAIN + } + + assert_eq!(event_domain(&composite_event), IbcEvent::DOMAIN); + } +} diff --git a/crates/ibc/src/lib.rs b/crates/ibc/src/lib.rs index dd686f5fe9..b7efdbc268 100644 --- a/crates/ibc/src/lib.rs +++ b/crates/ibc/src/lib.rs @@ -2,6 +2,7 @@ mod actions; pub mod context; +pub mod event; pub mod parameters; pub mod storage; @@ -9,7 +10,6 @@ use std::cell::RefCell; use std::collections::BTreeSet; use std::fmt::Debug; use std::rc::Rc; -use std::str::FromStr; pub use actions::transfer_over_ibc; use borsh::BorshDeserialize; @@ -48,6 +48,7 @@ use namada_core::ibc::core::channel::types::msgs::{ }; use namada_core::ibc::core::entrypoint::{execute, validate}; use namada_core::ibc::core::handler::types::error::ContextError; +use namada_core::ibc::core::handler::types::events::Error as RawIbcEventError; use namada_core::ibc::core::handler::types::msgs::MsgEnvelope; use namada_core::ibc::core::host::types::error::IdentifierError; use namada_core::ibc::core::host::types::identifiers::{ChannelId, PortId}; @@ -55,6 +56,7 @@ use namada_core::ibc::core::router::types::error::RouterError; use namada_core::ibc::primitives::proto::Any; pub use namada_core::ibc::*; use namada_core::masp::PaymentAddress; +use namada_events::extend::{ReadFromEventAttributes, Success as SuccessAttr}; use namada_token::Transfer; use prost::Message; use thiserror::Error; @@ -62,6 +64,8 @@ use thiserror::Error; #[allow(missing_docs)] #[derive(Error, Debug)] pub enum Error { + #[error("IBC event error: {0}")] + IbcEvent(RawIbcEventError), #[error("Decoding IBC data error")] DecodingData, #[error("Decoding message error: {0}")] @@ -219,7 +223,7 @@ where return Ok(()); } let receiver = - if PaymentAddress::from_str(data.receiver.as_ref()).is_ok() { + if data.receiver.as_ref().parse::().is_ok() { MASP.to_string() } else { data.receiver.to_string() @@ -244,7 +248,7 @@ where }) .collect(); let receiver = - if PaymentAddress::from_str(data.receiver.as_ref()).is_ok() { + if data.receiver.as_ref().parse::().is_ok() { MASP.to_string() } else { data.receiver.to_string() @@ -318,14 +322,18 @@ where Error::Trace("Reading the IBC event failed".to_string()) })?; } - match receive_event - .first() - .as_ref() - .and_then(|event| event.attributes.get(EVENT_ATTRIBUTE_SUCCESS)) - { - Some(success) if success == EVENT_VALUE_SUCCESS => Ok(true), - _ => Ok(false), - } + 
receive_event.first().as_ref().map_or_else( + || Ok(false), + |event| { + let success = SuccessAttr::read_opt_from_event_attributes( + &event.attributes, + ) + .map_err(|err| { + Error::Trace(format!("Reading the IBC event failed: {err}")) + })?; + Ok(success.unwrap_or(false)) + }, + ) } /// Validate according to the message in IBC VP diff --git a/crates/ibc/src/storage.rs b/crates/ibc/src/storage.rs index e506cfbb0e..c9fa19839a 100644 --- a/crates/ibc/src/storage.rs +++ b/crates/ibc/src/storage.rs @@ -16,10 +16,14 @@ use namada_core::ibc::core::host::types::path::{ use namada_core::ibc::IbcTokenHash; use namada_core::storage::{DbKeySeg, Key, KeySeg}; use namada_core::token::Amount; -use namada_state::{StorageRead, StorageResult}; +use namada_events::{EmitEvents, EventLevel}; +use namada_state::{StorageRead, StorageResult, StorageWrite}; +use namada_token as token; +use namada_token::event::{TokenEvent, TokenOperation, UserAccount}; use sha2::{Digest, Sha256}; use thiserror::Error; +use crate::event::TOKEN_EVENT_DESCRIPTOR; use crate::parameters::IbcParameters; const CLIENTS_COUNTER_PREFIX: &str = "clients"; @@ -50,6 +54,64 @@ pub enum Error { /// IBC storage functions result pub type Result = std::result::Result; +/// Mint tokens, and emit an IBC token mint event. +pub fn mint_tokens( + state: &mut S, + target: &Address, + token: &Address, + amount: Amount, +) -> StorageResult<()> +where + S: StorageRead + StorageWrite + EmitEvents, +{ + token::mint_tokens( + state, + &Address::Internal(InternalAddress::Ibc), + token, + target, + amount, + )?; + + state.emit(TokenEvent { + descriptor: TOKEN_EVENT_DESCRIPTOR.into(), + level: EventLevel::Tx, + token: token.clone(), + operation: TokenOperation::Mint { + amount: amount.into(), + post_balance: token::read_balance(state, token, target)?.into(), + target_account: UserAccount::Internal(target.clone()), + }, + }); + + Ok(()) +} + +/// Burn tokens, and emit an IBC token burn event. +pub fn burn_tokens( + state: &mut S, + target: &Address, + token: &Address, + amount: Amount, +) -> StorageResult<()> +where + S: StorageRead + StorageWrite + EmitEvents, +{ + token::burn_tokens(state, token, target, amount)?; + + state.emit(TokenEvent { + descriptor: TOKEN_EVENT_DESCRIPTOR.into(), + level: EventLevel::Tx, + token: token.clone(), + operation: TokenOperation::Burn { + amount: amount.into(), + post_balance: token::read_balance(state, token, target)?.into(), + target_account: UserAccount::Internal(target.clone()), + }, + }); + + Ok(()) +} + /// Returns a key of the IBC-related data pub fn ibc_key(path: impl AsRef) -> Result { let path = Key::parse(path).map_err(Error::StorageKey)?; diff --git a/crates/light_sdk/src/reading/asynchronous/tx.rs b/crates/light_sdk/src/reading/asynchronous/tx.rs index 63cd3577ef..01c1f06cb0 100644 --- a/crates/light_sdk/src/reading/asynchronous/tx.rs +++ b/crates/light_sdk/src/reading/asynchronous/tx.rs @@ -50,15 +50,8 @@ pub async fn query_tx_response( tendermint_addr: &str, tx_hash: &str, ) -> Result { - let client = HttpClient::new( - TendermintAddress::from_str(tendermint_addr) - .map_err(|e| Error::Other(e.to_string()))?, - ) - .map_err(|e| Error::Other(e.to_string()))?; - let tx_query = TxEventQuery::Applied(tx_hash); - rpc::query_tx_response(&client, tx_query) - .await - .map_err(|e| Error::Other(e.to_string())) + let event = query_tx_status(tendermint_addr, tx_hash).await?; + event.try_into().map_err(Error::Other) } /// Query the status of a given transaction. 
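The governance `event.rs` and IBC `event.rs` modules above both build on the same typed-attribute pattern from `namada_events::extend`: an `EventAttributeEntry` names a string key and a value type, `ExtendAttributesMap::with_attribute` writes it into an attribute map, and `ReadFromEventAttributes` parses it back out (as `packet_from_event_attributes` does for packet fields). The sketch below illustrates that round trip; the `BlockCount` attribute and the `attribute_round_trip` function are hypothetical, introduced only for illustration, and the error type of the typed read is assumed to be `namada_events::EventError`.

```rust
// Minimal sketch of the typed event-attribute round trip used throughout this
// PR. `BlockCount` is a hypothetical attribute, added only for illustration.
use std::collections::BTreeMap;

use namada_events::extend::{
    EventAttributeEntry, ExtendAttributesMap, ReadFromEventAttributes as _,
};
use namada_events::EventError;

/// Hypothetical attribute: a count of blocks.
pub struct BlockCount(pub u64);

impl EventAttributeEntry<'static> for BlockCount {
    type Value = u64;
    type ValueOwned = Self::Value;

    const KEY: &'static str = "block_count";

    fn into_value(self) -> Self::Value {
        self.0
    }
}

fn attribute_round_trip() -> Result<(), EventError> {
    // Write the attribute into a plain string map, the same way
    // `governance_proposal_attributes` builds proposal attributes.
    let mut attrs: BTreeMap<String, String> = BTreeMap::new();
    attrs.with_attribute(BlockCount(42));

    // Read the value back through the typed accessor, mirroring what
    // `packet_from_event_attributes` does for IBC packet fields.
    let count = BlockCount::read_from_event_attributes(&attrs)?;
    assert_eq!(count, 42);
    Ok(())
}
```

Because each attribute carries its own key and string encoding, new attributes (such as the `tally_result` or `packet_timeout_height` entries above) can be added without touching the core `Event` type.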
diff --git a/crates/light_sdk/src/reading/blocking/tx.rs b/crates/light_sdk/src/reading/blocking/tx.rs index 19a3513dbf..6e98d9adb2 100644 --- a/crates/light_sdk/src/reading/blocking/tx.rs +++ b/crates/light_sdk/src/reading/blocking/tx.rs @@ -50,15 +50,8 @@ pub fn query_tx_response( tendermint_addr: &str, tx_hash: &str, ) -> Result { - let client = HttpClient::new( - TendermintAddress::from_str(tendermint_addr) - .map_err(|e| Error::Other(e.to_string()))?, - ) - .map_err(|e| Error::Other(e.to_string()))?; - let tx_query = TxEventQuery::Applied(tx_hash); - let rt = Runtime::new().unwrap(); - rt.block_on(rpc::query_tx_response(&client, tx_query)) - .map_err(|e| Error::Other(e.to_string())) + let event = query_tx_status(tendermint_addr, tx_hash)?; + event.try_into().map_err(Error::Other) } /// Query the status of a given transaction. diff --git a/crates/namada/Cargo.toml b/crates/namada/Cargo.toml index 7d3538cab5..6db90a8355 100644 --- a/crates/namada/Cargo.toml +++ b/crates/namada/Cargo.toml @@ -88,6 +88,7 @@ benches = ["namada_core/benches", "namada_state/benches"] [dependencies] namada_account = { path = "../account" } namada_core = { path = "../core" } +namada_events = { path = "../events", default-features = false } namada_ethereum_bridge = { path = "../ethereum_bridge", default-features = false } namada_gas = { path = "../gas" } namada_governance = { path = "../governance" } @@ -119,6 +120,7 @@ ethers.workspace = true eyre.workspace = true futures.workspace = true itertools.workspace = true +konst.workspace = true linkme = {workspace = true, optional = true} loupe = { version = "0.1.3", optional = true } masp_primitives.workspace = true diff --git a/crates/namada/src/ledger/governance/utils.rs b/crates/namada/src/ledger/governance/utils.rs index ec31e87d54..34c04ba7a8 100644 --- a/crates/namada/src/ledger/governance/utils.rs +++ b/crates/namada/src/ledger/governance/utils.rs @@ -1,12 +1,7 @@ //! 
Governance utility functions -use namada_core::collections::HashMap; -use namada_governance::utils::TallyResult; -use namada_sdk::events::{Event, EventLevel}; use thiserror::Error; -use crate::ledger::events::EventType; - pub(super) enum ReadType { Pre, Post, @@ -25,108 +20,3 @@ pub enum Error { #[error("Error while tallying proposal: {0}")] Tally(String), } - -/// Proposal event definition -pub struct ProposalEvent { - /// Proposal event type - pub event_type: String, - /// Proposal event attributes - pub attributes: HashMap, -} - -impl From for Event { - fn from(proposal_event: ProposalEvent) -> Self { - Self { - event_type: EventType::Proposal, - level: EventLevel::Block, - attributes: proposal_event.attributes, - } - } -} - -impl ProposalEvent { - /// Create a proposal event - pub fn new( - event_type: String, - tally: TallyResult, - id: u64, - has_proposal_code: bool, - proposal_code_exit_status: bool, - ) -> Self { - let attributes = HashMap::from([ - ("tally_result".to_string(), tally.to_string()), - ("proposal_id".to_string(), id.to_string()), - ( - "has_proposal_code".to_string(), - (!has_proposal_code as u64).to_string(), - ), - ( - "proposal_code_exit_status".to_string(), - (!proposal_code_exit_status as u64).to_string(), - ), - ]); - Self { - event_type, - attributes, - } - } - - /// Create a new proposal event for rejected proposal - pub fn rejected_proposal_event(proposal_id: u64) -> Self { - ProposalEvent::new( - EventType::Proposal.to_string(), - TallyResult::Rejected, - proposal_id, - false, - false, - ) - } - - /// Create a new proposal event for default proposal - pub fn default_proposal_event( - proposal_id: u64, - has_code: bool, - execution_status: bool, - ) -> Self { - ProposalEvent::new( - EventType::Proposal.to_string(), - TallyResult::Passed, - proposal_id, - has_code, - execution_status, - ) - } - - /// Create a new proposal event for pgf stewards proposal - pub fn pgf_steward_proposal_event(proposal_id: u64, result: bool) -> Self { - ProposalEvent::new( - EventType::Proposal.to_string(), - TallyResult::Passed, - proposal_id, - false, - result, - ) - } - - /// Create a new proposal event for pgf payments proposal - pub fn pgf_payments_proposal_event(proposal_id: u64, result: bool) -> Self { - ProposalEvent::new( - EventType::Proposal.to_string(), - TallyResult::Passed, - proposal_id, - false, - result, - ) - } - - /// Create a new proposal event for eth proposal - pub fn eth_proposal_event(proposal_id: u64, result: bool) -> Self { - ProposalEvent::new( - EventType::Proposal.to_string(), - TallyResult::Passed, - proposal_id, - false, - result, - ) - } -} diff --git a/crates/namada/src/ledger/ibc/mod.rs b/crates/namada/src/ledger/ibc/mod.rs index a3746e62e5..4076a1de92 100644 --- a/crates/namada/src/ledger/ibc/mod.rs +++ b/crates/namada/src/ledger/ibc/mod.rs @@ -1,7 +1,7 @@ //! 
IBC integration -use namada_core::event::EmitEvents; use namada_core::token::Amount; +use namada_events::EmitEvents; use namada_ibc::storage::{ channel_counter_key, client_counter_key, connection_counter_key, deposit_prefix, withdraw_prefix, diff --git a/crates/namada/src/ledger/native_vp/ibc/context.rs b/crates/namada/src/ledger/native_vp/ibc/context.rs index c9824a35f4..99dcc14a5b 100644 --- a/crates/namada/src/ledger/native_vp/ibc/context.rs +++ b/crates/namada/src/ledger/native_vp/ibc/context.rs @@ -6,12 +6,14 @@ use borsh_ext::BorshSerializeExt; use namada_core::collections::{HashMap, HashSet}; use namada_core::storage::Epochs; use namada_gas::MEMORY_ACCESS_GAS_PER_BYTE; +use namada_ibc::event::IbcEvent; use namada_ibc::{IbcCommonContext, IbcStorageContext}; +use namada_sdk::events::log::dumb_queries; +use namada_sdk::events::{Event, EventTypeBuilder}; use namada_state::{StateRead, StorageError, StorageRead, StorageWrite}; use namada_vp_env::VpEnv; use crate::address::{Address, InternalAddress}; -use crate::ibc::IbcEvent; use crate::ledger::ibc::storage::is_ibc_key; use crate::ledger::native_vp::CtxPreStorageRead; use crate::state::write_log::StorageModification; @@ -37,7 +39,7 @@ where /// Context to read the previous value ctx: CtxPreStorageRead<'view, 'a, S, CA>, /// IBC event - pub event: BTreeSet, + pub event: BTreeSet, } impl<'view, 'a, S, CA> PseudoExecutionContext<'view, 'a, S, CA> @@ -185,7 +187,7 @@ where CA: 'static + WasmCacheAccess, { fn emit_ibc_event(&mut self, event: IbcEvent) -> Result<()> { - self.event.insert(event); + self.event.insert(event.into()); Ok(()) } @@ -193,11 +195,21 @@ where &self, event_type: impl AsRef, ) -> Result> { + let matcher = dumb_queries::QueryMatcher::with_prefix( + EventTypeBuilder::new_of::() + .with_segment(event_type) + .build(), + ); Ok(self .event .iter() - .filter(|event| event.event_type == *event_type.as_ref()) - .cloned() + .filter_map(|event| { + if matcher.matches(event) { + IbcEvent::try_from(event).ok() + } else { + None + } + }) .collect()) } diff --git a/crates/namada/src/ledger/native_vp/ibc/mod.rs b/crates/namada/src/ledger/native_vp/ibc/mod.rs index 5160671e3f..b86d5e5546 100644 --- a/crates/namada/src/ledger/native_vp/ibc/mod.rs +++ b/crates/namada/src/ledger/native_vp/ibc/mod.rs @@ -12,6 +12,7 @@ use namada_core::address::Address; use namada_core::collections::HashSet; use namada_core::storage::Key; use namada_gas::{IBC_ACTION_EXECUTE_GAS, IBC_ACTION_VALIDATE_GAS}; +use namada_ibc::event::IbcEvent; use namada_ibc::{ Error as ActionError, IbcActions, NftTransferModule, TransferModule, ValidationParams, @@ -143,13 +144,19 @@ where match_value(key, actual, ctx.borrow().get_changed_value(key))?; } - // check the event - let actual = self.ctx.state.write_log().get_ibc_events(); - if *actual != ctx.borrow().event { + // check the events + let actual: BTreeSet<_> = self + .ctx + .state + .write_log() + .get_events_of::() + .collect(); + let ctx_borrow = ctx.borrow(); + let expected: BTreeSet<_> = ctx_borrow.event.iter().collect(); + if actual != expected { return Err(Error::IbcEvent(format!( - "The IBC event is invalid: Actual {:?}, Expected {:?}", - actual, - ctx.borrow().event + "The IBC event is invalid: Actual {actual:?}, Expected \ + {expected:?}", ))); } @@ -393,6 +400,7 @@ mod tests { use namada_core::address::InternalAddress; use namada_gas::TxGasMeter; use namada_governance::parameters::GovernanceParameters; + use namada_ibc::event::IbcEventType; use namada_state::testing::TestState; use namada_state::StorageRead; use 
namada_token::NATIVE_MAX_DECIMAL_PLACES; @@ -874,10 +882,10 @@ mod tests { let message_event = RawIbcEvent::Message(MessageEvent::Client); state .write_log_mut() - .emit_ibc_event(message_event.try_into().unwrap()); + .emit_event::(message_event.try_into().unwrap()); state .write_log_mut() - .emit_ibc_event(event.try_into().unwrap()); + .emit_event::(event.try_into().unwrap()); let tx_index = TxIndex::default(); let tx_code = vec![]; @@ -1073,10 +1081,10 @@ mod tests { let message_event = RawIbcEvent::Message(MessageEvent::Client); state .write_log_mut() - .emit_ibc_event(message_event.try_into().unwrap()); + .emit_event::(message_event.try_into().unwrap()); state .write_log_mut() - .emit_ibc_event(event.try_into().unwrap()); + .emit_event::(event.try_into().unwrap()); let tx_index = TxIndex::default(); let tx_code = vec![]; @@ -1174,10 +1182,10 @@ mod tests { let message_event = RawIbcEvent::Message(MessageEvent::Connection); state .write_log_mut() - .emit_ibc_event(message_event.try_into().unwrap()); + .emit_event::(message_event.try_into().unwrap()); state .write_log_mut() - .emit_ibc_event(event.try_into().unwrap()); + .emit_event::(event.try_into().unwrap()); let tx_index = TxIndex::default(); let tx_code = vec![]; @@ -1387,10 +1395,10 @@ mod tests { let message_event = RawIbcEvent::Message(MessageEvent::Connection); state .write_log_mut() - .emit_ibc_event(message_event.try_into().unwrap()); + .emit_event::(message_event.try_into().unwrap()); state .write_log_mut() - .emit_ibc_event(event.try_into().unwrap()); + .emit_event::(event.try_into().unwrap()); let tx_index = TxIndex::default(); let tx_code = vec![]; @@ -1488,10 +1496,10 @@ mod tests { let message_event = RawIbcEvent::Message(MessageEvent::Connection); state .write_log_mut() - .emit_ibc_event(message_event.try_into().unwrap()); + .emit_event::(message_event.try_into().unwrap()); state .write_log_mut() - .emit_ibc_event(event.try_into().unwrap()); + .emit_event::(event.try_into().unwrap()); let tx_code = vec![]; let tx_index = TxIndex::default(); @@ -1581,10 +1589,10 @@ mod tests { let message_event = RawIbcEvent::Message(MessageEvent::Connection); state .write_log_mut() - .emit_ibc_event(message_event.try_into().unwrap()); + .emit_event::(message_event.try_into().unwrap()); state .write_log_mut() - .emit_ibc_event(event.try_into().unwrap()); + .emit_event::(event.try_into().unwrap()); let tx_code = vec![]; let tx_index = TxIndex::default(); @@ -1702,10 +1710,10 @@ mod tests { let message_event = RawIbcEvent::Message(MessageEvent::Channel); state .write_log_mut() - .emit_ibc_event(message_event.try_into().unwrap()); + .emit_event::(message_event.try_into().unwrap()); state .write_log_mut() - .emit_ibc_event(event.try_into().unwrap()); + .emit_event::(event.try_into().unwrap()); let tx_index = TxIndex::default(); let tx_code = vec![]; @@ -1822,10 +1830,10 @@ mod tests { let message_event = RawIbcEvent::Message(MessageEvent::Channel); state .write_log_mut() - .emit_ibc_event(message_event.try_into().unwrap()); + .emit_event::(message_event.try_into().unwrap()); state .write_log_mut() - .emit_ibc_event(event.try_into().unwrap()); + .emit_event::(event.try_into().unwrap()); let tx_index = TxIndex::default(); let tx_code = vec![]; @@ -1927,10 +1935,10 @@ mod tests { let message_event = RawIbcEvent::Message(MessageEvent::Channel); state .write_log_mut() - .emit_ibc_event(message_event.try_into().unwrap()); + .emit_event::(message_event.try_into().unwrap()); state .write_log_mut() - .emit_ibc_event(event.try_into().unwrap()); + 
.emit_event::(event.try_into().unwrap()); let tx_index = TxIndex::default(); let tx_code = vec![]; @@ -2030,10 +2038,10 @@ mod tests { let message_event = RawIbcEvent::Message(MessageEvent::Channel); state .write_log_mut() - .emit_ibc_event(message_event.try_into().unwrap()); + .emit_event::(message_event.try_into().unwrap()); state .write_log_mut() - .emit_ibc_event(event.try_into().unwrap()); + .emit_event::(event.try_into().unwrap()); let tx_index = TxIndex::default(); let tx_code = vec![]; @@ -2169,7 +2177,7 @@ mod tests { let event = RawIbcEvent::Module(ModuleEvent::from(transfer_event)); state .write_log_mut() - .emit_ibc_event(event.try_into().unwrap()); + .emit_event::(event.try_into().unwrap()); let event = RawIbcEvent::SendPacket(SendPacket::new( packet, Order::Unordered, @@ -2178,10 +2186,15 @@ mod tests { let message_event = RawIbcEvent::Message(MessageEvent::Channel); state .write_log_mut() - .emit_ibc_event(message_event.try_into().unwrap()); + .emit_event::(message_event.try_into().unwrap()); + state + .write_log_mut() + .emit_event::(event.try_into().unwrap()); + let message_event = + RawIbcEvent::Message(MessageEvent::Module("transfer".to_owned())); state .write_log_mut() - .emit_ibc_event(event.try_into().unwrap()); + .emit_event::(message_event.try_into().unwrap()); let tx_index = TxIndex::default(); let tx_code = vec![]; @@ -2357,7 +2370,7 @@ mod tests { let event = RawIbcEvent::Module(ModuleEvent::from(recv_event)); state .write_log_mut() - .emit_ibc_event(event.try_into().unwrap()); + .emit_event::(event.try_into().unwrap()); let denom_trace_event = DenomTraceEvent { trace_hash: Some(trace_hash), denom: coin.denom, @@ -2365,7 +2378,7 @@ mod tests { let event = RawIbcEvent::Module(ModuleEvent::from(denom_trace_event)); state .write_log_mut() - .emit_ibc_event(event.try_into().unwrap()); + .emit_event::(event.try_into().unwrap()); let event = RawIbcEvent::ReceivePacket(ReceivePacket::new( msg.packet.clone(), Order::Unordered, @@ -2374,10 +2387,10 @@ mod tests { let message_event = RawIbcEvent::Message(MessageEvent::Channel); state .write_log_mut() - .emit_ibc_event(message_event.try_into().unwrap()); + .emit_event::(message_event.try_into().unwrap()); state .write_log_mut() - .emit_ibc_event(event.try_into().unwrap()); + .emit_event::(event.try_into().unwrap()); let event = RawIbcEvent::WriteAcknowledgement(WriteAcknowledgement::new( packet, @@ -2387,10 +2400,10 @@ mod tests { let message_event = RawIbcEvent::Message(MessageEvent::Channel); state .write_log_mut() - .emit_ibc_event(message_event.try_into().unwrap()); + .emit_event::(message_event.try_into().unwrap()); state .write_log_mut() - .emit_ibc_event(event.try_into().unwrap()); + .emit_event::(event.try_into().unwrap()); let tx_index = TxIndex::default(); let tx_code = vec![]; @@ -2518,7 +2531,7 @@ mod tests { let event = RawIbcEvent::Module(ModuleEvent::from(ack_event)); state .write_log_mut() - .emit_ibc_event(event.try_into().unwrap()); + .emit_event::(event.try_into().unwrap()); let event = RawIbcEvent::AcknowledgePacket(AcknowledgePacket::new( packet, Order::Unordered, @@ -2527,10 +2540,20 @@ mod tests { let message_event = RawIbcEvent::Message(MessageEvent::Channel); state .write_log_mut() - .emit_ibc_event(message_event.try_into().unwrap()); + .emit_event::(message_event.try_into().unwrap()); state .write_log_mut() - .emit_ibc_event(event.try_into().unwrap()); + .emit_event::(event.try_into().unwrap()); + state.write_log_mut().emit_event(IbcEvent { + event_type: 
IbcEventType("fungible_token_packet".to_owned()), + attributes: { + let mut attrs = namada_core::collections::HashMap::new(); + // NB: fuck it, not worth adding a domain + // type for this + attrs.insert("success".to_owned(), "AQ==".to_owned()); + attrs + }, + }); let tx_index = TxIndex::default(); let tx_code = vec![]; @@ -2676,7 +2699,7 @@ mod tests { let event = RawIbcEvent::Module(ModuleEvent::from(timeout_event)); state .write_log_mut() - .emit_ibc_event(event.try_into().unwrap()); + .emit_event::(event.try_into().unwrap()); let event = RawIbcEvent::TimeoutPacket(TimeoutPacket::new( packet, Order::Unordered, @@ -2684,10 +2707,10 @@ mod tests { let message_event = RawIbcEvent::Message(MessageEvent::Channel); state .write_log_mut() - .emit_ibc_event(message_event.try_into().unwrap()); + .emit_event::(message_event.try_into().unwrap()); state .write_log_mut() - .emit_ibc_event(event.try_into().unwrap()); + .emit_event::(event.try_into().unwrap()); let tx_index = TxIndex::default(); let tx_code = vec![]; @@ -2834,7 +2857,7 @@ mod tests { let event = RawIbcEvent::Module(ModuleEvent::from(timeout_event)); state .write_log_mut() - .emit_ibc_event(event.try_into().unwrap()); + .emit_event::(event.try_into().unwrap()); let event = RawIbcEvent::TimeoutPacket(TimeoutPacket::new( packet, Order::Unordered, @@ -2842,10 +2865,10 @@ mod tests { let message_event = RawIbcEvent::Message(MessageEvent::Channel); state .write_log_mut() - .emit_ibc_event(message_event.try_into().unwrap()); + .emit_event::(message_event.try_into().unwrap()); state .write_log_mut() - .emit_ibc_event(event.try_into().unwrap()); + .emit_event::(event.try_into().unwrap()); let tx_index = TxIndex::default(); let tx_code = vec![]; @@ -2997,7 +3020,7 @@ mod tests { let event = RawIbcEvent::Module(ModuleEvent::from(transfer_event)); state .write_log_mut() - .emit_ibc_event(event.try_into().unwrap()); + .emit_event::(event.try_into().unwrap()); let event = RawIbcEvent::SendPacket(SendPacket::new( packet, Order::Unordered, @@ -3006,10 +3029,16 @@ mod tests { let message_event = RawIbcEvent::Message(MessageEvent::Channel); state .write_log_mut() - .emit_ibc_event(message_event.try_into().unwrap()); + .emit_event::(message_event.try_into().unwrap()); + let message_event = RawIbcEvent::Message(MessageEvent::Module( + "nft_transfer".to_owned(), + )); + state + .write_log_mut() + .emit_event::(message_event.try_into().unwrap()); state .write_log_mut() - .emit_ibc_event(event.try_into().unwrap()); + .emit_event::(event.try_into().unwrap()); let tx_index = TxIndex::default(); let tx_code = vec![]; @@ -3207,7 +3236,7 @@ mod tests { let event = RawIbcEvent::Module(ModuleEvent::from(recv_event)); state .write_log_mut() - .emit_ibc_event(event.try_into().unwrap()); + .emit_event::(event.try_into().unwrap()); let trace_event = TokenTraceEvent { trace_hash: Some(trace_hash), class: class_id, @@ -3216,7 +3245,7 @@ mod tests { let event = RawIbcEvent::Module(ModuleEvent::from(trace_event)); state .write_log_mut() - .emit_ibc_event(event.try_into().unwrap()); + .emit_event::(event.try_into().unwrap()); let event = RawIbcEvent::ReceivePacket(ReceivePacket::new( msg.packet.clone(), Order::Unordered, @@ -3225,10 +3254,10 @@ mod tests { let message_event = RawIbcEvent::Message(MessageEvent::Channel); state .write_log_mut() - .emit_ibc_event(message_event.try_into().unwrap()); + .emit_event::(message_event.try_into().unwrap()); state .write_log_mut() - .emit_ibc_event(event.try_into().unwrap()); + .emit_event::(event.try_into().unwrap()); let event = 
RawIbcEvent::WriteAcknowledgement(WriteAcknowledgement::new( packet, @@ -3238,10 +3267,10 @@ mod tests { let message_event = RawIbcEvent::Message(MessageEvent::Channel); state .write_log_mut() - .emit_ibc_event(message_event.try_into().unwrap()); + .emit_event::(message_event.try_into().unwrap()); state .write_log_mut() - .emit_ibc_event(event.try_into().unwrap()); + .emit_event::(event.try_into().unwrap()); let tx_index = TxIndex::default(); let tx_code = vec![]; diff --git a/crates/namada/src/ledger/native_vp/mod.rs b/crates/namada/src/ledger/native_vp/mod.rs index c09836ce3e..4db6f4ad03 100644 --- a/crates/namada/src/ledger/native_vp/mod.rs +++ b/crates/namada/src/ledger/native_vp/mod.rs @@ -14,6 +14,7 @@ use std::fmt::Debug; use borsh::BorshDeserialize; use namada_core::storage; use namada_core::storage::Epochs; +use namada_events::{Event, EventType}; use namada_gas::GasMetering; use namada_tx::Tx; pub use namada_vp_env::VpEnv; @@ -22,7 +23,6 @@ use state::StateRead; use super::vp_host_fns; use crate::address::Address; use crate::hash::Hash; -use crate::ibc::IbcEvent; use crate::ledger::gas::VpGasMeter; use crate::state; use crate::state::{ResultExt, StorageRead}; @@ -381,12 +381,16 @@ where .into_storage_result() } - fn get_ibc_events( + fn get_events( &self, - event_type: String, - ) -> Result, state::StorageError> { - vp_host_fns::get_ibc_events(self.gas_meter, self.state, event_type) - .into_storage_result() + event_type: &EventType, + ) -> Result, state::StorageError> { + vp_host_fns::get_events( + self.gas_meter, + self.state, + event_type.to_string(), + ) + .into_storage_result() } fn iter_prefix<'iter>( diff --git a/crates/namada/src/ledger/native_vp/multitoken.rs b/crates/namada/src/ledger/native_vp/multitoken.rs index 08676ca949..bd454a1c4b 100644 --- a/crates/namada/src/ledger/native_vp/multitoken.rs +++ b/crates/namada/src/ledger/native_vp/multitoken.rs @@ -272,9 +272,9 @@ where .into()), } } - _ => Err(native_vp::Error::new_const( - "Only IBC tokens can be minted by a user transaction", - ) + _ => Err(native_vp::Error::new_alloc(format!( + "Attempted to mint non-IBC token {token}" + )) .into()), } } diff --git a/crates/namada/src/ledger/pgf/mod.rs b/crates/namada/src/ledger/pgf/mod.rs index 43fb175f45..b87d5de529 100644 --- a/crates/namada/src/ledger/pgf/mod.rs +++ b/crates/namada/src/ledger/pgf/mod.rs @@ -1,8 +1,5 @@ //! 
Pgf VP -/// Pgf utility functions and structures -pub mod utils; - use std::collections::BTreeSet; use namada_core::booleans::BoolResultUnitExt; diff --git a/crates/namada/src/ledger/pgf/utils.rs b/crates/namada/src/ledger/pgf/utils.rs deleted file mode 100644 index 88945f43f6..0000000000 --- a/crates/namada/src/ledger/pgf/utils.rs +++ /dev/null @@ -1,65 +0,0 @@ -use namada_core::address::Address; -use namada_core::collections::HashMap; - -use crate::ledger::events::EventType; -use crate::token; - -/// Proposal event definition -pub struct ProposalEvent { - /// Proposal event type - pub event_type: String, - /// Proposal event attributes - pub attributes: HashMap, -} - -impl ProposalEvent { - /// Create a proposal event - pub fn new( - event_type: String, - target: Address, - amount: token::Amount, - is_steward: bool, - success: bool, - ) -> Self { - let attributes = HashMap::from([ - ("target".to_string(), target.to_string()), - ("amount".to_string(), amount.to_string_native()), - ("is_steward".to_string(), is_steward.to_string()), - ("successed".to_string(), success.to_string()), - ]); - Self { - event_type, - attributes, - } - } - - /// Create a new proposal event for pgf continuous funding - pub fn pgf_funding_payment( - target: Address, - amount: token::Amount, - success: bool, - ) -> Self { - ProposalEvent::new( - EventType::PgfPayment.to_string(), - target, - amount, - false, - success, - ) - } - - /// Create a new proposal event for steward payments - pub fn pgf_steward_payment( - target: Address, - amount: token::Amount, - success: bool, - ) -> Self { - ProposalEvent::new( - EventType::PgfPayment.to_string(), - target, - amount, - true, - success, - ) - } -} diff --git a/crates/namada/src/ledger/protocol/mod.rs b/crates/namada/src/ledger/protocol/mod.rs index b48a99d234..97ad0cc7e5 100644 --- a/crates/namada/src/ledger/protocol/mod.rs +++ b/crates/namada/src/ledger/protocol/mod.rs @@ -9,9 +9,14 @@ use masp_primitives::transaction::Transaction; use namada_core::booleans::BoolResultUnitExt; use namada_core::hash::Hash; use namada_core::storage::Key; +use namada_events::extend::{ + ComposeEvent, Height as HeightAttr, TxHash as TxHashAttr, +}; +use namada_events::EventLevel; use namada_gas::TxGasMeter; use namada_sdk::tx::TX_TRANSFER_WASM; use namada_state::StorageWrite; +use namada_token::event::{TokenEvent, TokenOperation, UserAccount}; use namada_tx::data::protocol::ProtocolTxType; use namada_tx::data::{ GasLimit, TxResult, TxType, VpStatusFlags, VpsResult, WrapperTx, @@ -82,7 +87,7 @@ pub enum Error { PosNativeVpRuntime, #[error("Parameters native VP: {0}")] ParametersNativeVpError(parameters::Error), - #[error("IBC Token native VP: {0}")] + #[error("Multitoken native VP: {0}")] MultitokenNativeVpError(crate::ledger::native_vp::multitoken::Error), #[error("Governance native VP error: {0}")] GovernanceNativeVpError(crate::ledger::governance::Error), @@ -272,16 +277,19 @@ where { let mut changed_keys = BTreeSet::default(); + let wrapper_tx_hash = tx.header_hash(); + // Write wrapper tx hash to storage shell_params .state .write_log_mut() - .write_tx_hash(tx.header_hash()) + .write_tx_hash(wrapper_tx_hash) .expect("Error while writing tx hash to storage"); // Charge fee before performing any fallible operations charge_fee( wrapper, + wrapper_tx_hash, fee_unshield_transaction, &mut shell_params, &mut changed_keys, @@ -325,6 +333,7 @@ pub fn get_fee_unshielding_transaction( /// - The accumulated fee amount to be credited to the block proposer overflows fn charge_fee( wrapper: 
&WrapperTx, + wrapper_tx_hash: Hash, masp_transaction: Option, shell_params: &mut ShellParams<'_, S, D, H, CA>, changed_keys: &mut BTreeSet, @@ -351,7 +360,12 @@ where Some(WrapperArgs { block_proposer, is_committed_fee_unshield: _, - }) => transfer_fee(shell_params.state, block_proposer, wrapper)?, + }) => transfer_fee( + shell_params.state, + block_proposer, + wrapper, + wrapper_tx_hash, + )?, None => check_fees(shell_params.state, wrapper)?, } @@ -482,6 +496,7 @@ pub fn transfer_fee( state: &mut S, block_proposer: &Address, wrapper: &WrapperTx, + wrapper_tx_hash: Hash, ) -> Result<()> where S: State + StorageRead + StorageWrite, @@ -493,12 +508,19 @@ where ) .unwrap(); + const FEE_PAYMENT_DESCRIPTOR: std::borrow::Cow<'static, str> = + std::borrow::Cow::Borrowed("wrapper-fee-payment"); + match wrapper.get_tx_fee() { Ok(fees) => { let fees = crate::token::denom_to_amount(fees, &wrapper.fee.token, state) .map_err(|e| Error::FeeError(e.to_string()))?; - if balance.checked_sub(fees).is_some() { + + let current_block_height = + state.in_mem().get_last_block_height() + 1; + + if let Some(post_bal) = balance.checked_sub(fees) { token_transfer( state, &wrapper.fee.token, @@ -506,7 +528,38 @@ where block_proposer, fees, ) - .map_err(|e| Error::FeeError(e.to_string())) + .map_err(|e| Error::FeeError(e.to_string()))?; + + let target_post_balance = Some( + namada_token::read_balance( + state, + &wrapper.fee.token, + block_proposer, + ) + .map_err(Error::StorageError)? + .into(), + ); + + state.write_log_mut().emit_event( + TokenEvent { + descriptor: FEE_PAYMENT_DESCRIPTOR, + level: EventLevel::Tx, + token: wrapper.fee.token.clone(), + operation: TokenOperation::Transfer { + amount: fees.into(), + source: UserAccount::Internal(wrapper.fee_payer()), + target: UserAccount::Internal( + block_proposer.clone(), + ), + source_post_balance: post_bal.into(), + target_post_balance, + }, + } + .with(HeightAttr(current_block_height)) + .with(TxHashAttr(wrapper_tx_hash)), + ); + + Ok(()) } else { // Balance was insufficient for fee payment, move all the // available funds in the transparent balance of @@ -527,6 +580,35 @@ where ) .map_err(|e| Error::FeeError(e.to_string()))?; + let target_post_balance = Some( + namada_token::read_balance( + state, + &wrapper.fee.token, + block_proposer, + ) + .map_err(Error::StorageError)? + .into(), + ); + + state.write_log_mut().emit_event( + TokenEvent { + descriptor: FEE_PAYMENT_DESCRIPTOR, + level: EventLevel::Tx, + token: wrapper.fee.token.clone(), + operation: TokenOperation::Transfer { + amount: balance.into(), + source: UserAccount::Internal(wrapper.fee_payer()), + target: UserAccount::Internal( + block_proposer.clone(), + ), + source_post_balance: namada_core::uint::ZERO, + target_post_balance, + }, + } + .with(HeightAttr(current_block_height)) + .with(TxHashAttr(wrapper_tx_hash)), + ); + Err(Error::FeeError( "Transparent balance of wrapper's signer was insufficient \ to pay fee. 
All the available transparent funds have \ @@ -673,7 +755,7 @@ where let gas_used = tx_gas_meter.borrow().get_tx_consumed_gas(); let initialized_accounts = state.write_log().get_initialized_accounts(); let changed_keys = state.write_log().get_keys(); - let ibc_events = state.write_log_mut().take_ibc_events(); + let events = state.write_log_mut().take_events(); Ok(TxResult { gas_used, @@ -681,8 +763,7 @@ where changed_keys, vps_result, initialized_accounts, - ibc_events, - eth_bridge_events: BTreeSet::default(), + events, }) } diff --git a/crates/namada/src/ledger/vp_host_fns.rs b/crates/namada/src/ledger/vp_host_fns.rs index e30d0af9b4..2e5d1f5780 100644 --- a/crates/namada/src/ledger/vp_host_fns.rs +++ b/crates/namada/src/ledger/vp_host_fns.rs @@ -9,13 +9,13 @@ use namada_core::hash::{Hash, HASH_LENGTH}; use namada_core::storage::{ BlockHeight, Epoch, Epochs, Header, Key, TxIndex, TX_INDEX_LENGTH, }; +use namada_events::{Event, EventTypeBuilder}; use namada_gas::MEMORY_ACCESS_GAS_PER_BYTE; use namada_state::write_log::WriteLog; use namada_state::{write_log, DBIter, StateRead, DB}; use namada_tx::{Section, Tx}; use thiserror::Error; -use crate::ibc::IbcEvent; use crate::ledger::gas; use crate::ledger::gas::{GasMetering, VpGasMeter}; @@ -322,20 +322,20 @@ where Ok(state.in_mem().block.pred_epochs.clone()) } -/// Getting the IBC event. -pub fn get_ibc_events( +/// Query events emitted by the current transaction. +pub fn get_events( _gas_meter: &RefCell, state: &S, event_type: String, -) -> EnvResult> +) -> EnvResult> where S: StateRead + Debug, { + let event_type = EventTypeBuilder::new_with_type(event_type).build(); + Ok(state .write_log() - .get_ibc_events() - .iter() - .filter(|event| event.event_type == event_type) + .lookup_events_with_prefix(&event_type) .cloned() .collect()) } diff --git a/crates/namada/src/lib.rs b/crates/namada/src/lib.rs index 47f2db3691..15290a714f 100644 --- a/crates/namada/src/lib.rs +++ b/crates/namada/src/lib.rs @@ -17,8 +17,8 @@ pub use namada_sdk::{control_flow, io}; pub use tendermint_rpc; pub use { bip39, namada_account as account, namada_core as core, - namada_ethereum_bridge as ethereum_bridge, namada_gas as gas, - namada_governance as governance, namada_ibc as ibc, + namada_ethereum_bridge as ethereum_bridge, namada_events as events, + namada_gas as gas, namada_governance as governance, namada_ibc as ibc, namada_parameters as parameters, namada_proof_of_stake as proof_of_stake, namada_replay_protection as replay_protection, namada_sdk as sdk, namada_state as state, namada_token as token, namada_tx as tx, diff --git a/crates/namada/src/vm/host_env.rs b/crates/namada/src/vm/host_env.rs index d61371c9bc..2b52406f49 100644 --- a/crates/namada/src/vm/host_env.rs +++ b/crates/namada/src/vm/host_env.rs @@ -12,6 +12,7 @@ use masp_primitives::transaction::Transaction; use namada_core::address::ESTABLISHED_ADDRESS_BYTES_LEN; use namada_core::internal::KeyVal; use namada_core::storage::TX_INDEX_LENGTH; +use namada_events::{Event, EventTypeBuilder}; use namada_gas::{ self as gas, GasMetering, TxGasMeter, VpGasMeter, MEMORY_ACCESS_GAS_PER_BYTE, @@ -33,7 +34,6 @@ use super::wasm::VpCache; use super::WasmCacheAccess; use crate::address::{self, Address}; use crate::hash::Hash; -use crate::ibc::IbcEvent; use crate::internal::HostEnvResult; use crate::ledger::vp_host_fns; use crate::storage::{BlockHeight, Key, TxIndex}; @@ -1014,9 +1014,9 @@ where state.delete(&key).map_err(TxRuntimeError::StorageError) } -/// Emitting an IBC event function exposed to the wasm VM Tx 
environment. -/// The given IBC event will be set to the write log. -pub fn tx_emit_ibc_event( +/// Expose the functionality to emit events to the wasm VM's Tx environment. +/// An emitted event will land in the write log. +pub fn tx_emit_event( env: &TxVmEnv, event_ptr: u64, event_len: u64, @@ -1032,15 +1032,15 @@ where .read_bytes(event_ptr, event_len as _) .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; tx_charge_gas::(env, gas)?; - let event: IbcEvent = BorshDeserialize::try_from_slice(&event) + let event: Event = BorshDeserialize::try_from_slice(&event) .map_err(TxRuntimeError::EncodingError)?; let mut state = env.state(); - let gas = state.write_log_mut().emit_ibc_event(event); + let gas = state.write_log_mut().emit_event(event); tx_charge_gas::(env, gas) } -/// Getting an IBC event function exposed to the wasm VM Tx environment. -pub fn tx_get_ibc_events( +/// Expose the functionality to query events from the wasm VM's Tx environment. +pub fn tx_get_events( env: &TxVmEnv, event_type_ptr: u64, event_type_len: u64, @@ -1057,14 +1057,16 @@ where .map_err(|e| TxRuntimeError::MemoryError(Box::new(e)))?; tx_charge_gas::(env, gas)?; let state = env.state(); - let events: Vec = state - .write_log() - .get_ibc_events() - .iter() - .filter(|event| event.event_type == event_type) - .cloned() - .collect(); - let value = events.serialize_to_vec(); + let value = { + let event_type = EventTypeBuilder::new_with_type(event_type).build(); + + let events: Vec<_> = state + .write_log() + .lookup_events_with_prefix(&event_type) + .collect(); + + events.serialize_to_vec() + }; let len: i64 = value .len() .try_into() @@ -1880,8 +1882,8 @@ where Ok(len) } -/// Getting the IBC event function exposed to the wasm VM VP environment. -pub fn vp_get_ibc_events( +/// Expose the functionality to query events from the wasm VM's VP environment. 
+pub fn vp_get_events( env: &VpVmEnv, event_type_ptr: u64, event_type_len: u64, @@ -1901,7 +1903,7 @@ where vp_host_fns::add_gas(gas_meter, gas)?; let state = env.state(); - let events = vp_host_fns::get_ibc_events(gas_meter, &state, event_type)?; + let events = vp_host_fns::get_events(gas_meter, &state, event_type)?; let value = events.serialize_to_vec(); let len: i64 = value .len() diff --git a/crates/namada/src/vm/wasm/host_env.rs b/crates/namada/src/vm/wasm/host_env.rs index cf181e580e..855121fc83 100644 --- a/crates/namada/src/vm/wasm/host_env.rs +++ b/crates/namada/src/vm/wasm/host_env.rs @@ -73,8 +73,8 @@ where "namada_tx_insert_verifier" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_insert_verifier), "namada_tx_update_validity_predicate" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_update_validity_predicate), "namada_tx_init_account" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_init_account), - "namada_tx_emit_ibc_event" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_emit_ibc_event), - "namada_tx_get_ibc_events" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_get_ibc_events), + "namada_tx_emit_event" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_emit_event), + "namada_tx_get_events" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_get_events), "namada_tx_get_chain_id" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_get_chain_id), "namada_tx_get_tx_index" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_get_tx_index), "namada_tx_get_block_height" => Function::new_native_with_env(wasm_store, env.clone(), host_env::tx_get_block_height), @@ -127,7 +127,7 @@ where "namada_vp_get_tx_code_hash" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_get_tx_code_hash), "namada_vp_get_block_epoch" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_get_block_epoch), "namada_vp_get_pred_epochs" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_get_pred_epochs), - "namada_vp_get_ibc_events" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_get_ibc_events), + "namada_vp_get_events" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_get_events), "namada_vp_yield_value" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_yield_value), "namada_vp_verify_tx_section_signature" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_verify_tx_section_signature), "namada_vp_eval" => Function::new_native_with_env(wasm_store, env.clone(), host_env::vp_eval), diff --git a/crates/proof_of_stake/Cargo.toml b/crates/proof_of_stake/Cargo.toml index f14c3ad49b..e8ac302a28 100644 --- a/crates/proof_of_stake/Cargo.toml +++ b/crates/proof_of_stake/Cargo.toml @@ -25,6 +25,7 @@ migrations = [ namada_account = { path = "../account" } namada_controller = { path = "../controller" } namada_core = { path = "../core" } +namada_events = { path = "../events", default-features = false } namada_governance = { path = "../governance" } namada_macros = { path = "../macros" } namada_migrations = { path = "../migrations", optional = true } @@ -35,6 +36,7 @@ namada_trans_token = { path = "../trans_token" } borsh.workspace = true data-encoding.workspace = true derivative.workspace = true +konst.workspace = true linkme = {workspace = true, optional = true} num-traits.workspace = true 
once_cell.workspace = true
@@ -46,6 +48,7 @@ tracing.workspace = true

[dev-dependencies]
namada_core = { path = "../core", features = ["testing"] }
+namada_events = { path = "../events", features = ["testing"] }
namada_state = { path = "../state", features = ["testing"] }

assert_matches.workspace = true
diff --git a/crates/proof_of_stake/src/event.rs b/crates/proof_of_stake/src/event.rs
new file mode 100644
index 0000000000..2b9407f2dc
--- /dev/null
+++ b/crates/proof_of_stake/src/event.rs
@@ -0,0 +1,75 @@
+//! Proof of Stake events.
+
+use namada_core::address::Address;
+use namada_core::token;
+use namada_core::uint::Uint;
+use namada_events::extend::{ComposeEvent, EventAttributeEntry};
+use namada_events::{Event, EventLevel, EventToEmit};
+
+pub mod types {
+    //! Proof of Stake event types.
+
+    use namada_events::{event_type, EventType};
+
+    use super::PosEvent;
+
+    /// Slash event.
+    pub const SLASH: EventType = event_type!(PosEvent, "slash");
+}
+
+/// Proof of Stake event.
+#[derive(Debug)]
+pub enum PosEvent {
+    /// Slashing event.
+    Slash {
+        /// The address of the slashed validator.
+        validator: Address,
+        /// Amount of tokens that have been slashed.
+        amount: token::Amount,
+    },
+}
+
+impl EventToEmit for PosEvent {
+    const DOMAIN: &'static str = "proof-of-stake";
+}
+
+impl From<PosEvent> for Event {
+    fn from(pos_event: PosEvent) -> Self {
+        match pos_event {
+            PosEvent::Slash { validator, amount } => {
+                Event::new(types::SLASH, EventLevel::Block)
+                    .with(SlashedValidator(validator))
+                    .with(SlashedAmount(&amount.into()))
+                    .into()
+            }
+        }
+    }
+}
+
+/// Extend an [`Event`] with slashed validator data.
+pub struct SlashedValidator(pub Address);
+
+impl EventAttributeEntry<'static> for SlashedValidator {
+    type Value = Address;
+    type ValueOwned = Self::Value;
+
+    const KEY: &'static str = "slashed-validator";
+
+    fn into_value(self) -> Self::Value {
+        self.0
+    }
+}
+
+/// Extend an [`Event`] with slashed amount data.
+pub struct SlashedAmount<'amt>(pub &'amt Uint); + +impl<'amt> EventAttributeEntry<'amt> for SlashedAmount<'amt> { + type Value = &'amt Uint; + type ValueOwned = Uint; + + const KEY: &'static str = "slashed-amount"; + + fn into_value(self) -> Self::Value { + self.0 + } +} diff --git a/crates/proof_of_stake/src/lib.rs b/crates/proof_of_stake/src/lib.rs index d10e97e977..26cd0aad97 100644 --- a/crates/proof_of_stake/src/lib.rs +++ b/crates/proof_of_stake/src/lib.rs @@ -7,6 +7,7 @@ #![deny(rustdoc::private_intra_doc_links)] pub mod epoched; +pub mod event; pub mod parameters; pub mod pos_queries; pub mod queries; @@ -31,11 +32,11 @@ pub use error::*; use namada_core::address::{Address, InternalAddress}; use namada_core::collections::HashSet; use namada_core::dec::Dec; -use namada_core::event::EmitEvents; use namada_core::key::common; use namada_core::storage::BlockHeight; pub use namada_core::storage::{Epoch, Key, KeySeg}; use namada_core::tendermint::abci::types::Misbehavior; +use namada_events::EmitEvents; use namada_storage::collections::lazy_map::{self, Collectable, LazyMap}; use namada_storage::{StorageRead, StorageWrite}; pub use namada_trans_token as token; @@ -2869,7 +2870,7 @@ where /// Apply PoS updates for a block pub fn finalize_block( storage: &mut S, - _events: &mut impl EmitEvents, + events: &mut impl EmitEvents, is_new_epoch: bool, validator_set_update_epoch: Epoch, votes: Vec, @@ -2928,7 +2929,9 @@ where // Process and apply slashes that have already been recorded for the // current epoch - if let Err(err) = slashing::process_slashes(storage, current_epoch) { + if let Err(err) = + slashing::process_slashes(storage, events, current_epoch) + { tracing::error!( "Error while processing slashes queued for epoch {}: {}", current_epoch, diff --git a/crates/proof_of_stake/src/slashing.rs b/crates/proof_of_stake/src/slashing.rs index 3965ff6014..6b2a662bb1 100644 --- a/crates/proof_of_stake/src/slashing.rs +++ b/crates/proof_of_stake/src/slashing.rs @@ -11,12 +11,14 @@ use namada_core::key::tm_raw_hash_to_string; use namada_core::storage::{BlockHeight, Epoch}; use namada_core::tendermint::abci::types::{Misbehavior, MisbehaviorKind}; use namada_core::token; +use namada_events::EmitEvents; use namada_storage::collections::lazy_map::{ Collectable, NestedMap, NestedSubKey, SubKey, }; use namada_storage::collections::LazyMap; use namada_storage::{StorageRead, StorageWrite}; +use crate::event::PosEvent; use crate::storage::{ enqueued_slashes_handle, read_pos_params, read_validator_last_slash_epoch, read_validator_stake, total_bonded_handle, total_unbonded_handle, @@ -201,6 +203,7 @@ where /// validators. 
pub fn process_slashes( storage: &mut S, + events: &mut impl EmitEvents, current_epoch: Epoch, ) -> namada_storage::Result<()> where @@ -318,6 +321,11 @@ where epoch, Some(0), )?; + + events.emit(PosEvent::Slash { + validator: validator.clone(), + amount: slash_amount, + }); } } // Then update validator and total deltas diff --git a/crates/proof_of_stake/src/tests/state_machine.rs b/crates/proof_of_stake/src/tests/state_machine.rs index 0ad85e9448..3529323149 100644 --- a/crates/proof_of_stake/src/tests/state_machine.rs +++ b/crates/proof_of_stake/src/tests/state_machine.rs @@ -268,8 +268,12 @@ impl StateMachineTest for ConcretePosState { // Need to apply some slashing let current_epoch = state.s.in_mem().block.epoch; - crate::slashing::process_slashes(&mut state.s, current_epoch) - .unwrap(); + crate::slashing::process_slashes( + &mut state.s, + &mut namada_events::testing::VoidEventSink, + current_epoch, + ) + .unwrap(); let params = read_pos_params(&state.s).unwrap(); state.check_next_epoch_post_conditions(¶ms); diff --git a/crates/proof_of_stake/src/tests/state_machine_v2.rs b/crates/proof_of_stake/src/tests/state_machine_v2.rs index 5c89bc6d98..1b8dbbd642 100644 --- a/crates/proof_of_stake/src/tests/state_machine_v2.rs +++ b/crates/proof_of_stake/src/tests/state_machine_v2.rs @@ -1986,8 +1986,12 @@ impl StateMachineTest for ConcretePosState { // Need to apply some slashing let current_epoch = state.s.in_mem().block.epoch; - crate::slashing::process_slashes(&mut state.s, current_epoch) - .unwrap(); + crate::slashing::process_slashes( + &mut state.s, + &mut namada_events::testing::VoidEventSink, + current_epoch, + ) + .unwrap(); let params = read_pos_params(&state.s).unwrap(); state.check_next_epoch_post_conditions(¶ms); diff --git a/crates/proof_of_stake/src/tests/test_pos.rs b/crates/proof_of_stake/src/tests/test_pos.rs index 0150cba4d5..a26764788b 100644 --- a/crates/proof_of_stake/src/tests/test_pos.rs +++ b/crates/proof_of_stake/src/tests/test_pos.rs @@ -854,7 +854,12 @@ fn test_unjail_validator_aux( s.commit_block().unwrap(); current_epoch = advance_epoch(&mut s, ¶ms); - process_slashes(&mut s, current_epoch).unwrap(); + process_slashes( + &mut s, + &mut namada_events::testing::VoidEventSink, + current_epoch, + ) + .unwrap(); // Discover first slash let slash_0_evidence_epoch = current_epoch; @@ -914,7 +919,12 @@ fn test_unjail_validator_aux( slash_0_evidence_epoch + params.slash_processing_epoch_offset(); while current_epoch < unfreeze_epoch + 4u64 { current_epoch = advance_epoch(&mut s, ¶ms); - process_slashes(&mut s, current_epoch).unwrap(); + process_slashes( + &mut s, + &mut namada_events::testing::VoidEventSink, + current_epoch, + ) + .unwrap(); } // Unjail the validator @@ -960,7 +970,12 @@ fn test_unjail_validator_aux( // Advance another epoch current_epoch = advance_epoch(&mut s, ¶ms); - process_slashes(&mut s, current_epoch).unwrap(); + process_slashes( + &mut s, + &mut namada_events::testing::VoidEventSink, + current_epoch, + ) + .unwrap(); let second_att = unjail_validator(&mut s, val_addr, current_epoch); assert!(second_att.is_err()); @@ -1040,7 +1055,12 @@ fn test_unslashed_bond_amount_aux(validators: Vec) { // Advance an epoch current_epoch = advance_epoch(&mut storage, ¶ms); - process_slashes(&mut storage, current_epoch).unwrap(); + process_slashes( + &mut storage, + &mut namada_events::testing::VoidEventSink, + current_epoch, + ) + .unwrap(); // Bond to validator 1 bond_tokens( @@ -1088,7 +1108,12 @@ fn test_unslashed_bond_amount_aux(validators: Vec) { // Advance 
an epoch current_epoch = advance_epoch(&mut storage, ¶ms); - process_slashes(&mut storage, current_epoch).unwrap(); + process_slashes( + &mut storage, + &mut namada_events::testing::VoidEventSink, + current_epoch, + ) + .unwrap(); // Bond to validator 1 bond_tokens( @@ -1630,7 +1655,12 @@ fn test_is_delegator_aux(mut validators: Vec) { // Advance to epoch 1 current_epoch = advance_epoch(&mut storage, ¶ms); - process_slashes(&mut storage, current_epoch).unwrap(); + process_slashes( + &mut storage, + &mut namada_events::testing::VoidEventSink, + current_epoch, + ) + .unwrap(); // Delegate in epoch 1 to validator1 let del1_epoch = current_epoch; @@ -1646,7 +1676,12 @@ fn test_is_delegator_aux(mut validators: Vec) { // Advance to epoch 2 current_epoch = advance_epoch(&mut storage, ¶ms); - process_slashes(&mut storage, current_epoch).unwrap(); + process_slashes( + &mut storage, + &mut namada_events::testing::VoidEventSink, + current_epoch, + ) + .unwrap(); // Delegate in epoch 2 to validator2 let del2_epoch = current_epoch; diff --git a/crates/proof_of_stake/src/tests/test_slash_and_redel.rs b/crates/proof_of_stake/src/tests/test_slash_and_redel.rs index 9df89eeef4..d866220cc0 100644 --- a/crates/proof_of_stake/src/tests/test_slash_and_redel.rs +++ b/crates/proof_of_stake/src/tests/test_slash_and_redel.rs @@ -117,7 +117,12 @@ fn test_simple_redelegation_aux( for _ in 0..5 { current_epoch = advance_epoch(&mut storage, ¶ms); - process_slashes(&mut storage, current_epoch).unwrap(); + process_slashes( + &mut storage, + &mut namada_events::testing::VoidEventSink, + current_epoch, + ) + .unwrap(); } let init_epoch = current_epoch; @@ -135,11 +140,26 @@ fn test_simple_redelegation_aux( // Advance three epochs current_epoch = advance_epoch(&mut storage, ¶ms); - process_slashes(&mut storage, current_epoch).unwrap(); + process_slashes( + &mut storage, + &mut namada_events::testing::VoidEventSink, + current_epoch, + ) + .unwrap(); current_epoch = advance_epoch(&mut storage, ¶ms); - process_slashes(&mut storage, current_epoch).unwrap(); + process_slashes( + &mut storage, + &mut namada_events::testing::VoidEventSink, + current_epoch, + ) + .unwrap(); current_epoch = advance_epoch(&mut storage, ¶ms); - process_slashes(&mut storage, current_epoch).unwrap(); + process_slashes( + &mut storage, + &mut namada_events::testing::VoidEventSink, + current_epoch, + ) + .unwrap(); // Redelegate in epoch 3 redelegate_tokens( @@ -183,11 +203,26 @@ fn test_simple_redelegation_aux( // Advance three epochs current_epoch = advance_epoch(&mut storage, ¶ms); - process_slashes(&mut storage, current_epoch).unwrap(); + process_slashes( + &mut storage, + &mut namada_events::testing::VoidEventSink, + current_epoch, + ) + .unwrap(); current_epoch = advance_epoch(&mut storage, ¶ms); - process_slashes(&mut storage, current_epoch).unwrap(); + process_slashes( + &mut storage, + &mut namada_events::testing::VoidEventSink, + current_epoch, + ) + .unwrap(); current_epoch = advance_epoch(&mut storage, ¶ms); - process_slashes(&mut storage, current_epoch).unwrap(); + process_slashes( + &mut storage, + &mut namada_events::testing::VoidEventSink, + current_epoch, + ) + .unwrap(); // Unbond in epoch 5 from dest_validator let _ = unbond_tokens( @@ -239,7 +274,12 @@ fn test_simple_redelegation_aux( // Advance to withdrawal epoch loop { current_epoch = advance_epoch(&mut storage, ¶ms); - process_slashes(&mut storage, current_epoch).unwrap(); + process_slashes( + &mut storage, + &mut namada_events::testing::VoidEventSink, + current_epoch, + ) + 
.unwrap(); if current_epoch == unbond_end { break; } @@ -324,7 +364,12 @@ fn test_slashes_with_unbonding_aux( s.commit_block().unwrap(); current_epoch = advance_epoch(&mut s, ¶ms); - process_slashes(&mut s, current_epoch).unwrap(); + process_slashes( + &mut s, + &mut namada_events::testing::VoidEventSink, + current_epoch, + ) + .unwrap(); // Discover first slash let slash_0_evidence_epoch = current_epoch; @@ -349,7 +394,12 @@ fn test_slashes_with_unbonding_aux( slash_0_evidence_epoch + params.slash_processing_epoch_offset(); while current_epoch < unfreeze_epoch { current_epoch = advance_epoch(&mut s, ¶ms); - process_slashes(&mut s, current_epoch).unwrap(); + process_slashes( + &mut s, + &mut namada_events::testing::VoidEventSink, + current_epoch, + ) + .unwrap(); } // Advance more epochs randomly from the generated delay @@ -386,7 +436,12 @@ fn test_slashes_with_unbonding_aux( let withdraw_epoch = unbond_epoch + params.withdrawable_epoch_offset(); while current_epoch < withdraw_epoch { current_epoch = advance_epoch(&mut s, ¶ms); - process_slashes(&mut s, current_epoch).unwrap(); + process_slashes( + &mut s, + &mut namada_events::testing::VoidEventSink, + current_epoch, + ) + .unwrap(); } let token = staking_token_address(&s); let val_balance_pre = read_balance(&s, &token, val_addr).unwrap(); @@ -496,7 +551,12 @@ fn test_redelegation_with_slashing_aux( for _ in 0..5 { current_epoch = advance_epoch(&mut storage, ¶ms); - process_slashes(&mut storage, current_epoch).unwrap(); + process_slashes( + &mut storage, + &mut namada_events::testing::VoidEventSink, + current_epoch, + ) + .unwrap(); } let init_epoch = current_epoch; @@ -514,11 +574,26 @@ fn test_redelegation_with_slashing_aux( // Advance three epochs current_epoch = advance_epoch(&mut storage, ¶ms); - process_slashes(&mut storage, current_epoch).unwrap(); + process_slashes( + &mut storage, + &mut namada_events::testing::VoidEventSink, + current_epoch, + ) + .unwrap(); current_epoch = advance_epoch(&mut storage, ¶ms); - process_slashes(&mut storage, current_epoch).unwrap(); + process_slashes( + &mut storage, + &mut namada_events::testing::VoidEventSink, + current_epoch, + ) + .unwrap(); current_epoch = advance_epoch(&mut storage, ¶ms); - process_slashes(&mut storage, current_epoch).unwrap(); + process_slashes( + &mut storage, + &mut namada_events::testing::VoidEventSink, + current_epoch, + ) + .unwrap(); // Redelegate in epoch 8 redelegate_tokens( @@ -558,11 +633,26 @@ fn test_redelegation_with_slashing_aux( // Advance three epochs current_epoch = advance_epoch(&mut storage, ¶ms); - process_slashes(&mut storage, current_epoch).unwrap(); + process_slashes( + &mut storage, + &mut namada_events::testing::VoidEventSink, + current_epoch, + ) + .unwrap(); current_epoch = advance_epoch(&mut storage, ¶ms); - process_slashes(&mut storage, current_epoch).unwrap(); + process_slashes( + &mut storage, + &mut namada_events::testing::VoidEventSink, + current_epoch, + ) + .unwrap(); current_epoch = advance_epoch(&mut storage, ¶ms); - process_slashes(&mut storage, current_epoch).unwrap(); + process_slashes( + &mut storage, + &mut namada_events::testing::VoidEventSink, + current_epoch, + ) + .unwrap(); // Unbond in epoch 11 from dest_validator let _ = unbond_tokens( @@ -577,7 +667,12 @@ fn test_redelegation_with_slashing_aux( // Advance one epoch current_epoch = advance_epoch(&mut storage, ¶ms); - process_slashes(&mut storage, current_epoch).unwrap(); + process_slashes( + &mut storage, + &mut namada_events::testing::VoidEventSink, + current_epoch, + ) + 
.unwrap(); // Discover evidence slash( @@ -631,7 +726,12 @@ fn test_redelegation_with_slashing_aux( // Advance to withdrawal epoch loop { current_epoch = advance_epoch(&mut storage, ¶ms); - process_slashes(&mut storage, current_epoch).unwrap(); + process_slashes( + &mut storage, + &mut namada_events::testing::VoidEventSink, + current_epoch, + ) + .unwrap(); if current_epoch == unbond_end { break; } @@ -729,7 +829,12 @@ fn test_chain_redelegations_aux(mut validators: Vec) { // Advance one epoch current_epoch = advance_epoch(&mut storage, ¶ms); - process_slashes(&mut storage, current_epoch).unwrap(); + process_slashes( + &mut storage, + &mut namada_events::testing::VoidEventSink, + current_epoch, + ) + .unwrap(); // Redelegate in epoch 1 to dest_validator let redel_amount_1: token::Amount = 58.into(); @@ -842,9 +947,19 @@ fn test_chain_redelegations_aux(mut validators: Vec) { // Attempt to redelegate in epoch 3 to dest_validator current_epoch = advance_epoch(&mut storage, ¶ms); - process_slashes(&mut storage, current_epoch).unwrap(); + process_slashes( + &mut storage, + &mut namada_events::testing::VoidEventSink, + current_epoch, + ) + .unwrap(); current_epoch = advance_epoch(&mut storage, ¶ms); - process_slashes(&mut storage, current_epoch).unwrap(); + process_slashes( + &mut storage, + &mut namada_events::testing::VoidEventSink, + current_epoch, + ) + .unwrap(); let redel_amount_2: token::Amount = 23.into(); let redel_att = redelegate_tokens( @@ -863,7 +978,12 @@ fn test_chain_redelegations_aux(mut validators: Vec) { redel_end.prev() + params.slash_processing_epoch_offset(); loop { current_epoch = advance_epoch(&mut storage, ¶ms); - process_slashes(&mut storage, current_epoch).unwrap(); + process_slashes( + &mut storage, + &mut namada_events::testing::VoidEventSink, + current_epoch, + ) + .unwrap(); if current_epoch == epoch_can_redel.prev() { break; } @@ -882,7 +1002,12 @@ fn test_chain_redelegations_aux(mut validators: Vec) { // Advance one more epoch current_epoch = advance_epoch(&mut storage, ¶ms); - process_slashes(&mut storage, current_epoch).unwrap(); + process_slashes( + &mut storage, + &mut namada_events::testing::VoidEventSink, + current_epoch, + ) + .unwrap(); // Redelegate from dest_validator to dest_validator_2 now redelegate_tokens( @@ -1154,7 +1279,12 @@ fn test_overslashing_aux(mut validators: Vec) { // Advance to processing epoch 1 loop { current_epoch = advance_epoch(&mut storage, ¶ms); - process_slashes(&mut storage, current_epoch).unwrap(); + process_slashes( + &mut storage, + &mut namada_events::testing::VoidEventSink, + current_epoch, + ) + .unwrap(); if current_epoch == processing_epoch_1 { break; } @@ -1190,7 +1320,12 @@ fn test_overslashing_aux(mut validators: Vec) { // Advance to processing epoch 2 loop { current_epoch = advance_epoch(&mut storage, ¶ms); - process_slashes(&mut storage, current_epoch).unwrap(); + process_slashes( + &mut storage, + &mut namada_events::testing::VoidEventSink, + current_epoch, + ) + .unwrap(); if current_epoch == processing_epoch_2 { break; } @@ -1333,7 +1468,12 @@ fn test_slashed_bond_amount_aux(validators: Vec) { // Advance an epoch to 1 current_epoch = advance_epoch(&mut storage, ¶ms); - process_slashes(&mut storage, current_epoch).unwrap(); + process_slashes( + &mut storage, + &mut namada_events::testing::VoidEventSink, + current_epoch, + ) + .unwrap(); // Bond to validator 1 bond_tokens( @@ -1381,7 +1521,12 @@ fn test_slashed_bond_amount_aux(validators: Vec) { // Advance an epoch to ep 2 current_epoch = advance_epoch(&mut 
storage, ¶ms); - process_slashes(&mut storage, current_epoch).unwrap(); + process_slashes( + &mut storage, + &mut namada_events::testing::VoidEventSink, + current_epoch, + ) + .unwrap(); // Bond to validator 1 bond_tokens( @@ -1419,7 +1564,12 @@ fn test_slashed_bond_amount_aux(validators: Vec) { // Advance two epochs to ep 4 for _ in 0..2 { current_epoch = advance_epoch(&mut storage, ¶ms); - process_slashes(&mut storage, current_epoch).unwrap(); + process_slashes( + &mut storage, + &mut namada_events::testing::VoidEventSink, + current_epoch, + ) + .unwrap(); } // Find some slashes committed in various epochs @@ -1471,7 +1621,12 @@ fn test_slashed_bond_amount_aux(validators: Vec) { // Advance such that these slashes are all processed for _ in 0..params.slash_processing_epoch_offset() { current_epoch = advance_epoch(&mut storage, ¶ms); - process_slashes(&mut storage, current_epoch).unwrap(); + process_slashes( + &mut storage, + &mut namada_events::testing::VoidEventSink, + current_epoch, + ) + .unwrap(); } let pipeline_epoch = current_epoch + params.pipeline_len; diff --git a/crates/sdk/Cargo.toml b/crates/sdk/Cargo.toml index 77b6920fb7..d840f82240 100644 --- a/crates/sdk/Cargo.toml +++ b/crates/sdk/Cargo.toml @@ -60,12 +60,14 @@ migrations = [ "namada_account/migrations", "namada_core/migrations", "namada_ethereum_bridge/migrations", + "namada_events/migrations", "namada_governance/migrations", "namada_proof_of_stake/migrations", "namada_state/migrations", "namada_storage/migrations", "namada_tx/migrations", "namada_vote_ext/migrations", + "namada_gas/migrations", "linkme", ] @@ -73,6 +75,7 @@ migrations = [ namada_account = { path = "../account" } namada_core = { path = "../core" } namada_ethereum_bridge = { path = "../ethereum_bridge", default-features = false } +namada_events = { path = "../events" } namada_gas = { path = "../gas" } namada_governance = { path = "../governance" } namada_ibc = { path = "../ibc" } @@ -111,6 +114,7 @@ orion.workspace = true owo-colors = "3.5.0" parse_duration = "2.1.1" paste.workspace = true +patricia_tree.workspace = true proptest = { workspace = true, optional = true } prost.workspace = true rand = { workspace = true, optional = true } diff --git a/crates/sdk/src/error.rs b/crates/sdk/src/error.rs index c2f87b7651..e05be884b2 100644 --- a/crates/sdk/src/error.rs +++ b/crates/sdk/src/error.rs @@ -3,9 +3,9 @@ use namada_core::address::Address; use namada_core::dec::Dec; use namada_core::ethereum_events::EthAddress; -use namada_core::event::EventError; use namada_core::storage; use namada_core::storage::Epoch; +use namada_events::EventError; use namada_tx::Tx; use prost::EncodeError; use tendermint_rpc::Error as RpcError; diff --git a/crates/sdk/src/events/log.rs b/crates/sdk/src/events/log.rs index 95e96574cd..5409006dab 100644 --- a/crates/sdk/src/events/log.rs +++ b/crates/sdk/src/events/log.rs @@ -1,30 +1,31 @@ //! A log to store events emitted by `FinalizeBlock` calls in the ledger. //! -//! The log can only hold `N` events at a time, where `N` is a configurable -//! parameter. If the log is holding `N` events, and a new event is logged, -//! old events are pruned. +//! The log will hold up to `N` events of a certain kind at a time, before +//! resorting to pruning older events contained within. use circular_queue::CircularQueue; +use patricia_tree::map::StringPatriciaMap; -use super::{EmitEvents, Event}; +use super::{EmitEvents, Event, EventType}; pub mod dumb_queries; /// Parameters to configure the pruning of the event log. 
#[derive(Debug, Copy, Clone)]
pub struct Params {
-    /// Soft limit on the maximum number of events the event log can hold.
+    /// Soft limit on the maximum number of events the event log can hold,
+    /// for a given event kind.
    ///
-    /// If the number of events in the log exceeds this value, the log
-    /// will be pruned.
-    pub max_log_events: usize,
+    /// If the number of events of a given type in the log exceeds this value,
+    /// events of that kind in the log will be pruned.
+    pub max_log_events_per_kind: usize,
}

impl Default for Params {
    fn default() -> Self {
        // TODO: tune the default params
        Self {
-            max_log_events: 50000,
+            max_log_events_per_kind: 50000,
        }
    }
}
@@ -33,7 +34,8 @@ impl Default for Params {
/// `FinalizeBlock` calls, in the ledger.
#[derive(Debug)]
pub struct EventLog {
-    queue: CircularQueue<Event>,
+    cap: usize,
+    map: StringPatriciaMap<CircularQueue<Event>>,
}

impl Default for EventLog {
@@ -63,10 +65,27 @@ impl EmitEvents for EventLog {
}

impl EventLog {
+    /// Retrieve an event queue of a given type.
+    fn get_queue_of_type(
+        &mut self,
+        event_type: &EventType,
+    ) -> &mut CircularQueue<Event> {
+        let event_type = event_type.to_string();
+
+        if namada_core::hints::unlikely(!self.map.contains_key(&event_type)) {
+            // some monkey business
+            self.map
+                .insert(&event_type, CircularQueue::with_capacity(self.cap));
+        }
+
+        self.map.get_mut(&event_type).unwrap()
+    }
+
    /// Return a new event log.
    pub fn new(params: Params) -> Self {
        Self {
-            queue: CircularQueue::with_capacity(params.max_log_events),
+            cap: params.max_log_events_per_kind,
+            map: StringPatriciaMap::new(),
        }
    }
@@ -77,7 +96,7 @@
    {
        let mut num_entries = 0;
        for event in events.into_iter() {
-            self.queue.push(event);
+            self.get_queue_of_type(event.kind()).push(event);
            num_entries += 1;
        }
        tracing::debug!(num_entries, "Added new entries to the event log");
@@ -86,59 +105,89 @@
    /// Returns a new iterator over this [`EventLog`].
    #[inline]
    pub fn iter(&self) -> impl Iterator {
-        self.queue.iter()
+        self.map.values().flat_map(|queue| queue.iter())
    }

-    /// Returns a filtering iterator over this [`EventLog`].
+    /// Returns an adapter that turns this [`EventLog`] into
+    /// a filtering iterator over the events contained within.
    #[inline]
-    pub fn iter_with_matcher(
+    pub fn with_matcher(
        &self,
        matcher: dumb_queries::QueryMatcher,
-    ) -> impl Iterator {
-        self.queue
-            .iter()
-            .filter(move |&event| matcher.matches(event))
+    ) -> WithMatcher<'_> {
+        WithMatcher { matcher, log: self }
+    }
+}
+
+/// Iterator over an [`EventLog`] taking a [matcher](dumb_queries::QueryMatcher)
+/// in order to filter events within.
+pub struct WithMatcher<'log> {
+    log: &'log EventLog,
+    matcher: dumb_queries::QueryMatcher,
+}
+
+impl<'log> WithMatcher<'log> {
+    /// Iterates and filters events in the associated [`EventLog`]
+    /// using the provided [event matcher](dumb_queries::QueryMatcher).
+ pub fn iter<'this: 'log>( + &'this self, + ) -> impl Iterator + 'log { + self.log + .map + .iter_prefix(self.matcher.event_type()) + .flat_map(|(_, queue)| { + queue.iter().filter(|&event| self.matcher.matches(event)) + }) } } #[cfg(test)] -mod tests { +mod event_log_tests { use namada_core::hash::Hash; + use namada_core::keccak::KeccakHash; + use namada_ethereum_bridge::event::types::BRIDGE_POOL_RELAYED; + use namada_ethereum_bridge::event::BridgePoolTxHash; use super::*; - use crate::events::{EventLevel, EventType}; + use crate::events::extend::{ComposeEvent, TxHash}; + use crate::events::EventLevel; + use crate::tx::event::types::APPLIED as APPLIED_TX; const HASH: &str = "DEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEF"; - /// An accepted tx hash query. + /// An applied tx hash query. macro_rules! applied { ($hash:expr) => { dumb_queries::QueryMatcher::applied(Hash::try_from($hash).unwrap()) }; } + /// An applied tx hash query. + macro_rules! bridge_pool_relayed { + ($hash:expr) => { + dumb_queries::QueryMatcher::bridge_pool_relayed( + &KeccakHash::try_from($hash).unwrap(), + ) + }; + } + + /// Return a mock `FinalizeBlock` event. + fn mock_event(event_type: EventType, hash: impl AsRef) -> Event { + Event::new(event_type, EventLevel::Tx) + .with(TxHash(Hash::try_from(hash.as_ref()).unwrap())) + .with(BridgePoolTxHash( + &KeccakHash::try_from(hash.as_ref()).unwrap(), + )) + .into() + } + /// Return a vector of mock `FinalizeBlock` events. fn mock_tx_events(hash: &str) -> Vec { - let event_1 = Event { - event_type: EventType::Applied, - level: EventLevel::Block, - attributes: { - let mut attrs = namada_core::collections::HashMap::new(); - attrs.insert("hash".to_string(), hash.to_string()); - attrs - }, - }; - let event_2 = Event { - event_type: EventType::Proposal, - level: EventLevel::Block, - attributes: { - let mut attrs = namada_core::collections::HashMap::new(); - attrs.insert("hash".to_string(), hash.to_string()); - attrs - }, - }; - vec![event_1, event_2] + vec![ + mock_event(BRIDGE_POOL_RELAYED, hash), + mock_event(APPLIED_TX, hash), + ] } /// Test adding a couple of events to the event log, and @@ -157,14 +206,28 @@ mod tests { } // inspect log - let events_in_log: Vec<_> = - log.iter_with_matcher(applied!(HASH)).cloned().collect(); + assert_eq!(log.iter().count(), NUM_HEIGHTS * events.len()); + + let events_in_log: Vec<_> = log + .with_matcher(bridge_pool_relayed!(HASH)) + .iter() + .cloned() + .collect(); assert_eq!(events_in_log.len(), NUM_HEIGHTS); for event in events_in_log { assert_eq!(events[0], event); } + + let events_in_log: Vec<_> = + log.with_matcher(applied!(HASH)).iter().cloned().collect(); + + assert_eq!(events_in_log.len(), NUM_HEIGHTS); + + for event in events_in_log { + assert_eq!(events[1], event); + } } /// Test pruning old events from the log. 
@@ -172,51 +235,47 @@ mod tests { fn test_log_prune() { const LOG_CAP: usize = 4; - // log cap has to be a multiple of two - // for this test - if LOG_CAP < 2 || LOG_CAP & 1 != 0 { + if LOG_CAP == 0 { panic!(); } - const MATCHED_EVENTS: usize = LOG_CAP / 2; - let mut log = EventLog::new(Params { - max_log_events: LOG_CAP, + max_log_events_per_kind: LOG_CAP, }); // completely fill the log with events - // - // `mock_tx_events` returns 2 events, so - // we do `LOG_CAP / 2` iters to fill the log - let events = mock_tx_events(HASH); - assert_eq!(events.len(), 2); - - for _ in 0..(LOG_CAP / 2) { - log.log_events(events.clone()); + for i in 0..LOG_CAP { + log.emit(mock_event(APPLIED_TX, format!("{i:064X}"))); } // inspect log - it should be full - let events_in_log: Vec<_> = - log.iter_with_matcher(applied!(HASH)).cloned().collect(); + let events_in_log: Vec<_> = log.iter().cloned().collect(); - assert_eq!(events_in_log.len(), MATCHED_EVENTS); + assert_eq!(events_in_log.len(), LOG_CAP); - for event in events_in_log { - assert_eq!(events[0], event); + // iter in reverse since the ringbuf gives us items + // in the order of the last insertion + for (i, event) in events_in_log.into_iter().rev().enumerate() { + assert_eq!(mock_event(APPLIED_TX, format!("{i:064X}")), event); } - // add a new PROPOSAL event to the log, - // pruning the first APPLIED event we added - log.log_events(Some(events[1].clone())); + // add a new APPLIED event to the log + log.emit(mock_event(APPLIED_TX, HASH)); - let events_in_log: Vec<_> = - log.iter_with_matcher(applied!(HASH)).cloned().collect(); + // inspect log - oldest event should have been pruned + let events_in_log: Vec<_> = log.iter().cloned().collect(); - const ACCEPTED_EVENTS: usize = MATCHED_EVENTS - 1; - assert_eq!(events_in_log.len(), ACCEPTED_EVENTS); + assert_eq!(events_in_log.len(), LOG_CAP); + assert_eq!(events_in_log[0], mock_event(APPLIED_TX, HASH)); - for event in events_in_log { - assert_eq!(events[0], event); + for (i, event) in events_in_log + .into_iter() + .rev() + .enumerate() + .take(LOG_CAP - 1) + { + let i = i + 1; // last elem was pruned + assert_eq!(mock_event(APPLIED_TX, format!("{i:064X}")), event); } } } diff --git a/crates/sdk/src/events/log/dumb_queries.rs b/crates/sdk/src/events/log/dumb_queries.rs index adf645ab10..f0c770ce8d 100644 --- a/crates/sdk/src/events/log/dumb_queries.rs +++ b/crates/sdk/src/events/log/dumb_queries.rs @@ -1,53 +1,136 @@ -//! Silly simple Tendermint query parser. -//! -//! This parser will only work with simple queries of the form: -//! -//! ```text -//! tm.event='NewBlock' AND .<$attr>='<$value>' -//! ``` +//! Silly simple event matcher. 
use namada_core::collections::HashMap; use namada_core::hash::Hash; +use namada_core::keccak::KeccakHash; use namada_core::storage::BlockHeight; +use namada_ethereum_bridge::event::types::{ + BRIDGE_POOL_EXPIRED, BRIDGE_POOL_RELAYED, +}; +use namada_ethereum_bridge::event::BridgePoolTxHash; +use namada_ibc::event::types::UPDATE_CLIENT; +use namada_ibc::event::{ + ClientId as ClientIdAttr, ConsensusHeights, IbcEvent, IbcEventType, + PacketDstChannel, PacketDstPort, PacketSequence, PacketSrcChannel, + PacketSrcPort, +}; -use crate::events::{Event, EventType}; +use crate::events::extend::{ + ExtendAttributesMap, ExtendEventAttributes, TxHash as TxHashAttr, +}; +use crate::events::{Event, EventToEmit, EventType, EventTypeBuilder}; use crate::ibc::core::client::types::Height as IbcHeight; use crate::ibc::core::host::types::identifiers::{ ChannelId, ClientId, PortId, Sequence, }; +use crate::tx::event::types::APPLIED as APPLIED_TX; /// A [`QueryMatcher`] verifies if a Namada event matches a /// given Tendermint query. #[derive(Debug, Clone)] pub struct QueryMatcher { + event_type_match: MatchType, event_type: EventType, attributes: HashMap, } +/// Determine which kind of match will be performed over a series of event +/// types. +#[derive(Debug, Clone)] +pub enum MatchType { + /// Exact match. + Exact, + /// Prefix match. + Prefix, +} + +impl ExtendAttributesMap for QueryMatcher { + fn with_attribute(&mut self, data: DATA) -> &mut Self + where + DATA: ExtendEventAttributes, + { + data.extend_event_attributes(&mut self.attributes); + self + } +} + impl QueryMatcher { + /// Returns the event type that this [`QueryMatcher`] + /// attempts to match. + pub fn event_type(&self) -> &EventType { + &self.event_type + } + + /// Return the match type performed over the + /// [`EventType`]. + pub fn match_type(&self) -> &MatchType { + &self.event_type_match + } + + /// Create a new [`QueryMatcher`] matching event types + /// with the given `prefix`. + pub fn of_event_type() -> Self { + Self::with_prefix(EventType::new(E::DOMAIN)) + } + + /// Create a new [`QueryMatcher`] matching event types + /// with the given `prefix`. + pub fn with_prefix(prefix: EventType) -> Self { + Self { + event_type: prefix, + event_type_match: MatchType::Prefix, + attributes: Default::default(), + } + } + + /// Create a new [`QueryMatcher`] with the given event type. + pub fn with_event_type(event_type: EventType) -> Self { + Self { + event_type, + event_type_match: MatchType::Exact, + attributes: Default::default(), + } + } + + /// Add a new attribute to the [`QueryMatcher`]. + #[inline] + pub fn and_attribute(mut self, data: DATA) -> Self + where + DATA: ExtendEventAttributes, + { + self.with_attribute(data); + self + } + /// Checks if this [`QueryMatcher`] validates the /// given [`Event`]. pub fn matches(&self, event: &Event) -> bool { - if event.event_type != self.event_type { + let matches_event_type = match self.match_type() { + MatchType::Exact => *event.kind() == self.event_type, + MatchType::Prefix => event.kind().starts_with(&*self.event_type), + }; + + if !matches_event_type { return false; } + event.has_subset_of_attrs(&self.attributes) + } - self.attributes.iter().all(|(key, value)| { - match event.attributes.get(key) { - Some(v) => v == value, - None => false, - } - }) + /// Returns a query matching the given relayed Bridge pool transaction hash. 
+ pub fn bridge_pool_relayed(tx_hash: &KeccakHash) -> Self { + Self::with_event_type(BRIDGE_POOL_RELAYED) + .and_attribute(BridgePoolTxHash(tx_hash)) + } + + /// Returns a query matching the given expired Bridge pool transaction hash. + pub fn bridge_pool_expired(tx_hash: &KeccakHash) -> Self { + Self::with_event_type(BRIDGE_POOL_EXPIRED) + .and_attribute(BridgePoolTxHash(tx_hash)) } /// Returns a query matching the given applied transaction hash. pub fn applied(tx_hash: Hash) -> Self { - let mut attributes = HashMap::new(); - attributes.insert("hash".to_string(), tx_hash.to_string()); - Self { - event_type: EventType::Applied, - attributes, - } + Self::with_event_type(APPLIED_TX).and_attribute(TxHashAttr(tx_hash)) } /// Returns a query matching the given IBC UpdateClient parameters @@ -55,100 +138,106 @@ impl QueryMatcher { client_id: ClientId, consensus_height: BlockHeight, ) -> Self { - use crate::ibc::core::client::types::events::{ - CLIENT_ID_ATTRIBUTE_KEY, CONSENSUS_HEIGHTS_ATTRIBUTE_KEY, - UPDATE_CLIENT_EVENT, - }; - - let mut attributes = HashMap::new(); - attributes - .insert(CLIENT_ID_ATTRIBUTE_KEY.to_string(), client_id.to_string()); - attributes.insert( - CONSENSUS_HEIGHTS_ATTRIBUTE_KEY.to_string(), - IbcHeight::new(0, consensus_height.0) - .expect("invalid height") - .to_string(), - ); - Self { - event_type: EventType::Ibc(UPDATE_CLIENT_EVENT.to_string()), - attributes, - } + Self::with_event_type(UPDATE_CLIENT) + .and_attribute(ClientIdAttr(client_id)) + .and_attribute(ConsensusHeights( + IbcHeight::new(0, consensus_height.0).expect("invalid height"), + )) } /// Returns a query matching the given IBC packet parameters pub fn ibc_packet( - event_type: EventType, + event_type: IbcEventType, source_port: PortId, source_channel: ChannelId, destination_port: PortId, destination_channel: ChannelId, sequence: Sequence, ) -> Self { - let mut attributes = HashMap::new(); - attributes - .insert("packet_src_port".to_string(), source_port.to_string()); - attributes.insert( - "packet_src_channel".to_string(), - source_channel.to_string(), - ); - attributes.insert( - "packet_dst_port".to_string(), - destination_port.to_string(), - ); - attributes.insert( - "packet_dst_channel".to_string(), - destination_channel.to_string(), - ); - attributes.insert("packet_sequence".to_string(), sequence.to_string()); - Self { - event_type, - attributes, - } + Self::with_event_type( + EventTypeBuilder::new_of::() + .with_segment(event_type.0) + .build(), + ) + .and_attribute(PacketSrcPort(source_port)) + .and_attribute(PacketSrcChannel(source_channel)) + .and_attribute(PacketDstPort(destination_port)) + .and_attribute(PacketDstChannel(destination_channel)) + .and_attribute(PacketSequence(sequence)) } } #[cfg(test)] mod tests { + use namada_ethereum_bridge::event::EthBridgeEvent; + use super::*; + use crate::events::extend::ComposeEvent; use crate::events::EventLevel; + const HASH: &str = + "DEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEF"; + + /// Test if matching the prefix of an event type works as expected. 
+ #[test] + fn test_query_matching_prefix() { + let matcher = QueryMatcher::of_event_type::(); + + let tests = { + let bp_hash: KeccakHash = HASH.parse().unwrap(); + let tx_hash: Hash = HASH.parse().unwrap(); + + let event_1: Event = + Event::new(BRIDGE_POOL_RELAYED, EventLevel::Tx) + .with(BridgePoolTxHash(&bp_hash)) + .into(); + let matches_1 = true; + + let event_2: Event = + Event::new(BRIDGE_POOL_EXPIRED, EventLevel::Tx) + .with(BridgePoolTxHash(&bp_hash)) + .into(); + let matches_2 = true; + + let event_3: Event = Event::new(UPDATE_CLIENT, EventLevel::Tx) + .with(TxHashAttr(tx_hash)) + .into(); + let matches_3 = false; + + [ + (event_1, matches_1), + (event_2, matches_2), + (event_3, matches_3), + ] + }; + + for (ref ev, status) in tests { + if matcher.matches(ev) != status { + panic!("Test failed"); + } + } + } + /// Test if query matching is working as expected. #[test] fn test_tm_query_matching() { - const HASH: &str = - "DEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEF"; - - let mut attributes = HashMap::new(); - attributes.insert("hash".to_string(), HASH.to_string()); - let matcher = QueryMatcher { - event_type: EventType::Proposal, - attributes, - }; + let tx_hash: Hash = HASH.parse().unwrap(); + + let matcher = QueryMatcher::with_event_type(APPLIED_TX) + .and_attribute(TxHashAttr(tx_hash)); let tests = { - let event_1 = Event { - event_type: EventType::Proposal, - level: EventLevel::Block, - attributes: { - let mut attrs = namada_core::collections::HashMap::new(); - attrs.insert("hash".to_string(), HASH.to_string()); - attrs - }, - }; - let accepted_1 = true; - - let event_2 = Event { - event_type: EventType::Applied, - level: EventLevel::Block, - attributes: { - let mut attrs = namada_core::collections::HashMap::new(); - attrs.insert("hash".to_string(), HASH.to_string()); - attrs - }, - }; - let accepted_2 = false; - - [(event_1, accepted_1), (event_2, accepted_2)] + let event_1: Event = Event::new(UPDATE_CLIENT, EventLevel::Tx) + .with(TxHashAttr(tx_hash)) + .into(); + let applied_1 = false; + + let event_2: Event = Event::new(APPLIED_TX, EventLevel::Tx) + .with(TxHashAttr(tx_hash)) + .into(); + let applied_2 = true; + + [(event_1, applied_1), (event_2, applied_2)] }; for (ref ev, status) in tests { diff --git a/crates/sdk/src/events/mod.rs b/crates/sdk/src/events/mod.rs index 0d33f8dc89..1558e54c84 100644 --- a/crates/sdk/src/events/mod.rs +++ b/crates/sdk/src/events/mod.rs @@ -2,9 +2,7 @@ pub mod log; use namada_core::collections::HashMap; -pub use namada_core::event::{ - extend, EmitEvents, Event, EventError, EventLevel, EventType, -}; +pub use namada_events::*; use serde_json::Value; // use crate::ledger::governance::utils::ProposalEvent; diff --git a/crates/sdk/src/lib.rs b/crates/sdk/src/lib.rs index f7012f019e..afc3b39c99 100644 --- a/crates/sdk/src/lib.rs +++ b/crates/sdk/src/lib.rs @@ -6,8 +6,9 @@ pub use namada_core::*; pub use tendermint_rpc; pub use { bip39, masp_primitives, masp_proofs, namada_account as account, - namada_governance as governance, namada_proof_of_stake as proof_of_stake, - namada_state as state, namada_storage as storage, zeroize, + namada_gas as gas, namada_governance as governance, + namada_proof_of_stake as proof_of_stake, namada_state as state, + namada_storage as storage, zeroize, }; pub mod eth_bridge; diff --git a/crates/sdk/src/masp.rs b/crates/sdk/src/masp.rs index 87b90b5d91..03906fa42f 100644 --- a/crates/sdk/src/masp.rs +++ b/crates/sdk/src/masp.rs @@ -6,7 +6,6 @@ use std::env; use std::fmt::Debug; use 
std::ops::Deref; use std::path::PathBuf; -use std::str::FromStr; use borsh::{BorshDeserialize, BorshSerialize}; use borsh_ext::BorshSerializeExt; @@ -60,6 +59,9 @@ pub use namada_core::masp::{ use namada_core::storage::{BlockHeight, Epoch, IndexedTx, TxIndex}; use namada_core::time::{DateTimeUtc, DurationSecs}; use namada_core::uint::Uint; +use namada_events::extend::{ + ReadFromEventAttributes, ValidMaspTx as ValidMaspTxAttr, +}; use namada_ibc::IbcMessage; use namada_macros::BorshDeserializer; #[cfg(feature = "migrations")] @@ -67,6 +69,7 @@ use namada_migrations::*; use namada_state::StorageError; use namada_token::{self as token, Denomination, MaspDigitPos, Transfer}; use namada_tx::data::{TxResult, WrapperTx}; +use namada_tx::event::InnerTx as InnerTxAttr; use namada_tx::Tx; use rand_core::{CryptoRng, OsRng, RngCore}; use ripemd::Digest as RipemdDigest; @@ -924,23 +927,12 @@ impl ShieldedContext { // because those are what the masp validity predicate works on let (wrapper_changed_keys, changed_keys) = if let ExtractShieldedActionArg::Event(tx_event) = action_arg { - let tx_result_str = tx_event - .attributes - .iter() - .find_map(|attr| { - if attr.key == "inner_tx" { - Some(&attr.value) - } else { - None - } - }) - .ok_or_else(|| { - Error::Other( - "Missing required tx result in event".to_string(), - ) - })?; - let result = TxResult::from_str(tx_result_str) - .map_err(|e| Error::Other(e.to_string()))?; + let result = InnerTxAttr::read_from_event_attributes( + &tx_event.attributes, + ) + .map_err(|err| { + Error::Other(format!("Failed to extract masp tx: {err}")) + })?; (result.wrapper_changed_keys, result.changed_keys) } else { (Default::default(), Default::default()) @@ -1993,8 +1985,8 @@ impl ShieldedContext { .and_then(|seed| { let exp_str = format!("Env var {ENV_VAR_MASP_TEST_SEED} must be a u64."); - let parsed_seed: u64 = FromStr::from_str(&seed) - .map_err(|_| Error::Other(exp_str))?; + let parsed_seed: u64 = + seed.parse().map_err(|_| Error::Other(exp_str))?; Ok(parsed_seed) }) { tracing::warn!( @@ -2622,25 +2614,15 @@ async fn get_indexed_masp_events_at_height( events .into_iter() .filter_map(|event| { - let tx_index = - event.attributes.iter().find_map(|attribute| { - if attribute.key == "is_valid_masp_tx" { - Some(TxIndex( - u32::from_str(&attribute.value).unwrap(), - )) - } else { - None - } - }); - match tx_index { - Some(idx) => { - if idx >= first_idx_to_query { - Some((idx, event)) - } else { - None - } - } - None => None, + let tx_index = ValidMaspTxAttr::read_from_event_attributes( + &event.attributes, + ) + .ok()?; + + if tx_index >= first_idx_to_query { + Some((tx_index, event)) + } else { + None } }) .collect::>() @@ -2769,25 +2751,9 @@ async fn get_receiving_result( fn get_tx_result( tx_event: &crate::tendermint::abci::Event, ) -> Result { - tx_event - .attributes - .iter() - .find_map(|attribute| { - if attribute.key == "inner_tx" { - let tx_result = TxResult::from_str(&attribute.value) - .expect("The event value should be parsable"); - Some(tx_result) - } else { - None - } - }) - .ok_or_else(|| { - Error::Other( - "Couldn't find changed keys in the event for the provided \ - transaction" - .to_string(), - ) - }) + InnerTxAttr::read_from_event_attributes(&tx_event.attributes).map_err( + |err| Error::Other(format!("Failed to parse tx result: {err}")), + ) } mod tests { diff --git a/crates/sdk/src/queries/shell.rs b/crates/sdk/src/queries/shell.rs index d6cb69ad88..d10e2c1db9 100644 --- a/crates/sdk/src/queries/shell.rs +++ b/crates/sdk/src/queries/shell.rs @@ 
-18,6 +18,7 @@ use namada_core::storage::{ }; use namada_core::token::{Denomination, MaspDigitPos}; use namada_core::uint::Uint; +use namada_ibc::event::IbcEventType; use namada_state::{DBIter, LastBlock, StateRead, StorageHasher, DB}; use namada_storage::{ResultExt, StorageRead}; use namada_token::storage_key::masp_token_map_key; @@ -26,7 +27,7 @@ use namada_tx::data::TxResult; use self::eth_bridge::{EthBridge, ETH_BRIDGE}; use crate::events::log::dumb_queries; -use crate::events::{Event, EventType}; +use crate::events::Event; use crate::ibc::core::host::types::identifiers::{ ChannelId, ClientId, PortId, Sequence, }; @@ -113,7 +114,7 @@ router! {SHELL, ( "ibc_client_update" / [client_id: ClientId] / [consensus_height: BlockHeight] ) -> Option = ibc_client_update, // IBC packet event - ( "ibc_packet" / [event_type: EventType] / [source_port: PortId] / [source_channel: ChannelId] / [destination_port: PortId] / [destination_channel: ChannelId] / [sequence: Sequence]) -> Option = ibc_packet, + ( "ibc_packet" / [event_type: IbcEventType] / [source_port: PortId] / [source_channel: ChannelId] / [destination_port: PortId] / [destination_channel: ChannelId] / [sequence: Sequence]) -> Option = ibc_packet, } // Handlers: @@ -517,12 +518,7 @@ where H: 'static + StorageHasher + Sync, { let matcher = dumb_queries::QueryMatcher::applied(tx_hash); - Ok(ctx - .event_log - .iter_with_matcher(matcher) - .by_ref() - .next() - .cloned()) + Ok(ctx.event_log.with_matcher(matcher).iter().next().cloned()) } fn ibc_client_update( @@ -538,17 +534,12 @@ where client_id, consensus_height, ); - Ok(ctx - .event_log - .iter_with_matcher(matcher) - .by_ref() - .next() - .cloned()) + Ok(ctx.event_log.with_matcher(matcher).iter().next().cloned()) } fn ibc_packet( ctx: RequestCtx<'_, D, H, V, T>, - event_type: EventType, + event_type: IbcEventType, source_port: PortId, source_channel: ChannelId, destination_port: PortId, @@ -567,12 +558,7 @@ where destination_channel, sequence, ); - Ok(ctx - .event_log - .iter_with_matcher(matcher) - .by_ref() - .next() - .cloned()) + Ok(ctx.event_log.with_matcher(matcher).iter().next().cloned()) } fn account( diff --git a/crates/sdk/src/queries/shell/eth_bridge.rs b/crates/sdk/src/queries/shell/eth_bridge.rs index 28f112c37e..07c7f6a1b9 100644 --- a/crates/sdk/src/queries/shell/eth_bridge.rs +++ b/crates/sdk/src/queries/shell/eth_bridge.rs @@ -17,6 +17,7 @@ use namada_core::storage::{BlockHeight, DbKeySeg, Epoch, Key}; use namada_core::token::Amount; use namada_core::voting_power::FractionalVotingPower; use namada_core::{ethereum_structs, hints}; +use namada_ethereum_bridge::event::{BpTransferStatus, BridgePoolTxHash}; use namada_ethereum_bridge::protocol::transactions::votes::{ EpochedVotingPower, EpochedVotingPowerExt, }; @@ -41,7 +42,6 @@ use namada_vote_ext::validator_set_update::{ use serde::{Deserialize, Serialize}; use crate::eth_bridge::ethers::abi::AbiDecode; -use crate::events::EventType; use crate::queries::{EncodedResponseQuery, RequestCtx, RequestQuery}; /// Container for the status of queried transfers to Ethereum. 
@@ -296,32 +296,20 @@ where // INVARIANT: transfers that are in the event log will have already // been processed and therefore removed from the Bridge pool at the // time of this query - let kind_key: String = "kind".into(); let completed_transfers = ctx.event_log.iter().filter_map(|ev| { - if !matches!(&ev.event_type, EventType::EthereumBridge) { + let Ok(transfer_status) = BpTransferStatus::try_from(ev.kind()) else { return None; - } - let eth_event_kind = - ev.attributes.get(&kind_key).map(|k| k.as_str())?; - let is_relayed = match eth_event_kind { - "bridge_pool_relayed" => true, - "bridge_pool_expired" => false, - _ => return None, }; let tx_hash: KeccakHash = ev - .attributes - .get("tx_hash") - .expect("The transfer hash must be available") - .as_str() - .try_into() - .expect("We must have a valid KeccakHash"); + .read_attribute::() + .expect("The transfer hash must be available"); if !transfer_hashes.swap_remove(&tx_hash) { return None; } - Some((tx_hash, is_relayed, transfer_hashes.is_empty())) + Some((tx_hash, transfer_status, transfer_hashes.is_empty())) }); - for (hash, is_relayed, early_exit) in completed_transfers { - if hints::likely(is_relayed) { + for (hash, transfer_status, early_exit) in completed_transfers { + if hints::likely(matches!(transfer_status, BpTransferStatus::Relayed)) { status.relayed.insert(hash.clone()); } else { status.expired.insert(hash.clone()); @@ -1690,14 +1678,14 @@ mod test_ethbridge_router { let mut transfer3 = transfer.clone(); transfer3.transfer.amount = 2.into(); client.event_log.log_events(vec![ - ethereum_structs::EthBridgeEvent::BridgePool { + crate::eth_bridge::event::EthBridgeEvent::BridgePool { tx_hash: transfer2.keccak256(), - status: ethereum_structs::BpTransferStatus::Expired, + status: crate::eth_bridge::event::BpTransferStatus::Expired, } .into(), - ethereum_structs::EthBridgeEvent::BridgePool { + crate::eth_bridge::event::EthBridgeEvent::BridgePool { tx_hash: transfer3.keccak256(), - status: ethereum_structs::BpTransferStatus::Relayed, + status: crate::eth_bridge::event::BpTransferStatus::Relayed, } .into(), ]); diff --git a/crates/sdk/src/rpc.rs b/crates/sdk/src/rpc.rs index a3863bc95d..c4d29d48bb 100644 --- a/crates/sdk/src/rpc.rs +++ b/crates/sdk/src/rpc.rs @@ -3,7 +3,6 @@ use std::cell::Cell; use std::collections::{BTreeMap, BTreeSet}; use std::ops::ControlFlow; -use std::str::FromStr; use borsh::BorshDeserialize; use masp_primitives::asset_type::AssetType; @@ -22,6 +21,8 @@ use namada_core::token::{ Amount, DenominatedAmount, Denomination, MaspDigitPos, }; use namada_core::{storage, token}; +use namada_gas::event::GasUsed as GasUsedAttr; +use namada_gas::Gas; use namada_governance::parameters::GovernanceParameters; use namada_governance::pgf::parameters::PgfParameters; use namada_governance::pgf::storage::steward::StewardDetail; @@ -39,12 +40,13 @@ use namada_proof_of_stake::types::{ }; use namada_state::LastBlock; use namada_tx::data::{ResultCode, TxResult}; +use namada_tx::event::{Code as CodeAttr, InnerTx as InnerTxAttr}; use serde::Serialize; use crate::args::InputAmount; use crate::control_flow::time; use crate::error::{EncodingError, Error, QueryError, TxSubmitError}; -use crate::events::Event; +use crate::events::{extend, Event}; use crate::internal_macros::echo_error; use crate::io::Io; use crate::masp::MaspTokenRewardData; @@ -54,20 +56,21 @@ use crate::queries::vp::pos::{ use crate::queries::{Client, RPC}; use crate::tendermint::block::Height; use crate::tendermint::merkle::proof::ProofOps; -use 
crate::tendermint_rpc::error::Error as TError; use crate::tendermint_rpc::query::Query; -use crate::tendermint_rpc::Order; use crate::{display_line, edisplay_line, error, Namada, Tx}; -/// Query the status of a given transaction. -/// -/// If a response is not delivered until `deadline`, we exit the cli with an -/// error. -pub async fn query_tx_status( - context: &impl Namada, +/// Identical to [`query_tx_status`], but does not need a [`Namada`] +/// context. +pub async fn query_tx_status2( + client: &C, + io: &IO, status: TxEventQuery<'_>, deadline: time::Instant, -) -> Result { +) -> Result +where + C: crate::queries::Client + Sync, + IO: crate::io::Io + crate::MaybeSend + crate::MaybeSync, +{ time::Sleep { strategy: time::LinearBackoff { delta: time::Duration::from_secs(1), @@ -75,8 +78,7 @@ pub async fn query_tx_status( } .timeout(deadline, || async { tracing::debug!(query = ?status, "Querying tx status"); - let maybe_event = match query_tx_events(context.client(), status).await - { + let maybe_event = match query_tx_events(client, status).await { Ok(response) => response, Err(err) => { tracing::debug!( @@ -103,7 +105,7 @@ pub async fn query_tx_status( .await .map_err(|_| { edisplay_line!( - context.io(), + io, "Transaction status query deadline of {deadline:?} exceeded" ); match status { @@ -114,6 +116,18 @@ pub async fn query_tx_status( }) } +/// Query the status of a given transaction. +/// +/// If a response is not delivered until `deadline`, we exit the cli with an +/// error. +pub async fn query_tx_status( + context: &impl Namada, + status: TxEventQuery<'_>, + deadline: time::Instant, +) -> Result { + query_tx_status2(context.client(), context.io(), status, deadline).await +} + /// Query the epoch of the last committed block pub async fn query_epoch( client: &C, @@ -565,11 +579,11 @@ pub struct TxResponse { /// Block height pub height: BlockHeight, /// Transaction height - pub hash: String, + pub hash: Hash, /// Response code pub code: ResultCode, /// Gas used. If there's an `inner_tx`, its gas is equal to this value. - pub gas_used: String, + pub gas_used: Gas, } /// Determines a result of an inner tx from [`TxResponse::inner_tx_result`]. @@ -586,39 +600,25 @@ impl TryFrom for TxResponse { type Error = String; fn try_from(event: Event) -> Result { - fn missing_field_err(field: &str) -> String { - format!("Field \"{field}\" not present in event") - } - - let inner_tx = event - .get("inner_tx") - .map(|s| TxResult::from_str(s).unwrap()); + let inner_tx = event.read_attribute::().ok(); let hash = event - .get("hash") - .ok_or_else(|| missing_field_err("hash"))? - .clone(); + .read_attribute::() + .map_err(|err| err.to_string())?; let info = event - .get("info") - .ok_or_else(|| missing_field_err("info"))? - .clone(); + .read_attribute::() + .map_err(|err| err.to_string())?; let log = event - .get("log") - .ok_or_else(|| missing_field_err("log"))? - .clone(); - let height = BlockHeight::from_str( - event - .get("height") - .ok_or_else(|| missing_field_err("height"))?, - ) - .map_err(|e| e.to_string())?; - let code = ResultCode::from_str( - event.get("code").ok_or_else(|| missing_field_err("code"))?, - ) - .map_err(|e| e.to_string())?; + .read_attribute::() + .map_err(|err| err.to_string())?; + let height = event + .read_attribute::() + .map_err(|err| err.to_string())?; + let code = event + .read_attribute::() + .map_err(|err| err.to_string())?; let gas_used = event - .get("gas_used") - .ok_or_else(|| missing_field_err("gas_used"))? 
- .clone(); + .read_attribute::() + .map_err(|err| err.to_string())?; Ok(TxResponse { inner_tx, @@ -655,86 +655,6 @@ impl TxResponse { } } -/// Lookup the full response accompanying the specified transaction event -// TODO: maybe remove this in favor of `query_tx_status` -pub async fn query_tx_response( - client: &C, - tx_query: TxEventQuery<'_>, -) -> Result { - // Find all blocks that apply a transaction with the specified hash - let blocks = &client - .block_search(tx_query.into(), 1, 255, Order::Ascending) - .await - .expect("Unable to query for transaction with given hash") - .blocks; - // Get the block results corresponding to a block to which - // the specified transaction belongs - let block = &blocks - .first() - .ok_or_else(|| { - TError::server( - "Unable to find a block applying the given transaction" - .to_string(), - ) - })? - .block; - let response_block_results = client - .block_results(block.header.height) - .await - .expect("Unable to retrieve block containing transaction"); - // Search for the event where the specified transaction is - // applied to the blockchain - let query_event_opt = - response_block_results.end_block_events.and_then(|events| { - events - .iter() - .find(|event| { - event.kind == tx_query.event_type() - && event.attributes.iter().any(|tag| { - &tag.key == "hash" - && tag.value == tx_query.tx_hash() - }) - }) - .cloned() - }); - let query_event = query_event_opt.ok_or_else(|| { - TError::server( - "Unable to find the event corresponding to the specified \ - transaction" - .to_string(), - ) - })?; - // Reformat the event attributes so as to ease value extraction - let event_map: namada_core::collections::HashMap<&str, &str> = query_event - .attributes - .iter() - .map(|tag| (tag.key.as_ref(), tag.value.as_ref())) - .collect(); - // Summarize the transaction results that we were searching for - let inner_tx = event_map - .get("inner_tx") - .map(|s| { - TxResult::from_str(s).map_err(|_| { - TError::parse("Error parsing TxResult".to_string()) - }) - }) - .transpose()?; - let code = ResultCode::from_str(event_map["code"]) - .map_err(|_| TError::parse("Error parsing ResultCode".to_string()))?; - let height = BlockHeight::from_str(event_map["height"]) - .map_err(|_| TError::parse("Error parsing BlockHeight".to_string()))?; - let result = TxResponse { - inner_tx, - info: event_map["info"].to_string(), - log: event_map["log"].to_string(), - height, - hash: event_map["hash"].to_string(), - code, - gas_used: event_map["gas_used"].to_string(), - }; - Ok(result) -} - /// Get the PoS parameters pub async fn get_pos_params( client: &C, diff --git a/crates/state/Cargo.toml b/crates/state/Cargo.toml index 174407218f..7b587ca938 100644 --- a/crates/state/Cargo.toml +++ b/crates/state/Cargo.toml @@ -25,6 +25,7 @@ benches = [] [dependencies] namada_core = { path = "../core", default-features = false } +namada_events = { path = "../events", default-features = false } namada_gas = { path = "../gas" } namada_macros = { path = "../macros" } namada_merkle_tree = { path = "../merkle_tree" } @@ -44,6 +45,7 @@ sha2.workspace = true thiserror.workspace = true tiny-keccak.workspace = true tracing.workspace = true +patricia_tree.workspace = true proptest = { workspace = true, optional = true } [dev-dependencies] diff --git a/crates/state/src/host_env.rs b/crates/state/src/host_env.rs index 5a6804b5ba..d270c11d19 100644 --- a/crates/state/src/host_env.rs +++ b/crates/state/src/host_env.rs @@ -1,5 +1,6 @@ use std::cell::RefCell; +use namada_events::{EmitEvents, EventToEmit}; use 
namada_gas::{GasMetering, TxGasMeter, VpGasMeter}; use namada_tx::data::TxSentinel; @@ -91,6 +92,30 @@ where } } +impl EmitEvents for TxHostEnvState<'_, D, H> +where + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, +{ + #[inline] + fn emit(&mut self, event: E) + where + E: EventToEmit, + { + self.write_log_mut().emit_event(event); + } + + fn emit_many(&mut self, event_batch: B) + where + B: IntoIterator, + E: EventToEmit, + { + for event in event_batch { + self.emit(event.into()); + } + } +} + impl StateRead for VpHostEnvState<'_, D, H> where D: 'static + DB + for<'iter> DBIter<'iter>, diff --git a/crates/state/src/wl_state.rs b/crates/state/src/wl_state.rs index d40481ffcf..05af157f38 100644 --- a/crates/state/src/wl_state.rs +++ b/crates/state/src/wl_state.rs @@ -6,6 +6,7 @@ use namada_core::borsh::BorshSerializeExt; use namada_core::chain::ChainId; use namada_core::storage; use namada_core::time::DateTimeUtc; +use namada_events::{EmitEvents, EventToEmit}; use namada_parameters::EpochDuration; use namada_replay_protection as replay_protection; use namada_storage::conversion_state::{ConversionState, WithConversionState}; @@ -1087,6 +1088,30 @@ where } } +impl EmitEvents for FullAccessState +where + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, +{ + #[inline] + fn emit(&mut self, event: E) + where + E: EventToEmit, + { + self.write_log_mut().emit_event(event); + } + + fn emit_many(&mut self, event_batch: B) + where + B: IntoIterator, + E: EventToEmit, + { + for event in event_batch { + self.emit(event.into()); + } + } +} + impl WithConversionState for FullAccessState where D: 'static + DB + for<'iter> DBIter<'iter>, @@ -1142,6 +1167,30 @@ where } } +impl EmitEvents for WlState +where + D: 'static + DB + for<'iter> DBIter<'iter>, + H: 'static + StorageHasher, +{ + #[inline] + fn emit(&mut self, event: E) + where + E: EventToEmit, + { + self.write_log_mut().emit_event(event); + } + + fn emit_many(&mut self, event_batch: B) + where + B: IntoIterator, + E: EventToEmit, + { + for event in event_batch { + self.emit(event.into()); + } + } +} + impl StateRead for TempWlState<'_, D, H> where D: 'static + DB + for<'iter> DBIter<'iter>, diff --git a/crates/state/src/wl_storage.rs b/crates/state/src/wl_storage.rs deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/crates/state/src/write_log.rs b/crates/state/src/write_log.rs index e5ad70d760..8c904af99a 100644 --- a/crates/state/src/write_log.rs +++ b/crates/state/src/write_log.rs @@ -7,9 +7,10 @@ use itertools::Itertools; use namada_core::address::{Address, EstablishedAddressGen}; use namada_core::collections::{HashMap, HashSet}; use namada_core::hash::Hash; -use namada_core::ibc::IbcEvent; use namada_core::storage; +use namada_events::{Event, EventToEmit, EventType}; use namada_gas::{MEMORY_ACCESS_GAS_PER_BYTE, STORAGE_WRITE_GAS_PER_BYTE}; +use patricia_tree::map::StringPatriciaMap; use thiserror::Error; #[allow(missing_docs)] @@ -56,6 +57,30 @@ pub enum StorageModification { }, } +/// Log of events in the write log. 
+#[derive(Debug, Clone)] +pub(crate) struct WriteLogEvents { + pub tree: StringPatriciaMap>, +} + +impl std::cmp::PartialEq for WriteLogEvents { + fn eq(&self, other: &WriteLogEvents) -> bool { + if self.tree.len() != other.tree.len() { + return false; + } + + self.tree.iter().all(|(event_type, event_set)| { + other + .tree + .get(event_type) + .map(|other_event_set| event_set == other_event_set) + .unwrap_or_default() + }) + } +} + +impl std::cmp::Eq for WriteLogEvents {} + /// The write log storage #[derive(Debug, Clone, PartialEq, Eq)] pub struct WriteLog { @@ -79,8 +104,8 @@ pub struct WriteLog { /// cleaned either when committing or dumping the `tx_write_log` pub(crate) tx_precommit_write_log: HashMap, - /// The IBC events for the current transaction - pub(crate) ibc_events: BTreeSet, + /// The events emitted by the current transaction + pub(crate) events: WriteLogEvents, /// Storage modifications for the replay protection storage, always /// committed regardless of the result of the transaction pub(crate) replay_protection: HashSet, @@ -110,7 +135,9 @@ impl Default for WriteLog { tx_write_log: HashMap::with_capacity(100), tx_temp_log: HashMap::with_capacity(1), tx_precommit_write_log: HashMap::with_capacity(100), - ibc_events: BTreeSet::new(), + events: WriteLogEvents { + tree: StringPatriciaMap::new(), + }, replay_protection: HashSet::with_capacity(1_000), } } @@ -384,14 +411,16 @@ impl WriteLog { (addr, gas) } - /// Set an IBC event and return the gas cost. - pub fn emit_ibc_event(&mut self, event: IbcEvent) -> u64 { - let len = event - .attributes - .iter() - .fold(0, |acc, (k, v)| acc + k.len() + v.len()); - self.ibc_events.insert(event); - len as u64 * MEMORY_ACCESS_GAS_PER_BYTE + /// Set an event and return the gas cost. + pub fn emit_event(&mut self, event: E) -> u64 { + let event = event.into(); + let gas_cost = event.emission_gas_cost(MEMORY_ACCESS_GAS_PER_BYTE); + let event_type = event.kind().to_string(); + if !self.events.tree.contains_key(&event_type) { + self.events.tree.insert(&event_type, HashSet::new()); + } + self.events.tree.get_mut(&event_type).unwrap().insert(event); + gas_cost } /// Get the non-temporary storage keys changed and accounts keys initialized @@ -451,14 +480,43 @@ impl WriteLog { .collect() } - /// Take the IBC event of the current transaction - pub fn take_ibc_events(&mut self) -> BTreeSet { - std::mem::take(&mut self.ibc_events) + /// Take the events of the current transaction + pub fn take_events(&mut self) -> BTreeSet { + std::mem::take(&mut self.events.tree) + .into_iter() + .flat_map(|(_, event_set)| event_set) + .collect() + } + + /// Get events emitted by the current transaction of + /// a certain type. + #[inline] + pub fn lookup_events_with_prefix<'this, 'ty: 'this>( + &'this self, + event_type: &'ty EventType, + ) -> impl Iterator + 'this { + self.events + .tree + .iter_prefix(event_type) + .flat_map(|(_, event_set)| event_set) + } + + /// Get events emitted by the current transaction of + /// type `E`. + #[inline] + pub fn get_events_of( + &self, + ) -> impl Iterator { + self.events + .tree + .iter_prefix(E::DOMAIN) + .flat_map(|(_, event_set)| event_set) } - /// Get the IBC event of the current transaction - pub fn get_ibc_events(&self) -> &BTreeSet { - &self.ibc_events + /// Get all events emitted by the current transaction. + #[inline] + pub fn get_events(&self) -> impl Iterator { + self.events.tree.values().flatten() } /// Add the entire content of the tx write log to the precommit one. 
The tx @@ -487,7 +545,7 @@ impl WriteLog { self.block_write_log.extend(tx_precommit_write_log); self.tx_temp_log.clear(); - self.take_ibc_events(); + self.events.tree.clear(); } /// Drop the current transaction's write log and IBC events and precommit @@ -497,7 +555,7 @@ impl WriteLog { self.tx_precommit_write_log.clear(); self.tx_write_log.clear(); self.tx_temp_log.clear(); - self.ibc_events.clear(); + self.events.tree.clear(); } /// Drop the current transaction's write log but keep the precommit one. diff --git a/crates/tests/src/e2e/ibc_tests.rs b/crates/tests/src/e2e/ibc_tests.rs index 1e34120586..fd5b477a43 100644 --- a/crates/tests/src/e2e/ibc_tests.rs +++ b/crates/tests/src/e2e/ibc_tests.rs @@ -36,7 +36,6 @@ use namada::ibc::core::channel::types::msgs::{ MsgTimeout as IbcMsgTimeout, }; use namada::ibc::core::channel::types::packet::Packet; -use namada::ibc::core::channel::types::timeout::TimeoutHeight; use namada::ibc::core::channel::types::Version as ChanVersion; use namada::ibc::core::client::context::client_state::ClientStateCommon; use namada::ibc::core::client::types::msgs::{ @@ -56,14 +55,16 @@ use namada::ibc::core::connection::types::Counterparty as ConnCounterparty; use namada::ibc::core::host::types::identifiers::{ ChainId, ChannelId, ClientId, ConnectionId, PortId, }; +use namada::ibc::event as ibc_events; +use namada::ibc::event::IbcEventType; use namada::ibc::primitives::proto::Any; -use namada::ibc::primitives::{Signer, Timestamp, ToProto}; -use namada::ledger::events::EventType; +use namada::ibc::primitives::{Signer, ToProto}; use namada::ledger::ibc::storage::*; use namada::ledger::parameters::{storage as param_storage, EpochDuration}; use namada::ledger::pgf::ADDRESS as PGF_ADDRESS; use namada::ledger::queries::RPC; use namada::ledger::storage::ics23_specs::ibc_proof_specs; +use namada::sdk::events::extend::ReadFromEventAttributes; use namada::state::Sha256Hasher; use namada::tendermint::abci::Event as AbciEvent; use namada::tendermint::block::Height as TmHeight; @@ -78,7 +79,6 @@ use namada_apps::config::{ethereum_bridge, TendermintMode}; use namada_apps::facade::tendermint::block::Header as TmHeader; use namada_apps::facade::tendermint::merkle::proof::ProofOps as TmProof; use namada_apps::facade::tendermint_rpc::{Client, HttpClient, Url}; -use namada_core::collections::HashMap; use namada_core::string_encoding::StringEncoded; use namada_sdk::masp::fs::FsShieldedUtils; use namada_test_utils::TestWasms; @@ -1388,7 +1388,7 @@ fn transfer_token( )?; let events = get_events(test_a, height)?; let packet = get_packet_from_events(&events).ok_or(eyre!(TX_FAILED))?; - check_ibc_packet_query(test_a, &"send_packet".parse().unwrap(), &packet)?; + check_ibc_packet_query(test_a, "send_packet", &packet)?; let height_a = query_height(test_a)?; let proof_commitment_on_a = @@ -1412,11 +1412,7 @@ fn transfer_token( let events = get_events(test_b, height)?; let packet = get_packet_from_events(&events).ok_or(eyre!(TX_FAILED))?; let ack = get_ack_from_events(&events).ok_or(eyre!(TX_FAILED))?; - check_ibc_packet_query( - test_b, - &"write_acknowledgement".parse().unwrap(), - &packet, - )?; + check_ibc_packet_query(test_b, "write_acknowledgement", &packet)?; // get the proof on Chain B let height_b = query_height(test_b)?; @@ -2056,7 +2052,7 @@ fn check_ibc_update_query( fn check_ibc_packet_query( test: &Test, - event_type: &EventType, + event_type: &str, packet: &Packet, ) -> Result<()> { let rpc = get_actor_rpc(test, Who::Validator(0)); @@ -2064,7 +2060,7 @@ fn check_ibc_packet_query( 
let client = HttpClient::new(tendermint_url).unwrap(); match test.async_runtime().block_on(RPC.shell().ibc_packet( &client, - event_type, + &IbcEventType(event_type.to_owned()), &packet.port_id_on_a, &packet.chan_id_on_a, &packet.port_id_on_b, @@ -2342,92 +2338,38 @@ fn signer() -> Signer { "signer".to_string().into() } -fn get_client_id_from_events(events: &Vec) -> Option { - get_attribute_from_events(events, "client_id").map(|v| v.parse().unwrap()) +fn get_client_id_from_events(events: &[AbciEvent]) -> Option { + get_attribute_from_events::(events) } -fn get_connection_id_from_events( - events: &Vec, -) -> Option { - get_attribute_from_events(events, "connection_id") - .map(|v| v.parse().unwrap()) +fn get_connection_id_from_events(events: &[AbciEvent]) -> Option { + get_attribute_from_events::(events) } -fn get_channel_id_from_events(events: &Vec) -> Option { - get_attribute_from_events(events, "channel_id").map(|v| v.parse().unwrap()) +fn get_channel_id_from_events(events: &[AbciEvent]) -> Option { + get_attribute_from_events::(events) } -fn get_ack_from_events(events: &Vec) -> Option> { - get_attribute_from_events(events, "packet_ack") - .map(|v| Vec::from(v.as_bytes())) +fn get_ack_from_events(events: &[AbciEvent]) -> Option> { + get_attribute_from_events::(events) + .map(String::into_bytes) } -fn get_attribute_from_events( - events: &Vec, - key: &str, -) -> Option { - for event in events { - let attributes = get_attributes_from_event(event); - if let Some(value) = attributes.get(key) { - return Some(value.clone()); - } - } - None -} - -fn get_packet_from_events(events: &Vec) -> Option { - for event in events { - let attributes = get_attributes_from_event(event); - if !attributes.contains_key("packet_src_port") { - continue; - } - let mut packet = Packet { - seq_on_a: 0.into(), - port_id_on_a: PortId::transfer(), - chan_id_on_a: ChannelId::default(), - port_id_on_b: PortId::transfer(), - chan_id_on_b: ChannelId::default(), - data: vec![], - timeout_height_on_b: TimeoutHeight::default(), - timeout_timestamp_on_b: Timestamp::default(), - }; - for (key, val) in attributes { - match key.as_str() { - "packet_src_port" => packet.port_id_on_a = val.parse().unwrap(), - "packet_src_channel" => { - packet.chan_id_on_a = val.parse().unwrap() - } - "packet_dst_port" => packet.port_id_on_b = val.parse().unwrap(), - "packet_dst_channel" => { - packet.chan_id_on_b = val.parse().unwrap() - } - "packet_timeout_height" => { - packet.timeout_height_on_b = match Height::from_str(&val) { - Ok(height) => TimeoutHeight::At(height), - Err(_) => TimeoutHeight::Never, - } - } - "packet_timeout_timestamp" => { - packet.timeout_timestamp_on_b = val.parse().unwrap() - } - "packet_sequence" => { - packet.seq_on_a = u64::from_str(&val).unwrap().into() - } - "packet_data" => packet.data = Vec::from(val.as_bytes()), - _ => {} - } - } - return Some(packet); - } - None +fn get_attribute_from_events<'value, DATA>( + events: &[AbciEvent], +) -> Option<>::Value> +where + DATA: ReadFromEventAttributes<'value>, +{ + events.iter().find_map(|event| { + DATA::read_from_event_attributes(&event.attributes).ok() + }) } -fn get_attributes_from_event(event: &AbciEvent) -> HashMap { - event - .attributes - .iter() - .map(|tag| (tag.key.to_string(), tag.value.to_string())) - .collect() +fn get_packet_from_events(events: &[AbciEvent]) -> Option { + events.iter().find_map(|event| { + ibc_events::packet_from_event_attributes(&event.attributes).ok() + }) } fn get_events(test: &Test, height: u32) -> Result> { diff --git 
a/crates/tests/src/vm_host_env/tx.rs b/crates/tests/src/vm_host_env/tx.rs index 0580fc0699..1b1f7d4317 100644 --- a/crates/tests/src/vm_host_env/tx.rs +++ b/crates/tests/src/vm_host_env/tx.rs @@ -498,8 +498,8 @@ mod native_tx_host_env { entropy_source_len: u64, result_ptr: u64 )); - native_host_fn!(tx_emit_ibc_event(event_ptr: u64, event_len: u64)); - native_host_fn!(tx_get_ibc_events(event_type_ptr: u64, event_type_len: u64) -> i64); + native_host_fn!(tx_emit_event(event_ptr: u64, event_len: u64)); + native_host_fn!(tx_get_events(event_type_ptr: u64, event_type_len: u64) -> i64); native_host_fn!(tx_get_chain_id(result_ptr: u64)); native_host_fn!(tx_get_block_height() -> u64); native_host_fn!(tx_get_tx_index() -> u32); diff --git a/crates/token/Cargo.toml b/crates/token/Cargo.toml index 1029be2b38..1f0849ebe5 100644 --- a/crates/token/Cargo.toml +++ b/crates/token/Cargo.toml @@ -18,6 +18,7 @@ multicore = ["namada_shielded_token/multicore"] [dependencies] namada_core = { path = "../core" } +namada_events = { path = "../events", default-features = false } namada_shielded_token = { path = "../shielded_token" } namada_storage = { path = "../storage" } namada_trans_token = { path = "../trans_token" } diff --git a/crates/token/src/lib.rs b/crates/token/src/lib.rs index 136b324001..f613b47106 100644 --- a/crates/token/src/lib.rs +++ b/crates/token/src/lib.rs @@ -9,8 +9,12 @@ pub mod storage_key { pub use namada_trans_token::storage_key::*; } +pub mod event { + pub use namada_trans_token::event::*; +} + use namada_core::address::Address; -use namada_core::event::EmitEvents; +use namada_events::EmitEvents; use namada_storage::{Result, StorageRead, StorageWrite}; /// Initialize parameters for the token in storage during the genesis block. diff --git a/crates/trans_token/Cargo.toml b/crates/trans_token/Cargo.toml index a3302c4de8..a2a6f38a98 100644 --- a/crates/trans_token/Cargo.toml +++ b/crates/trans_token/Cargo.toml @@ -20,8 +20,10 @@ migrations = [ [dependencies] namada_core = { path = "../core" } +namada_events = { path = "../events", default-features = false } namada_storage = { path = "../storage" } +konst.workspace = true linkme = {workspace = true, optional = true} [dev-dependencies] diff --git a/crates/trans_token/src/event.rs b/crates/trans_token/src/event.rs new file mode 100644 index 0000000000..2f4435d8f8 --- /dev/null +++ b/crates/trans_token/src/event.rs @@ -0,0 +1,326 @@ +//! Token transaction events. + +use std::borrow::Cow; +use std::fmt; +use std::str::FromStr; + +use namada_core::address::Address; +use namada_core::uint::Uint; +use namada_events::extend::{Closure, ComposeEvent, EventAttributeEntry}; +use namada_events::{Event, EventLevel, EventToEmit, EventType}; + +pub mod types { + //! Token event types. + + use namada_events::{event_type, EventType}; + + use super::TokenEvent; + + /// Mint token event. + pub const MINT: EventType = event_type!(TokenEvent, "mint"); + + /// Burn token event. + pub const BURN: EventType = event_type!(TokenEvent, "burn"); + + /// Transfer token event. + pub const TRANSFER: EventType = event_type!(TokenEvent, "transfer"); +} + +/// A user account. +#[derive(Debug, Clone, Ord, PartialOrd, Eq, PartialEq, Hash)] +pub enum UserAccount { + /// Internal chain address in Namada. + Internal(Address), + /// External chain address. 
+ External(String), +} + +impl fmt::Display for UserAccount { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::Internal(addr) => write!(f, "internal-address/{addr}"), + Self::External(addr) => write!(f, "external-address/{addr}"), + } + } +} + +impl FromStr for UserAccount { + type Err = String; + + fn from_str(s: &str) -> Result<Self, Self::Err> { + match s.split_once('/') { + Some(("internal-address", addr)) => { + Ok(Self::Internal(Address::decode(addr).map_err(|err| { + format!( + "Unknown internal address balance change target \ + {s:?}: {err}" + ) + })?)) + } + Some(("external-address", addr)) => { + Ok(Self::External(addr.to_owned())) + } + _ => Err(format!("Unknown balance change target {s:?}")), + } + } +} + +/// Token event kind. +#[derive(Debug, Clone, Ord, PartialOrd, Eq, PartialEq, Hash)] +pub enum TokenEventKind { + /// Token mint operation. + Mint, + /// Token burn operation. + Burn, + /// Token transfer operation. + Transfer, +} + +impl From<&TokenEventKind> for EventType { + fn from(token_event_kind: &TokenEventKind) -> Self { + match token_event_kind { + TokenEventKind::Mint => types::MINT, + TokenEventKind::Burn => types::BURN, + TokenEventKind::Transfer => types::TRANSFER, + } + } +} + +impl From<TokenEventKind> for EventType { + fn from(token_event_kind: TokenEventKind) -> Self { + (&token_event_kind).into() + } +} + +/// Namada token event. +#[derive(Debug)] +pub struct TokenEvent { + /// The event level. + pub level: EventLevel, + /// The affected token address. + pub token: Address, + /// The operation that took place. + pub operation: TokenOperation, + /// Additional description of the token event. + pub descriptor: Cow<'static, str>, +} + +/// Namada token operation. +#[derive(Debug)] +pub enum TokenOperation { + /// Token mint event. + Mint { + /// The target account whose balance was changed. + target_account: UserAccount, + /// The amount of minted tokens. + amount: Uint, + /// The balance that `target_account` ended up with. + post_balance: Uint, + }, + /// Token burn event. + Burn { + /// The target account whose balance was changed. + target_account: UserAccount, + /// The amount of burned tokens. + amount: Uint, + /// The balance that `target_account` ended up with. + post_balance: Uint, + }, + /// Token transfer event. + Transfer { + /// The source of the token transfer. + source: UserAccount, + /// The target of the token transfer. + target: UserAccount, + /// The transferred amount. + amount: Uint, + /// The balance that `source` ended up with. + source_post_balance: Uint, + /// The balance that `target` ended up with, + /// if it is known. + target_post_balance: Option<Uint>, + }, +} + +impl TokenOperation { + /// The token event kind associated with this operation. + pub fn kind(&self) -> TokenEventKind { + match self { + Self::Mint { .. } => TokenEventKind::Mint, + Self::Burn { .. } => TokenEventKind::Burn, + Self::Transfer { .. 
} => TokenEventKind::Transfer, + } + } +} + +impl EventToEmit for TokenEvent { + const DOMAIN: &'static str = "token"; +} + +impl From for Event { + fn from(token_event: TokenEvent) -> Self { + let event = + Self::new(token_event.operation.kind().into(), token_event.level) + .with(TokenAddress(token_event.token)) + .with(Descriptor(&token_event.descriptor)); + + match token_event.operation { + TokenOperation::Mint { + target_account, + amount, + post_balance, + } + | TokenOperation::Burn { + target_account, + amount, + post_balance, + } => event + .with(TargetAccount(target_account)) + .with(Amount(&amount)) + .with(TargetPostBalance(&post_balance)) + .into(), + TokenOperation::Transfer { + source, + target, + amount, + source_post_balance, + target_post_balance, + } => event + .with(SourceAccount(source)) + .with(TargetAccount(target)) + .with(Amount(&amount)) + .with(SourcePostBalance(&source_post_balance)) + .with(Closure(|event: &mut Event| { + if let Some(post_balance) = target_post_balance { + event.extend(TargetPostBalance(&post_balance)); + } + })) + .into(), + } + } +} + +/// Extend an [`Event`] with token event descriptor data. +pub struct Descriptor<'k>(pub &'k str); + +impl<'k> EventAttributeEntry<'k> for Descriptor<'k> { + type Value = &'k str; + type ValueOwned = String; + + const KEY: &'static str = "token-event-descriptor"; + + fn into_value(self) -> Self::Value { + self.0 + } +} + +/// Extend an [`Event`] with token address data. +pub struct TokenAddress(pub Address); + +impl EventAttributeEntry<'static> for TokenAddress { + type Value = Address; + type ValueOwned = Self::Value; + + const KEY: &'static str = "token-address"; + + fn into_value(self) -> Self::Value { + self.0 + } +} + +/// Extend an [`Event`] with source account data. +pub struct SourceAccount(pub UserAccount); + +impl EventAttributeEntry<'static> for SourceAccount { + type Value = UserAccount; + type ValueOwned = Self::Value; + + const KEY: &'static str = "source-account"; + + fn into_value(self) -> Self::Value { + self.0 + } +} + +/// Extend an [`Event`] with target account data. +pub struct TargetAccount(pub UserAccount); + +impl EventAttributeEntry<'static> for TargetAccount { + type Value = UserAccount; + type ValueOwned = Self::Value; + + const KEY: &'static str = "target-account"; + + fn into_value(self) -> Self::Value { + self.0 + } +} + +/// Extend an [`Event`] with amount data. +pub struct Amount<'amt>(pub &'amt Uint); + +impl<'amt> EventAttributeEntry<'amt> for Amount<'amt> { + type Value = &'amt Uint; + type ValueOwned = Uint; + + const KEY: &'static str = "amount"; + + fn into_value(self) -> Self::Value { + self.0 + } +} + +/// Extend an [`Event`] with source post balance data. +pub struct SourcePostBalance<'bal>(pub &'bal Uint); + +impl<'bal> EventAttributeEntry<'bal> for SourcePostBalance<'bal> { + type Value = &'bal Uint; + type ValueOwned = Uint; + + const KEY: &'static str = "source-post-balance"; + + fn into_value(self) -> Self::Value { + self.0 + } +} + +/// Extend an [`Event`] with target post balance data. 
+pub struct TargetPostBalance<'bal>(pub &'bal Uint); + +impl<'bal> EventAttributeEntry<'bal> for TargetPostBalance<'bal> { + type Value = &'bal Uint; + type ValueOwned = Uint; + + const KEY: &'static str = "target-post-balance"; + + fn into_value(self) -> Self::Value { + self.0 + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn user_account_str_roundtrip() { + let targets = [ + UserAccount::External( + "cosmos1hkgjfuznl4af5ayzn6gzl6kwwkcu28urxmqejg".to_owned(), + ), + UserAccount::Internal( + Address::decode( + "tnam1q82t25z5f9gmnv5sztyr8ht9tqhrw4u875qjhy56", + ) + .unwrap(), + ), + ]; + + for target in targets { + let as_str = target.to_string(); + let decoded: UserAccount = as_str.parse().unwrap(); + + assert_eq!(decoded, target); + } + } +} diff --git a/crates/trans_token/src/lib.rs b/crates/trans_token/src/lib.rs index 6644f73d7a..04b80b152c 100644 --- a/crates/trans_token/src/lib.rs +++ b/crates/trans_token/src/lib.rs @@ -1,5 +1,6 @@ //! Transparent token types, storage functions, and validation. +pub mod event; mod storage; pub mod storage_key; diff --git a/crates/trans_token/src/storage.rs b/crates/trans_token/src/storage.rs index 4324f0be29..962b60c3c2 100644 --- a/crates/trans_token/src/storage.rs +++ b/crates/trans_token/src/storage.rs @@ -143,15 +143,32 @@ where storage.write(&src_key, new_src_balance)?; storage.write(&dest_key, new_dest_balance) } - None => Err(storage::Error::new_const( - "The transfer would overflow destination balance", - )), + None => Err(storage::Error::new_alloc(format!( + "The transfer would overflow balance of {dest}" + ))), } } - None => Err(storage::Error::new_const("Insufficient source balance")), + None => Err(storage::Error::new_alloc(format!( + "{src} has insufficient balance" + ))), } } +/// Mint `amount` of `token` as `minter` to `dest`. +pub fn mint_tokens( + storage: &mut S, + minter: &Address, + token: &Address, + dest: &Address, + amount: token::Amount, +) -> storage::Result<()> +where + S: StorageRead + StorageWrite, +{ + credit_tokens(storage, token, dest, amount)?; + storage.write(&minter_key(token), minter.clone()) +} + /// Credit tokens to an account, to be used only by protocol. In transactions, /// this would get rejected by the default `vp_token`. 
pub fn credit_tokens( diff --git a/crates/tx/Cargo.toml b/crates/tx/Cargo.toml index fa8eb22802..17ee6dc397 100644 --- a/crates/tx/Cargo.toml +++ b/crates/tx/Cargo.toml @@ -22,6 +22,7 @@ migrations = [ [dependencies] namada_core = { path = "../core" } +namada_events = { path = "../events", default-features = false } namada_gas = { path = "../gas" } namada_macros = { path = "../macros" } namada_migrations = {path = "../migrations", optional = true } @@ -30,6 +31,7 @@ ark-bls12-381.workspace = true bitflags.workspace = true borsh.workspace = true data-encoding.workspace = true +konst.workspace = true linkme = {workspace = true, optional = true} masp_primitives.workspace = true num-derive.workspace = true diff --git a/crates/tx/src/data/mod.rs b/crates/tx/src/data/mod.rs index ef66fb488c..868098aa95 100644 --- a/crates/tx/src/data/mod.rs +++ b/crates/tx/src/data/mod.rs @@ -23,10 +23,9 @@ use namada_core::address::Address; use namada_core::borsh::{ BorshDeserialize, BorshSchema, BorshSerialize, BorshSerializeExt, }; -use namada_core::ethereum_structs::EthBridgeEvent; use namada_core::hash::Hash; -use namada_core::ibc::IbcEvent; use namada_core::storage; +use namada_events::Event; use namada_gas::{Gas, VpsGas}; use namada_macros::BorshDeserializer; #[cfg(feature = "migrations")] @@ -43,6 +42,7 @@ use crate::data::protocol::ProtocolTx; /// indicating the status of their submitted tx. /// The codes must not change with versions, only need ones may be added. #[derive( + Default, Debug, Copy, Clone, @@ -58,6 +58,7 @@ pub enum ResultCode { // WARN: These codes shouldn't be changed between version! // ========================================================================= /// Success + #[default] Ok = 0, /// Error in WASM tx execution WasmRuntimeError = 1, @@ -107,6 +108,11 @@ impl ResultCode { ToPrimitive::to_u32(self).unwrap() } + /// Convert to `usize`. + pub fn to_usize(&self) -> usize { + ToPrimitive::to_usize(self).unwrap() + } + /// Convert from `u32`. pub fn from_u32(raw: u32) -> Option { FromPrimitive::from_u32(raw) @@ -182,10 +188,8 @@ pub struct TxResult { pub vps_result: VpsResult, /// New established addresses created by the transaction pub initialized_accounts: Vec
, - /// IBC events emitted by the transaction - pub ibc_events: BTreeSet<IbcEvent>, - /// Ethereum bridge events emitted by the transaction - pub eth_bridge_events: BTreeSet<EthBridgeEvent>, + /// Events emitted by the transaction + pub events: BTreeSet<Event>, } impl TxResult { diff --git a/crates/tx/src/event.rs b/crates/tx/src/event.rs index b0ad629541..35407a8650 100644 --- a/crates/tx/src/event.rs +++ b/crates/tx/src/event.rs @@ -1,20 +1,60 @@ //! Transaction events. -use namada_core::event::extend::{ - ComposeEvent, ExtendEvent, Height, Log, TxHash, +use namada_core::borsh::{BorshDeserialize, BorshSerialize}; +use namada_events::extend::{ + ComposeEvent, EventAttributeEntry, Height, Log, TxHash, }; -use namada_core::event::Event; +use namada_events::{Event, EventLevel, EventToEmit}; +use namada_macros::BorshDeserializer; +#[cfg(feature = "migrations")] +use namada_migrations::*; use super::Tx; use crate::data::{ResultCode, TxResult}; use crate::TxType; +/// Transaction event. +#[derive( + Clone, + Debug, + Eq, + PartialEq, + BorshSerialize, + BorshDeserialize, + BorshDeserializer, +)] +pub struct TxEvent(pub Event); + +impl From<TxEvent> for Event { + #[inline] + fn from(TxEvent(event): TxEvent) -> Self { + event + } +} + +impl EventToEmit for TxEvent { + const DOMAIN: &'static str = "tx"; +} + +pub mod types { + //! Transaction event types. + + use namada_events::EventType; + + use super::TxEvent; + + /// Applied transaction. + pub const APPLIED: EventType = + namada_events::event_type!(TxEvent, "applied"); +} + /// Creates a new event with the hash and height of the transaction -/// already filled in +/// already filled in. pub fn new_tx_event(tx: &Tx, height: u64) -> Event { let base_event = match tx.header().tx_type { TxType::Wrapper(_) | TxType::Protocol(_) => { - Event::applied_tx().with(TxHash(tx.header_hash())) + Event::new(types::APPLIED, EventLevel::Tx) + .with(TxHash(tx.header_hash())) } _ => unreachable!(), }; @@ -27,21 +67,27 @@ pub fn new_tx_event(tx: &Tx, height: u64) -> Event { /// Extend an [`Event`] with result code data. pub struct Code(pub ResultCode); -impl ExtendEvent for Code { - #[inline] - fn extend_event(self, event: &mut Event) { - let Self(code) = self; - event["code"] = code.into(); +impl EventAttributeEntry<'static> for Code { + type Value = ResultCode; + type ValueOwned = Self::Value; + + const KEY: &'static str = "code"; + + fn into_value(self) -> Self::Value { + self.0 } } /// Extend an [`Event`] with inner tx data. 
pub struct InnerTx<'result>(pub &'result TxResult); -impl ExtendEvent for InnerTx<'_> { - #[inline] - fn extend_event(self, event: &mut Event) { - let Self(tx_result) = self; - event["inner_tx"] = tx_result.to_string(); +impl<'result> EventAttributeEntry<'result> for InnerTx<'result> { + type Value = &'result TxResult; + type ValueOwned = TxResult; + + const KEY: &'static str = "inner_tx"; + + fn into_value(self) -> Self::Value { + self.0 } } diff --git a/crates/tx_env/Cargo.toml b/crates/tx_env/Cargo.toml index cc26e87cf1..2d61902f35 100644 --- a/crates/tx_env/Cargo.toml +++ b/crates/tx_env/Cargo.toml @@ -14,4 +14,5 @@ version.workspace = true [dependencies] namada_core = { path = "../core" } +namada_events = { path = "../events", default-features = false } namada_storage = { path = "../storage" } diff --git a/crates/tx_env/src/lib.rs b/crates/tx_env/src/lib.rs index d9fc090eae..c4281ec818 100644 --- a/crates/tx_env/src/lib.rs +++ b/crates/tx_env/src/lib.rs @@ -3,8 +3,8 @@ use namada_core::address::Address; use namada_core::borsh::{BorshDeserialize, BorshSerialize, BorshSerializeExt}; -use namada_core::ibc::IbcEvent; use namada_core::storage; +use namada_events::{Event, EventToEmit, EventType}; use namada_storage::{Result, ResultExt, StorageRead, StorageWrite}; /// Transaction host functions @@ -73,17 +73,14 @@ pub trait TxEnv: StorageRead + StorageWrite { code_tag: &Option, ) -> Result<()>; - /// Emit an IBC event. On multiple calls, these emitted event will be added. - fn emit_ibc_event(&mut self, event: &IbcEvent) -> Result<()>; + /// Emit an [`Event`] from a transaction. + fn emit_event(&mut self, event: E) -> Result<()>; /// Request to charge the provided amount of gas for the current transaction fn charge_gas(&mut self, used_gas: u64) -> Result<()>; - /// Get IBC events with a event type - fn get_ibc_events( - &self, - event_type: impl AsRef, - ) -> Result>; + /// Get events with a given [`EventType`]. 
+ fn get_events(&self, event_type: &EventType) -> Result>; /// Set the sentinel for an invalid section commitment fn set_commitment_sentinel(&mut self); diff --git a/crates/tx_prelude/Cargo.toml b/crates/tx_prelude/Cargo.toml index 83bc1eac73..d6335d8abe 100644 --- a/crates/tx_prelude/Cargo.toml +++ b/crates/tx_prelude/Cargo.toml @@ -18,6 +18,7 @@ default = [] [dependencies] namada_account = { path = "../account" } namada_core = { path = "../core" } +namada_events = { path = "../events", default-features = false } namada_governance = { path = "../governance" } namada_ibc = { path = "../ibc" } namada_macros = { path = "../macros" } diff --git a/crates/tx_prelude/src/ibc.rs b/crates/tx_prelude/src/ibc.rs index 0c62a53422..00b10c9bcd 100644 --- a/crates/tx_prelude/src/ibc.rs +++ b/crates/tx_prelude/src/ibc.rs @@ -4,19 +4,20 @@ use std::cell::RefCell; use std::collections::BTreeSet; use std::rc::Rc; -use namada_core::address::{Address, InternalAddress}; -pub use namada_core::ibc::IbcEvent; +use namada_core::address::Address; use namada_core::token::Amount; -pub use namada_ibc::storage::{ibc_token, is_ibc_key}; +use namada_events::EventTypeBuilder; +pub use namada_ibc::event::{IbcEvent, IbcEventType}; +pub use namada_ibc::storage::{ + burn_tokens, ibc_token, is_ibc_key, mint_tokens, +}; pub use namada_ibc::{ IbcActions, IbcCommonContext, IbcStorageContext, NftTransferModule, ProofSpec, TransferModule, }; -use namada_storage::StorageWrite; -use namada_token::storage_key::minter_key; use namada_tx_env::TxEnv; -use crate::token::{burn, mint, transfer}; +use crate::token::transfer; use crate::{Ctx, Error}; /// IBC actions to handle an IBC message. The `verifiers` inserted into the set @@ -40,14 +41,21 @@ impl IbcStorageContext for Ctx { &mut self, event: IbcEvent, ) -> std::result::Result<(), Error> { - ::emit_ibc_event(self, &event) + ::emit_event(self, event) } fn get_ibc_events( &self, event_type: impl AsRef, ) -> Result, Error> { - ::get_ibc_events(self, &event_type) + let event_type = EventTypeBuilder::new_of::() + .with_segment(event_type.as_ref()) + .build(); + + Ok(::get_events(self, &event_type)? 
+ .into_iter() + .filter_map(|event| IbcEvent::try_from(event).ok()) + .collect()) } fn transfer_token( @@ -75,10 +83,7 @@ impl IbcStorageContext for Ctx { token: &Address, amount: Amount, ) -> Result<(), Error> { - mint(self, target, token, amount)?; - - let minter_key = minter_key(token); - self.write(&minter_key, &Address::Internal(InternalAddress::Ibc)) + mint_tokens(self, target, token, amount) } fn burn_token( @@ -87,7 +92,7 @@ impl IbcStorageContext for Ctx { token: &Address, amount: Amount, ) -> Result<(), Error> { - burn(self, target, token, amount) + burn_tokens(self, target, token, amount) } fn log_string(&self, message: String) { diff --git a/crates/tx_prelude/src/lib.rs b/crates/tx_prelude/src/lib.rs index a8f4b4c143..3948342af8 100644 --- a/crates/tx_prelude/src/lib.rs +++ b/crates/tx_prelude/src/lib.rs @@ -31,6 +31,7 @@ pub use namada_core::storage::{ self, BlockHash, BlockHeight, Epoch, Header, BLOCK_HASH_LENGTH, }; pub use namada_core::{encode, eth_bridge_pool, *}; +use namada_events::{EmitEvents, Event, EventToEmit, EventType}; pub use namada_governance::storage as gov_storage; pub use namada_macros::transaction; pub use namada_parameters::storage as parameters_storage; @@ -248,6 +249,26 @@ impl StorageWrite for Ctx { } } +impl EmitEvents for Ctx { + #[inline] + fn emit(&mut self, event: E) + where + E: EventToEmit, + { + _ = self.emit_event(event); + } + + fn emit_many(&mut self, event_batch: B) + where + B: IntoIterator, + E: EventToEmit, + { + for event in event_batch { + self.emit(event.into()); + } + } +} + impl TxEnv for Ctx { fn read_bytes_temp( &self, @@ -336,11 +357,10 @@ impl TxEnv for Ctx { Ok(()) } - fn emit_ibc_event(&mut self, event: &ibc::IbcEvent) -> Result<(), Error> { - let event = borsh::to_vec(event).unwrap(); - unsafe { - namada_tx_emit_ibc_event(event.as_ptr() as _, event.len() as _) - }; + fn emit_event(&mut self, event: E) -> Result<(), Error> { + let event: Event = event.into(); + let event = borsh::to_vec(&event).unwrap(); + unsafe { namada_tx_emit_event(event.as_ptr() as _, event.len() as _) }; Ok(()) } @@ -349,19 +369,16 @@ impl TxEnv for Ctx { Ok(()) } - fn get_ibc_events( - &self, - event_type: impl AsRef, - ) -> Result, Error> { - let event_type = event_type.as_ref().to_string(); + fn get_events(&self, event_type: &EventType) -> Result, Error> { + let event_type = event_type.to_string(); let read_result = unsafe { - namada_tx_get_ibc_events( + namada_tx_get_events( event_type.as_ptr() as _, event_type.len() as _, ) }; match read_from_buffer(read_result, namada_tx_result_buffer) { - Some(value) => Ok(Vec::::try_from_slice(&value[..]) + Some(value) => Ok(Vec::::try_from_slice(&value[..]) .expect("The conversion shouldn't fail")), None => Ok(Vec::new()), } diff --git a/crates/tx_prelude/src/token.rs b/crates/tx_prelude/src/token.rs index f62ec8d5a5..7eaa1129a7 100644 --- a/crates/tx_prelude/src/token.rs +++ b/crates/tx_prelude/src/token.rs @@ -1,10 +1,11 @@ use namada_core::address::Address; -use namada_proof_of_stake::token::storage_key::balance_key; -use namada_storage::{Error as StorageError, ResultExt}; -pub use namada_token::*; +use namada_events::{EmitEvents, EventLevel}; +pub use namada_token::{ + storage_key, utils, Amount, DenominatedAmount, Transfer, +}; use namada_tx_env::TxEnv; -use crate::{Ctx, StorageRead, StorageWrite, TxResult}; +use crate::{Ctx, TxResult}; /// A token transfer that can be used in a transaction. 
pub fn transfer( @@ -14,6 +15,8 @@ pub fn transfer( token: &Address, amount: Amount, ) -> TxResult { + use namada_token::event::{TokenEvent, TokenOperation, UserAccount}; + // The tx must be authorized by the source address ctx.insert_verifier(src)?; if token.is_internal() { @@ -23,47 +26,23 @@ pub fn transfer( ctx.insert_verifier(token)?; } - if amount == Amount::zero() { - return Ok(()); - } - - let src_key = balance_key(token, src); - let dest_key = balance_key(token, dest); - let src_bal: Option = ctx.read(&src_key)?; - let mut src_bal = src_bal - .ok_or_else(|| StorageError::new_const("the source has no balance"))?; - - if !src_bal.can_spend(&amount) { - return Err(StorageError::new_const( - "the source has no enough balance", - )); - } - - src_bal.spend(&amount).into_storage_result()?; - let mut dest_bal: Amount = ctx.read(&dest_key)?.unwrap_or_default(); - dest_bal.receive(&amount).into_storage_result()?; - ctx.write(&src_key, src_bal)?; - ctx.write(&dest_key, dest_bal)?; + namada_token::transfer(ctx, token, src, dest, amount)?; + + ctx.emit(TokenEvent { + descriptor: "transfer-from-wasm".into(), + level: EventLevel::Tx, + token: token.clone(), + operation: TokenOperation::Transfer { + amount: amount.into(), + source: UserAccount::Internal(src.clone()), + target: UserAccount::Internal(dest.clone()), + source_post_balance: namada_token::read_balance(ctx, token, src)? + .into(), + target_post_balance: Some( + namada_token::read_balance(ctx, token, dest)?.into(), + ), + }, + }); Ok(()) } - -/// Mint that can be used in a transaction. -pub fn mint( - ctx: &mut Ctx, - target: &Address, - token: &Address, - amount: Amount, -) -> TxResult { - credit_tokens(ctx, token, target, amount) -} - -/// Burn that can be used in a transaction. -pub fn burn( - ctx: &mut Ctx, - target: &Address, - token: &Address, - amount: Amount, -) -> TxResult { - burn_tokens(ctx, token, target, amount) -} diff --git a/crates/vm_env/src/lib.rs b/crates/vm_env/src/lib.rs index 0ddc8d6e58..26f85aceef 100644 --- a/crates/vm_env/src/lib.rs +++ b/crates/vm_env/src/lib.rs @@ -86,11 +86,11 @@ pub mod tx { result_ptr: u64, ); - // Emit an IBC event - pub fn namada_tx_emit_ibc_event(event_ptr: u64, event_len: u64); + // Emit an event + pub fn namada_tx_emit_event(event_ptr: u64, event_len: u64); - // Get IBC events - pub fn namada_tx_get_ibc_events( + // Get events + pub fn namada_tx_get_events( event_type_ptr: u64, event_type_len: u64, ) -> i64; @@ -231,8 +231,8 @@ pub mod vp { // Get the native token address pub fn namada_vp_get_native_token(result_ptr: u64); - // Get the IBC event - pub fn namada_vp_get_ibc_events( + // Get events emitted by the current tx + pub fn namada_vp_get_events( event_type_ptr: u64, event_type_len: u64, ) -> i64; diff --git a/crates/vp_env/Cargo.toml b/crates/vp_env/Cargo.toml index 108a654773..7de5369916 100644 --- a/crates/vp_env/Cargo.toml +++ b/crates/vp_env/Cargo.toml @@ -14,6 +14,7 @@ version.workspace = true [dependencies] namada_core = { path = "../core" } +namada_events = { path = "../events", default-features = false } namada_storage = { path = "../storage" } namada_tx = { path = "../tx" } namada_ibc = { path = "../ibc" } diff --git a/crates/vp_env/src/lib.rs b/crates/vp_env/src/lib.rs index 5e1b8908c6..d785d7b570 100644 --- a/crates/vp_env/src/lib.rs +++ b/crates/vp_env/src/lib.rs @@ -10,7 +10,8 @@ use namada_core::borsh::BorshDeserialize; use namada_core::hash::Hash; use namada_core::storage::{BlockHeight, Epoch, Epochs, Header, Key, TxIndex}; use namada_core::token::Transfer; -use 
namada_ibc::{decode_message, IbcEvent, IbcMessage}; +use namada_events::{Event, EventType}; +use namada_ibc::{decode_message, IbcMessage}; use namada_storage::{OptionExt, StorageRead}; use namada_tx::Tx; @@ -77,11 +78,11 @@ where /// Given the information about predecessor block epochs fn get_pred_epochs(&self) -> namada_storage::Result; - /// Get the IBC events. - fn get_ibc_events( + /// Get the events emitted by the current tx. + fn get_events( &self, - event_type: String, - ) -> Result, namada_storage::Error>; + event_type: &EventType, + ) -> Result, namada_storage::Error>; /// Storage prefix iterator, ordered by storage keys. It will try to get an /// iterator from the storage. diff --git a/crates/vp_prelude/Cargo.toml b/crates/vp_prelude/Cargo.toml index ae0cbd7d43..a06c529d6a 100644 --- a/crates/vp_prelude/Cargo.toml +++ b/crates/vp_prelude/Cargo.toml @@ -18,6 +18,7 @@ default = [] [dependencies] namada_account = { path = "../account" } namada_core = { path = "../core" } +namada_events = { path = "../events", default-features = false } namada_governance = { path = "../governance" } namada_ibc = { path = "../ibc" } namada_macros = { path = "../macros" } diff --git a/crates/vp_prelude/src/lib.rs b/crates/vp_prelude/src/lib.rs index 6db9494fc7..9590b7a298 100644 --- a/crates/vp_prelude/src/lib.rs +++ b/crates/vp_prelude/src/lib.rs @@ -7,7 +7,7 @@ #![deny(rustdoc::private_intra_doc_links)] pub mod ibc { - pub use namada_core::ibc::IbcEvent; + pub use namada_ibc::event::{IbcEvent, IbcEventType}; pub use namada_ibc::storage::is_ibc_key; } @@ -27,6 +27,7 @@ use namada_core::internal::HostEnvResult; use namada_core::storage::{BlockHeight, Epoch, Epochs, Header, TxIndex}; pub use namada_core::validity_predicate::{VpError, VpErrorExtResult}; pub use namada_core::*; +use namada_events::{Event, EventType}; pub use namada_governance::pgf::storage as pgf_storage; pub use namada_governance::storage as gov_storage; pub use namada_macros::validity_predicate; @@ -325,18 +326,19 @@ impl<'view> VpEnv<'view> for Ctx { get_native_token() } - fn get_ibc_events( + fn get_events( &self, - event_type: String, - ) -> Result, StorageError> { + event_type: &EventType, + ) -> Result, StorageError> { + let event_type = event_type.to_string(); let read_result = unsafe { - namada_vp_get_ibc_events( + namada_vp_get_events( event_type.as_ptr() as _, event_type.len() as _, ) }; match read_from_buffer(read_result, namada_vp_result_buffer) { - Some(value) => Ok(Vec::::try_from_slice(&value[..]) + Some(value) => Ok(Vec::::try_from_slice(&value[..]) .expect("The conversion shouldn't fail")), None => Ok(Vec::new()), } diff --git a/wasm/Cargo.lock b/wasm/Cargo.lock index a59b5ba551..a4e2db7947 100644 --- a/wasm/Cargo.lock +++ b/wasm/Cargo.lock @@ -884,6 +884,12 @@ version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28c122c3980598d243d63d9a704629a2d748d101f278052ff068be5a4423ab6f" +[[package]] +name = "const_panic" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6051f239ecec86fde3410901ab7860d458d160371533842974fc61f96d15879b" + [[package]] name = "constant_time_eq" version = "0.3.0" @@ -3177,6 +3183,26 @@ dependencies = [ "cpufeatures", ] +[[package]] +name = "konst" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d712a8c49d4274f8d8a5cf61368cb5f3c143d149882b1a2918129e53395fdb0" +dependencies = [ + "const_panic", + "konst_kernel", + "typewit", +] + +[[package]] +name = "konst_kernel" 
+version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dac6ea8c376b6e208a81cf39b8e82bebf49652454d98a4829e907dac16ef1790" +dependencies = [ + "typewit", +] + [[package]] name = "lazy_static" version = "1.4.0" @@ -3511,6 +3537,7 @@ dependencies = [ "eyre", "futures", "itertools 0.10.5", + "konst", "linkme", "loupe", "masp_primitives", @@ -3518,6 +3545,7 @@ dependencies = [ "namada_account", "namada_core", "namada_ethereum_bridge", + "namada_events", "namada_gas", "namada_governance", "namada_ibc", @@ -3648,9 +3676,11 @@ dependencies = [ "ethers", "eyre", "itertools 0.10.5", + "konst", "linkme", "namada_account", "namada_core", + "namada_events", "namada_macros", "namada_migrations", "namada_parameters", @@ -3670,13 +3700,29 @@ dependencies = [ "tracing", ] +[[package]] +name = "namada_events" +version = "0.34.0" +dependencies = [ + "borsh 1.4.0", + "linkme", + "namada_core", + "namada_macros", + "namada_migrations", + "serde", + "thiserror", +] + [[package]] name = "namada_gas" version = "0.34.0" dependencies = [ "borsh 1.4.0", + "linkme", "namada_core", + "namada_events", "namada_macros", + "namada_migrations", "serde", "thiserror", ] @@ -3687,8 +3733,10 @@ version = "0.34.0" dependencies = [ "borsh 1.4.0", "itertools 0.10.5", + "konst", "linkme", "namada_core", + "namada_events", "namada_macros", "namada_migrations", "namada_parameters", @@ -3710,10 +3758,13 @@ dependencies = [ "ibc-derive", "ibc-testkit", "ics23", + "konst", "masp_primitives", "namada_core", + "namada_events", "namada_gas", "namada_governance", + "namada_macros", "namada_parameters", "namada_state", "namada_storage", @@ -3721,6 +3772,7 @@ dependencies = [ "primitive-types", "proptest", "prost 0.12.3", + "serde", "serde_json", "sha2 0.9.9", "thiserror", @@ -3784,10 +3836,12 @@ dependencies = [ "borsh 1.4.0", "data-encoding", "derivative", + "konst", "linkme", "namada_account", "namada_controller", "namada_core", + "namada_events", "namada_governance", "namada_macros", "namada_migrations", @@ -3835,6 +3889,7 @@ dependencies = [ "namada_account", "namada_core", "namada_ethereum_bridge", + "namada_events", "namada_gas", "namada_governance", "namada_ibc", @@ -3853,6 +3908,7 @@ dependencies = [ "owo-colors", "parse_duration", "paste", + "patricia_tree", "proptest", "prost 0.12.3", "rand 0.8.5", @@ -3899,6 +3955,7 @@ dependencies = [ "itertools 0.10.5", "linkme", "namada_core", + "namada_events", "namada_gas", "namada_macros", "namada_merkle_tree", @@ -3908,6 +3965,7 @@ dependencies = [ "namada_storage", "namada_trans_token", "namada_tx", + "patricia_tree", "proptest", "sha2 0.9.9", "sparse-merkle-tree", @@ -3984,6 +4042,7 @@ name = "namada_token" version = "0.34.0" dependencies = [ "namada_core", + "namada_events", "namada_shielded_token", "namada_storage", "namada_trans_token", @@ -3993,7 +4052,9 @@ dependencies = [ name = "namada_trans_token" version = "0.34.0" dependencies = [ + "konst", "namada_core", + "namada_events", "namada_storage", ] @@ -4005,9 +4066,11 @@ dependencies = [ "bitflags 2.5.0", "borsh 1.4.0", "data-encoding", + "konst", "linkme", "masp_primitives", "namada_core", + "namada_events", "namada_gas", "namada_macros", "namada_migrations", @@ -4028,6 +4091,7 @@ name = "namada_tx_env" version = "0.34.0" dependencies = [ "namada_core", + "namada_events", "namada_storage", ] @@ -4039,6 +4103,7 @@ dependencies = [ "masp_primitives", "namada_account", "namada_core", + "namada_events", "namada_governance", "namada_ibc", "namada_macros", @@ -4082,6 +4147,7 @@ dependencies = [ 
"derivative", "masp_primitives", "namada_core", + "namada_events", "namada_ibc", "namada_storage", "namada_tx", @@ -4095,6 +4161,7 @@ dependencies = [ "borsh 1.4.0", "namada_account", "namada_core", + "namada_events", "namada_governance", "namada_ibc", "namada_macros", @@ -4488,6 +4555,15 @@ version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" +[[package]] +name = "patricia_tree" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31f2f4539bffe53fc4b4da301df49d114b845b077bd5727b7fe2bd9d8df2ae68" +dependencies = [ + "bitflags 2.5.0", +] + [[package]] name = "pbkdf2" version = "0.4.0" @@ -6790,6 +6866,21 @@ version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" +[[package]] +name = "typewit" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6fb9ae6a3cafaf0a5d14c2302ca525f9ae8e07a0f0e6949de88d882c37a6e24" +dependencies = [ + "typewit_proc_macros", +] + +[[package]] +name = "typewit_proc_macros" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e36a83ea2b3c704935a01b4642946aadd445cea40b10935e3f8bd8052b8193d6" + [[package]] name = "ucd-trie" version = "0.1.6" diff --git a/wasm_for_tests/Cargo.lock b/wasm_for_tests/Cargo.lock index 630b6bcca2..f1f43a5e29 100644 --- a/wasm_for_tests/Cargo.lock +++ b/wasm_for_tests/Cargo.lock @@ -884,6 +884,12 @@ version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28c122c3980598d243d63d9a704629a2d748d101f278052ff068be5a4423ab6f" +[[package]] +name = "const_panic" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6051f239ecec86fde3410901ab7860d458d160371533842974fc61f96d15879b" + [[package]] name = "constant_time_eq" version = "0.3.0" @@ -3177,6 +3183,26 @@ dependencies = [ "cpufeatures", ] +[[package]] +name = "konst" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d712a8c49d4274f8d8a5cf61368cb5f3c143d149882b1a2918129e53395fdb0" +dependencies = [ + "const_panic", + "konst_kernel", + "typewit", +] + +[[package]] +name = "konst_kernel" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dac6ea8c376b6e208a81cf39b8e82bebf49652454d98a4829e907dac16ef1790" +dependencies = [ + "typewit", +] + [[package]] name = "lazy_static" version = "1.4.0" @@ -3491,12 +3517,14 @@ dependencies = [ "eyre", "futures", "itertools 0.10.5", + "konst", "loupe", "masp_primitives", "masp_proofs", "namada_account", "namada_core", "namada_ethereum_bridge", + "namada_events", "namada_gas", "namada_governance", "namada_ibc", @@ -3622,8 +3650,10 @@ dependencies = [ "ethers", "eyre", "itertools 0.10.5", + "konst", "namada_account", "namada_core", + "namada_events", "namada_macros", "namada_parameters", "namada_proof_of_stake", @@ -3642,12 +3672,24 @@ dependencies = [ "tracing", ] +[[package]] +name = "namada_events" +version = "0.34.0" +dependencies = [ + "borsh 1.2.1", + "namada_core", + "namada_macros", + "serde", + "thiserror", +] + [[package]] name = "namada_gas" version = "0.34.0" dependencies = [ "borsh 1.2.1", "namada_core", + "namada_events", "namada_macros", "serde", "thiserror", @@ -3659,7 +3701,9 @@ version = "0.34.0" dependencies = [ "borsh 1.2.1", 
"itertools 0.10.5", + "konst", "namada_core", + "namada_events", "namada_macros", "namada_parameters", "namada_storage", @@ -3680,10 +3724,13 @@ dependencies = [ "ibc-derive", "ibc-testkit", "ics23", + "konst", "masp_primitives", "namada_core", + "namada_events", "namada_gas", "namada_governance", + "namada_macros", "namada_parameters", "namada_state", "namada_storage", @@ -3691,6 +3738,7 @@ dependencies = [ "primitive-types", "proptest", "prost 0.12.3", + "serde", "serde_json", "sha2 0.9.9", "thiserror", @@ -3742,9 +3790,11 @@ dependencies = [ "borsh 1.2.1", "data-encoding", "derivative", + "konst", "namada_account", "namada_controller", "namada_core", + "namada_events", "namada_governance", "namada_macros", "namada_parameters", @@ -3790,6 +3840,7 @@ dependencies = [ "namada_account", "namada_core", "namada_ethereum_bridge", + "namada_events", "namada_gas", "namada_governance", "namada_ibc", @@ -3807,6 +3858,7 @@ dependencies = [ "owo-colors", "parse_duration", "paste", + "patricia_tree", "proptest", "prost 0.12.3", "rand 0.8.5", @@ -3852,6 +3904,7 @@ dependencies = [ "ics23", "itertools 0.10.5", "namada_core", + "namada_events", "namada_gas", "namada_macros", "namada_merkle_tree", @@ -3860,6 +3913,7 @@ dependencies = [ "namada_storage", "namada_trans_token", "namada_tx", + "patricia_tree", "proptest", "sha2 0.9.9", "sparse-merkle-tree", @@ -3934,6 +3988,7 @@ name = "namada_token" version = "0.34.0" dependencies = [ "namada_core", + "namada_events", "namada_shielded_token", "namada_storage", "namada_trans_token", @@ -3943,7 +3998,9 @@ dependencies = [ name = "namada_trans_token" version = "0.34.0" dependencies = [ + "konst", "namada_core", + "namada_events", "namada_storage", ] @@ -3955,8 +4012,10 @@ dependencies = [ "bitflags 2.5.0", "borsh 1.2.1", "data-encoding", + "konst", "masp_primitives", "namada_core", + "namada_events", "namada_gas", "namada_macros", "num-derive", @@ -3976,6 +4035,7 @@ name = "namada_tx_env" version = "0.34.0" dependencies = [ "namada_core", + "namada_events", "namada_storage", ] @@ -3987,6 +4047,7 @@ dependencies = [ "masp_primitives", "namada_account", "namada_core", + "namada_events", "namada_governance", "namada_ibc", "namada_macros", @@ -4028,6 +4089,7 @@ dependencies = [ "derivative", "masp_primitives", "namada_core", + "namada_events", "namada_ibc", "namada_storage", "namada_tx", @@ -4041,6 +4103,7 @@ dependencies = [ "borsh 1.2.1", "namada_account", "namada_core", + "namada_events", "namada_governance", "namada_ibc", "namada_macros", @@ -4434,6 +4497,15 @@ version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" +[[package]] +name = "patricia_tree" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31f2f4539bffe53fc4b4da301df49d114b845b077bd5727b7fe2bd9d8df2ae68" +dependencies = [ + "bitflags 2.5.0", +] + [[package]] name = "pbkdf2" version = "0.4.0" @@ -6655,6 +6727,21 @@ version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" +[[package]] +name = "typewit" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6fb9ae6a3cafaf0a5d14c2302ca525f9ae8e07a0f0e6949de88d882c37a6e24" +dependencies = [ + "typewit_proc_macros", +] + +[[package]] +name = "typewit_proc_macros" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum 
= "e36a83ea2b3c704935a01b4642946aadd445cea40b10935e3f8bd8052b8193d6" + [[package]] name = "ucd-trie" version = "0.1.6"