diff --git a/.gitignore b/.gitignore index 7a6dc4c7930..fbeffa8a9c9 100644 --- a/.gitignore +++ b/.gitignore @@ -13,3 +13,4 @@ lightning-rapid-gossip-sync/res/full_graph.lngossip lightning-custom-message/target lightning-transaction-sync/target no-std-check/target +msrv-no-dev-deps-check/target diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 89ff07fe698..b8a993196b8 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -40,7 +40,7 @@ use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, use lightning::sign::{KeyMaterial, InMemorySigner, Recipient, EntropySource, NodeSigner, SignerProvider}; use lightning::events; use lightning::events::MessageSendEventsProvider; -use lightning::ln::{PaymentHash, PaymentPreimage, PaymentSecret}; +use lightning::ln::{ChannelId, PaymentHash, PaymentPreimage, PaymentSecret}; use lightning::ln::channelmanager::{ChainParameters, ChannelDetails, ChannelManager, PaymentSendFailure, ChannelManagerReadArgs, PaymentId, RecipientOnionFields}; use lightning::ln::channel::FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE; use lightning::ln::msgs::{self, CommitmentUpdate, ChannelMessageHandler, DecodeError, UpdateAddHTLC, Init}; @@ -176,7 +176,7 @@ impl chain::Watch<TestChannelSigner> for TestChainMonitor { self.chain_monitor.watch_channel(funding_txo, monitor) } - fn update_channel(&self, funding_txo: OutPoint, update: &channelmonitor::ChannelMonitorUpdate) -> chain::ChannelMonitorUpdateStatus { + fn update_channel(&self, funding_txo: OutPoint, channel_id: ChannelId, update: &channelmonitor::ChannelMonitorUpdate) -> chain::ChannelMonitorUpdateStatus { let mut map_lock = self.latest_monitors.lock().unwrap(); let mut map_entry = match map_lock.entry(funding_txo) { hash_map::Entry::Occupied(entry) => entry, @@ -188,10 +188,10 @@ impl chain::Watch<TestChannelSigner> for TestChainMonitor { let mut ser = VecWriter(Vec::new()); deserialized_monitor.write(&mut ser).unwrap(); map_entry.insert((update.update_id, ser.0)); - self.chain_monitor.update_channel(funding_txo, update) + self.chain_monitor.update_channel(funding_txo, channel_id, update) } - fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)> { + fn release_pending_monitor_events(&self) -> Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, Option<PublicKey>)> { return self.chain_monitor.release_pending_monitor_events(); } } diff --git a/fuzz/src/full_stack.rs b/fuzz/src/full_stack.rs index 1f5ceb21235..2df63cf5453 100644 --- a/fuzz/src/full_stack.rs +++ b/fuzz/src/full_stack.rs @@ -22,8 +22,7 @@ use bitcoin::consensus::encode::deserialize; use bitcoin::network::constants::Network; use bitcoin::hashes::hex::FromHex; -use bitcoin::hashes::Hash as TraitImport; -use bitcoin::hashes::HashEngine as TraitImportEngine; +use bitcoin::hashes::Hash as _; use bitcoin::hashes::sha256::Hash as Sha256; use bitcoin::hashes::sha256d::Hash as Sha256dHash; use bitcoin::hash_types::{Txid, BlockHash, WPubkeyHash}; @@ -651,7 +650,7 @@ pub fn do_test(data: &[u8], logger: &Arc<dyn Logger>) { if let None = loss_detector.txids_confirmed.get(&funding_txid) { let outpoint = OutPoint { txid: funding_txid, index: 0 }; for chan in channelmanager.list_channels() { - if chan.channel_id == outpoint.to_channel_id() { + if chan.funding_txo == Some(outpoint) { tx.version += 1; continue 'search_loop; } diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs index 0f2c67538d6..fb6baabfac1 100644 --- a/lightning-background-processor/src/lib.rs +++ b/lightning-background-processor/src/lib.rs @@
-929,7 +929,7 @@ mod tests { use lightning::chain::transaction::OutPoint; use lightning::events::{Event, PathFailure, MessageSendEventsProvider, MessageSendEvent}; use lightning::{get_event_msg, get_event}; - use lightning::ln::PaymentHash; + use lightning::ln::{PaymentHash, ChannelId}; use lightning::ln::channelmanager; use lightning::ln::channelmanager::{BREAKDOWN_TIMEOUT, ChainParameters, MIN_CLTV_EXPIRY_DELTA, PaymentId}; use lightning::ln::features::{ChannelFeatures, NodeFeatures}; @@ -1414,7 +1414,7 @@ mod tests { } // Force-close the channel. - nodes[0].node.force_close_broadcasting_latest_txn(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap(); + nodes[0].node.force_close_broadcasting_latest_txn(&ChannelId::v1_from_funding_outpoint(OutPoint { txid: tx.txid(), index: 0 }), &nodes[1].node.get_our_node_id()).unwrap(); // Check that the force-close updates are persisted. check_persisted_data!(nodes[0].node, filepath.clone()); diff --git a/lightning-persister/src/fs_store.rs b/lightning-persister/src/fs_store.rs index b5c6526207d..350b1cdd195 100644 --- a/lightning-persister/src/fs_store.rs +++ b/lightning-persister/src/fs_store.rs @@ -450,7 +450,7 @@ mod tests { check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000); let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap(); let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap(); - let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap(); + let update_id = update_map.get(&added_monitors[0].1.channel_id()).unwrap(); // Set the store's directory to read-only, which should result in // returning an unrecoverable failure when we then attempt to persist a @@ -489,7 +489,7 @@ mod tests { check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000); let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap(); let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap(); - let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap(); + let update_id = update_map.get(&added_monitors[0].1.channel_id()).unwrap(); // Create the store with an invalid directory name and test that the // channel fails to open because the directories fail to be created. 
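For context on the `ChannelId::v1_from_funding_outpoint` call these tests switch to: per BOLT 2, a v1 channel ID is derived from the funding outpoint by XORing the funding output index into the last two bytes of the funding txid. A minimal stand-alone sketch of the equivalent math follows (the helper name is illustrative, not LDK API); the `test_channel_id_calculation` vectors later in this diff, where indexes 0 and 1 flip only the final byte of the resulting ID, exercise exactly this behavior:

    // Illustrative sketch of what ChannelId::v1_from_funding_txid computes.
    fn v1_channel_id_bytes(funding_txid: [u8; 32], output_index: u16) -> [u8; 32] {
        let mut id = funding_txid;
        id[30] ^= (output_index >> 8) as u8;   // XOR in the index's high byte
        id[31] ^= (output_index & 0xff) as u8; // XOR in the index's low byte
        id
    }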
There diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs index 14544754318..c6e87684223 100644 --- a/lightning/src/chain/chainmonitor.rs +++ b/lightning/src/chain/chainmonitor.rs @@ -31,6 +31,7 @@ use crate::chain::{ChannelMonitorUpdateStatus, Filter, WatchedOutput}; use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator}; use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, Balance, MonitorEvent, TransactionOutputs, WithChannelMonitor, LATENCY_GRACE_PERIOD_BLOCKS}; use crate::chain::transaction::{OutPoint, TransactionData}; +use crate::ln::ChannelId; use crate::sign::ecdsa::WriteableEcdsaChannelSigner; use crate::events; use crate::events::{Event, EventHandler}; @@ -158,7 +159,7 @@ pub trait Persist<ChannelSigner: WriteableEcdsaChannelSigner> { /// /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager /// [`Writeable::write`]: crate::util::ser::Writeable::write - fn persist_new_channel(&self, channel_id: OutPoint, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> ChannelMonitorUpdateStatus; + fn persist_new_channel(&self, channel_funding_outpoint: OutPoint, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> ChannelMonitorUpdateStatus; /// Update one channel's data. The provided [`ChannelMonitor`] has already applied the given /// update. /// @@ -193,7 +194,7 @@ pub trait Persist<ChannelSigner: WriteableEcdsaChannelSigner> { /// [`ChannelMonitorUpdateStatus`] for requirements when returning errors. /// /// [`Writeable::write`]: crate::util::ser::Writeable::write - fn update_persisted_channel(&self, channel_id: OutPoint, update: Option<&ChannelMonitorUpdate>, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> ChannelMonitorUpdateStatus; + fn update_persisted_channel(&self, channel_funding_outpoint: OutPoint, update: Option<&ChannelMonitorUpdate>, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> ChannelMonitorUpdateStatus; } struct MonitorHolder<ChannelSigner: WriteableEcdsaChannelSigner> { @@ -287,7 +288,7 @@ pub struct ChainMonitor<ChannelSigner: WriteableEcdsaChannelSigner, C: Deref, T: - pending_monitor_events: Mutex<Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)>>, + pending_monitor_events: Mutex<Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, Option<PublicKey>)>>, /// The best block height seen, used as a proxy for the passage of time. highest_chain_height: AtomicUsize, @@ -471,12 +472,15 @@ where C::Target: chain::Filter, } } - /// Lists the funding outpoint of each [`ChannelMonitor`] being monitored. + /// Lists the funding outpoint and channel ID of each [`ChannelMonitor`] being monitored. /// /// Note that [`ChannelMonitor`]s are not removed when a channel is closed as they are always /// monitoring for on-chain state resolutions. - pub fn list_monitors(&self) -> Vec<OutPoint> { - self.monitors.read().unwrap().keys().map(|outpoint| *outpoint).collect() + pub fn list_monitors(&self) -> Vec<(OutPoint, ChannelId)> { + self.monitors.read().unwrap().iter().map(|(outpoint, monitor_holder)| { + let channel_id = monitor_holder.monitor.channel_id(); + (*outpoint, channel_id) + }).collect() } #[cfg(not(c_bindings))] @@ -542,8 +546,9 @@ where C::Target: chain::Filter, // Completed event.
return Ok(()); } - self.pending_monitor_events.lock().unwrap().push((funding_txo, vec![MonitorEvent::Completed { - funding_txo, + let channel_id = monitor_data.monitor.channel_id(); + self.pending_monitor_events.lock().unwrap().push((funding_txo, channel_id, vec![MonitorEvent::Completed { + funding_txo, channel_id, monitor_update_id: monitor_data.monitor.get_latest_update_id(), }], monitor_data.monitor.get_counterparty_node_id())); }, @@ -565,9 +570,14 @@ where C::Target: chain::Filter, #[cfg(any(test, fuzzing))] pub fn force_channel_monitor_updated(&self, funding_txo: OutPoint, monitor_update_id: u64) { let monitors = self.monitors.read().unwrap(); - let counterparty_node_id = monitors.get(&funding_txo).and_then(|m| m.monitor.get_counterparty_node_id()); - self.pending_monitor_events.lock().unwrap().push((funding_txo, vec![MonitorEvent::Completed { + let (counterparty_node_id, channel_id) = if let Some(m) = monitors.get(&funding_txo) { + (m.monitor.get_counterparty_node_id(), m.monitor.channel_id()) + } else { + (None, ChannelId::v1_from_funding_outpoint(funding_txo)) + }; + self.pending_monitor_events.lock().unwrap().push((funding_txo, channel_id, vec![MonitorEvent::Completed { funding_txo, + channel_id, monitor_update_id, }], counterparty_node_id)); self.event_notifier.notify(); @@ -752,12 +762,12 @@ where C::Target: chain::Filter, Ok(persist_res) } - fn update_channel(&self, funding_txo: OutPoint, update: &ChannelMonitorUpdate) -> ChannelMonitorUpdateStatus { + fn update_channel(&self, funding_txo: OutPoint, channel_id: ChannelId, update: &ChannelMonitorUpdate) -> ChannelMonitorUpdateStatus { // Update the monitor that watches the channel referred to by the given outpoint. let monitors = self.monitors.read().unwrap(); match monitors.get(&funding_txo) { None => { - let logger = WithContext::from(&self.logger, update.counterparty_node_id, Some(funding_txo.to_channel_id())); + let logger = WithContext::from(&self.logger, update.counterparty_node_id, Some(channel_id)); log_error!(logger, "Failed to update channel monitor: no such monitor registered"); // We should never ever trigger this from within ChannelManager. 
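To illustrate the widened tuple consumers now receive (a hedged sketch; the function and its use are hypothetical, not part of this diff): callers of `release_pending_monitor_events` get the `ChannelId` alongside the funding outpoint, so downstream handling no longer re-derives it from the outpoint:

    use bitcoin::secp256k1::PublicKey;
    use lightning::chain::channelmonitor::MonitorEvent;
    use lightning::chain::transaction::OutPoint;
    use lightning::ln::ChannelId;

    // Hypothetical consumer of chain::Watch::release_pending_monitor_events().
    fn drain_monitor_events(pending: Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, Option<PublicKey>)>) {
        for (funding_txo, channel_id, events, _counterparty_node_id) in pending {
            for event in events {
                if let MonitorEvent::Completed { monitor_update_id, .. } = event {
                    // The channel ID travels with the event, so it stays correct even
                    // for channels whose ID is not derived from the funding outpoint.
                    println!("update {} completed for channel {} (funding {:?})",
                        monitor_update_id, channel_id, funding_txo);
                }
            }
        }
    }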
Technically a @@ -815,7 +825,7 @@ where C::Target: chain::Filter, } } - fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)> { + fn release_pending_monitor_events(&self) -> Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, Option<PublicKey>)> { let mut pending_monitor_events = self.pending_monitor_events.lock().unwrap().split_off(0); for monitor_state in self.monitors.read().unwrap().values() { let logger = WithChannelMonitor::from(&self.logger, &monitor_state.monitor); @@ -829,8 +839,9 @@ where C::Target: chain::Filter, let monitor_events = monitor_state.monitor.get_and_clear_pending_monitor_events(); if monitor_events.len() > 0 { let monitor_outpoint = monitor_state.monitor.get_funding_txo().0; + let monitor_channel_id = monitor_state.monitor.channel_id(); let counterparty_node_id = monitor_state.monitor.get_counterparty_node_id(); - pending_monitor_events.push((monitor_outpoint, monitor_events, counterparty_node_id)); + pending_monitor_events.push((monitor_outpoint, monitor_channel_id, monitor_events, counterparty_node_id)); } } } diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index c81a48b78ac..8533fc9dc96 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -158,6 +158,8 @@ pub enum MonitorEvent { Completed { /// The funding outpoint of the [`ChannelMonitor`] that was updated funding_txo: OutPoint, + /// The channel ID of the channel associated with the [`ChannelMonitor`] + channel_id: ChannelId, /// The Update ID from [`ChannelMonitorUpdate::update_id`] which was applied or /// [`ChannelMonitor::get_latest_update_id`]. /// @@ -172,6 +174,7 @@ impl_writeable_tlv_based_enum_upgradable!(MonitorEvent, (0, Completed) => { (0, funding_txo, required), (2, monitor_update_id, required), + (4, channel_id, required), }, ; (2, HTLCEvent), @@ -772,6 +775,7 @@ pub(crate) struct ChannelMonitorImpl<Signer: WriteableEcdsaChannelSigner> { channel_keys_id: [u8; 32], holder_revocation_basepoint: RevocationBasepoint, + channel_id: ChannelId, funding_info: (OutPoint, ScriptBuf), current_counterparty_commitment_txid: Option<Txid>, prev_counterparty_commitment_txid: Option<Txid>, @@ -1097,6 +1101,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Writeable for ChannelMonitorImpl<Signer> ... impl<'a, L: Deref> WithChannelMonitor<'a, L> where L::Target: Logger { pub(crate) fn from_impl<S: WriteableEcdsaChannelSigner>(logger: &'a L, monitor_impl: &ChannelMonitorImpl<S>) -> Self { let peer_id = monitor_impl.counterparty_node_id; - let channel_id = Some(monitor_impl.funding_info.0.to_channel_id()); + let channel_id = Some(monitor_impl.channel_id()); WithChannelMonitor { logger, peer_id, channel_id, } @@ -1181,7 +1186,8 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> { funding_redeemscript: ScriptBuf, channel_value_satoshis: u64, commitment_transaction_number_obscure_factor: u64, initial_holder_commitment_tx: HolderCommitmentTransaction, - best_block: BestBlock, counterparty_node_id: PublicKey) -> ChannelMonitor<Signer> { + best_block: BestBlock, counterparty_node_id: PublicKey, channel_id: ChannelId, + ) -> ChannelMonitor<Signer> { assert!(commitment_transaction_number_obscure_factor <= (1 << 48)); let counterparty_payment_script = chan_utils::get_counterparty_payment_script( @@ -1235,6 +1241,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> { channel_keys_id, holder_revocation_basepoint, + channel_id, funding_info, current_counterparty_commitment_txid: None, prev_counterparty_commitment_txid: None, @@ -1386,6 +1393,11 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> { self.inner.lock().unwrap().get_funding_txo().clone() } + /// Gets the channel_id of the channel this ChannelMonitor is monitoring for.
+ pub fn channel_id(&self) -> ChannelId { + self.inner.lock().unwrap().channel_id() + } + /// Gets a list of txids, with their output scripts (in the order they appear in the /// transaction), which we must learn about spends of via block_connected(). pub fn get_outputs_to_watch(&self) -> Vec<(Txid, Vec<(u32, ScriptBuf)>)> { @@ -2834,7 +2846,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> { self.queue_latest_holder_commitment_txn_for_broadcast(broadcaster, &bounded_fee_estimator, logger); } else if !self.holder_tx_signed { log_error!(logger, "WARNING: You have a potentially-unsafe holder commitment transaction available to broadcast"); - log_error!(logger, " in channel monitor for channel {}!", &self.funding_info.0.to_channel_id()); + log_error!(logger, " in channel monitor for channel {}!", &self.channel_id()); log_error!(logger, " Read the docs for ChannelMonitor::get_latest_holder_commitment_txn and take manual action!"); } else { // If we generated a MonitorEvent::HolderForceClosed, the ChannelManager @@ -2880,6 +2892,10 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> { &self.funding_info } + pub fn channel_id(&self) -> ChannelId { + self.channel_id + } + fn get_outputs_to_watch(&self) -> &HashMap<Txid, Vec<(u32, ScriptBuf)>> { // If we've detected a counterparty commitment tx on chain, we must include it in the set // of outputs to watch for spends of, otherwise we're likely to lose user funds. Because @@ -3642,7 +3658,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> { if prevout.txid == self.funding_info.0.txid && prevout.vout == self.funding_info.0.index as u32 { let mut balance_spendable_csv = None; log_info!(logger, "Channel {} closed by funding output spend in txid {}.", - &self.funding_info.0.to_channel_id(), txid); + &self.channel_id(), txid); self.funding_spend_seen = true; let mut commitment_tx_to_counterparty_output = None; if (tx.input[0].sequence.0 >> 8*3) as u8 == 0x80 && (tx.lock_time.to_consensus_u32() >> 8*3) as u8 == 0x20 { @@ -3812,7 +3828,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> { log_debug!(logger, "Descriptor {} has got enough confirmations to be passed upstream", log_spendable!(descriptor)); self.pending_events.push(Event::SpendableOutputs { outputs: vec![descriptor], - channel_id: Some(self.funding_info.0.to_channel_id()), + channel_id: Some(self.channel_id()), }); self.spendable_txids_confirmed.push(entry.txid); }, @@ -4557,6 +4573,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP let mut spendable_txids_confirmed = Some(Vec::new()); let mut counterparty_fulfilled_htlcs = Some(HashMap::new()); let mut initial_counterparty_commitment_info = None; + let mut channel_id = None; read_tlv_fields!(reader, { (1, funding_spend_confirmed, option), (3, htlcs_resolved_on_chain, optional_vec), @@ -4567,6 +4584,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP (13, spendable_txids_confirmed, optional_vec), (15, counterparty_fulfilled_htlcs, option), (17, initial_counterparty_commitment_info, option), + (19, channel_id, option), }); // Monitors for anchor outputs channels opened in v0.0.116 suffered from a bug in which the @@ -4591,6 +4609,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP channel_keys_id, holder_revocation_basepoint, + channel_id: channel_id.unwrap_or(ChannelId::v1_from_funding_outpoint(outpoint)), funding_info, current_counterparty_commitment_txid, prev_counterparty_commitment_txid, @@ -4665,7 +4684,7 @@ mod tests { use crate::chain::package::{weight_offered_htlc, weight_received_htlc, weight_revoked_offered_htlc, weight_revoked_received_htlc,
WEIGHT_REVOKED_OUTPUT}; use crate::chain::transaction::OutPoint; use crate::sign::InMemorySigner; - use crate::ln::{PaymentPreimage, PaymentHash}; + use crate::ln::{PaymentPreimage, PaymentHash, ChannelId}; use crate::ln::channel_keys::{DelayedPaymentBasepoint, DelayedPaymentKey, HtlcBasepoint, RevocationBasepoint, RevocationKey}; use crate::ln::chan_utils::{self,HTLCOutputInCommitment, ChannelPublicKeys, ChannelTransactionParameters, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters}; use crate::ln::channelmanager::{PaymentSendFailure, PaymentId, RecipientOnionFields}; @@ -4841,6 +4860,7 @@ mod tests { htlc_basepoint: HtlcBasepoint::from(PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[48; 32]).unwrap())) }; let funding_outpoint = OutPoint { txid: Txid::all_zeros(), index: u16::max_value() }; + let channel_id = ChannelId::v1_from_funding_outpoint(funding_outpoint); let channel_parameters = ChannelTransactionParameters { holder_pubkeys: keys.holder_channel_pubkeys.clone(), holder_selected_contest_delay: 66, @@ -4860,7 +4880,7 @@ mod tests { Some(ShutdownScript::new_p2wpkh_from_pubkey(shutdown_pubkey).into_inner()), 0, &ScriptBuf::new(), (OutPoint { txid: Txid::from_slice(&[43; 32]).unwrap(), index: 0 }, ScriptBuf::new()), &channel_parameters, ScriptBuf::new(), 46, 0, HolderCommitmentTransaction::dummy(&mut Vec::new()), - best_block, dummy_key); + best_block, dummy_key, channel_id); let mut htlcs = preimages_slice_to_htlcs!(preimages[0..10]); let dummy_commitment_tx = HolderCommitmentTransaction::dummy(&mut htlcs); @@ -5090,6 +5110,7 @@ mod tests { htlc_basepoint: HtlcBasepoint::from(PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[48; 32]).unwrap())), }; let funding_outpoint = OutPoint { txid: Txid::all_zeros(), index: u16::max_value() }; + let channel_id = ChannelId::v1_from_funding_outpoint(funding_outpoint); let channel_parameters = ChannelTransactionParameters { holder_pubkeys: keys.holder_channel_pubkeys.clone(), holder_selected_contest_delay: 66, @@ -5107,9 +5128,9 @@ mod tests { Some(ShutdownScript::new_p2wpkh_from_pubkey(shutdown_pubkey).into_inner()), 0, &ScriptBuf::new(), (OutPoint { txid: Txid::from_slice(&[43; 32]).unwrap(), index: 0 }, ScriptBuf::new()), &channel_parameters, ScriptBuf::new(), 46, 0, HolderCommitmentTransaction::dummy(&mut Vec::new()), - best_block, dummy_key); + best_block, dummy_key, channel_id); - let chan_id = monitor.inner.lock().unwrap().funding_info.0.to_channel_id().clone(); + let chan_id = monitor.inner.lock().unwrap().channel_id(); let context_logger = WithChannelMonitor::from(&logger, &monitor); log_error!(context_logger, "This is an error"); log_warn!(context_logger, "This is an error"); diff --git a/lightning/src/chain/mod.rs b/lightning/src/chain/mod.rs index dafce03ddb0..e816d57f0f8 100644 --- a/lightning/src/chain/mod.rs +++ b/lightning/src/chain/mod.rs @@ -17,6 +17,7 @@ use bitcoin::network::constants::Network; use bitcoin::secp256k1::PublicKey; use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, MonitorEvent}; +use crate::ln::ChannelId; use crate::sign::ecdsa::WriteableEcdsaChannelSigner; use crate::chain::transaction::{OutPoint, TransactionData}; @@ -286,7 +287,7 @@ pub trait Watch<ChannelSigner: WriteableEcdsaChannelSigner> { /// [`ChannelMonitorUpdateStatus::UnrecoverableError`], see its documentation for more info.
/// /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager - fn update_channel(&self, funding_txo: OutPoint, update: &ChannelMonitorUpdate) -> ChannelMonitorUpdateStatus; + fn update_channel(&self, funding_txo: OutPoint, channel_id: ChannelId, update: &ChannelMonitorUpdate) -> ChannelMonitorUpdateStatus; /// Returns any monitor events since the last call. Subsequent calls must only return new /// events. /// @@ -297,7 +298,7 @@ pub trait Watch<ChannelSigner: WriteableEcdsaChannelSigner> { /// /// For details on asynchronous [`ChannelMonitor`] updating and returning /// [`MonitorEvent::Completed`] here, see [`ChannelMonitorUpdateStatus::InProgress`]. - fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)>; + fn release_pending_monitor_events(&self) -> Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, Option<PublicKey>)>; } /// The `Filter` trait defines behavior for indicating chain activity of interest pertaining to diff --git a/lightning/src/chain/transaction.rs b/lightning/src/chain/transaction.rs index 5bef97792d3..17815207a8d 100644 --- a/lightning/src/chain/transaction.rs +++ b/lightning/src/chain/transaction.rs @@ -9,9 +9,7 @@ //! Types describing on-chain transactions. -use crate::ln::ChannelId; use bitcoin::hash_types::Txid; -use bitcoin::hashes::Hash; use bitcoin::blockdata::transaction::OutPoint as BitcoinOutPoint; use bitcoin::blockdata::transaction::Transaction; @@ -58,11 +56,6 @@ pub struct OutPoint { } impl OutPoint { - /// Convert an `OutPoint` to a lightning channel id. - pub fn to_channel_id(&self) -> ChannelId { - ChannelId::v1_from_funding_txid(self.txid.as_byte_array(), self.index) - } - /// Converts this OutPoint into the OutPoint field as used by rust-bitcoin /// /// This is not exported to bindings users as the same type is used universally in the C bindings @@ -86,6 +79,7 @@ impl_writeable!(OutPoint, { txid, index }); #[cfg(test)] mod tests { use crate::chain::transaction::OutPoint; + use crate::ln::ChannelId; use bitcoin::blockdata::transaction::Transaction; use bitcoin::consensus::encode; @@ -94,13 +88,13 @@ mod tests { #[test] fn test_channel_id_calculation() { let tx: Transaction = encode::deserialize(&<Vec<u8>>::from_hex("020000000001010e0adef48412e4361325ac1c6e36411299ab09d4f083b9d8ddb55fbc06e1b0c00000000000feffffff0220a1070000000000220020f81d95e040bd0a493e38bae27bff52fe2bb58b93b293eb579c01c31b05c5af1dc072cfee54a3000016001434b1d6211af5551905dc2642d05f5b04d25a8fe80247304402207f570e3f0de50546aad25a872e3df059d277e776dda4269fa0d2cc8c2ee6ec9a022054e7fae5ca94d47534c86705857c24ceea3ad51c69dd6051c5850304880fc43a012103cb11a1bacc223d98d91f1946c6752e358a5eb1a1c983b3e6fb15378f453b76bd00000000").unwrap()[..]).unwrap(); - assert_eq!(&OutPoint { + assert_eq!(&ChannelId::v1_from_funding_outpoint(OutPoint { txid: tx.txid(), index: 0 - }.to_channel_id().0[..], &<Vec<u8>>::from_hex("3e88dd7165faf7be58b3c5bb2c9c452aebef682807ea57080f62e6f6e113c25e").unwrap()[..]); - assert_eq!(&OutPoint { + }).0[..], &<Vec<u8>>::from_hex("3e88dd7165faf7be58b3c5bb2c9c452aebef682807ea57080f62e6f6e113c25e").unwrap()[..]); + assert_eq!(&ChannelId::v1_from_funding_outpoint(OutPoint { txid: tx.txid(), index: 1 - }.to_channel_id().0[..], &<Vec<u8>>::from_hex("3e88dd7165faf7be58b3c5bb2c9c452aebef682807ea57080f62e6f6e113c25f").unwrap()[..]); + }).0[..], &<Vec<u8>>::from_hex("3e88dd7165faf7be58b3c5bb2c9c452aebef682807ea57080f62e6f6e113c25f").unwrap()[..]); } } diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index af827b8cebb..07b3e67f13a 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++
b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -21,7 +21,7 @@ use crate::chain::{ChannelMonitorUpdateStatus, Listen, Watch}; use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason, HTLCDestination}; use crate::ln::channelmanager::{RAACommitmentOrder, PaymentSendFailure, PaymentId, RecipientOnionFields}; use crate::ln::channel::{AnnouncementSigsState, ChannelPhase}; -use crate::ln::msgs; +use crate::ln::{msgs, ChannelId}; use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler}; use crate::util::test_channel_signer::TestChannelSigner; use crate::util::errors::APIError; @@ -50,6 +50,7 @@ fn test_monitor_and_persister_update_fail() { // Create some initial channel let chan = create_announced_chan_between_nodes(&nodes, 0, 1); let outpoint = OutPoint { txid: chan.3.txid(), index: 0 }; + let channel_id = chan.2; // Rebalance the network to generate htlc in the two directions send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000); @@ -101,12 +102,12 @@ fn test_monitor_and_persister_update_fail() { if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) { // Check that the persister returns InProgress (and will never actually complete) // as the monitor update errors. - if let ChannelMonitorUpdateStatus::InProgress = chain_mon.chain_monitor.update_channel(outpoint, &update) {} else { panic!("Expected monitor paused"); } + if let ChannelMonitorUpdateStatus::InProgress = chain_mon.chain_monitor.update_channel(outpoint, channel_id, &update) {} else { panic!("Expected monitor paused"); } logger.assert_log_regex("lightning::chain::chainmonitor", regex::Regex::new("Failed to update ChannelMonitor for channel [0-9a-f]*.").unwrap(), 1); // Apply the monitor update to the original ChainMonitor, ensuring the // ChannelManager and ChannelMonitor aren't out of sync. - assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), + assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, channel_id, &update), ChannelMonitorUpdateStatus::Completed); } else { assert!(false); } } else { @@ -1861,7 +1862,7 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf: chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()); - let channel_id = OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id(); + let channel_id = ChannelId::v1_from_funding_outpoint(OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }); nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg); check_added_monitors!(nodes[1], 1); diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index ec4f26664a7..231db263e75 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -816,7 +816,7 @@ pub(super) struct ReestablishResponses { pub(crate) struct ShutdownResult { pub(crate) closure_reason: ClosureReason, /// A channel monitor update to apply. - pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>, + pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelId, ChannelMonitorUpdate)>, /// A list of dropped outbound HTLCs that can safely be failed backwards immediately. 
pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>, /// An unbroadcasted batch funding transaction id. The closure of this channel should be @@ -2394,7 +2394,7 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider { }; if generate_monitor_update { self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID; - Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate { + Some((self.get_counterparty_node_id(), funding_txo, self.channel_id(), ChannelMonitorUpdate { update_id: self.latest_monitor_update_id, counterparty_node_id: Some(self.counterparty_node_id), updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }], @@ -6475,7 +6475,7 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider { // Now that we're past error-generating stuff, update our local state: self.context.channel_state = ChannelState::FundingNegotiated; - self.context.channel_id = funding_txo.to_channel_id(); + self.context.channel_id = ChannelId::v1_from_funding_outpoint(funding_txo); // If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100. // We can skip this if it is a zero-conf channel. @@ -6814,7 +6814,7 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider { &self.context.channel_transaction_parameters, funding_redeemscript.clone(), self.context.channel_value_satoshis, obscure_factor, - holder_commitment_tx, best_block, self.context.counterparty_node_id); + holder_commitment_tx, best_block, self.context.counterparty_node_id, self.context.channel_id()); channel_monitor.provide_initial_counterparty_commitment_tx( counterparty_initial_bitcoin_tx.txid, Vec::new(), self.context.cur_counterparty_commitment_transaction_number, @@ -7356,7 +7356,7 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider { // Now that we're past error-generating stuff, update our local state: self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()); - self.context.channel_id = funding_txo.to_channel_id(); + self.context.channel_id = ChannelId::v1_from_funding_outpoint(funding_txo); self.context.cur_counterparty_commitment_transaction_number -= 1; self.context.cur_holder_commitment_transaction_number -= 1; @@ -7374,7 +7374,7 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider { &self.context.channel_transaction_parameters, funding_redeemscript.clone(), self.context.channel_value_satoshis, obscure_factor, - holder_commitment_tx, best_block, self.context.counterparty_node_id); + holder_commitment_tx, best_block, self.context.counterparty_node_id, self.context.channel_id()); channel_monitor.provide_initial_counterparty_commitment_tx( counterparty_initial_commitment_tx.trust().txid(), Vec::new(), self.context.cur_counterparty_commitment_transaction_number + 1, diff --git a/lightning/src/ln/channel_id.rs b/lightning/src/ln/channel_id.rs index 8df6d75ef5e..19003961ff1 100644 --- a/lightning/src/ln/channel_id.rs +++ b/lightning/src/ln/channel_id.rs @@ -9,11 +9,13 @@ //! ChannelId definition.
+use crate::chain::transaction::OutPoint; +use crate::io; use crate::ln::msgs::DecodeError; use crate::sign::EntropySource; use crate::util::ser::{Readable, Writeable, Writer}; -use crate::io; +use bitcoin::hashes::Hash as _; use core::fmt; use core::ops::Deref; @@ -40,6 +42,11 @@ impl ChannelId { Self(res) } + /// Create _v1_ channel ID from a funding tx outpoint + pub fn v1_from_funding_outpoint(outpoint: OutPoint) -> Self { + Self::v1_from_funding_txid(outpoint.txid.as_byte_array(), outpoint.index) + } + /// Create a _temporary_ channel ID randomly, based on an entropy source. pub fn temporary_from_entropy_source<ES: Deref>(entropy_source: &ES) -> Self where ES::Target: EntropySource { diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index ec3e8ef604b..e9edc2b306b 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -288,6 +288,7 @@ pub(super) struct PendingAddHTLCInfo { // Note that this may be an outbound SCID alias for the associated channel. prev_short_channel_id: u64, prev_htlc_id: u64, + prev_channel_id: ChannelId, prev_funding_outpoint: OutPoint, prev_user_channel_id: u128, } @@ -328,6 +329,7 @@ pub(crate) struct HTLCPreviousHopData { incoming_packet_shared_secret: [u8; 32], phantom_shared_secret: Option<[u8; 32]>, blinded_failure: Option<BlindedFailure>, + channel_id: ChannelId, // This field is consumed by `claim_funds_from_hop()` when updating a force-closed backwards // channel with a preimage provided by the forward channel. @@ -368,7 +370,7 @@ struct ClaimableHTLC { impl From<&ClaimableHTLC> for events::ClaimedHTLC { fn from(val: &ClaimableHTLC) -> Self { events::ClaimedHTLC { - channel_id: val.prev_hop.outpoint.to_channel_id(), + channel_id: val.prev_hop.channel_id, user_channel_id: val.prev_hop.user_channel_id.unwrap_or(0), cltv_expiry: val.cltv_expiry, value_msat: val.value, @@ -707,7 +709,7 @@ enum BackgroundEvent { /// /// Note that any such events are lost on shutdown, so in general they must be updates which /// are regenerated on startup. - ClosedMonitorUpdateRegeneratedOnStartup((OutPoint, ChannelMonitorUpdate)), + ClosedMonitorUpdateRegeneratedOnStartup((OutPoint, ChannelId, ChannelMonitorUpdate)), /// Handle a ChannelMonitorUpdate which may or may not close the channel and may unblock the /// channel to continue normal operation. /// @@ -721,6 +723,7 @@ enum BackgroundEvent { MonitorUpdateRegeneratedOnStartup { counterparty_node_id: PublicKey, funding_txo: OutPoint, + channel_id: ChannelId, update: ChannelMonitorUpdate }, /// Some [`ChannelMonitorUpdate`] (s) completed before we were serialized but we still have @@ -749,7 +752,7 @@ pub(crate) enum MonitorUpdateCompletionAction { /// outbound edge. EmitEventAndFreeOtherChannel { event: events::Event, - downstream_counterparty_and_funding_outpoint: Option<(PublicKey, OutPoint, RAAMonitorUpdateBlockingAction)>, + downstream_counterparty_and_funding_outpoint: Option<(PublicKey, OutPoint, ChannelId, RAAMonitorUpdateBlockingAction)>, }, /// Indicates we should immediately resume the operation of another channel, unless there is /// some other reason why the channel is blocked.
In practice this simply means immediately @@ -767,6 +770,7 @@ pub(crate) enum MonitorUpdateCompletionAction { downstream_counterparty_node_id: PublicKey, downstream_funding_outpoint: OutPoint, blocking_action: RAAMonitorUpdateBlockingAction, + downstream_channel_id: ChannelId, }, } @@ -778,6 +782,9 @@ impl_writeable_tlv_based_enum_upgradable!(MonitorUpdateCompletionAction, (0, downstream_counterparty_node_id, required), (2, downstream_funding_outpoint, required), (4, blocking_action, required), + // Note that by the time we get past the required read above, downstream_funding_outpoint will be + // filled in, so we can safely unwrap it here. + (5, downstream_channel_id, (default_value, ChannelId::v1_from_funding_outpoint(downstream_funding_outpoint.0.unwrap()))), }, (2, EmitEventAndFreeOtherChannel) => { (0, event, upgradable_required), @@ -795,12 +802,16 @@ pub(crate) enum EventCompletionAction { ReleaseRAAChannelMonitorUpdate { counterparty_node_id: PublicKey, channel_funding_outpoint: OutPoint, + channel_id: ChannelId, }, } impl_writeable_tlv_based_enum!(EventCompletionAction, (0, ReleaseRAAChannelMonitorUpdate) => { (0, channel_funding_outpoint, required), (2, counterparty_node_id, required), + // Note that by the time we get past the required read above, channel_funding_outpoint will be + // filled in, so we can safely unwrap it here. + (3, channel_id, (default_value, ChannelId::v1_from_funding_outpoint(channel_funding_outpoint.0.unwrap()))), }; ); @@ -822,7 +833,7 @@ pub(crate) enum RAAMonitorUpdateBlockingAction { impl RAAMonitorUpdateBlockingAction { fn from_prev_hop_data(prev_hop: &HTLCPreviousHopData) -> Self { Self::ForwardedPaymentInboundClaim { - channel_id: prev_hop.outpoint.to_channel_id(), + channel_id: prev_hop.channel_id, htlc_id: prev_hop.htlc_id, } } @@ -1626,8 +1637,8 @@ pub struct ChannelDetails { /// The Channel's funding transaction output, if we've negotiated the funding transaction with /// our counterparty already. /// - /// Note that, if this has been set, `channel_id` will be equivalent to - /// `funding_txo.unwrap().to_channel_id()`. + /// Note that, if this has been set, `channel_id` for V1-established channels will be equivalent to + /// `ChannelId::v1_from_funding_outpoint(funding_txo.unwrap())`. pub funding_txo: Option<OutPoint>, /// The features which this channel operates with. See individual features for more info. /// @@ -2285,7 +2296,7 @@ macro_rules! handle_new_monitor_update { handle_new_monitor_update!($self, $update_res, $chan, _internal, handle_monitor_update_completion!($self, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan)) }; - ($self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { { + ($self: ident, $funding_txo: expr, $channel_id: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { { let in_flight_updates = $peer_state.in_flight_monitor_updates.entry($funding_txo) .or_insert_with(Vec::new); // During startup, we push monitor updates as background events through to here in @@ -2296,7 +2307,7 @@ macro_rules!
handle_new_monitor_update { in_flight_updates.push($update); in_flight_updates.len() - 1 }); - let update_res = $self.chain_monitor.update_channel($funding_txo, &in_flight_updates[idx]); + let update_res = $self.chain_monitor.update_channel($funding_txo, $channel_id, &in_flight_updates[idx]); handle_new_monitor_update!($self, update_res, $chan, _internal, { let _ = in_flight_updates.remove(idx); @@ -2742,7 +2753,7 @@ where // Update the monitor with the shutdown script if necessary. if let Some(monitor_update) = monitor_update_opt.take() { - handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update, + handle_new_monitor_update!(self, funding_txo_opt.unwrap(), *channel_id, monitor_update, peer_state_lock, peer_state, per_peer_state, chan); } } else { @@ -2853,12 +2864,12 @@ where let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id }; self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver); } - if let Some((_, funding_txo, monitor_update)) = shutdown_res.monitor_update { + if let Some((_, funding_txo, channel_id, monitor_update)) = shutdown_res.monitor_update { // There isn't anything we can do if we get an update failure - we're already // force-closing. The monitor update on the required in-memory copy should broadcast // the latest local state, which is the best we can do anyway. Thus, it is safe to // ignore the result here. - let _ = self.chain_monitor.update_channel(funding_txo, &monitor_update); + let _ = self.chain_monitor.update_channel(funding_txo, channel_id, &monitor_update); } let mut shutdown_results = Vec::new(); if let Some(txid) = shutdown_res.unbroadcasted_batch_funding_txid { @@ -3393,6 +3404,7 @@ where return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected".to_owned()}); } let funding_txo = chan.context.get_funding_txo().unwrap(); + let channel_id = chan.context.channel_id(); let logger = WithChannelContext::from(&self.logger, &chan.context); let send_res = chan.send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute { @@ -3403,7 +3415,7 @@ where }, onion_packet, None, &self.fee_estimator, &&logger); match break_chan_phase_entry!(self, send_res, chan_phase_entry) { Some(monitor_update) => { - match handle_new_monitor_update!(self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, chan) { + match handle_new_monitor_update!(self, funding_txo, channel_id, monitor_update, peer_state_lock, peer_state, per_peer_state, chan) { false => { // Note that MonitorUpdateInProgress here indicates (per function // docs) that we will resend the commitment update once monitor @@ -3952,7 +3964,10 @@ where } let outpoint = OutPoint { txid: tx.txid(), index: output_index.unwrap() }; if let Some(funding_batch_state) = funding_batch_state.as_mut() { - funding_batch_state.push((outpoint.to_channel_id(), *counterparty_node_id, false)); + // TODO(dual_funding): We only do batch funding for V1 channels at the moment, but we'll probably + // need to fix this somehow to not rely on using the outpoint for the channel ID if we + // want to support V2 batching here as well. 
+ funding_batch_state.push((ChannelId::v1_from_funding_outpoint(outpoint), *counterparty_node_id, false)); } Ok(outpoint) }) @@ -4177,6 +4192,7 @@ where let mut per_source_pending_forward = [( payment.prev_short_channel_id, payment.prev_funding_outpoint, + payment.prev_channel_id, payment.prev_user_channel_id, vec![(pending_htlc_info, payment.prev_htlc_id)] )]; @@ -4204,6 +4220,7 @@ where short_channel_id: payment.prev_short_channel_id, user_channel_id: Some(payment.prev_user_channel_id), outpoint: payment.prev_funding_outpoint, + channel_id: payment.prev_channel_id, htlc_id: payment.prev_htlc_id, incoming_packet_shared_secret: payment.forward_info.incoming_shared_secret, phantom_shared_secret: None, @@ -4227,7 +4244,7 @@ where let mut new_events = VecDeque::new(); let mut failed_forwards = Vec::new(); - let mut phantom_receives: Vec<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> = Vec::new(); + let mut phantom_receives: Vec<(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)> = Vec::new(); { let mut forward_htlcs = HashMap::new(); mem::swap(&mut forward_htlcs, &mut self.forward_htlcs.lock().unwrap()); @@ -4240,20 +4257,21 @@ where for forward_info in pending_forwards.drain(..) { match forward_info { HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { - prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id, - forward_info: PendingHTLCInfo { + prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint, + prev_user_channel_id, forward_info: PendingHTLCInfo { routing, incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value, .. } }) => { macro_rules! failure_handler { ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr, $next_hop_unknown: expr) => { - let logger = WithContext::from(&self.logger, forwarding_counterparty, Some(prev_funding_outpoint.to_channel_id())); + let logger = WithContext::from(&self.logger, forwarding_counterparty, Some(prev_channel_id)); log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg); let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id: prev_short_channel_id, user_channel_id: Some(prev_user_channel_id), + channel_id: prev_channel_id, outpoint: prev_funding_outpoint, htlc_id: prev_htlc_id, incoming_packet_shared_secret: incoming_shared_secret, @@ -4317,7 +4335,7 @@ where outgoing_cltv_value, Some(phantom_shared_secret), false, None, current_height, self.default_configuration.accept_mpp_keysend) { - Ok(info) => phantom_receives.push((prev_short_channel_id, prev_funding_outpoint, prev_user_channel_id, vec![(info, prev_htlc_id)])), + Ok(info) => phantom_receives.push((prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_user_channel_id, vec![(info, prev_htlc_id)])), Err(InboundHTLCErr { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret)) } }, @@ -4362,8 +4380,8 @@ where for forward_info in pending_forwards.drain(..) { let queue_fail_htlc_res = match forward_info { HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { - prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id, - forward_info: PendingHTLCInfo { + prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint, + prev_user_channel_id, forward_info: PendingHTLCInfo { incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value, routing: PendingHTLCRouting::Forward { onion_packet, blinded, .. 
@@ -4374,6 +4392,7 @@ where let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id: prev_short_channel_id, user_channel_id: Some(prev_user_channel_id), + channel_id: prev_channel_id, outpoint: prev_funding_outpoint, htlc_id: prev_htlc_id, incoming_packet_shared_secret: incoming_shared_secret, @@ -4445,8 +4464,8 @@ where 'next_forwardable_htlc: for forward_info in pending_forwards.drain(..) { match forward_info { HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { - prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id, - forward_info: PendingHTLCInfo { + prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint, + prev_user_channel_id, forward_info: PendingHTLCInfo { routing, incoming_shared_secret, payment_hash, incoming_amt_msat, outgoing_amt_msat, skimmed_fee_msat, .. } @@ -4480,6 +4499,7 @@ where prev_hop: HTLCPreviousHopData { short_channel_id: prev_short_channel_id, user_channel_id: Some(prev_user_channel_id), + channel_id: prev_channel_id, outpoint: prev_funding_outpoint, htlc_id: prev_htlc_id, incoming_packet_shared_secret: incoming_shared_secret, @@ -4511,6 +4531,7 @@ where failed_forwards.push((HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id: $htlc.prev_hop.short_channel_id, user_channel_id: $htlc.prev_hop.user_channel_id, + channel_id: prev_channel_id, outpoint: prev_funding_outpoint, htlc_id: $htlc.prev_hop.htlc_id, incoming_packet_shared_secret: $htlc.prev_hop.incoming_packet_shared_secret, @@ -4591,7 +4612,6 @@ where #[allow(unused_assignments)] { committed_to_claimable = true; } - let prev_channel_id = prev_funding_outpoint.to_channel_id(); htlcs.push(claimable_htlc); let amount_msat = htlcs.iter().map(|htlc| htlc.value).sum(); htlcs.iter_mut().for_each(|htlc| htlc.total_value_received = Some(amount_msat)); @@ -4735,23 +4755,23 @@ where for event in background_events.drain(..) { match event { - BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((funding_txo, update)) => { + BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((funding_txo, channel_id, update)) => { // The channel has already been closed, so no use bothering to care about the // monitor updating completing. - let _ = self.chain_monitor.update_channel(funding_txo, &update); + let _ = self.chain_monitor.update_channel(funding_txo, channel_id, &update); }, - BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, update } => { + BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, channel_id, update } => { let mut updated_chan = false; { let per_peer_state = self.per_peer_state.read().unwrap(); if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; - match peer_state.channel_by_id.entry(funding_txo.to_channel_id()) { + match peer_state.channel_by_id.entry(channel_id) { hash_map::Entry::Occupied(mut chan_phase) => { if let ChannelPhase::Funded(chan) = chan_phase.get_mut() { updated_chan = true; - handle_new_monitor_update!(self, funding_txo, update.clone(), + handle_new_monitor_update!(self, funding_txo, channel_id, update.clone(), peer_state_lock, peer_state, per_peer_state, chan); } else { debug_assert!(false, "We shouldn't have an update for a non-funded channel"); @@ -4763,7 +4783,7 @@ where } if !updated_chan { // TODO: Track this as in-flight even though the channel is closed. 
- let _ = self.chain_monitor.update_channel(funding_txo, &update); + let _ = self.chain_monitor.update_channel(funding_txo, channel_id, &update); } }, BackgroundEvent::MonitorUpdatesComplete { counterparty_node_id, channel_id } => { @@ -5286,10 +5306,10 @@ where }, HTLCSource::PreviousHopData(HTLCPreviousHopData { ref short_channel_id, ref htlc_id, ref incoming_packet_shared_secret, - ref phantom_shared_secret, ref outpoint, ref blinded_failure, .. + ref phantom_shared_secret, outpoint: _, ref blinded_failure, ref channel_id, .. }) => { log_trace!( - WithContext::from(&self.logger, None, Some(outpoint.to_channel_id())), + WithContext::from(&self.logger, None, Some(*channel_id)), "Failing {}HTLC with payment_hash {} backwards from us: {:?}", if blinded_failure.is_some() { "blinded " } else { "" }, &payment_hash, onion_error ); @@ -5333,7 +5353,7 @@ where if push_forward_ev { self.push_pending_forwards_ev(); } let mut pending_events = self.pending_events.lock().unwrap(); pending_events.push_back((events::Event::HTLCHandlingFailed { - prev_channel_id: outpoint.to_channel_id(), + prev_channel_id: *channel_id, failed_next_destination: destination, }, None)); }, @@ -5474,7 +5494,7 @@ where } if valid_mpp { for htlc in sources.drain(..) { - let prev_hop_chan_id = htlc.prev_hop.outpoint.to_channel_id(); + let prev_hop_chan_id = htlc.prev_hop.channel_id; if let Err((pk, err)) = self.claim_funds_from_hop( htlc.prev_hop, payment_preimage, |_, definitely_duplicate| { @@ -5527,7 +5547,7 @@ where { let per_peer_state = self.per_peer_state.read().unwrap(); - let chan_id = prev_hop.outpoint.to_channel_id(); + let chan_id = prev_hop.channel_id; let counterparty_node_id_opt = match self.short_to_chan_info.read().unwrap().get(&prev_hop.short_channel_id) { Some((cp_id, _dup_chan_id)) => Some(cp_id.clone()), None => None @@ -5555,7 +5575,7 @@ where peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action); } if !during_init { - handle_new_monitor_update!(self, prev_hop.outpoint, monitor_update, peer_state_lock, + handle_new_monitor_update!(self, prev_hop.outpoint, prev_hop.channel_id, monitor_update, peer_state_lock, peer_state, per_peer_state, chan); } else { // If we're running during init we cannot update a monitor directly - @@ -5565,6 +5585,7 @@ where BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo: prev_hop.outpoint, + channel_id: prev_hop.channel_id, update: monitor_update.clone(), }); } @@ -5579,13 +5600,13 @@ where log_trace!(logger, "Completing monitor update completion action for channel {} as claim was redundant: {:?}", chan_id, action); - let (node_id, funding_outpoint, blocker) = + let (node_id, _funding_outpoint, channel_id, blocker) = if let MonitorUpdateCompletionAction::FreeOtherChannelImmediately { downstream_counterparty_node_id: node_id, downstream_funding_outpoint: funding_outpoint, - blocking_action: blocker, + blocking_action: blocker, downstream_channel_id: channel_id, } = action { - (node_id, funding_outpoint, blocker) + (node_id, funding_outpoint, channel_id, blocker) } else { debug_assert!(false, "Duplicate claims should always free another channel immediately"); @@ -5595,7 +5616,7 @@ where let mut peer_state = peer_state_mtx.lock().unwrap(); if let Some(blockers) = peer_state .actions_blocking_raa_monitor_updates - .get_mut(&funding_outpoint.to_channel_id()) + .get_mut(&channel_id) { let mut found_blocker = false; blockers.retain(|iter| { @@ -5626,16 +5647,19 @@ where }], }; + let prev_hop_channel_id = 
prev_hop.channel_id; + if !during_init { // We update the ChannelMonitor on the backward link, after // receiving an `update_fulfill_htlc` from the forward link. - let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, &preimage_update); + let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, prev_hop_channel_id, &preimage_update); if update_res != ChannelMonitorUpdateStatus::Completed { // TODO: This needs to be handled somehow - if we receive a monitor update // with a preimage we *must* somehow manage to propagate it to the upstream // channel, or we must have an ability to receive the same event and try // again on restart. - log_error!(WithContext::from(&self.logger, None, Some(prev_hop.outpoint.to_channel_id())), "Critical error: failed to update channel monitor with preimage {:?}: {:?}", + log_error!(WithContext::from(&self.logger, None, Some(prev_hop.channel_id)), + "Critical error: failed to update channel monitor with preimage {:?}: {:?}", payment_preimage, update_res); } } else { @@ -5651,7 +5675,7 @@ where // complete the monitor update completion action from `completion_action`. self.pending_background_events.lock().unwrap().push( BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup(( - prev_hop.outpoint, preimage_update, + prev_hop.outpoint, prev_hop.channel_id, preimage_update, ))); } // Note that we do process the completion action here. This totally could be a @@ -5669,7 +5693,8 @@ where fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, startup_replay: bool, - next_channel_counterparty_node_id: Option<PublicKey>, next_channel_outpoint: OutPoint + next_channel_counterparty_node_id: Option<PublicKey>, next_channel_outpoint: OutPoint, + next_channel_id: ChannelId, ) { match source { HTLCSource::OutboundRoute { session_priv, payment_id, path, ..
} => { @@ -5679,7 +5704,7 @@ where debug_assert_eq!(pubkey, path.hops[0].pubkey); } let ev_completion_action = EventCompletionAction::ReleaseRAAChannelMonitorUpdate { - channel_funding_outpoint: next_channel_outpoint, + channel_funding_outpoint: next_channel_outpoint, channel_id: next_channel_id, counterparty_node_id: path.hops[0].pubkey, }; self.pending_outbound_payments.claim_htlc(payment_id, payment_preimage, @@ -5687,15 +5712,17 @@ where &self.logger); }, HTLCSource::PreviousHopData(hop_data) => { - let prev_outpoint = hop_data.outpoint; + let prev_channel_id = hop_data.channel_id; let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data); #[cfg(debug_assertions)] let claiming_chan_funding_outpoint = hop_data.outpoint; + #[cfg(debug_assertions)] + let claiming_channel_id = hop_data.channel_id; let res = self.claim_funds_from_hop(hop_data, payment_preimage, |htlc_claim_value_msat, definitely_duplicate| { let chan_to_release = if let Some(node_id) = next_channel_counterparty_node_id { - Some((node_id, next_channel_outpoint, completed_blocker)) + Some((node_id, next_channel_outpoint, next_channel_id, completed_blocker)) } else { // We can only get `None` here if we are processing a // `ChannelMonitor`-originated event, in which case we @@ -5732,7 +5759,7 @@ where }, // or the channel we'd unblock is already closed, BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup( - (funding_txo, monitor_update) + (funding_txo, _channel_id, monitor_update) ) => { if *funding_txo == next_channel_outpoint { assert_eq!(monitor_update.updates.len(), 1); @@ -5748,7 +5775,7 @@ where BackgroundEvent::MonitorUpdatesComplete { channel_id, .. } => - *channel_id == claiming_chan_funding_outpoint.to_channel_id(), + *channel_id == claiming_channel_id, } }), "{:?}", *background_events); } @@ -5758,7 +5785,8 @@ where Some(MonitorUpdateCompletionAction::FreeOtherChannelImmediately { downstream_counterparty_node_id: other_chan.0, downstream_funding_outpoint: other_chan.1, - blocking_action: other_chan.2, + downstream_channel_id: other_chan.2, + blocking_action: other_chan.3, }) } else { None } } else { @@ -5771,8 +5799,8 @@ where event: events::Event::PaymentForwarded { fee_earned_msat, claim_from_onchain_tx: from_onchain, - prev_channel_id: Some(prev_outpoint.to_channel_id()), - next_channel_id: Some(next_channel_outpoint.to_channel_id()), + prev_channel_id: Some(prev_channel_id), + next_channel_id: Some(next_channel_id), outbound_amount_forwarded_msat: forwarded_htlc_value_msat, }, downstream_counterparty_and_funding_outpoint: chan_to_release, @@ -5822,16 +5850,17 @@ where event, downstream_counterparty_and_funding_outpoint } => { self.pending_events.lock().unwrap().push_back((event, None)); - if let Some((node_id, funding_outpoint, blocker)) = downstream_counterparty_and_funding_outpoint { - self.handle_monitor_update_release(node_id, funding_outpoint, Some(blocker)); + if let Some((node_id, funding_outpoint, channel_id, blocker)) = downstream_counterparty_and_funding_outpoint { + self.handle_monitor_update_release(node_id, funding_outpoint, channel_id, Some(blocker)); } }, MonitorUpdateCompletionAction::FreeOtherChannelImmediately { - downstream_counterparty_node_id, downstream_funding_outpoint, blocking_action, + downstream_counterparty_node_id, downstream_funding_outpoint, downstream_channel_id, blocking_action, } => { self.handle_monitor_update_release( downstream_counterparty_node_id, downstream_funding_outpoint, + downstream_channel_id, Some(blocking_action), ); }, @@ -5846,7 
+5875,7 @@ where commitment_update: Option<msgs::CommitmentUpdate>, order: RAACommitmentOrder, pending_forwards: Vec<(PendingHTLCInfo, u64)>, funding_broadcastable: Option<Transaction>, channel_ready: Option<msgs::ChannelReady>, announcement_sigs: Option<msgs::AnnouncementSignatures>) - -> Option<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> { + -> Option<(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)> { let logger = WithChannelContext::from(&self.logger, &channel.context); log_trace!(logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {}broadcasting funding, {} channel ready, {} announcement", &channel.context.channel_id(), @@ -5861,7 +5890,7 @@ where let counterparty_node_id = channel.context.get_counterparty_node_id(); if !pending_forwards.is_empty() { htlc_forwards = Some((channel.context.get_short_channel_id().unwrap_or(channel.context.outbound_scid_alias()), - channel.context.get_funding_txo().unwrap(), channel.context.get_user_id(), pending_forwards)); + channel.context.get_funding_txo().unwrap(), channel.context.channel_id(), channel.context.get_user_id(), pending_forwards)); } if let Some(msg) = channel_ready { @@ -5915,7 +5944,7 @@ where htlc_forwards } - fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64, counterparty_node_id: Option<&PublicKey>) { + fn channel_monitor_updated(&self, funding_txo: &OutPoint, channel_id: &ChannelId, highest_applied_update_id: u64, counterparty_node_id: Option<&PublicKey>) { debug_assert!(self.total_consistency_lock.try_write().is_err()); // Caller holds read lock let counterparty_node_id = match counterparty_node_id { @@ -5937,11 +5966,11 @@ where peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); let peer_state = &mut *peer_state_lock; let channel = - if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(&funding_txo.to_channel_id()) { + if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(&channel_id) { chan } else { let update_actions = peer_state.monitor_update_blocked_actions - .remove(&funding_txo.to_channel_id()).unwrap_or(Vec::new()); + .remove(&channel_id).unwrap_or(Vec::new()); mem::drop(peer_state_lock); mem::drop(per_peer_state); self.handle_monitor_update_completion_actions(update_actions); @@ -6543,7 +6572,7 @@ where } // Update the monitor with the shutdown script if necessary. if let Some(monitor_update) = monitor_update_opt { - handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update, + handle_new_monitor_update!(self, funding_txo_opt.unwrap(), chan.context.channel_id(), monitor_update, peer_state_lock, peer_state, per_peer_state, chan); } }, @@ -6750,7 +6779,8 @@ where hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node!
No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) } }; - self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, false, Some(*counterparty_node_id), funding_txo); + self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), + false, false, Some(*counterparty_node_id), funding_txo, msg.channel_id); Ok(()) } @@ -6824,7 +6854,7 @@ where let funding_txo = chan.context.get_funding_txo(); let monitor_update_opt = try_chan_phase_entry!(self, chan.commitment_signed(&msg, &&logger), chan_phase_entry); if let Some(monitor_update) = monitor_update_opt { - handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, peer_state_lock, + handle_new_monitor_update!(self, funding_txo.unwrap(), chan.context.channel_id(), monitor_update, peer_state_lock, peer_state, per_peer_state, chan); } Ok(()) @@ -6838,8 +6868,8 @@ where } #[inline] - fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)]) { - for &mut (prev_short_channel_id, prev_funding_outpoint, prev_user_channel_id, ref mut pending_forwards) in per_source_pending_forwards { + fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)]) { + for &mut (prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_user_channel_id, ref mut pending_forwards) in per_source_pending_forwards { let mut push_forward_event = false; let mut new_intercept_events = VecDeque::new(); let mut failed_intercept_forwards = Vec::new(); @@ -6858,7 +6888,7 @@ where match forward_htlcs.entry(scid) { hash_map::Entry::Occupied(mut entry) => { entry.get_mut().push(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { - prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info })); + prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info })); }, hash_map::Entry::Vacant(entry) => { if !is_our_scid && forward_info.incoming_amt_msat.is_some() && @@ -6876,15 +6906,16 @@ where intercept_id }, None)); entry.insert(PendingAddHTLCInfo { - prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info }); + prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info }); }, hash_map::Entry::Occupied(_) => { - let logger = WithContext::from(&self.logger, None, Some(prev_funding_outpoint.to_channel_id())); + let logger = WithContext::from(&self.logger, None, Some(prev_channel_id)); log_info!(logger, "Failed to forward incoming HTLC: detected duplicate intercepted payment over short channel id {}", scid); let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id: prev_short_channel_id, user_channel_id: Some(prev_user_channel_id), outpoint: prev_funding_outpoint, + channel_id: prev_channel_id, htlc_id: prev_htlc_id, incoming_packet_shared_secret: forward_info.incoming_shared_secret, phantom_shared_secret: None, @@ -6904,7 +6935,7 @@ where push_forward_event = true; } entry.insert(vec!(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { - prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info }))); + prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info }))); } } } @@ -6948,13 +6979,14 @@ where /// the [`ChannelMonitorUpdate`] in question. 
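// A minimal self-contained sketch of the check defined just below, using stand-in types
// (the real code keys by `ChannelId` and stores `RAAMonitorUpdateBlockingAction`s): after
// this patch the blocker map is consulted by channel id directly rather than by
// `funding_outpoint.to_channel_id()`.
use std::collections::BTreeMap;

fn raa_updates_held_sketch<Blocker>(
	actions: &BTreeMap<[u8; 32], Vec<Blocker>>, // keyed by a stand-in 32-byte channel id
	channel_id: [u8; 32],
) -> bool {
	// Any outstanding blocker registered under this channel id keeps its
	// ChannelMonitorUpdates held.
	actions.get(&channel_id).map(|v| !v.is_empty()).unwrap_or(false)
}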
fn raa_monitor_updates_held(&self, actions_blocking_raa_monitor_updates: &BTreeMap<ChannelId, Vec<RAAMonitorUpdateBlockingAction>>, - channel_funding_outpoint: OutPoint, counterparty_node_id: PublicKey + channel_funding_outpoint: OutPoint, channel_id: ChannelId, counterparty_node_id: PublicKey ) -> bool { actions_blocking_raa_monitor_updates - .get(&channel_funding_outpoint.to_channel_id()).map(|v| !v.is_empty()).unwrap_or(false) + .get(&channel_id).map(|v| !v.is_empty()).unwrap_or(false) || self.pending_events.lock().unwrap().iter().any(|(_, action)| { action == &Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate { channel_funding_outpoint, + channel_id, counterparty_node_id, }) }) @@ -6971,7 +7003,7 @@ where if let Some(chan) = peer_state.channel_by_id.get(&channel_id) { return self.raa_monitor_updates_held(&peer_state.actions_blocking_raa_monitor_updates, - chan.context().get_funding_txo().unwrap(), counterparty_node_id); + chan.context().get_funding_txo().unwrap(), channel_id, counterparty_node_id); } } false @@ -6993,7 +7025,7 @@ where let funding_txo_opt = chan.context.get_funding_txo(); let mon_update_blocked = if let Some(funding_txo) = funding_txo_opt { self.raa_monitor_updates_held( - &peer_state.actions_blocking_raa_monitor_updates, funding_txo, + &peer_state.actions_blocking_raa_monitor_updates, funding_txo, msg.channel_id, *counterparty_node_id) } else { false }; let (htlcs_to_fail, monitor_update_opt) = try_chan_phase_entry!(self, @@ -7001,7 +7033,7 @@ where if let Some(monitor_update) = monitor_update_opt { let funding_txo = funding_txo_opt .expect("Funding outpoint must have been set for RAA handling to succeed"); - handle_new_monitor_update!(self, funding_txo, monitor_update, + handle_new_monitor_update!(self, funding_txo, chan.context.channel_id(), monitor_update, peer_state_lock, peer_state, per_peer_state, chan); } htlcs_to_fail @@ -7239,22 +7271,22 @@ where let mut failed_channels = Vec::new(); let mut pending_monitor_events = self.chain_monitor.release_pending_monitor_events(); let has_pending_monitor_events = !pending_monitor_events.is_empty(); - for (funding_outpoint, mut monitor_events, counterparty_node_id) in pending_monitor_events.drain(..) { + for (funding_outpoint, channel_id, mut monitor_events, counterparty_node_id) in pending_monitor_events.drain(..) { for monitor_event in monitor_events.drain(..)
{ match monitor_event { MonitorEvent::HTLCEvent(htlc_update) => { - let logger = WithContext::from(&self.logger, counterparty_node_id, Some(funding_outpoint.to_channel_id())); + let logger = WithContext::from(&self.logger, counterparty_node_id, Some(channel_id)); if let Some(preimage) = htlc_update.payment_preimage { log_trace!(logger, "Claiming HTLC with preimage {} from our monitor", preimage); - self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, false, counterparty_node_id, funding_outpoint); + self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, false, counterparty_node_id, funding_outpoint, channel_id); } else { log_trace!(logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash); - let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() }; + let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id }; let reason = HTLCFailReason::from_failure_code(0x4000 | 8); self.fail_htlc_backwards_internal(&htlc_update.source, &htlc_update.payment_hash, &reason, receiver); } }, - MonitorEvent::HolderForceClosed(funding_outpoint) => { + MonitorEvent::HolderForceClosed(_funding_outpoint) => { let counterparty_node_id_opt = match counterparty_node_id { Some(cp_id) => Some(cp_id), None => { @@ -7270,7 +7302,7 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; let pending_msg_events = &mut peer_state.pending_msg_events; - if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(funding_outpoint.to_channel_id()) { + if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(channel_id) { if let ChannelPhase::Funded(mut chan) = remove_channel_phase!(self, chan_phase_entry) { failed_channels.push(chan.context.force_shutdown(false, ClosureReason::HolderForceClosed)); if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { @@ -7289,8 +7321,8 @@ where } } }, - MonitorEvent::Completed { funding_txo, monitor_update_id } => { - self.channel_monitor_updated(&funding_txo, monitor_update_id, counterparty_node_id.as_ref()); + MonitorEvent::Completed { funding_txo, channel_id, monitor_update_id } => { + self.channel_monitor_updated(&funding_txo, &channel_id, monitor_update_id, counterparty_node_id.as_ref()); }, } } @@ -7342,7 +7374,7 @@ where if let Some(monitor_update) = monitor_opt { has_monitor_update = true; - handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, + handle_new_monitor_update!(self, funding_txo.unwrap(), chan.context.channel_id(), monitor_update, peer_state_lock, peer_state, per_peer_state, chan); continue 'peer_loop; } @@ -7507,14 +7539,14 @@ where // Channel::force_shutdown tries to make us do) as we may still be in initialization, // so we track the update internally and handle it when the user next calls // timer_tick_occurred, guaranteeing we're running normally. 
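// Roughly what the queued event now carries (a sketch with stand-in types, mirroring the
// `BackgroundEvent::MonitorUpdateRegeneratedOnStartup` push below): the ChannelId rides
// along with the counterparty and funding outpoint, so the replay path no longer has to
// derive it from the outpoint.
struct QueuedForceCloseUpdateSketch {
	counterparty_node_id: [u8; 33], // stand-in for a PublicKey
	funding_txo: ([u8; 32], u16),   // stand-in for an OutPoint (txid bytes, output index)
	channel_id: [u8; 32],           // stand-in for a ChannelId
	update_id: u64,                 // stand-in for the ChannelMonitorUpdate itself
}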
- if let Some((counterparty_node_id, funding_txo, update)) = failure.monitor_update.take() { + if let Some((counterparty_node_id, funding_txo, channel_id, update)) = failure.monitor_update.take() { assert_eq!(update.updates.len(), 1); if let ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] { assert!(should_broadcast); } else { unreachable!(); } self.pending_background_events.lock().unwrap().push( BackgroundEvent::MonitorUpdateRegeneratedOnStartup { - counterparty_node_id, funding_txo, update + counterparty_node_id, funding_txo, update, channel_id, }); } self.finish_close_channel(failure); @@ -8073,9 +8105,12 @@ where /// [`Event`] being handled) completes, this should be called to restore the channel to normal /// operation. It will double-check that nothing *else* is also blocking the same channel from /// making progress and then let any blocked [`ChannelMonitorUpdate`]s fly. - fn handle_monitor_update_release(&self, counterparty_node_id: PublicKey, channel_funding_outpoint: OutPoint, mut completed_blocker: Option<RAAMonitorUpdateBlockingAction>) { + fn handle_monitor_update_release(&self, counterparty_node_id: PublicKey, + channel_funding_outpoint: OutPoint, channel_id: ChannelId, + mut completed_blocker: Option<RAAMonitorUpdateBlockingAction>) { + let logger = WithContext::from( - &self.logger, Some(counterparty_node_id), Some(channel_funding_outpoint.to_channel_id()) + &self.logger, Some(counterparty_node_id), Some(channel_id), ); loop { let per_peer_state = self.per_peer_state.read().unwrap(); @@ -8085,29 +8120,30 @@ where if let Some(blocker) = completed_blocker.take() { // Only do this on the first iteration of the loop. if let Some(blockers) = peer_state.actions_blocking_raa_monitor_updates - .get_mut(&channel_funding_outpoint.to_channel_id()) + .get_mut(&channel_id) { blockers.retain(|iter| iter != &blocker); } } if self.raa_monitor_updates_held(&peer_state.actions_blocking_raa_monitor_updates, - channel_funding_outpoint, counterparty_node_id) { + channel_funding_outpoint, channel_id, counterparty_node_id) { // Check that, while holding the peer lock, we don't have anything else // blocking monitor updates for this channel. If we do, release the monitor // update(s) when those blockers complete.
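// The release step that follows, as a tiny sketch (generic over the blocker type): on the
// first pass through the loop the just-completed blocker is dropped from this channel's
// list before re-checking whether anything else still holds its monitor updates.
fn release_completed_blocker_sketch<B: PartialEq>(blockers: &mut Vec<B>, completed: &B) {
	// Remove only the completed blocker; any survivors keep the channel parked.
	blockers.retain(|b| b != completed);
}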
log_trace!(logger, "Delaying monitor unlock for channel {} as another channel's mon update needs to complete first", - &channel_funding_outpoint.to_channel_id()); + &channel_id); break; } - if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(channel_funding_outpoint.to_channel_id()) { + if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry( + channel_id) { if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() { debug_assert_eq!(chan.context.get_funding_txo().unwrap(), channel_funding_outpoint); if let Some((monitor_update, further_update_exists)) = chan.unblock_next_blocked_monitor_update() { log_debug!(logger, "Unlocking monitor updating for channel {} and updating monitor", - channel_funding_outpoint.to_channel_id()); - handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update, + channel_id); + handle_new_monitor_update!(self, channel_funding_outpoint, channel_id, monitor_update, peer_state_lck, peer_state, per_peer_state, chan); if further_update_exists { // If there are more `ChannelMonitorUpdate`s to process, restart at the @@ -8116,7 +8152,7 @@ where } } else { log_trace!(logger, "Unlocked monitor updating for channel {} without monitors to update", - channel_funding_outpoint.to_channel_id()); + channel_id); } } } @@ -8133,9 +8169,9 @@ where for action in actions { match action { EventCompletionAction::ReleaseRAAChannelMonitorUpdate { - channel_funding_outpoint, counterparty_node_id + channel_funding_outpoint, channel_id, counterparty_node_id } => { - self.handle_monitor_update_release(counterparty_node_id, channel_funding_outpoint, None); + self.handle_monitor_update_release(counterparty_node_id, channel_funding_outpoint, channel_id, None); } } } @@ -8531,6 +8567,7 @@ where incoming_packet_shared_secret: htlc.forward_info.incoming_shared_secret, phantom_shared_secret: None, outpoint: htlc.prev_funding_outpoint, + channel_id: htlc.prev_channel_id, blinded_failure: htlc.forward_info.routing.blinded_failure(), }); @@ -8542,7 +8579,7 @@ where HTLCFailReason::from_failure_code(0x2000 | 2), HTLCDestination::InvalidForward { requested_forward_scid })); let logger = WithContext::from( - &self.logger, None, Some(htlc.prev_funding_outpoint.to_channel_id()) + &self.logger, None, Some(htlc.prev_channel_id) ); log_trace!(logger, "Timing out intercepted HTLC with requested forward scid {}", requested_forward_scid); false @@ -9627,6 +9664,9 @@ impl_writeable_tlv_based!(HTLCPreviousHopData, { (4, htlc_id, required), (6, incoming_packet_shared_secret, required), (7, user_channel_id, option), + // Note that by the time we get past the required read for type 2 above, outpoint will be + // filled in, so we can safely unwrap it here. + (9, channel_id, (default_value, ChannelId::v1_from_funding_outpoint(outpoint.0.unwrap()))), }); impl Writeable for ClaimableHTLC { @@ -9778,6 +9818,9 @@ impl_writeable_tlv_based!(PendingAddHTLCInfo, { (2, prev_short_channel_id, required), (4, prev_htlc_id, required), (6, prev_funding_outpoint, required), + // Note that by the time we get past the required read for type 6 above, prev_funding_outpoint will be + // filled in, so we can safely unwrap it here.
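+ // (`default_value` only evaluates this expression when TLV type 7 is absent from the
+ // stream, e.g. when deserializing data written before this field existed; the required
+ // outpoint record has already been read by then, so the unwrap cannot fail.)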
+ (7, prev_channel_id, (default_value, ChannelId::v1_from_funding_outpoint(prev_funding_outpoint.0.unwrap()))), }); impl Writeable for HTLCForwardInfo { @@ -10291,12 +10334,14 @@ where let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128)); let mut channel_closures = VecDeque::new(); let mut close_background_events = Vec::new(); + let mut funding_txo_to_channel_id = HashMap::with_capacity(channel_count as usize); for _ in 0..channel_count { let mut channel: Channel<SP> = Channel::read(reader, ( &args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config) ))?; let logger = WithChannelContext::from(&args.logger, &channel.context); let funding_txo = channel.context.get_funding_txo().ok_or(DecodeError::InvalidValue)?; + funding_txo_to_channel_id.insert(funding_txo, channel.context.channel_id()); funding_txo_set.insert(funding_txo.clone()); if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) { if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() || @@ -10326,9 +10371,9 @@ where if shutdown_result.unbroadcasted_batch_funding_txid.is_some() { return Err(DecodeError::InvalidValue); } - if let Some((counterparty_node_id, funding_txo, update)) = shutdown_result.monitor_update { + if let Some((counterparty_node_id, funding_txo, channel_id, update)) = shutdown_result.monitor_update { close_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup { - counterparty_node_id, funding_txo, update + counterparty_node_id, funding_txo, channel_id, update }); } failed_htlcs.append(&mut shutdown_result.dropped_outbound_htlcs); @@ -10407,14 +10452,15 @@ where for (funding_txo, monitor) in args.channel_monitors.iter() { if !funding_txo_set.contains(funding_txo) { let logger = WithChannelMonitor::from(&args.logger, monitor); + let channel_id = monitor.channel_id(); log_info!(logger, "Queueing monitor update to ensure missing channel {} is force closed", - &funding_txo.to_channel_id()); + &channel_id); let monitor_update = ChannelMonitorUpdate { update_id: CLOSED_CHANNEL_UPDATE_ID, counterparty_node_id: None, updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }], }; - close_background_events.push(BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((*funding_txo, monitor_update))); + close_background_events.push(BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((*funding_txo, channel_id, monitor_update))); } } @@ -10591,12 +10637,13 @@ where $chan_in_flight_upds.retain(|upd| upd.update_id > $monitor.get_latest_update_id()); for update in $chan_in_flight_upds.iter() { log_trace!($logger, "Replaying ChannelMonitorUpdate {} for {}channel {}", - update.update_id, $channel_info_log, &$funding_txo.to_channel_id()); + update.update_id, $channel_info_log, &$monitor.channel_id()); max_in_flight_update_id = cmp::max(max_in_flight_update_id, update.update_id); pending_background_events.push( BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id: $counterparty_node_id, funding_txo: $funding_txo, + channel_id: $monitor.channel_id(), update: update.clone(), }); } @@ -10607,7 +10654,7 @@ where pending_background_events.push( BackgroundEvent::MonitorUpdatesComplete { counterparty_node_id: $counterparty_node_id, - channel_id: $funding_txo.to_channel_id(), + channel_id: $monitor.channel_id(), }); } if $peer_state.in_flight_monitor_updates.insert($funding_txo, $chan_in_flight_upds).is_some() { @@
-10661,7 +10708,8 @@ where if let Some(in_flight_upds) = in_flight_monitor_updates { for ((counterparty_id, funding_txo), mut chan_in_flight_updates) in in_flight_upds { - let logger = WithContext::from(&args.logger, Some(counterparty_id), Some(funding_txo.to_channel_id())); + let channel_id = funding_txo_to_channel_id.get(&funding_txo).copied(); + let logger = WithContext::from(&args.logger, Some(counterparty_id), channel_id); if let Some(monitor) = args.channel_monitors.get(&funding_txo) { // Now that we've removed all the in-flight monitor updates for channels that are // still open, we need to replay any monitor updates that are for closed channels, @@ -10674,8 +10722,8 @@ where funding_txo, monitor, peer_state, logger, "closed "); } else { log_error!(logger, "A ChannelMonitor is missing even though we have in-flight updates for it! This indicates a potentially-critical violation of the chain::Watch API!"); - log_error!(logger, " The ChannelMonitor for channel {} is missing.", - &funding_txo.to_channel_id()); + log_error!(logger, " The ChannelMonitor for channel {} is missing.", if let Some(channel_id) = + channel_id { channel_id.to_string() } else { format!("with outpoint {}", funding_txo) } ); log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,"); log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!"); log_error!(logger, " Without the latest ChannelMonitor we cannot continue without risking funds."); @@ -10763,7 +10811,7 @@ where if let HTLCForwardInfo::AddHTLC(htlc_info) = forward { if pending_forward_matches_htlc(&htlc_info) { log_info!(logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}", - &htlc.payment_hash, &monitor.get_funding_txo().0.to_channel_id()); + &htlc.payment_hash, &monitor.channel_id()); false } else { true } } else { true } @@ -10773,7 +10821,7 @@ where pending_intercepted_htlcs.as_mut().unwrap().retain(|intercepted_id, htlc_info| { if pending_forward_matches_htlc(&htlc_info) { log_info!(logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}", - &htlc.payment_hash, &monitor.get_funding_txo().0.to_channel_id()); + &htlc.payment_hash, &monitor.channel_id()); pending_events_read.retain(|(event, _)| { if let Event::HTLCIntercepted { intercept_id: ev_id, .. } = event { intercepted_id != ev_id @@ -10797,6 +10845,7 @@ where let compl_action = EventCompletionAction::ReleaseRAAChannelMonitorUpdate { channel_funding_outpoint: monitor.get_funding_txo().0, + channel_id: monitor.channel_id(), counterparty_node_id: path.hops[0].pubkey, }; pending_outbounds.claim_htlc(payment_id, preimage, session_priv, @@ -10822,7 +10871,7 @@ where // channel_id -> peer map entry). counterparty_opt.is_none(), counterparty_opt.cloned().or(monitor.get_counterparty_node_id()), - monitor.get_funding_txo().0)) + monitor.get_funding_txo().0, monitor.channel_id())) } else { None } } else { // If it was an outbound payment, we've handled it above - if a preimage @@ -10995,7 +11044,7 @@ where // this channel as well. On the flip side, there's no harm in restarting // without the new monitor persisted - we'll end up right back here on // restart. 
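// Sketch of the lookup pattern used above (stand-in types): in-flight monitor updates are
// still keyed on disk by funding outpoint, so deserialization builds an outpoint-to-id map
// up front and falls back to describing the channel by its outpoint when no id is known.
use std::collections::HashMap;

fn describe_channel_sketch(
	map: &HashMap<([u8; 32], u16), [u8; 32]>, // (funding txid bytes, index) -> channel id
	funding_txo: ([u8; 32], u16),
) -> String {
	match map.get(&funding_txo).copied() {
		Some(id) => format!("channel {:02x?}", id),
		// No open channel matched this outpoint; identify it by the outpoint instead.
		None => format!("channel with outpoint {:02x?}:{}", funding_txo.0, funding_txo.1),
	}
}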
- let previous_channel_id = claimable_htlc.prev_hop.outpoint.to_channel_id(); + let previous_channel_id = claimable_htlc.prev_hop.channel_id; if let Some(peer_node_id) = outpoint_to_peer.get(&claimable_htlc.prev_hop.outpoint) { let peer_state_mutex = per_peer_state.get(peer_node_id).unwrap(); let mut peer_state_lock = peer_state_mutex.lock().unwrap(); @@ -11028,14 +11077,15 @@ where for action in actions.iter() { if let MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel { downstream_counterparty_and_funding_outpoint: - Some((blocked_node_id, blocked_channel_outpoint, blocking_action)), .. + Some((blocked_node_id, _blocked_channel_outpoint, blocked_channel_id, blocking_action)), .. } = action { if let Some(blocked_peer_state) = per_peer_state.get(&blocked_node_id) { + let channel_id = blocked_channel_id; log_trace!(logger, "Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor", - blocked_channel_outpoint.to_channel_id()); + channel_id); blocked_peer_state.lock().unwrap().actions_blocking_raa_monitor_updates - .entry(blocked_channel_outpoint.to_channel_id()) + .entry(*channel_id) .or_insert_with(Vec::new).push(blocking_action.clone()); } else { // If the channel we were blocking has closed, we don't need to @@ -11115,12 +11165,12 @@ where channel_manager.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver); } - for (source, preimage, downstream_value, downstream_closed, downstream_node_id, downstream_funding) in pending_claims_to_replay { + for (source, preimage, downstream_value, downstream_closed, downstream_node_id, downstream_funding, downstream_channel_id) in pending_claims_to_replay { // We use `downstream_closed` in place of `from_onchain` here just as a guess - we // don't remember in the `ChannelMonitor` where we got a preimage from, but if the // channel is closed we just assume that it probably came from an on-chain claim. 
channel_manager.claim_funds_internal(source, preimage, Some(downstream_value), - downstream_closed, true, downstream_node_id, downstream_funding); + downstream_closed, true, downstream_node_id, downstream_funding, downstream_channel_id); } //TODO: Broadcast channel update for closed channels, but only after we've made a diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 8df84000c26..e7fc68924ef 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -252,7 +252,7 @@ pub fn connect_block<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, block: &Block) fn call_claimable_balances<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>) { // Ensure `get_claimable_balances`' self-tests never panic - for funding_outpoint in node.chain_monitor.chain_monitor.list_monitors() { + for (funding_outpoint, _channel_id) in node.chain_monitor.chain_monitor.list_monitors() { node.chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances(); } } @@ -601,7 +601,7 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> { let feeest = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) }; let mut deserialized_monitors = Vec::new(); { - for outpoint in self.chain_monitor.chain_monitor.list_monitors() { + for (outpoint, _channel_id) in self.chain_monitor.chain_monitor.list_monitors() { let mut w = test_utils::TestVecWriter(Vec::new()); self.chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut w).unwrap(); let (_, deserialized_monitor) = <(BlockHash, ChannelMonitor<TestChannelSigner>)>::read( @@ -644,7 +644,8 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> { let chain_source = test_utils::TestChainSource::new(Network::Testnet); let chain_monitor = test_utils::TestChainMonitor::new(Some(&chain_source), &broadcaster, &self.logger, &feeest, &persister, &self.keys_manager); for deserialized_monitor in deserialized_monitors.drain(..) { - if chain_monitor.watch_channel(deserialized_monitor.get_funding_txo().0, deserialized_monitor) != Ok(ChannelMonitorUpdateStatus::Completed) { + let funding_outpoint = deserialized_monitor.get_funding_txo().0; + if chain_monitor.watch_channel(funding_outpoint, deserialized_monitor) != Ok(ChannelMonitorUpdateStatus::Completed) { panic!(); } } @@ -1068,7 +1069,8 @@ pub fn _reload_node<'a, 'b, 'c>(node: &'a Node<'a, 'b, 'c>, default_config: User assert!(node_read.is_empty()); for monitor in monitors_read.drain(..)
{ - assert_eq!(node.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor), + let funding_outpoint = monitor.get_funding_txo().0; + assert_eq!(node.chain_monitor.watch_channel(funding_outpoint, monitor), Ok(ChannelMonitorUpdateStatus::Completed)); check_added_monitors!(node, 1); } diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index be9bfb81f25..710ab229516 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -8470,6 +8470,7 @@ fn test_update_err_monitor_lockdown() { // Create some initial channel let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); let outpoint = OutPoint { txid: chan_1.3.txid(), index: 0 }; + let channel_id = chan_1.2; // Rebalance the network to generate htlc in the two directions send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000); @@ -8512,8 +8513,8 @@ fn test_update_err_monitor_lockdown() { let mut node_0_peer_state_lock; if let ChannelPhase::Funded(ref mut channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2) { if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) { - assert_eq!(watchtower.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::InProgress); - assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed); + assert_eq!(watchtower.chain_monitor.update_channel(outpoint, channel_id, &update), ChannelMonitorUpdateStatus::InProgress); + assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, channel_id, &update), ChannelMonitorUpdateStatus::Completed); } else { assert!(false); } } else { assert!(false); @@ -8540,6 +8541,7 @@ fn test_concurrent_monitor_claim() { // Create some initial channel let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); let outpoint = OutPoint { txid: chan_1.3.txid(), index: 0 }; + let channel_id = chan_1.2; // Rebalance the network to generate htlc in the two directions send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000); @@ -8615,9 +8617,9 @@ fn test_concurrent_monitor_claim() { if let ChannelPhase::Funded(ref mut channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2) { if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) { // Watchtower Alice should already have seen the block and reject the update - assert_eq!(watchtower_alice.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::InProgress); - assert_eq!(watchtower_bob.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed); - assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed); + assert_eq!(watchtower_alice.chain_monitor.update_channel(outpoint, channel_id, &update), ChannelMonitorUpdateStatus::InProgress); + assert_eq!(watchtower_bob.chain_monitor.update_channel(outpoint, channel_id, &update), ChannelMonitorUpdateStatus::Completed); + assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, channel_id, &update), ChannelMonitorUpdateStatus::Completed); } else { assert!(false); } } else { assert!(false); @@ -8684,7 +8686,7 @@ fn test_pre_lockin_no_chan_closed_update() { check_added_monitors!(nodes[0], 0); let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()); - let channel_id = 
crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id(); + let channel_id = ChannelId::v1_from_funding_outpoint(crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }); nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id, data: "Hi".to_owned() }); assert!(nodes[0].chain_monitor.added_monitors.lock().unwrap().is_empty()); check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("Hi".to_string()) }, true, @@ -9028,7 +9030,7 @@ fn test_peer_funding_sidechannel() { check_added_monitors!(nodes[1], 1); expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id()); let reason = ClosureReason::ProcessingError { err: format!("An existing channel using outpoint {} is open with peer {}", funding_output, nodes[2].node.get_our_node_id()), }; - check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(funding_output.to_channel_id(), true, reason)]); + check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(ChannelId::v1_from_funding_outpoint(funding_output), true, reason)]); let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()); nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed); @@ -9089,7 +9091,7 @@ fn test_duplicate_funding_err_in_funding() { let (_, _, _, real_channel_id, funding_tx) = create_chan_between_nodes(&nodes[0], &nodes[1]); let real_chan_funding_txo = chain::transaction::OutPoint { txid: funding_tx.txid(), index: 0 }; - assert_eq!(real_chan_funding_txo.to_channel_id(), real_channel_id); + assert_eq!(ChannelId::v1_from_funding_outpoint(real_chan_funding_txo), real_channel_id); nodes[2].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap(); let mut open_chan_msg = get_event_msg!(nodes[2], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); @@ -9181,7 +9183,7 @@ fn test_duplicate_chan_id() { let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()); let funding_outpoint = crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }; - let channel_id = funding_outpoint.to_channel_id(); + let channel_id = ChannelId::v1_from_funding_outpoint(funding_outpoint); // Now we have the first channel past funding_created (ie it has a txid-based channel_id, not a // temporary one). @@ -10635,7 +10637,7 @@ fn test_batch_channel_open() { // Complete the persistence of the monitor. nodes[0].chain_monitor.complete_sole_pending_chan_update( - &OutPoint { txid: tx.txid(), index: 1 }.to_channel_id() + &ChannelId::v1_from_funding_outpoint(OutPoint { txid: tx.txid(), index: 1 }) ); let events = nodes[0].node.get_and_clear_pending_events(); @@ -10692,8 +10694,8 @@ fn test_disconnect_in_funding_batch() { // The channels in the batch will close immediately. 
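// Worked sketch of what `ChannelId::v1_from_funding_outpoint` computes (the BOLT 2 rule;
// this is an illustration, not LDK's code): a v1 channel id is the funding txid with the
// funding output index XORed into its last two bytes, which is why these tests can equate
// the two identifiers.
fn v1_channel_id_sketch(funding_txid: [u8; 32], output_index: u16) -> [u8; 32] {
	let mut id = funding_txid;
	id[30] ^= (output_index >> 8) as u8; // high byte of the index
	id[31] ^= (output_index & 0xff) as u8; // low byte of the index
	id
}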
let funding_txo_1 = OutPoint { txid: tx.txid(), index: 0 }; let funding_txo_2 = OutPoint { txid: tx.txid(), index: 1 }; - let channel_id_1 = funding_txo_1.to_channel_id(); - let channel_id_2 = funding_txo_2.to_channel_id(); + let channel_id_1 = ChannelId::v1_from_funding_outpoint(funding_txo_1); + let channel_id_2 = ChannelId::v1_from_funding_outpoint(funding_txo_2); check_closed_events(&nodes[0], &[ ExpectedCloseEvent { channel_id: Some(channel_id_1), @@ -10766,8 +10768,8 @@ fn test_batch_funding_close_after_funding_signed() { // Force-close the channel for which we've completed the initial monitor. let funding_txo_1 = OutPoint { txid: tx.txid(), index: 0 }; let funding_txo_2 = OutPoint { txid: tx.txid(), index: 1 }; - let channel_id_1 = funding_txo_1.to_channel_id(); - let channel_id_2 = funding_txo_2.to_channel_id(); + let channel_id_1 = ChannelId::v1_from_funding_outpoint(funding_txo_1); + let channel_id_2 = ChannelId::v1_from_funding_outpoint(funding_txo_2); nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &nodes[1].node.get_our_node_id()).unwrap(); check_added_monitors(&nodes[0], 2); { @@ -10827,7 +10829,7 @@ fn do_test_funding_and_commitment_tx_confirm_same_block(confirm_remote_commitmen let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0); - let chan_id = chain::transaction::OutPoint { txid: funding_tx.txid(), index: 0 }.to_channel_id(); + let chan_id = ChannelId::v1_from_funding_outpoint(chain::transaction::OutPoint { txid: funding_tx.txid(), index: 0 }); assert_eq!(nodes[0].node.list_channels().len(), 1); assert_eq!(nodes[1].node.list_channels().len(), 1); diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index b6270181439..6a56c3cf816 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -15,7 +15,7 @@ use crate::chain::transaction::OutPoint; use crate::chain::chaininterface::{LowerBoundedFeeEstimator, compute_feerate_sat_per_1000_weight}; use crate::events::bump_transaction::{BumpTransactionEvent, WalletSource}; use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination}; -use crate::ln::channel; +use crate::ln::{channel, ChannelId}; use crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, PaymentId, RecipientOnionFields}; use crate::ln::msgs::ChannelMessageHandler; use crate::util::config::UserConfig; @@ -176,7 +176,7 @@ fn do_chanmon_claim_value_coop_close(anchors: bool) { let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 1_000_000); let funding_outpoint = OutPoint { txid: funding_tx.txid(), index: 0 }; - assert_eq!(funding_outpoint.to_channel_id(), chan_id); + assert_eq!(ChannelId::v1_from_funding_outpoint(funding_outpoint), chan_id); let chan_feerate = get_feerate!(nodes[0], nodes[1], chan_id) as u64; let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan_id); @@ -327,7 +327,7 @@ fn do_test_claim_value_force_close(anchors: bool, prev_commitment_tx: bool) { let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 1_000_000); let funding_outpoint = OutPoint { txid: funding_tx.txid(), index: 0 }; - assert_eq!(funding_outpoint.to_channel_id(), chan_id); + assert_eq!(ChannelId::v1_from_funding_outpoint(funding_outpoint), chan_id); // This HTLC is immediately claimed, giving node B the preimage let (payment_preimage, 
payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 3_000_000); @@ -1121,7 +1121,7 @@ fn do_test_revoked_counterparty_commitment_balances(anchors: bool, confirm_htlc_ let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 100_000_000); let funding_outpoint = OutPoint { txid: funding_tx.txid(), index: 0 }; - assert_eq!(funding_outpoint.to_channel_id(), chan_id); + assert_eq!(ChannelId::v1_from_funding_outpoint(funding_outpoint), chan_id); // We create five HTLCs for B to claim against A's revoked commitment transaction: // @@ -1403,7 +1403,7 @@ fn do_test_revoked_counterparty_htlc_tx_balances(anchors: bool) { let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 12_000_000); let funding_outpoint = OutPoint { txid: funding_tx.txid(), index: 0 }; - assert_eq!(funding_outpoint.to_channel_id(), chan_id); + assert_eq!(ChannelId::v1_from_funding_outpoint(funding_outpoint), chan_id); let payment_preimage = route_payment(&nodes[0], &[&nodes[1]], 3_000_000).0; let failed_payment_hash = route_payment(&nodes[1], &[&nodes[0]], 1_000_000).1; @@ -1705,7 +1705,7 @@ fn do_test_revoked_counterparty_aggregated_claims(anchors: bool) { let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 100_000_000); let funding_outpoint = OutPoint { txid: funding_tx.txid(), index: 0 }; - assert_eq!(funding_outpoint.to_channel_id(), chan_id); + assert_eq!(ChannelId::v1_from_funding_outpoint(funding_outpoint), chan_id); // We create two HTLCs, one which we will give A the preimage to to generate an HTLC-Success // transaction, and one which we will not, allowing B to claim the HTLC output in an aggregated diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs index ae3f9c69035..3b9f9848ec8 100644 --- a/lightning/src/ln/peer_handler.rs +++ b/lightning/src/ln/peer_handler.rs @@ -24,6 +24,7 @@ use crate::ln::ChannelId; use crate::ln::features::{InitFeatures, NodeFeatures}; use crate::ln::msgs; use crate::ln::msgs::{ChannelMessageHandler, LightningError, SocketAddress, OnionMessageHandler, RoutingMessageHandler}; +use crate::util::macro_logger::DebugFundingChannelId; use crate::util::ser::{VecWriter, Writeable, Writer}; use crate::ln::peer_channel_encryptor::{PeerChannelEncryptor, NextNoiseStep, MessageBuf, MSG_BUF_ALLOC_SIZE}; use crate::ln::wire; @@ -2006,7 +2007,7 @@ impl { assert_eq!(script.into_inner(), unsupported_shutdown_script.clone().into_inner()); }, Err(e) => panic!("Unexpected error: {:?}", e), Ok(_) => panic!("Expected error"), } - nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[0].node.get_our_node_id()).unwrap(); + nodes[1].node.close_channel(&chan.2, &nodes[0].node.get_our_node_id()).unwrap(); check_added_monitors!(nodes[1], 1); // Use a non-v0 segwit script unsupported without option_shutdown_anysegwit @@ -1007,7 +1007,7 @@ fn test_invalid_shutdown_script() { let nodes = create_network(3, &node_cfgs, &node_chanmgrs); let chan = create_announced_chan_between_nodes(&nodes, 0, 1); - nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[0].node.get_our_node_id()).unwrap(); + nodes[1].node.close_channel(&chan.2, &nodes[0].node.get_our_node_id()).unwrap(); check_added_monitors!(nodes[1], 1); // Use a segwit v0 script with an unsupported witness program @@ -1041,7 +1041,7 @@ fn test_user_shutdown_script() { let shutdown_script = 
ShutdownScript::try_from(script.clone()).unwrap(); let chan = create_announced_chan_between_nodes(&nodes, 0, 1); - nodes[1].node.close_channel_with_feerate_and_script(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[0].node.get_our_node_id(), None, Some(shutdown_script)).unwrap(); + nodes[1].node.close_channel_with_feerate_and_script(&chan.2, &nodes[0].node.get_our_node_id(), None, Some(shutdown_script)).unwrap(); check_added_monitors!(nodes[1], 1); let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); @@ -1068,7 +1068,7 @@ fn test_already_set_user_shutdown_script() { let shutdown_script = ShutdownScript::try_from(script).unwrap(); let chan = create_announced_chan_between_nodes(&nodes, 0, 1); - let result = nodes[1].node.close_channel_with_feerate_and_script(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[0].node.get_our_node_id(), None, Some(shutdown_script)); + let result = nodes[1].node.close_channel_with_feerate_and_script(&chan.2, &nodes[0].node.get_our_node_id(), None, Some(shutdown_script)); assert_eq!(result, Err(APIError::APIMisuseError { err: "Cannot override shutdown script for a channel with one already set".to_string() })); } @@ -1200,7 +1200,7 @@ fn do_simple_legacy_shutdown_test(high_initiator_fee: bool) { *feerate_lock *= 10; } - nodes[0].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap(); + nodes[0].node.close_channel(&chan.2, &nodes[1].node.get_our_node_id()).unwrap(); let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown); let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()); @@ -1240,7 +1240,7 @@ fn simple_target_feerate_shutdown() { let nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan = create_announced_chan_between_nodes(&nodes, 0, 1); - let chan_id = OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(); + let chan_id = chan.2; nodes[0].node.close_channel_with_feerate_and_script(&chan_id, &nodes[1].node.get_our_node_id(), Some(253 * 10), None).unwrap(); let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()); diff --git a/lightning/src/util/macro_logger.rs b/lightning/src/util/macro_logger.rs index 203c544e009..55b11604d94 100644 --- a/lightning/src/util/macro_logger.rs +++ b/lightning/src/util/macro_logger.rs @@ -8,6 +8,7 @@ // licenses. use crate::chain::transaction::OutPoint; +use crate::ln::ChannelId; use crate::sign::SpendableOutputDescriptor; use bitcoin::hash_types::Txid; @@ -41,24 +42,21 @@ macro_rules! log_bytes { pub(crate) struct DebugFundingChannelId<'a>(pub &'a Txid, pub u16); impl<'a> core::fmt::Display for DebugFundingChannelId<'a> { fn fmt(&self, f: &mut core::fmt::Formatter) -> Result<(), core::fmt::Error> { - (OutPoint { txid: self.0.clone(), index: self.1 }).to_channel_id().fmt(f) - } -} -macro_rules! 
log_funding_channel_id { - ($funding_txid: expr, $funding_txo: expr) => { - $crate::util::macro_logger::DebugFundingChannelId(&$funding_txid, $funding_txo) + ChannelId::v1_from_funding_outpoint(OutPoint { txid: self.0.clone(), index: self.1 }).fmt(f) } } -pub(crate) struct DebugFundingInfo<'a, T: 'a>(pub &'a (OutPoint, T)); -impl<'a, T> core::fmt::Display for DebugFundingInfo<'a, T> { +pub(crate) struct DebugFundingInfo<'a>(pub &'a ChannelId); +impl<'a> core::fmt::Display for DebugFundingInfo<'a> { fn fmt(&self, f: &mut core::fmt::Formatter) -> Result<(), core::fmt::Error> { - (self.0).0.to_channel_id().fmt(f) + self.0.fmt(f) } } macro_rules! log_funding_info { ($key_storage: expr) => { - $crate::util::macro_logger::DebugFundingInfo(&$key_storage.get_funding_txo()) + $crate::util::macro_logger::DebugFundingInfo( + &$key_storage.channel_id() + ) } } diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs index e6329062051..7d501345c3c 100644 --- a/lightning/src/util/persist.rs +++ b/lightning/src/util/persist.rs @@ -1052,9 +1052,9 @@ mod tests { { let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap(); let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap(); - let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap(); + let update_id = update_map.get(&added_monitors[0].1.channel_id()).unwrap(); let cmu_map = nodes[1].chain_monitor.monitor_updates.lock().unwrap(); - let cmu = &cmu_map.get(&added_monitors[0].0.to_channel_id()).unwrap()[0]; + let cmu = &cmu_map.get(&added_monitors[0].1.channel_id()).unwrap()[0]; let test_txo = OutPoint { txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 }; let ro_persister = MonitorUpdatingPersister { diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs index 02423541fb9..d1e1d652064 100644 --- a/lightning/src/util/test_utils.rs +++ b/lightning/src/util/test_utils.rs @@ -341,32 +341,32 @@ impl<'a> chain::Watch<TestChannelSigner> for TestChainMonitor<'a> { let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read( &mut io::Cursor::new(&w.0), (self.keys_manager, self.keys_manager)).unwrap().1; assert!(new_monitor == monitor); - self.latest_monitor_update_id.lock().unwrap().insert(funding_txo.to_channel_id(), + self.latest_monitor_update_id.lock().unwrap().insert(monitor.channel_id(), (funding_txo, monitor.get_latest_update_id(), MonitorUpdateId::from_new_monitor(&monitor))); self.added_monitors.lock().unwrap().push((funding_txo, monitor)); self.chain_monitor.watch_channel(funding_txo, new_monitor) } - fn update_channel(&self, funding_txo: OutPoint, update: &channelmonitor::ChannelMonitorUpdate) -> chain::ChannelMonitorUpdateStatus { + fn update_channel(&self, funding_txo: OutPoint, channel_id: ChannelId, update: &channelmonitor::ChannelMonitorUpdate) -> chain::ChannelMonitorUpdateStatus { // Every monitor update should survive roundtrip let mut w = TestVecWriter(Vec::new()); update.write(&mut w).unwrap(); assert!(channelmonitor::ChannelMonitorUpdate::read( &mut io::Cursor::new(&w.0)).unwrap() == *update); - self.monitor_updates.lock().unwrap().entry(funding_txo.to_channel_id()).or_insert(Vec::new()).push(update.clone()); + self.monitor_updates.lock().unwrap().entry(channel_id).or_insert(Vec::new()).push(update.clone()); if let Some(exp) = self.expect_channel_force_closed.lock().unwrap().take() { - assert_eq!(funding_txo.to_channel_id(), exp.0); + assert_eq!(channel_id, exp.0);
assert_eq!(update.updates.len(), 1); if let channelmonitor::ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] { assert_eq!(should_broadcast, exp.1); } else { panic!(); } } - self.latest_monitor_update_id.lock().unwrap().insert(funding_txo.to_channel_id(), + self.latest_monitor_update_id.lock().unwrap().insert(channel_id, (funding_txo, update.update_id, MonitorUpdateId::from_monitor_update(update))); - let update_res = self.chain_monitor.update_channel(funding_txo, update); + let update_res = self.chain_monitor.update_channel(funding_txo, channel_id, update); // At every point where we get a monitor update, we should be able to send a useful monitor // to a watchtower and disk... let monitor = self.chain_monitor.get_monitor(funding_txo).unwrap(); @@ -375,7 +375,7 @@ impl<'a> chain::Watch<TestChannelSigner> for TestChainMonitor<'a> { let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read( &mut io::Cursor::new(&w.0), (self.keys_manager, self.keys_manager)).unwrap().1; if let Some(chan_id) = self.expect_monitor_round_trip_fail.lock().unwrap().take() { - assert_eq!(chan_id, funding_txo.to_channel_id()); + assert_eq!(chan_id, channel_id); assert!(new_monitor != *monitor); } else { assert!(new_monitor == *monitor); @@ -384,7 +384,7 @@ impl<'a> chain::Watch<TestChannelSigner> for TestChainMonitor<'a> { update_res } - fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)> { + fn release_pending_monitor_events(&self) -> Vec<(OutPoint, ChannelId, Vec<MonitorEvent>, Option<PublicKey>)> { return self.chain_monitor.release_pending_monitor_events(); } }
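// For downstream implementors of `chain::Watch` (a sketch under assumed stand-in types;
// the real trait is generic over a channel signer): pass-through wrappers like the test
// monitor above now have to thread the ChannelId through `update_channel`, and the tuples
// returned by `release_pending_monitor_events` gain a ChannelId element.
trait WatchSketch {
	fn update_channel(&self, funding_txo: ([u8; 32], u16), channel_id: [u8; 32], update_id: u64) -> bool;
	fn release_pending_monitor_events(&self) -> Vec<(([u8; 32], u16), [u8; 32], Vec<u64>)>;
}

struct PassThrough<W: WatchSketch>(W);

impl<W: WatchSketch> WatchSketch for PassThrough<W> {
	fn update_channel(&self, funding_txo: ([u8; 32], u16), channel_id: [u8; 32], update_id: u64) -> bool {
		// Forward unchanged; the added channel_id parameter is what this patch threads through.
		self.0.update_channel(funding_txo, channel_id, update_id)
	}
	fn release_pending_monitor_events(&self) -> Vec<(([u8; 32], u16), [u8; 32], Vec<u64>)> {
		self.0.release_pending_monitor_events()
	}
}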