diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs
index 7a32434b86f..00cc6191220 100644
--- a/fuzz/src/chanmon_consistency.rs
+++ b/fuzz/src/chanmon_consistency.rs
@@ -40,7 +40,7 @@ use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget,
 use lightning::sign::{KeyMaterial, InMemorySigner, Recipient, EntropySource, NodeSigner, SignerProvider};
 use lightning::events;
 use lightning::events::MessageSendEventsProvider;
-use lightning::ln::{PaymentHash, PaymentPreimage, PaymentSecret};
+use lightning::ln::{ChannelId, PaymentHash, PaymentPreimage, PaymentSecret};
 use lightning::ln::channelmanager::{ChainParameters, ChannelDetails, ChannelManager, PaymentSendFailure, ChannelManagerReadArgs, PaymentId, RecipientOnionFields};
 use lightning::ln::channel::FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE;
 use lightning::ln::msgs::{self, CommitmentUpdate, ChannelMessageHandler, DecodeError, UpdateAddHTLC, Init};
@@ -167,16 +167,16 @@ impl TestChainMonitor {
 	}
 }
 impl chain::Watch<TestChannelSigner> for TestChainMonitor {
-	fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<TestChannelSigner>) -> Result<chain::ChannelMonitorUpdateStatus, ()> {
+	fn watch_channel(&self, funding_txo: OutPoint, channel_id: ChannelId, monitor: channelmonitor::ChannelMonitor<TestChannelSigner>) -> Result<chain::ChannelMonitorUpdateStatus, ()> {
 		let mut ser = VecWriter(Vec::new());
 		monitor.write(&mut ser).unwrap();
 		if let Some(_) = self.latest_monitors.lock().unwrap().insert(funding_txo, (monitor.get_latest_update_id(), ser.0)) {
 			panic!("Already had monitor pre-watch_channel");
 		}
-		self.chain_monitor.watch_channel(funding_txo, monitor)
+		self.chain_monitor.watch_channel(funding_txo, channel_id, monitor)
 	}
-	fn update_channel(&self, funding_txo: OutPoint, update: &channelmonitor::ChannelMonitorUpdate) -> chain::ChannelMonitorUpdateStatus {
+	fn update_channel(&self, funding_txo: OutPoint, channel_id: ChannelId, update: &channelmonitor::ChannelMonitorUpdate) -> chain::ChannelMonitorUpdateStatus {
 		let mut map_lock = self.latest_monitors.lock().unwrap();
 		let mut map_entry = match map_lock.entry(funding_txo) {
 			hash_map::Entry::Occupied(entry) => entry,
@@ -188,10 +188,10 @@ impl chain::Watch<TestChannelSigner> for TestChainMonitor {
 		let mut ser = VecWriter(Vec::new());
 		deserialized_monitor.write(&mut ser).unwrap();
 		map_entry.insert((update.update_id, ser.0));
-		self.chain_monitor.update_channel(funding_txo, update)
+		self.chain_monitor.update_channel(funding_txo, channel_id, update)
 	}
-	fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)> {
+	fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Option<ChannelId>, Vec<MonitorEvent>, Option<PublicKey>)> {
 		return self.chain_monitor.release_pending_monitor_events();
 	}
 }
@@ -539,7 +539,8 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) {
 			let res = (<(BlockHash, ChanMan)>::read(&mut Cursor::new(&$ser.0), read_args).expect("Failed to read manager").1, chain_monitor.clone());
 			for (funding_txo, mon) in monitors.drain() {
-				assert_eq!(chain_monitor.chain_monitor.watch_channel(funding_txo, mon),
+				let channel_id = mon.get_channel_id();
+				assert_eq!(chain_monitor.chain_monitor.watch_channel(funding_txo, channel_id, mon),
 					Ok(ChannelMonitorUpdateStatus::Completed));
 			}
 			res
@@ -704,7 +705,9 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) {
 	lock_fundings!(nodes);
 	let chan_a = nodes[0].list_usable_channels()[0].short_channel_id.unwrap();
+	let chan_1_id = Some(nodes[0].list_usable_channels()[0].channel_id);
 	let chan_b = nodes[2].list_usable_channels()[0].short_channel_id.unwrap();
+	let chan_2_id = Some(nodes[2].list_usable_channels()[0].channel_id);
 	let mut payment_id: u8 = 0;
 	let mut payment_idx: u64 = 0;
@@ -1060,25 +1063,25 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) {
 			0x08 => {
 				if let Some((id, _)) = monitor_a.latest_monitors.lock().unwrap().get(&chan_1_funding) {
-					monitor_a.chain_monitor.force_channel_monitor_updated(chan_1_funding, *id);
+					monitor_a.chain_monitor.force_channel_monitor_updated(chan_1_funding, chan_1_id, *id);
 					nodes[0].process_monitor_events();
 				}
 			},
 			0x09 => {
 				if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_1_funding) {
-					monitor_b.chain_monitor.force_channel_monitor_updated(chan_1_funding, *id);
+					monitor_b.chain_monitor.force_channel_monitor_updated(chan_1_funding, chan_1_id, *id);
 					nodes[1].process_monitor_events();
 				}
 			},
 			0x0a => {
 				if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_2_funding) {
-					monitor_b.chain_monitor.force_channel_monitor_updated(chan_2_funding, *id);
+					monitor_b.chain_monitor.force_channel_monitor_updated(chan_2_funding, chan_2_id, *id);
 					nodes[1].process_monitor_events();
 				}
 			},
 			0x0b => {
 				if let Some((id, _)) = monitor_c.latest_monitors.lock().unwrap().get(&chan_2_funding) {
-					monitor_c.chain_monitor.force_channel_monitor_updated(chan_2_funding, *id);
+					monitor_c.chain_monitor.force_channel_monitor_updated(chan_2_funding, chan_2_id, *id);
 					nodes[2].process_monitor_events();
 				}
 			},
@@ -1299,19 +1302,19 @@ pub fn do_test(data: &[u8], underlying_out: Out, anchors: bool) {
 			*monitor_c.persister.update_ret.lock().unwrap() = ChannelMonitorUpdateStatus::Completed;
 			if let Some((id, _)) = monitor_a.latest_monitors.lock().unwrap().get(&chan_1_funding) {
-				monitor_a.chain_monitor.force_channel_monitor_updated(chan_1_funding, *id);
+				monitor_a.chain_monitor.force_channel_monitor_updated(chan_1_funding, chan_1_id, *id);
 				nodes[0].process_monitor_events();
 			}
 			if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_1_funding) {
-				monitor_b.chain_monitor.force_channel_monitor_updated(chan_1_funding, *id);
+				monitor_b.chain_monitor.force_channel_monitor_updated(chan_1_funding, chan_1_id, *id);
 				nodes[1].process_monitor_events();
 			}
 			if let Some((id, _)) = monitor_b.latest_monitors.lock().unwrap().get(&chan_2_funding) {
-				monitor_b.chain_monitor.force_channel_monitor_updated(chan_2_funding, *id);
+				monitor_b.chain_monitor.force_channel_monitor_updated(chan_2_funding, chan_2_id, *id);
 				nodes[1].process_monitor_events();
 			}
 			if let Some((id, _)) = monitor_c.latest_monitors.lock().unwrap().get(&chan_2_funding) {
-				monitor_c.chain_monitor.force_channel_monitor_updated(chan_2_funding, *id);
+				monitor_c.chain_monitor.force_channel_monitor_updated(chan_2_funding, chan_2_id, *id);
 				nodes[2].process_monitor_events();
 			}
diff --git a/fuzz/src/full_stack.rs b/fuzz/src/full_stack.rs
index a2ce98cf4d2..56eb9e59099 100644
--- a/fuzz/src/full_stack.rs
+++ b/fuzz/src/full_stack.rs
@@ -651,7 +651,7 @@ pub fn do_test(data: &[u8], logger: &Arc<dyn Logger>) {
 		if let None = loss_detector.txids_confirmed.get(&funding_txid) {
 			let outpoint = OutPoint { txid: funding_txid, index: 0 };
 			for chan in channelmanager.list_channels() {
-				if chan.channel_id == outpoint.to_channel_id() {
+				if chan.channel_id == ChannelId::v1_from_funding_outpoint(outpoint) {
 					tx.version += 1;
 					continue 'search_loop;
 				}
 			}
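The full_stack.rs hunk above is the migration pattern this PR applies at every call site where `OutPoint::to_channel_id()` disappears: the v1 derivation becomes explicit. A minimal sketch of the rewritten check, assuming only the post-change API introduced in this diff (`channel_id_matches_funding` is an illustrative helper, not part of the codebase):

```rust
use lightning::chain::transaction::OutPoint;
use lightning::ln::ChannelId;

// With `to_channel_id()` gone, callers spell out that they want the v1
// (funding-outpoint-based) ID, so non-v1 channels no longer get an
// outpoint-derived ID implicitly.
fn channel_id_matches_funding(channel_id: &ChannelId, funding: OutPoint) -> bool {
    *channel_id == ChannelId::v1_from_funding_outpoint(funding)
}
```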
diff --git a/fuzz/src/utils/test_persister.rs b/fuzz/src/utils/test_persister.rs
index 89de25aa5e6..c3b72e073ef 100644
--- a/fuzz/src/utils/test_persister.rs
+++ b/fuzz/src/utils/test_persister.rs
@@ -2,6 +2,7 @@ use lightning::chain;
 use lightning::chain::{chainmonitor, channelmonitor};
 use lightning::chain::chainmonitor::MonitorUpdateId;
 use lightning::chain::transaction::OutPoint;
+use lightning::ln::ChannelId;
 use lightning::util::test_channel_signer::TestChannelSigner;
 use std::sync::Mutex;
@@ -10,11 +11,11 @@ pub struct TestPersister {
 	pub update_ret: Mutex<chain::ChannelMonitorUpdateStatus>,
 }
 impl chainmonitor::Persist<TestChannelSigner> for TestPersister {
-	fn persist_new_channel(&self, _funding_txo: OutPoint, _data: &channelmonitor::ChannelMonitor<TestChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
+	fn persist_new_channel(&self, _funding_txo: OutPoint, _channel_id: ChannelId, _data: &channelmonitor::ChannelMonitor<TestChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
 		self.update_ret.lock().unwrap().clone()
 	}
-	fn update_persisted_channel(&self, _funding_txo: OutPoint, _update: Option<&channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor<TestChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
+	fn update_persisted_channel(&self, _funding_txo: OutPoint, _channel_id: ChannelId, _update: Option<&channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor<TestChannelSigner>, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
 		self.update_ret.lock().unwrap().clone()
 	}
 }
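The fuzz persister above ignores the new argument; a persister that actually uses it looks like the following hedged sketch, written against the `Persist` trait as amended in this diff and the test-only `TestChannelSigner` this crate's fuzz harness already imports (`RecordingPersister` is illustrative, not an LDK type):

```rust
use std::collections::HashMap;
use std::sync::Mutex;

use lightning::chain;
use lightning::chain::chainmonitor::{MonitorUpdateId, Persist};
use lightning::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate};
use lightning::chain::transaction::OutPoint;
use lightning::ln::ChannelId;
use lightning::util::test_channel_signer::TestChannelSigner;

// Records the highest update id seen per channel, keyed by the new
// `ChannelId` argument rather than by the funding outpoint.
struct RecordingPersister {
    last_update: Mutex<HashMap<ChannelId, u64>>,
}

impl Persist<TestChannelSigner> for RecordingPersister {
    fn persist_new_channel(&self, _funding_txo: OutPoint, channel_id: ChannelId,
        data: &ChannelMonitor<TestChannelSigner>, _update_id: MonitorUpdateId,
    ) -> chain::ChannelMonitorUpdateStatus {
        self.last_update.lock().unwrap().insert(channel_id, data.get_latest_update_id());
        chain::ChannelMonitorUpdateStatus::Completed
    }

    fn update_persisted_channel(&self, _funding_txo: OutPoint, channel_id: ChannelId,
        update: Option<&ChannelMonitorUpdate>, data: &ChannelMonitor<TestChannelSigner>,
        _update_id: MonitorUpdateId,
    ) -> chain::ChannelMonitorUpdateStatus {
        // On a full re-persist (`update` is None) fall back to the monitor's own id.
        let id = update.map(|u| u.update_id).unwrap_or_else(|| data.get_latest_update_id());
        self.last_update.lock().unwrap().insert(channel_id, id);
        chain::ChannelMonitorUpdateStatus::Completed
    }
}
```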
diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs
index 0f2c67538d6..fb6baabfac1 100644
--- a/lightning-background-processor/src/lib.rs
+++ b/lightning-background-processor/src/lib.rs
@@ -929,7 +929,7 @@ mod tests {
 	use lightning::chain::transaction::OutPoint;
 	use lightning::events::{Event, PathFailure, MessageSendEventsProvider, MessageSendEvent};
 	use lightning::{get_event_msg, get_event};
-	use lightning::ln::PaymentHash;
+	use lightning::ln::{PaymentHash, ChannelId};
 	use lightning::ln::channelmanager;
 	use lightning::ln::channelmanager::{BREAKDOWN_TIMEOUT, ChainParameters, MIN_CLTV_EXPIRY_DELTA, PaymentId};
 	use lightning::ln::features::{ChannelFeatures, NodeFeatures};
@@ -1414,7 +1414,7 @@ mod tests {
 		}
 		// Force-close the channel.
-		nodes[0].node.force_close_broadcasting_latest_txn(&OutPoint { txid: tx.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap();
+		nodes[0].node.force_close_broadcasting_latest_txn(&ChannelId::v1_from_funding_outpoint(OutPoint { txid: tx.txid(), index: 0 }), &nodes[1].node.get_our_node_id()).unwrap();
 		// Check that the force-close updates are persisted.
 		check_persisted_data!(nodes[0].node, filepath.clone());
diff --git a/lightning-block-sync/src/init.rs b/lightning-block-sync/src/init.rs
index 8cb0ff70a2e..23e2fae43a1 100644
--- a/lightning-block-sync/src/init.rs
+++ b/lightning-block-sync/src/init.rs
@@ -49,6 +49,7 @@ BlockSourceResult where B::Target: BlockSource {
 /// use lightning::chain::chaininterface::FeeEstimator;
 /// use lightning::sign;
 /// use lightning::sign::{EntropySource, NodeSigner, SignerProvider};
+/// use lightning::ln::ChannelId;
 /// use lightning::ln::channelmanager::{ChannelManager, ChannelManagerReadArgs};
 /// use lightning::routing::router::Router;
 /// use lightning::util::config::UserConfig;
@@ -119,7 +120,9 @@ BlockSourceResult where B::Target: BlockSource {
 ///
 /// // Allow the chain monitor to watch any channels.
 /// let monitor = monitor_listener.0;
-/// chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor);
+/// let funding_outpoint = monitor.get_funding_txo().0;
+/// let channel_id = monitor.get_channel_id();
+/// chain_monitor.watch_channel(funding_outpoint, channel_id, monitor);
 ///
 /// // Create an SPV client to notify the chain monitor and channel manager of block events.
 /// let chain_poller = poll::ChainPoller::new(block_source, Network::Bitcoin);
diff --git a/lightning-persister/src/fs_store.rs b/lightning-persister/src/fs_store.rs
index b5c6526207d..410af7bf665 100644
--- a/lightning-persister/src/fs_store.rs
+++ b/lightning-persister/src/fs_store.rs
@@ -436,6 +436,8 @@ mod tests {
 	#[cfg(not(target_os = "windows"))]
 	#[test]
 	fn test_readonly_dir_perm_failure() {
+		use lightning::ln::ChannelId;
+
 		let store = FilesystemStore::new("test_readonly_dir_perm_failure".into());
 		fs::create_dir_all(&store.get_data_dir()).unwrap();
@@ -450,7 +452,8 @@ mod tests {
 		check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
 		let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
 		let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
-		let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap();
+		let channel_id = added_monitors[0].1.get_channel_id();
+		let update_id = update_map.get(&channel_id).unwrap();
 		// Set the store's directory to read-only, which should result in
 		// returning an unrecoverable failure when we then attempt to persist a
@@ -464,7 +467,7 @@ mod tests {
 			txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(),
 			index: 0
 		};
-		match store.persist_new_channel(test_txo, &added_monitors[0].1, update_id.2) {
+		match store.persist_new_channel(test_txo, channel_id, &added_monitors[0].1, update_id.2) {
 			ChannelMonitorUpdateStatus::UnrecoverableError => {},
 			_ => panic!("unexpected result from persisting new channel")
 		}
@@ -489,7 +492,8 @@ mod tests {
 		check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
 		let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
 		let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
-		let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap();
+		let channel_id = added_monitors[0].1.get_channel_id();
+		let update_id = update_map.get(&channel_id).unwrap();
 		// Create the store with an invalid directory name and test that the
 		// channel fails to open because the directories fail to be created. There
@@ -501,7 +505,7 @@ mod tests {
 			txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(),
 			index: 0
 		};
-		match store.persist_new_channel(test_txo, &added_monitors[0].1, update_id.2) {
+		match store.persist_new_channel(test_txo, channel_id, &added_monitors[0].1, update_id.2) {
 			ChannelMonitorUpdateStatus::UnrecoverableError => {},
 			_ => panic!("unexpected result from persisting new channel")
 		}
diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs
index 14544754318..7b4a6294357 100644
--- a/lightning/src/chain/chainmonitor.rs
+++ b/lightning/src/chain/chainmonitor.rs
@@ -31,6 +31,7 @@ use crate::chain::{ChannelMonitorUpdateStatus, Filter, WatchedOutput};
 use crate::chain::chaininterface::{BroadcasterInterface, FeeEstimator};
 use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, Balance, MonitorEvent, TransactionOutputs, WithChannelMonitor, LATENCY_GRACE_PERIOD_BLOCKS};
 use crate::chain::transaction::{OutPoint, TransactionData};
+use crate::ln::ChannelId;
 use crate::sign::ecdsa::WriteableEcdsaChannelSigner;
 use crate::events;
 use crate::events::{Event, EventHandler};
@@ -158,7 +159,7 @@ pub trait Persist<ChannelSigner: WriteableEcdsaChannelSigner> {
 	///
 	/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
 	/// [`Writeable::write`]: crate::util::ser::Writeable::write
-	fn persist_new_channel(&self, channel_id: OutPoint, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> ChannelMonitorUpdateStatus;
+	fn persist_new_channel(&self, channel_funding_outpoint: OutPoint, channel_id: ChannelId, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> ChannelMonitorUpdateStatus;
 	/// Update one channel's data. The provided [`ChannelMonitor`] has already applied the given
 	/// update.
@@ -193,7 +194,7 @@ pub trait Persist<ChannelSigner: WriteableEcdsaChannelSigner> {
 	/// [`ChannelMonitorUpdateStatus`] for requirements when returning errors.
 	///
 	/// [`Writeable::write`]: crate::util::ser::Writeable::write
-	fn update_persisted_channel(&self, channel_id: OutPoint, update: Option<&ChannelMonitorUpdate>, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> ChannelMonitorUpdateStatus;
+	fn update_persisted_channel(&self, channel_funding_outpoint: OutPoint, channel_id: ChannelId, update: Option<&ChannelMonitorUpdate>, data: &ChannelMonitor<ChannelSigner>, update_id: MonitorUpdateId) -> ChannelMonitorUpdateStatus;
 }
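A design note on why both values are passed: lookups inside `ChainMonitor` stay keyed by funding outpoint, while the id lets persisters name or namespace storage per channel without stranding monitors already written under outpoint-derived keys. A hypothetical key scheme exploiting the fact that a v1 id is recomputable from the outpoint (none of these names are LDK APIs):

```rust
use lightning::chain::transaction::OutPoint;
use lightning::ln::ChannelId;

// Keep the legacy "txid_index" key for v1 channels so existing stores keep
// resolving; only channels whose id is not outpoint-derived use the id itself.
fn monitor_key(funding: OutPoint, channel_id: ChannelId) -> String {
    if channel_id == ChannelId::v1_from_funding_outpoint(funding) {
        format!("{}_{}", funding.txid, funding.index)
    } else {
        format!("{}", channel_id)
    }
}
```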
 struct MonitorHolder<ChannelSigner: WriteableEcdsaChannelSigner> {
@@ -287,7 +288,7 @@ pub struct ChainMonitor<ChannelSigner: WriteableEcdsaChannelSigner, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref>
-	pending_monitor_events: Mutex<Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)>>,
+	pending_monitor_events: Mutex<Vec<(OutPoint, Option<ChannelId>, Vec<MonitorEvent>, Option<PublicKey>)>>,
 	/// The best block height seen, used as a proxy for the passage of time.
 	highest_chain_height: AtomicUsize,
@@ -321,7 +322,8 @@ where C::Target: chain::Filter,
 		for funding_outpoint in funding_outpoints.iter() {
 			let monitor_lock = self.monitors.read().unwrap();
 			if let Some(monitor_state) = monitor_lock.get(funding_outpoint) {
-				if self.update_monitor_with_chain_data(header, best_height, txdata, &process, funding_outpoint, &monitor_state).is_err() {
+				let channel_id = monitor_state.monitor.get_channel_id();
+				if self.update_monitor_with_chain_data(header, best_height, txdata, &process, funding_outpoint, channel_id, &monitor_state).is_err() {
 					// Take the monitors lock for writing so that we poison it and any future
 					// operations going forward fail immediately.
 					core::mem::drop(monitor_lock);
@@ -336,7 +338,8 @@ where C::Target: chain::Filter,
 		let monitor_states = self.monitors.write().unwrap();
 		for (funding_outpoint, monitor_state) in monitor_states.iter() {
 			if !funding_outpoints.contains(funding_outpoint) {
-				if self.update_monitor_with_chain_data(header, best_height, txdata, &process, funding_outpoint, &monitor_state).is_err() {
+				let channel_id = monitor_state.monitor.get_channel_id();
+				if self.update_monitor_with_chain_data(header, best_height, txdata, &process, funding_outpoint, channel_id, &monitor_state).is_err() {
 					log_error!(self.logger, "{}", err_str);
 					panic!("{}", err_str);
 				}
@@ -356,7 +359,7 @@ where C::Target: chain::Filter,
 	fn update_monitor_with_chain_data<FN>(
 		&self, header: &Header, best_height: Option<u32>, txdata: &TransactionData,
-		process: FN, funding_outpoint: &OutPoint, monitor_state: &MonitorHolder<ChannelSigner>
+		process: FN, funding_outpoint: &OutPoint, channel_id: ChannelId, monitor_state: &MonitorHolder<ChannelSigner>
 	) -> Result<(), ()> where FN: Fn(&ChannelMonitor<ChannelSigner>, &TransactionData) -> Vec<TransactionOutputs> {
 		let monitor = &monitor_state.monitor;
 		let logger = WithChannelMonitor::from(&self.logger, &monitor);
@@ -377,7 +380,7 @@ where C::Target: chain::Filter,
 		}
 		log_trace!(logger, "Syncing Channel Monitor for channel {}", log_funding_info!(monitor));
-		match self.persister.update_persisted_channel(*funding_outpoint, None, monitor, update_id) {
+		match self.persister.update_persisted_channel(*funding_outpoint, channel_id, None, monitor, update_id) {
 			ChannelMonitorUpdateStatus::Completed =>
 				log_trace!(logger, "Finished syncing Channel Monitor for channel {}", log_funding_info!(monitor)),
 			ChannelMonitorUpdateStatus::InProgress => {
@@ -471,12 +474,15 @@ where C::Target: chain::Filter,
 		}
 	}
-	/// Lists the funding outpoint of each [`ChannelMonitor`] being monitored.
+	/// Lists the funding outpoint and channel ID of each [`ChannelMonitor`] being monitored.
 	///
 	/// Note that [`ChannelMonitor`]s are not removed when a channel is closed as they are always
 	/// monitoring for on-chain state resolutions.
-	pub fn list_monitors(&self) -> Vec<OutPoint> {
-		self.monitors.read().unwrap().keys().map(|outpoint| *outpoint).collect()
+	pub fn list_monitors(&self) -> Vec<(OutPoint, ChannelId)> {
+		self.monitors.read().unwrap().iter().map(|(outpoint, monitor_holder)| {
+			let channel_id = monitor_holder.monitor.get_channel_id();
+			(*outpoint, channel_id)
+		}).collect()
 	}
 	#[cfg(not(c_bindings))]
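`list_monitors` now surfaces the id alongside the outpoint. A hedged usage fragment, assuming an initialized `chain_monitor: ChainMonitor<...>` in scope (both types implement `Display`):

```rust
// Callers that previously re-derived an ID from each outpoint can read
// both values directly; no v1 assumption is baked in anymore.
for (funding_outpoint, channel_id) in chain_monitor.list_monitors() {
    println!("watching channel {} (funding output {})", channel_id, funding_outpoint);
}
```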
@@ -517,7 +523,7 @@ where C::Target: chain::Filter,
 	///
 	/// Returns an [`APIError::APIMisuseError`] if `funding_txo` does not match any currently
 	/// registered [`ChannelMonitor`]s.
-	pub fn channel_monitor_updated(&self, funding_txo: OutPoint, completed_update_id: MonitorUpdateId) -> Result<(), APIError> {
+	pub fn channel_monitor_updated(&self, funding_txo: OutPoint, channel_id: Option<ChannelId>, completed_update_id: MonitorUpdateId) -> Result<(), APIError> {
 		let monitors = self.monitors.read().unwrap();
 		let monitor_data = if let Some(mon) = monitors.get(&funding_txo) { mon } else {
 			return Err(APIError::APIMisuseError { err: format!("No ChannelMonitor matching funding outpoint {:?} found", funding_txo) });
@@ -542,8 +548,8 @@ where C::Target: chain::Filter,
 				// Completed event.
 				return Ok(());
 			}
-			self.pending_monitor_events.lock().unwrap().push((funding_txo, vec![MonitorEvent::Completed {
-				funding_txo,
+			self.pending_monitor_events.lock().unwrap().push((funding_txo, channel_id, vec![MonitorEvent::Completed {
+				funding_txo, channel_id,
 				monitor_update_id: monitor_data.monitor.get_latest_update_id(),
 			}], monitor_data.monitor.get_counterparty_node_id()));
 		},
@@ -563,11 +569,12 @@ where C::Target: chain::Filter,
 	/// chain::Watch API wherein we mark a monitor fully-updated by just calling
 	/// channel_monitor_updated once with the highest ID.
 	#[cfg(any(test, fuzzing))]
-	pub fn force_channel_monitor_updated(&self, funding_txo: OutPoint, monitor_update_id: u64) {
+	pub fn force_channel_monitor_updated(&self, funding_txo: OutPoint, channel_id: Option<ChannelId>, monitor_update_id: u64) {
 		let monitors = self.monitors.read().unwrap();
 		let counterparty_node_id = monitors.get(&funding_txo).and_then(|m| m.monitor.get_counterparty_node_id());
-		self.pending_monitor_events.lock().unwrap().push((funding_txo, vec![MonitorEvent::Completed {
+		self.pending_monitor_events.lock().unwrap().push((funding_txo, channel_id, vec![MonitorEvent::Completed {
 			funding_txo,
+			channel_id,
 			monitor_update_id,
 		}], counterparty_node_id));
 		self.event_notifier.notify();
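`channel_monitor_updated` takes the id as an `Option` since test and fuzz callers may not track it. A hedged fragment of the async-completion round trip, assuming a persister earlier returned `InProgress` for `update_id` and that `chain_monitor`, `funding_txo` and `channel_id` are in scope:

```rust
// Report the out-of-band write as finished; the ChainMonitor then queues a
// MonitorEvent::Completed carrying the same funding_txo/channel_id pair.
chain_monitor.channel_monitor_updated(funding_txo, Some(channel_id), update_id)
    .expect("funding_txo must identify a registered ChannelMonitor");
```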
@@ -713,7 +720,7 @@ where C::Target: chain::Filter,
 	L::Target: Logger,
 	P::Target: Persist<ChannelSigner>,
 {
-	fn watch_channel(&self, funding_outpoint: OutPoint, monitor: ChannelMonitor<ChannelSigner>) -> Result<ChannelMonitorUpdateStatus, ()> {
+	fn watch_channel(&self, funding_outpoint: OutPoint, channel_id: ChannelId, monitor: ChannelMonitor<ChannelSigner>) -> Result<ChannelMonitorUpdateStatus, ()> {
 		let logger = WithChannelMonitor::from(&self.logger, &monitor);
 		let mut monitors = self.monitors.write().unwrap();
 		let entry = match monitors.entry(funding_outpoint) {
@@ -726,7 +733,7 @@ where C::Target: chain::Filter,
 		log_trace!(logger, "Got new ChannelMonitor for channel {}", log_funding_info!(monitor));
 		let update_id = MonitorUpdateId::from_new_monitor(&monitor);
 		let mut pending_monitor_updates = Vec::new();
-		let persist_res = self.persister.persist_new_channel(funding_outpoint, &monitor, update_id);
+		let persist_res = self.persister.persist_new_channel(funding_outpoint, channel_id, &monitor, update_id);
 		match persist_res {
 			ChannelMonitorUpdateStatus::InProgress => {
 				log_info!(logger, "Persistence of new ChannelMonitor for channel {} in progress", log_funding_info!(monitor));
@@ -752,12 +759,12 @@ where C::Target: chain::Filter,
 		Ok(persist_res)
 	}
-	fn update_channel(&self, funding_txo: OutPoint, update: &ChannelMonitorUpdate) -> ChannelMonitorUpdateStatus {
+	fn update_channel(&self, funding_txo: OutPoint, channel_id: ChannelId, update: &ChannelMonitorUpdate) -> ChannelMonitorUpdateStatus {
 		// Update the monitor that watches the channel referred to by the given outpoint.
 		let monitors = self.monitors.read().unwrap();
 		match monitors.get(&funding_txo) {
 			None => {
-				let logger = WithContext::from(&self.logger, update.counterparty_node_id, Some(funding_txo.to_channel_id()));
+				let logger = WithContext::from(&self.logger, update.counterparty_node_id, Some(channel_id));
 				log_error!(logger, "Failed to update channel monitor: no such monitor registered");
 				// We should never ever trigger this from within ChannelManager. Technically a
@@ -783,9 +790,9 @@ where C::Target: chain::Filter,
 				// while reading `channel_monitor` with updates from storage. Instead, we should persist
 				// the entire `channel_monitor` here.
 				log_warn!(logger, "Failed to update ChannelMonitor for channel {}. Going ahead and persisting the entire ChannelMonitor", log_funding_info!(monitor));
-				self.persister.update_persisted_channel(funding_txo, None, monitor, update_id)
+				self.persister.update_persisted_channel(funding_txo, channel_id, None, monitor, update_id)
 			} else {
-				self.persister.update_persisted_channel(funding_txo, Some(update), monitor, update_id)
+				self.persister.update_persisted_channel(funding_txo, channel_id, Some(update), monitor, update_id)
 			};
 			match persist_res {
 				ChannelMonitorUpdateStatus::InProgress => {
@@ -815,7 +822,7 @@ where C::Target: chain::Filter,
 		}
 	}
-	fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)> {
+	fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Option<ChannelId>, Vec<MonitorEvent>, Option<PublicKey>)> {
 		let mut pending_monitor_events = self.pending_monitor_events.lock().unwrap().split_off(0);
 		for monitor_state in self.monitors.read().unwrap().values() {
 			let logger = WithChannelMonitor::from(&self.logger, &monitor_state.monitor);
@@ -829,8 +836,9 @@ where C::Target: chain::Filter,
 			let monitor_events = monitor_state.monitor.get_and_clear_pending_monitor_events();
 			if monitor_events.len() > 0 {
 				let monitor_outpoint = monitor_state.monitor.get_funding_txo().0;
+				let monitor_channel_id = monitor_state.monitor.get_channel_id();
 				let counterparty_node_id = monitor_state.monitor.get_counterparty_node_id();
-				pending_monitor_events.push((monitor_outpoint, monitor_events, counterparty_node_id));
+				pending_monitor_events.push((monitor_outpoint, Some(monitor_channel_id), monitor_events, counterparty_node_id));
 			}
 		}
 	}
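Consumers of `release_pending_monitor_events` see the widened tuple; the id is `Option`al because `Completed` events can be queued by callers that did not supply one. A hedged drain-loop sketch against the trait as amended in this diff, using the v1 fallback for missing ids:

```rust
use lightning::chain;
use lightning::ln::ChannelId;
use lightning::util::test_channel_signer::TestChannelSigner;

// Drain pending monitor events, resolving a missing id via the v1 rule.
fn drain_monitor_events<W: chain::Watch<TestChannelSigner>>(watch: &W) {
    for (funding_txo, channel_id, events, _counterparty) in watch.release_pending_monitor_events() {
        let id = channel_id.unwrap_or_else(|| ChannelId::v1_from_funding_outpoint(funding_txo));
        for event in events {
            // ... route `event` to the channel identified by `id` ...
            let _ = (&id, &event);
        }
    }
}
```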
@@ -904,7 +912,7 @@ mod tests {
 		let persistences = chanmon_cfgs[1].persister.offchain_monitor_updates.lock().unwrap().clone();
 		assert_eq!(persistences.len(), 1);
-		let (funding_txo, updates) = persistences.iter().next().unwrap();
+		let (funding_txo, (channel_id, updates)) = persistences.iter().next().unwrap();
 		assert_eq!(updates.len(), 2);
 		// Note that updates is a HashMap so the ordering here is actually random. This shouldn't
@@ -918,7 +926,7 @@ mod tests {
 		#[cfg(c_bindings)]
 		assert!(nodes[1].chain_monitor.chain_monitor.list_pending_monitor_updates().iter()
 			.find(|(txo, _)| txo == funding_txo).unwrap().1.contains(&next_update));
-		nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(*funding_txo, next_update.clone()).unwrap();
+		nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(*funding_txo, Some(*channel_id), next_update.clone()).unwrap();
 		// Should not contain the previously pending next_update when pending updates listed.
 		#[cfg(not(c_bindings))]
 		assert!(!nodes[1].chain_monitor.chain_monitor.list_pending_monitor_updates().get(funding_txo)
@@ -929,7 +937,7 @@ mod tests {
 		assert!(nodes[1].chain_monitor.release_pending_monitor_events().is_empty());
 		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 		assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
-		nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(*funding_txo, update_iter.next().unwrap().clone()).unwrap();
+		nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(*funding_txo, Some(*channel_id), update_iter.next().unwrap().clone()).unwrap();
 		let claim_events = nodes[1].node.get_and_clear_pending_events();
 		assert_eq!(claim_events.len(), 2);
@@ -1037,9 +1045,9 @@ mod tests {
 			nodes[0].chain_monitor.chain_monitor.best_block_updated(&latest_header, nodes[0].best_block_info().1 + LATENCY_GRACE_PERIOD_BLOCKS);
 		} else {
 			let persistences = chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap().clone();
-			for (funding_outpoint, update_ids) in persistences {
+			for (funding_outpoint, (channel_id, update_ids)) in persistences {
 				for update_id in update_ids {
-					nodes[0].chain_monitor.chain_monitor.channel_monitor_updated(funding_outpoint, update_id).unwrap();
+					nodes[0].chain_monitor.chain_monitor.channel_monitor_updated(funding_outpoint, Some(channel_id), update_id).unwrap();
 				}
 			}
 		}
diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs
index bcc324a5582..5fa07b04659 100644
--- a/lightning/src/chain/channelmonitor.rs
+++ b/lightning/src/chain/channelmonitor.rs
@@ -158,6 +158,8 @@ pub enum MonitorEvent {
 	Completed {
 		/// The funding outpoint of the [`ChannelMonitor`] that was updated
 		funding_txo: OutPoint,
+		/// The channel ID of the channel associated with the [`ChannelMonitor`]
+		channel_id: Option<ChannelId>,
 		/// The Update ID from [`ChannelMonitorUpdate::update_id`] which was applied or
 		/// [`ChannelMonitor::get_latest_update_id`].
 		///
@@ -172,6 +174,7 @@ impl_writeable_tlv_based_enum_upgradable!(MonitorEvent,
 	(0, Completed) => {
 		(0, funding_txo, required),
 		(2, monitor_update_id, required),
+		(4, channel_id, option),
 	},
 	;
 	(2, HTLCEvent),
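The TLV entry has to be `option` (not `required`) because the field is `Option<ChannelId>` and events serialized before this change carry no id. The decode-side rule, shown as a standalone sketch, mirrors `ChannelMonitorImpl::get_channel_id` below: absent a stored id, fall back to the v1 derivation.

```rust
use lightning::chain::transaction::OutPoint;
use lightning::ln::ChannelId;

fn effective_channel_id(stored: Option<ChannelId>, funding_txo: OutPoint) -> ChannelId {
    // Pre-upgrade data predates non-v1 channels, so the v1 rule is safe here.
    stored.unwrap_or_else(|| ChannelId::v1_from_funding_outpoint(funding_txo))
}
```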
@@ -772,6 +775,7 @@ pub(crate) struct ChannelMonitorImpl<Signer: WriteableEcdsaChannelSigner> {
 	channel_keys_id: [u8; 32],
 	holder_revocation_basepoint: RevocationBasepoint,
+	channel_id: Option<ChannelId>,
 	funding_info: (OutPoint, ScriptBuf),
 	current_counterparty_commitment_txid: Option<Txid>,
 	prev_counterparty_commitment_txid: Option<Txid>,
@@ -1097,6 +1101,7 @@ impl Writeable for ChannelMonitorImpl WithChannelMonitor<'a, L> where L::Target: Logger {
 	pub(crate) fn from_impl(logger: &'a L, monitor_impl: &ChannelMonitorImpl) -> Self {
 		let peer_id = monitor_impl.counterparty_node_id;
-		let channel_id = Some(monitor_impl.funding_info.0.to_channel_id());
+		let channel_id = Some(monitor_impl.get_channel_id());
 		WithChannelMonitor {
 			logger, peer_id, channel_id,
 		}
@@ -1181,7 +1186,8 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
 		funding_redeemscript: ScriptBuf, channel_value_satoshis: u64,
 		commitment_transaction_number_obscure_factor: u64,
 		initial_holder_commitment_tx: HolderCommitmentTransaction,
-		best_block: BestBlock, counterparty_node_id: PublicKey) -> ChannelMonitor<Signer> {
+		best_block: BestBlock, counterparty_node_id: PublicKey, channel_id: ChannelId,
+	) -> ChannelMonitor<Signer> {
 		assert!(commitment_transaction_number_obscure_factor <= (1 << 48));
 		let counterparty_payment_script = chan_utils::get_counterparty_payment_script(
@@ -1235,6 +1241,7 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
 			channel_keys_id,
 			holder_revocation_basepoint,
+			channel_id: Some(channel_id),
 			funding_info,
 			current_counterparty_commitment_txid: None,
 			prev_counterparty_commitment_txid: None,
@@ -1386,6 +1393,11 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
 		self.inner.lock().unwrap().get_funding_txo().clone()
 	}
+	/// Gets the channel_id of the channel this ChannelMonitor is monitoring for.
+	pub fn get_channel_id(&self) -> ChannelId {
+		self.inner.lock().unwrap().get_channel_id()
+	}
+
 	/// Gets a list of txids, with their output scripts (in the order they appear in the
 	/// transaction), which we must learn about spends of via block_connected().
 	pub fn get_outputs_to_watch(&self) -> Vec<(Txid, Vec<(u32, ScriptBuf)>)> {
@@ -2834,7 +2846,8 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 			self.queue_latest_holder_commitment_txn_for_broadcast(broadcaster, &bounded_fee_estimator, logger);
 		} else if !self.holder_tx_signed {
 			log_error!(logger, "WARNING: You have a potentially-unsafe holder commitment transaction available to broadcast");
-			log_error!(logger, " in channel monitor for channel {}!", &self.funding_info.0.to_channel_id());
+			let channel_id = self.get_channel_id();
+			log_error!(logger, " in channel monitor for channel {}!", &channel_id);
 			log_error!(logger, " Read the docs for ChannelMonitor::get_latest_holder_commitment_txn and take manual action!");
 		} else {
 			// If we generated a MonitorEvent::HolderForceClosed, the ChannelManager
@@ -2880,6 +2893,10 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
 		&self.funding_info
 	}
+	pub fn get_channel_id(&self) -> ChannelId {
+		self.channel_id.unwrap_or(ChannelId::v1_from_funding_outpoint(self.funding_info.0))
+	}
+
 	fn get_outputs_to_watch(&self) -> &HashMap<Txid, Vec<(u32, ScriptBuf)>> {
 		// If we've detected a counterparty commitment tx on chain, we must include it in the set
 		// of outputs to watch for spends of, otherwise we're likely to lose user funds.
Because @@ -3642,7 +3659,7 @@ impl ChannelMonitorImpl { if prevout.txid == self.funding_info.0.txid && prevout.vout == self.funding_info.0.index as u32 { let mut balance_spendable_csv = None; log_info!(logger, "Channel {} closed by funding output spend in txid {}.", - &self.funding_info.0.to_channel_id(), txid); + &self.get_channel_id(), txid); self.funding_spend_seen = true; let mut commitment_tx_to_counterparty_output = None; if (tx.input[0].sequence.0 >> 8*3) as u8 == 0x80 && (tx.lock_time.to_consensus_u32() >> 8*3) as u8 == 0x20 { @@ -3812,7 +3829,7 @@ impl ChannelMonitorImpl { log_debug!(logger, "Descriptor {} has got enough confirmations to be passed upstream", log_spendable!(descriptor)); self.pending_events.push(Event::SpendableOutputs { outputs: vec![descriptor], - channel_id: Some(self.funding_info.0.to_channel_id()), + channel_id: Some(self.get_channel_id()), }); self.spendable_txids_confirmed.push(entry.txid); }, @@ -4557,6 +4574,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP let mut spendable_txids_confirmed = Some(Vec::new()); let mut counterparty_fulfilled_htlcs = Some(HashMap::new()); let mut initial_counterparty_commitment_info = None; + let mut channel_id = None; read_tlv_fields!(reader, { (1, funding_spend_confirmed, option), (3, htlcs_resolved_on_chain, optional_vec), @@ -4567,6 +4585,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP (13, spendable_txids_confirmed, optional_vec), (15, counterparty_fulfilled_htlcs, option), (17, initial_counterparty_commitment_info, option), + (19, channel_id, option), }); // Monitors for anchor outputs channels opened in v0.0.116 suffered from a bug in which the @@ -4591,6 +4610,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP channel_keys_id, holder_revocation_basepoint, + channel_id, funding_info, current_counterparty_commitment_txid, prev_counterparty_commitment_txid, @@ -4665,7 +4685,7 @@ mod tests { use crate::chain::package::{weight_offered_htlc, weight_received_htlc, weight_revoked_offered_htlc, weight_revoked_received_htlc, WEIGHT_REVOKED_OUTPUT}; use crate::chain::transaction::OutPoint; use crate::sign::InMemorySigner; - use crate::ln::{PaymentPreimage, PaymentHash}; + use crate::ln::{PaymentPreimage, PaymentHash, ChannelId}; use crate::ln::channel_keys::{DelayedPaymentBasepoint, DelayedPaymentKey, HtlcBasepoint, RevocationBasepoint, RevocationKey}; use crate::ln::chan_utils::{self,HTLCOutputInCommitment, ChannelPublicKeys, ChannelTransactionParameters, HolderCommitmentTransaction, CounterpartyChannelTransactionParameters}; use crate::ln::channelmanager::{PaymentSendFailure, PaymentId, RecipientOnionFields}; @@ -4841,6 +4861,7 @@ mod tests { htlc_basepoint: HtlcBasepoint::from(PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[48; 32]).unwrap())) }; let funding_outpoint = OutPoint { txid: Txid::all_zeros(), index: u16::max_value() }; + let channel_id = ChannelId::v1_from_funding_outpoint(funding_outpoint); let channel_parameters = ChannelTransactionParameters { holder_pubkeys: keys.holder_channel_pubkeys.clone(), holder_selected_contest_delay: 66, @@ -4860,7 +4881,7 @@ mod tests { Some(ShutdownScript::new_p2wpkh_from_pubkey(shutdown_pubkey).into_inner()), 0, &ScriptBuf::new(), (OutPoint { txid: Txid::from_slice(&[43; 32]).unwrap(), index: 0 }, ScriptBuf::new()), &channel_parameters, ScriptBuf::new(), 46, 0, HolderCommitmentTransaction::dummy(&mut Vec::new()), - best_block, dummy_key); + best_block, 
dummy_key, channel_id);
 		let mut htlcs = preimages_slice_to_htlcs!(preimages[0..10]);
 		let dummy_commitment_tx = HolderCommitmentTransaction::dummy(&mut htlcs);
@@ -5090,6 +5111,7 @@ mod tests {
 			htlc_basepoint: HtlcBasepoint::from(PublicKey::from_secret_key(&secp_ctx, &SecretKey::from_slice(&[48; 32]).unwrap())),
 		};
 		let funding_outpoint = OutPoint { txid: Txid::all_zeros(), index: u16::max_value() };
+		let channel_id = ChannelId::v1_from_funding_outpoint(funding_outpoint);
 		let channel_parameters = ChannelTransactionParameters {
 			holder_pubkeys: keys.holder_channel_pubkeys.clone(),
 			holder_selected_contest_delay: 66,
@@ -5107,9 +5129,12 @@ mod tests {
 			Some(ShutdownScript::new_p2wpkh_from_pubkey(shutdown_pubkey).into_inner()), 0, &ScriptBuf::new(),
 			(OutPoint { txid: Txid::from_slice(&[43; 32]).unwrap(), index: 0 }, ScriptBuf::new()),
 			&channel_parameters, ScriptBuf::new(), 46, 0, HolderCommitmentTransaction::dummy(&mut Vec::new()),
-			best_block, dummy_key);
+			best_block, dummy_key, channel_id);
-		let chan_id = monitor.inner.lock().unwrap().funding_info.0.to_channel_id().clone();
+		let chan_id = {
+			let monitor_impl = monitor.inner.lock().unwrap();
+			monitor_impl.get_channel_id()
+		};
 		let context_logger = WithChannelMonitor::from(&logger, &monitor);
 		log_error!(context_logger, "This is an error");
 		log_warn!(context_logger, "This is an error");
diff --git a/lightning/src/chain/mod.rs b/lightning/src/chain/mod.rs
index dafce03ddb0..639f2f4280b 100644
--- a/lightning/src/chain/mod.rs
+++ b/lightning/src/chain/mod.rs
@@ -17,6 +17,7 @@ use bitcoin::network::constants::Network;
 use bitcoin::secp256k1::PublicKey;
 use crate::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, MonitorEvent};
+use crate::ln::ChannelId;
 use crate::sign::ecdsa::WriteableEcdsaChannelSigner;
 use crate::chain::transaction::{OutPoint, TransactionData};
@@ -269,7 +270,7 @@ pub trait Watch<ChannelSigner: WriteableEcdsaChannelSigner> {
 	/// [`get_outputs_to_watch`]: channelmonitor::ChannelMonitor::get_outputs_to_watch
 	/// [`block_connected`]: channelmonitor::ChannelMonitor::block_connected
 	/// [`block_disconnected`]: channelmonitor::ChannelMonitor::block_disconnected
-	fn watch_channel(&self, funding_txo: OutPoint, monitor: ChannelMonitor<ChannelSigner>) -> Result<ChannelMonitorUpdateStatus, ()>;
+	fn watch_channel(&self, funding_txo: OutPoint, channel_id: ChannelId, monitor: ChannelMonitor<ChannelSigner>) -> Result<ChannelMonitorUpdateStatus, ()>;
 	/// Updates a channel identified by `funding_txo` by applying `update` to its monitor.
 	///
@@ -286,7 +287,7 @@ pub trait Watch<ChannelSigner: WriteableEcdsaChannelSigner> {
 	/// [`ChannelMonitorUpdateStatus::UnrecoverableError`], see its documentation for more info.
 	///
 	/// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager
-	fn update_channel(&self, funding_txo: OutPoint, update: &ChannelMonitorUpdate) -> ChannelMonitorUpdateStatus;
+	fn update_channel(&self, funding_txo: OutPoint, channel_id: ChannelId, update: &ChannelMonitorUpdate) -> ChannelMonitorUpdateStatus;
 	/// Returns any monitor events since the last call. Subsequent calls must only return new
 	/// events.
 	///
 	/// For details on asynchronous [`ChannelMonitor`] updating and returning
 	/// [`MonitorEvent::Completed`] here, see [`ChannelMonitorUpdateStatus::InProgress`].
-	fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)>;
+	fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Option<ChannelId>, Vec<MonitorEvent>, Option<PublicKey>)>;
 }
 /// The `Filter` trait defines behavior for indicating chain activity of interest pertaining to
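For downstream `Watch` implementations the migration is mechanical: accept the extra argument and forward it. A hedged pass-through sketch against the trait as amended above, modeled on the fuzz harness's `TestChainMonitor` earlier in this diff (`LoggingWatch` is illustrative, and `TestChannelSigner` is used only to stay concrete):

```rust
use bitcoin::secp256k1::PublicKey;
use lightning::chain::{self, ChannelMonitorUpdateStatus};
use lightning::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, MonitorEvent};
use lightning::chain::transaction::OutPoint;
use lightning::ln::ChannelId;
use lightning::util::test_channel_signer::TestChannelSigner;

struct LoggingWatch<W: chain::Watch<TestChannelSigner>> { inner: W }

impl<W: chain::Watch<TestChannelSigner>> chain::Watch<TestChannelSigner> for LoggingWatch<W> {
    fn watch_channel(&self, funding_txo: OutPoint, channel_id: ChannelId,
        monitor: ChannelMonitor<TestChannelSigner>) -> Result<ChannelMonitorUpdateStatus, ()>
    {
        println!("watching channel {}", channel_id);
        self.inner.watch_channel(funding_txo, channel_id, monitor)
    }

    fn update_channel(&self, funding_txo: OutPoint, channel_id: ChannelId,
        update: &ChannelMonitorUpdate) -> ChannelMonitorUpdateStatus
    {
        println!("updating channel {}", channel_id);
        self.inner.update_channel(funding_txo, channel_id, update)
    }

    fn release_pending_monitor_events(&self)
        -> Vec<(OutPoint, Option<ChannelId>, Vec<MonitorEvent>, Option<PublicKey>)>
    {
        self.inner.release_pending_monitor_events()
    }
}
```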
diff --git a/lightning/src/chain/transaction.rs b/lightning/src/chain/transaction.rs
index 5bef97792d3..8ae5fae0224 100644
--- a/lightning/src/chain/transaction.rs
+++ b/lightning/src/chain/transaction.rs
@@ -9,9 +9,7 @@
 //! Types describing on-chain transactions.
-use crate::ln::ChannelId;
 use bitcoin::hash_types::Txid;
-use bitcoin::hashes::Hash;
 use bitcoin::blockdata::transaction::OutPoint as BitcoinOutPoint;
 use bitcoin::blockdata::transaction::Transaction;
@@ -58,11 +56,6 @@ pub struct OutPoint {
 }
 impl OutPoint {
-	/// Convert an `OutPoint` to a lightning channel id.
-	pub fn to_channel_id(&self) -> ChannelId {
-		ChannelId::v1_from_funding_txid(self.txid.as_byte_array(), self.index)
-	}
-
 	/// Converts this OutPoint into the OutPoint field as used by rust-bitcoin
 	///
 	/// This is not exported to bindings users as the same type is used universally in the C bindings
@@ -82,25 +75,3 @@ impl core::fmt::Display for OutPoint {
 }
 impl_writeable!(OutPoint, { txid, index });
-
-#[cfg(test)]
-mod tests {
-	use crate::chain::transaction::OutPoint;
-
-	use bitcoin::blockdata::transaction::Transaction;
-	use bitcoin::consensus::encode;
-	use bitcoin::hashes::hex::FromHex;
-
-	#[test]
-	fn test_channel_id_calculation() {
-		let tx: Transaction = encode::deserialize(&<Vec<u8>>::from_hex("020000000001010e0adef48412e4361325ac1c6e36411299ab09d4f083b9d8ddb55fbc06e1b0c00000000000feffffff0220a1070000000000220020f81d95e040bd0a493e38bae27bff52fe2bb58b93b293eb579c01c31b05c5af1dc072cfee54a3000016001434b1d6211af5551905dc2642d05f5b04d25a8fe80247304402207f570e3f0de50546aad25a872e3df059d277e776dda4269fa0d2cc8c2ee6ec9a022054e7fae5ca94d47534c86705857c24ceea3ad51c69dd6051c5850304880fc43a012103cb11a1bacc223d98d91f1946c6752e358a5eb1a1c983b3e6fb15378f453b76bd00000000").unwrap()[..]).unwrap();
-		assert_eq!(&OutPoint {
-			txid: tx.txid(),
-			index: 0
-		}.to_channel_id().0[..], &<Vec<u8>>::from_hex("3e88dd7165faf7be58b3c5bb2c9c452aebef682807ea57080f62e6f6e113c25e").unwrap()[..]);
-		assert_eq!(&OutPoint {
-			txid: tx.txid(),
-			index: 1
-		}.to_channel_id().0[..], &<Vec<u8>>::from_hex("3e88dd7165faf7be58b3c5bb2c9c452aebef682807ea57080f62e6f6e113c25f").unwrap()[..]);
-	}
-}
diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs
index af827b8cebb..ad86bec2384 100644
--- a/lightning/src/ln/chanmon_update_fail_tests.rs
+++ b/lightning/src/ln/chanmon_update_fail_tests.rs
@@ -21,7 +21,7 @@ use crate::chain::{ChannelMonitorUpdateStatus, Listen, Watch};
 use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, PaymentPurpose, ClosureReason, HTLCDestination};
 use crate::ln::channelmanager::{RAACommitmentOrder, PaymentSendFailure, PaymentId, RecipientOnionFields};
 use crate::ln::channel::{AnnouncementSigsState, ChannelPhase};
-use crate::ln::msgs;
+use crate::ln::{msgs, ChannelId};
 use crate::ln::msgs::{ChannelMessageHandler, RoutingMessageHandler};
 use crate::util::test_channel_signer::TestChannelSigner;
 use crate::util::errors::APIError;
@@ -50,6 +50,7 @@ fn test_monitor_and_persister_update_fail() {
 	// Create some initial channel
 	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
 	let outpoint = OutPoint { txid: chan.3.txid(), index: 0 };
+	let channel_id = chan.2;
 	// Rebalance the network to generate htlc in the two directions
send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000); @@ -80,7 +81,7 @@ fn test_monitor_and_persister_update_fail() { new_monitor }; let chain_mon = test_utils::TestChainMonitor::new(Some(&chain_source), &tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager); - assert_eq!(chain_mon.watch_channel(outpoint, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed)); + assert_eq!(chain_mon.watch_channel(outpoint, channel_id, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed)); chain_mon }; chain_mon.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), 200); @@ -101,12 +102,12 @@ fn test_monitor_and_persister_update_fail() { if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) { // Check that the persister returns InProgress (and will never actually complete) // as the monitor update errors. - if let ChannelMonitorUpdateStatus::InProgress = chain_mon.chain_monitor.update_channel(outpoint, &update) {} else { panic!("Expected monitor paused"); } + if let ChannelMonitorUpdateStatus::InProgress = chain_mon.chain_monitor.update_channel(outpoint, channel_id, &update) {} else { panic!("Expected monitor paused"); } logger.assert_log_regex("lightning::chain::chainmonitor", regex::Regex::new("Failed to update ChannelMonitor for channel [0-9a-f]*.").unwrap(), 1); // Apply the monitor update to the original ChainMonitor, ensuring the // ChannelManager and ChannelMonitor aren't out of sync. - assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), + assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, channel_id, &update), ChannelMonitorUpdateStatus::Completed); } else { assert!(false); } } else { @@ -152,7 +153,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, Some(channel_id), latest_update); check_added_monitors!(nodes[0], 0); let mut events_2 = nodes[0].node.get_and_clear_pending_msg_events(); @@ -312,7 +313,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { // Now fix monitor updating... chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, Some(channel_id), latest_update); check_added_monitors!(nodes[0], 0); macro_rules! 
disconnect_reconnect_peers { () => { { @@ -621,7 +622,7 @@ fn test_monitor_update_fail_cs() { chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, Some(channel_id), latest_update); check_added_monitors!(nodes[1], 0); let responses = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(responses.len(), 2); @@ -654,7 +655,7 @@ fn test_monitor_update_fail_cs() { chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, Some(channel_id), latest_update); check_added_monitors!(nodes[0], 0); let final_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); @@ -716,7 +717,7 @@ fn test_monitor_update_fail_no_rebroadcast() { chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, Some(channel_id), latest_update); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 0); expect_pending_htlcs_forwardable!(nodes[1]); @@ -778,7 +779,7 @@ fn test_monitor_update_raa_while_paused() { check_added_monitors!(nodes[0], 1); let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, Some(channel_id), latest_update); check_added_monitors!(nodes[0], 0); let as_update_raa = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); @@ -904,7 +905,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { // update_add update. 
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2.2).unwrap().clone(); - nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, Some(chan_2.2), latest_update); check_added_monitors!(nodes[1], 0); expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); check_added_monitors!(nodes[1], 1); @@ -1159,7 +1160,7 @@ fn test_monitor_update_fail_reestablish() { chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone(); - nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, Some(chan_1.2), latest_update); check_added_monitors!(nodes[1], 0); updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -1237,7 +1238,7 @@ fn raa_no_response_awaiting_raa_state() { check_added_monitors!(nodes[1], 1); let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, Some(channel_id), latest_update); // nodes[1] should be AwaitingRAA here! check_added_monitors!(nodes[1], 0); let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -1359,7 +1360,7 @@ fn claim_while_disconnected_monitor_update_fail() { // receiving the commitment update from A, and the resulting commitment dances. 
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, Some(channel_id), latest_update); check_added_monitors!(nodes[1], 0); let bs_msgs = nodes[1].node.get_and_clear_pending_msg_events(); @@ -1474,7 +1475,7 @@ fn monitor_failed_no_reestablish_response() { chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, Some(channel_id), latest_update); check_added_monitors!(nodes[1], 0); let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -1567,7 +1568,7 @@ fn first_message_on_recv_ordering() { chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, Some(channel_id), latest_update); check_added_monitors!(nodes[1], 0); expect_pending_htlcs_forwardable!(nodes[1]); @@ -1655,7 +1656,7 @@ fn test_monitor_update_fail_claim() { // Now restore monitor updating on the 0<->1 channel and claim the funds on B. 
let channel_id = chan_1.2; let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, Some(channel_id), latest_update); expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); check_added_monitors!(nodes[1], 0); @@ -1755,7 +1756,7 @@ fn test_monitor_update_on_pending_forwards() { chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone(); - nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, Some(chan_1.2), latest_update); check_added_monitors!(nodes[1], 0); let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -1822,7 +1823,7 @@ fn monitor_update_claim_fail_no_response() { chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, Some(channel_id), latest_update); expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); check_added_monitors!(nodes[1], 0); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -1861,7 +1862,7 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf: chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()); - let channel_id = OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id(); + let channel_id = ChannelId::v1_from_funding_outpoint(OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }); nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg); check_added_monitors!(nodes[1], 1); @@ -1872,7 +1873,7 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf: assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, Some(channel_id), latest_update); check_added_monitors!(nodes[0], 0); expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id()); @@ -1918,7 +1919,7 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf: chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - 
nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, Some(channel_id), latest_update); check_added_monitors!(nodes[1], 0); let (channel_id, (announcement, as_update, bs_update)) = if !confirm_a_first { @@ -2019,7 +2020,7 @@ fn test_path_paused_mpp() { // And check that, after we successfully update the monitor for chan_2 we can pass the second // HTLC along to nodes[3] and claim the whole payment back to nodes[0]. let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2_id).unwrap().clone(); - nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, Some(chan_2_id), latest_update); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 200_000, payment_hash.clone(), Some(payment_secret), events.pop().unwrap(), true, None); @@ -2394,7 +2395,7 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { // not occur prior to #756). chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (funding_txo, mon_id, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone(); - nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_txo, mon_id); + nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_txo, Some(chan_id), mon_id); expect_payment_claimed!(nodes[0], payment_hash_0, 100_000); // New outbound messages should be generated immediately upon a call to @@ -2613,14 +2614,14 @@ fn test_temporary_error_during_shutdown() { chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, Some(channel_id), latest_update); nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id())); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, Some(channel_id), latest_update); nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id())); let (_, closing_signed_a) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); @@ -2665,10 +2666,10 @@ fn double_temp_error() { chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (_, latest_update_2, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); - nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_tx, 
latest_update_1); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_tx, Some(channel_id), latest_update_1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 0); - nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_tx, latest_update_2); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(funding_tx, Some(channel_id), latest_update_2); // Complete the first HTLC. Note that as a side-effect we handle the monitor update completions // and get both PaymentClaimed events at once. @@ -3112,7 +3113,7 @@ fn do_test_inverted_mon_completion_order(with_latest_manager: bool, complete_bc_ // disk in the proper ChannelMonitor, unblocking the B <-> C ChannelMonitor updating // process. let (outpoint, _, ab_update_id) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone(); - nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(outpoint, ab_update_id).unwrap(); + nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(outpoint, Some(chan_id_ab), ab_update_id).unwrap(); // When we fetch B's HTLC update messages next (now that the ChannelMonitorUpdate has // completed), it will also release the final RAA ChannelMonitorUpdate on the B <-> C @@ -3140,7 +3141,7 @@ fn do_test_inverted_mon_completion_order(with_latest_manager: bool, complete_bc_ reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); let (outpoint, _, ab_update_id) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone(); - nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(outpoint, ab_update_id).unwrap(); + nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(outpoint, Some(chan_id_ab), ab_update_id).unwrap(); // The ChannelMonitorUpdate which was completed prior to the reconnect only contained the // preimage (as it was a replay of the original ChannelMonitorUpdate from before we @@ -3309,7 +3310,7 @@ fn do_test_durable_preimages_on_closed_channel(close_chans_before_reload: bool, reconnect_nodes(reconnect_args); let (outpoint, ab_update_id, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone(); - nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, ab_update_id); + nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, Some(chan_id_ab), ab_update_id); expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), true, false); if !close_chans_before_reload { // Once we call `process_pending_events` the final `ChannelMonitor` for the B<->C diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 1dfc6dc552f..8024bc5791a 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -815,7 +815,7 @@ pub(super) struct ReestablishResponses { #[must_use] pub(crate) struct ShutdownResult { /// A channel monitor update to apply. - pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>, + pub(crate) monitor_update: Option<(PublicKey, OutPoint, Option<ChannelId>, ChannelMonitorUpdate)>, /// A list of dropped outbound HTLCs that can safely be failed backwards immediately. pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>, /// An unbroadcasted batch funding transaction id.
The closure of this channel should be @@ -2389,7 +2389,7 @@ impl<SP: Deref> ChannelContext<SP> where SP::Target: SignerProvider { }; if generate_monitor_update { self.latest_monitor_update_id = CLOSED_CHANNEL_UPDATE_ID; - Some((self.get_counterparty_node_id(), funding_txo, ChannelMonitorUpdate { + Some((self.get_counterparty_node_id(), funding_txo, Some(self.channel_id()), ChannelMonitorUpdate { update_id: self.latest_monitor_update_id, counterparty_node_id: Some(self.counterparty_node_id), updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast }], @@ -6462,7 +6462,7 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider { // Now that we're past error-generating stuff, update our local state: self.context.channel_state = ChannelState::FundingNegotiated; - self.context.channel_id = funding_txo.to_channel_id(); + self.context.channel_id = ChannelId::v1_from_funding_outpoint(funding_txo); // If the funding transaction is a coinbase transaction, we need to set the minimum depth to 100. // We can skip this if it is a zero-conf channel. @@ -6801,7 +6801,7 @@ impl<SP: Deref> OutboundV1Channel<SP> where SP::Target: SignerProvider { &self.context.channel_transaction_parameters, funding_redeemscript.clone(), self.context.channel_value_satoshis, obscure_factor, - holder_commitment_tx, best_block, self.context.counterparty_node_id); + holder_commitment_tx, best_block, self.context.counterparty_node_id, self.context.channel_id()); channel_monitor.provide_initial_counterparty_commitment_tx( counterparty_initial_bitcoin_tx.txid, Vec::new(), self.context.cur_counterparty_commitment_transaction_number, @@ -7343,7 +7343,7 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider { // Now that we're past error-generating stuff, update our local state: self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()); - self.context.channel_id = funding_txo.to_channel_id(); + self.context.channel_id = ChannelId::v1_from_funding_outpoint(funding_txo); self.context.cur_counterparty_commitment_transaction_number -= 1; self.context.cur_holder_commitment_transaction_number -= 1; @@ -7361,7 +7361,7 @@ impl<SP: Deref> InboundV1Channel<SP> where SP::Target: SignerProvider { &self.context.channel_transaction_parameters, funding_redeemscript.clone(), self.context.channel_value_satoshis, obscure_factor, - holder_commitment_tx, best_block, self.context.counterparty_node_id); + holder_commitment_tx, best_block, self.context.counterparty_node_id, self.context.channel_id()); channel_monitor.provide_initial_counterparty_commitment_tx( counterparty_initial_commitment_tx.trust().txid(), Vec::new(), self.context.cur_counterparty_commitment_transaction_number + 1, diff --git a/lightning/src/ln/channel_id.rs b/lightning/src/ln/channel_id.rs index 8df6d75ef5e..258abff0a44 100644 --- a/lightning/src/ln/channel_id.rs +++ b/lightning/src/ln/channel_id.rs @@ -9,6 +9,9 @@ //! ChannelId definition. +use bitcoin::hashes::Hash as _; + +use crate::chain::transaction::OutPoint; use crate::ln::msgs::DecodeError; use crate::sign::EntropySource; use crate::util::ser::{Readable, Writeable, Writer}; @@ -40,6 +43,11 @@ impl ChannelId { Self(res) } + /// Create _v1_ channel ID from a funding tx outpoint + pub fn v1_from_funding_outpoint(outpoint: OutPoint) -> Self { + Self::v1_from_funding_txid(outpoint.txid.as_byte_array(), outpoint.index) + } +
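For reference, the derivation the new helper delegates to is the BOLT 2 v1 channel ID: the funding txid (in wire byte order) with the big-endian funding output index XORed into the last two bytes. V1-established channels are the only ones where the ID can be recomputed from an `OutPoint` like this, which is why the old blanket `OutPoint::to_channel_id()` has to go. A minimal, self-contained sketch of the derivation using plain byte arrays (not the crate's `Txid`/`OutPoint` types):

```rust
// Sketch of the BOLT 2 v1 channel ID derivation that
// `ChannelId::v1_from_funding_txid` performs: XOR the big-endian
// funding output index into the last two bytes of the txid.
fn v1_channel_id(funding_txid: [u8; 32], output_index: u16) -> [u8; 32] {
    let mut id = funding_txid;
    id[30] ^= (output_index >> 8) as u8;
    id[31] ^= (output_index & 0xff) as u8;
    id
}

fn main() {
    // A funding output at index 5 flips only the low byte here.
    let id = v1_channel_id([0xab; 32], 5);
    assert_eq!(id[30], 0xab);
    assert_eq!(id[31], 0xab ^ 0x05);
}
```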
/// Create a _temporary_ channel ID randomly, based on an entropy source. pub fn temporary_from_entropy_source<ES: Deref>(entropy_source: &ES) -> Self where ES::Target: EntropySource { diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 80d129bbe6f..e1760b58ccb 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -286,6 +286,7 @@ pub(super) struct PendingAddHTLCInfo { // Note that this may be an outbound SCID alias for the associated channel. prev_short_channel_id: u64, prev_htlc_id: u64, + prev_channel_id: ChannelId, prev_funding_outpoint: OutPoint, prev_user_channel_id: u128, } @@ -321,6 +322,7 @@ pub(crate) struct HTLCPreviousHopData { incoming_packet_shared_secret: [u8; 32], phantom_shared_secret: Option<[u8; 32]>, blinded_failure: Option<BlindedFailure>, + channel_id: ChannelId, // This field is consumed by `claim_funds_from_hop()` when updating a force-closed backwards // channel with a preimage provided by the forward channel. @@ -361,7 +363,7 @@ struct ClaimableHTLC { impl From<&ClaimableHTLC> for events::ClaimedHTLC { fn from(val: &ClaimableHTLC) -> Self { events::ClaimedHTLC { - channel_id: val.prev_hop.outpoint.to_channel_id(), + channel_id: val.prev_hop.channel_id, user_channel_id: val.prev_hop.user_channel_id.unwrap_or(0), cltv_expiry: val.cltv_expiry, value_msat: val.value, @@ -704,7 +706,7 @@ enum BackgroundEvent { /// /// Note that any such events are lost on shutdown, so in general they must be updates which /// are regenerated on startup. - ClosedMonitorUpdateRegeneratedOnStartup((OutPoint, ChannelMonitorUpdate)), + ClosedMonitorUpdateRegeneratedOnStartup((OutPoint, Option<ChannelId>, ChannelMonitorUpdate)), /// Handle a ChannelMonitorUpdate which may or may not close the channel and may unblock the /// channel to continue normal operation. /// @@ -718,6 +720,7 @@ enum BackgroundEvent { MonitorUpdateRegeneratedOnStartup { counterparty_node_id: PublicKey, funding_txo: OutPoint, + channel_id: Option<ChannelId>, update: ChannelMonitorUpdate }, /// Some [`ChannelMonitorUpdate`] (s) completed before we were serialized but we still have @@ -746,7 +749,7 @@ pub(crate) enum MonitorUpdateCompletionAction { /// outbound edge. EmitEventAndFreeOtherChannel { event: events::Event, - downstream_counterparty_and_funding_outpoint: Option<(PublicKey, OutPoint, RAAMonitorUpdateBlockingAction)>, + downstream_counterparty_and_funding_outpoint: Option<(PublicKey, OutPoint, Option<ChannelId>, RAAMonitorUpdateBlockingAction)>, }, /// Indicates we should immediately resume the operation of another channel, unless there is /// some other reason why the channel is blocked.
In practice this simply means immediately @@ -764,6 +767,7 @@ pub(crate) enum MonitorUpdateCompletionAction { downstream_counterparty_node_id: PublicKey, downstream_funding_outpoint: OutPoint, blocking_action: RAAMonitorUpdateBlockingAction, + downstream_channel_id: Option<ChannelId>, }, } @@ -775,6 +779,7 @@ impl_writeable_tlv_based_enum_upgradable!(MonitorUpdateCompletionAction, (0, downstream_counterparty_node_id, required), (2, downstream_funding_outpoint, required), (4, blocking_action, required), + (5, downstream_channel_id, option), }, (2, EmitEventAndFreeOtherChannel) => { (0, event, upgradable_required), @@ -792,12 +797,14 @@ pub(crate) enum EventCompletionAction { ReleaseRAAChannelMonitorUpdate { counterparty_node_id: PublicKey, channel_funding_outpoint: OutPoint, + channel_id: Option<ChannelId>, }, } impl_writeable_tlv_based_enum!(EventCompletionAction, (0, ReleaseRAAChannelMonitorUpdate) => { (0, channel_funding_outpoint, required), (2, counterparty_node_id, required), + (3, channel_id, option), }; ); @@ -819,7 +826,7 @@ pub(crate) enum RAAMonitorUpdateBlockingAction { impl RAAMonitorUpdateBlockingAction { fn from_prev_hop_data(prev_hop: &HTLCPreviousHopData) -> Self { Self::ForwardedPaymentInboundClaim { - channel_id: prev_hop.outpoint.to_channel_id(), + channel_id: prev_hop.channel_id, htlc_id: prev_hop.htlc_id, } } @@ -1623,8 +1630,8 @@ pub struct ChannelDetails { /// The Channel's funding transaction output, if we've negotiated the funding transaction with /// our counterparty already. /// - /// Note that, if this has been set, `channel_id` will be equivalent to - /// `funding_txo.unwrap().to_channel_id()`. + /// Note that, if this has been set, `channel_id` for V1-established channels will be equivalent to + /// `ChannelId::v1_from_funding_outpoint(funding_txo.unwrap())`. pub funding_txo: Option<OutPoint>, /// The features which this channel operates with. See individual features for more info. /// @@ -2286,7 +2293,7 @@ macro_rules! handle_new_monitor_update { handle_new_monitor_update!($self, $update_res, $chan, _internal, handle_monitor_update_completion!($self, $peer_state_lock, $peer_state, $per_peer_state_lock, $chan)) }; - ($self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { { + ($self: ident, $funding_txo: expr, $channel_id: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr) => { { let in_flight_updates = $peer_state.in_flight_monitor_updates.entry($funding_txo) .or_insert_with(Vec::new); // During startup, we push monitor updates as background events through to here in @@ -2297,7 +2304,7 @@ macro_rules! handle_new_monitor_update { in_flight_updates.push($update); in_flight_updates.len() - 1 }); - let update_res = $self.chain_monitor.update_channel($funding_txo, &in_flight_updates[idx]); + let update_res = $self.chain_monitor.update_channel($funding_txo, $channel_id, &in_flight_updates[idx]); handle_new_monitor_update!($self, update_res, $chan, _internal, { let _ = in_flight_updates.remove(idx); @@ -2763,7 +2770,7 @@ where // Update the monitor with the shutdown script if necessary.
if let Some(monitor_update) = monitor_update_opt.take() { - handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update, + handle_new_monitor_update!(self, funding_txo_opt.unwrap(), *channel_id, monitor_update, peer_state_lock, peer_state, per_peer_state, chan); } } else { @@ -2873,12 +2880,13 @@ where let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id }; self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver); } - if let Some((_, funding_txo, monitor_update)) = shutdown_res.monitor_update { + if let Some((_, funding_txo, channel_id, monitor_update)) = shutdown_res.monitor_update { // There isn't anything we can do if we get an update failure - we're already // force-closing. The monitor update on the required in-memory copy should broadcast // the latest local state, which is the best we can do anyway. Thus, it is safe to // ignore the result here. - let _ = self.chain_monitor.update_channel(funding_txo, &monitor_update); + let channel_id = channel_id.unwrap_or(ChannelId::v1_from_funding_outpoint(funding_txo)); + let _ = self.chain_monitor.update_channel(funding_txo, channel_id, &monitor_update); } let mut shutdown_results = Vec::new(); if let Some(txid) = shutdown_res.unbroadcasted_batch_funding_txid { @@ -3396,6 +3404,7 @@ where return Err(APIError::ChannelUnavailable{err: "Peer for first hop currently disconnected".to_owned()}); } let funding_txo = chan.context.get_funding_txo().unwrap(); + let channel_id = chan.context.channel_id(); let logger = WithChannelContext::from(&self.logger, &chan.context); let send_res = chan.send_htlc_and_commit(htlc_msat, payment_hash.clone(), htlc_cltv, HTLCSource::OutboundRoute { @@ -3406,7 +3415,7 @@ where }, onion_packet, None, &self.fee_estimator, &&logger); match break_chan_phase_entry!(self, send_res, chan_phase_entry) { Some(monitor_update) => { - match handle_new_monitor_update!(self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, chan) { + match handle_new_monitor_update!(self, funding_txo, channel_id, monitor_update, peer_state_lock, peer_state, per_peer_state, chan) { false => { // Note that MonitorUpdateInProgress here indicates (per function // docs) that we will resend the commitment update once monitor @@ -3944,7 +3953,10 @@ where } let outpoint = OutPoint { txid: tx.txid(), index: output_index.unwrap() }; if let Some(funding_batch_state) = funding_batch_state.as_mut() { - funding_batch_state.push((outpoint.to_channel_id(), *counterparty_node_id, false)); + // TODO(dual_funding): We only do batch funding for V1 channels at the moment, but we'll probably + // need to fix this somehow to not rely on using the outpoint for the channel ID if we + // want to support V2 batching here as well. 
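The `channel_id.unwrap_or(ChannelId::v1_from_funding_outpoint(...))` dance in the shutdown path above recurs throughout this patch: the ID travels as an `Option<ChannelId>` purely so data serialized by older versions (which stored only the funding outpoint) still deserializes, and everything those versions could have written was a V1-established channel, where the v1 derivation reproduces the original ID. A hedged sketch of that helper shape, using the crate paths from this branch (the batch-funding push the TODO above refers to continues right below):

```rust
use lightning::chain::transaction::OutPoint;
use lightning::ln::ChannelId;

// Sketch of the backwards-compat fallback used throughout this patch:
// `None` can only come from data written before the ID was persisted,
// i.e. from a V1-established channel, where the funding outpoint
// fully determines the channel ID.
fn effective_channel_id(persisted: Option<ChannelId>, funding_txo: OutPoint) -> ChannelId {
    persisted.unwrap_or_else(|| ChannelId::v1_from_funding_outpoint(funding_txo))
}
```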
+ funding_batch_state.push((ChannelId::v1_from_funding_outpoint(outpoint), *counterparty_node_id, false)); } Ok(outpoint) }) @@ -4168,6 +4180,7 @@ where let mut per_source_pending_forward = [( payment.prev_short_channel_id, payment.prev_funding_outpoint, + payment.prev_channel_id, payment.prev_user_channel_id, vec![(pending_htlc_info, payment.prev_htlc_id)] )]; @@ -4195,6 +4208,7 @@ where short_channel_id: payment.prev_short_channel_id, user_channel_id: Some(payment.prev_user_channel_id), outpoint: payment.prev_funding_outpoint, + channel_id: payment.prev_channel_id, htlc_id: payment.prev_htlc_id, incoming_packet_shared_secret: payment.forward_info.incoming_shared_secret, phantom_shared_secret: None, @@ -4218,7 +4232,7 @@ where let mut new_events = VecDeque::new(); let mut failed_forwards = Vec::new(); - let mut phantom_receives: Vec<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> = Vec::new(); + let mut phantom_receives: Vec<(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)> = Vec::new(); { let mut forward_htlcs = HashMap::new(); mem::swap(&mut forward_htlcs, &mut self.forward_htlcs.lock().unwrap()); @@ -4231,20 +4245,21 @@ where for forward_info in pending_forwards.drain(..) { match forward_info { HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { - prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id, - forward_info: PendingHTLCInfo { + prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint, + prev_user_channel_id, forward_info: PendingHTLCInfo { routing, incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value, .. } }) => { macro_rules! failure_handler { ($msg: expr, $err_code: expr, $err_data: expr, $phantom_ss: expr, $next_hop_unknown: expr) => { - let logger = WithContext::from(&self.logger, forwarding_counterparty, Some(prev_funding_outpoint.to_channel_id())); + let logger = WithContext::from(&self.logger, forwarding_counterparty, Some(prev_channel_id)); log_info!(logger, "Failed to accept/forward incoming HTLC: {}", $msg); let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id: prev_short_channel_id, user_channel_id: Some(prev_user_channel_id), + channel_id: prev_channel_id, outpoint: prev_funding_outpoint, htlc_id: prev_htlc_id, incoming_packet_shared_secret: incoming_shared_secret, @@ -4308,7 +4323,7 @@ where outgoing_cltv_value, Some(phantom_shared_secret), false, None, current_height, self.default_configuration.accept_mpp_keysend) { - Ok(info) => phantom_receives.push((prev_short_channel_id, prev_funding_outpoint, prev_user_channel_id, vec![(info, prev_htlc_id)])), + Ok(info) => phantom_receives.push((prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_user_channel_id, vec![(info, prev_htlc_id)])), Err(InboundOnionErr { err_code, err_data, msg }) => failed_payment!(msg, err_code, err_data, Some(phantom_shared_secret)) } }, @@ -4353,8 +4368,8 @@ where for forward_info in pending_forwards.drain(..) { match forward_info { HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { - prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id, - forward_info: PendingHTLCInfo { + prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint, + prev_user_channel_id, forward_info: PendingHTLCInfo { incoming_shared_secret, payment_hash, outgoing_amt_msat, outgoing_cltv_value, routing: PendingHTLCRouting::Forward { onion_packet, blinded, .. 
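The forwarding hunks around here are mechanical: every structure that remembered only `prev_funding_outpoint` for the inbound edge (pending forwards, phantom receives, previous-hop data) now carries `prev_channel_id` beside it, so failure and claim paths can name the inbound channel without re-deriving an ID from the outpoint. Schematically, with the crate-private HTLC type stubbed out:

```rust
use lightning::chain::transaction::OutPoint;
use lightning::ln::ChannelId;

// Stub for the crate-private per-HTLC type; illustration only.
#[allow(dead_code)]
struct PendingHTLCInfo;

// Per-source pending-forwards tuple, before and after this patch: the new
// ChannelId element rides along with the inbound edge's funding outpoint.
#[allow(dead_code)]
type PerSourceBefore = (u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>);
#[allow(dead_code)]
type PerSourceAfter = (u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>);
```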
@@ -4365,6 +4380,7 @@ where let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id: prev_short_channel_id, user_channel_id: Some(prev_user_channel_id), + channel_id: prev_channel_id, outpoint: prev_funding_outpoint, htlc_id: prev_htlc_id, incoming_packet_shared_secret: incoming_shared_secret, @@ -4441,8 +4457,8 @@ where 'next_forwardable_htlc: for forward_info in pending_forwards.drain(..) { match forward_info { HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { - prev_short_channel_id, prev_htlc_id, prev_funding_outpoint, prev_user_channel_id, - forward_info: PendingHTLCInfo { + prev_short_channel_id, prev_htlc_id, prev_channel_id, prev_funding_outpoint, + prev_user_channel_id, forward_info: PendingHTLCInfo { routing, incoming_shared_secret, payment_hash, incoming_amt_msat, outgoing_amt_msat, skimmed_fee_msat, .. } @@ -4476,6 +4492,7 @@ where prev_hop: HTLCPreviousHopData { short_channel_id: prev_short_channel_id, user_channel_id: Some(prev_user_channel_id), + channel_id: prev_channel_id, outpoint: prev_funding_outpoint, htlc_id: prev_htlc_id, incoming_packet_shared_secret: incoming_shared_secret, @@ -4507,6 +4524,7 @@ where failed_forwards.push((HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id: $htlc.prev_hop.short_channel_id, user_channel_id: $htlc.prev_hop.user_channel_id, + channel_id: prev_channel_id, outpoint: prev_funding_outpoint, htlc_id: $htlc.prev_hop.htlc_id, incoming_packet_shared_secret: $htlc.prev_hop.incoming_packet_shared_secret, @@ -4587,7 +4605,6 @@ where #[allow(unused_assignments)] { committed_to_claimable = true; } - let prev_channel_id = prev_funding_outpoint.to_channel_id(); htlcs.push(claimable_htlc); let amount_msat = htlcs.iter().map(|htlc| htlc.value).sum(); htlcs.iter_mut().for_each(|htlc| htlc.total_value_received = Some(amount_msat)); @@ -4731,23 +4748,25 @@ where for event in background_events.drain(..) { match event { - BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((funding_txo, update)) => { + BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((funding_txo, channel_id, update)) => { // The channel has already been closed, so no use bothering to care about the // monitor updating completing. 
- let _ = self.chain_monitor.update_channel(funding_txo, &update); + let channel_id = channel_id.unwrap_or(ChannelId::v1_from_funding_outpoint(funding_txo)); + let _ = self.chain_monitor.update_channel(funding_txo, channel_id, &update); }, - BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, update } => { + BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, channel_id, update } => { let mut updated_chan = false; { let per_peer_state = self.per_peer_state.read().unwrap(); if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) { let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; - match peer_state.channel_by_id.entry(funding_txo.to_channel_id()) { + match peer_state.channel_by_id.entry(channel_id.unwrap_or(ChannelId::v1_from_funding_outpoint(funding_txo))) { hash_map::Entry::Occupied(mut chan_phase) => { if let ChannelPhase::Funded(chan) = chan_phase.get_mut() { updated_chan = true; - handle_new_monitor_update!(self, funding_txo, update.clone(), + let channel_id = channel_id.unwrap_or(ChannelId::v1_from_funding_outpoint(funding_txo)); + handle_new_monitor_update!(self, funding_txo, channel_id, update.clone(), peer_state_lock, peer_state, per_peer_state, chan); } else { debug_assert!(false, "We shouldn't have an update for a non-funded channel"); @@ -4758,8 +4777,9 @@ where } } if !updated_chan { + let channel_id = channel_id.unwrap_or(ChannelId::v1_from_funding_outpoint(funding_txo)); // TODO: Track this as in-flight even though the channel is closed. - let _ = self.chain_monitor.update_channel(funding_txo, &update); + let _ = self.chain_monitor.update_channel(funding_txo, channel_id, &update); } }, BackgroundEvent::MonitorUpdatesComplete { counterparty_node_id, channel_id } => { @@ -5283,10 +5303,10 @@ where }, HTLCSource::PreviousHopData(HTLCPreviousHopData { ref short_channel_id, ref htlc_id, ref incoming_packet_shared_secret, - ref phantom_shared_secret, ref outpoint, ref blinded_failure, .. + ref phantom_shared_secret, outpoint: _, ref blinded_failure, ref channel_id, .. }) => { log_trace!( - WithContext::from(&self.logger, None, Some(outpoint.to_channel_id())), + WithContext::from(&self.logger, None, Some(*channel_id)), "Failing {}HTLC with payment_hash {} backwards from us: {:?}", if blinded_failure.is_some() { "blinded " } else { "" }, &payment_hash, onion_error ); @@ -5330,7 +5350,7 @@ where if push_forward_ev { self.push_pending_forwards_ev(); } let mut pending_events = self.pending_events.lock().unwrap(); pending_events.push_back((events::Event::HTLCHandlingFailed { - prev_channel_id: outpoint.to_channel_id(), + prev_channel_id: *channel_id, failed_next_destination: destination, }, None)); }, @@ -5471,7 +5491,7 @@ where } if valid_mpp { for htlc in sources.drain(..) 
{ - let prev_hop_chan_id = htlc.prev_hop.outpoint.to_channel_id(); + let prev_hop_chan_id = htlc.prev_hop.channel_id; if let Err((pk, err)) = self.claim_funds_from_hop( htlc.prev_hop, payment_preimage, |_, definitely_duplicate| { @@ -5524,7 +5544,7 @@ where { let per_peer_state = self.per_peer_state.read().unwrap(); - let chan_id = prev_hop.outpoint.to_channel_id(); + let chan_id = prev_hop.channel_id; let counterparty_node_id_opt = match self.short_to_chan_info.read().unwrap().get(&prev_hop.short_channel_id) { Some((cp_id, _dup_chan_id)) => Some(cp_id.clone()), None => None @@ -5552,7 +5572,8 @@ where peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action); } if !during_init { - handle_new_monitor_update!(self, prev_hop.outpoint, monitor_update, peer_state_lock, + let prev_hop_channel_id = prev_hop.channel_id; + handle_new_monitor_update!(self, prev_hop.outpoint, prev_hop_channel_id, monitor_update, peer_state_lock, peer_state, per_peer_state, chan); } else { // If we're running during init we cannot update a monitor directly - @@ -5562,6 +5583,7 @@ where BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo: prev_hop.outpoint, + channel_id: Some(prev_hop.channel_id), update: monitor_update.clone(), }); } @@ -5576,13 +5598,13 @@ where log_trace!(logger, "Completing monitor update completion action for channel {} as claim was redundant: {:?}", chan_id, action); - let (node_id, funding_outpoint, blocker) = + let (node_id, funding_outpoint, channel_id, blocker) = if let MonitorUpdateCompletionAction::FreeOtherChannelImmediately { downstream_counterparty_node_id: node_id, downstream_funding_outpoint: funding_outpoint, - blocking_action: blocker, + blocking_action: blocker, downstream_channel_id: channel_id, } = action { - (node_id, funding_outpoint, blocker) + (node_id, funding_outpoint, channel_id, blocker) } else { debug_assert!(false, "Duplicate claims should always free another channel immediately"); @@ -5592,7 +5614,7 @@ where let mut peer_state = peer_state_mtx.lock().unwrap(); if let Some(blockers) = peer_state .actions_blocking_raa_monitor_updates - .get_mut(&funding_outpoint.to_channel_id()) + .get_mut(&channel_id.unwrap_or(ChannelId::v1_from_funding_outpoint(funding_outpoint))) { let mut found_blocker = false; blockers.retain(|iter| { @@ -5623,16 +5645,19 @@ where }], }; + let prev_hop_channel_id = prev_hop.channel_id; + if !during_init { // We update the ChannelMonitor on the backward link, after // receiving an `update_fulfill_htlc` from the forward link. - let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, &preimage_update); + let update_res = self.chain_monitor.update_channel(prev_hop.outpoint, prev_hop_channel_id, &preimage_update); if update_res != ChannelMonitorUpdateStatus::Completed { // TODO: This needs to be handled somehow - if we receive a monitor update // with a preimage we *must* somehow manage to propagate it to the upstream // channel, or we must have an ability to receive the same event and try // again on restart. 
- log_error!(WithContext::from(&self.logger, None, Some(prev_hop.outpoint.to_channel_id())), "Critical error: failed to update channel monitor with preimage {:?}: {:?}", + log_error!(WithContext::from(&self.logger, None, Some(prev_hop.channel_id)), + "Critical error: failed to update channel monitor with preimage {:?}: {:?}", payment_preimage, update_res); } } else { @@ -5648,7 +5673,7 @@ where // complete the monitor update completion action from `completion_action`. self.pending_background_events.lock().unwrap().push( BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup(( - prev_hop.outpoint, preimage_update, + prev_hop.outpoint, Some(prev_hop.channel_id), preimage_update, ))); } // Note that we do process the completion action here. This totally could be a @@ -5666,7 +5691,8 @@ where fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, startup_replay: bool, - next_channel_counterparty_node_id: Option<PublicKey>, next_channel_outpoint: OutPoint + next_channel_counterparty_node_id: Option<PublicKey>, next_channel_outpoint: OutPoint, + next_channel_id: Option<ChannelId>, ) { match source { HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => { debug_assert_eq!(pubkey, path.hops[0].pubkey); } let ev_completion_action = EventCompletionAction::ReleaseRAAChannelMonitorUpdate { - channel_funding_outpoint: next_channel_outpoint, + channel_funding_outpoint: next_channel_outpoint, channel_id: next_channel_id, counterparty_node_id: path.hops[0].pubkey, }; self.pending_outbound_payments.claim_htlc(payment_id, payment_preimage, &self.logger); }, HTLCSource::PreviousHopData(hop_data) => { - let prev_outpoint = hop_data.outpoint; + let prev_channel_id = hop_data.channel_id; let completed_blocker = RAAMonitorUpdateBlockingAction::from_prev_hop_data(&hop_data); #[cfg(debug_assertions)] let claiming_chan_funding_outpoint = hop_data.outpoint; + #[cfg(debug_assertions)] + let claiming_channel_id = hop_data.channel_id; let res = self.claim_funds_from_hop(hop_data, payment_preimage, |htlc_claim_value_msat, definitely_duplicate| { let chan_to_release = if let Some(node_id) = next_channel_counterparty_node_id { - Some((node_id, next_channel_outpoint, completed_blocker)) + Some((node_id, next_channel_outpoint, next_channel_id, completed_blocker)) } else { // We can only get `None` here if we are processing a // `ChannelMonitor`-originated event, in which case we @@ -5729,7 +5757,7 @@ where }, // or the channel we'd unblock is already closed, BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup( - (funding_txo, monitor_update) + (funding_txo, _channel_id, monitor_update) ) => { if *funding_txo == next_channel_outpoint { assert_eq!(monitor_update.updates.len(), 1); @@ -5745,7 +5773,7 @@ where BackgroundEvent::MonitorUpdatesComplete { channel_id, ..
} => - *channel_id == claiming_chan_funding_outpoint.to_channel_id(), + *channel_id == claiming_channel_id, } }), "{:?}", *background_events); } @@ -5755,7 +5783,8 @@ where Some(MonitorUpdateCompletionAction::FreeOtherChannelImmediately { downstream_counterparty_node_id: other_chan.0, downstream_funding_outpoint: other_chan.1, - blocking_action: other_chan.2, + downstream_channel_id: other_chan.2, + blocking_action: other_chan.3, }) } else { None } } else { @@ -5768,8 +5797,8 @@ where event: events::Event::PaymentForwarded { fee_earned_msat, claim_from_onchain_tx: from_onchain, - prev_channel_id: Some(prev_outpoint.to_channel_id()), - next_channel_id: Some(next_channel_outpoint.to_channel_id()), + prev_channel_id: Some(prev_channel_id), + next_channel_id: Some(next_channel_id.unwrap_or(ChannelId::v1_from_funding_outpoint(next_channel_outpoint))), outbound_amount_forwarded_msat: forwarded_htlc_value_msat, }, downstream_counterparty_and_funding_outpoint: chan_to_release, @@ -5819,16 +5848,17 @@ where event, downstream_counterparty_and_funding_outpoint } => { self.pending_events.lock().unwrap().push_back((event, None)); - if let Some((node_id, funding_outpoint, blocker)) = downstream_counterparty_and_funding_outpoint { - self.handle_monitor_update_release(node_id, funding_outpoint, Some(blocker)); + if let Some((node_id, funding_outpoint, channel_id, blocker)) = downstream_counterparty_and_funding_outpoint { + self.handle_monitor_update_release(node_id, funding_outpoint, channel_id, Some(blocker)); } }, MonitorUpdateCompletionAction::FreeOtherChannelImmediately { - downstream_counterparty_node_id, downstream_funding_outpoint, blocking_action, + downstream_counterparty_node_id, downstream_funding_outpoint, downstream_channel_id, blocking_action, } => { self.handle_monitor_update_release( downstream_counterparty_node_id, downstream_funding_outpoint, + downstream_channel_id, Some(blocking_action), ); }, @@ -5843,7 +5873,7 @@ where commitment_update: Option<msgs::CommitmentUpdate>, order: RAACommitmentOrder, pending_forwards: Vec<(PendingHTLCInfo, u64)>, funding_broadcastable: Option<Transaction>, channel_ready: Option<msgs::ChannelReady>, announcement_sigs: Option<msgs::AnnouncementSignatures>) - -> Option<(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)> { + -> Option<(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)> { let logger = WithChannelContext::from(&self.logger, &channel.context); log_trace!(logger, "Handling channel resumption for channel {} with {} RAA, {} commitment update, {} pending forwards, {}broadcasting funding, {} channel ready, {} announcement", &channel.context.channel_id(), @@ -5858,7 +5888,7 @@ where let counterparty_node_id = channel.context.get_counterparty_node_id(); if !pending_forwards.is_empty() { htlc_forwards = Some((channel.context.get_short_channel_id().unwrap_or(channel.context.outbound_scid_alias()), - channel.context.get_funding_txo().unwrap(), channel.context.get_user_id(), pending_forwards)); + channel.context.get_funding_txo().unwrap(), channel.context.channel_id(), channel.context.get_user_id(), pending_forwards)); } if let Some(msg) = channel_ready { @@ -5912,9 +5942,10 @@ where htlc_forwards } - fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64, counterparty_node_id: Option<&PublicKey>) { + fn channel_monitor_updated(&self, funding_txo: &OutPoint, channel_id: &Option<ChannelId>, highest_applied_update_id: u64, counterparty_node_id: Option<&PublicKey>) { debug_assert!(self.total_consistency_lock.try_write().is_err()); // Caller holds read lock + let channel_id =
channel_id.unwrap_or(ChannelId::v1_from_funding_outpoint(funding_txo.clone())); let counterparty_node_id = match counterparty_node_id { Some(cp_id) => cp_id.clone(), None => { @@ -5934,11 +5965,11 @@ where peer_state_lock = peer_state_mutex_opt.unwrap().lock().unwrap(); let peer_state = &mut *peer_state_lock; let channel = - if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(&funding_txo.to_channel_id()) { + if let Some(ChannelPhase::Funded(chan)) = peer_state.channel_by_id.get_mut(&channel_id) { chan } else { let update_actions = peer_state.monitor_update_blocked_actions - .remove(&funding_txo.to_channel_id()).unwrap_or(Vec::new()); + .remove(&channel_id).unwrap_or(Vec::new()); mem::drop(peer_state_lock); mem::drop(per_peer_state); self.handle_monitor_update_completion_actions(update_actions); @@ -6329,7 +6360,8 @@ where fail_chan!("The funding_created message had the same funding_txid as an existing channel - funding is not possible"); }, hash_map::Entry::Vacant(i_e) => { - let monitor_res = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor); + let channel_id = monitor.get_channel_id(); + let monitor_res = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, channel_id, monitor); if let Ok(persist_state) = monitor_res { i_e.insert(chan.context.get_counterparty_node_id()); mem::drop(outpoint_to_peer_lock); @@ -6387,7 +6419,7 @@ where chan.funding_signed(&msg, best_block, &self.signer_provider, &&logger); match res { Ok((chan, monitor)) => { - if let Ok(persist_status) = self.chain_monitor.watch_channel(chan.context.get_funding_txo().unwrap(), monitor) { + if let Ok(persist_status) = self.chain_monitor.watch_channel(chan.context.get_funding_txo().unwrap(), chan.context.channel_id(), monitor) { // We really should be able to insert here without doing a second // lookup, but sadly rust stdlib doesn't currently allow keeping // the original Entry around with the value removed. @@ -6512,7 +6544,7 @@ where } // Update the monitor with the shutdown script if necessary. if let Some(monitor_update) = monitor_update_opt { - handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update, + handle_new_monitor_update!(self, funding_txo_opt.unwrap(), chan.context.channel_id(), monitor_update, peer_state_lock, peer_state, per_peer_state, chan); } }, @@ -6721,7 +6753,8 @@ where hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! 
No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) } }; - self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, false, Some(*counterparty_node_id), funding_txo); + self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), + false, false, Some(*counterparty_node_id), funding_txo, Some(msg.channel_id)); Ok(()) } @@ -6795,7 +6828,7 @@ where let funding_txo = chan.context.get_funding_txo(); let monitor_update_opt = try_chan_phase_entry!(self, chan.commitment_signed(&msg, &&logger), chan_phase_entry); if let Some(monitor_update) = monitor_update_opt { - handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, peer_state_lock, + handle_new_monitor_update!(self, funding_txo.unwrap(), chan.context.channel_id(), monitor_update, peer_state_lock, peer_state, per_peer_state, chan); } Ok(()) @@ -6809,8 +6842,8 @@ where } #[inline] - fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, OutPoint, u128, Vec<(PendingHTLCInfo, u64)>)]) { - for &mut (prev_short_channel_id, prev_funding_outpoint, prev_user_channel_id, ref mut pending_forwards) in per_source_pending_forwards { + fn forward_htlcs(&self, per_source_pending_forwards: &mut [(u64, OutPoint, ChannelId, u128, Vec<(PendingHTLCInfo, u64)>)]) { + for &mut (prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_user_channel_id, ref mut pending_forwards) in per_source_pending_forwards { let mut push_forward_event = false; let mut new_intercept_events = VecDeque::new(); let mut failed_intercept_forwards = Vec::new(); @@ -6829,7 +6862,7 @@ where match forward_htlcs.entry(scid) { hash_map::Entry::Occupied(mut entry) => { entry.get_mut().push(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { - prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info })); + prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info })); }, hash_map::Entry::Vacant(entry) => { if !is_our_scid && forward_info.incoming_amt_msat.is_some() && @@ -6847,15 +6880,16 @@ where intercept_id }, None)); entry.insert(PendingAddHTLCInfo { - prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info }); + prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info }); }, hash_map::Entry::Occupied(_) => { - let logger = WithContext::from(&self.logger, None, Some(prev_funding_outpoint.to_channel_id())); + let logger = WithContext::from(&self.logger, None, Some(prev_channel_id)); log_info!(logger, "Failed to forward incoming HTLC: detected duplicate intercepted payment over short channel id {}", scid); let htlc_source = HTLCSource::PreviousHopData(HTLCPreviousHopData { short_channel_id: prev_short_channel_id, user_channel_id: Some(prev_user_channel_id), outpoint: prev_funding_outpoint, + channel_id: prev_channel_id, htlc_id: prev_htlc_id, incoming_packet_shared_secret: forward_info.incoming_shared_secret, phantom_shared_secret: None, @@ -6875,7 +6909,7 @@ where push_forward_event = true; } entry.insert(vec!(HTLCForwardInfo::AddHTLC(PendingAddHTLCInfo { - prev_short_channel_id, prev_funding_outpoint, prev_htlc_id, prev_user_channel_id, forward_info }))); + prev_short_channel_id, prev_funding_outpoint, prev_channel_id, prev_htlc_id, prev_user_channel_id, forward_info }))); } } } @@ -6919,13 +6953,14 @@ where /// the [`ChannelMonitorUpdate`] in question. 
fn raa_monitor_updates_held(&self, actions_blocking_raa_monitor_updates: &BTreeMap<ChannelId, Vec<RAAMonitorUpdateBlockingAction>>, - channel_funding_outpoint: OutPoint, counterparty_node_id: PublicKey + channel_funding_outpoint: OutPoint, channel_id: Option<ChannelId>, counterparty_node_id: PublicKey ) -> bool { actions_blocking_raa_monitor_updates - .get(&channel_funding_outpoint.to_channel_id()).map(|v| !v.is_empty()).unwrap_or(false) + .get(&channel_id.unwrap_or(ChannelId::v1_from_funding_outpoint(channel_funding_outpoint))).map(|v| !v.is_empty()).unwrap_or(false) || self.pending_events.lock().unwrap().iter().any(|(_, action)| { action == &Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate { channel_funding_outpoint, + channel_id, counterparty_node_id, }) }) @@ -6942,7 +6977,7 @@ where if let Some(chan) = peer_state.channel_by_id.get(&channel_id) { return self.raa_monitor_updates_held(&peer_state.actions_blocking_raa_monitor_updates, - chan.context().get_funding_txo().unwrap(), counterparty_node_id); + chan.context().get_funding_txo().unwrap(), Some(channel_id), counterparty_node_id); } } false @@ -6964,7 +6999,7 @@ where let funding_txo_opt = chan.context.get_funding_txo(); let mon_update_blocked = if let Some(funding_txo) = funding_txo_opt { self.raa_monitor_updates_held( - &peer_state.actions_blocking_raa_monitor_updates, funding_txo, + &peer_state.actions_blocking_raa_monitor_updates, funding_txo, Some(msg.channel_id), *counterparty_node_id) } else { false }; let (htlcs_to_fail, monitor_update_opt) = try_chan_phase_entry!(self, @@ -6972,7 +7007,7 @@ where if let Some(monitor_update) = monitor_update_opt { let funding_txo = funding_txo_opt .expect("Funding outpoint must have been set for RAA handling to succeed"); - handle_new_monitor_update!(self, funding_txo, monitor_update, + handle_new_monitor_update!(self, funding_txo, chan.context.channel_id(), monitor_update, peer_state_lock, peer_state, per_peer_state, chan); } htlcs_to_fail @@ -7210,22 +7245,23 @@ where let mut failed_channels = Vec::new(); let mut pending_monitor_events = self.chain_monitor.release_pending_monitor_events(); let has_pending_monitor_events = !pending_monitor_events.is_empty(); - for (funding_outpoint, mut monitor_events, counterparty_node_id) in pending_monitor_events.drain(..) { + for (funding_outpoint, channel_id, mut monitor_events, counterparty_node_id) in pending_monitor_events.drain(..) { + let channel_id = channel_id.unwrap_or(ChannelId::v1_from_funding_outpoint(funding_outpoint)); for monitor_event in monitor_events.drain(..)
{ match monitor_event { MonitorEvent::HTLCEvent(htlc_update) => { - let logger = WithContext::from(&self.logger, counterparty_node_id, Some(funding_outpoint.to_channel_id())); + let logger = WithContext::from(&self.logger, counterparty_node_id, Some(channel_id)); if let Some(preimage) = htlc_update.payment_preimage { log_trace!(logger, "Claiming HTLC with preimage {} from our monitor", preimage); - self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, false, counterparty_node_id, funding_outpoint); + self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, false, counterparty_node_id, funding_outpoint, Some(channel_id)); } else { log_trace!(logger, "Failing HTLC with hash {} from our monitor", &htlc_update.payment_hash); - let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() }; + let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id }; let reason = HTLCFailReason::from_failure_code(0x4000 | 8); self.fail_htlc_backwards_internal(&htlc_update.source, &htlc_update.payment_hash, &reason, receiver); } }, - MonitorEvent::HolderForceClosed(funding_outpoint) => { + MonitorEvent::HolderForceClosed(_funding_outpoint) => { let counterparty_node_id_opt = match counterparty_node_id { Some(cp_id) => Some(cp_id), None => { @@ -7241,7 +7277,7 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; let pending_msg_events = &mut peer_state.pending_msg_events; - if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(funding_outpoint.to_channel_id()) { + if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(channel_id) { if let ChannelPhase::Funded(mut chan) = remove_channel_phase!(self, chan_phase_entry) { failed_channels.push(chan.context.force_shutdown(false)); if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { @@ -7261,8 +7297,8 @@ where } } }, - MonitorEvent::Completed { funding_txo, monitor_update_id } => { - self.channel_monitor_updated(&funding_txo, monitor_update_id, counterparty_node_id.as_ref()); + MonitorEvent::Completed { funding_txo, channel_id, monitor_update_id } => { + self.channel_monitor_updated(&funding_txo, &channel_id, monitor_update_id, counterparty_node_id.as_ref()); }, } } @@ -7314,7 +7350,7 @@ where if let Some(monitor_update) = monitor_opt { has_monitor_update = true; - handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, + handle_new_monitor_update!(self, funding_txo.unwrap(), chan.context.channel_id(), monitor_update, peer_state_lock, peer_state, per_peer_state, chan); continue 'peer_loop; } @@ -7481,14 +7517,14 @@ where // Channel::force_shutdown tries to make us do) as we may still be in initialization, // so we track the update internally and handle it when the user next calls // timer_tick_occurred, guaranteeing we're running normally. 
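As the comment above explains, force-close monitor updates generated while the manager may still be initializing are queued rather than applied directly. The queued event now records the channel ID next to the funding outpoint, so replay can index `channel_by_id` without re-deriving an ID. A mirror of the (crate-private) variant being populated, as a compilable sketch:

```rust
use bitcoin::secp256k1::PublicKey;
use lightning::chain::channelmonitor::ChannelMonitorUpdate;
use lightning::chain::transaction::OutPoint;
use lightning::ln::ChannelId;

// Mirror of the crate-private BackgroundEvent variant this hunk populates;
// channel_id is optional only because updates persisted by older versions
// never stored one.
#[allow(dead_code)]
enum BackgroundEventSketch {
    MonitorUpdateRegeneratedOnStartup {
        counterparty_node_id: PublicKey,
        funding_txo: OutPoint,
        channel_id: Option<ChannelId>,
        update: ChannelMonitorUpdate,
    },
}
```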
- if let Some((counterparty_node_id, funding_txo, update)) = failure.monitor_update.take() { + if let Some((counterparty_node_id, funding_txo, channel_id, update)) = failure.monitor_update.take() { assert_eq!(update.updates.len(), 1); if let ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] { assert!(should_broadcast); } else { unreachable!(); } self.pending_background_events.lock().unwrap().push( BackgroundEvent::MonitorUpdateRegeneratedOnStartup { - counterparty_node_id, funding_txo, update + counterparty_node_id, funding_txo, update, channel_id, }); } self.finish_close_channel(failure); @@ -8047,9 +8083,13 @@ where /// [`Event`] being handled) completes, this should be called to restore the channel to normal /// operation. It will double-check that nothing *else* is also blocking the same channel from /// making progress and then let any blocked [`ChannelMonitorUpdate`]s fly. - fn handle_monitor_update_release(&self, counterparty_node_id: PublicKey, channel_funding_outpoint: OutPoint, mut completed_blocker: Option<RAAMonitorUpdateBlockingAction>) { + fn handle_monitor_update_release(&self, counterparty_node_id: PublicKey, + channel_funding_outpoint: OutPoint, channel_id: Option<ChannelId>, + mut completed_blocker: Option<RAAMonitorUpdateBlockingAction>) { + let channel_id = channel_id.unwrap_or(ChannelId::v1_from_funding_outpoint(channel_funding_outpoint)); + let logger = WithContext::from( - &self.logger, Some(counterparty_node_id), Some(channel_funding_outpoint.to_channel_id()) + &self.logger, Some(counterparty_node_id), Some(channel_id), ); loop { let per_peer_state = self.per_peer_state.read().unwrap(); @@ -8059,29 +8099,30 @@ where if let Some(blocker) = completed_blocker.take() { // Only do this on the first iteration of the loop. if let Some(blockers) = peer_state.actions_blocking_raa_monitor_updates - .get_mut(&channel_funding_outpoint.to_channel_id()) + .get_mut(&channel_id) { blockers.retain(|iter| iter != &blocker); } } if self.raa_monitor_updates_held(&peer_state.actions_blocking_raa_monitor_updates, - channel_funding_outpoint, counterparty_node_id) { + channel_funding_outpoint, Some(channel_id), counterparty_node_id) { // Check that, while holding the peer lock, we don't have anything else // blocking monitor updates for this channel. If we do, release the monitor // update(s) when those blockers complete.
log_trace!(logger, "Delaying monitor unlock for channel {} as another channel's mon update needs to complete first", - &channel_funding_outpoint.to_channel_id()); + &channel_id); break; } - if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry(channel_funding_outpoint.to_channel_id()) { + if let hash_map::Entry::Occupied(mut chan_phase_entry) = peer_state.channel_by_id.entry( + channel_id) { if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() { debug_assert_eq!(chan.context.get_funding_txo().unwrap(), channel_funding_outpoint); if let Some((monitor_update, further_update_exists)) = chan.unblock_next_blocked_monitor_update() { log_debug!(logger, "Unlocking monitor updating for channel {} and updating monitor", - channel_funding_outpoint.to_channel_id()); - handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update, + channel_id); + handle_new_monitor_update!(self, channel_funding_outpoint, channel_id, monitor_update, peer_state_lck, peer_state, per_peer_state, chan); if further_update_exists { // If there are more `ChannelMonitorUpdate`s to process, restart at the @@ -8090,7 +8131,7 @@ where } } else { log_trace!(logger, "Unlocked monitor updating for channel {} without monitors to update", - channel_funding_outpoint.to_channel_id()); + channel_id); } } } @@ -8107,9 +8148,9 @@ where for action in actions { match action { EventCompletionAction::ReleaseRAAChannelMonitorUpdate { - channel_funding_outpoint, counterparty_node_id + channel_funding_outpoint, channel_id, counterparty_node_id } => { - self.handle_monitor_update_release(counterparty_node_id, channel_funding_outpoint, None); + self.handle_monitor_update_release(counterparty_node_id, channel_funding_outpoint, channel_id, None); } } } @@ -8506,6 +8547,7 @@ where incoming_packet_shared_secret: htlc.forward_info.incoming_shared_secret, phantom_shared_secret: None, outpoint: htlc.prev_funding_outpoint, + channel_id: htlc.prev_channel_id, blinded_failure: htlc.forward_info.routing.blinded_failure(), }); @@ -8517,7 +8559,7 @@ where HTLCFailReason::from_failure_code(0x2000 | 2), HTLCDestination::InvalidForward { requested_forward_scid })); let logger = WithContext::from( - &self.logger, None, Some(htlc.prev_funding_outpoint.to_channel_id()) + &self.logger, None, Some(htlc.prev_channel_id) ); log_trace!(logger, "Timing out intercepted HTLC with requested forward scid {}", requested_forward_scid); false @@ -9601,6 +9643,7 impl_writeable_tlv_based!(HTLCPreviousHopData, { (4, htlc_id, required), (6, incoming_packet_shared_secret, required), (7, user_channel_id, option), + (8, channel_id, required), }); impl Writeable for ClaimableHTLC { @@ -9752,6 +9795,7 @@ impl_writeable_tlv_based!(PendingAddHTLCInfo, { (2, prev_short_channel_id, required), (4, prev_htlc_id, required), (6, prev_funding_outpoint, required), + (8, prev_channel_id, required), }); impl Writeable for HTLCForwardInfo { @@ -10265,12 +10309,14 @@ where let mut short_to_chan_info = HashMap::with_capacity(cmp::min(channel_count as usize, 128)); let mut channel_closures = VecDeque::new(); let mut close_background_events = Vec::new(); + let mut funding_txo_to_channel_id = HashMap::with_capacity(channel_count as usize); for _ in 0..channel_count { let mut channel: Channel<SP> = Channel::read(reader, ( &args.entropy_source, &args.signer_provider, best_block_height, &provided_channel_type_features(&args.default_config) ))?; let logger = WithChannelContext::from(&args.logger, &channel.context); let funding_txo =
channel.context.get_funding_txo().ok_or(DecodeError::InvalidValue)?; + funding_txo_to_channel_id.insert(funding_txo, channel.context.channel_id()); funding_txo_set.insert(funding_txo.clone()); if let Some(ref mut monitor) = args.channel_monitors.get_mut(&funding_txo) { if channel.get_cur_holder_commitment_transaction_number() > monitor.get_cur_holder_commitment_number() || @@ -10300,9 +10346,9 @@ where if shutdown_result.unbroadcasted_batch_funding_txid.is_some() { return Err(DecodeError::InvalidValue); } - if let Some((counterparty_node_id, funding_txo, update)) = shutdown_result.monitor_update { + if let Some((counterparty_node_id, funding_txo, channel_id, update)) = shutdown_result.monitor_update { close_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup { - counterparty_node_id, funding_txo, update + counterparty_node_id, funding_txo, channel_id, update }); } failed_htlcs.append(&mut shutdown_result.dropped_outbound_htlcs); @@ -10379,14 +10425,15 @@ where for (funding_txo, monitor) in args.channel_monitors.iter() { if !funding_txo_set.contains(funding_txo) { let logger = WithChannelMonitor::from(&args.logger, monitor); + let channel_id = monitor.get_channel_id(); log_info!(logger, "Queueing monitor update to ensure missing channel {} is force closed", - &funding_txo.to_channel_id()); + &channel_id); let monitor_update = ChannelMonitorUpdate { update_id: CLOSED_CHANNEL_UPDATE_ID, counterparty_node_id: None, updates: vec![ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast: true }], }; - close_background_events.push(BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((*funding_txo, monitor_update))); + close_background_events.push(BackgroundEvent::ClosedMonitorUpdateRegeneratedOnStartup((*funding_txo, Some(channel_id), monitor_update))); } } @@ -10557,18 +10604,19 @@ where let mut pending_background_events = Vec::new(); macro_rules! handle_in_flight_updates { ($counterparty_node_id: expr, $chan_in_flight_upds: expr, $funding_txo: expr, - $monitor: expr, $peer_state: expr, $logger: expr, $channel_info_log: expr + $channel_id: expr, $monitor: expr, $peer_state: expr, $logger: expr, $channel_info_log: expr ) => { { let mut max_in_flight_update_id = 0; $chan_in_flight_upds.retain(|upd| upd.update_id > $monitor.get_latest_update_id()); for update in $chan_in_flight_upds.iter() { log_trace!($logger, "Replaying ChannelMonitorUpdate {} for {}channel {}", - update.update_id, $channel_info_log, &$funding_txo.to_channel_id()); + update.update_id, $channel_info_log, &$channel_id.unwrap_or(ChannelId::v1_from_funding_outpoint($funding_txo))); max_in_flight_update_id = cmp::max(max_in_flight_update_id, update.update_id); pending_background_events.push( BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id: $counterparty_node_id, funding_txo: $funding_txo, + channel_id: $channel_id, update: update.clone(), }); } @@ -10579,7 +10627,7 @@ where pending_background_events.push( BackgroundEvent::MonitorUpdatesComplete { counterparty_node_id: $counterparty_node_id, - channel_id: $funding_txo.to_channel_id(), + channel_id: $channel_id.unwrap_or(ChannelId::v1_from_funding_outpoint($funding_txo)), }); } if $peer_state.in_flight_monitor_updates.insert($funding_txo, $chan_in_flight_upds).is_some() { @@ -10600,6 +10648,7 @@ where // Channels that were persisted have to be funded, otherwise they should have been // discarded. 
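In-flight monitor updates are persisted keyed by `(counterparty, funding outpoint)` alone, so the `funding_txo_to_channel_id` map built above is what lets deserialization recover a `ChannelId` even for updates whose channel no longer appears in `channel_by_id` (fully-closed channels fall back to the v1 derivation instead). A sketch of that index under the same assumptions:

```rust
use std::collections::HashMap;
use lightning::chain::transaction::OutPoint;
use lightning::ln::ChannelId;

// Sketch of the deserialization-time index built above: both element types
// are Copy in this branch, so collecting pairs is all that's needed.
fn index_by_funding(channels: &[(OutPoint, ChannelId)]) -> HashMap<OutPoint, ChannelId> {
    channels.iter().copied().collect()
}
```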
let funding_txo = chan.context.get_funding_txo().ok_or(DecodeError::InvalidValue)?; + let channel_id = chan.context.channel_id(); let monitor = args.channel_monitors.get(&funding_txo) .expect("We already checked for monitor presence when loading channels"); let mut max_in_flight_update_id = monitor.get_latest_update_id(); @@ -10607,7 +10656,7 @@ where if let Some(mut chan_in_flight_upds) = in_flight_upds.remove(&(*counterparty_id, funding_txo)) { max_in_flight_update_id = cmp::max(max_in_flight_update_id, handle_in_flight_updates!(*counterparty_id, chan_in_flight_upds, - funding_txo, monitor, peer_state, logger, "")); + funding_txo, Some(channel_id), monitor, peer_state, logger, "")); } } if chan.get_latest_unblocked_monitor_update_id() > max_in_flight_update_id { @@ -10633,7 +10682,8 @@ where if let Some(in_flight_upds) = in_flight_monitor_updates { for ((counterparty_id, funding_txo), mut chan_in_flight_updates) in in_flight_upds { - let logger = WithContext::from(&args.logger, Some(counterparty_id), Some(funding_txo.to_channel_id())); + let channel_id = funding_txo_to_channel_id.get(&funding_txo).copied(); + let logger = WithContext::from(&args.logger, Some(counterparty_id), channel_id); if let Some(monitor) = args.channel_monitors.get(&funding_txo) { // Now that we've removed all the in-flight monitor updates for channels that are // still open, we need to replay any monitor updates that are for closed channels, @@ -10643,11 +10693,11 @@ where }); let mut peer_state = peer_state_mutex.lock().unwrap(); handle_in_flight_updates!(counterparty_id, chan_in_flight_updates, - funding_txo, monitor, peer_state, logger, "closed "); + funding_txo, channel_id, monitor, peer_state, logger, "closed "); } else { log_error!(logger, "A ChannelMonitor is missing even though we have in-flight updates for it! This indicates a potentially-critical violation of the chain::Watch API!"); log_error!(logger, " The ChannelMonitor for channel {} is missing.", - &funding_txo.to_channel_id()); + &channel_id.unwrap_or(ChannelId::v1_from_funding_outpoint(funding_txo))); log_error!(logger, " The chain::Watch API *requires* that monitors are persisted durably before returning,"); log_error!(logger, " client applications must ensure that ChannelMonitor data is always available and the latest to avoid funds loss!"); log_error!(logger, " Without the latest ChannelMonitor we cannot continue without risking funds."); @@ -10735,7 +10785,7 @@ where if let HTLCForwardInfo::AddHTLC(htlc_info) = forward { if pending_forward_matches_htlc(&htlc_info) { log_info!(logger, "Removing pending to-forward HTLC with hash {} as it was forwarded to the closed channel {}", - &htlc.payment_hash, &monitor.get_funding_txo().0.to_channel_id()); + &htlc.payment_hash, &monitor.get_channel_id()); false } else { true } } else { true } @@ -10745,7 +10795,7 @@ where pending_intercepted_htlcs.as_mut().unwrap().retain(|intercepted_id, htlc_info| { if pending_forward_matches_htlc(&htlc_info) { log_info!(logger, "Removing pending intercepted HTLC with hash {} as it was forwarded to the closed channel {}", - &htlc.payment_hash, &monitor.get_funding_txo().0.to_channel_id()); + &htlc.payment_hash, &monitor.get_channel_id()); pending_events_read.retain(|(event, _)| { if let Event::HTLCIntercepted { intercept_id: ev_id, .. 
 } = event {
 										intercepted_id != ev_id
@@ -10769,6 +10819,7 @@ where
 							let compl_action =
 								EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
 									channel_funding_outpoint: monitor.get_funding_txo().0,
+									channel_id: Some(monitor.get_channel_id()),
 									counterparty_node_id: path.hops[0].pubkey,
 								};
 							pending_outbounds.claim_htlc(payment_id, preimage, session_priv,
@@ -10794,7 +10845,7 @@ where
 								// channel_id -> peer map entry).
 								counterparty_opt.is_none(),
 								counterparty_opt.cloned().or(monitor.get_counterparty_node_id()),
-								monitor.get_funding_txo().0))
+								monitor.get_funding_txo().0, monitor.get_channel_id()))
 						} else { None }
 					} else {
 						// If it was an outbound payment, we've handled it above - if a preimage
@@ -10967,7 +11018,7 @@ where
 					// this channel as well. On the flip side, there's no harm in restarting
 					// without the new monitor persisted - we'll end up right back here on
 					// restart.
-					let previous_channel_id = claimable_htlc.prev_hop.outpoint.to_channel_id();
+					let previous_channel_id = claimable_htlc.prev_hop.channel_id;
 					if let Some(peer_node_id) = outpoint_to_peer.get(&claimable_htlc.prev_hop.outpoint) {
 						let peer_state_mutex = per_peer_state.get(peer_node_id).unwrap();
 						let mut peer_state_lock = peer_state_mutex.lock().unwrap();
@@ -11000,14 +11051,15 @@ where
 					for action in actions.iter() {
 						if let MonitorUpdateCompletionAction::EmitEventAndFreeOtherChannel {
 							downstream_counterparty_and_funding_outpoint:
-								Some((blocked_node_id, blocked_channel_outpoint, blocking_action)), ..
+								Some((blocked_node_id, blocked_channel_outpoint, blocked_channel_id, blocking_action)), ..
 						} = action {
 							if let Some(blocked_peer_state) = per_peer_state.get(&blocked_node_id) {
+								let channel_id = blocked_channel_id.unwrap_or(ChannelId::v1_from_funding_outpoint(*blocked_channel_outpoint));
 								log_trace!(logger, "Holding the next revoke_and_ack from {} until the preimage is durably persisted in the inbound edge's ChannelMonitor",
-									blocked_channel_outpoint.to_channel_id());
+									channel_id);
 								blocked_peer_state.lock().unwrap().actions_blocking_raa_monitor_updates
-									.entry(blocked_channel_outpoint.to_channel_id())
+									.entry(channel_id)
 									.or_insert_with(Vec::new).push(blocking_action.clone());
 							} else {
 								// If the channel we were blocking has closed, we don't need to
@@ -11087,12 +11139,12 @@ where
 			channel_manager.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
 		}
-		for (source, preimage, downstream_value, downstream_closed, downstream_node_id, downstream_funding) in pending_claims_to_replay {
+		for (source, preimage, downstream_value, downstream_closed, downstream_node_id, downstream_funding, downstream_channel_id) in pending_claims_to_replay {
 			// We use `downstream_closed` in place of `from_onchain` here just as a guess - we
 			// don't remember in the `ChannelMonitor` where we got a preimage from, but if the
 			// channel is closed we just assume that it probably came from an on-chain claim.
 			channel_manager.claim_funds_internal(source, preimage, Some(downstream_value),
-				downstream_closed, true, downstream_node_id, downstream_funding);
+				downstream_closed, true, downstream_node_id, downstream_funding, Some(downstream_channel_id));
 		}

 		//TODO: Broadcast channel update for closed channels, but only after we've made a
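
The replay path above now threads an `Option<ChannelId>` through `BackgroundEvent::MonitorUpdateRegeneratedOnStartup` instead of re-deriving the id at each use site. A hedged sketch of how a consumer handles the new optional field; the field names mirror the hunks above, but the types are simplified stand-ins rather than LDK's:

```rust
type OutPoint = ([u8; 32], u16); // stand-in for chain::transaction::OutPoint
type ChannelId = [u8; 32];       // stand-in for ln::ChannelId

enum BackgroundEvent {
    MonitorUpdateRegeneratedOnStartup {
        counterparty_node_id: [u8; 33],
        funding_txo: OutPoint,
        // New: carried explicitly; None for state written by older versions.
        channel_id: Option<ChannelId>,
    },
}

fn handle(ev: BackgroundEvent, v1_fallback: impl Fn(OutPoint) -> ChannelId) {
    match ev {
        BackgroundEvent::MonitorUpdateRegeneratedOnStartup { funding_txo, channel_id, .. } => {
            // Older serialized state may lack the id; fall back to the v1 derivation.
            let id = channel_id.unwrap_or_else(|| v1_fallback(funding_txo));
            let _ = id;
        }
    }
}
```
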
diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs
index a2d9631716c..8a145eb2ad8 100644
--- a/lightning/src/ln/functional_test_utils.rs
+++ b/lightning/src/ln/functional_test_utils.rs
@@ -249,7 +249,7 @@ pub fn connect_block<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, block: &Block)
 fn call_claimable_balances<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>) {
 	// Ensure `get_claimable_balances`' self-tests never panic
-	for funding_outpoint in node.chain_monitor.chain_monitor.list_monitors() {
+	for (funding_outpoint, _channel_id) in node.chain_monitor.chain_monitor.list_monitors() {
 		node.chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances();
 	}
 }
@@ -568,7 +568,7 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> {
 			let feeest = test_utils::TestFeeEstimator { sat_per_kw: Mutex::new(253) };
 			let mut deserialized_monitors = Vec::new();
 			{
-				for outpoint in self.chain_monitor.chain_monitor.list_monitors() {
+				for (outpoint, _channel_id) in self.chain_monitor.chain_monitor.list_monitors() {
 					let mut w = test_utils::TestVecWriter(Vec::new());
 					self.chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut w).unwrap();
 					let (_, deserialized_monitor) = <(BlockHash, ChannelMonitor<TestChannelSigner>)>::read(
@@ -611,7 +611,9 @@ impl<'a, 'b, 'c> Drop for Node<'a, 'b, 'c> {
 			let chain_source = test_utils::TestChainSource::new(Network::Testnet);
 			let chain_monitor = test_utils::TestChainMonitor::new(Some(&chain_source), &broadcaster, &self.logger, &feeest, &persister, &self.keys_manager);
 			for deserialized_monitor in deserialized_monitors.drain(..) {
-				if chain_monitor.watch_channel(deserialized_monitor.get_funding_txo().0, deserialized_monitor) != Ok(ChannelMonitorUpdateStatus::Completed) {
+				let funding_outpoint = deserialized_monitor.get_funding_txo().0;
+				let channel_id = deserialized_monitor.get_channel_id();
+				if chain_monitor.watch_channel(funding_outpoint, channel_id, deserialized_monitor) != Ok(ChannelMonitorUpdateStatus::Completed) {
 					panic!();
 				}
 			}
@@ -1035,7 +1037,9 @@ pub fn _reload_node<'a, 'b, 'c>(node: &'a Node<'a, 'b, 'c>, default_config: User
 	assert!(node_read.is_empty());

 	for monitor in monitors_read.drain(..) {
-		assert_eq!(node.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor),
+		let funding_outpoint = monitor.get_funding_txo().0;
+		let channel_id = monitor.get_channel_id();
+		assert_eq!(node.chain_monitor.watch_channel(funding_outpoint, channel_id, monitor),
 			Ok(ChannelMonitorUpdateStatus::Completed));
 		check_added_monitors!(node, 1);
 	}
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index 2ad53faa8b2..ef509accc45 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -2428,7 +2428,7 @@ fn channel_monitor_network_test() {
 	assert_eq!(nodes[3].node.list_channels().len(), 0);
 	assert_eq!(nodes[4].node.list_channels().len(), 0);

-	assert_eq!(nodes[3].chain_monitor.chain_monitor.watch_channel(OutPoint { txid: chan_3.3.txid(), index: 0 }, chan_3_mon),
+	assert_eq!(nodes[3].chain_monitor.chain_monitor.watch_channel(OutPoint { txid: chan_3.3.txid(), index: 0 }, chan_3.2, chan_3_mon),
 		Ok(ChannelMonitorUpdateStatus::Completed));
 	check_closed_event!(nodes[3], 1, ClosureReason::HolderForceClosed, [nodes[4].node.get_our_node_id()], 100000);
 }
@@ -8471,6 +8471,7 @@ fn test_update_err_monitor_lockdown() {
 	// Create some initial channel
 	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
 	let outpoint = OutPoint { txid: chan_1.3.txid(), index: 0 };
+	let channel_id = chan_1.2;

 	// Rebalance the network to generate htlc in the two directions
 	send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000);
@@ -8491,7 +8492,7 @@ fn test_update_err_monitor_lockdown() {
 		new_monitor
 	};
 	let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &chanmon_cfgs[0].tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
-	assert_eq!(watchtower.watch_channel(outpoint, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed));
+	assert_eq!(watchtower.watch_channel(outpoint, channel_id, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed));
 	watchtower
 };
 let block = create_dummy_block(BlockHash::all_zeros(), 42, Vec::new());
@@ -8513,8 +8514,8 @@ fn test_update_err_monitor_lockdown() {
 	let mut node_0_peer_state_lock;
 	if let ChannelPhase::Funded(ref mut channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2) {
 		if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
-			assert_eq!(watchtower.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::InProgress);
-			assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
+			assert_eq!(watchtower.chain_monitor.update_channel(outpoint, channel_id, &update), ChannelMonitorUpdateStatus::InProgress);
+			assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, channel_id, &update), ChannelMonitorUpdateStatus::Completed);
 		} else { assert!(false); }
 	} else {
 		assert!(false);
@@ -8541,6 +8542,7 @@ fn test_concurrent_monitor_claim() {
 	// Create some initial channel
 	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1);
 	let outpoint = OutPoint { txid: chan_1.3.txid(), index: 0 };
+	let channel_id = chan_1.2;

 	// Rebalance the network to generate htlc in the two directions
 	send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000);
@@ -8564,7 +8566,7 @@ fn test_concurrent_monitor_claim() {
 		new_monitor
 	};
 	let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &alice_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
-	assert_eq!(watchtower.watch_channel(outpoint, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed));
+	assert_eq!(watchtower.watch_channel(outpoint, channel_id, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed));
 	watchtower
 };
 let block = create_dummy_block(BlockHash::all_zeros(), 42, Vec::new());
@@ -8596,7 +8598,7 @@ fn test_concurrent_monitor_claim() {
 		new_monitor
 	};
 	let watchtower = test_utils::TestChainMonitor::new(Some(&chain_source), &bob_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager);
-	assert_eq!(watchtower.watch_channel(outpoint, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed));
+	assert_eq!(watchtower.watch_channel(outpoint, channel_id, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed));
 	watchtower
 };
 watchtower_bob.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), HTLC_TIMEOUT_BROADCAST - 1);
@@ -8616,9 +8618,9 @@ fn test_concurrent_monitor_claim() {
 	if let ChannelPhase::Funded(ref mut channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan_1.2) {
 		if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed, &node_cfgs[0].logger) {
 			// Watchtower Alice should already have seen the block and reject the update
-			assert_eq!(watchtower_alice.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::InProgress);
-			assert_eq!(watchtower_bob.chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
-			assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, &update), ChannelMonitorUpdateStatus::Completed);
+			assert_eq!(watchtower_alice.chain_monitor.update_channel(outpoint, channel_id, &update), ChannelMonitorUpdateStatus::InProgress);
+			assert_eq!(watchtower_bob.chain_monitor.update_channel(outpoint, channel_id, &update), ChannelMonitorUpdateStatus::Completed);
+			assert_eq!(nodes[0].chain_monitor.update_channel(outpoint, channel_id, &update), ChannelMonitorUpdateStatus::Completed);
 		} else { assert!(false); }
 	} else {
 		assert!(false);
@@ -8685,7 +8687,7 @@ fn test_pre_lockin_no_chan_closed_update() {
 	check_added_monitors!(nodes[0], 0);

 	let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
-	let channel_id = crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index }.to_channel_id();
+	let channel_id = ChannelId::v1_from_funding_outpoint(crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index });
 	nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &msgs::ErrorMessage { channel_id, data: "Hi".to_owned() });
 	assert!(nodes[0].chain_monitor.added_monitors.lock().unwrap().is_empty());
 	check_closed_event!(nodes[0], 2, ClosureReason::CounterpartyForceClosed { peer_msg: UntrustedString("Hi".to_string()) }, true,
@@ -8995,7 +8997,7 @@ fn test_duplicate_funding_err_in_funding() {
 	let (_, _, _, real_channel_id, funding_tx) = create_chan_between_nodes(&nodes[0], &nodes[1]);
 	let real_chan_funding_txo = chain::transaction::OutPoint { txid: funding_tx.txid(), index: 0 };
-	assert_eq!(real_chan_funding_txo.to_channel_id(), real_channel_id);
+	assert_eq!(ChannelId::v1_from_funding_outpoint(real_chan_funding_txo), real_channel_id);

 	nodes[2].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
 	let mut open_chan_msg = get_event_msg!(nodes[2], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
@@ -9087,7 +9089,7 @@ fn test_duplicate_chan_id() {
 	let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
 	let funding_outpoint = crate::chain::transaction::OutPoint { txid: funding_created_msg.funding_txid, index: funding_created_msg.funding_output_index };
-	let channel_id = funding_outpoint.to_channel_id();
+	let channel_id = ChannelId::v1_from_funding_outpoint(funding_outpoint);

 	// Now we have the first channel past funding_created (ie it has a txid-based channel_id, not a
 	// temporary one).
@@ -10541,7 +10543,7 @@ fn test_batch_channel_open() {
 	// Complete the persistence of the monitor.
 	nodes[0].chain_monitor.complete_sole_pending_chan_update(
-		&OutPoint { txid: tx.txid(), index: 1 }.to_channel_id()
+		&ChannelId::v1_from_funding_outpoint(OutPoint { txid: tx.txid(), index: 1 })
 	);

 	let events = nodes[0].node.get_and_clear_pending_events();
@@ -10596,8 +10598,8 @@ fn test_disconnect_in_funding_batch() {
 	nodes[0].node.peer_disconnected(&nodes[2].node.get_our_node_id());

 	// The channels in the batch will close immediately.
-	let channel_id_1 = OutPoint { txid: tx.txid(), index: 0 }.to_channel_id();
-	let channel_id_2 = OutPoint { txid: tx.txid(), index: 1 }.to_channel_id();
+	let channel_id_1 = ChannelId::v1_from_funding_outpoint(OutPoint { txid: tx.txid(), index: 0 });
+	let channel_id_2 = ChannelId::v1_from_funding_outpoint(OutPoint { txid: tx.txid(), index: 1 });
 	check_closed_events(&nodes[0], &[
 		ExpectedCloseEvent {
 			channel_id: Some(channel_id_1),
@@ -10664,8 +10666,8 @@ fn test_batch_funding_close_after_funding_signed() {
 	assert_eq!(nodes[0].tx_broadcaster.txn_broadcast().len(), 0);

 	// Force-close the channel for which we've completed the initial monitor.
-	let channel_id_1 = OutPoint { txid: tx.txid(), index: 0 }.to_channel_id();
-	let channel_id_2 = OutPoint { txid: tx.txid(), index: 1 }.to_channel_id();
+	let channel_id_1 = ChannelId::v1_from_funding_outpoint(OutPoint { txid: tx.txid(), index: 0 });
+	let channel_id_2 = ChannelId::v1_from_funding_outpoint(OutPoint { txid: tx.txid(), index: 1 });
 	nodes[0].node.force_close_broadcasting_latest_txn(&channel_id_1, &nodes[1].node.get_our_node_id()).unwrap();
 	check_added_monitors(&nodes[0], 2);
 	{
@@ -10721,7 +10723,7 @@ fn do_test_funding_and_commitment_tx_confirm_same_block(confirm_remote_commitmen
 	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);

 	let funding_tx = create_chan_between_nodes_with_value_init(&nodes[0], &nodes[1], 1_000_000, 0);
-	let chan_id = chain::transaction::OutPoint { txid: funding_tx.txid(), index: 0 }.to_channel_id();
+	let chan_id = ChannelId::v1_from_funding_outpoint(chain::transaction::OutPoint { txid: funding_tx.txid(), index: 0 });

 	assert_eq!(nodes[0].node.list_channels().len(), 1);
 	assert_eq!(nodes[1].node.list_channels().len(), 1);
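
The batch-funding tests above derive one channel id per output of a single funding transaction (`index: 0` and `index: 1`). A small sketch of that relationship, inlining the illustrative derivation from the earlier note (standalone helper, not LDK API):

```rust
// One v1 channel id per funding output: the ids differ only in the
// two bytes XORed with the output index. Illustrative sketch.
fn batch_channel_ids(funding_txid: [u8; 32], n_channels: u16) -> Vec<[u8; 32]> {
    (0..n_channels).map(|index| {
        let mut id = funding_txid;
        id[30] ^= (index >> 8) as u8;
        id[31] ^= (index & 0xff) as u8;
        id
    }).collect()
}
```
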
diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs
index 74740a6f227..71672655702 100644
--- a/lightning/src/ln/monitor_tests.rs
+++ b/lightning/src/ln/monitor_tests.rs
@@ -15,7 +15,7 @@ use crate::chain::transaction::OutPoint;
 use crate::chain::chaininterface::{LowerBoundedFeeEstimator, compute_feerate_sat_per_1000_weight};
 use crate::events::bump_transaction::{BumpTransactionEvent, WalletSource};
 use crate::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination};
-use crate::ln::channel;
+use crate::ln::{channel, ChannelId};
 use crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, PaymentId, RecipientOnionFields};
 use crate::ln::msgs::ChannelMessageHandler;
 use crate::util::config::UserConfig;
@@ -176,7 +176,7 @@ fn do_chanmon_claim_value_coop_close(anchors: bool) {
 	let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 1_000_000);
 	let funding_outpoint = OutPoint { txid: funding_tx.txid(), index: 0 };
-	assert_eq!(funding_outpoint.to_channel_id(), chan_id);
+	assert_eq!(ChannelId::v1_from_funding_outpoint(funding_outpoint), chan_id);

 	let chan_feerate = get_feerate!(nodes[0], nodes[1], chan_id) as u64;
 	let channel_type_features = get_channel_type_features!(nodes[0], nodes[1], chan_id);
@@ -327,7 +327,7 @@ fn do_test_claim_value_force_close(anchors: bool, prev_commitment_tx: bool) {
 	let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 1_000_000);
 	let funding_outpoint = OutPoint { txid: funding_tx.txid(), index: 0 };
-	assert_eq!(funding_outpoint.to_channel_id(), chan_id);
+	assert_eq!(ChannelId::v1_from_funding_outpoint(funding_outpoint), chan_id);

 	// This HTLC is immediately claimed, giving node B the preimage
 	let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 3_000_000);
@@ -1121,7 +1121,7 @@ fn do_test_revoked_counterparty_commitment_balances(anchors: bool, confirm_htlc_
 	let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 100_000_000);
 	let funding_outpoint = OutPoint { txid: funding_tx.txid(), index: 0 };
-	assert_eq!(funding_outpoint.to_channel_id(), chan_id);
+	assert_eq!(ChannelId::v1_from_funding_outpoint(funding_outpoint), chan_id);

 	// We create five HTLCs for B to claim against A's revoked commitment transaction:
 	//
@@ -1403,7 +1403,7 @@ fn do_test_revoked_counterparty_htlc_tx_balances(anchors: bool) {
 	let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 12_000_000);
 	let funding_outpoint = OutPoint { txid: funding_tx.txid(), index: 0 };
-	assert_eq!(funding_outpoint.to_channel_id(), chan_id);
+	assert_eq!(ChannelId::v1_from_funding_outpoint(funding_outpoint), chan_id);

 	let payment_preimage = route_payment(&nodes[0], &[&nodes[1]], 3_000_000).0;
 	let failed_payment_hash = route_payment(&nodes[1], &[&nodes[0]], 1_000_000).1;
@@ -1705,7 +1705,7 @@ fn do_test_revoked_counterparty_aggregated_claims(anchors: bool) {
 	let (_, _, chan_id, funding_tx) = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 100_000_000);
 	let funding_outpoint = OutPoint { txid: funding_tx.txid(), index: 0 };
-	assert_eq!(funding_outpoint.to_channel_id(), chan_id);
+	assert_eq!(ChannelId::v1_from_funding_outpoint(funding_outpoint), chan_id);

 	// We create two HTLCs, one which we will give A the preimage to to generate an HTLC-Success
 	// transaction, and one which we will not, allowing B to claim the HTLC output in an aggregated
diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs
index 73cdf59bbb6..7f0db3be706 100644
--- a/lightning/src/ln/payment_tests.rs
+++ b/lightning/src/ln/payment_tests.rs
@@ -1110,7 +1110,7 @@ fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, co
 	let funding_txo = OutPoint { txid: funding_tx.txid(), index: 0 };
 	let mon_updates: Vec<_> = chanmon_cfgs[0].persister.chain_sync_monitor_persistences.lock().unwrap()
-		.get_mut(&funding_txo).unwrap().drain().collect();
+		.get_mut(&funding_txo).unwrap().1.drain().collect();
 	// If we are using chain::Confirm instead of chain::Listen, we will get the same update twice.
 	// If we're testing connection idempotency we may get substantially more.
 	assert!(mon_updates.len() >= 1);
@@ -1129,7 +1129,7 @@ fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, co
 	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
 	let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
 	for update in mon_updates {
-		nodes[0].chain_monitor.chain_monitor.channel_monitor_updated(funding_txo, update).unwrap();
+		nodes[0].chain_monitor.chain_monitor.channel_monitor_updated(funding_txo, Some(chan_id), update).unwrap();
 	}
 	if payment_timeout {
 		expect_payment_failed!(nodes[0], payment_hash, false);
 	}
diff --git a/lightning/src/ln/peer_handler.rs b/lightning/src/ln/peer_handler.rs
index 9f49b67932b..d05379c82b4 100644
--- a/lightning/src/ln/peer_handler.rs
+++ b/lightning/src/ln/peer_handler.rs
@@ -2005,7 +2005,7 @@ impl
diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs
 		{
 			assert_eq!(script.into_inner(), unsupported_shutdown_script.clone().into_inner());
 		},
 		Err(e) => panic!("Unexpected error: {:?}", e),
 		Ok(_) => panic!("Expected error"),
 	}
-	nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[0].node.get_our_node_id()).unwrap();
+	nodes[1].node.close_channel(&ChannelId::v1_from_funding_outpoint(OutPoint { txid: chan.3.txid(), index: 0 }), &nodes[0].node.get_our_node_id()).unwrap();
 	check_added_monitors!(nodes[1], 1);

 	// Use a non-v0 segwit script unsupported without option_shutdown_anysegwit
@@ -1004,7 +1004,7 @@ fn test_invalid_shutdown_script() {
 	let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
 	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
-	nodes[1].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[0].node.get_our_node_id()).unwrap();
+	nodes[1].node.close_channel(&ChannelId::v1_from_funding_outpoint(OutPoint { txid: chan.3.txid(), index: 0 }), &nodes[0].node.get_our_node_id()).unwrap();
 	check_added_monitors!(nodes[1], 1);

 	// Use a segwit v0 script with an unsupported witness program
@@ -1038,7 +1038,7 @@ fn test_user_shutdown_script() {
 	let shutdown_script = ShutdownScript::try_from(script.clone()).unwrap();
 	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
-	nodes[1].node.close_channel_with_feerate_and_script(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[0].node.get_our_node_id(), None, Some(shutdown_script)).unwrap();
+	nodes[1].node.close_channel_with_feerate_and_script(&ChannelId::v1_from_funding_outpoint(OutPoint { txid: chan.3.txid(), index: 0 }), &nodes[0].node.get_our_node_id(), None, Some(shutdown_script)).unwrap();
 	check_added_monitors!(nodes[1], 1);

 	let mut node_0_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
@@ -1065,7 +1065,7 @@ fn test_already_set_user_shutdown_script() {
 	let shutdown_script = ShutdownScript::try_from(script).unwrap();
 	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
-	let result = nodes[1].node.close_channel_with_feerate_and_script(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[0].node.get_our_node_id(), None, Some(shutdown_script));
+	let result = nodes[1].node.close_channel_with_feerate_and_script(&ChannelId::v1_from_funding_outpoint(OutPoint { txid: chan.3.txid(), index: 0 }), &nodes[0].node.get_our_node_id(), None, Some(shutdown_script));
 	assert_eq!(result, Err(APIError::APIMisuseError { err: "Cannot override shutdown script for a channel with one already set".to_string() }));
 }
@@ -1197,7 +1197,7 @@ fn do_simple_legacy_shutdown_test(high_initiator_fee: bool) {
 		*feerate_lock *= 10;
 	}
-	nodes[0].node.close_channel(&OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id(), &nodes[1].node.get_our_node_id()).unwrap();
+	nodes[0].node.close_channel(&ChannelId::v1_from_funding_outpoint(OutPoint { txid: chan.3.txid(), index: 0 }), &nodes[1].node.get_our_node_id()).unwrap();
 	let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
 	nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown);
 	let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());
@@ -1237,7 +1237,7 @@ fn simple_target_feerate_shutdown() {
 	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
-	let chan_id = OutPoint { txid: chan.3.txid(), index: 0 }.to_channel_id();
+	let chan_id = chan.2;

 	nodes[0].node.close_channel_with_feerate_and_script(&chan_id, &nodes[1].node.get_our_node_id(), Some(253 * 10), None).unwrap();
 	let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
@@ -1342,7 +1342,7 @@ fn do_outbound_update_no_early_closing_signed(use_htlc: bool) {
 	expect_channel_shutdown_state!(nodes[0], chan_id, ChannelShutdownState::ResolvingHTLCs);
 	assert_eq!(nodes[0].node.get_and_clear_pending_msg_events(), Vec::new());

 	let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone();
-	nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, latest_update);
+	nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(outpoint, Some(chan_id), latest_update);

 	let as_raa_closing_signed = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(as_raa_closing_signed.len(), 2);
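
The macro_logger changes below reduce `DebugFundingInfo` to a thin `Display` wrapper over a borrowed `ChannelId`. A hedged sketch of the same newtype-formatter pattern with a stand-in id type (not LDK's `ChannelId`, which provides its own `Display`):

```rust
use core::fmt;

// Newtype that borrows an id and hex-formats it, mirroring the
// wrapper-over-ChannelId shape used by the logging macros below.
struct DebugChannelId<'a>(&'a [u8; 32]);

impl<'a> fmt::Display for DebugChannelId<'a> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        for b in self.0.iter() {
            write!(f, "{:02x}", b)?;
        }
        Ok(())
    }
}

fn main() {
    let id = [0u8; 32];
    println!("channel {}", DebugChannelId(&id)); // prints 64 hex characters
}
```
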
log_funding_info { ($key_storage: expr) => { - $crate::util::macro_logger::DebugFundingInfo(&$key_storage.get_funding_txo()) + $crate::util::macro_logger::DebugFundingInfo( + &$key_storage.get_channel_id() + ) } } diff --git a/lightning/src/util/persist.rs b/lightning/src/util/persist.rs index e6329062051..b6f6f3a57fa 100644 --- a/lightning/src/util/persist.rs +++ b/lightning/src/util/persist.rs @@ -14,6 +14,7 @@ use core::ops::Deref; use core::str::FromStr; use bitcoin::{BlockHash, Txid}; +use crate::ln::ChannelId; use crate::{io, log_error}; use crate::alloc::string::ToString; use crate::prelude::*; @@ -193,7 +194,7 @@ impl Persist, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus { + fn persist_new_channel(&self, funding_txo: OutPoint, _channel_id: ChannelId, monitor: &ChannelMonitor, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus { let key = format!("{}_{}", funding_txo.txid.to_string(), funding_txo.index); match self.write( CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, @@ -205,7 +206,7 @@ impl Persist, monitor: &ChannelMonitor, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus { + fn update_persisted_channel(&self, funding_txo: OutPoint, _channel_id: ChannelId, _update: Option<&ChannelMonitorUpdate>, monitor: &ChannelMonitor, _update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus { let key = format!("{}_{}", funding_txo.txid.to_string(), funding_txo.index); match self.write( CHANNEL_MONITOR_PERSISTENCE_PRIMARY_NAMESPACE, @@ -606,7 +607,7 @@ where /// Persists a new channel. This means writing the entire monitor to the /// parametrized [`KVStore`]. fn persist_new_channel( - &self, funding_txo: OutPoint, monitor: &ChannelMonitor, + &self, funding_txo: OutPoint, _channel_id: ChannelId, monitor: &ChannelMonitor, _monitor_update_call_id: MonitorUpdateId, ) -> chain::ChannelMonitorUpdateStatus { // Determine the proper key for this monitor @@ -650,7 +651,7 @@ where /// `update` is `None`. /// - The update is at [`CLOSED_CHANNEL_UPDATE_ID`] fn update_persisted_channel( - &self, funding_txo: OutPoint, update: Option<&ChannelMonitorUpdate>, + &self, funding_txo: OutPoint, channel_id: ChannelId, update: Option<&ChannelMonitorUpdate>, monitor: &ChannelMonitor, monitor_update_call_id: MonitorUpdateId, ) -> chain::ChannelMonitorUpdateStatus { // IMPORTANT: monitor_update_call_id: MonitorUpdateId is not to be confused with @@ -690,7 +691,7 @@ where }; // We could write this update, but it meets criteria of our design that calls for a full monitor write. - let monitor_update_status = self.persist_new_channel(funding_txo, monitor, monitor_update_call_id); + let monitor_update_status = self.persist_new_channel(funding_txo, channel_id, monitor, monitor_update_call_id); if let chain::ChannelMonitorUpdateStatus::Completed = monitor_update_status { let cleanup_range = if monitor.get_latest_update_id() == CLOSED_CHANNEL_UPDATE_ID { @@ -719,7 +720,7 @@ where } } else { // There is no update given, so we must persist a new monitor. 
-			self.persist_new_channel(funding_txo, monitor, monitor_update_call_id)
+			self.persist_new_channel(funding_txo, channel_id, monitor, monitor_update_call_id)
 		}
 	}
 }
@@ -1052,10 +1053,11 @@ mod tests {
 		{
 			let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
 			let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
-			let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap();
-			let cmu_map = nodes[1].chain_monitor.monitor_updates.lock().unwrap();
-			let cmu = &cmu_map.get(&added_monitors[0].0.to_channel_id()).unwrap()[0];
 			let test_txo = OutPoint { txid: Txid::from_str("8984484a580b825b9972d7adb15050b3ab624ccd731946b3eeddb92f4e7ef6be").unwrap(), index: 0 };
+			let channel_id = added_monitors[0].1.get_channel_id();
+			let update_id = update_map.get(&channel_id).unwrap();
+			let cmu_map = nodes[1].chain_monitor.monitor_updates.lock().unwrap();
+			let cmu = &cmu_map.get(&channel_id).unwrap()[0];

 			let ro_persister = MonitorUpdatingPersister {
 				kv_store: &TestStore::new(true),
@@ -1064,7 +1066,7 @@ mod tests {
 				entropy_source: node_cfgs[0].keys_manager,
 				signer_provider: node_cfgs[0].keys_manager,
 			};
-			match ro_persister.persist_new_channel(test_txo, &added_monitors[0].1, update_id.2) {
+			match ro_persister.persist_new_channel(test_txo, channel_id, &added_monitors[0].1, update_id.2) {
 				ChannelMonitorUpdateStatus::UnrecoverableError => {
 					// correct result
 				}
@@ -1075,7 +1077,7 @@ mod tests {
 					panic!("Returned InProgress when shouldn't have")
 				}
 			}
-			match ro_persister.update_persisted_channel(test_txo, Some(cmu), &added_monitors[0].1, update_id.2) {
+			match ro_persister.update_persisted_channel(test_txo, channel_id, Some(cmu), &added_monitors[0].1, update_id.2) {
 				ChannelMonitorUpdateStatus::UnrecoverableError => {
 					// correct result
 				}
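
With the persist.rs changes above, every `Persist` implementation gains a `ChannelId` parameter, even when it goes unused. A minimal no-op implementation against the post-change trait shape, assuming the signatures shown in these hunks and import paths matching this revision (both assumptions; treat this as a sketch rather than API documentation):

```rust
use lightning::chain;
use lightning::chain::chainmonitor::{MonitorUpdateId, Persist};
use lightning::chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate};
use lightning::chain::transaction::OutPoint;
use lightning::ln::ChannelId;
use lightning::sign::WriteableEcdsaChannelSigner;

/// A persister that accepts everything and stores nothing; only useful to
/// show the updated method signatures.
struct NoopPersister;

impl<ChannelSigner: WriteableEcdsaChannelSigner> Persist<ChannelSigner> for NoopPersister {
    fn persist_new_channel(
        &self, _funding_txo: OutPoint, _channel_id: ChannelId,
        _monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId,
    ) -> chain::ChannelMonitorUpdateStatus {
        chain::ChannelMonitorUpdateStatus::Completed
    }

    fn update_persisted_channel(
        &self, _funding_txo: OutPoint, _channel_id: ChannelId,
        _update: Option<&ChannelMonitorUpdate>,
        _monitor: &ChannelMonitor<ChannelSigner>, _update_id: MonitorUpdateId,
    ) -> chain::ChannelMonitorUpdateStatus {
        chain::ChannelMonitorUpdateStatus::Completed
    }
}
```

Returning `Completed` unconditionally is the simplest legal behavior for the trait; a real implementation must durably persist the monitor before reporting completion, or funds can be lost.
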
diff --git a/lightning/src/util/test_utils.rs b/lightning/src/util/test_utils.rs
index a006d37e90e..bb805ab77e6 100644
--- a/lightning/src/util/test_utils.rs
+++ b/lightning/src/util/test_utils.rs
@@ -289,11 +289,11 @@ impl<'a> TestChainMonitor<'a> {
 	pub fn complete_sole_pending_chan_update(&self, channel_id: &ChannelId) {
 		let (outpoint, _, latest_update) = self.latest_monitor_update_id.lock().unwrap().get(channel_id).unwrap().clone();
-		self.chain_monitor.channel_monitor_updated(outpoint, latest_update).unwrap();
+		self.chain_monitor.channel_monitor_updated(outpoint, Some(*channel_id), latest_update).unwrap();
 	}
 }
 impl<'a> chain::Watch<TestChannelSigner> for TestChainMonitor<'a> {
-	fn watch_channel(&self, funding_txo: OutPoint, monitor: channelmonitor::ChannelMonitor<TestChannelSigner>) -> Result<chain::ChannelMonitorUpdateStatus, ()> {
+	fn watch_channel(&self, funding_txo: OutPoint, channel_id: ChannelId, monitor: channelmonitor::ChannelMonitor<TestChannelSigner>) -> Result<chain::ChannelMonitorUpdateStatus, ()> {
 		// At every point where we get a monitor update, we should be able to send a useful monitor
 		// to a watchtower and disk...
 		let mut w = TestVecWriter(Vec::new());
@@ -301,32 +301,32 @@ impl<'a> chain::Watch<TestChannelSigner> for TestChainMonitor<'a> {
 		let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(
 			&mut io::Cursor::new(&w.0), (self.keys_manager, self.keys_manager)).unwrap().1;
 		assert!(new_monitor == monitor);
-		self.latest_monitor_update_id.lock().unwrap().insert(funding_txo.to_channel_id(),
+		self.latest_monitor_update_id.lock().unwrap().insert(channel_id,
 			(funding_txo, monitor.get_latest_update_id(), MonitorUpdateId::from_new_monitor(&monitor)));
 		self.added_monitors.lock().unwrap().push((funding_txo, monitor));
-		self.chain_monitor.watch_channel(funding_txo, new_monitor)
+		self.chain_monitor.watch_channel(funding_txo, channel_id, new_monitor)
 	}

-	fn update_channel(&self, funding_txo: OutPoint, update: &channelmonitor::ChannelMonitorUpdate) -> chain::ChannelMonitorUpdateStatus {
+	fn update_channel(&self, funding_txo: OutPoint, channel_id: ChannelId, update: &channelmonitor::ChannelMonitorUpdate) -> chain::ChannelMonitorUpdateStatus {
 		// Every monitor update should survive roundtrip
 		let mut w = TestVecWriter(Vec::new());
 		update.write(&mut w).unwrap();
 		assert!(channelmonitor::ChannelMonitorUpdate::read(
 			&mut io::Cursor::new(&w.0)).unwrap() == *update);
-		self.monitor_updates.lock().unwrap().entry(funding_txo.to_channel_id()).or_insert(Vec::new()).push(update.clone());
+		self.monitor_updates.lock().unwrap().entry(channel_id).or_insert(Vec::new()).push(update.clone());

 		if let Some(exp) = self.expect_channel_force_closed.lock().unwrap().take() {
-			assert_eq!(funding_txo.to_channel_id(), exp.0);
+			assert_eq!(channel_id, exp.0);
 			assert_eq!(update.updates.len(), 1);
 			if let channelmonitor::ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {
 				assert_eq!(should_broadcast, exp.1);
 			} else { panic!(); }
 		}

-		self.latest_monitor_update_id.lock().unwrap().insert(funding_txo.to_channel_id(),
+		self.latest_monitor_update_id.lock().unwrap().insert(channel_id,
 			(funding_txo, update.update_id, MonitorUpdateId::from_monitor_update(update)));
-		let update_res = self.chain_monitor.update_channel(funding_txo, update);
+		let update_res = self.chain_monitor.update_channel(funding_txo, channel_id, update);
 		// At every point where we get a monitor update, we should be able to send a useful monitor
 		// to a watchtower and disk...
 		let monitor = self.chain_monitor.get_monitor(funding_txo).unwrap();
@@ -335,7 +335,7 @@ impl<'a> chain::Watch<TestChannelSigner> for TestChainMonitor<'a> {
 		let new_monitor = <(BlockHash, channelmonitor::ChannelMonitor<TestChannelSigner>)>::read(
 			&mut io::Cursor::new(&w.0), (self.keys_manager, self.keys_manager)).unwrap().1;
 		if let Some(chan_id) = self.expect_monitor_round_trip_fail.lock().unwrap().take() {
-			assert_eq!(chan_id, funding_txo.to_channel_id());
+			assert_eq!(chan_id, channel_id);
 			assert!(new_monitor != *monitor);
 		} else {
 			assert!(new_monitor == *monitor);
@@ -344,7 +344,7 @@ impl<'a> chain::Watch<TestChannelSigner> for TestChainMonitor<'a> {
 		update_res
 	}

-	fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Vec<MonitorEvent>, Option<PublicKey>)> {
+	fn release_pending_monitor_events(&self) -> Vec<(OutPoint, Option<ChannelId>, Vec<MonitorEvent>, Option<PublicKey>)> {
 		return self.chain_monitor.release_pending_monitor_events();
 	}
 }
@@ -399,10 +399,10 @@ impl WatchtowerPersister {
 }
 impl<Signer: WriteableEcdsaChannelSigner> chainmonitor::Persist<Signer> for WatchtowerPersister {
-	fn persist_new_channel(&self, funding_txo: OutPoint,
+	fn persist_new_channel(&self, funding_txo: OutPoint, channel_id: ChannelId,
 		data: &channelmonitor::ChannelMonitor<Signer>, id: MonitorUpdateId
 	) -> chain::ChannelMonitorUpdateStatus {
-		let res = self.persister.persist_new_channel(funding_txo, data, id);
+		let res = self.persister.persist_new_channel(funding_txo, channel_id, data, id);

 		assert!(self.unsigned_justice_tx_data.lock().unwrap()
 			.insert(funding_txo, VecDeque::new()).is_none());
@@ -421,10 +421,10 @@ impl<Signer: WriteableEcdsaChannelSigner> chainmonitor::Persist<Signer> for WatchtowerPersister {
 	fn update_persisted_channel(
-		&self, funding_txo: OutPoint, update: Option<&channelmonitor::ChannelMonitorUpdate>,
+		&self, funding_txo: OutPoint, channel_id: ChannelId, update: Option<&channelmonitor::ChannelMonitorUpdate>,
 		data: &channelmonitor::ChannelMonitor<Signer>, update_id: MonitorUpdateId
 	) -> chain::ChannelMonitorUpdateStatus {
-		let res = self.persister.update_persisted_channel(funding_txo, update, data, update_id);
+		let res = self.persister.update_persisted_channel(funding_txo, channel_id, update, data, update_id);

 		if let Some(update) = update {
 			let commitment_txs = data.counterparty_commitment_txs_from_update(update);
@@ -459,10 +459,10 @@ pub struct TestPersister {
 	pub update_rets: Mutex<VecDeque<chain::ChannelMonitorUpdateStatus>>,
 	/// When we get an update_persisted_channel call with no ChannelMonitorUpdate, we insert the
 	/// MonitorUpdateId here.
-	pub chain_sync_monitor_persistences: Mutex<HashMap<OutPoint, HashSet<MonitorUpdateId>>>,
+	pub chain_sync_monitor_persistences: Mutex<HashMap<OutPoint, (Option<ChannelId>, HashSet<MonitorUpdateId>)>>,
 	/// When we get an update_persisted_channel call *with* a ChannelMonitorUpdate, we insert the
 	/// MonitorUpdateId here.
-	pub offchain_monitor_updates: Mutex<HashMap<OutPoint, HashSet<MonitorUpdateId>>>,
+	pub offchain_monitor_updates: Mutex<HashMap<OutPoint, (ChannelId, HashSet<MonitorUpdateId>)>>,
 }
 impl TestPersister {
 	pub fn new() -> Self {
@@ -479,23 +479,23 @@ impl TestPersister {
 	}
 }
 impl<Signer: WriteableEcdsaChannelSigner> chainmonitor::Persist<Signer> for TestPersister {
-	fn persist_new_channel(&self, _funding_txo: OutPoint, _data: &channelmonitor::ChannelMonitor<Signer>, _id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
+	fn persist_new_channel(&self, _funding_txo: OutPoint, _channel_id: ChannelId, _data: &channelmonitor::ChannelMonitor<Signer>, _id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
 		if let Some(update_ret) = self.update_rets.lock().unwrap().pop_front() {
 			return update_ret
 		}
 		chain::ChannelMonitorUpdateStatus::Completed
 	}

-	fn update_persisted_channel(&self, funding_txo: OutPoint, _update: Option<&channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor<Signer>, update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
+	fn update_persisted_channel(&self, funding_txo: OutPoint, channel_id: ChannelId, _update: Option<&channelmonitor::ChannelMonitorUpdate>, _data: &channelmonitor::ChannelMonitor<Signer>, update_id: MonitorUpdateId) -> chain::ChannelMonitorUpdateStatus {
 		let mut ret = chain::ChannelMonitorUpdateStatus::Completed;
 		if let Some(update_ret) = self.update_rets.lock().unwrap().pop_front() {
 			ret = update_ret;
 		}
 		let is_chain_sync = if let UpdateOrigin::ChainSync(_) = update_id.contents { true } else { false };
 		if is_chain_sync {
-			self.chain_sync_monitor_persistences.lock().unwrap().entry(funding_txo).or_insert(HashSet::new()).insert(update_id);
+			self.chain_sync_monitor_persistences.lock().unwrap().entry(funding_txo).or_insert((Some(channel_id), HashSet::new())).1.insert(update_id);
 		} else {
-			self.offchain_monitor_updates.lock().unwrap().entry(funding_txo).or_insert(HashSet::new()).insert(update_id);
+			self.offchain_monitor_updates.lock().unwrap().entry(funding_txo).or_insert((channel_id, HashSet::new())).1.insert(update_id);
 		}
 		ret
 	}
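
One design consequence of the `TestPersister` map change worth noting: `or_insert` only uses its default when the key is vacant, so the channel id recorded for an outpoint is whichever arrived first, and later calls merely extend the update-id set. A standalone sketch of that bookkeeping shape with stand-in types:

```rust
use std::collections::{HashMap, HashSet};

type OutPoint = ([u8; 32], u16); // stand-in types; the real code uses
type ChannelId = [u8; 32];       // OutPoint/ChannelId/MonitorUpdateId
type UpdateId = u64;

fn record_chain_sync(
    map: &mut HashMap<OutPoint, (Option<ChannelId>, HashSet<UpdateId>)>,
    funding_txo: OutPoint, channel_id: ChannelId, update_id: UpdateId,
) {
    // Mirrors `.entry(..).or_insert((Some(channel_id), HashSet::new())).1.insert(..)`
    // above: the first channel id wins; subsequent calls only grow the set.
    map.entry(funding_txo)
        .or_insert((Some(channel_id), HashSet::new()))
        .1
        .insert(update_id);
}
```
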