From 29b43ba1654261ce5ce6ee1c637dec47116439a4 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sun, 4 Jul 2021 14:51:21 +0000 Subject: [PATCH 1/9] Add standard derives for ConfirmationTarget --- lightning/src/chain/chaininterface.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/lightning/src/chain/chaininterface.rs b/lightning/src/chain/chaininterface.rs index 83fee4d674d..8ccfb945543 100644 --- a/lightning/src/chain/chaininterface.rs +++ b/lightning/src/chain/chaininterface.rs @@ -23,6 +23,7 @@ pub trait BroadcasterInterface { /// An enum that represents the speed at which we want a transaction to confirm used for feerate /// estimation. +#[derive(Clone, Copy, PartialEq, Eq)] pub enum ConfirmationTarget { /// We are happy with this transaction confirming slowly when feerate drops some. Background, From 692b0c78daf432c1bcd6634f90ebf4324c3eb437 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 5 Jul 2021 18:13:16 +0000 Subject: [PATCH 2/9] Set cfg=fuzzing when building fuzz crate in CI We will likely drop the fuzztarget feature soon, and should thus be setting cfg=fuzzing explicitly anyway. 
--- ci/check-compiles.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ci/check-compiles.sh b/ci/check-compiles.sh index 2bc31007b6b..193c2b4ef1f 100755 --- a/ci/check-compiles.sh +++ b/ci/check-compiles.sh @@ -5,5 +5,5 @@ echo Testing $(git log -1 --oneline) cargo check cargo doc cargo doc --document-private-items -cd fuzz && cargo check --features=stdin_fuzz +cd fuzz && RUSTFLAGS="--cfg=fuzzing" cargo check --features=stdin_fuzz cd ../lightning && cargo check --no-default-features --features=no-std From 03439ec99f38f65022ff84464a9a3e744cc65266 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 28 Jun 2021 03:41:44 +0000 Subject: [PATCH 3/9] Automatically update fees on outbound channels as fees change Previously we'd been expecting to implement anchor outputs before shipping 0.1, thus reworking our channel fee update process entirely and leaving it as a future task. However, due to the difficulty of working with on-chain anchor pools, we are now likely to ship 0.1 without requiring anchor outputs. In either case, there isn't a lot of reason to require that users call an explicit "prevailing feerates have changed" function now that we have a timer method which is called regularly. Further, we really should be the ones deciding on the channel feerate in terms of the users' FeeEstimator, instead of requiring users implement a second fee-providing interface by calling an update_fee method. Finally, there is no reason for an update_fee method to be channel-specific, as we should be updating all (outbound) channel fees at once. Thus, we move the update_fee handling to the background, calling it on the regular 1-minute timer. We also update the regular 1-minute timer to fire on startup as well as every minute to ensure we get fee updates even on mobile clients that are rarely, if ever, open for more than one minute. 
--- lightning-background-processor/src/lib.rs | 3 + lightning/src/ln/channel.rs | 3 +- lightning/src/ln/channelmanager.rs | 199 ++++++++++++---------- lightning/src/ln/functional_tests.rs | 99 ++++++++--- 4 files changed, 185 insertions(+), 119 deletions(-) diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs index e73ddeb709c..4fdf2eeff03 100644 --- a/lightning-background-processor/src/lib.rs +++ b/lightning-background-processor/src/lib.rs @@ -140,6 +140,9 @@ impl BackgroundProcessor { let stop_thread = Arc::new(AtomicBool::new(false)); let stop_thread_clone = stop_thread.clone(); let handle = thread::spawn(move || -> Result<(), std::io::Error> { + log_trace!(logger, "Calling ChannelManager's timer_tick_occurred on startup"); + channel_manager.timer_tick_occurred(); + let mut last_freshness_call = Instant::now(); let mut last_ping_call = Instant::now(); loop { diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index e685c15d68d..dc09dc8242f 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -2905,7 +2905,7 @@ impl Channel { panic!("Cannot update fee while peer is disconnected/we're awaiting a monitor update (ChannelManager should have caught this)"); } - if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32)) == (ChannelState::AwaitingRemoteRevoke as u32) { + if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateFailed as u32)) != 0 { self.holding_cell_update_fee = Some(feerate_per_kw); return None; } @@ -3622,7 +3622,6 @@ impl Channel { self.config.max_dust_htlc_exposure_msat } - #[cfg(test)] pub fn get_feerate(&self) -> u32 { self.feerate_per_kw } diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 439b5444a54..389e67ddb9b 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -37,7 +37,7 @@ use bitcoin::secp256k1; use chain; use 
chain::{Confirm, Watch, BestBlock}; -use chain::chaininterface::{BroadcasterInterface, FeeEstimator}; +use chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, FeeEstimator}; use chain::channelmonitor::{ChannelMonitor, ChannelMonitorUpdate, ChannelMonitorUpdateStep, ChannelMonitorUpdateErr, HTLC_FAIL_BACK_BUFFER, CLTV_CLAIM_BUFFER, LATENCY_GRACE_PERIOD_BLOCKS, ANTI_REORG_DELAY, MonitorEvent, CLOSED_CHANNEL_UPDATE_ID}; use chain::transaction::{OutPoint, TransactionData}; // Since this struct is returned in `list_channels` methods, expose it here in case users want to @@ -71,7 +71,6 @@ use core::time::Duration; #[cfg(any(test, feature = "allow_wallclock_use"))] use std::time::Instant; use core::ops::Deref; -use bitcoin::hashes::hex::ToHex; // We hold various information about HTLC relay in the HTLC objects in Channel itself: // @@ -2561,46 +2560,120 @@ impl ChannelMana self.process_background_events(); } - /// If a peer is disconnected we mark any channels with that peer as 'disabled'. - /// After some time, if channels are still disabled we need to broadcast a ChannelUpdate - /// to inform the network about the uselessness of these channels. 
+ fn update_channel_fee(&self, short_to_id: &mut HashMap, pending_msg_events: &mut Vec, chan_id: &[u8; 32], chan: &mut Channel, new_feerate: u32) -> (bool, NotifyOption, Result<(), MsgHandleErrInternal>) { + if !chan.is_outbound() { return (true, NotifyOption::SkipPersist, Ok(())); } + // If the feerate has decreased by less than half, don't bother + if new_feerate <= chan.get_feerate() && new_feerate * 2 > chan.get_feerate() { + log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {}.", + log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate); + return (true, NotifyOption::SkipPersist, Ok(())); + } + if !chan.is_live() { + log_trace!(self.logger, "Channel {} does not qualify for a feerate change from {} to {} as it cannot currently be updated (probably the peer is disconnected).", + log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate); + return (true, NotifyOption::SkipPersist, Ok(())); + } + log_trace!(self.logger, "Channel {} qualifies for a feerate change from {} to {}.", + log_bytes!(chan_id[..]), chan.get_feerate(), new_feerate); + + let mut retain_channel = true; + let res = match chan.send_update_fee_and_commit(new_feerate, &self.logger) { + Ok(res) => Ok(res), + Err(e) => { + let (drop, res) = convert_chan_err!(self, e, short_to_id, chan, chan_id); + if drop { retain_channel = false; } + Err(res) + } + }; + let ret_err = match res { + Ok(Some((update_fee, commitment_signed, monitor_update))) => { + if let Err(e) = self.chain_monitor.update_channel(chan.get_funding_txo().unwrap(), monitor_update) { + let (res, drop) = handle_monitor_err!(self, e, short_to_id, chan, RAACommitmentOrder::CommitmentFirst, false, true, Vec::new(), Vec::new(), chan_id); + if drop { retain_channel = false; } + res + } else { + pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { + node_id: chan.get_counterparty_node_id(), + updates: msgs::CommitmentUpdate { + update_add_htlcs: Vec::new(), + update_fulfill_htlcs: Vec::new(), + 
update_fail_htlcs: Vec::new(), + update_fail_malformed_htlcs: Vec::new(), + update_fee: Some(update_fee), + commitment_signed, + }, + }); + Ok(()) + } + }, + Ok(None) => Ok(()), + Err(e) => Err(e), + }; + (retain_channel, NotifyOption::DoPersist, ret_err) + } + + /// Performs actions which should happen on startup and roughly once per minute thereafter. /// - /// This method handles all the details, and must be called roughly once per minute. + /// This currently includes: + /// * Increasing or decreasing the on-chain feerate estimates for our outbound channels, + /// * Broadcasting `ChannelUpdate` messages if we've been disconnected from our peer for more + /// than a minute, informing the network that they should no longer attempt to route over + /// the channel. /// - /// Note that in some rare cases this may generate a `chain::Watch::update_channel` call. + /// Note that this may cause reentrancy through `chain::Watch::update_channel` calls or feerate + /// estimate fetches. pub fn timer_tick_occurred(&self) { PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || { let mut should_persist = NotifyOption::SkipPersist; if self.process_background_events() { should_persist = NotifyOption::DoPersist; } - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; - for (_, chan) in channel_state.by_id.iter_mut() { - match chan.channel_update_status() { - ChannelUpdateStatus::Enabled if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged), - ChannelUpdateStatus::Disabled if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged), - ChannelUpdateStatus::DisabledStaged if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Enabled), - ChannelUpdateStatus::EnabledStaged if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Disabled), - 
ChannelUpdateStatus::DisabledStaged if !chan.is_live() => { - if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { - channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { - msg: update - }); - } - should_persist = NotifyOption::DoPersist; - chan.set_channel_update_status(ChannelUpdateStatus::Disabled); - }, - ChannelUpdateStatus::EnabledStaged if chan.is_live() => { - if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { - channel_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { - msg: update - }); - } - should_persist = NotifyOption::DoPersist; - chan.set_channel_update_status(ChannelUpdateStatus::Enabled); - }, - _ => {}, - } + let new_feerate = self.fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal); + + let mut handle_errors = Vec::new(); + { + let mut channel_state_lock = self.channel_state.lock().unwrap(); + let channel_state = &mut *channel_state_lock; + let pending_msg_events = &mut channel_state.pending_msg_events; + let short_to_id = &mut channel_state.short_to_id; + channel_state.by_id.retain(|chan_id, chan| { + match chan.channel_update_status() { + ChannelUpdateStatus::Enabled if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::DisabledStaged), + ChannelUpdateStatus::Disabled if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::EnabledStaged), + ChannelUpdateStatus::DisabledStaged if chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Enabled), + ChannelUpdateStatus::EnabledStaged if !chan.is_live() => chan.set_channel_update_status(ChannelUpdateStatus::Disabled), + ChannelUpdateStatus::DisabledStaged if !chan.is_live() => { + if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { + pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + msg: update + }); + } + should_persist = NotifyOption::DoPersist; + 
chan.set_channel_update_status(ChannelUpdateStatus::Disabled); + }, + ChannelUpdateStatus::EnabledStaged if chan.is_live() => { + if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { + pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { + msg: update + }); + } + should_persist = NotifyOption::DoPersist; + chan.set_channel_update_status(ChannelUpdateStatus::Enabled); + }, + _ => {}, + } + + let counterparty_node_id = chan.get_counterparty_node_id(); + let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_id, pending_msg_events, chan_id, chan, new_feerate); + if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; } + if err.is_err() { + handle_errors.push((err, counterparty_node_id)); + } + retain_channel + }); + } + + for (err, counterparty_node_id) in handle_errors.drain(..) { + let _ = handle_error!(self, err, counterparty_node_id); } should_persist @@ -3728,62 +3801,6 @@ impl ChannelMana Ok(()) } - /// Begin Update fee process. Allowed only on an outbound channel. - /// If successful, will generate a UpdateHTLCs event, so you should probably poll - /// PeerManager::process_events afterwards. - /// Note: This API is likely to change! 
- /// (C-not exported) Cause its doc(hidden) anyway - #[doc(hidden)] - pub fn update_fee(&self, channel_id: [u8;32], feerate_per_kw: u32) -> Result<(), APIError> { - let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier); - let counterparty_node_id; - let err: Result<(), _> = loop { - let mut channel_state_lock = self.channel_state.lock().unwrap(); - let channel_state = &mut *channel_state_lock; - - match channel_state.by_id.entry(channel_id) { - hash_map::Entry::Vacant(_) => return Err(APIError::APIMisuseError{err: format!("Failed to find corresponding channel for id {}", channel_id.to_hex())}), - hash_map::Entry::Occupied(mut chan) => { - if !chan.get().is_outbound() { - return Err(APIError::APIMisuseError{err: "update_fee cannot be sent for an inbound channel".to_owned()}); - } - if chan.get().is_awaiting_monitor_update() { - return Err(APIError::MonitorUpdateFailed); - } - if !chan.get().is_live() { - return Err(APIError::ChannelUnavailable{err: "Channel is either not yet fully established or peer is currently disconnected".to_owned()}); - } - counterparty_node_id = chan.get().get_counterparty_node_id(); - if let Some((update_fee, commitment_signed, monitor_update)) = - break_chan_entry!(self, chan.get_mut().send_update_fee_and_commit(feerate_per_kw, &self.logger), channel_state, chan) - { - if let Err(_e) = self.chain_monitor.update_channel(chan.get().get_funding_txo().unwrap(), monitor_update) { - unimplemented!(); - } - log_debug!(self.logger, "Updating fee resulted in a commitment_signed for channel {}", log_bytes!(chan.get().channel_id())); - channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { - node_id: chan.get().get_counterparty_node_id(), - updates: msgs::CommitmentUpdate { - update_add_htlcs: Vec::new(), - update_fulfill_htlcs: Vec::new(), - update_fail_htlcs: Vec::new(), - update_fail_malformed_htlcs: Vec::new(), - update_fee: Some(update_fee), - 
commitment_signed, - }, - }); - } - }, - } - return Ok(()) - }; - - match handle_error!(self, err, counterparty_node_id) { - Ok(_) => unreachable!(), - Err(e) => { Err(APIError::APIMisuseError { err: e.err })} - } - } - /// Process pending events from the `chain::Watch`, returning whether any events were processed. fn process_pending_monitor_events(&self) -> bool { let mut failed_channels = Vec::new(); diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 754ef563425..1a20d86fb2b 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -130,9 +130,8 @@ fn test_async_inbound_update_fee() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); + create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); let logger = test_utils::TestLogger::new(); - let channel_id = chan.2; // balancing send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000); @@ -155,7 +154,11 @@ fn test_async_inbound_update_fee() { // (6) RAA is delivered -> // First nodes[0] generates an update_fee - nodes[0].node.update_fee(channel_id, get_feerate!(nodes[0], channel_id) + 20).unwrap(); + { + let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); + *feerate_lock += 20; + } + nodes[0].node.timer_tick_occurred(); check_added_monitors!(nodes[0], 1); let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); @@ -245,15 +248,18 @@ fn test_update_fee_unordered_raa() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let chan = create_announced_chan_between_nodes(&nodes, 0, 1, 
InitFeatures::known(), InitFeatures::known()); - let channel_id = chan.2; + create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); let logger = test_utils::TestLogger::new(); // balancing send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000); // First nodes[0] generates an update_fee - nodes[0].node.update_fee(channel_id, get_feerate!(nodes[0], channel_id) + 20).unwrap(); + { + let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); + *feerate_lock += 20; + } + nodes[0].node.timer_tick_occurred(); check_added_monitors!(nodes[0], 1); let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); @@ -300,8 +306,7 @@ fn test_multi_flight_update_fee() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - let channel_id = chan.2; + create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); // A B // update_fee/commitment_signed -> @@ -323,8 +328,13 @@ fn test_multi_flight_update_fee() { // revoke_and_ack -> // First nodes[0] generates an update_fee - let initial_feerate = get_feerate!(nodes[0], channel_id); - nodes[0].node.update_fee(channel_id, initial_feerate + 20).unwrap(); + let initial_feerate; + { + let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); + initial_feerate = *feerate_lock; + *feerate_lock = initial_feerate + 20; + } + nodes[0].node.timer_tick_occurred(); check_added_monitors!(nodes[0], 1); let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); @@ -344,7 +354,11 @@ fn test_multi_flight_update_fee() { // nodes[0] is awaiting a revoke from nodes[1] before it will create a new commitment // transaction: - nodes[0].node.update_fee(channel_id, initial_feerate + 40).unwrap(); 
+ { + let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); + *feerate_lock = initial_feerate + 40; + } + nodes[0].node.timer_tick_occurred(); assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -536,11 +550,13 @@ fn test_update_fee_vanilla() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - let channel_id = chan.2; + create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - let feerate = get_feerate!(nodes[0], channel_id); - nodes[0].node.update_fee(channel_id, feerate+25).unwrap(); + { + let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); + *feerate_lock += 25; + } + nodes[0].node.timer_tick_occurred(); check_added_monitors!(nodes[0], 1); let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); @@ -582,7 +598,11 @@ fn test_update_fee_that_funder_cannot_afford() { let channel_id = chan.2; let feerate = 260; - nodes[0].node.update_fee(channel_id, feerate).unwrap(); + { + let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); + *feerate_lock = feerate; + } + nodes[0].node.timer_tick_occurred(); check_added_monitors!(nodes[0], 1); let update_msg = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); @@ -605,7 +625,11 @@ fn test_update_fee_that_funder_cannot_afford() { //Add 2 to the previous fee rate to the final fee increases by 1 (with no HTLCs the fee is essentially //fee_rate*(724/1000) so the increment of 1*0.724 is rounded back down) - nodes[0].node.update_fee(channel_id, feerate+2).unwrap(); + { + let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); + 
*feerate_lock = feerate + 2; + } + nodes[0].node.timer_tick_occurred(); check_added_monitors!(nodes[0], 1); let update2_msg = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); @@ -628,14 +652,16 @@ fn test_update_fee_with_fundee_update_add_htlc() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let chan = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); - let channel_id = chan.2; let logger = test_utils::TestLogger::new(); // balancing send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000); - let feerate = get_feerate!(nodes[0], channel_id); - nodes[0].node.update_fee(channel_id, feerate+20).unwrap(); + { + let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); + *feerate_lock += 20; + } + nodes[0].node.timer_tick_occurred(); check_added_monitors!(nodes[0], 1); let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); @@ -743,8 +769,13 @@ fn test_update_fee() { // revoke_and_ack -> // Create and deliver (1)... - let feerate = get_feerate!(nodes[0], channel_id); - nodes[0].node.update_fee(channel_id, feerate+20).unwrap(); + let feerate; + { + let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); + feerate = *feerate_lock; + *feerate_lock = feerate + 20; + } + nodes[0].node.timer_tick_occurred(); check_added_monitors!(nodes[0], 1); let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); @@ -768,7 +799,11 @@ fn test_update_fee() { check_added_monitors!(nodes[0], 1); // Create and deliver (4)... 
- nodes[0].node.update_fee(channel_id, feerate+30).unwrap(); + { + let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); + *feerate_lock = feerate + 30; + } + nodes[0].node.timer_tick_occurred(); check_added_monitors!(nodes[0], 1); let events_0 = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events_0.len(), 1); @@ -6256,7 +6291,11 @@ fn test_fail_holding_cell_htlc_upon_free() { // First nodes[0] generates an update_fee, setting the channel's // pending_update_fee. - nodes[0].node.update_fee(chan.2, get_feerate!(nodes[0], chan.2) + 20).unwrap(); + { + let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); + *feerate_lock += 20; + } + nodes[0].node.timer_tick_occurred(); check_added_monitors!(nodes[0], 1); let events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -6330,7 +6369,11 @@ fn test_free_and_fail_holding_cell_htlcs() { // First nodes[0] generates an update_fee, setting the channel's // pending_update_fee. - nodes[0].node.update_fee(chan.2, get_feerate!(nodes[0], chan.2) + 200).unwrap(); + { + let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); + *feerate_lock += 200; + } + nodes[0].node.timer_tick_occurred(); check_added_monitors!(nodes[0], 1); let events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -6458,7 +6501,11 @@ fn test_fail_holding_cell_htlc_upon_free_multihop() { // First nodes[1] generates an update_fee, setting the channel's // pending_update_fee. 
- nodes[1].node.update_fee(chan_1_2.2, get_feerate!(nodes[1], chan_1_2.2) + 20).unwrap(); + { + let mut feerate_lock = chanmon_cfgs[1].fee_estimator.sat_per_kw.lock().unwrap(); + *feerate_lock += 20; + } + nodes[1].node.timer_tick_occurred(); check_added_monitors!(nodes[1], 1); let events = nodes[1].node.get_and_clear_pending_msg_events(); From f8caa325e51d8afb0cb65effd9cdb351ffda3fc7 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Wed, 30 Jun 2021 03:09:04 +0000 Subject: [PATCH 4/9] Add fuzz coverage of (potential) fee update messages --- fuzz/src/chanmon_consistency.rs | 89 +++++++++++++++++++++++------- lightning/src/ln/channel.rs | 18 +++++- lightning/src/ln/channelmanager.rs | 31 +++++++++++ lightning/src/ln/mod.rs | 4 ++ 4 files changed, 122 insertions(+), 20 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 4ce0df09286..21a8b37f1cd 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -37,6 +37,7 @@ use lightning::chain::chaininterface::{BroadcasterInterface, ConfirmationTarget, use lightning::chain::keysinterface::{KeysInterface, InMemorySigner}; use lightning::ln::{PaymentHash, PaymentPreimage, PaymentSecret}; use lightning::ln::channelmanager::{ChainParameters, ChannelManager, PaymentSendFailure, ChannelManagerReadArgs}; +use lightning::ln::channel::FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE; use lightning::ln::features::{ChannelFeatures, InitFeatures, NodeFeatures}; use lightning::ln::msgs::{CommitmentUpdate, ChannelMessageHandler, DecodeError, UpdateAddHTLC, Init}; use lightning::ln::script::ShutdownScript; @@ -58,16 +59,27 @@ use bitcoin::secp256k1::recovery::RecoverableSignature; use bitcoin::secp256k1::Secp256k1; use std::mem; -use std::cmp::Ordering; +use std::cmp::{self, Ordering}; use std::collections::{HashSet, hash_map, HashMap}; use std::sync::{Arc,Mutex}; use std::sync::atomic; use std::io::Cursor; -struct FuzzEstimator {} +const MAX_FEE: u32 = 10_000; +struct 
FuzzEstimator { + ret_val: atomic::AtomicU32, +} impl FeeEstimator for FuzzEstimator { - fn get_est_sat_per_1000_weight(&self, _: ConfirmationTarget) -> u32 { - 253 + fn get_est_sat_per_1000_weight(&self, conf_target: ConfirmationTarget) -> u32 { + // We force-close channels if our counterparty sends us a feerate which is a small multiple + // of our HighPriority fee estimate or smaller than our Background fee estimate. Thus, we + // always return a HighPriority feerate here which is >= the maximum Normal feerate and a + // Background feerate which is <= the minimum Normal feerate. + match conf_target { + ConfirmationTarget::HighPriority => MAX_FEE, + ConfirmationTarget::Background => 253, + ConfirmationTarget::Normal => cmp::min(self.ret_val.load(atomic::Ordering::Acquire), MAX_FEE), + } } } @@ -132,7 +144,7 @@ impl chain::Watch for TestChainMonitor { }; let deserialized_monitor = <(BlockHash, channelmonitor::ChannelMonitor)>:: read(&mut Cursor::new(&map_entry.get().1), &*self.keys).unwrap().1; - deserialized_monitor.update_monitor(&update, &&TestBroadcaster{}, &&FuzzEstimator{}, &self.logger).unwrap(); + deserialized_monitor.update_monitor(&update, &&TestBroadcaster{}, &&FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }, &self.logger).unwrap(); let mut ser = VecWriter(Vec::new()); deserialized_monitor.write(&mut ser).unwrap(); map_entry.insert((update.update_id, ser.0)); @@ -334,14 +346,13 @@ fn send_hop_payment(source: &ChanMan, middle: &ChanMan, middle_chan_id: u64, des #[inline] pub fn do_test(data: &[u8], out: Out) { - let fee_est = Arc::new(FuzzEstimator{}); let broadcast = Arc::new(TestBroadcaster{}); macro_rules! 
make_node { - ($node_id: expr) => { { + ($node_id: expr, $fee_estimator: expr) => { { let logger: Arc = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone())); let keys_manager = Arc::new(KeyProvider { node_id: $node_id, rand_bytes_id: atomic::AtomicU32::new(0), revoked_commitments: Mutex::new(HashMap::new()) }); - let monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), fee_est.clone(), Arc::new(TestPersister{}), Arc::clone(&keys_manager))); + let monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), $fee_estimator.clone(), Arc::new(TestPersister{}), Arc::clone(&keys_manager))); let mut config = UserConfig::default(); config.channel_options.forwarding_fee_proportional_millionths = 0; @@ -351,16 +362,16 @@ pub fn do_test(data: &[u8], out: Out) { network, best_block: BestBlock::from_genesis(network), }; - (ChannelManager::new(fee_est.clone(), monitor.clone(), broadcast.clone(), Arc::clone(&logger), keys_manager.clone(), config, params), + (ChannelManager::new($fee_estimator.clone(), monitor.clone(), broadcast.clone(), Arc::clone(&logger), keys_manager.clone(), config, params), monitor, keys_manager) } } } macro_rules! 
reload_node { - ($ser: expr, $node_id: expr, $old_monitors: expr, $keys_manager: expr) => { { + ($ser: expr, $node_id: expr, $old_monitors: expr, $keys_manager: expr, $fee_estimator: expr) => { { let keys_manager = Arc::clone(& $keys_manager); let logger: Arc = Arc::new(test_logger::TestLogger::new($node_id.to_string(), out.clone())); - let chain_monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), fee_est.clone(), Arc::new(TestPersister{}), Arc::clone(& $keys_manager))); + let chain_monitor = Arc::new(TestChainMonitor::new(broadcast.clone(), logger.clone(), $fee_estimator.clone(), Arc::new(TestPersister{}), Arc::clone(& $keys_manager))); let mut config = UserConfig::default(); config.channel_options.forwarding_fee_proportional_millionths = 0; @@ -379,7 +390,7 @@ pub fn do_test(data: &[u8], out: Out) { let read_args = ChannelManagerReadArgs { keys_manager, - fee_estimator: fee_est.clone(), + fee_estimator: $fee_estimator.clone(), chain_monitor: chain_monitor.clone(), tx_broadcaster: broadcast.clone(), logger, @@ -497,11 +508,18 @@ pub fn do_test(data: &[u8], out: Out) { } } } + let fee_est_a = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }); + let mut last_htlc_clear_fee_a = 253; + let fee_est_b = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }); + let mut last_htlc_clear_fee_b = 253; + let fee_est_c = Arc::new(FuzzEstimator { ret_val: atomic::AtomicU32::new(253) }); + let mut last_htlc_clear_fee_c = 253; + // 3 nodes is enough to hit all the possible cases, notably unknown-source-unknown-dest // forwarding. 
- let (node_a, mut monitor_a, keys_manager_a) = make_node!(0); - let (node_b, mut monitor_b, keys_manager_b) = make_node!(1); - let (node_c, mut monitor_c, keys_manager_c) = make_node!(2); + let (node_a, mut monitor_a, keys_manager_a) = make_node!(0, fee_est_a); + let (node_b, mut monitor_b, keys_manager_b) = make_node!(1, fee_est_b); + let (node_c, mut monitor_c, keys_manager_c) = make_node!(2, fee_est_c); let mut nodes = [node_a, node_b, node_c]; @@ -639,7 +657,6 @@ pub fn do_test(data: &[u8], out: Out) { events::MessageSendEvent::UpdateHTLCs { node_id, updates: CommitmentUpdate { update_add_htlcs, update_fail_htlcs, update_fulfill_htlcs, update_fail_malformed_htlcs, update_fee, commitment_signed } } => { for dest in nodes.iter() { if dest.get_our_node_id() == node_id { - assert!(update_fee.is_none()); for update_add in update_add_htlcs.iter() { if !$corrupt_forward { dest.handle_update_add_htlc(&nodes[$node].get_our_node_id(), update_add); @@ -663,6 +680,9 @@ pub fn do_test(data: &[u8], out: Out) { for update_fail_malformed in update_fail_malformed_htlcs.iter() { dest.handle_update_fail_malformed_htlc(&nodes[$node].get_our_node_id(), update_fail_malformed); } + if let Some(msg) = update_fee { + dest.handle_update_fee(&nodes[$node].get_our_node_id(), &msg); + } let processed_change = !update_add_htlcs.is_empty() || !update_fulfill_htlcs.is_empty() || !update_fail_htlcs.is_empty() || !update_fail_malformed_htlcs.is_empty(); if $limit_events != ProcessMessages::AllMessages && processed_change { @@ -928,7 +948,7 @@ pub fn do_test(data: &[u8], out: Out) { node_a_ser.0.clear(); nodes[0].write(&mut node_a_ser).unwrap(); } - let (new_node_a, new_monitor_a) = reload_node!(node_a_ser, 0, monitor_a, keys_manager_a); + let (new_node_a, new_monitor_a) = reload_node!(node_a_ser, 0, monitor_a, keys_manager_a, fee_est_a); nodes[0] = new_node_a; monitor_a = new_monitor_a; }, @@ -947,7 +967,7 @@ pub fn do_test(data: &[u8], out: Out) { bc_events.clear(); cb_events.clear(); } - let 
(new_node_b, new_monitor_b) = reload_node!(node_b_ser, 1, monitor_b, keys_manager_b); + let (new_node_b, new_monitor_b) = reload_node!(node_b_ser, 1, monitor_b, keys_manager_b, fee_est_b); nodes[1] = new_node_b; monitor_b = new_monitor_b; }, @@ -961,7 +981,7 @@ pub fn do_test(data: &[u8], out: Out) { node_c_ser.0.clear(); nodes[2].write(&mut node_c_ser).unwrap(); } - let (new_node_c, new_monitor_c) = reload_node!(node_c_ser, 2, monitor_c, keys_manager_c); + let (new_node_c, new_monitor_c) = reload_node!(node_c_ser, 2, monitor_c, keys_manager_c, fee_est_c); nodes[2] = new_node_c; monitor_c = new_monitor_c; }, @@ -1023,6 +1043,33 @@ pub fn do_test(data: &[u8], out: Out) { 0x6c => { send_hop_payment(&nodes[0], &nodes[1], chan_a, &nodes[2], chan_b, 1, &mut payment_id); }, 0x6d => { send_hop_payment(&nodes[2], &nodes[1], chan_b, &nodes[0], chan_a, 1, &mut payment_id); }, + 0x80 => { + let max_feerate = last_htlc_clear_fee_a * FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE as u32; + if fee_est_a.ret_val.fetch_add(250, atomic::Ordering::AcqRel) + 250 > max_feerate { + fee_est_a.ret_val.store(max_feerate, atomic::Ordering::Release); + } + nodes[0].maybe_update_chan_fees(); + }, + 0x81 => { fee_est_a.ret_val.store(253, atomic::Ordering::Release); nodes[0].maybe_update_chan_fees(); }, + + 0x84 => { + let max_feerate = last_htlc_clear_fee_b * FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE as u32; + if fee_est_b.ret_val.fetch_add(250, atomic::Ordering::AcqRel) + 250 > max_feerate { + fee_est_b.ret_val.store(max_feerate, atomic::Ordering::Release); + } + nodes[1].maybe_update_chan_fees(); + }, + 0x85 => { fee_est_b.ret_val.store(253, atomic::Ordering::Release); nodes[1].maybe_update_chan_fees(); }, + + 0x88 => { + let max_feerate = last_htlc_clear_fee_c * FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE as u32; + if fee_est_c.ret_val.fetch_add(250, atomic::Ordering::AcqRel) + 250 > max_feerate { + fee_est_c.ret_val.store(max_feerate, atomic::Ordering::Release); + } + nodes[2].maybe_update_chan_fees(); + 
}, + 0x89 => { fee_est_c.ret_val.store(253, atomic::Ordering::Release); nodes[2].maybe_update_chan_fees(); }, + 0xff => { // Test that no channel is in a stuck state where neither party can send funds even // after we resolve all pending events. @@ -1078,6 +1125,10 @@ pub fn do_test(data: &[u8], out: Out) { assert!( send_payment(&nodes[1], &nodes[2], chan_b, 10_000_000, &mut payment_id) || send_payment(&nodes[2], &nodes[1], chan_b, 10_000_000, &mut payment_id)); + + last_htlc_clear_fee_a = fee_est_a.ret_val.load(atomic::Ordering::Acquire); + last_htlc_clear_fee_b = fee_est_b.ret_val.load(atomic::Ordering::Acquire); + last_htlc_clear_fee_c = fee_est_c.ret_val.load(atomic::Ordering::Acquire); }, _ => test_return!(), } diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index dc09dc8242f..2a3edd6845e 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -341,6 +341,22 @@ pub enum UpdateFulfillCommitFetch { DuplicateClaim {}, } +/// If the majority of the channels funds are to the fundee and the initiator holds only just +/// enough funds to cover their reserve value, channels are at risk of getting "stuck". Because the +/// initiator controls the feerate, if they then go to increase the channel fee, they may have no +/// balance but the fundee is unable to send a payment as the increase in fee more than drains +/// their reserve value. Thus, neither side can send a new HTLC and the channel becomes useless. +/// Thus, before sending an HTLC when we are the initiator, we check that the feerate can increase +/// by this multiple without hitting this case, before sending. +/// This multiple is effectively the maximum feerate "jump" we expect until more HTLCs flow over +/// the channel. Sadly, there isn't really a good number for this - if we expect to have no new +/// HTLCs for days we may need this to suffice for feerate increases across days, but that may +/// leave the channel less usable as we hold a bigger reserve. 
+#[cfg(fuzzing)] +pub const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2; +#[cfg(not(fuzzing))] +const FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE: u64 = 2; + // TODO: We should refactor this to be an Inbound/OutboundChannel until initial setup handshaking // has been completed, and then turn into a Channel to get compiler-time enforcement of things like // calling channel_id() before we're set up or things like get_outbound_funding_signed on an @@ -4326,7 +4342,7 @@ impl Channel { // `2 *` and extra HTLC are for the fee spike buffer. let commit_tx_fee_msat = if self.is_outbound() { let htlc_candidate = HTLCCandidate::new(amount_msat, HTLCInitiator::LocalOffered); - 2 * self.next_local_commit_tx_fee_msat(htlc_candidate, Some(())) + FEE_SPIKE_BUFFER_FEE_INCREASE_MULTIPLE * self.next_local_commit_tx_fee_msat(htlc_candidate, Some(())) } else { 0 }; if pending_value_to_self_msat - amount_msat < commit_tx_fee_msat { return Err(ChannelError::Ignore(format!("Cannot send value that would not leave enough to pay for fees. Pending value to self: {}. local_commit_tx_fee {}", pending_value_to_self_msat, commit_tx_fee_msat))); diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 389e67ddb9b..7304c698ab1 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -2612,6 +2612,37 @@ impl ChannelMana (retain_channel, NotifyOption::DoPersist, ret_err) } + #[cfg(fuzzing)] + /// In chanmon_consistency we want to sometimes do the channel fee updates done in + /// timer_tick_occurred, but we can't generate the disabled channel updates as it considers + /// these a fuzz failure (as they usually indicate a channel force-close, which is exactly what + /// it wants to detect). Thus, we have a variant exposed here for its benefit. 
+ pub fn maybe_update_chan_fees(&self) { + PersistenceNotifierGuard::optionally_notify(&self.total_consistency_lock, &self.persistence_notifier, || { + let mut should_persist = NotifyOption::SkipPersist; + + let new_feerate = self.fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::Normal); + + let mut handle_errors = Vec::new(); + { + let mut channel_state_lock = self.channel_state.lock().unwrap(); + let channel_state = &mut *channel_state_lock; + let pending_msg_events = &mut channel_state.pending_msg_events; + let short_to_id = &mut channel_state.short_to_id; + channel_state.by_id.retain(|chan_id, chan| { + let (retain_channel, chan_needs_persist, err) = self.update_channel_fee(short_to_id, pending_msg_events, chan_id, chan, new_feerate); + if chan_needs_persist == NotifyOption::DoPersist { should_persist = NotifyOption::DoPersist; } + if err.is_err() { + handle_errors.push(err); + } + retain_channel + }); + } + + should_persist + }); + } + /// Performs actions which should happen on startup and roughly once per minute thereafter. 
/// /// This currently includes: diff --git a/lightning/src/ln/mod.rs b/lightning/src/ln/mod.rs index bebd763f660..5576d8bd057 100644 --- a/lightning/src/ln/mod.rs +++ b/lightning/src/ln/mod.rs @@ -34,7 +34,11 @@ pub mod peer_channel_encryptor; #[cfg(not(feature = "fuzztarget"))] pub(crate) mod peer_channel_encryptor; +#[cfg(feature = "fuzztarget")] +pub mod channel; +#[cfg(not(feature = "fuzztarget"))] mod channel; + mod onion_utils; mod wire; From b09a60b7b554834e43f72e60059b21fdbafef3f1 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Tue, 13 Jul 2021 02:23:41 +0000 Subject: [PATCH 5/9] Add more logging during chanmon_consistency runs --- fuzz/src/chanmon_consistency.rs | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/fuzz/src/chanmon_consistency.rs b/fuzz/src/chanmon_consistency.rs index 21a8b37f1cd..7eacf25365b 100644 --- a/fuzz/src/chanmon_consistency.rs +++ b/fuzz/src/chanmon_consistency.rs @@ -655,9 +655,10 @@ pub fn do_test(data: &[u8], out: Out) { had_events = true; match event { events::MessageSendEvent::UpdateHTLCs { node_id, updates: CommitmentUpdate { update_add_htlcs, update_fail_htlcs, update_fulfill_htlcs, update_fail_malformed_htlcs, update_fee, commitment_signed } } => { - for dest in nodes.iter() { + for (idx, dest) in nodes.iter().enumerate() { if dest.get_our_node_id() == node_id { for update_add in update_add_htlcs.iter() { + out.locked_write(format!("Delivering update_add_htlc to node {}.\n", idx).as_bytes()); if !$corrupt_forward { dest.handle_update_add_htlc(&nodes[$node].get_our_node_id(), update_add); } else { @@ -672,15 +673,19 @@ pub fn do_test(data: &[u8], out: Out) { } } for update_fulfill in update_fulfill_htlcs.iter() { + out.locked_write(format!("Delivering update_fulfill_htlc to node {}.\n", idx).as_bytes()); dest.handle_update_fulfill_htlc(&nodes[$node].get_our_node_id(), update_fulfill); } for update_fail in update_fail_htlcs.iter() { + out.locked_write(format!("Delivering update_fail_htlc 
to node {}.\n", idx).as_bytes()); dest.handle_update_fail_htlc(&nodes[$node].get_our_node_id(), update_fail); } for update_fail_malformed in update_fail_malformed_htlcs.iter() { + out.locked_write(format!("Delivering update_fail_malformed_htlc to node {}.\n", idx).as_bytes()); dest.handle_update_fail_malformed_htlc(&nodes[$node].get_our_node_id(), update_fail_malformed); } if let Some(msg) = update_fee { + out.locked_write(format!("Delivering update_fee to node {}.\n", idx).as_bytes()); dest.handle_update_fee(&nodes[$node].get_our_node_id(), &msg); } let processed_change = !update_add_htlcs.is_empty() || !update_fulfill_htlcs.is_empty() || @@ -697,21 +702,24 @@ pub fn do_test(data: &[u8], out: Out) { } }); break; } + out.locked_write(format!("Delivering commitment_signed to node {}.\n", idx).as_bytes()); dest.handle_commitment_signed(&nodes[$node].get_our_node_id(), &commitment_signed); break; } } }, events::MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => { - for dest in nodes.iter() { + for (idx, dest) in nodes.iter().enumerate() { if dest.get_our_node_id() == *node_id { + out.locked_write(format!("Delivering revoke_and_ack to node {}.\n", idx).as_bytes()); dest.handle_revoke_and_ack(&nodes[$node].get_our_node_id(), msg); } } }, events::MessageSendEvent::SendChannelReestablish { ref node_id, ref msg } => { - for dest in nodes.iter() { + for (idx, dest) in nodes.iter().enumerate() { if dest.get_our_node_id() == *node_id { + out.locked_write(format!("Delivering channel_reestablish to node {}.\n", idx).as_bytes()); dest.handle_channel_reestablish(&nodes[$node].get_our_node_id(), msg); } } @@ -844,7 +852,9 @@ pub fn do_test(data: &[u8], out: Out) { } } } - match get_slice!(1)[0] { + let v = get_slice!(1)[0]; + out.locked_write(format!("READ A BYTE! HANDLING INPUT {:x}...........\n", v).as_bytes()); + match v { // In general, we keep related message groups close together in binary form, allowing // bit-twiddling mutations to have similar effects. 
This is probably overkill, but no // harm in doing so. From 9d49c5c1a174bf807ed04421f0f08578b569448c Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Wed, 30 Jun 2021 18:12:51 +0000 Subject: [PATCH 6/9] Fix re-sending commitment updates with an outbound fee update When we send an update_fee to our counterparty on an outbound channel, if we need to re-send a commitment update after reconnection, the update_fee must be present in the re-sent commitment update messages. However, wewere always setting the update_fee field in the commitment update to None, causing us to generate invalid commitment signatures and get channel force-closures. This fixes the issue by correctly detecting when an update_fee needs to be re-sent, doing so when required. --- lightning/src/ln/chanmon_update_fail_tests.rs | 92 +++++++++++++++++++ lightning/src/ln/channel.rs | 15 ++- 2 files changed, 103 insertions(+), 4 deletions(-) diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index e85a69017f5..55a8c42a81b 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -2054,6 +2054,98 @@ fn test_path_paused_mpp() { claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage); } +fn do_update_fee_resend_test(deliver_update: bool, parallel_updates: bool) { + // In early versions we did not handle resending of update_fee on reconnect correctly. The + // chanmon_consistency fuzz target, of course, immediately found it, but we test a few cases + // explicitly here. 
+ let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); + send_payment(&nodes[0], &[&nodes[1]], 1000); + + { + let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); + *feerate_lock += 20; + } + nodes[0].node.timer_tick_occurred(); + check_added_monitors!(nodes[0], 1); + let update_msgs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + assert!(update_msgs.update_fee.is_some()); + if deliver_update { + nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msgs.update_fee.as_ref().unwrap()); + } + + if parallel_updates { + { + let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); + *feerate_lock += 20; + } + nodes[0].node.timer_tick_occurred(); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + } + + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known() }); + let as_connect_msg = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()); + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known() }); + let bs_connect_msg = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()); + + nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_connect_msg); + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), 
&bs_connect_msg); + let update_msgs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + assert!(update_msgs.update_fee.is_some()); + nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msgs.update_fee.as_ref().unwrap()); + if parallel_updates { + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &update_msgs.commitment_signed); + check_added_monitors!(nodes[1], 1); + let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa); + check_added_monitors!(nodes[0], 1); + let as_second_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_first_cs); + check_added_monitors!(nodes[0], 1); + let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + + nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), as_second_update.update_fee.as_ref().unwrap()); + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_second_update.commitment_signed); + check_added_monitors!(nodes[1], 1); + let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_first_raa); + let bs_second_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + check_added_monitors!(nodes[1], 1); + + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa); + check_added_monitors!(nodes[0], 1); + + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_cs.commitment_signed); + check_added_monitors!(nodes[0], 1); + let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + + 
nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_second_raa); + check_added_monitors!(nodes[1], 1); + } else { + commitment_signed_dance!(nodes[1], nodes[0], update_msgs.commitment_signed, false); + } + + send_payment(&nodes[0], &[&nodes[1]], 1000); +} +#[test] +fn update_fee_resend_test() { + do_update_fee_resend_test(false, false); + do_update_fee_resend_test(true, false); + do_update_fee_resend_test(false, true); + do_update_fee_resend_test(true, true); +} + fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { // Tests that, when we serialize a channel with AddHTLC entries in the holding cell, we // properly free them on reconnect. We previously failed such HTLCs upon serialization, but diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 2a3edd6845e..4795a2524ba 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -3145,11 +3145,18 @@ impl Channel { } } - log_trace!(logger, "Regenerated latest commitment update in channel {} with {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds", - log_bytes!(self.channel_id()), update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len()); + let update_fee = if self.is_outbound() && self.pending_update_fee.is_some() { + Some(msgs::UpdateFee { + channel_id: self.channel_id(), + feerate_per_kw: self.pending_update_fee.unwrap(), + }) + } else { None }; + + log_trace!(logger, "Regenerated latest commitment update in channel {} with{} {} update_adds, {} update_fulfills, {} update_fails, and {} update_fail_malformeds", + log_bytes!(self.channel_id()), if update_fee.is_some() { " update_fee," } else { "" }, + update_add_htlcs.len(), update_fulfill_htlcs.len(), update_fail_htlcs.len(), update_fail_malformed_htlcs.len()); msgs::CommitmentUpdate { - update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, - update_fee: None, + 
update_add_htlcs, update_fulfill_htlcs, update_fail_htlcs, update_fail_malformed_htlcs, update_fee, commitment_signed: self.send_commitment_no_state_update(logger).expect("It looks like we failed to re-generate a commitment_signed we had previously sent?").0, } } From b3d0a8dd4e16b7ea89a53096d22a631959aa622f Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 12 Jul 2021 15:39:27 +0000 Subject: [PATCH 7/9] Fix handling of inbound uncommitted feerate updates If we receive an update_fee but do not receive a commitment_signed, we should not persist the pending fee update to disk or hold on to it after our peer disconnects. In order to make the code the most readable, we add a state enum which matches the relevant states from InboundHTLCState, allowing for more simple code comparison between inbound HTLC handling and update_fee handling. --- lightning/src/ln/chanmon_update_fail_tests.rs | 99 +++++++++- lightning/src/ln/channel.rs | 186 +++++++++++------- 2 files changed, 215 insertions(+), 70 deletions(-) diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index 55a8c42a81b..37bcabfcea8 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -2054,6 +2054,98 @@ fn test_path_paused_mpp() { claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, payment_preimage); } +#[test] +fn test_pending_update_fee_ack_on_reconnect() { + // In early versions of our automated fee update patch, nodes did not correctly use the + // previous channel feerate after sending an undelivered revoke_and_ack when re-sending an + // undelivered commitment_signed. 
+ // + // B sends A new HTLC + CS, not delivered + // A sends B update_fee + CS + // B receives the CS and sends RAA, previously causing B to lock in the new feerate + // reconnect + // B resends initial CS, using the original fee + + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known()); + send_payment(&nodes[0], &[&nodes[1]], 100_000_00); + + let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash!(&nodes[0]); + let route = get_route(&nodes[1].node.get_our_node_id(), &nodes[1].net_graph_msg_handler.network_graph.read().unwrap(), + &nodes[0].node.get_our_node_id(), Some(InvoiceFeatures::known()), None, &Vec::new(), 1_000_000, TEST_FINAL_CLTV, nodes[1].logger).unwrap(); + nodes[1].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap(); + check_added_monitors!(nodes[1], 1); + let bs_initial_send_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + // bs_initial_send_msgs are not delivered until they are re-generated after reconnect + + { + let mut feerate_lock = chanmon_cfgs[0].fee_estimator.sat_per_kw.lock().unwrap(); + *feerate_lock *= 2; + } + nodes[0].node.timer_tick_occurred(); + check_added_monitors!(nodes[0], 1); + let as_update_fee_msgs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + assert!(as_update_fee_msgs.update_fee.is_some()); + + nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), as_update_fee_msgs.update_fee.as_ref().unwrap()); + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_update_fee_msgs.commitment_signed); + check_added_monitors!(nodes[1], 1); + let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, 
nodes[0].node.get_our_node_id()); + // bs_first_raa is not delivered until it is re-generated after reconnect + + nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id(), false); + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false); + + nodes[0].node.peer_connected(&nodes[1].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known() }); + let as_connect_msg = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReestablish, nodes[1].node.get_our_node_id()); + nodes[1].node.peer_connected(&nodes[0].node.get_our_node_id(), &msgs::Init { features: InitFeatures::known() }); + let bs_connect_msg = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()); + + nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_connect_msg); + let bs_resend_msgs = nodes[1].node.get_and_clear_pending_msg_events(); + assert_eq!(bs_resend_msgs.len(), 3); + if let MessageSendEvent::UpdateHTLCs { ref updates, .. } = bs_resend_msgs[0] { + assert_eq!(*updates, bs_initial_send_msgs); + } else { panic!(); } + if let MessageSendEvent::SendRevokeAndACK { ref msg, .. } = bs_resend_msgs[1] { + assert_eq!(*msg, bs_first_raa); + } else { panic!(); } + if let MessageSendEvent::SendChannelUpdate { .. 
} = bs_resend_msgs[2] { } else { panic!(); } + + nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_connect_msg); + get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id()); + + nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &bs_initial_send_msgs.update_add_htlcs[0]); + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_initial_send_msgs.commitment_signed); + check_added_monitors!(nodes[0], 1); + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id())); + check_added_monitors!(nodes[1], 1); + let bs_second_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()).commitment_signed; + + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa); + check_added_monitors!(nodes[0], 1); + nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()).commitment_signed); + check_added_monitors!(nodes[1], 1); + let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_cs); + check_added_monitors!(nodes[0], 1); + nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_third_raa); + check_added_monitors!(nodes[0], 1); + + nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id())); + check_added_monitors!(nodes[1], 1); + + expect_pending_htlcs_forwardable!(nodes[0]); + expect_payment_received!(nodes[0], payment_hash, payment_secret, 1_000_000); + + claim_payment(&nodes[1], &[&nodes[0]], payment_preimage); +} + fn do_update_fee_resend_test(deliver_update: bool, parallel_updates: 
bool) { // In early versions we did not handle resending of update_fee on reconnect correctly. The // chanmon_consistency fuzz target, of course, immediately found it, but we test a few cases @@ -2096,10 +2188,15 @@ fn do_update_fee_resend_test(deliver_update: bool, parallel_updates: bool) { let bs_connect_msg = get_event_msg!(nodes[1], MessageSendEvent::SendChannelReestablish, nodes[0].node.get_our_node_id()); nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_connect_msg); + get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id()); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); nodes[0].node.handle_channel_reestablish(&nodes[1].node.get_our_node_id(), &bs_connect_msg); - let update_msgs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + let mut as_reconnect_msgs = nodes[0].node.get_and_clear_pending_msg_events(); + assert_eq!(as_reconnect_msgs.len(), 2); + if let MessageSendEvent::SendChannelUpdate { .. } = as_reconnect_msgs.pop().unwrap() {} else { panic!(); } + let update_msgs = if let MessageSendEvent::UpdateHTLCs { updates, .. 
} = as_reconnect_msgs.pop().unwrap() + { updates } else { panic!(); }; assert!(update_msgs.update_fee.is_some()); nodes[1].node.handle_update_fee(&nodes[0].node.get_our_node_id(), update_msgs.update_fee.as_ref().unwrap()); if parallel_updates { diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 4795a2524ba..9a5fd042222 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -63,6 +63,21 @@ pub struct ChannelValueStat { pub counterparty_dust_limit_msat: u64, } +#[derive(Clone, Copy, PartialEq)] +enum FeeUpdateState { + // Inbound states mirroring InboundHTLCState + RemoteAnnounced, + AwaitingRemoteRevokeToAnnounce, + // Note that we do not have a AwaitingAnnouncedRemoteRevoke variant here as it is universally + // handled the same as `Committed`, with the only exception in `InboundHTLCState` being the + // distinction of when we allow ourselves to forward the HTLC. Because we aren't "forwarding" + // the fee update anywhere, we can simply consider the fee update `Committed` immediately + // instead of setting it to AwaitingAnnouncedRemoteRevoke. + + // Outbound state can only be `LocalAnnounced` or `Committed` + Outbound, +} + enum InboundHTLCRemovalReason { FailRelay(msgs::OnionErrorPacket), FailMalformed(([u8; 32], u16)), @@ -420,7 +435,7 @@ pub(super) struct Channel { // revoke_and_ack is received and new commitment_signed is generated to be // sent to the funder. Otherwise, the pending value is removed when receiving // commitment_signed. - pending_update_fee: Option, + pending_update_fee: Option<(u32, FeeUpdateState)>, // update_fee() during ChannelState::AwaitingRemoteRevoke is hold in // holdina_cell_update_fee then moved to pending_udpate_fee when revoke_and_ack // is received. holding_cell_update_fee is updated when there are additional @@ -1003,10 +1018,10 @@ impl Channel { /// which peer generated this transaction and "to whom" this transaction flows. 
/// Returns (the transaction info, the number of HTLC outputs which were present in the /// transaction, the list of HTLCs which were not ignored when building the transaction). - /// Note that below-dust HTLCs are included in the third return value, but not the second, and - /// sources are provided only for outbound HTLCs in the third return value. + /// Note that below-dust HTLCs are included in the fourth return value, but not the third, and + /// sources are provided only for outbound HTLCs in the fourth return value. #[inline] - fn build_commitment_transaction(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, feerate_per_kw: u32, logger: &L) -> (CommitmentTransaction, usize, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>) where L::Target: Logger { + fn build_commitment_transaction(&self, commitment_number: u64, keys: &TxCreationKeys, local: bool, generated_by_local: bool, logger: &L) -> (CommitmentTransaction, u32, usize, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>) where L::Target: Logger { let mut included_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::new(); let num_htlcs = self.pending_inbound_htlcs.len() + self.pending_outbound_htlcs.len(); let mut included_non_dust_htlcs: Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)> = Vec::with_capacity(num_htlcs); @@ -1016,6 +1031,19 @@ impl Channel { let mut local_htlc_total_msat = 0; let mut value_to_self_msat_offset = 0; + let mut feerate_per_kw = self.feerate_per_kw; + if let Some((feerate, update_state)) = self.pending_update_fee { + if match update_state { + // Note that these match the inclusion criteria when scanning + // pending_inbound_htlcs below. 
+ FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); !generated_by_local }, + FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { debug_assert!(!self.is_outbound()); !generated_by_local }, + FeeUpdateState::Outbound => { assert!(self.is_outbound()); generated_by_local }, + } { + feerate_per_kw = feerate; + } + } + log_trace!(logger, "Building commitment transaction number {} (really {} xor {}) for channel {} for {}, generated by {} with fee {}...", commitment_number, (INITIAL_COMMITMENT_NUMBER - commitment_number), get_commitment_transaction_number_obscure_factor(&self.get_holder_pubkeys().payment_point, &self.get_counterparty_pubkeys().payment_point, self.is_outbound()), @@ -1176,7 +1204,7 @@ impl Channel { htlcs_included.sort_unstable_by_key(|h| h.0.transaction_output_index.unwrap()); htlcs_included.append(&mut included_dust_htlcs); - (tx, num_nondust_htlcs, htlcs_included) + (tx, feerate_per_kw, num_nondust_htlcs, htlcs_included) } #[inline] @@ -1229,6 +1257,7 @@ impl Channel { assert!(self.pending_inbound_htlcs.is_empty()); assert!(self.pending_outbound_htlcs.is_empty()); + assert!(self.pending_update_fee.is_none()); let mut txouts: Vec<(TxOut, ())> = Vec::new(); let mut total_fee_satoshis = proposed_total_fee_satoshis; @@ -1654,7 +1683,7 @@ impl Channel { let funding_script = self.get_funding_redeemscript(); let keys = self.build_holder_transaction_keys(self.cur_holder_commitment_transaction_number)?; - let initial_commitment_tx = self.build_commitment_transaction(self.cur_holder_commitment_transaction_number, &keys, true, false, self.feerate_per_kw, logger).0; + let initial_commitment_tx = self.build_commitment_transaction(self.cur_holder_commitment_transaction_number, &keys, true, false, logger).0; { let trusted_tx = initial_commitment_tx.trust(); let initial_commitment_bitcoin_tx = trusted_tx.built_transaction(); @@ -1668,7 +1697,7 @@ impl Channel { } let counterparty_keys = self.build_remote_transaction_keys()?; - let 
counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, self.feerate_per_kw, logger).0; + let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).0; let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust(); let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction(); @@ -1776,7 +1805,7 @@ impl Channel { let funding_script = self.get_funding_redeemscript(); let counterparty_keys = self.build_remote_transaction_keys()?; - let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, self.feerate_per_kw, logger).0; + let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).0; let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust(); let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction(); @@ -1784,7 +1813,7 @@ impl Channel { log_bytes!(self.channel_id()), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction)); let holder_signer = self.build_holder_transaction_keys(self.cur_holder_commitment_transaction_number)?; - let initial_commitment_tx = self.build_commitment_transaction(self.cur_holder_commitment_transaction_number, &holder_signer, true, false, self.feerate_per_kw, logger).0; + let initial_commitment_tx = self.build_commitment_transaction(self.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).0; { let trusted_tx = initial_commitment_tx.trust(); let initial_commitment_bitcoin_tx = trusted_tx.built_transaction(); @@ -2355,16 +2384,8 @@ impl Channel { let keys = 
self.build_holder_transaction_keys(self.cur_holder_commitment_transaction_number).map_err(|e| (None, e))?; - let mut update_fee = false; - let feerate_per_kw = if !self.is_outbound() && self.pending_update_fee.is_some() { - update_fee = true; - self.pending_update_fee.unwrap() - } else { - self.feerate_per_kw - }; - - let (num_htlcs, mut htlcs_cloned, commitment_tx, commitment_txid) = { - let commitment_tx = self.build_commitment_transaction(self.cur_holder_commitment_transaction_number, &keys, true, false, feerate_per_kw, logger); + let (num_htlcs, mut htlcs_cloned, commitment_tx, commitment_txid, feerate_per_kw) = { + let commitment_tx = self.build_commitment_transaction(self.cur_holder_commitment_transaction_number, &keys, true, false, logger); let commitment_txid = { let trusted_tx = commitment_tx.0.trust(); let bitcoin_tx = trusted_tx.built_transaction(); @@ -2379,12 +2400,17 @@ impl Channel { } bitcoin_tx.txid }; - let htlcs_cloned: Vec<_> = commitment_tx.2.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect(); - (commitment_tx.1, htlcs_cloned, commitment_tx.0, commitment_txid) + let htlcs_cloned: Vec<_> = commitment_tx.3.iter().map(|htlc| (htlc.0.clone(), htlc.1.map(|h| h.clone()))).collect(); + (commitment_tx.2, htlcs_cloned, commitment_tx.0, commitment_txid, commitment_tx.1) }; + // If our counterparty updated the channel fee in this commitment transaction, check that + // they can actually afford the new fee now. 
+ let update_fee = if let Some((_, update_state)) = self.pending_update_fee { + update_state == FeeUpdateState::RemoteAnnounced + } else { false }; + if update_fee { debug_assert!(!self.is_outbound()); } let total_fee = feerate_per_kw as u64 * (COMMITMENT_TX_BASE_WEIGHT + (num_htlcs as u64) * COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000; - //If channel fee was updated by funder confirm funder can afford the new fee rate when applied to the current local commitment transaction if update_fee { let counterparty_reserve_we_require = Channel::::get_holder_selected_channel_reserve_satoshis(self.channel_value_satoshis); if self.channel_value_satoshis - self.value_to_self_msat / 1000 < total_fee + counterparty_reserve_we_require { @@ -2448,16 +2474,10 @@ impl Channel { // Update state now that we've passed all the can-fail calls... let mut need_commitment = false; - if !self.is_outbound() { - if let Some(fee_update) = self.pending_update_fee { - self.feerate_per_kw = fee_update; - // We later use the presence of pending_update_fee to indicate we should generate a - // commitment_signed upon receipt of revoke_and_ack, so we can only set it to None - // if we're not awaiting a revoke (ie will send a commitment_signed now). 
- if (self.channel_state & ChannelState::AwaitingRemoteRevoke as u32) == 0 { - need_commitment = true; - self.pending_update_fee = None; - } + if let &mut Some((_, ref mut update_state)) = &mut self.pending_update_fee { + if *update_state == FeeUpdateState::RemoteAnnounced { + *update_state = FeeUpdateState::AwaitingRemoteRevokeToAnnounce; + need_commitment = true; } } @@ -2638,8 +2658,9 @@ impl Channel { if update_add_htlcs.is_empty() && update_fulfill_htlcs.is_empty() && update_fail_htlcs.is_empty() && self.holding_cell_update_fee.is_none() { return Ok((None, htlcs_to_fail)); } - let update_fee = if let Some(feerate) = self.holding_cell_update_fee { - self.pending_update_fee = self.holding_cell_update_fee.take(); + let update_fee = if let Some(feerate) = self.holding_cell_update_fee.take() { + assert!(self.is_outbound()); + self.pending_update_fee = Some((feerate, FeeUpdateState::Outbound)); Some(msgs::UpdateFee { channel_id: self.channel_id, feerate_per_kw: feerate as u32, @@ -2823,21 +2844,22 @@ impl Channel { } self.value_to_self_msat = (self.value_to_self_msat as i64 + value_to_self_msat_diff) as u64; - if self.is_outbound() { - if let Some(feerate) = self.pending_update_fee.take() { - self.feerate_per_kw = feerate; - } - } else { - if let Some(feerate) = self.pending_update_fee { - // Because a node cannot send two commitment_signeds in a row without getting a - // revoke_and_ack from us (as it would otherwise not know the per_commitment_point - // it should use to create keys with) and because a node can't send a - // commitment_signed without changes, checking if the feerate is equal to the - // pending feerate update is sufficient to detect require_commitment. 
- if feerate == self.feerate_per_kw { + if let Some((feerate, update_state)) = self.pending_update_fee { + match update_state { + FeeUpdateState::Outbound => { + debug_assert!(self.is_outbound()); + log_trace!(logger, " ...promoting outbound fee update {} to Committed", feerate); + self.feerate_per_kw = feerate; + self.pending_update_fee = None; + }, + FeeUpdateState::RemoteAnnounced => { debug_assert!(!self.is_outbound()); }, + FeeUpdateState::AwaitingRemoteRevokeToAnnounce => { + debug_assert!(!self.is_outbound()); + log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate); require_commitment = true; + self.feerate_per_kw = feerate; self.pending_update_fee = None; - } + }, } } @@ -2927,7 +2949,7 @@ impl Channel { } debug_assert!(self.pending_update_fee.is_none()); - self.pending_update_fee = Some(feerate_per_kw); + self.pending_update_fee = Some((feerate_per_kw, FeeUpdateState::Outbound)); Some(msgs::UpdateFee { channel_id: self.channel_id, @@ -2988,6 +3010,13 @@ impl Channel { }); self.next_counterparty_htlc_id -= inbound_drop_count; + if let Some((_, update_state)) = self.pending_update_fee { + if update_state == FeeUpdateState::RemoteAnnounced { + debug_assert!(!self.is_outbound()); + self.pending_update_fee = None; + } + } + for htlc in self.pending_outbound_htlcs.iter_mut() { if let OutboundHTLCState::RemoteRemoved(_) = htlc.state { // They sent us an update to remove this but haven't yet sent the corresponding @@ -3082,7 +3111,7 @@ impl Channel { return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned())); } Channel::::check_remote_fee(fee_estimator, msg.feerate_per_kw)?; - self.pending_update_fee = Some(msg.feerate_per_kw); + self.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced)); self.update_time_counter += 1; Ok(()) } @@ -3148,7 +3177,7 @@ impl Channel { let update_fee = if self.is_outbound() && 
self.pending_update_fee.is_some() { Some(msgs::UpdateFee { channel_id: self.channel_id(), - feerate_per_kw: self.pending_update_fee.unwrap(), + feerate_per_kw: self.pending_update_fee.unwrap().0, }) } else { None }; @@ -4056,7 +4085,7 @@ impl Channel { /// If an Err is returned, it is a ChannelError::Close (for get_outbound_funding_created) fn get_outbound_funding_created_signature(&mut self, logger: &L) -> Result where L::Target: Logger { let counterparty_keys = self.build_remote_transaction_keys()?; - let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, self.feerate_per_kw, logger).0; + let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).0; Ok(self.holder_signer.sign_counterparty_commitment(&counterparty_initial_commitment_tx, &self.secp_ctx) .map_err(|_| ChannelError::Close("Failed to get signatures for new commitment_signed".to_owned()))?.0) } @@ -4413,7 +4442,7 @@ impl Channel { if (self.channel_state & (ChannelState::MonitorUpdateFailed as u32)) == (ChannelState::MonitorUpdateFailed as u32) { panic!("Cannot create commitment tx while awaiting monitor update unfreeze, as send_htlc will have returned an Err so a send_commitment precondition has been violated"); } - let mut have_updates = self.pending_update_fee.is_some(); + let mut have_updates = self.is_outbound() && self.pending_update_fee.is_some(); for htlc in self.pending_outbound_htlcs.iter() { if let OutboundHTLCState::LocalAnnounced(_) = htlc.state { have_updates = true; @@ -4451,6 +4480,13 @@ impl Channel { htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(fail_reason); } } + if let Some((feerate, update_state)) = self.pending_update_fee { + if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce { + debug_assert!(!self.is_outbound()); + self.feerate_per_kw = 
feerate; + self.pending_update_fee = None; + } + } self.resend_order = RAACommitmentOrder::RevokeAndACKFirst; let (res, counterparty_commitment_txid, htlcs) = match self.send_commitment_no_state_update(logger) { @@ -4480,15 +4516,9 @@ impl Channel { /// Only fails in case of bad keys. Used for channel_reestablish commitment_signed generation /// when we shouldn't change HTLC/channel state. fn send_commitment_no_state_update(&self, logger: &L) -> Result<(msgs::CommitmentSigned, (Txid, Vec<(HTLCOutputInCommitment, Option<&HTLCSource>)>)), ChannelError> where L::Target: Logger { - let mut feerate_per_kw = self.feerate_per_kw; - if let Some(feerate) = self.pending_update_fee { - if self.is_outbound() { - feerate_per_kw = feerate; - } - } - let counterparty_keys = self.build_remote_transaction_keys()?; - let counterparty_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, feerate_per_kw, logger); + let counterparty_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, true, logger); + let feerate_per_kw = counterparty_commitment_tx.1; let counterparty_commitment_txid = counterparty_commitment_tx.0.trust().txid(); let (signature, htlc_signatures); @@ -4503,7 +4533,7 @@ impl Channel { && info.next_holder_htlc_id == self.next_holder_htlc_id && info.next_counterparty_htlc_id == self.next_counterparty_htlc_id && info.feerate == self.feerate_per_kw { - let actual_fee = self.commit_tx_fee_msat(counterparty_commitment_tx.1); + let actual_fee = self.commit_tx_fee_msat(counterparty_commitment_tx.2); assert_eq!(actual_fee, info.fee); } } @@ -4511,8 +4541,8 @@ impl Channel { } { - let mut htlcs = Vec::with_capacity(counterparty_commitment_tx.2.len()); - for &(ref htlc, _) in counterparty_commitment_tx.2.iter() { + let mut htlcs = Vec::with_capacity(counterparty_commitment_tx.3.len()); + for &(ref htlc, _) in 
counterparty_commitment_tx.3.iter() { htlcs.push(htlc); } @@ -4539,7 +4569,7 @@ impl Channel { channel_id: self.channel_id, signature, htlc_signatures, - }, (counterparty_commitment_txid, counterparty_commitment_tx.2))) + }, (counterparty_commitment_txid, counterparty_commitment_tx.3))) } /// Adds a pending outbound HTLC to this channel, and creates a signed commitment transaction @@ -4880,7 +4910,14 @@ impl Writeable for Channel { fail_reason.write(writer)?; } - self.pending_update_fee.write(writer)?; + if self.is_outbound() { + self.pending_update_fee.map(|(a, _)| a).write(writer)?; + } else if let Some((feerate, FeeUpdateState::AwaitingRemoteRevokeToAnnounce)) = self.pending_update_fee { + // As for inbound HTLCs, if the update was only announced and never committed, drop it. + Some(feerate).write(writer)?; + } else { + None::.write(writer)?; + } self.holding_cell_update_fee.write(writer)?; self.next_holder_htlc_id.write(writer)?; @@ -5095,7 +5132,8 @@ impl<'a, Signer: Sign, K: Deref> ReadableArgs<&'a K> for Channel monitor_pending_failures.push((Readable::read(reader)?, Readable::read(reader)?, Readable::read(reader)?)); } - let pending_update_fee = Readable::read(reader)?; + let pending_update_fee_value: Option = Readable::read(reader)?; + let holding_cell_update_fee = Readable::read(reader)?; let next_holder_htlc_id = Readable::read(reader)?; @@ -5147,7 +5185,7 @@ impl<'a, Signer: Sign, K: Deref> ReadableArgs<&'a K> for Channel _ => return Err(DecodeError::InvalidValue), }; - let channel_parameters = Readable::read(reader)?; + let channel_parameters: ChannelTransactionParameters = Readable::read(reader)?; let funding_transaction = Readable::read(reader)?; let counterparty_cur_commitment_point = Readable::read(reader)?; @@ -5170,6 +5208,16 @@ impl<'a, Signer: Sign, K: Deref> ReadableArgs<&'a K> for Channel } } + let pending_update_fee = if let Some(feerate) = pending_update_fee_value { + Some((feerate, if channel_parameters.is_outbound_from_holder { + 
FeeUpdateState::Outbound + } else { + FeeUpdateState::AwaitingRemoteRevokeToAnnounce + })) + } else { + None + }; + let mut announcement_sigs = None; read_tlv_fields!(reader, { (0, announcement_sigs, option), @@ -5708,9 +5756,9 @@ mod tests { $( { $htlc_idx: expr, $counterparty_htlc_sig_hex: expr, $htlc_sig_hex: expr, $htlc_tx_hex: expr } ), * } ) => { { let (commitment_tx, htlcs): (_, Vec) = { - let mut res = chan.build_commitment_transaction(0xffffffffffff - 42, &keys, true, false, chan.feerate_per_kw, &logger); + let mut res = chan.build_commitment_transaction(0xffffffffffff - 42, &keys, true, false, &logger); - let htlcs = res.2.drain(..) + let htlcs = res.3.drain(..) .filter_map(|(htlc, _)| if htlc.transaction_output_index.is_some() { Some(htlc) } else { None }) .collect(); (res.0, htlcs) From 01e8ff5ed85c1ecb890350829c9c250fd5f2a793 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Tue, 13 Jul 2021 16:07:01 +0000 Subject: [PATCH 8/9] Log when we change HTLC state while sending a commitment transaction --- lightning/src/ln/channel.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 9a5fd042222..a1123a320f8 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -4462,6 +4462,7 @@ impl Channel { } /// Only fails in case of bad keys fn send_commitment_no_status_check(&mut self, logger: &L) -> Result<(msgs::CommitmentSigned, ChannelMonitorUpdate), ChannelError> where L::Target: Logger { + log_trace!(logger, "Updating HTLC state for a newly-sent commitment_signed..."); // We can upgrade the status of some HTLCs that are waiting on a commitment, even if we // fail to generate this, we still are at least at a position where upgrading their status // is acceptable. 
@@ -4470,6 +4471,7 @@ impl Channel { Some(InboundHTLCState::AwaitingAnnouncedRemoteRevoke(forward_info.clone())) } else { None }; if let Some(state) = new_state { + log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce {} to AwaitingAnnouncedRemoteRevoke", log_bytes!(htlc.payment_hash.0)); htlc.state = state; } } @@ -4477,12 +4479,14 @@ impl Channel { if let Some(fail_reason) = if let &mut OutboundHTLCState::AwaitingRemoteRevokeToRemove(ref mut fail_reason) = &mut htlc.state { Some(fail_reason.take()) } else { None } { + log_trace!(logger, " ...promoting outbound AwaitingRemoteRevokeToRemove {} to AwaitingRemovedRemoteRevoke", log_bytes!(htlc.payment_hash.0)); htlc.state = OutboundHTLCState::AwaitingRemovedRemoteRevoke(fail_reason); } } if let Some((feerate, update_state)) = self.pending_update_fee { if update_state == FeeUpdateState::AwaitingRemoteRevokeToAnnounce { debug_assert!(!self.is_outbound()); + log_trace!(logger, " ...promoting inbound AwaitingRemoteRevokeToAnnounce fee update {} to Committed", feerate); self.feerate_per_kw = feerate; self.pending_update_fee = None; } From d3af49e9f07fc28d104f1f1dbcf8e216b65e9f89 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Wed, 30 Jun 2021 03:16:01 +0000 Subject: [PATCH 9/9] Limit inbound fee updates by dust exposure instead of our estimator Inbound fee updates are rather broken in lightning as they can impact the non-fundee despite the funder paying the fee, but only in the dust exposure it places on the fundee. At least lnd is fairly aggressively high in their (non-anchor) fee estimation, running the risk of force-closure. Further, because we relied on a fee estimator we don't have full control over, we were assuming our users' fees are particularly conservative, and thus were at a lot of risk to force-closures. This converts our fee limiting to use an absurd upper bound, focusing on whether we are over-exposed to in-flight dust when we receive an update_fee. 
--- lightning/src/ln/channel.rs | 32 ++++++++++++++++++++++++++++++-- 1 file changed, 30 insertions(+), 2 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index a1123a320f8..a0071d5436b 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -750,7 +750,12 @@ impl Channel { if feerate_per_kw < lower_limit { return Err(ChannelError::Close(format!("Peer's feerate much too low. Actual: {}. Our expected lower limit: {}", feerate_per_kw, lower_limit))); } - let upper_limit = fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::HighPriority) as u64 * 2; + // We only bound the fee updates on the upper side to prevent completely absurd feerates, + // always accepting up to 25 sat/vByte or 10x our fee estimator's "High Priority" fee. + // We generally don't care too much if they set the feerate to something very high, but it + // could result in the channel being useless due to everything being dust. + let upper_limit = cmp::max(250 * 25, + fee_estimator.get_est_sat_per_1000_weight(ConfirmationTarget::HighPriority) as u64 * 10); if feerate_per_kw as u64 > upper_limit { return Err(ChannelError::Close(format!("Peer's feerate much too high. Actual: {}. Our expected upper limit: {}", feerate_per_kw, upper_limit))); } @@ -3111,8 +3116,27 @@ impl Channel { return Err(ChannelError::Close("Peer sent update_fee when we needed a channel_reestablish".to_owned())); } Channel::::check_remote_fee(fee_estimator, msg.feerate_per_kw)?; + let feerate_over_dust_buffer = msg.feerate_per_kw > self.get_dust_buffer_feerate(); + self.pending_update_fee = Some((msg.feerate_per_kw, FeeUpdateState::RemoteAnnounced)); self.update_time_counter += 1; + // If the feerate has increased over the previous dust buffer (note that + // `get_dust_buffer_feerate` considers the `pending_update_fee` status), check that we + // won't be pushed over our dust exposure limit by the feerate increase. 
+ if feerate_over_dust_buffer { + let inbound_stats = self.get_inbound_pending_htlc_stats(); + let outbound_stats = self.get_outbound_pending_htlc_stats(); + let holder_tx_dust_exposure = inbound_stats.on_holder_tx_dust_exposure_msat + outbound_stats.on_holder_tx_dust_exposure_msat; + let counterparty_tx_dust_exposure = inbound_stats.on_counterparty_tx_dust_exposure_msat + outbound_stats.on_counterparty_tx_dust_exposure_msat; + if holder_tx_dust_exposure > self.get_max_dust_htlc_exposure_msat() { + return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our own transactions (totaling {} msat)", + msg.feerate_per_kw, holder_tx_dust_exposure))); + } + if counterparty_tx_dust_exposure > self.get_max_dust_htlc_exposure_msat() { + return Err(ChannelError::Close(format!("Peer sent update_fee with a feerate ({}) which may over-expose us to dust-in-flight on our counterparty's transactions (totaling {} msat)", + msg.feerate_per_kw, counterparty_tx_dust_exposure))); + } + } Ok(()) } @@ -3684,7 +3708,11 @@ impl Channel { // whichever is higher. This ensures that we aren't suddenly exposed to significantly // more dust balance if the feerate increases when we have several HTLCs pending // which are near the dust limit. - cmp::max(2530, self.feerate_per_kw * 1250 / 1000) + let mut feerate_per_kw = self.feerate_per_kw; + if let Some((feerate, _)) = self.pending_update_fee { + feerate_per_kw = cmp::max(feerate_per_kw, feerate); + } + cmp::max(2530, feerate_per_kw * 1250 / 1000) } pub fn get_cur_holder_commitment_transaction_number(&self) -> u64 {