From f0c6dfbd80ea5a1166f711c919318a28e87e3c92 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Viktor=20Tigerstr=C3=B6m?= <11711198+ViktorTigerstrom@users.noreply.github.com>
Date: Wed, 12 Oct 2022 01:07:23 +0200
Subject: [PATCH 1/3] Move `claimable_htlcs` to separate lock

---
 lightning/src/ln/channelmanager.rs | 138 +++++++++++++++--------------
 1 file changed, 71 insertions(+), 67 deletions(-)

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 465256d21ea..f1c894368da 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -395,13 +395,6 @@ pub(super) enum RAACommitmentOrder {
 // Note this is only exposed in cfg(test):
 pub(super) struct ChannelHolder<Signer: Sign> {
     pub(super) by_id: HashMap<[u8; 32], Channel<Signer>>,
-    /// Map from payment hash to the payment data and any HTLCs which are to us and can be
-    /// failed/claimed by the user.
-    ///
-    /// Note that while this is held in the same mutex as the channels themselves, no consistency
-    /// guarantees are made about the channels given here actually existing anymore by the time you
-    /// go to read them!
-    claimable_htlcs: HashMap<PaymentHash, (events::PaymentPurpose, Vec<ClaimableHTLC>)>,
     /// Messages to send to peers - pushed to in the same lock that they are generated in (except
     /// for broadcast messages, where ordering isn't as strict).
     pub(super) pending_msg_events: Vec<MessageSendEvent>,
@@ -682,6 +675,8 @@ pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, M, T, F, L> = ChannelManage
 // | |
 // | |__`pending_inbound_payments`
 // | |
+// | |__`claimable_htlcs`
+// | |
 // | |__`pending_outbound_payments`
 // | |
 // | |__`best_block`
 // | |
@@ -753,6 +748,15 @@ pub struct ChannelManager
     #[cfg(not(test))]
     forward_htlcs: Mutex<HashMap<u64, Vec<HTLCForwardInfo>>>,
+    /// Map from payment hash to the payment data and any HTLCs which are to us and can be
+    /// failed/claimed by the user.
+    ///
+    /// Note that no consistency guarantees are made about the channels given here actually
+    /// existing anymore by the time you go to read them!
+    ///
+    /// See `ChannelManager` struct-level documentation for lock order requirements.
+    claimable_htlcs: Mutex<HashMap<PaymentHash, (events::PaymentPurpose, Vec<ClaimableHTLC>)>>,
+
     /// The set of outbound SCID aliases across all our channels, including unconfirmed channels
     /// and some closed channels which reached a usable state prior to being closed. This is used
     /// only to avoid duplicates, and is not persisted explicitly to disk, but rebuilt from the
@@ -1657,13 +1661,13 @@ impl ChannelManager ChannelManager {
@@ -3242,6 +3244,8 @@ impl ChannelManager {
                         forwarding_channel_not_found!();
@@ -3434,7 +3438,8 @@ impl ChannelManager ChannelManager {
-                    match channel_state.claimable_htlcs.entry(payment_hash) {
+                    match self.claimable_htlcs.lock().unwrap().entry(payment_hash) {
                         hash_map::Entry::Vacant(e) => {
                             let purpose = events::PaymentPurpose::SpontaneousPayment(preimage);
                             e.insert((purpose.clone(), vec![claimable_htlc]));
@@ -3791,29 +3796,29 @@ impl ChannelManager
+                        return htlc.timer_ticks >= MPP_TIMEOUT_TICKS
+                    }) {
+                        timed_out_mpp_htlcs.extend(htlcs.into_iter().map(|htlc| (htlc.prev_hop.clone(), payment_hash.clone())));
+                        return false;
+                    }
-                if let OnionPayload::Invoice { .. } = htlcs[0].onion_payload {
-                    // Check if we've received all the parts we need for an MPP (the value of the parts adds to total_msat).
-                    // In this case we're not going to handle any timeouts of the parts here.
-                    if htlcs[0].total_msat == htlcs.iter().fold(0, |total, htlc| total + htlc.value) {
-                        return true;
-                    } else if htlcs.into_iter().any(|htlc| {
-                        htlc.timer_ticks += 1;
-                        return htlc.timer_ticks >= MPP_TIMEOUT_TICKS
-                    }) {
-                        timed_out_mpp_htlcs.extend(htlcs.into_iter().map(|htlc| (htlc.prev_hop.clone(), payment_hash.clone())));
-                        return false;
-                    }
-                }
-                true
-            });
-        }
+            }
+            true
+        });
         for htlc_source in timed_out_mpp_htlcs.drain(..) {
             let receiver = HTLCDestination::FailedPayment { payment_hash: htlc_source.1 };
@@ -3846,10 +3851,7 @@ impl ChannelManager
 ChannelManager= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER {
-                        let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
-                        htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(height));
-
-                        timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(), HTLCFailReason::Reason {
-                            failure_code: 0x4000 | 15,
-                            data: htlc_msat_height_data
-                        }, HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }));
-                        false
-                    } else { true }
-                });
-                !htlcs.is_empty() // Only retain this entry if htlcs has at least one entry.
+        if let Some(height) = height_opt {
+            self.claimable_htlcs.lock().unwrap().retain(|payment_hash, (_, htlcs)| {
+                htlcs.retain(|htlc| {
+                    // If height is approaching the number of blocks we think it takes us to get
+                    // our commitment transaction confirmed before the HTLC expires, plus the
+                    // number of blocks we generally consider it to take to do a commitment update,
+                    // just give up on it and fail the HTLC.
+                    if height >= htlc.cltv_expiry - HTLC_FAIL_BACK_BUFFER {
+                        let mut htlc_msat_height_data = byte_utils::be64_to_array(htlc.value).to_vec();
+                        htlc_msat_height_data.extend_from_slice(&byte_utils::be32_to_array(height));
+
+                        timed_out_htlcs.push((HTLCSource::PreviousHopData(htlc.prev_hop.clone()), payment_hash.clone(), HTLCFailReason::Reason {
+                            failure_code: 0x4000 | 15,
+                            data: htlc_msat_height_data
+                        }, HTLCDestination::FailedPayment { payment_hash: payment_hash.clone() }));
+                        false
+                    } else { true }
+                });
-            }
+                !htlcs.is_empty() // Only retain this entry if htlcs has at least one entry.
+            });
         }
         self.handle_init_event_channel_failures(failed_channels);
@@ -6775,16 +6777,18 @@ impl Writeable for ChannelMana
             }
         }
-        let channel_state = self.channel_state.lock().unwrap();
-        let mut htlc_purposes: Vec<&events::PaymentPurpose> = Vec::new();
-        (channel_state.claimable_htlcs.len() as u64).write(writer)?;
-        for (payment_hash, (purpose, previous_hops)) in channel_state.claimable_htlcs.iter() {
-            payment_hash.write(writer)?;
-            (previous_hops.len() as u64).write(writer)?;
-            for htlc in previous_hops.iter() {
-                htlc.write(writer)?;
+        let mut htlc_purposes: Vec<events::PaymentPurpose> = Vec::new();
+        {
+            let claimable_htlcs = self.claimable_htlcs.lock().unwrap();
+            (claimable_htlcs.len() as u64).write(writer)?;
+            for (payment_hash, (purpose, previous_hops)) in claimable_htlcs.iter() {
+                payment_hash.write(writer)?;
+                (previous_hops.len() as u64).write(writer)?;
+                for htlc in previous_hops.iter() {
+                    htlc.write(writer)?;
+                }
+                htlc_purposes.push(purpose.clone());
             }
-            htlc_purposes.push(purpose);
         }
         let per_peer_state = self.per_peer_state.write().unwrap();
@@ -7389,7 +7393,6 @@ impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
             channel_state: Mutex::new(ChannelHolder {
                 by_id,
-                claimable_htlcs,
                 pending_msg_events: Vec::new(),
             }),
             inbound_payment_key: expanded_inbound_key,
@@ -7397,6 +7400,7 @@ impl<'a, M: Deref, T: Deref, K: Deref, F: Deref, L: Deref>
             pending_outbound_payments: Mutex::new(pending_outbound_payments.unwrap()),
             forward_htlcs: Mutex::new(forward_htlcs),
+            claimable_htlcs: Mutex::new(claimable_htlcs),
             outbound_scid_aliases: Mutex::new(outbound_scid_aliases),
             id_to_peer: Mutex::new(id_to_peer),
             short_to_chan_info: FairRwLock::new(short_to_chan_info),

From 6b12117782a06f759152741b9bf54802b50c7e58 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Viktor=20Tigerstr=C3=B6m?= <11711198+ViktorTigerstrom@users.noreply.github.com>
Date: Wed, 12 Oct 2022 21:26:28 +0200
Subject: [PATCH 2/3] Lock pending inbound and outbound payments before
 `channel_state`

As the `channel_state` lock will be removed, we prepare for that by flipping
the lock order for the `pending_inbound_payments` and `pending_outbound_payments`
locks so that they are acquired before the `channel_state` lock.
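The reordering only matters because every thread must take these locks in one global
order to stay deadlock-free. Below is a minimal sketch of that rule, using simplified
stand-in fields rather than the real `ChannelManager` members:

```rust
// Illustrative sketch only -- not part of the patch. The fields and payload types are
// made-up stand-ins; it only demonstrates the lock-order rule the tree above encodes:
// when two of these locks are held at once, the one higher in the tree is taken first.
use std::collections::HashMap;
use std::sync::Mutex;

struct Manager {
    // Higher in the documented order: always acquired first.
    pending_inbound_payments: Mutex<HashMap<u64, u64>>,
    // Lower in the documented order: only acquired after the payment lock (if both are needed).
    channel_state: Mutex<HashMap<u64, u64>>,
}

impl Manager {
    /// Mirrors the serialization path: both maps are locked for a consistent
    /// snapshot, in the documented order (payments first, then channel state).
    fn snapshot(&self) -> (usize, usize) {
        let inbound = self.pending_inbound_payments.lock().unwrap();
        let channels = self.channel_state.lock().unwrap();
        (inbound.len(), channels.len())
    }
}

fn main() {
    let mgr = Manager {
        pending_inbound_payments: Mutex::new(HashMap::new()),
        channel_state: Mutex::new(HashMap::new()),
    };
    // Any other thread needing both locks must use the same order; taking
    // `channel_state` first and then the payment lock could deadlock against
    // `snapshot()`.
    let _ = mgr.snapshot();
}
```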
---
 lightning/src/ln/channelmanager.rs | 45 +++++++++++++++----------------
 1 file changed, 22 insertions(+), 23 deletions(-)

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index f1c894368da..7a11bd8167c 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -663,21 +663,21 @@ pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, M, T, F, L> = ChannelManage
 // |
 // |__`forward_htlcs`
 // |
-// |__`channel_state`
+// |__`pending_inbound_payments`
 // | |
-// | |__`id_to_peer`
+// | |__`claimable_htlcs`
 // | |
-// | |__`short_to_chan_info`
-// | |
-// | |__`per_peer_state`
-// | |
-// | |__`outbound_scid_aliases`
+// | |__`pending_outbound_payments`
 // | |
-// | |__`pending_inbound_payments`
+// | |__`channel_state`
+// | |
+// | |__`id_to_peer`
 // | |
-// | |__`claimable_htlcs`
+// | |__`short_to_chan_info`
 // | |
-// | |__`pending_outbound_payments`
+// | |__`per_peer_state`
+// | |
+// | |__`outbound_scid_aliases`
 // | |
 // | |__`best_block`
 // | |
@@ -6777,18 +6777,19 @@ impl Writeable for ChannelMana
             }
         }
-        let mut htlc_purposes: Vec<events::PaymentPurpose> = Vec::new();
-        {
-            let claimable_htlcs = self.claimable_htlcs.lock().unwrap();
-            (claimable_htlcs.len() as u64).write(writer)?;
-            for (payment_hash, (purpose, previous_hops)) in claimable_htlcs.iter() {
-                payment_hash.write(writer)?;
-                (previous_hops.len() as u64).write(writer)?;
-                for htlc in previous_hops.iter() {
-                    htlc.write(writer)?;
-                }
-                htlc_purposes.push(purpose.clone());
+        let pending_inbound_payments = self.pending_inbound_payments.lock().unwrap();
+        let claimable_htlcs = self.claimable_htlcs.lock().unwrap();
+        let pending_outbound_payments = self.pending_outbound_payments.lock().unwrap();
+
+        let mut htlc_purposes: Vec<&events::PaymentPurpose> = Vec::new();
+        (claimable_htlcs.len() as u64).write(writer)?;
+        for (payment_hash, (purpose, previous_hops)) in claimable_htlcs.iter() {
+            payment_hash.write(writer)?;
+            (previous_hops.len() as u64).write(writer)?;
+            for htlc in previous_hops.iter() {
+                htlc.write(writer)?;
             }
+            htlc_purposes.push(purpose);
         }
         let per_peer_state = self.per_peer_state.write().unwrap();
@@ -6799,8 +6800,6 @@ impl Writeable for ChannelMana
             peer_state.latest_features.write(writer)?;
         }
-        let pending_inbound_payments = self.pending_inbound_payments.lock().unwrap();
-        let pending_outbound_payments = self.pending_outbound_payments.lock().unwrap();
         let events = self.pending_events.lock().unwrap();
         (events.len() as u64).write(writer)?;
         for event in events.iter() {

From 782eb3658fbc53ceb9c64847692061f2198cb786 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Viktor=20Tigerstr=C3=B6m?= <11711198+ViktorTigerstrom@users.noreply.github.com>
Date: Mon, 7 Nov 2022 01:11:44 +0100
Subject: [PATCH 3/3] Don't hold `per_peer_state` lock during chain monitor
 update

On Windows builds only, the `TestPersister::chain_sync_monitor_persistences`
lock is ordered before the `ChannelManager::per_peer_state` lock. This fix
ensures that the `per_peer_state` lock isn't held when the
`TestPersister::chain_sync_monitor_persistences` lock is acquired.
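The fix itself is purely a matter of lock scoping. Below is a sketch of the pattern
using invented stand-ins (`PeerData`, `Persister`, `shutdown_peer`) rather than the
real LDK types: the `per_peer_state` read guard is confined to an inner block, so it
has already been released by the time any code that takes the persister's own lock runs.

```rust
// Illustrative sketch only -- not taken from the patch; all names are hypothetical.
use std::collections::HashMap;
use std::sync::{Mutex, RwLock};

struct PeerData { connected: bool }

struct Persister { monitor_persistences: Mutex<Vec<u64>> }

struct Node {
    per_peer_state: RwLock<HashMap<u64, PeerData>>,
    persister: Persister,
}

impl Node {
    fn shutdown_peer(&self, peer_id: u64) {
        // Read what we need under `per_peer_state`, then release the guard...
        let is_connected = {
            let per_peer_state = self.per_peer_state.read().unwrap();
            per_peer_state.get(&peer_id).map_or(false, |p| p.connected)
        }; // ...dropped here, before the persister's lock is taken.

        if is_connected {
            // Safe: `per_peer_state` is no longer held, so this cannot create a
            // cycle between the persister's lock and `per_peer_state`.
            self.persister.monitor_persistences.lock().unwrap().push(peer_id);
        }
    }
}

fn main() {
    let node = Node {
        per_peer_state: RwLock::new(HashMap::from([(1, PeerData { connected: true })])),
        persister: Persister { monitor_persistences: Mutex::new(Vec::new()) },
    };
    node.shutdown_peer(1);
}
```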
---
 lightning/src/ln/channelmanager.rs | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)

diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 7a11bd8167c..e475cf13854 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -1906,14 +1906,16 @@ impl ChannelManager {
-                    let peer_state = peer_state.lock().unwrap();
-                    let their_features = &peer_state.latest_features;
-                    chan_entry.get_mut().get_shutdown(&self.keys_manager, their_features, target_feerate_sats_per_1000_weight)?
-                },
-                None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", counterparty_node_id) }),
+            let (shutdown_msg, monitor_update, htlcs) = {
+                let per_peer_state = self.per_peer_state.read().unwrap();
+                match per_peer_state.get(&counterparty_node_id) {
+                    Some(peer_state) => {
+                        let peer_state = peer_state.lock().unwrap();
+                        let their_features = &peer_state.latest_features;
+                        chan_entry.get_mut().get_shutdown(&self.keys_manager, their_features, target_feerate_sats_per_1000_weight)?
+                    },
+                    None => return Err(APIError::ChannelUnavailable { err: format!("Not connected to node: {}", counterparty_node_id) }),
+                }
             };
             failed_htlcs = htlcs;
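Taken together, the series moves `ChannelManager` toward finer-grained locking:
`claimable_htlcs` leaves the coarse `channel_state` lock, the payment locks move up the
ordering tree, and `per_peer_state` is no longer held across chain monitor persistence.
The shape of the first patch's refactor can be sketched as follows, with heavily
simplified stand-in types rather than the real LDK definitions:

```rust
// Illustrative sketch only -- simplified stand-ins, not the real `ChannelManager` types.
use std::collections::HashMap;
use std::sync::Mutex;

// Before: one coarse lock guards both maps, so touching claimable HTLCs
// serializes against every channel operation.
#[allow(dead_code)]
struct HolderBefore {
    by_id: HashMap<[u8; 32], String>,        // placeholder per-channel state
    claimable_htlcs: HashMap<[u8; 32], u64>, // placeholder HTLC data
}
#[allow(dead_code)]
struct ManagerBefore {
    channel_state: Mutex<HolderBefore>,
}

// After: `claimable_htlcs` has its own Mutex and its own slot in the
// documented lock-order tree.
struct ManagerAfter {
    channel_state: Mutex<HashMap<[u8; 32], String>>,
    claimable_htlcs: Mutex<HashMap<[u8; 32], u64>>,
}

impl ManagerAfter {
    fn remove_claimable(&self, payment_hash: [u8; 32]) -> Option<u64> {
        // Only the HTLC map is locked; channel state is untouched.
        self.claimable_htlcs.lock().unwrap().remove(&payment_hash)
    }
}

fn main() {
    let mgr = ManagerAfter {
        channel_state: Mutex::new(HashMap::new()),
        claimable_htlcs: Mutex::new(HashMap::new()),
    };
    assert!(mgr.remove_claimable([0u8; 32]).is_none());
}
```

The trade-off is that every new mutex needs its own place in the documented lock-order
tree, which is exactly what the struct-level comment changes in these patches maintain.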