Skip to content

Commit a6fe97c

Browse files
f - remove peers through timer_tick_occurred
1 parent 12dd28b commit a6fe97c

File tree

1 file changed

+32
-54
lines changed

1 file changed

+32
-54
lines changed

lightning/src/ln/channelmanager.rs

Lines changed: 32 additions & 54 deletions
Original file line numberDiff line numberDiff line change
@@ -620,8 +620,6 @@ pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> = C
620620
// | |
621621
// | |__`best_block`
622622
// | |
623-
// | |__`pending_peers_awaiting_removal`
624-
// | |
625623
// | |__`pending_events`
626624
// | |
627625
// | |__`pending_background_events`
@@ -789,16 +787,6 @@ where
789787

790788
/// See `ChannelManager` struct-level documentation for lock order requirements.
791789
pending_events: Mutex<Vec<events::Event>>,
792-
/// When a peer disconnects but still has channels, the peer's `peer_state` entry in the
793-
/// `per_peer_state` is not removed by the `peer_disconnected` function. If the channels of
794-
/// to that peer is later closed while still being disconnected (i.e. force closed), we
795-
/// therefore need to remove the peer from `peer_state` separately.
796-
/// To avoid having to take the `per_peer_state` `write` lock once the channels are closed, we
797-
/// instead store such peers awaiting removal in this field, and remove them on a timer to
798-
/// limit the negative effects on parallelism as much as possible.
799-
///
800-
/// See `ChannelManager` struct-level documentation for lock order requirements.
801-
pending_peers_awaiting_removal: Mutex<HashSet<PublicKey>>,
802790
/// See `ChannelManager` struct-level documentation for lock order requirements.
803791
pending_background_events: Mutex<Vec<BackgroundEvent>>,
804792
/// Used when we have to take a BIG lock to make sure everything is self-consistent.
@@ -1359,11 +1347,10 @@ macro_rules! try_chan_entry {
13591347
}
13601348

13611349
macro_rules! remove_channel {
1362-
($self: expr, $entry: expr, $peer_state: expr) => {
1350+
($self: expr, $entry: expr) => {
13631351
{
13641352
let channel = $entry.remove_entry().1;
13651353
update_maps_on_chan_removal!($self, channel);
1366-
$self.add_pending_peer_to_be_removed(channel.get_counterparty_node_id(), $peer_state);
13671354
channel
13681355
}
13691356
}
@@ -1536,7 +1523,6 @@ where
15361523
per_peer_state: FairRwLock::new(HashMap::new()),
15371524

15381525
pending_events: Mutex::new(Vec::new()),
1539-
pending_peers_awaiting_removal: Mutex::new(HashSet::new()),
15401526
pending_background_events: Mutex::new(Vec::new()),
15411527
total_consistency_lock: RwLock::new(()),
15421528
persistence_notifier: Notifier::new(),
@@ -1803,7 +1789,7 @@ where
18031789
let (result, is_permanent) =
18041790
handle_monitor_update_res!(self, update_res, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
18051791
if is_permanent {
1806-
remove_channel!(self, chan_entry, peer_state);
1792+
remove_channel!(self, chan_entry);
18071793
break result;
18081794
}
18091795
}
@@ -1814,7 +1800,7 @@ where
18141800
});
18151801

18161802
if chan_entry.get().is_shutdown() {
1817-
let channel = remove_channel!(self, chan_entry, peer_state);
1803+
let channel = remove_channel!(self, chan_entry);
18181804
if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) {
18191805
peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
18201806
msg: channel_update
@@ -1917,7 +1903,7 @@ where
19171903
} else {
19181904
self.issue_channel_close_events(chan.get(),ClosureReason::HolderForceClosed);
19191905
}
1920-
remove_channel!(self, chan, peer_state)
1906+
remove_channel!(self, chan)
19211907
} else {
19221908
return Err(APIError::ChannelUnavailable{ err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*channel_id), peer_node_id) });
19231909
}
@@ -1956,13 +1942,6 @@ where
19561942
}
19571943
}
19581944

1959-
fn add_pending_peer_to_be_removed(&self, counterparty_node_id: PublicKey, peer_state: &mut PeerState<<SP::Target as SignerProvider>::Signer>) {
1960-
let peer_should_be_removed = !peer_state.is_connected && peer_state.channel_by_id.len() == 0;
1961-
if peer_should_be_removed {
1962-
self.pending_peers_awaiting_removal.lock().unwrap().insert(counterparty_node_id);
1963-
}
1964-
}
1965-
19661945
/// Force closes a channel, immediately broadcasting the latest local transaction(s) and
19671946
/// rejecting new HTLCs on the given channel. Fails if `channel_id` is unknown to
19681947
/// the manager, or if the `counterparty_node_id` isn't the counterparty of the corresponding
@@ -3442,16 +3421,20 @@ where
34423421
true
34433422
}
34443423

3445-
/// Removes peers which have been been added to `pending_peers_awaiting_removal` which are
3446-
/// still disconnected and we have no channels to.
3424+
/// When a peer disconnects but still has channels, the peer's `peer_state` entry in the
3425+
/// `per_peer_state` is not removed by the `peer_disconnected` function. If the channels
3426+
/// to that peer are later closed while still being disconnected (i.e. force closed), we
3427+
/// therefore need to remove the peer from `peer_state` separately.
3428+
/// To avoid having to take the `per_peer_state` `write` lock once the channels are closed, we
3429+
/// instead remove such peers awaiting removal through this function, which is called on a
3430+
/// timer through `timer_tick_occurred`, passing the disconnected peers with no channels,
3431+
/// to limit the negative effects on parallelism as much as possible.
34473432
///
34483433
/// Must be called without the `per_peer_state` lock acquired.
3449-
fn remove_peers_awaiting_removal(&self) {
3450-
let mut pending_peers_awaiting_removal = HashSet::new();
3451-
mem::swap(&mut *self.pending_peers_awaiting_removal.lock().unwrap(), &mut pending_peers_awaiting_removal);
3434+
fn remove_peers_awaiting_removal(&self, pending_peers_awaiting_removal: HashSet<PublicKey>) {
34523435
if pending_peers_awaiting_removal.len() > 0 {
34533436
let mut per_peer_state = self.per_peer_state.write().unwrap();
3454-
for counterparty_node_id in pending_peers_awaiting_removal.drain() {
3437+
for counterparty_node_id in pending_peers_awaiting_removal {
34553438
match per_peer_state.entry(counterparty_node_id) {
34563439
hash_map::Entry::Occupied(entry) => {
34573440
// Remove the entry if the peer is still disconnected and we still
@@ -3530,6 +3513,7 @@ where
35303513
/// the channel.
35313514
/// * Expiring a channel's previous `ChannelConfig` if necessary to only allow forwarding HTLCs
35323515
/// with the current `ChannelConfig`.
3516+
/// * Removing peers which have disconnected but no longer have any channels.
35333517
///
35343518
/// Note that this may cause reentrancy through `chain::Watch::update_channel` calls or feerate
35353519
/// estimate fetches.
@@ -3542,6 +3526,7 @@ where
35423526

35433527
let mut handle_errors: Vec<(Result<(), _>, _)> = Vec::new();
35443528
let mut timed_out_mpp_htlcs = Vec::new();
3529+
let mut pending_peers_awaiting_removal = HashSet::new();
35453530
{
35463531
let per_peer_state = self.per_peer_state.read().unwrap();
35473532
for (counterparty_node_id, peer_state_mutex) in per_peer_state.iter() {
@@ -3589,10 +3574,13 @@ where
35893574

35903575
true
35913576
});
3592-
self.add_pending_peer_to_be_removed(counterparty_node_id, peer_state);
3577+
let peer_should_be_removed = !peer_state.is_connected && peer_state.channel_by_id.len() == 0;
3578+
if peer_should_be_removed {
3579+
pending_peers_awaiting_removal.insert(counterparty_node_id);
3580+
}
35933581
}
35943582
}
3595-
self.remove_peers_awaiting_removal();
3583+
self.remove_peers_awaiting_removal(pending_peers_awaiting_removal);
35963584

35973585
self.claimable_payments.lock().unwrap().claimable_htlcs.retain(|payment_hash, (_, htlcs)| {
35983586
if htlcs.is_empty() {
@@ -4347,7 +4335,7 @@ where
43474335
}
43484336
};
43494337
peer_state.pending_msg_events.push(send_msg_err_event);
4350-
let _ = remove_channel!(self, channel, peer_state);
4338+
let _ = remove_channel!(self, channel);
43514339
return Err(APIError::APIMisuseError { err: "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned() });
43524340
}
43534341

@@ -4633,7 +4621,7 @@ where
46334621
let (result, is_permanent) =
46344622
handle_monitor_update_res!(self, update_res, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
46354623
if is_permanent {
4636-
remove_channel!(self, chan_entry, peer_state);
4624+
remove_channel!(self, chan_entry);
46374625
break result;
46384626
}
46394627
}
@@ -4682,7 +4670,7 @@ where
46824670
// also implies there are no pending HTLCs left on the channel, so we can
46834671
// fully delete it from tracking (the channel monitor is still around to
46844672
// watch for old state broadcasts)!
4685-
(tx, Some(remove_channel!(self, chan_entry, peer_state)))
4673+
(tx, Some(remove_channel!(self, chan_entry)))
46864674
} else { (tx, None) }
46874675
},
46884676
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
@@ -5185,11 +5173,12 @@ where
51855173
if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
51865174
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
51875175
let peer_state = &mut *peer_state_lock;
5176+
let pending_msg_events = &mut peer_state.pending_msg_events;
51885177
if let hash_map::Entry::Occupied(chan_entry) = peer_state.channel_by_id.entry(funding_outpoint.to_channel_id()) {
5189-
let mut chan = remove_channel!(self, chan_entry, peer_state);
5178+
let mut chan = remove_channel!(self, chan_entry);
51905179
failed_channels.push(chan.force_shutdown(false));
51915180
if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
5192-
peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
5181+
pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
51935182
msg: update
51945183
});
51955184
}
@@ -5199,7 +5188,7 @@ where
51995188
ClosureReason::CommitmentTxConfirmed
52005189
};
52015190
self.issue_channel_close_events(&chan, reason);
5202-
peer_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
5191+
pending_msg_events.push(events::MessageSendEvent::HandleError {
52035192
node_id: chan.get_counterparty_node_id(),
52045193
action: msgs::ErrorAction::SendErrorMessage {
52055194
msg: msgs::ErrorMessage { channel_id: chan.channel_id(), data: "Channel force-closed".to_owned() }
@@ -5241,7 +5230,7 @@ where
52415230
{
52425231
let per_peer_state = self.per_peer_state.read().unwrap();
52435232

5244-
for (cp_id, peer_state_mutex) in per_peer_state.iter() {
5233+
for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
52455234
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
52465235
let peer_state = &mut *peer_state_lock;
52475236
let pending_msg_events = &mut peer_state.pending_msg_events;
@@ -5281,7 +5270,6 @@ where
52815270
}
52825271
}
52835272
});
5284-
self.add_pending_peer_to_be_removed(*cp_id, peer_state);
52855273
}
52865274
}
52875275

@@ -5306,7 +5294,7 @@ where
53065294
{
53075295
let per_peer_state = self.per_peer_state.read().unwrap();
53085296

5309-
for (cp_id, peer_state_mutex) in per_peer_state.iter() {
5297+
for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
53105298
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
53115299
let peer_state = &mut *peer_state_lock;
53125300
let pending_msg_events = &mut peer_state.pending_msg_events;
@@ -5344,7 +5332,6 @@ where
53445332
}
53455333
}
53465334
});
5347-
self.add_pending_peer_to_be_removed(*cp_id, peer_state);
53485335
}
53495336
}
53505337

@@ -5926,7 +5913,7 @@ where
59265913
let mut timed_out_htlcs = Vec::new();
59275914
{
59285915
let per_peer_state = self.per_peer_state.read().unwrap();
5929-
for (cp_id, peer_state_mutex) in per_peer_state.iter() {
5916+
for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
59305917
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
59315918
let peer_state = &mut *peer_state_lock;
59325919
let pending_msg_events = &mut peer_state.pending_msg_events;
@@ -6010,7 +5997,6 @@ where
60105997
}
60115998
true
60125999
});
6013-
self.add_pending_peer_to_be_removed(*cp_id, peer_state);
60146000
}
60156001
}
60166002

@@ -6338,7 +6324,7 @@ where
63386324

63396325
let per_peer_state = self.per_peer_state.read().unwrap();
63406326

6341-
for (cp_id, peer_state_mutex) in per_peer_state.iter() {
6327+
for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
63426328
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
63436329
let peer_state = &mut *peer_state_lock;
63446330
let pending_msg_events = &mut peer_state.pending_msg_events;
@@ -6370,7 +6356,6 @@ where
63706356
}
63716357
retain
63726358
});
6373-
self.add_pending_peer_to_be_removed(*cp_id, peer_state);
63746359
}
63756360
//TODO: Also re-broadcast announcement_signatures
63766361
Ok(())
@@ -6884,8 +6869,6 @@ where
68846869

68856870
write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
68866871

6887-
self.remove_peers_awaiting_removal();
6888-
68896872
self.genesis_hash.write(writer)?;
68906873
{
68916874
let best_block = self.best_block.read().unwrap();
@@ -7711,7 +7694,6 @@ where
77117694
per_peer_state: FairRwLock::new(per_peer_state),
77127695

77137696
pending_events: Mutex::new(pending_events_read),
7714-
pending_peers_awaiting_removal: Mutex::new(HashSet::new()),
77157697
pending_background_events: Mutex::new(pending_background_events_read),
77167698
total_consistency_lock: RwLock::new(()),
77177699
persistence_notifier: Notifier::new(),
@@ -8196,9 +8178,6 @@ mod tests {
81968178
// Assert that nodes[1] is awaiting removal for nodes[0] once nodes[1] has been
81978179
// disconnected and the channel between them has been force closed.
81988180
let nodes_0_per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
8199-
let nodes_0_pending_peers_awaiting_removal = nodes[0].node.pending_peers_awaiting_removal.lock().unwrap();
8200-
assert_eq!(nodes_0_pending_peers_awaiting_removal.len(), 1);
8201-
assert!(nodes_0_pending_peers_awaiting_removal.get(&nodes[1].node.get_our_node_id()).is_some());
82028181
// Assert that nodes[1] isn't removed before `timer_tick_occurred` has been executed.
82038182
assert_eq!(nodes_0_per_peer_state.len(), 1);
82048183
assert!(nodes_0_per_peer_state.get(&nodes[1].node.get_our_node_id()).is_some());
@@ -8209,7 +8188,6 @@ mod tests {
82098188
{
82108189
// Assert that nodes[1] has now been removed.
82118190
assert_eq!(nodes[0].node.per_peer_state.read().unwrap().len(), 0);
8212-
assert_eq!(nodes[0].node.pending_peers_awaiting_removal.lock().unwrap().len(), 0);
82138191
}
82148192
}
82158193

0 commit comments

Comments
 (0)