Commit 1ffca8f
Always process inb. shutdown ChannelMonitorUpdates asynchronously
We currently have two codepaths on most channel update functions - most methods return a set of messages to send a peer iff the `ChannelMonitorUpdate` succeeds, but if it does not we push the messages back into the `Channel` and then pull them back out when the `ChannelMonitorUpdate` completes and send them then. This adds a substantial amount of complexity in very critical codepaths.

Instead, here we swap all our channel update codepaths to immediately set the channel-update-required flag and only return a `ChannelMonitorUpdate` to the `ChannelManager`. Internally in the `Channel` we store a queue of `ChannelMonitorUpdate`s, which will become critical in future work to surface pending `ChannelMonitorUpdate`s to users at startup so they can complete.

This leaves some redundant work in `Channel` to be cleaned up later. Specifically, we still generate the messages which we will now ignore and regenerate later.

This commit updates the `ChannelMonitorUpdate` pipeline for handling inbound `shutdown` messages.
1 parent b17ffe4 commit 1ffca8f
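To make the commit message's new flow concrete, here is a minimal, self-contained sketch of the queue-and-return pattern it describes. Every name below is an illustrative stand-in rather than LDK's actual API: the real `Channel` tracks far more state, and completion is signalled through the `ChannelManager` and the chain monitor.

```rust
// Simplified model: the channel queues a ChannelMonitorUpdate-like object,
// marks itself paused, and hands the manager a reference to apply. Messages
// that depend on the update are held until completion is confirmed.

struct MonitorUpdate {
    update_id: u64,
}

#[derive(Default)]
struct Channel {
    latest_monitor_update_id: u64,
    // Updates the manager has not yet seen through to completion.
    pending_monitor_updates: Vec<MonitorUpdate>,
    // While true, messages that depend on a monitor update are held back.
    monitor_update_pending: bool,
}

impl Channel {
    // Queue a new update and return a borrow for the manager to apply,
    // mirroring the commit's "set the flag, return the update" approach.
    fn queue_monitor_update(&mut self) -> &MonitorUpdate {
        self.latest_monitor_update_id += 1;
        self.monitor_update_pending = true;
        self.pending_monitor_updates.push(MonitorUpdate {
            update_id: self.latest_monitor_update_id,
        });
        self.pending_monitor_updates.last().unwrap()
    }

    // Called once persistence of the given update has completed.
    fn monitor_update_completed(&mut self, update_id: u64) {
        self.pending_monitor_updates.retain(|u| u.update_id > update_id);
        if self.pending_monitor_updates.is_empty() {
            // Safe to regenerate and send the held messages now.
            self.monitor_update_pending = false;
        }
    }
}

fn main() {
    let mut chan = Channel::default();
    let id = chan.queue_monitor_update().update_id;
    assert!(chan.monitor_update_pending);
    chan.monitor_update_completed(id);
    assert!(!chan.monitor_update_pending);
}
```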

File tree

3 files changed: +28 -18 lines changed


lightning/src/ln/chanmon_update_fail_tests.rs

Lines changed: 9 additions & 1 deletion
```diff
@@ -2618,7 +2618,15 @@ fn test_permanent_error_during_handling_shutdown() {
 	assert!(nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).is_ok());
 	let shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
 	nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &channelmanager::provided_init_features(), &shutdown);
-	check_closed_broadcast!(nodes[1], true);
+
+	// We always send the `shutdown` response when receiving a shutdown, even if we immediately
+	// close the channel thereafter.
+	let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
+	assert_eq!(msg_events.len(), 3);
+	if let MessageSendEvent::SendShutdown { .. } = msg_events[0] {} else { panic!(); }
+	if let MessageSendEvent::BroadcastChannelUpdate { .. } = msg_events[1] {} else { panic!(); }
+	if let MessageSendEvent::HandleError { .. } = msg_events[2] {} else { panic!(); }
+
 	check_added_monitors!(nodes[1], 2);
 	check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
 }
```

lightning/src/ln/channel.rs

Lines changed: 6 additions & 3 deletions
```diff
@@ -4166,7 +4166,7 @@ impl<Signer: Sign> Channel<Signer> {
 
 	pub fn shutdown<K: Deref>(
 		&mut self, keys_provider: &K, their_features: &InitFeatures, msg: &msgs::Shutdown
-	) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
+	) -> Result<(Option<msgs::Shutdown>, Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
 	where K::Target: KeysInterface
 	{
 		if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
@@ -4222,12 +4222,15 @@ impl<Signer: Sign> Channel<Signer> {
 
 		let monitor_update = if update_shutdown_script {
 			self.latest_monitor_update_id += 1;
-			Some(ChannelMonitorUpdate {
+			let monitor_update = ChannelMonitorUpdate {
 				update_id: self.latest_monitor_update_id,
 				updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
 					scriptpubkey: self.get_closing_scriptpubkey(),
 				}],
-			})
+			};
+			self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
+			self.pending_monitor_updates.push(monitor_update);
+			Some(self.pending_monitor_updates.last().unwrap())
 		} else { None };
 		let shutdown = if send_shutdown {
 			Some(msgs::Shutdown {
```
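Note the signature change above: `shutdown` now returns `Option<&ChannelMonitorUpdate>`, a borrow of the update just pushed onto `self.pending_monitor_updates`, instead of an owned value. A rough sketch of why that shape works, with illustrative types rather than LDK's:

```rust
struct Update {
    update_id: u64,
}

// Queue an update and hand the caller a borrow of the queued copy. The
// caller can inspect it and pass it along for persistence, while the queue
// keeps ownership so the update survives for later replay if needed.
fn queue_and_borrow(queue: &mut Vec<Update>, next_id: u64) -> Option<&Update> {
    queue.push(Update { update_id: next_id });
    queue.last()
}

fn main() {
    let mut pending = Vec::new();
    if let Some(update) = queue_and_borrow(&mut pending, 1) {
        println!("applying monitor update {}", update.update_id);
    }
    // The queued copy remains until completion is confirmed.
    assert_eq!(pending.len(), 1);
}
```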

lightning/src/ln/channelmanager.rs

Lines changed: 13 additions & 14 deletions
```diff
@@ -5000,27 +5000,27 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
 						if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" });
 				}
 
-				let (shutdown, monitor_update, htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.keys_manager, &their_features, &msg), chan_entry);
+				let funding_txo_opt = chan_entry.get().get_funding_txo();
+				let (shutdown, monitor_update_opt, htlcs) =
+					try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.keys_manager, &their_features, &msg), chan_entry);
 				dropped_htlcs = htlcs;
 
-				// Update the monitor with the shutdown script if necessary.
-				if let Some(monitor_update) = monitor_update {
-					let update_res = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), &monitor_update);
-					let (result, is_permanent) =
-						handle_monitor_update_res!(self, update_res, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
-					if is_permanent {
-						remove_channel!(self, chan_entry);
-						break result;
-					}
-				}
-
 				if let Some(msg) = shutdown {
+					// We can send the `shutdown` message before updating the `ChannelMonitor`
+					// here as we don't need the monitor update to complete until we send a
+					// `closing_signed`, which we'll delay if we're pending a monitor update.
 					channel_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
 						node_id: *counterparty_node_id,
 						msg,
 					});
 				}
 
+				// Update the monitor with the shutdown script if necessary.
+				if let Some(monitor_update) = monitor_update_opt {
+					let update_id = monitor_update.update_id;
+					let update_res = self.chain_monitor.update_channel(funding_txo_opt.unwrap(), monitor_update);
+					break handle_new_monitor_update!(self, update_res, update_id, channel_state_lock, channel_state.pending_msg_events, chan_entry);
+				}
 				break Ok(());
 			},
 			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
@@ -5032,8 +5032,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
 			self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver);
 		}
 
-		let _ = handle_error!(self, result, *counterparty_node_id);
-		Ok(())
+		result
 	}
 
 	fn internal_closing_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), MsgHandleErrInternal> {
```
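The new comment in the hunk above carries the key ordering argument: the `shutdown` reply may be sent before the monitor update completes, because only `closing_signed` actually depends on it. A simplified model of that ordering follows; the names and the status enum are hypothetical stand-ins for the real message queue and the behaviour behind `handle_new_monitor_update!`.

```rust
// Hypothetical status for a persisted monitor update, loosely modelled on
// an async-persistence result: either done immediately or still in flight.
enum UpdateStatus {
    Completed,
    InProgress,
}

// Queue the `shutdown` reply unconditionally, then apply the monitor
// update; `closing_signed` is only queued once the update has completed.
fn handle_shutdown(pending_msgs: &mut Vec<&'static str>, update: Option<UpdateStatus>) {
    pending_msgs.push("shutdown");
    match update {
        Some(UpdateStatus::Completed) | None => pending_msgs.push("closing_signed"),
        Some(UpdateStatus::InProgress) => { /* closing_signed deferred until completion */ }
    }
}

fn main() {
    let mut msgs = Vec::new();
    handle_shutdown(&mut msgs, Some(UpdateStatus::InProgress));
    assert_eq!(msgs, vec!["shutdown"]); // reply sent, closing_signed held back

    let mut msgs = Vec::new();
    handle_shutdown(&mut msgs, Some(UpdateStatus::Completed));
    assert_eq!(msgs, vec!["shutdown", "closing_signed"]);
}
```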
