Skip to content

Commit 06e3e96

Browse files
committed
Always process inbound shutdown ChannelMonitorUpdates asynchronously
We currently have two codepaths on most channel update functions - most methods return a set of messages to send a peer iff the `ChannelMonitorUpdate` succeeds, but if it does not we push the messages back into the `Channel` and then pull them back out when the `ChannelMonitorUpdate` completes and send them then. This adds a substantial amount of complexity in very critical codepaths.

Instead, here we swap all our channel update codepaths to immediately set the channel-update-required flag and only return a `ChannelMonitorUpdate` to the `ChannelManager`. Internally in the `Channel` we store a queue of `ChannelMonitorUpdate`s, which will become critical in future work to surface pending `ChannelMonitorUpdate`s to users at startup so they can complete.

This leaves some redundant work in `Channel` to be cleaned up later. Specifically, we still generate the messages which we will now ignore and regenerate later.

This commit updates the `ChannelMonitorUpdate` pipeline for handling inbound `shutdown` messages.
1 parent cf5ecd5 commit 06e3e96

File tree

3 files changed

+28
-18
lines changed

3 files changed

+28
-18
lines changed

lightning/src/ln/chanmon_update_fail_tests.rs

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2631,7 +2631,15 @@ fn test_permanent_error_during_handling_shutdown() {
26312631
assert!(nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).is_ok());
26322632
let shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
26332633
nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &shutdown);
2634-
check_closed_broadcast!(nodes[1], true);
2634+
2635+
// We always send the `shutdown` response when receiving a shutdown, even if we immediately
2636+
// close the channel thereafter.
2637+
let msg_events = nodes[1].node.get_and_clear_pending_msg_events();
2638+
assert_eq!(msg_events.len(), 3);
2639+
if let MessageSendEvent::SendShutdown { .. } = msg_events[0] {} else { panic!(); }
2640+
if let MessageSendEvent::BroadcastChannelUpdate { .. } = msg_events[1] {} else { panic!(); }
2641+
if let MessageSendEvent::HandleError { .. } = msg_events[2] {} else { panic!(); }
2642+
26352643
check_added_monitors!(nodes[1], 2);
26362644
check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
26372645
}

lightning/src/ln/channel.rs

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -4251,7 +4251,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
42514251

42524252
pub fn shutdown<SP: Deref>(
42534253
&mut self, signer_provider: &SP, their_features: &InitFeatures, msg: &msgs::Shutdown
4254-
) -> Result<(Option<msgs::Shutdown>, Option<ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
4254+
) -> Result<(Option<msgs::Shutdown>, Option<&ChannelMonitorUpdate>, Vec<(HTLCSource, PaymentHash)>), ChannelError>
42554255
where SP::Target: SignerProvider
42564256
{
42574257
if self.channel_state & (ChannelState::PeerDisconnected as u32) == ChannelState::PeerDisconnected as u32 {
@@ -4307,12 +4307,15 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
43074307

43084308
let monitor_update = if update_shutdown_script {
43094309
self.latest_monitor_update_id += 1;
4310-
Some(ChannelMonitorUpdate {
4310+
let monitor_update = ChannelMonitorUpdate {
43114311
update_id: self.latest_monitor_update_id,
43124312
updates: vec![ChannelMonitorUpdateStep::ShutdownScript {
43134313
scriptpubkey: self.get_closing_scriptpubkey(),
43144314
}],
4315-
})
4315+
};
4316+
self.monitor_updating_paused(false, false, false, Vec::new(), Vec::new(), Vec::new());
4317+
self.pending_monitor_updates.push(monitor_update);
4318+
Some(self.pending_monitor_updates.last().unwrap())
43164319
} else { None };
43174320
let shutdown = if send_shutdown {
43184321
Some(msgs::Shutdown {

lightning/src/ln/channelmanager.rs

Lines changed: 13 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -4678,27 +4678,27 @@ where
46784678
if chan_entry.get().sent_shutdown() { " after we initiated shutdown" } else { "" });
46794679
}
46804680

4681-
let (shutdown, monitor_update, htlcs) = try_chan_entry!(self, chan_entry.get_mut().shutdown(&self.signer_provider, &peer_state.latest_features, &msg), chan_entry);
4681+
let funding_txo_opt = chan_entry.get().get_funding_txo();
4682+
let (shutdown, monitor_update_opt, htlcs) = try_chan_entry!(self,
4683+
chan_entry.get_mut().shutdown(&self.signer_provider, &peer_state.latest_features, &msg), chan_entry);
46824684
dropped_htlcs = htlcs;
46834685

4684-
// Update the monitor with the shutdown script if necessary.
4685-
if let Some(monitor_update) = monitor_update {
4686-
let update_res = self.chain_monitor.update_channel(chan_entry.get().get_funding_txo().unwrap(), &monitor_update);
4687-
let (result, is_permanent) =
4688-
handle_monitor_update_res!(self, update_res, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
4689-
if is_permanent {
4690-
remove_channel!(self, chan_entry);
4691-
break result;
4692-
}
4693-
}
4694-
46954686
if let Some(msg) = shutdown {
4687+
// We can send the `shutdown` message before updating the `ChannelMonitor`
4688+
// here as we don't need the monitor update to complete until we send a
4689+
// `closing_signed`, which we'll delay if we're pending a monitor update.
46964690
peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
46974691
node_id: *counterparty_node_id,
46984692
msg,
46994693
});
47004694
}
47014695

4696+
// Update the monitor with the shutdown script if necessary.
4697+
if let Some(monitor_update) = monitor_update_opt {
4698+
let update_id = monitor_update.update_id;
4699+
let update_res = self.chain_monitor.update_channel(funding_txo_opt.unwrap(), monitor_update);
4700+
break handle_new_monitor_update!(self, update_res, update_id, peer_state_lock, peer_state, chan_entry);
4701+
}
47024702
break Ok(());
47034703
},
47044704
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
@@ -4710,8 +4710,7 @@ where
47104710
self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver);
47114711
}
47124712

4713-
let _ = handle_error!(self, result, *counterparty_node_id);
4714-
Ok(())
4713+
result
47154714
}
47164715

47174716
fn internal_closing_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), MsgHandleErrInternal> {

0 commit comments

Comments
 (0)