Skip to content

Commit bb7c4d1

Browse files
committed
Work around LND bug 6039
LND hasn't properly handled shutdown messages ever, and force-closes any time we send one while HTLCs are still present. The issue is tracked at lightningnetwork/lnd#6039 and has had multiple patches to fix it, but none so far have managed to land upstream. The issue appears to be very low priority for the LND team despite being marked "P1". We're not going to bother handling this in a sensible way; instead we simply send the Shutdown message on repeat until morale improves.
1 parent d4ad826 commit bb7c4d1

File tree

3 files changed

+91
-7
lines changed

3 files changed

+91
-7
lines changed

lightning/src/ln/channel.rs

Lines changed: 12 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -3811,6 +3811,17 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
38113811
}
38123812
}
38133813

3814+
/// Gets the `Shutdown` message we should send our peer on reconnect, if any.
3815+
pub fn get_outbound_shutdown(&self) -> Option<msgs::Shutdown> {
3816+
if self.context.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 {
3817+
assert!(self.context.shutdown_scriptpubkey.is_some());
3818+
Some(msgs::Shutdown {
3819+
channel_id: self.context.channel_id,
3820+
scriptpubkey: self.get_closing_scriptpubkey(),
3821+
})
3822+
} else { None }
3823+
}
3824+
38143825
/// May panic if some calls other than message-handling calls (which will all Err immediately)
38153826
/// have been called between remove_uncommitted_htlcs_and_mark_paused and this call.
38163827
///
@@ -3877,13 +3888,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
38773888
self.context.channel_state &= !(ChannelState::PeerDisconnected as u32);
38783889
self.context.sent_message_awaiting_response = None;
38793890

3880-
let shutdown_msg = if self.context.channel_state & (ChannelState::LocalShutdownSent as u32) != 0 {
3881-
assert!(self.context.shutdown_scriptpubkey.is_some());
3882-
Some(msgs::Shutdown {
3883-
channel_id: self.context.channel_id,
3884-
scriptpubkey: self.get_closing_scriptpubkey(),
3885-
})
3886-
} else { None };
3891+
let shutdown_msg = self.get_outbound_shutdown();
38873892

38883893
let announcement_sigs = self.get_announcement_sigs(node_signer, genesis_block_hash, user_config, best_block.height(), logger);
38893894

lightning/src/ln/channelmanager.rs

Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7489,6 +7489,36 @@ where
74897489
fn handle_error(&self, counterparty_node_id: &PublicKey, msg: &msgs::ErrorMessage) {
74907490
let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
74917491

7492+
match &msg.data as &str {
7493+
"cannot co-op close channel w/ active htlcs"|
7494+
"link failed to shutdown" =>
7495+
{
7496+
// LND hasn't properly handled shutdown messages ever, and force-closes any time we
7497+
// send one while HTLCs are still present. The issue is tracked at
7498+
// https://github.com/lightningnetwork/lnd/issues/6039 and has had multiple patches
7499+
// to fix it but none so far have managed to land upstream. The issue appears to be
7500+
// very low priority for the LND team despite being marked "P1".
7501+
// We're not going to bother handling this in a sensible way, instead simply
7502+
// repeating the Shutdown message on repeat until morale improves.
7503+
if msg.channel_id != [0; 32] {
7504+
let per_peer_state = self.per_peer_state.read().unwrap();
7505+
let peer_state_mutex_opt = per_peer_state.get(counterparty_node_id);
7506+
if peer_state_mutex_opt.is_none() { return; }
7507+
let mut peer_state = peer_state_mutex_opt.unwrap().lock().unwrap();
7508+
if let Some(chan) = peer_state.channel_by_id.get(&msg.channel_id) {
7509+
if let Some(msg) = chan.get_outbound_shutdown() {
7510+
peer_state.pending_msg_events.push(events::MessageSendEvent::SendShutdown {
7511+
node_id: *counterparty_node_id,
7512+
msg,
7513+
});
7514+
}
7515+
}
7516+
}
7517+
return;
7518+
}
7519+
_ => {}
7520+
}
7521+
74927522
if msg.channel_id == [0; 32] {
74937523
let channel_ids: Vec<[u8; 32]> = {
74947524
let per_peer_state = self.per_peer_state.read().unwrap();

lightning/src/ln/shutdown_tests.rs

Lines changed: 49 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -198,6 +198,55 @@ fn expect_channel_shutdown_state_with_htlc() {
198198
assert!(nodes[0].node.list_channels().is_empty());
199199
}
200200

201+
#[test]
fn test_lnd_bug_6039() {
	// LND sends a nonsense error message any time it gets a shutdown if there are still HTLCs
	// pending. We currently swallow that error to work around LND's bug #6039. This test emulates
	// the LND nonsense and ensures we at least kinda handle it.
	let chanmon_cfgs = create_chanmon_cfgs(2);
	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
	let chan = create_announced_chan_between_nodes(&nodes, 0, 1);

	// Leave an HTLC in flight so that the peer's (emulated-LND) error is plausible.
	let (payment_preimage, payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 100_000);

	// Node 0 initiates a cooperative close while the HTLC is still pending.
	nodes[0].node.close_channel(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
	let node_0_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown);

	// Generate an lnd-like error message and check that we respond by simply screaming louder to
	// see if LND will accept our protocol compliance.
	let err_msg = msgs::ErrorMessage { channel_id: chan.2, data: "link failed to shutdown".to_string() };
	nodes[0].node.handle_error(&nodes[1].node.get_our_node_id(), &err_msg);
	// The workaround should re-queue our Shutdown rather than force-closing the channel.
	let _node_0_repeated_shutdown = get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id());

	let node_1_shutdown = get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id());

	// No further messages should be pending on either side at this point.
	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());

	claim_payment(&nodes[0], &[&nodes[1]], payment_preimage);

	// Assume that LND will eventually respond to our Shutdown if we clear all the remaining HTLCs
	nodes[0].node.handle_shutdown(&nodes[1].node.get_our_node_id(), &node_1_shutdown);

	// Complete the ClosingSigned negotiation: both sides exchange fee proposals until they agree
	// and broadcast the closing transaction.
	let node_0_closing_signed = get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_closing_signed);
	let node_1_closing_signed = get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id());
	nodes[0].node.handle_closing_signed(&nodes[1].node.get_our_node_id(), &node_1_closing_signed);
	let (_, node_0_2nd_closing_signed) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
	nodes[1].node.handle_closing_signed(&nodes[0].node.get_our_node_id(), &node_0_2nd_closing_signed.unwrap());
	// Node 1 accepts node 0's second proposal, so it has no further ClosingSigned to send.
	let (_, node_1_none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
	assert!(node_1_none.is_none());
	check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
	check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);

	// Shutdown basically removes the ChannelDetails, so testing of the shutdown-complete state is unnecessary
	assert!(nodes[0].node.list_channels().is_empty());
}
249+
201250
#[test]
202251
fn expect_channel_shutdown_state_with_force_closure() {
203252
// Test sending a shutdown prior to channel_ready after funding generation

0 commit comments

Comments
 (0)