Skip to content

Commit e9452c7

Browse files
committed
Consistently clean up when failing in internal_funding_created
When we fail to accept a counterparty's funding for various reasons, we should ensure we call the correct cleanup methods in `internal_funding_created` to remove the temporary data for the channel in our various internal structs (primarily the SCID alias map). This adds the missing cleanup, using `convert_chan_phase_err` consistently in all the error paths. This also ensures we get a `ChannelClosed` event when relevant.
1 parent 1bee708 commit e9452c7

File tree

4 files changed

+112
-24
lines changed

4 files changed

+112
-24
lines changed

lightning/src/ln/channel.rs

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2920,6 +2920,20 @@ impl<SP: Deref> Channel<SP> where
29202920
self.context.channel_state.clear_waiting_for_batch();
29212921
}
29222922

2923+
/// Unsets the existing funding information.
2924+
///
2925+
/// This must only be used if the channel has not yet completed funding and has not been used.
2926+
///
2927+
/// Further, the channel must be immediately shut down after this with a call to
2928+
/// [`ChannelContext::force_shutdown`].
2929+
pub fn unset_funding_info(&mut self, temporary_channel_id: ChannelId) {
2930+
debug_assert!(matches!(
2931+
self.context.channel_state, ChannelState::AwaitingChannelReady(_)
2932+
));
2933+
self.context.channel_transaction_parameters.funding_outpoint = None;
2934+
self.context.channel_id = temporary_channel_id;
2935+
}
2936+
29232937
/// Handles a channel_ready message from our peer. If we've already sent our channel_ready
29242938
/// and the channel is now usable (and public), this may generate an announcement_signatures to
29252939
/// reply with.

lightning/src/ln/channelmanager.rs

Lines changed: 32 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -1251,7 +1251,10 @@ where
12511251
/// required to access the channel with the `counterparty_node_id`.
12521252
///
12531253
/// See `ChannelManager` struct-level documentation for lock order requirements.
1254+
#[cfg(not(test))]
12541255
outpoint_to_peer: Mutex<HashMap<OutPoint, PublicKey>>,
1256+
#[cfg(test)]
1257+
pub(crate) outpoint_to_peer: Mutex<HashMap<OutPoint, PublicKey>>,
12551258

12561259
/// SCIDs (and outbound SCID aliases) -> `counterparty_node_id`s and `channel_id`s.
12571260
///
@@ -6205,44 +6208,55 @@ where
62056208

62066209
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
62076210
let peer_state = &mut *peer_state_lock;
6208-
let (chan, funding_msg_opt, monitor) =
6211+
let (mut chan, funding_msg_opt, monitor) =
62096212
match peer_state.channel_by_id.remove(&msg.temporary_channel_id) {
62106213
Some(ChannelPhase::UnfundedInboundV1(inbound_chan)) => {
62116214
let logger = WithChannelContext::from(&self.logger, &inbound_chan.context);
62126215
match inbound_chan.funding_created(msg, best_block, &self.signer_provider, &&logger) {
62136216
Ok(res) => res,
6214-
Err((mut inbound_chan, err)) => {
6217+
Err((inbound_chan, err)) => {
62156218
// We've already removed this inbound channel from the map in `PeerState`
62166219
// above, so at this point we just need to clean up any lingering entries
62176220
// concerning this channel as it is safe to do so.
6218-
update_maps_on_chan_removal!(self, &inbound_chan.context);
6219-
let user_id = inbound_chan.context.get_user_id();
6220-
let shutdown_res = inbound_chan.context.force_shutdown(false);
6221-
return Err(MsgHandleErrInternal::from_finish_shutdown(format!("{}", err),
6222-
msg.temporary_channel_id, user_id, shutdown_res, None, inbound_chan.context.get_value_satoshis()));
6221+
debug_assert!(matches!(err, ChannelError::Close(_)));
6222+
// Really we should be returning the channel_id the peer expects based
6223+
// on their funding info here, but they're horribly confused anyway, so
6224+
// there's not a lot we can do to save them.
6225+
return Err(convert_chan_phase_err!(self, err, &mut ChannelPhase::UnfundedInboundV1(inbound_chan), &msg.temporary_channel_id).1);
62236226
},
62246227
}
62256228
},
6226-
Some(ChannelPhase::Funded(_)) | Some(ChannelPhase::UnfundedOutboundV1(_)) => {
6227-
return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got an unexpected funding_created message from peer with counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id));
6229+
Some(mut phase) => {
6230+
let err_msg = format!("Got an unexpected funding_created message from peer with counterparty_node_id {}", counterparty_node_id);
6231+
let err = ChannelError::Close(err_msg);
6232+
return Err(convert_chan_phase_err!(self, err, &mut phase, &msg.temporary_channel_id).1);
62286233
},
62296234
None => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.temporary_channel_id))
62306235
};
62316236

6232-
match peer_state.channel_by_id.entry(chan.context.channel_id()) {
6237+
let funded_channel_id = chan.context.channel_id();
6238+
6239+
macro_rules! fail_chan { ($err: expr) => { {
6240+
// Note that at this point we've filled in the funding outpoint on our
6241+
// channel, but it's actually in conflict with another channel. Thus, if
6242+
// we call `convert_chan_phase_err` immediately (thus calling
6243+
// `update_maps_on_chan_removal`), we'll remove the existing channel
6244+
// from `outpoint_to_peer`. Thus, we must first unset the funding outpoint
6245+
// on the channel.
6246+
let err = ChannelError::Close($err.to_owned());
6247+
chan.unset_funding_info(msg.temporary_channel_id);
6248+
return Err(convert_chan_phase_err!(self, err, chan, &funded_channel_id, UNFUNDED_CHANNEL).1);
6249+
} } }
6250+
6251+
match peer_state.channel_by_id.entry(funded_channel_id) {
62336252
hash_map::Entry::Occupied(_) => {
6234-
Err(MsgHandleErrInternal::send_err_msg_no_close(
6235-
"Already had channel with the new channel_id".to_owned(),
6236-
chan.context.channel_id()
6237-
))
6253+
fail_chan!("Already had channel with the new channel_id");
62386254
},
62396255
hash_map::Entry::Vacant(e) => {
62406256
let mut outpoint_to_peer_lock = self.outpoint_to_peer.lock().unwrap();
62416257
match outpoint_to_peer_lock.entry(monitor.get_funding_txo().0) {
62426258
hash_map::Entry::Occupied(_) => {
6243-
return Err(MsgHandleErrInternal::send_err_msg_no_close(
6244-
"The funding_created message had the same funding_txid as an existing channel - funding is not possible".to_owned(),
6245-
chan.context.channel_id()))
6259+
fail_chan!("The funding_created message had the same funding_txid as an existing channel - funding is not possible");
62466260
},
62476261
hash_map::Entry::Vacant(i_e) => {
62486262
let monitor_res = self.chain_monitor.watch_channel(monitor.get_funding_txo().0, monitor);
@@ -6271,13 +6285,7 @@ where
62716285
} else {
62726286
let logger = WithChannelContext::from(&self.logger, &chan.context);
62736287
log_error!(logger, "Persisting initial ChannelMonitor failed, implying the funding outpoint was duplicated");
6274-
let channel_id = match funding_msg_opt {
6275-
Some(msg) => msg.channel_id,
6276-
None => chan.context.channel_id(),
6277-
};
6278-
return Err(MsgHandleErrInternal::send_err_msg_no_close(
6279-
"The funding_created message had the same funding_txid as an existing channel - funding is not possible".to_owned(),
6280-
channel_id));
6288+
fail_chan!("Duplicate funding outpoint");
62816289
}
62826290
}
62836291
}

lightning/src/ln/functional_test_utils.rs

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1536,6 +1536,18 @@ pub struct ExpectedCloseEvent {
15361536
pub reason: Option<ClosureReason>,
15371537
}
15381538

1539+
impl ExpectedCloseEvent {
1540+
pub fn from_id_reason(channel_id: ChannelId, discard_funding: bool, reason: ClosureReason) -> Self {
1541+
Self {
1542+
channel_capacity_sats: None,
1543+
channel_id: Some(channel_id),
1544+
counterparty_node_id: None,
1545+
discard_funding,
1546+
reason: Some(reason),
1547+
}
1548+
}
1549+
}
1550+
15391551
/// Check that multiple channel closing events have been issued.
15401552
pub fn check_closed_events(node: &Node, expected_close_events: &[ExpectedCloseEvent]) {
15411553
let closed_events_count = expected_close_events.len();

lightning/src/ln/functional_tests.rs

Lines changed: 54 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8970,6 +8970,54 @@ fn test_duplicate_temporary_channel_id_from_different_peers() {
89708970
}
89718971
}
89728972

8973+
#[test]
8974+
fn test_duplicate_funding_err_in_funding() {
8975+
// Test that if we have a live channel with one peer, then another peer comes along and tries
8976+
// to create a second channel with the same txid, we'll fail and not overwrite the
8977+
// outpoint_to_peer map in `ChannelManager`.
8978+
//
8979+
// This was previously broken.
8980+
let chanmon_cfgs = create_chanmon_cfgs(3);
8981+
let node_cfgs = create_node_cfgs(3, &chanmon_cfgs);
8982+
let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
8983+
let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
8984+
8985+
let (_, _, _, real_channel_id, funding_tx) = create_chan_between_nodes(&nodes[0], &nodes[1]);
8986+
let real_chan_funding_txo = chain::transaction::OutPoint { txid: funding_tx.txid(), index: 0 };
8987+
assert_eq!(real_chan_funding_txo.to_channel_id(), real_channel_id);
8988+
8989+
nodes[2].node.create_channel(nodes[1].node.get_our_node_id(), 100_000, 0, 42, None, None).unwrap();
8990+
let mut open_chan_msg = get_event_msg!(nodes[2], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
8991+
let node_c_temp_chan_id = open_chan_msg.temporary_channel_id;
8992+
open_chan_msg.temporary_channel_id = real_channel_id;
8993+
nodes[1].node.handle_open_channel(&nodes[2].node.get_our_node_id(), &open_chan_msg);
8994+
let mut accept_chan_msg = get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[2].node.get_our_node_id());
8995+
accept_chan_msg.temporary_channel_id = node_c_temp_chan_id;
8996+
nodes[2].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &accept_chan_msg);
8997+
8998+
// Now that we have a second channel with the same funding txo, send a bogus funding message
8999+
// and let nodes[1] remove the inbound channel.
9000+
let (_, funding_tx, _) = create_funding_transaction(&nodes[2], &nodes[1].node.get_our_node_id(), 100_000, 42);
9001+
9002+
nodes[2].node.funding_transaction_generated(&node_c_temp_chan_id, &nodes[1].node.get_our_node_id(), funding_tx).unwrap();
9003+
9004+
let mut funding_created_msg = get_event_msg!(nodes[2], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
9005+
funding_created_msg.temporary_channel_id = real_channel_id;
9006+
// Make the signature invalid by changing the funding output
9007+
funding_created_msg.funding_output_index += 10;
9008+
nodes[1].node.handle_funding_created(&nodes[2].node.get_our_node_id(), &funding_created_msg);
9009+
get_err_msg(&nodes[1], &nodes[2].node.get_our_node_id());
9010+
let err = "Invalid funding_created signature from peer".to_owned();
9011+
let reason = ClosureReason::ProcessingError { err };
9012+
let expected_closing = ExpectedCloseEvent::from_id_reason(real_channel_id, false, reason);
9013+
check_closed_events(&nodes[1], &[expected_closing]);
9014+
9015+
assert_eq!(
9016+
*nodes[1].node.outpoint_to_peer.lock().unwrap().get(&real_chan_funding_txo).unwrap(),
9017+
nodes[0].node.get_our_node_id()
9018+
);
9019+
}
9020+
89739021
#[test]
89749022
fn test_duplicate_chan_id() {
89759023
// Test that if a given peer tries to open a channel with the same channel_id as one that is
@@ -9080,6 +9128,12 @@ fn test_duplicate_chan_id() {
90809128
// without trying to persist the `ChannelMonitor`.
90819129
check_added_monitors!(nodes[1], 0);
90829130

9131+
check_closed_events(&nodes[1], &[
9132+
ExpectedCloseEvent::from_id_reason(channel_id, false, ClosureReason::ProcessingError {
9133+
err: "Already had channel with the new channel_id".to_owned()
9134+
})
9135+
]);
9136+
90839137
// ...still, nodes[1] will reject the duplicate channel.
90849138
{
90859139
let events = nodes[1].node.get_and_clear_pending_msg_events();

0 commit comments

Comments
 (0)