Commit 755f15c

Wait to free the holding cell during channel_reestablish handling
When we process a `channel_reestablish` message we free the HTLC update holding cell, as things may have changed while we were disconnected. However, some time ago, to handle freeing from the holding cell when a monitor update completes, we added a holding cell freeing check in `get_and_clear_pending_msg_events`. This makes the in-`channel_reestablish` holding cell clear redundant, as doing it immediately or in `get_and_clear_pending_msg_events` is not a user-visible difference. Thus, we remove the redundant code here, substantially simplifying `handle_chan_restoration_locked` while we're at it.
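
For intuition, here is a minimal, self-contained sketch of the argument in the commit message. It is not LDK code; every name in it (`Manager`, the `Msg` enum, the holding-cell flags) is hypothetical. The point is that a peer only ever sees messages returned from `get_and_clear_pending_msg_events`, so freeing the holding cell there, rather than inline in `channel_reestablish` handling, produces the same observable message sequence.

    // Minimal sketch, assuming a hypothetical single-channel "manager".
    #[derive(Debug, PartialEq)]
    enum Msg { ChannelReestablishResponse, CommitmentUpdate }

    #[derive(Default)]
    struct Manager {
        pending_msg_events: Vec<Msg>,
        holding_cell_has_updates: bool,
        awaiting_remote_revoke: bool,
    }

    impl Manager {
        fn handle_channel_reestablish(&mut self) {
            // After this commit: only queue the reestablish response here; the
            // holding cell is no longer freed inline.
            self.pending_msg_events.push(Msg::ChannelReestablishResponse);
        }

        fn get_and_clear_pending_msg_events(&mut self) -> Vec<Msg> {
            // The holding cell is freed here instead, just before the events are
            // handed out, so the peer sees the same messages either way.
            if self.holding_cell_has_updates && !self.awaiting_remote_revoke {
                self.holding_cell_has_updates = false;
                self.pending_msg_events.push(Msg::CommitmentUpdate);
            }
            std::mem::take(&mut self.pending_msg_events)
        }
    }

    fn main() {
        let mut mgr = Manager { holding_cell_has_updates: true, ..Default::default() };
        mgr.handle_channel_reestablish();
        // The peer observes the same two messages whether the free happened
        // during reestablish handling or while draining events.
        assert_eq!(
            mgr.get_and_clear_pending_msg_events(),
            vec![Msg::ChannelReestablishResponse, Msg::CommitmentUpdate]
        );
    }
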
1 parent 3438989 commit 755f15c

4 files changed: +29 -113 lines

lightning/src/ln/channel.rs

Lines changed: 9 additions & 50 deletions
@@ -439,8 +439,6 @@ pub(super) struct ReestablishResponses {
 	pub raa: Option<msgs::RevokeAndACK>,
 	pub commitment_update: Option<msgs::CommitmentUpdate>,
 	pub order: RAACommitmentOrder,
-	pub mon_update: Option<ChannelMonitorUpdate>,
-	pub holding_cell_failed_htlcs: Vec<(HTLCSource, PaymentHash)>,
 	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
 	pub shutdown_msg: Option<msgs::Shutdown>,
 }
@@ -3955,9 +3953,8 @@ impl<Signer: Sign> Channel<Signer> {
 			// Short circuit the whole handler as there is nothing we can resend them
 			return Ok(ReestablishResponses {
 				channel_ready: None,
-				raa: None, commitment_update: None, mon_update: None,
+				raa: None, commitment_update: None,
 				order: RAACommitmentOrder::CommitmentFirst,
-				holding_cell_failed_htlcs: Vec::new(),
 				shutdown_msg, announcement_sigs,
 			});
 		}
@@ -3970,9 +3967,8 @@ impl<Signer: Sign> Channel<Signer> {
 					next_per_commitment_point,
 					short_channel_id_alias: Some(self.outbound_scid_alias),
 				}),
-				raa: None, commitment_update: None, mon_update: None,
+				raa: None, commitment_update: None,
 				order: RAACommitmentOrder::CommitmentFirst,
-				holding_cell_failed_htlcs: Vec::new(),
 				shutdown_msg, announcement_sigs,
 			});
 		}
@@ -4015,46 +4011,12 @@ impl<Signer: Sign> Channel<Signer> {
 				log_debug!(logger, "Reconnected channel {} with no loss", log_bytes!(self.channel_id()));
 			}
 
-			if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 {
-				// We're up-to-date and not waiting on a remote revoke (if we are our
-				// channel_reestablish should result in them sending a revoke_and_ack), but we may
-				// have received some updates while we were disconnected. Free the holding cell
-				// now!
-				match self.free_holding_cell_htlcs(logger) {
-					Err(ChannelError::Close(msg)) => Err(ChannelError::Close(msg)),
-					Err(ChannelError::Warn(_)) | Err(ChannelError::Ignore(_)) =>
-						panic!("Got non-channel-failing result from free_holding_cell_htlcs"),
-					Ok((Some((commitment_update, monitor_update)), holding_cell_failed_htlcs)) => {
-						Ok(ReestablishResponses {
-							channel_ready, shutdown_msg, announcement_sigs,
-							raa: required_revoke,
-							commitment_update: Some(commitment_update),
-							order: self.resend_order.clone(),
-							mon_update: Some(monitor_update),
-							holding_cell_failed_htlcs,
-						})
-					},
-					Ok((None, holding_cell_failed_htlcs)) => {
-						Ok(ReestablishResponses {
-							channel_ready, shutdown_msg, announcement_sigs,
-							raa: required_revoke,
-							commitment_update: None,
-							order: self.resend_order.clone(),
-							mon_update: None,
-							holding_cell_failed_htlcs,
-						})
-					},
-				}
-			} else {
-				Ok(ReestablishResponses {
-					channel_ready, shutdown_msg, announcement_sigs,
-					raa: required_revoke,
-					commitment_update: None,
-					order: self.resend_order.clone(),
-					mon_update: None,
-					holding_cell_failed_htlcs: Vec::new(),
-				})
-			}
+			Ok(ReestablishResponses {
+				channel_ready, shutdown_msg, announcement_sigs,
+				raa: required_revoke,
+				commitment_update: None,
+				order: self.resend_order.clone(),
+			})
 		} else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
 			if required_revoke.is_some() {
 				log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", log_bytes!(self.channel_id()));
@@ -4066,18 +4028,15 @@ impl<Signer: Sign> Channel<Signer> {
 				self.monitor_pending_commitment_signed = true;
 				Ok(ReestablishResponses {
 					channel_ready, shutdown_msg, announcement_sigs,
-					commitment_update: None, raa: None, mon_update: None,
+					commitment_update: None, raa: None,
 					order: self.resend_order.clone(),
-					holding_cell_failed_htlcs: Vec::new(),
 				})
 			} else {
 				Ok(ReestablishResponses {
 					channel_ready, shutdown_msg, announcement_sigs,
 					raa: required_revoke,
 					commitment_update: Some(self.get_last_commitment_update(logger)),
 					order: self.resend_order.clone(),
-					mon_update: None,
-					holding_cell_failed_htlcs: Vec::new(),
 				})
 			}
 		} else {
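
Taken together, the channel.rs hunks leave `ReestablishResponses` looking roughly as follows. This is reconstructed from the hunks above, not copied from the file: the `channel_ready` field's declaration is not shown in the first hunk, so its type here is inferred from the later uses of `channel_ready: None` and `channel_ready: Some(msgs::ChannelReady { .. })`.

    pub(super) struct ReestablishResponses {
        pub channel_ready: Option<msgs::ChannelReady>,
        pub raa: Option<msgs::RevokeAndACK>,
        pub commitment_update: Option<msgs::CommitmentUpdate>,
        pub order: RAACommitmentOrder,
        pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
        pub shutdown_msg: Option<msgs::Shutdown>,
    }
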

lightning/src/ln/channelmanager.rs

Lines changed: 9 additions & 61 deletions
@@ -1518,13 +1518,11 @@ macro_rules! emit_channel_ready_event {
 }
 
 macro_rules! handle_chan_restoration_locked {
-	($self: ident, $channel_lock: expr, $channel_state: expr, $channel_entry: expr,
-	 $raa: expr, $commitment_update: expr, $order: expr, $chanmon_update: expr,
+	($self: ident, $channel_state: expr, $channel_entry: expr,
+	 $raa: expr, $commitment_update: expr, $order: expr,
 	 $pending_forwards: expr, $funding_broadcastable: expr, $channel_ready: expr, $announcement_sigs: expr) => { {
 		let mut htlc_forwards = None;
 
-		let chanmon_update: Option<ChannelMonitorUpdate> = $chanmon_update; // Force type-checking to resolve
-		let chanmon_update_is_none = chanmon_update.is_none();
 		let counterparty_node_id = $channel_entry.get().get_counterparty_node_id();
 		let res = loop {
 			let forwards: Vec<(PendingHTLCInfo, u64)> = $pending_forwards; // Force type-checking to resolve
@@ -1533,24 +1531,7 @@ macro_rules! handle_chan_restoration_locked {
 					$channel_entry.get().get_funding_txo().unwrap(), forwards));
 			}
 
-			if chanmon_update.is_some() {
-				// On reconnect, we, by definition, only resend a channel_ready if there have been
-				// no commitment updates, so the only channel monitor update which could also be
-				// associated with a channel_ready would be the funding_created/funding_signed
-				// monitor update. That monitor update failing implies that we won't send
-				// channel_ready until it's been updated, so we can't have a channel_ready and a
-				// monitor update here (so we don't bother to handle it correctly below).
-				assert!($channel_ready.is_none());
-				// A channel monitor update makes no sense without either a channel_ready or a
-				// commitment update to process after it. Since we can't have a channel_ready, we
-				// only bother to handle the monitor-update + commitment_update case below.
-				assert!($commitment_update.is_some());
-			}
-
 			if let Some(msg) = $channel_ready {
-				// Similar to the above, this implies that we're letting the channel_ready fly
-				// before it should be allowed to.
-				assert!(chanmon_update.is_none());
 				send_channel_ready!($self, $channel_state.pending_msg_events, $channel_entry.get(), msg);
 			}
 			if let Some(msg) = $announcement_sigs {
@@ -1562,33 +1543,6 @@ macro_rules! handle_chan_restoration_locked {
 
 			emit_channel_ready_event!($self, $channel_entry.get_mut());
 
-			let funding_broadcastable: Option<Transaction> = $funding_broadcastable; // Force type-checking to resolve
-			if let Some(monitor_update) = chanmon_update {
-				// We only ever broadcast a funding transaction in response to a funding_signed
-				// message and the resulting monitor update. Thus, on channel_reestablish
-				// message handling we can't have a funding transaction to broadcast. When
-				// processing a monitor update finishing resulting in a funding broadcast, we
-				// cannot have a second monitor update, thus this case would indicate a bug.
-				assert!(funding_broadcastable.is_none());
-				// Given we were just reconnected or finished updating a channel monitor, the
-				// only case where we can get a new ChannelMonitorUpdate would be if we also
-				// have some commitment updates to send as well.
-				assert!($commitment_update.is_some());
-				match $self.chain_monitor.update_channel($channel_entry.get().get_funding_txo().unwrap(), monitor_update) {
-					ChannelMonitorUpdateStatus::Completed => {},
-					e => {
-						// channel_reestablish doesn't guarantee the order it returns is sensical
-						// for the messages it returns, but if we're setting what messages to
-						// re-transmit on monitor update success, we need to make sure it is sane.
-						let mut order = $order;
-						if $raa.is_none() {
-							order = RAACommitmentOrder::CommitmentFirst;
-						}
-						break handle_monitor_update_res!($self, e, $channel_entry, order, $raa.is_some(), true);
-					}
-				}
-			}
-
 			macro_rules! handle_cs { () => {
 				if let Some(update) = $commitment_update {
 					$channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
@@ -1615,20 +1569,15 @@ macro_rules! handle_chan_restoration_locked {
 					handle_cs!();
 				},
 			}
+
+			let funding_broadcastable: Option<Transaction> = $funding_broadcastable; // Force type-checking to resolve
 			if let Some(tx) = funding_broadcastable {
 				log_info!($self.logger, "Broadcasting funding transaction with txid {}", tx.txid());
 				$self.tx_broadcaster.broadcast_transaction(&tx);
 			}
 			break Ok(());
 		};
 
-		if chanmon_update_is_none {
-			// If there was no ChannelMonitorUpdate, we should never generate an Err in the res loop
-			// above. Doing so would imply calling handle_err!() from channel_monitor_updated() which
-			// should *never* end up calling back to `chain_monitor.update_channel()`.
-			assert!(res.is_ok());
-		}
-
 		(htlc_forwards, res, counterparty_node_id)
 	} }
 }
@@ -4520,7 +4469,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
 				})
 			} else { None }
 		} else { None };
-		chan_restoration_res = handle_chan_restoration_locked!(self, channel_lock, channel_state, channel, updates.raa, updates.commitment_update, updates.order, None, updates.accepted_htlcs, updates.funding_broadcastable, updates.channel_ready, updates.announcement_sigs);
+		chan_restoration_res = handle_chan_restoration_locked!(self, channel_state, channel, updates.raa, updates.commitment_update, updates.order, updates.accepted_htlcs, updates.funding_broadcastable, updates.channel_ready, updates.announcement_sigs);
 		if let Some(upd) = channel_update {
 			channel_state.pending_msg_events.push(upd);
 		}
@@ -5279,7 +5228,7 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
 
 	fn internal_channel_reestablish(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReestablish) -> Result<(), MsgHandleErrInternal> {
 		let chan_restoration_res;
-		let (htlcs_failed_forward, need_lnd_workaround) = {
+		let need_lnd_workaround = {
 			let mut channel_state_lock = self.channel_state.lock().unwrap();
 			let channel_state = &mut *channel_state_lock;
 
@@ -5314,18 +5263,17 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
 				}
 				let need_lnd_workaround = chan.get_mut().workaround_lnd_bug_4006.take();
 				chan_restoration_res = handle_chan_restoration_locked!(
-					self, channel_state_lock, channel_state, chan, responses.raa, responses.commitment_update, responses.order,
-					responses.mon_update, Vec::new(), None, responses.channel_ready, responses.announcement_sigs);
+					self, channel_state, chan, responses.raa, responses.commitment_update, responses.order,
+					Vec::new(), None, responses.channel_ready, responses.announcement_sigs);
 				if let Some(upd) = channel_update {
 					channel_state.pending_msg_events.push(upd);
 				}
-				(responses.holding_cell_failed_htlcs, need_lnd_workaround)
+				need_lnd_workaround
 			},
 			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
 		}
 	};
 	post_handle_chan_restoration!(self, chan_restoration_res);
-	self.fail_holding_cell_htlcs(htlcs_failed_forward, msg.channel_id, counterparty_node_id);
 
 	if let Some(channel_ready_msg) = need_lnd_workaround {
 		self.internal_channel_ready(counterparty_node_id, &channel_ready_msg)?;

lightning/src/ln/functional_test_utils.rs

Lines changed: 9 additions & 0 deletions
@@ -2413,6 +2413,14 @@ macro_rules! handle_chan_reestablish_msgs {
 			assert_eq!(*node_id, $dst_node.node.get_our_node_id());
 		}
 
+		let mut had_channel_update = false; // ChannelUpdate may be now or later, but not both
+		if let Some(&MessageSendEvent::SendChannelUpdate { ref node_id, ref msg }) = msg_events.get(idx) {
+			assert_eq!(*node_id, $dst_node.node.get_our_node_id());
+			idx += 1;
+			assert_eq!(msg.contents.flags & 2, 0); // "disabled" flag must not be set as we just reconnected.
+			had_channel_update = true;
+		}
+
 		let mut revoke_and_ack = None;
 		let mut commitment_update = None;
 		let order = if let Some(ev) = msg_events.get(idx) {
@@ -2457,6 +2465,7 @@ macro_rules! handle_chan_reestablish_msgs {
 			assert_eq!(*node_id, $dst_node.node.get_our_node_id());
 			idx += 1;
 			assert_eq!(msg.contents.flags & 2, 0); // "disabled" flag must not be set as we just reconnected.
+			assert!(!had_channel_update);
 		}
 
 		assert_eq!(msg_events.len(), idx);

lightning/src/ln/reload_tests.rs

Lines changed: 2 additions & 2 deletions
@@ -786,9 +786,9 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
 	let ds_msgs = nodes[3].node.get_and_clear_pending_msg_events();
 	check_added_monitors!(nodes[3], 1);
 	assert_eq!(ds_msgs.len(), 2);
-	if let MessageSendEvent::SendChannelUpdate { .. } = ds_msgs[1] {} else { panic!(); }
+	if let MessageSendEvent::SendChannelUpdate { .. } = ds_msgs[0] {} else { panic!(); }
 
-	let cs_updates = match ds_msgs[0] {
+	let cs_updates = match ds_msgs[1] {
 		MessageSendEvent::UpdateHTLCs { ref updates, .. } => {
 			nodes[2].node.handle_update_fulfill_htlc(&nodes[3].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
 			check_added_monitors!(nodes[2], 1);
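
The swapped indices are consistent with the claim-carrying `UpdateHTLCs` now being queued when the holding cell is freed inside `get_and_clear_pending_msg_events`, i.e. after the `SendChannelUpdate` produced while handling the reestablish. A test that did not want to depend on that ordering could instead look the two events up by variant. This fragment is purely illustrative (the actual test keeps the fixed indices shown above) and assumes it sits inside the same test body where `ds_msgs` is in scope:

    // Hypothetical order-independent lookup of the two expected events.
    let upd_idx = ds_msgs.iter()
        .position(|e| matches!(e, MessageSendEvent::SendChannelUpdate { .. }))
        .expect("expected a SendChannelUpdate");
    let htlcs_idx = ds_msgs.iter()
        .position(|e| matches!(e, MessageSendEvent::UpdateHTLCs { .. }))
        .expect("expected an UpdateHTLCs");
    assert_ne!(upd_idx, htlcs_idx);
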
