diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs
index 9b0e3036762..e78b8999f7e 100644
--- a/lightning/src/ln/chanmon_update_fail_tests.rs
+++ b/lightning/src/ln/chanmon_update_fail_tests.rs
@@ -324,7 +324,6 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) {
 		nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), commitment_signed);
 		check_added_monitors!(nodes[0], 1);
 		assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-		nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
 	}
 
 	(update_fulfill_htlcs[0].clone(), commitment_signed.clone())
@@ -633,7 +632,6 @@ fn test_monitor_update_fail_cs() {
 	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_event.commitment_msg);
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
 	check_added_monitors!(nodes[1], 1);
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
@@ -664,7 +662,6 @@ fn test_monitor_update_fail_cs() {
 			chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 			nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
 			assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-			nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
 			check_added_monitors!(nodes[0], 1);
 			assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 		},
@@ -726,7 +723,6 @@ fn test_monitor_update_fail_no_rebroadcast() {
 	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &bs_raa);
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
 	check_added_monitors!(nodes[1], 1);
@@ -784,12 +780,11 @@ fn test_monitor_update_raa_while_paused() {
 	nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event_2.msgs[0]);
 	nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event_2.commitment_msg);
 	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-	nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
 	check_added_monitors!(nodes[0], 1);
+	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 
 	nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
 	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-	nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Existing pending monitor update prevented responses to RAA".to_string(), 1);
 	check_added_monitors!(nodes[0], 1);
 
 	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
@@ -875,7 +870,6 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 	nodes[1].node.handle_revoke_and_ack(&nodes[2].node.get_our_node_id(), &bs_revoke_and_ack);
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
 	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 	check_added_monitors!(nodes[1], 1);
@@ -911,8 +905,6 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) {
 		nodes[1].node.handle_commitment_signed(&nodes[2].node.get_our_node_id(), &send_event.commitment_msg);
 		check_added_monitors!(nodes[1], 1);
 		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-		nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
-		assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
 		(Some(payment_preimage_4), Some(payment_hash_4))
 	} else { (None, None) };
@@ -1136,7 +1128,7 @@ fn test_monitor_update_fail_reestablish() {
 		get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id())
 			.contents.flags & 2, 0); // The "disabled" bit should be unset as we just reconnected
 
-	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
+	nodes[1].node.get_and_clear_pending_msg_events(); // Free the holding cell
 	check_added_monitors!(nodes[1], 1);
 
 	nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id(), false);
@@ -1228,12 +1220,11 @@ fn raa_no_response_awaiting_raa_state() {
 	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
 	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
 	check_added_monitors!(nodes[1], 1);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
 	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Existing pending monitor update prevented responses to RAA".to_string(), 1);
 	check_added_monitors!(nodes[1], 1);
 
 	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
@@ -1331,7 +1322,6 @@ fn claim_while_disconnected_monitor_update_fail() {
 	nodes[1].node.handle_channel_reestablish(&nodes[0].node.get_our_node_id(), &as_reconnect);
 	let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id());
 
-	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
 	check_added_monitors!(nodes[1], 1);
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
@@ -1348,7 +1338,6 @@ fn claim_while_disconnected_monitor_update_fail() {
 	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_updates.commitment_signed);
 	check_added_monitors!(nodes[1], 1);
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
 
 	// Note that nodes[1] not updating monitor here is OK - it wont take action on the new HTLC
 	// until we've channel_monitor_update'd and updated for the new commitment transaction.
@@ -1440,7 +1429,6 @@ fn monitor_failed_no_reestablish_response() {
 	nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &payment_event.msgs[0]);
 	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
 	check_added_monitors!(nodes[1], 1);
 
 	// Now disconnect and immediately reconnect, delivering the channel_reestablish while nodes[1]
@@ -1540,7 +1528,6 @@ fn first_message_on_recv_ordering() {
 	// to the next message also tests resetting the delivery order.
 	nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
 	check_added_monitors!(nodes[1], 1);
 
 	// Now deliver the update_add_htlc/commitment_signed for the second payment, which does need an
@@ -1550,7 +1537,6 @@ fn first_message_on_recv_ordering() {
 	nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &payment_event.commitment_msg);
 	check_added_monitors!(nodes[1], 1);
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Previous monitor update failure prevented generation of RAA".to_string(), 1);
 
 	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
 	let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
@@ -1599,7 +1585,7 @@ fn test_monitor_update_fail_claim() {
 	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 	nodes[1].node.claim_funds(payment_preimage_1);
 	expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
-	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Temporary failure claiming HTLC, treating as success: Failed to update ChannelMonitor".to_string(), 1);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 	check_added_monitors!(nodes[1], 1);
 
 	// Note that at this point there is a pending commitment transaction update for A being held by
@@ -1730,7 +1716,6 @@ fn test_monitor_update_on_pending_forwards() {
 	expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]);
 	check_added_monitors!(nodes[1], 1);
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
-	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
 
 	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
 	let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone();
@@ -1791,9 +1776,7 @@ fn monitor_update_claim_fail_no_response() {
 	expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
 	check_added_monitors!(nodes[1], 1);
-	let events = nodes[1].node.get_and_clear_pending_msg_events();
-	assert_eq!(events.len(), 0);
-	nodes[1].logger.assert_log("lightning::ln::channelmanager".to_string(), "Temporary failure claiming HTLC, treating as success: Failed to update ChannelMonitor".to_string(), 1);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 
 	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
 	let (outpoint, latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
@@ -1841,9 +1824,8 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf:
 	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 	nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()));
-	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
-	nodes[0].logger.assert_log("lightning::ln::channelmanager".to_string(), "Failed to update ChannelMonitor".to_string(), 1);
 	check_added_monitors!(nodes[0], 1);
+	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
 	assert!(nodes[0].node.get_and_clear_pending_events().is_empty());
 	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
 	let (outpoint, latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs
index 5d0c3dd4d57..d49221cd0f3 100644
--- a/lightning/src/ln/channel.rs
+++ b/lightning/src/ln/channel.rs
@@ -439,8 +439,6 @@ pub(super) struct ReestablishResponses {
 	pub raa: Option<msgs::RevokeAndACK>,
 	pub commitment_update: Option<msgs::CommitmentUpdate>,
 	pub order: RAACommitmentOrder,
-	pub mon_update: Option<ChannelMonitorUpdate>,
-	pub holding_cell_failed_htlcs: Vec<(HTLCSource, PaymentHash)>,
 	pub announcement_sigs: Option<msgs::AnnouncementSignatures>,
 	pub shutdown_msg: Option<msgs::Shutdown>,
 }
@@ -3955,9 +3953,8 @@ impl Channel {
 			// Short circuit the whole handler as there is nothing we can resend them
 			return Ok(ReestablishResponses {
 				channel_ready: None,
-				raa: None, commitment_update: None, mon_update: None,
+				raa: None, commitment_update: None,
 				order: RAACommitmentOrder::CommitmentFirst,
-				holding_cell_failed_htlcs: Vec::new(),
 				shutdown_msg, announcement_sigs,
 			});
 		}
@@ -3970,9 +3967,8 @@ impl Channel {
 				next_per_commitment_point,
 				short_channel_id_alias: Some(self.outbound_scid_alias),
 			}),
-			raa: None, commitment_update: None, mon_update: None,
+			raa: None, commitment_update: None,
 			order: RAACommitmentOrder::CommitmentFirst,
-			holding_cell_failed_htlcs: Vec::new(),
 			shutdown_msg, announcement_sigs,
 		});
 	}
@@ -4015,46 +4011,12 @@ impl Channel {
 				log_debug!(logger, "Reconnected channel {} with no loss", log_bytes!(self.channel_id()));
 			}
 
-			if (self.channel_state & (ChannelState::AwaitingRemoteRevoke as u32 | ChannelState::MonitorUpdateInProgress as u32)) == 0 {
-				// We're up-to-date and not waiting on a remote revoke (if we are our
-				// channel_reestablish should result in them sending a revoke_and_ack), but we may
-				// have received some updates while we were disconnected. Free the holding cell
-				// now!
-				match self.free_holding_cell_htlcs(logger) {
-					Err(ChannelError::Close(msg)) => Err(ChannelError::Close(msg)),
-					Err(ChannelError::Warn(_)) | Err(ChannelError::Ignore(_)) =>
-						panic!("Got non-channel-failing result from free_holding_cell_htlcs"),
-					Ok((Some((commitment_update, monitor_update)), holding_cell_failed_htlcs)) => {
-						Ok(ReestablishResponses {
-							channel_ready, shutdown_msg, announcement_sigs,
-							raa: required_revoke,
-							commitment_update: Some(commitment_update),
-							order: self.resend_order.clone(),
-							mon_update: Some(monitor_update),
-							holding_cell_failed_htlcs,
-						})
-					},
-					Ok((None, holding_cell_failed_htlcs)) => {
-						Ok(ReestablishResponses {
-							channel_ready, shutdown_msg, announcement_sigs,
-							raa: required_revoke,
-							commitment_update: None,
-							order: self.resend_order.clone(),
-							mon_update: None,
-							holding_cell_failed_htlcs,
-						})
-					},
-				}
-			} else {
-				Ok(ReestablishResponses {
-					channel_ready, shutdown_msg, announcement_sigs,
-					raa: required_revoke,
-					commitment_update: None,
-					order: self.resend_order.clone(),
-					mon_update: None,
-					holding_cell_failed_htlcs: Vec::new(),
-				})
-			}
+			Ok(ReestablishResponses {
+				channel_ready, shutdown_msg, announcement_sigs,
+				raa: required_revoke,
+				commitment_update: None,
+				order: self.resend_order.clone(),
+			})
 		} else if msg.next_local_commitment_number == next_counterparty_commitment_number - 1 {
 			if required_revoke.is_some() {
 				log_debug!(logger, "Reconnected channel {} with lost outbound RAA and lost remote commitment tx", log_bytes!(self.channel_id()));
@@ -4066,9 +4028,8 @@ impl Channel {
 			self.monitor_pending_commitment_signed = true;
 			Ok(ReestablishResponses {
 				channel_ready, shutdown_msg, announcement_sigs,
-				commitment_update: None, raa: None, mon_update: None,
+				commitment_update: None, raa: None,
 				order: self.resend_order.clone(),
-				holding_cell_failed_htlcs: Vec::new(),
 			})
 		} else {
 			Ok(ReestablishResponses {
@@ -4076,8 +4037,6 @@ impl Channel {
 				raa: required_revoke,
 				commitment_update: Some(self.get_last_commitment_update(logger)),
 				order: self.resend_order.clone(),
-				mon_update: None,
-				holding_cell_failed_htlcs: Vec::new(),
 			})
 		}
 	} else {
diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs
index 07003277237..882f3538798 100644
--- a/lightning/src/ln/channelmanager.rs
+++ b/lightning/src/ln/channelmanager.rs
@@ -1517,134 +1517,6 @@ macro_rules! emit_channel_ready_event {
 	}
 }
 
-macro_rules! handle_chan_restoration_locked {
-	($self: ident, $channel_lock: expr, $channel_state: expr, $channel_entry: expr,
-	 $raa: expr, $commitment_update: expr, $order: expr, $chanmon_update: expr,
-	 $pending_forwards: expr, $funding_broadcastable: expr, $channel_ready: expr, $announcement_sigs: expr) => { {
-		let mut htlc_forwards = None;
-
-		let chanmon_update: Option<ChannelMonitorUpdate> = $chanmon_update; // Force type-checking to resolve
-		let chanmon_update_is_none = chanmon_update.is_none();
-		let counterparty_node_id = $channel_entry.get().get_counterparty_node_id();
-		let res = loop {
-			let forwards: Vec<(PendingHTLCInfo, u64)> = $pending_forwards; // Force type-checking to resolve
-			if !forwards.is_empty() {
-				htlc_forwards = Some(($channel_entry.get().get_short_channel_id().unwrap_or($channel_entry.get().outbound_scid_alias()),
-					$channel_entry.get().get_funding_txo().unwrap(), forwards));
-			}
-
-			if chanmon_update.is_some() {
-				// On reconnect, we, by definition, only resend a channel_ready if there have been
-				// no commitment updates, so the only channel monitor update which could also be
-				// associated with a channel_ready would be the funding_created/funding_signed
-				// monitor update. That monitor update failing implies that we won't send
-				// channel_ready until it's been updated, so we can't have a channel_ready and a
-				// monitor update here (so we don't bother to handle it correctly below).
-				assert!($channel_ready.is_none());
-				// A channel monitor update makes no sense without either a channel_ready or a
-				// commitment update to process after it. Since we can't have a channel_ready, we
-				// only bother to handle the monitor-update + commitment_update case below.
-				assert!($commitment_update.is_some());
-			}
-
-			if let Some(msg) = $channel_ready {
-				// Similar to the above, this implies that we're letting the channel_ready fly
-				// before it should be allowed to.
-				assert!(chanmon_update.is_none());
-				send_channel_ready!($self, $channel_state.pending_msg_events, $channel_entry.get(), msg);
-			}
-			if let Some(msg) = $announcement_sigs {
-				$channel_state.pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
-					node_id: counterparty_node_id,
-					msg,
-				});
-			}
-
-			emit_channel_ready_event!($self, $channel_entry.get_mut());
-
-			let funding_broadcastable: Option<Transaction> = $funding_broadcastable; // Force type-checking to resolve
-			if let Some(monitor_update) = chanmon_update {
-				// We only ever broadcast a funding transaction in response to a funding_signed
-				// message and the resulting monitor update. Thus, on channel_reestablish
-				// message handling we can't have a funding transaction to broadcast. When
-				// processing a monitor update finishing resulting in a funding broadcast, we
-				// cannot have a second monitor update, thus this case would indicate a bug.
-				assert!(funding_broadcastable.is_none());
-				// Given we were just reconnected or finished updating a channel monitor, the
-				// only case where we can get a new ChannelMonitorUpdate would be if we also
-				// have some commitment updates to send as well.
-				assert!($commitment_update.is_some());
-				match $self.chain_monitor.update_channel($channel_entry.get().get_funding_txo().unwrap(), monitor_update) {
-					ChannelMonitorUpdateStatus::Completed => {},
-					e => {
-						// channel_reestablish doesn't guarantee the order it returns is sensical
-						// for the messages it returns, but if we're setting what messages to
-						// re-transmit on monitor update success, we need to make sure it is sane.
-						let mut order = $order;
-						if $raa.is_none() {
-							order = RAACommitmentOrder::CommitmentFirst;
-						}
-						break handle_monitor_update_res!($self, e, $channel_entry, order, $raa.is_some(), true);
-					}
-				}
-			}
-
-			macro_rules! handle_cs { () => {
-				if let Some(update) = $commitment_update {
-					$channel_state.pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
-						node_id: counterparty_node_id,
-						updates: update,
-					});
-				}
-			} }
-			macro_rules! handle_raa { () => {
-				if let Some(revoke_and_ack) = $raa {
-					$channel_state.pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
-						node_id: counterparty_node_id,
-						msg: revoke_and_ack,
-					});
-				}
-			} }
-			match $order {
-				RAACommitmentOrder::CommitmentFirst => {
-					handle_cs!();
-					handle_raa!();
-				},
-				RAACommitmentOrder::RevokeAndACKFirst => {
-					handle_raa!();
-					handle_cs!();
-				},
-			}
-			if let Some(tx) = funding_broadcastable {
-				log_info!($self.logger, "Broadcasting funding transaction with txid {}", tx.txid());
-				$self.tx_broadcaster.broadcast_transaction(&tx);
-			}
-			break Ok(());
-		};
-
-		if chanmon_update_is_none {
-			// If there was no ChannelMonitorUpdate, we should never generate an Err in the res loop
-			// above. Doing so would imply calling handle_err!() from channel_monitor_updated() which
-			// should *never* end up calling back to `chain_monitor.update_channel()`.
-			assert!(res.is_ok());
-		}
-
-		(htlc_forwards, res, counterparty_node_id)
-	} }
-}
-
-macro_rules! post_handle_chan_restoration {
-	($self: ident, $locked_res: expr) => { {
-		let (htlc_forwards, res, counterparty_node_id) = $locked_res;
-
-		let _ = handle_error!($self, res, counterparty_node_id);
-
-		if let Some(forwards) = htlc_forwards {
-			$self.forward_htlcs(&mut [forwards][..]);
-		}
-	} }
-}
-
 impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F, L>
 	where M::Target: chain::Watch<<K::Target as KeysInterface>::Signer>,
         T::Target: BroadcasterInterface,
@@ -4490,10 +4362,73 @@ impl ChannelManager
+	fn handle_channel_resumption(&self, pending_msg_events: &mut Vec<MessageSendEvent>,
+		channel: &mut Channel<<K::Target as KeysInterface>::Signer>, raa: Option<msgs::RevokeAndACK>,
+		commitment_update: Option<msgs::CommitmentUpdate>, order: RAACommitmentOrder,
+		pending_forwards: Vec<(PendingHTLCInfo, u64)>, funding_broadcastable: Option<Transaction>,
+		channel_ready: Option<msgs::ChannelReady>, announcement_sigs: Option<msgs::AnnouncementSignatures>)
+	-> Option<(u64, OutPoint, Vec<(PendingHTLCInfo, u64)>)> {
+		let mut htlc_forwards = None;
+
+		let counterparty_node_id = channel.get_counterparty_node_id();
+		if !pending_forwards.is_empty() {
+			htlc_forwards = Some((channel.get_short_channel_id().unwrap_or(channel.outbound_scid_alias()),
+				channel.get_funding_txo().unwrap(), pending_forwards));
+		}
+
+		if let Some(msg) = channel_ready {
+			send_channel_ready!(self, pending_msg_events, channel, msg);
+		}
+		if let Some(msg) = announcement_sigs {
+			pending_msg_events.push(events::MessageSendEvent::SendAnnouncementSignatures {
+				node_id: counterparty_node_id,
+				msg,
+			});
+		}
+
+		emit_channel_ready_event!(self, channel);
+
+		macro_rules! handle_cs { () => {
+			if let Some(update) = commitment_update {
+				pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs {
+					node_id: counterparty_node_id,
+					updates: update,
+				});
+			}
+		} }
+		macro_rules! handle_raa { () => {
+			if let Some(revoke_and_ack) = raa {
+				pending_msg_events.push(events::MessageSendEvent::SendRevokeAndACK {
+					node_id: counterparty_node_id,
+					msg: revoke_and_ack,
+				});
+			}
+		} }
+		match order {
+			RAACommitmentOrder::CommitmentFirst => {
+				handle_cs!();
+				handle_raa!();
+			},
+			RAACommitmentOrder::RevokeAndACKFirst => {
+				handle_raa!();
+				handle_cs!();
+			},
+		}
+
+		if let Some(tx) = funding_broadcastable {
+			log_info!(self.logger, "Broadcasting funding transaction with txid {}", tx.txid());
+			self.tx_broadcaster.broadcast_transaction(&tx);
+		}
+
+		htlc_forwards
+	}
+
 	fn channel_monitor_updated(&self, funding_txo: &OutPoint, highest_applied_update_id: u64) {
 		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(&self.total_consistency_lock, &self.persistence_notifier);
-		let chan_restoration_res;
+		let htlc_forwards;
 		let (mut pending_failures, finalized_claims, counterparty_node_id) = {
 			let mut channel_lock = self.channel_state.lock().unwrap();
 			let channel_state = &mut *channel_lock;
@@ -4520,14 +4455,16 @@ impl ChannelManager ChannelManager Result<(), MsgHandleErrInternal> {
-		let chan_restoration_res;
-		let (htlcs_failed_forward, need_lnd_workaround) = {
+		let htlc_forwards;
+		let need_lnd_workaround = {
@@ -5313,19 +5250,21 @@ impl ChannelManager
 				return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
 			}
 		};
-		post_handle_chan_restoration!(self, chan_restoration_res);
-		self.fail_holding_cell_htlcs(htlcs_failed_forward, msg.channel_id, counterparty_node_id);
+
+		if let Some(forwards) = htlc_forwards {
+			self.forward_htlcs(&mut [forwards][..]);
+		}
 
 		if let Some(channel_ready_msg) = need_lnd_workaround {
 			self.internal_channel_ready(counterparty_node_id, &channel_ready_msg)?;
diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs
index 677fdccfd37..6b247b28f09 100644
--- a/lightning/src/ln/functional_test_utils.rs
+++ b/lightning/src/ln/functional_test_utils.rs
@@ -2413,6 +2413,14 @@ macro_rules! handle_chan_reestablish_msgs {
 				assert_eq!(*node_id, $dst_node.node.get_our_node_id());
 			}
 
+			let mut had_channel_update = false; // ChannelUpdate may be now or later, but not both
+			if let Some(&MessageSendEvent::SendChannelUpdate { ref node_id, ref msg }) = msg_events.get(idx) {
+				assert_eq!(*node_id, $dst_node.node.get_our_node_id());
+				idx += 1;
+				assert_eq!(msg.contents.flags & 2, 0); // "disabled" flag must not be set as we just reconnected.
+				had_channel_update = true;
+			}
+
 			let mut revoke_and_ack = None;
 			let mut commitment_update = None;
 			let order = if let Some(ev) = msg_events.get(idx) {
@@ -2457,6 +2465,7 @@ macro_rules! handle_chan_reestablish_msgs {
 				assert_eq!(*node_id, $dst_node.node.get_our_node_id());
 				idx += 1;
 				assert_eq!(msg.contents.flags & 2, 0); // "disabled" flag must not be set as we just reconnected.
+				assert!(!had_channel_update);
 			}
 
 			assert_eq!(msg_events.len(), idx);
diff --git a/lightning/src/ln/reload_tests.rs b/lightning/src/ln/reload_tests.rs
index dc3d5392213..1056ceebdd9 100644
--- a/lightning/src/ln/reload_tests.rs
+++ b/lightning/src/ln/reload_tests.rs
@@ -786,9 +786,9 @@ fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
 	let ds_msgs = nodes[3].node.get_and_clear_pending_msg_events();
 	check_added_monitors!(nodes[3], 1);
 	assert_eq!(ds_msgs.len(), 2);
-	if let MessageSendEvent::SendChannelUpdate { .. } = ds_msgs[1] {} else { panic!(); }
+	if let MessageSendEvent::SendChannelUpdate { .. } = ds_msgs[0] {} else { panic!(); }
 
-	let cs_updates = match ds_msgs[0] {
+	let cs_updates = match ds_msgs[1] {
 		MessageSendEvent::UpdateHTLCs { ref updates, .. } => {
 			nodes[2].node.handle_update_fulfill_htlc(&nodes[3].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
 			check_added_monitors!(nodes[2], 1);