diff --git a/lightning/src/chain/chaininterface.rs b/lightning/src/chain/chaininterface.rs index ebef21657b7..b6198215bcb 100644 --- a/lightning/src/chain/chaininterface.rs +++ b/lightning/src/chain/chaininterface.rs @@ -187,25 +187,29 @@ pub const FEERATE_FLOOR_SATS_PER_KW: u32 = 253; /// /// Note that this does *not* implement [`FeeEstimator`] to make it harder to accidentally mix the /// two. -pub(crate) struct LowerBoundedFeeEstimator<F: Deref>(pub F) where F::Target: FeeEstimator; - -impl<F: Deref> LowerBoundedFeeEstimator<F> where F::Target: FeeEstimator { +pub(crate) struct LowerBoundedFeeEstimator<F: Deref>(pub F) +where + F::Target: FeeEstimator; + +impl<F: Deref> LowerBoundedFeeEstimator<F> +where + F::Target: FeeEstimator, +{ /// Creates a new `LowerBoundedFeeEstimator` which wraps the provided fee_estimator pub fn new(fee_estimator: F) -> Self { LowerBoundedFeeEstimator(fee_estimator) } pub fn bounded_sat_per_1000_weight(&self, confirmation_target: ConfirmationTarget) -> u32 { - cmp::max( - self.0.get_est_sat_per_1000_weight(confirmation_target), - FEERATE_FLOOR_SATS_PER_KW, - ) + cmp::max(self.0.get_est_sat_per_1000_weight(confirmation_target), FEERATE_FLOOR_SATS_PER_KW) } } #[cfg(test)] mod tests { - use super::{FEERATE_FLOOR_SATS_PER_KW, LowerBoundedFeeEstimator, ConfirmationTarget, FeeEstimator}; + use super::{ + ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator, FEERATE_FLOOR_SATS_PER_KW, + }; struct TestFeeEstimator { sat_per_kw: u32, @@ -223,7 +227,10 @@ mod tests { let test_fee_estimator = &TestFeeEstimator { sat_per_kw }; let fee_estimator = LowerBoundedFeeEstimator::new(test_fee_estimator); - assert_eq!(fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::AnchorChannelFee), FEERATE_FLOOR_SATS_PER_KW); + assert_eq!( + fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::AnchorChannelFee), + FEERATE_FLOOR_SATS_PER_KW + ); } #[test] @@ -232,6 +239,9 @@ mod tests { let test_fee_estimator = &TestFeeEstimator { sat_per_kw }; let fee_estimator = LowerBoundedFeeEstimator::new(test_fee_estimator); - assert_eq!(fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::AnchorChannelFee), sat_per_kw); + assert_eq!( + fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::AnchorChannelFee), + sat_per_kw + ); } } diff --git a/lightning/src/chain/transaction.rs b/lightning/src/chain/transaction.rs index 60c1e4475a5..cde4256be40 100644 --- a/lightning/src/chain/transaction.rs +++ b/lightning/src/chain/transaction.rs @@ -61,10 +61,7 @@ impl OutPoint { /// This is not exported to bindings users as the same type is used universally in the C bindings /// for all outpoints pub fn into_bitcoin_outpoint(self) -> BitcoinOutPoint { - BitcoinOutPoint { - txid: self.txid, - vout: self.index as u32, - } + BitcoinOutPoint { txid: self.txid, vout: self.index as u32 } } } @@ -90,20 +87,24 @@ mod tests { use crate::chain::transaction::OutPoint; use crate::ln::types::ChannelId; - use bitcoin::transaction::Transaction; use bitcoin::consensus::encode; use bitcoin::hex::FromHex; + use bitcoin::transaction::Transaction; #[test] fn test_channel_id_calculation() { - let tx: Transaction =
encode::deserialize(&<Vec<u8>>::from_hex("020000000001010e0adef48412e4361325ac1c6e36411299ab09d4f083b9d8ddb55fbc06e1b0c00000000000feffffff0220a1070000000000220020f81d95e040bd0a493e38bae27bff52fe2bb58b93b293eb579c01c31b05c5af1dc072cfee54a3000016001434b1d6211af5551905dc2642d05f5b04d25a8fe80247304402207f570e3f0de50546aad25a872e3df059d277e776dda4269fa0d2cc8c2ee6ec9a022054e7fae5ca94d47534c86705857c24ceea3ad51c69dd6051c5850304880fc43a012103cb11a1bacc223d98d91f1946c6752e358a5eb1a1c983b3e6fb15378f453b76bd00000000").unwrap()[..]).unwrap(); - assert_eq!(&ChannelId::v1_from_funding_outpoint(OutPoint { - txid: tx.compute_txid(), - index: 0 - }).0[..], &<Vec<u8>>::from_hex("3e88dd7165faf7be58b3c5bb2c9c452aebef682807ea57080f62e6f6e113c25e").unwrap()[..]); - assert_eq!(&ChannelId::v1_from_funding_outpoint(OutPoint { - txid: tx.compute_txid(), - index: 1 - }).0[..], &<Vec<u8>>::from_hex("3e88dd7165faf7be58b3c5bb2c9c452aebef682807ea57080f62e6f6e113c25f").unwrap()[..]); + let tx_hex = "020000000001010e0adef48412e4361325ac1c6e36411299ab09d4f083b9d8ddb55fbc06e1b0c00000000000feffffff0220a1070000000000220020f81d95e040bd0a493e38bae27bff52fe2bb58b93b293eb579c01c31b05c5af1dc072cfee54a3000016001434b1d6211af5551905dc2642d05f5b04d25a8fe80247304402207f570e3f0de50546aad25a872e3df059d277e776dda4269fa0d2cc8c2ee6ec9a022054e7fae5ca94d47534c86705857c24ceea3ad51c69dd6051c5850304880fc43a012103cb11a1bacc223d98d91f1946c6752e358a5eb1a1c983b3e6fb15378f453b76bd00000000"; + let tx: Transaction = + encode::deserialize(&<Vec<u8>>::from_hex(tx_hex).unwrap()[..]).unwrap(); + + let txid = tx.compute_txid(); + + let id_0 = ChannelId::v1_from_funding_outpoint(OutPoint { txid, index: 0 }); + let expected_0 = "3e88dd7165faf7be58b3c5bb2c9c452aebef682807ea57080f62e6f6e113c25e"; + assert_eq!(&id_0.0[..], &Vec::<u8>::from_hex(expected_0).unwrap()[..]); + + let id_1 = ChannelId::v1_from_funding_outpoint(OutPoint { txid, index: 1 }); + let expected_1 = "3e88dd7165faf7be58b3c5bb2c9c452aebef682807ea57080f62e6f6e113c25f"; + assert_eq!(&id_1.0[..], &Vec::<u8>::from_hex(expected_1).unwrap()[..]); } } diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index d91d3969401..ef8f256ed5e 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -12,29 +12,37 @@ //! There are a bunch of these as their handling is relatively error-prone so they are split out //! here. See also the chanmon_fail_consistency fuzz test.
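As a rough mental model of what this test module pins down, consider a minimal standalone sketch: a channel must freeze whenever its persister reports a monitor update as `InProgress`, and may only resume once the update is later reported `Completed`. The `UpdateStatus` and `ToyChannel` types below are invented for illustration and are not LDK's API.

```rust
// Toy model only: not LDK types, just the invariant the tests exercise.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum UpdateStatus {
    Completed,
    InProgress,
}

struct ToyChannel {
    // While a monitor update is in flight the channel must not make progress.
    paused: bool,
}

impl ToyChannel {
    fn on_monitor_update(&mut self, status: UpdateStatus) {
        self.paused = status == UpdateStatus::InProgress;
    }
}

fn main() {
    let mut chan = ToyChannel { paused: false };
    chan.on_monitor_update(UpdateStatus::InProgress);
    assert!(chan.paused); // frozen until the persister reports completion
    chan.on_monitor_update(UpdateStatus::Completed);
    assert!(!chan.paused);
}
```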
-use bitcoin::constants::genesis_block; -use bitcoin::hash_types::BlockHash; -use bitcoin::network::Network; -use crate::chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor}; +use crate::chain::channelmonitor::{ChannelMonitor, ANTI_REORG_DELAY}; use crate::chain::{ChannelMonitorUpdateStatus, Listen, Watch}; -use crate::events::{Event, PaymentPurpose, ClosureReason, HTLCHandlingFailureType}; -use crate::ln::channelmanager::{PaymentId, RAACommitmentOrder, RecipientOnionFields}; +use crate::events::{ClosureReason, Event, HTLCHandlingFailureType, PaymentPurpose}; use crate::ln::channel::AnnouncementSigsState; +use crate::ln::channelmanager::{PaymentId, RAACommitmentOrder, RecipientOnionFields}; use crate::ln::msgs; +use crate::ln::msgs::{ + BaseMessageHandler, ChannelMessageHandler, MessageSendEvent, RoutingMessageHandler, +}; use crate::ln::types::ChannelId; -use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, RoutingMessageHandler, MessageSendEvent}; -use crate::util::test_channel_signer::TestChannelSigner; use crate::util::ser::{ReadableArgs, Writeable}; +use crate::util::test_channel_signer::TestChannelSigner; use crate::util::test_utils::TestBroadcaster; +use bitcoin::constants::genesis_block; +use bitcoin::hash_types::BlockHash; +use bitcoin::network::Network; use crate::ln::functional_test_utils::*; use crate::util::test_utils; -use crate::io; -use bitcoin::hashes::Hash; use crate::prelude::*; use crate::sync::{Arc, Mutex}; +use bitcoin::hashes::Hash; + +fn get_latest_mon_update_id<'a, 'b, 'c>( + node: &Node<'a, 'b, 'c>, channel_id: ChannelId, +) -> (u64, u64) { + let monitor_id_state = node.chain_monitor.latest_monitor_update_id.lock().unwrap(); + monitor_id_state.get(&channel_id).unwrap().clone() +} #[test] fn test_monitor_and_persister_update_fail() { @@ -46,11 +54,14 @@ fn test_monitor_and_persister_update_fail() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + // Create some initial channel let chan = create_announced_chan_between_nodes(&nodes, 0, 1); // Rebalance the network to generate htlc in the two directions - send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000); + send_payment(&nodes[0], &[&nodes[1]], 10_000_000); // Route an HTLC from node 0 to node 1 (but don't settle) let (preimage, payment_hash, ..) 
= route_payment(&nodes[0], &[&nodes[1]], 9_000_000); @@ -72,42 +83,74 @@ fn test_monitor_and_persister_update_fail() { let chain_mon = { let new_monitor = { let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(chan.2).unwrap(); - let new_monitor = <(BlockHash, ChannelMonitor<TestChannelSigner>)>::read( - &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1; + let (_, new_monitor) = <(BlockHash, ChannelMonitor<TestChannelSigner>)>::read( + &mut &monitor.encode()[..], + (nodes[0].keys_manager, nodes[0].keys_manager), + ) + .unwrap(); assert!(new_monitor == *monitor); new_monitor }; - let chain_mon = test_utils::TestChainMonitor::new(Some(&chain_source), &tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager); - assert_eq!(chain_mon.watch_channel(chan.2, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed)); + let chain_mon = test_utils::TestChainMonitor::new( + Some(&chain_source), + &tx_broadcaster, + &logger, + &chanmon_cfgs[0].fee_estimator, + &persister, + &node_cfgs[0].keys_manager, + ); + assert_eq!( + chain_mon.watch_channel(chan.2, new_monitor), + Ok(ChannelMonitorUpdateStatus::Completed) + ); chain_mon }; - chain_mon.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), 200); + chain_mon + .chain_monitor + .block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), 200); // Try to update ChannelMonitor nodes[1].node.claim_funds(preimage); expect_payment_claimed!(nodes[1], payment_hash, 9_000_000); check_added_monitors!(nodes[1], 1); - let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let updates = get_htlc_update_msgs!(nodes[1], node_a_id); assert_eq!(updates.update_fulfill_htlcs.len(), 1); - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &updates.update_fulfill_htlcs[0]); { - let mut node_0_per_peer_lock; - let mut node_0_peer_state_lock; - if let Some(channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan.2).as_funded_mut() { + let mut per_peer_lock; + let mut peer_state_lock; + let chan_opt = get_channel_ref!(nodes[0], nodes[1], per_peer_lock, peer_state_lock, chan.2); + if let Some(channel) = chan_opt.as_funded_mut() { assert_eq!(updates.commitment_signed.len(), 1); - if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed[0], &node_cfgs[0].logger) { + if let Ok(Some(update)) = + channel.commitment_signed(&updates.commitment_signed[0], &node_cfgs[0].logger) + { // Check that the persister returns InProgress (and will never actually complete) // as the monitor update errors. - if let ChannelMonitorUpdateStatus::InProgress = chain_mon.chain_monitor.update_channel(chan.2, &update) {} else { panic!("Expected monitor paused"); } - logger.assert_log_regex("lightning::chain::chainmonitor", regex::Regex::new("Failed to update ChannelMonitor for channel [0-9a-f]*.").unwrap(), 1); + if let ChannelMonitorUpdateStatus::InProgress = + chain_mon.chain_monitor.update_channel(chan.2, &update) + { + } else { + panic!("Expected monitor paused"); + } + logger.assert_log_regex( + "lightning::chain::chainmonitor", + regex::Regex::new("Failed to update ChannelMonitor for channel [0-9a-f]*.") + .unwrap(), + 1, + ); // Apply the monitor update to the original ChainMonitor, ensuring the // ChannelManager and ChannelMonitor aren't out of sync.
- assert_eq!(nodes[0].chain_monitor.update_channel(chan.2, &update), - ChannelMonitorUpdateStatus::Completed); - } else { assert!(false); } + assert_eq!( + nodes[0].chain_monitor.update_channel(chan.2, &update), + ChannelMonitorUpdateStatus::Completed + ); + } else { + assert!(false); + } } else { assert!(false); } @@ -124,42 +167,45 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2; let user_channel_id = nodes[1].node.list_channels()[0].user_channel_id; - let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000); + let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = + get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - { - nodes[0].node.send_payment_with_route(route, payment_hash_1, - RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0) - ).unwrap(); - check_added_monitors!(nodes[0], 1); - } + let onion = RecipientOnionFields::secret_only(payment_secret_1); + let id = PaymentId(payment_hash_1.0); + nodes[0].node.send_payment_with_route(route, payment_hash_1, onion, id).unwrap(); + check_added_monitors!(nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); assert_eq!(nodes[0].node.list_channels().len(), 1); if disconnect { - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); + nodes[1].node.peer_disconnected(node_a_id); let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]); reconnect_args.send_channel_ready = (true, true); reconnect_nodes(reconnect_args); } chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + let (latest_update, _) = get_latest_mon_update_id(&nodes[0], channel_id); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors!(nodes[0], 0); let mut events_2 = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events_2.len(), 1); let payment_event = SendEvent::from_event(events_2.pop().unwrap()); - assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + assert_eq!(payment_event.node_id, node_b_id); + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); @@ -167,17 +213,26 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { let events_3 = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events_3.len(), 1); match events_3[0] { - Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, ref via_channel_ids, .. 
} => { + Event::PaymentClaimable { + ref payment_hash, + ref purpose, + amount_msat, + receiver_node_id, + ref via_channel_ids, + .. + } => { assert_eq!(payment_hash_1, *payment_hash); assert_eq!(amount_msat, 1_000_000); - assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id()); - assert_eq!(*via_channel_ids, vec![(channel_id, Some(user_channel_id))]); + assert_eq!(receiver_node_id.unwrap(), node_b_id); + assert_eq!(*via_channel_ids, &[(channel_id, Some(user_channel_id))]); match &purpose { - PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => { + PaymentPurpose::Bolt11InvoicePayment { + payment_preimage, payment_secret, .. + } => { assert!(payment_preimage.is_none()); assert_eq!(payment_secret_1, *payment_secret); }, - _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment") + _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment"), } }, _ => panic!("Unexpected event"), @@ -186,28 +241,28 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1); // Now set it to failed again... - let (route, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000); - { - chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - nodes[0].node.send_payment_with_route(route, payment_hash_2, - RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0) - ).unwrap(); - check_added_monitors!(nodes[0], 1); - } + let (route, payment_hash_2, _, payment_secret_2) = + get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000); + chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); + + let onion = RecipientOnionFields::secret_only(payment_secret_2); + let id = PaymentId(payment_hash_2.0); + nodes[0].node.send_payment_with_route(route, payment_hash_2, onion, id).unwrap(); + check_added_monitors!(nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); assert_eq!(nodes[0].node.list_channels().len(), 1); if disconnect { - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); + nodes[1].node.peer_disconnected(node_a_id); reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); } // ...and make sure we can force-close a frozen channel - let error_message = "Channel force-closed"; - nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap(); + let err_msg = "Channel force-closed".to_owned(); + nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &node_b_id, err_msg).unwrap(); check_added_monitors!(nodes[0], 1); check_closed_broadcast!(nodes[0], true); @@ -215,7 +270,8 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { // PaymentPathFailed event assert_eq!(nodes[0].node.list_channels().len(), 0); - check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); } #[test] @@ -248,20 +304,24 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = 
create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2; let user_channel_id = nodes[1].node.list_channels()[0].user_channel_id; - let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000); + let (payment_preimage_1, payment_hash_1, ..) = + route_payment(&nodes[0], &[&nodes[1]], 1_000_000); // Now try to send a second payment which will fail to send - let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); - { - chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - nodes[0].node.send_payment_with_route(route, payment_hash_2, - RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0) - ).unwrap(); - check_added_monitors!(nodes[0], 1); - } + let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = + get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); + chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); + let onion = RecipientOnionFields::secret_only(payment_secret_2); + let id = PaymentId(payment_hash_2.0); + nodes[0].node.send_payment_with_route(route, payment_hash_2, onion, id).unwrap(); + check_added_monitors!(nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -276,8 +336,20 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { let events_2 = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events_2.len(), 1); let (bs_initial_fulfill, bs_initial_commitment_signed) = match events_2[0] { - MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => { - assert_eq!(*node_id, nodes[0].node.get_our_node_id()); + MessageSendEvent::UpdateHTLCs { + ref node_id, + channel_id: _, + updates: + msgs::CommitmentUpdate { + ref update_add_htlcs, + ref update_fulfill_htlcs, + ref update_fail_htlcs, + ref update_fail_malformed_htlcs, + ref update_fee, + ref commitment_signed, + }, + } => { + assert_eq!(*node_id, node_a_id); assert!(update_add_htlcs.is_empty()); assert_eq!(update_fulfill_htlcs.len(), 1); assert!(update_fail_htlcs.is_empty()); @@ -285,7 +357,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { assert!(update_fee.is_none()); if (disconnect_count & 16) == 0 { - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &update_fulfill_htlcs[0]); let events_3 = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events_3.len(), 1); match events_3[0] { @@ -296,7 +368,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { _ => panic!("Unexpected event"), } - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, commitment_signed); check_added_monitors!(nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); } @@ -307,61 +379,66 @@ fn 
do_test_monitor_temporary_update_fail(disconnect_count: usize) { }; if disconnect_count & !disconnect_flags > 0 { - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); + nodes[1].node.peer_disconnected(node_a_id); } // Now fix monitor updating... chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + let (latest_update, _) = get_latest_mon_update_id(&nodes[0], channel_id); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors!(nodes[0], 0); - macro_rules! disconnect_reconnect_peers { () => { { - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); - - nodes[0].node.peer_connected(nodes[1].node.get_our_node_id(), &msgs::Init { - features: nodes[1].node.init_features(), networks: None, remote_network_address: None - }, true).unwrap(); - let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]); - assert_eq!(reestablish_1.len(), 1); - nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init { - features: nodes[0].node.init_features(), networks: None, remote_network_address: None - }, false).unwrap(); - let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); - assert_eq!(reestablish_2.len(), 1); - - nodes[0].node.handle_channel_reestablish(nodes[1].node.get_our_node_id(), &reestablish_2[0]); - let as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]); - nodes[1].node.handle_channel_reestablish(nodes[0].node.get_our_node_id(), &reestablish_1[0]); - let bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]); - - assert!(as_resp.0.is_none()); - assert!(bs_resp.0.is_none()); - - (reestablish_1, reestablish_2, as_resp, bs_resp) - } } } + macro_rules! 
disconnect_reconnect_peers { + () => {{ + nodes[0].node.peer_disconnected(node_b_id); + nodes[1].node.peer_disconnected(node_a_id); + + let init_msg = msgs::Init { + features: nodes[1].node.init_features(), + networks: None, + remote_network_address: None, + }; + + nodes[0].node.peer_connected(node_b_id, &init_msg, true).unwrap(); + let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]); + assert_eq!(reestablish_1.len(), 1); + nodes[1].node.peer_connected(node_a_id, &init_msg, false).unwrap(); + let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); + assert_eq!(reestablish_2.len(), 1); + + nodes[0].node.handle_channel_reestablish(node_b_id, &reestablish_2[0]); + let as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]); + nodes[1].node.handle_channel_reestablish(node_a_id, &reestablish_1[0]); + let bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]); + + assert!(as_resp.0.is_none()); + assert!(bs_resp.0.is_none()); + + (reestablish_1, reestablish_2, as_resp, bs_resp) + }}; + } let (payment_event, initial_revoke_and_ack) = if disconnect_count & !disconnect_flags > 0 { assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - nodes[0].node.peer_connected(nodes[1].node.get_our_node_id(), &msgs::Init { - features: nodes[1].node.init_features(), networks: None, remote_network_address: None - }, true).unwrap(); + let init_msg = msgs::Init { + features: nodes[1].node.init_features(), + networks: None, + remote_network_address: None, + }; + nodes[0].node.peer_connected(node_b_id, &init_msg, true).unwrap(); let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]); assert_eq!(reestablish_1.len(), 1); - nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init { - features: nodes[0].node.init_features(), networks: None, remote_network_address: None - }, false).unwrap(); + nodes[1].node.peer_connected(node_a_id, &init_msg, false).unwrap(); let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); assert_eq!(reestablish_2.len(), 1); - nodes[0].node.handle_channel_reestablish(nodes[1].node.get_our_node_id(), &reestablish_2[0]); + nodes[0].node.handle_channel_reestablish(node_b_id, &reestablish_2[0]); check_added_monitors!(nodes[0], 0); let mut as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]); - nodes[1].node.handle_channel_reestablish(nodes[0].node.get_our_node_id(), &reestablish_1[0]); + nodes[1].node.handle_channel_reestablish(node_a_id, &reestablish_1[0]); check_added_monitors!(nodes[1], 0); let mut bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]); @@ -374,18 +451,21 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { assert!(as_resp.1.is_some()); assert!(as_resp.2.is_some()); - assert!(as_resp.3 == RAACommitmentOrder::CommitmentFirst); + assert_eq!(as_resp.3, RAACommitmentOrder::CommitmentFirst); } else { assert!(bs_resp.2.as_ref().unwrap().update_add_htlcs.is_empty()); assert!(bs_resp.2.as_ref().unwrap().update_fail_htlcs.is_empty()); assert!(bs_resp.2.as_ref().unwrap().update_fail_malformed_htlcs.is_empty()); assert!(bs_resp.2.as_ref().unwrap().update_fee.is_none()); - assert!(bs_resp.2.as_ref().unwrap().update_fulfill_htlcs == vec![bs_initial_fulfill]); - assert!(bs_resp.2.as_ref().unwrap().commitment_signed == bs_initial_commitment_signed); + assert_eq!(bs_resp.2.as_ref().unwrap().update_fulfill_htlcs, [bs_initial_fulfill]); + assert_eq!(bs_resp.2.as_ref().unwrap().commitment_signed, 
bs_initial_commitment_signed); assert!(as_resp.1.is_none()); - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_resp.2.as_ref().unwrap().update_fulfill_htlcs[0]); + nodes[0].node.handle_update_fulfill_htlc( + node_b_id, + &bs_resp.2.as_ref().unwrap().update_fulfill_htlcs[0], + ); let events_3 = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events_3.len(), 1); match events_3[0] { @@ -396,8 +476,12 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { _ => panic!("Unexpected event"), } - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_resp.2.as_ref().unwrap().commitment_signed); - let as_resp_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + nodes[0].node.handle_commitment_signed_batch_test( + node_b_id, + &bs_resp.2.as_ref().unwrap().commitment_signed, + ); + let as_resp_raa = + get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); // No commitment_signed so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[0], 1); @@ -406,42 +490,49 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { } if disconnect_count & !disconnect_flags > 1 { - let (second_reestablish_1, second_reestablish_2, second_as_resp, second_bs_resp) = disconnect_reconnect_peers!(); + let (second_reestablish_1, second_reestablish_2, second_as_resp, second_bs_resp) = + disconnect_reconnect_peers!(); if (disconnect_count & 16) == 0 { - assert!(reestablish_1 == second_reestablish_1); - assert!(reestablish_2 == second_reestablish_2); + assert_eq!(reestablish_1, second_reestablish_1); + assert_eq!(reestablish_2, second_reestablish_2); } - assert!(as_resp == second_as_resp); - assert!(bs_resp == second_bs_resp); + assert_eq!(as_resp, second_as_resp); + assert_eq!(bs_resp, second_bs_resp); } - (SendEvent::from_commitment_update(nodes[1].node.get_our_node_id(), channel_id, as_resp.2.unwrap()), as_resp.1.unwrap()) + ( + SendEvent::from_commitment_update(node_b_id, channel_id, as_resp.2.unwrap()), + as_resp.1.unwrap(), + ) } else { let mut events_4 = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events_4.len(), 2); - (SendEvent::from_event(events_4.remove(0)), match events_4[0] { - MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => { - assert_eq!(*node_id, nodes[1].node.get_our_node_id()); - msg.clone() + ( + SendEvent::from_event(events_4.remove(0)), + match events_4[0] { + MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => { + assert_eq!(*node_id, node_b_id); + msg.clone() + }, + _ => panic!("Unexpected event"), }, - _ => panic!("Unexpected event"), - }) + ) }; - assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id()); + assert_eq!(payment_event.node_id, node_b_id); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &payment_event.commitment_msg); - let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event.commitment_msg); + let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); // nodes[1] is awaiting an RAA from nodes[0] still so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[1], 1); 
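The message shuffling above follows the commitment dance: each `commitment_signed` is answered with a `revoke_and_ack`, and LDK holds off on sending a fresh `commitment_signed` while it still awaits the peer's RAA (hence the "awaiting an RAA" comment just above). A minimal standalone sketch of that invariant, with invented types that are not LDK's channel state machine:

```rust
// Invented toy, not LDK's API: models only the "one commitment outstanding
// until the peer revokes" rule that the asserts above rely on.
#[derive(Default)]
struct ToyPeer {
    awaiting_raa: bool,
}

impl ToyPeer {
    fn send_commitment_signed(&mut self) -> Result<(), &'static str> {
        if self.awaiting_raa {
            // Mirrors why the tests assert no new commitment_signed appears
            // while an RAA is outstanding.
            return Err("still awaiting revoke_and_ack");
        }
        self.awaiting_raa = true;
        Ok(())
    }

    fn receive_revoke_and_ack(&mut self) {
        self.awaiting_raa = false;
    }
}

fn main() {
    let mut peer = ToyPeer::default();
    peer.send_commitment_signed().unwrap();
    assert!(peer.send_commitment_signed().is_err());
    peer.receive_revoke_and_ack();
    peer.send_commitment_signed().unwrap();
}
```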
if disconnect_count & !disconnect_flags > 2 { let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!(); - assert!(as_resp.1.unwrap() == initial_revoke_and_ack); - assert!(bs_resp.1.unwrap() == bs_revoke_and_ack); + assert_eq!(as_resp.1.unwrap(), initial_revoke_and_ack); + assert_eq!(bs_resp.1.unwrap(), bs_revoke_and_ack); assert!(as_resp.2.is_none()); assert!(bs_resp.2.is_none()); @@ -450,27 +541,31 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { let as_commitment_update; let bs_second_commitment_update; - macro_rules! handle_bs_raa { () => { - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_revoke_and_ack); - as_commitment_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - assert!(as_commitment_update.update_add_htlcs.is_empty()); - assert!(as_commitment_update.update_fulfill_htlcs.is_empty()); - assert!(as_commitment_update.update_fail_htlcs.is_empty()); - assert!(as_commitment_update.update_fail_malformed_htlcs.is_empty()); - assert!(as_commitment_update.update_fee.is_none()); - check_added_monitors!(nodes[0], 1); - } } - - macro_rules! handle_initial_raa { () => { - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &initial_revoke_and_ack); - bs_second_commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - assert!(bs_second_commitment_update.update_add_htlcs.is_empty()); - assert!(bs_second_commitment_update.update_fulfill_htlcs.is_empty()); - assert!(bs_second_commitment_update.update_fail_htlcs.is_empty()); - assert!(bs_second_commitment_update.update_fail_malformed_htlcs.is_empty()); - assert!(bs_second_commitment_update.update_fee.is_none()); - check_added_monitors!(nodes[1], 1); - } } + macro_rules! handle_bs_raa { + () => { + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_revoke_and_ack); + as_commitment_update = get_htlc_update_msgs!(nodes[0], node_b_id); + assert!(as_commitment_update.update_add_htlcs.is_empty()); + assert!(as_commitment_update.update_fulfill_htlcs.is_empty()); + assert!(as_commitment_update.update_fail_htlcs.is_empty()); + assert!(as_commitment_update.update_fail_malformed_htlcs.is_empty()); + assert!(as_commitment_update.update_fee.is_none()); + check_added_monitors!(nodes[0], 1); + }; + } + + macro_rules! 
handle_initial_raa { + () => { + nodes[1].node.handle_revoke_and_ack(node_a_id, &initial_revoke_and_ack); + bs_second_commitment_update = get_htlc_update_msgs!(nodes[1], node_a_id); + assert!(bs_second_commitment_update.update_add_htlcs.is_empty()); + assert!(bs_second_commitment_update.update_fulfill_htlcs.is_empty()); + assert!(bs_second_commitment_update.update_fail_htlcs.is_empty()); + assert!(bs_second_commitment_update.update_fail_malformed_htlcs.is_empty()); + assert!(bs_second_commitment_update.update_fee.is_none()); + check_added_monitors!(nodes[1], 1); + }; + } if (disconnect_count & 8) == 0 { handle_bs_raa!(); @@ -478,13 +573,13 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { if disconnect_count & !disconnect_flags > 3 { let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!(); - assert!(as_resp.1.unwrap() == initial_revoke_and_ack); + assert_eq!(as_resp.1.unwrap(), initial_revoke_and_ack); assert!(bs_resp.1.is_none()); - assert!(as_resp.2.unwrap() == as_commitment_update); + assert_eq!(as_resp.2.unwrap(), as_commitment_update); assert!(bs_resp.2.is_none()); - assert!(as_resp.3 == RAACommitmentOrder::RevokeAndACKFirst); + assert_eq!(as_resp.3, RAACommitmentOrder::RevokeAndACKFirst); } handle_initial_raa!(); @@ -495,8 +590,8 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { assert!(as_resp.1.is_none()); assert!(bs_resp.1.is_none()); - assert!(as_resp.2.unwrap() == as_commitment_update); - assert!(bs_resp.2.unwrap() == bs_second_commitment_update); + assert_eq!(as_resp.2.unwrap(), as_commitment_update); + assert_eq!(bs_resp.2.unwrap(), bs_second_commitment_update); } } else { handle_initial_raa!(); @@ -505,12 +600,12 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!(); assert!(as_resp.1.is_none()); - assert!(bs_resp.1.unwrap() == bs_revoke_and_ack); + assert_eq!(bs_resp.1.unwrap(), bs_revoke_and_ack); assert!(as_resp.2.is_none()); - assert!(bs_resp.2.unwrap() == bs_second_commitment_update); + assert_eq!(bs_resp.2.unwrap(), bs_second_commitment_update); - assert!(bs_resp.3 == RAACommitmentOrder::RevokeAndACKFirst); + assert_eq!(bs_resp.3, RAACommitmentOrder::RevokeAndACKFirst); } handle_bs_raa!(); @@ -521,26 +616,32 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { assert!(as_resp.1.is_none()); assert!(bs_resp.1.is_none()); - assert!(as_resp.2.unwrap() == as_commitment_update); - assert!(bs_resp.2.unwrap() == bs_second_commitment_update); + assert_eq!(as_resp.2.unwrap(), as_commitment_update); + assert_eq!(bs_resp.2.unwrap(), bs_second_commitment_update); } } - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_second_commitment_update.commitment_signed); - let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + nodes[0].node.handle_commitment_signed_batch_test( + node_b_id, + &bs_second_commitment_update.commitment_signed, + ); + let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); // No commitment_signed so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[0], 1); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_commitment_update.commitment_signed); - let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + nodes[1] + .node + 
.handle_commitment_signed_batch_test(node_a_id, &as_commitment_update.commitment_signed); + let bs_second_revoke_and_ack = + get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); // No commitment_signed so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[1], 1); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_revoke_and_ack); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 1); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_revoke_and_ack); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[0], 1); expect_payment_path_successful!(nodes[0]); @@ -550,17 +651,26 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { let events_5 = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events_5.len(), 1); match events_5[0] { - Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, ref via_channel_ids, .. } => { + Event::PaymentClaimable { + ref payment_hash, + ref purpose, + amount_msat, + receiver_node_id, + ref via_channel_ids, + .. + } => { assert_eq!(payment_hash_2, *payment_hash); assert_eq!(amount_msat, 1_000_000); - assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id()); - assert_eq!(*via_channel_ids, vec![(channel_id, Some(user_channel_id))]); + assert_eq!(receiver_node_id.unwrap(), node_b_id); + assert_eq!(*via_channel_ids, [(channel_id, Some(user_channel_id))]); match &purpose { - PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => { + PaymentPurpose::Bolt11InvoicePayment { + payment_preimage, payment_secret, .. 
+ } => { assert!(payment_preimage.is_none()); assert_eq!(payment_secret_2, *payment_secret); }, - _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment") + _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment"), } }, _ => panic!("Unexpected event"), @@ -603,27 +713,32 @@ fn test_monitor_update_fail_cs() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2; let user_channel_id = nodes[1].node.list_channels()[0].user_channel_id; - let (route, our_payment_hash, payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); - { - nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); - check_added_monitors!(nodes[0], 1); - } + let (route, our_payment_hash, payment_preimage, our_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); + check_added_monitors!(nodes[0], 1); - let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &send_event.msgs[0]); + let send_event = + SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); + nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &send_event.commitment_msg); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &send_event.commitment_msg); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors!(nodes[1], 0); let responses = nodes[1].node.get_and_clear_pending_msg_events(); @@ -631,8 +746,8 @@ fn test_monitor_update_fail_cs() { match responses[0] { MessageSendEvent::SendRevokeAndACK { ref msg, ref node_id } => { - assert_eq!(*node_id, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &msg); + assert_eq!(*node_id, node_a_id); + nodes[0].node.handle_revoke_and_ack(node_b_id, &msg); check_added_monitors!(nodes[0], 1); }, _ => panic!("Unexpected event"), @@ -644,10 +759,12 @@ fn test_monitor_update_fail_cs() { assert!(updates.update_fail_htlcs.is_empty()); assert!(updates.update_fail_malformed_htlcs.is_empty()); assert!(updates.update_fee.is_none()); - assert_eq!(*node_id, nodes[0].node.get_our_node_id()); + assert_eq!(*node_id, node_a_id); 
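The `set_update_ret` call that follows (and its many siblings in this file) works because the test persister's next monitor-update result is programmable, letting a test force the `InProgress` path at a precise point. A self-contained sketch of that pattern; the names here are invented for illustration, not LDK's `TestPersister` API:

```rust
use std::sync::Mutex;

// Invented miniature of a programmable persister stub.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum UpdateStatus {
    Completed,
    InProgress,
}

struct StubPersister {
    // The result the next persist call should report, settable by the test.
    next_ret: Mutex<UpdateStatus>,
}

impl StubPersister {
    fn set_update_ret(&self, ret: UpdateStatus) {
        *self.next_ret.lock().unwrap() = ret;
    }

    fn persist_update(&self) -> UpdateStatus {
        *self.next_ret.lock().unwrap()
    }
}

fn main() {
    let p = StubPersister { next_ret: Mutex::new(UpdateStatus::Completed) };
    p.set_update_ret(UpdateStatus::InProgress);
    assert_eq!(p.persist_update(), UpdateStatus::InProgress);
}
```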
chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &updates.commitment_signed); + nodes[0] + .node + .handle_commitment_signed_batch_test(node_b_id, &updates.commitment_signed); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -656,12 +773,12 @@ fn test_monitor_update_fail_cs() { } chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + let (latest_update, _) = get_latest_mon_update_id(&nodes[0], channel_id); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors!(nodes[0], 0); - let final_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &final_raa); + let final_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); + nodes[1].node.handle_revoke_and_ack(node_a_id, &final_raa); check_added_monitors!(nodes[1], 1); expect_pending_htlcs_forwardable!(nodes[1]); @@ -669,17 +786,26 @@ fn test_monitor_update_fail_cs() { let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); match events[0] { - Event::PaymentClaimable { payment_hash, ref purpose, amount_msat, receiver_node_id, ref via_channel_ids, .. } => { + Event::PaymentClaimable { + payment_hash, + ref purpose, + amount_msat, + receiver_node_id, + ref via_channel_ids, + .. + } => { assert_eq!(payment_hash, our_payment_hash); assert_eq!(amount_msat, 1_000_000); - assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id()); - assert_eq!(*via_channel_ids, vec![(channel_id, Some(user_channel_id))]); + assert_eq!(receiver_node_id.unwrap(), node_b_id); + assert_eq!(*via_channel_ids, [(channel_id, Some(user_channel_id))]); match &purpose { - PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => { + PaymentPurpose::Bolt11InvoicePayment { + payment_preimage, payment_secret, .. 
+ } => { assert!(payment_preimage.is_none()); assert_eq!(our_payment_secret, *payment_secret); }, - _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment") + _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment"), } }, _ => panic!("Unexpected event"), @@ -697,28 +823,33 @@ fn test_monitor_update_fail_no_rebroadcast() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2; - let (route, our_payment_hash, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); - { - nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(payment_secret_1), PaymentId(our_payment_hash.0)).unwrap(); - check_added_monitors!(nodes[0], 1); - } + let (route, our_payment_hash, payment_preimage_1, payment_secret_1) = + get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); + let onion = RecipientOnionFields::secret_only(payment_secret_1); + let id = PaymentId(our_payment_hash.0); + nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); + check_added_monitors!(nodes[0], 1); - let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &send_event.msgs[0]); - let bs_raa = commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true, false, true); + let send_event = + SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); + nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); + let commitment = send_event.commitment_msg; + let bs_raa = commitment_signed_dance!(nodes[1], nodes[0], commitment, false, true, false, true); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &bs_raa); + nodes[1].node.handle_revoke_and_ack(node_a_id, &bs_raa); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); check_added_monitors!(nodes[1], 1); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 0); @@ -744,65 +875,73 @@ fn test_monitor_update_raa_while_paused() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2; send_payment(&nodes[0], &[&nodes[1]], 5000000); - let (route, our_payment_hash_1, payment_preimage_1, our_payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); - { - 
nodes[0].node.send_payment_with_route(route, our_payment_hash_1, - RecipientOnionFields::secret_only(our_payment_secret_1), PaymentId(our_payment_hash_1.0)).unwrap(); - check_added_monitors!(nodes[0], 1); - } - let send_event_1 = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); + let (route, our_payment_hash_1, payment_preimage_1, our_payment_secret_1) = + get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); + let onion = RecipientOnionFields::secret_only(our_payment_secret_1); + let id = PaymentId(our_payment_hash_1.0); + nodes[0].node.send_payment_with_route(route, our_payment_hash_1, onion, id).unwrap(); - let (route, our_payment_hash_2, payment_preimage_2, our_payment_secret_2) = get_route_and_payment_hash!(nodes[1], nodes[0], 1000000); - { - nodes[1].node.send_payment_with_route(route, our_payment_hash_2, - RecipientOnionFields::secret_only(our_payment_secret_2), PaymentId(our_payment_hash_2.0)).unwrap(); - check_added_monitors!(nodes[1], 1); - } - let send_event_2 = SendEvent::from_event(nodes[1].node.get_and_clear_pending_msg_events().remove(0)); + check_added_monitors!(nodes[0], 1); + let send_event_1 = + SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &send_event_1.msgs[0]); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &send_event_1.commitment_msg); + let (route, our_payment_hash_2, payment_preimage_2, our_payment_secret_2) = + get_route_and_payment_hash!(nodes[1], nodes[0], 1000000); + let onion_2 = RecipientOnionFields::secret_only(our_payment_secret_2); + let id_2 = PaymentId(our_payment_hash_2.0); + nodes[1].node.send_payment_with_route(route, our_payment_hash_2, onion_2, id_2).unwrap(); + + check_added_monitors!(nodes[1], 1); + let send_event_2 = + SendEvent::from_event(nodes[1].node.get_and_clear_pending_msg_events().remove(0)); + + nodes[1].node.handle_update_add_htlc(node_a_id, &send_event_1.msgs[0]); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &send_event_1.commitment_msg); check_added_monitors!(nodes[1], 1); - let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - nodes[0].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &send_event_2.msgs[0]); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &send_event_2.commitment_msg); + nodes[0].node.handle_update_add_htlc(node_b_id, &send_event_2.msgs[0]); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &send_event_2.commitment_msg); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_raa); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[0], 1); - let (latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + let (latest_update, _) = get_latest_mon_update_id(&nodes[0], channel_id); 
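The `get_latest_mon_update_id` call here uses the helper added at the top of this file, replacing the repeated lock-and-clone chains of the old code. A standalone sketch of the bookkeeping it reads; the map layout mirrors the test chain monitor's `(update_id, persisted_id)` pairs, but the type names are illustrative only:

```rust
use std::collections::HashMap;
use std::sync::Mutex;

type ChannelId = [u8; 32];

struct MonitorIds {
    // Latest (monitor update id, persisted id) recorded per channel.
    latest_monitor_update_id: Mutex<HashMap<ChannelId, (u64, u64)>>,
}

fn get_latest_mon_update_id(ids: &MonitorIds, channel_id: ChannelId) -> (u64, u64) {
    let map = ids.latest_monitor_update_id.lock().unwrap();
    *map.get(&channel_id).unwrap()
}

fn main() {
    let ids = MonitorIds { latest_monitor_update_id: Mutex::new(HashMap::new()) };
    ids.latest_monitor_update_id.lock().unwrap().insert([0; 32], (3, 3));
    assert_eq!(get_latest_mon_update_id(&ids, [0; 32]), (3, 3));
}
```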
nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors!(nodes[0], 0); - let as_update_raa = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_update_raa.0); + let as_update_raa = get_revoke_commit_msgs!(nodes[0], node_b_id); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_update_raa.0); check_added_monitors!(nodes[1], 1); - let bs_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let bs_cs = get_htlc_update_msgs!(nodes[1], node_a_id); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_update_raa.1); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_update_raa.1); check_added_monitors!(nodes[1], 1); - let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_cs.commitment_signed); check_added_monitors!(nodes[0], 1); - let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_second_raa); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_raa); check_added_monitors!(nodes[0], 1); expect_pending_htlcs_forwardable!(nodes[0]); expect_payment_claimable!(nodes[0], our_payment_hash_2, our_payment_secret_2, 1000000); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_second_raa); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_second_raa); check_added_monitors!(nodes[1], 1); expect_pending_htlcs_forwardable!(nodes[1]); expect_payment_claimable!(nodes[1], our_payment_hash_1, our_payment_secret_1, 1000000); @@ -817,6 +956,11 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1); let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); @@ -828,31 +972,37 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { // Fail the payment backwards, failing the monitor update on nodes[1]'s receipt of the RAA nodes[2].node.fail_htlc_backwards(&payment_hash_1); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingFailureType::Receive { payment_hash: payment_hash_1 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[2], + [HTLCHandlingFailureType::Receive { payment_hash: payment_hash_1 }] + ); check_added_monitors!(nodes[2], 1); - let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + let updates = get_htlc_update_msgs!(nodes[2], node_b_id); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fulfill_htlcs.is_empty()); assert_eq!(updates.update_fail_htlcs.len(), 1); 
assert!(updates.update_fail_malformed_htlcs.is_empty()); assert!(updates.update_fee.is_none()); - nodes[1].node.handle_update_fail_htlc(nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]); + nodes[1].node.handle_update_fail_htlc(node_c_id, &updates.update_fail_htlcs[0]); - let bs_revoke_and_ack = commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true, false, true); + let commitment = updates.commitment_signed; + let bs_revoke_and_ack = + commitment_signed_dance!(nodes[1], nodes[2], commitment, false, true, false, true); check_added_monitors!(nodes[0], 0); // While the second channel is AwaitingRAA, forward a second payment to get it into the // holding cell. - let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[2], 1000000); - { - nodes[0].node.send_payment_with_route(route, payment_hash_2, - RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap(); - check_added_monitors!(nodes[0], 1); - } + let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = + get_route_and_payment_hash!(nodes[0], nodes[2], 1000000); + let onion_2 = RecipientOnionFields::secret_only(payment_secret_2); + let id_2 = PaymentId(payment_hash_2.0); + nodes[0].node.send_payment_with_route(route, payment_hash_2, onion_2, id_2).unwrap(); + check_added_monitors!(nodes[0], 1); - let mut send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &send_event.msgs[0]); + let mut send_event = + SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); + nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); @@ -861,7 +1011,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { // Now fail monitor updating. chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - nodes[1].node.handle_revoke_and_ack(nodes[2].node.get_our_node_id(), &bs_revoke_and_ack); + nodes[1].node.handle_revoke_and_ack(node_c_id, &bs_revoke_and_ack); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -869,16 +1019,16 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { // Forward a third payment which will also be added to the holding cell, despite the channel // being paused waiting a monitor update. 
- let (route, payment_hash_3, _, payment_secret_3) = get_route_and_payment_hash!(nodes[0], nodes[2], 1000000); - { - nodes[0].node.send_payment_with_route(route, payment_hash_3, - RecipientOnionFields::secret_only(payment_secret_3), PaymentId(payment_hash_3.0)).unwrap(); - check_added_monitors!(nodes[0], 1); - } + let (route, payment_hash_3, _, payment_secret_3) = + get_route_and_payment_hash!(nodes[0], nodes[2], 1000000); + let onion_3 = RecipientOnionFields::secret_only(payment_secret_3); + let id_3 = PaymentId(payment_hash_3.0); + nodes[0].node.send_payment_with_route(route, payment_hash_3, onion_3, id_3).unwrap(); + check_added_monitors!(nodes[0], 1); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); // We succeed in updating the monitor for the first channel send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &send_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true); check_added_monitors!(nodes[1], 0); @@ -890,26 +1040,34 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { let (payment_preimage_4, payment_hash_4) = if test_ignore_second_cs { // Try to route another payment backwards from 2 to make sure 1 holds off on responding - let (route, payment_hash_4, payment_preimage_4, payment_secret_4) = get_route_and_payment_hash!(nodes[2], nodes[0], 1000000); - nodes[2].node.send_payment_with_route(route, payment_hash_4, - RecipientOnionFields::secret_only(payment_secret_4), PaymentId(payment_hash_4.0)).unwrap(); + let (route, payment_hash_4, payment_preimage_4, payment_secret_4) = + get_route_and_payment_hash!(nodes[2], nodes[0], 1000000); + let onion_4 = RecipientOnionFields::secret_only(payment_secret_4); + let id_4 = PaymentId(payment_hash_4.0); + nodes[2].node.send_payment_with_route(route, payment_hash_4, onion_4, id_4).unwrap(); check_added_monitors!(nodes[2], 1); - send_event = SendEvent::from_event(nodes[2].node.get_and_clear_pending_msg_events().remove(0)); - nodes[1].node.handle_update_add_htlc(nodes[2].node.get_our_node_id(), &send_event.msgs[0]); - nodes[1].node.handle_commitment_signed_batch_test(nodes[2].node.get_our_node_id(), &send_event.commitment_msg); + send_event = + SendEvent::from_event(nodes[2].node.get_and_clear_pending_msg_events().remove(0)); + nodes[1].node.handle_update_add_htlc(node_c_id, &send_event.msgs[0]); + nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &send_event.commitment_msg); check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); (Some(payment_preimage_4), Some(payment_hash_4)) - } else { (None, None) }; + } else { + (None, None) + }; // Restore monitor updating, ensuring we immediately get a fail-back update and a // update_add update. 
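// set_update_ret(InProgress) makes the test persister report every monitor
// write as still in flight, which pauses channel progress, while
// force_channel_monitor_updated (used in the restore below) signals that the
// write finally became durable. A toy sketch of that async-persistence
// handshake; all names here are invented for illustration:

#[derive(Clone, Copy, PartialEq)]
enum UpdateStatus {
	Completed,  // write is durable before the call returns
	InProgress, // write completes later, out of band
}

struct MonitorGate {
	status: UpdateStatus,
	latest_update_id: u64,
	acked_update_id: u64,
}

impl MonitorGate {
	fn apply_update(&mut self) {
		self.latest_update_id += 1;
		if let UpdateStatus::Completed = self.status {
			self.acked_update_id = self.latest_update_id;
		}
	}
	// The analogue of force_channel_monitor_updated: everything up to
	// `update_id` is now persisted.
	fn updated(&mut self, update_id: u64) {
		assert!(update_id <= self.latest_update_id);
		self.acked_update_id = update_id;
	}
	fn is_paused(&self) -> bool {
		self.acked_update_id < self.latest_update_id
	}
}

fn main() {
	let mut gate =
		MonitorGate { status: UpdateStatus::InProgress, latest_update_id: 0, acked_update_id: 0 };
	gate.apply_update();
	assert!(gate.is_paused()); // the channel may not advance until persisted
	gate.updated(1);
	assert!(!gate.is_paused()); // held messages can now be released
	gate.status = UpdateStatus::Completed;
	gate.apply_update(); // a synchronous write acks immediately
	assert!(!gate.is_paused());
}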
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2.2).unwrap().clone(); + let (latest_update, _) = get_latest_mon_update_id(&nodes[1], chan_2.2); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_2.2, latest_update); check_added_monitors!(nodes[1], 0); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[1], + [HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }] + ); check_added_monitors!(nodes[1], 1); let mut events_3 = nodes[1].node.get_and_clear_pending_msg_events(); @@ -921,10 +1079,10 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { // Note that the ordering of the events for different nodes is non-prescriptive, though the // ordering of the two events that both go to nodes[2] have to stay in the same order. - let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut events_3); + let nodes_0_event = remove_first_msg_event_to_node(&node_a_id, &mut events_3); let messages_a = match nodes_0_event { MessageSendEvent::UpdateHTLCs { node_id, mut updates, channel_id: _ } => { - assert_eq!(node_id, nodes[0].node.get_our_node_id()); + assert_eq!(node_id, node_a_id); assert!(updates.update_fulfill_htlcs.is_empty()); assert_eq!(updates.update_fail_htlcs.len(), 1); assert!(updates.update_fail_malformed_htlcs.is_empty()); @@ -935,50 +1093,53 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { _ => panic!("Unexpected event type!"), }; - let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events_3); + let nodes_2_event = remove_first_msg_event_to_node(&node_c_id, &mut events_3); let send_event_b = SendEvent::from_event(nodes_2_event); - assert_eq!(send_event_b.node_id, nodes[2].node.get_our_node_id()); + assert_eq!(send_event_b.node_id, node_c_id); let raa = if test_ignore_second_cs { - let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events_3); + let nodes_2_event = remove_first_msg_event_to_node(&node_c_id, &mut events_3); match nodes_2_event { MessageSendEvent::SendRevokeAndACK { node_id, msg } => { - assert_eq!(node_id, nodes[2].node.get_our_node_id()); + assert_eq!(node_id, node_c_id); Some(msg.clone()) }, _ => panic!("Unexpected event"), } - } else { None }; + } else { + None + }; // Now deliver the new messages... 
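// Each delivery below kicks off another commitment dance: a
// commitment_signed (CS) obligates the receiver to answer with a
// revoke_and_ack (RAA) revoking its prior commitment, and in the flow these
// tests exercise a side still awaiting an RAA does not sign a fresh
// commitment on that channel. A toy model of that ordering rule; the types
// are invented for illustration, not LDK's real ones:

#[derive(Clone, Copy, PartialEq, Debug)]
enum DanceState {
	Idle,        // no commitment_signed outstanding
	AwaitingRaa, // we sent a CS; the peer owes us an RAA
}

struct Dance(DanceState);

impl Dance {
	fn send_commitment_signed(&mut self) {
		assert_eq!(self.0, DanceState::Idle, "one CS in flight per direction");
		self.0 = DanceState::AwaitingRaa;
	}
	fn recv_revoke_and_ack(&mut self) {
		assert_eq!(self.0, DanceState::AwaitingRaa, "RAA without an outstanding CS");
		self.0 = DanceState::Idle;
	}
}

fn main() {
	let mut b_to_c = Dance(DanceState::Idle);
	b_to_c.send_commitment_signed(); // e.g. send_event_b.commitment_msg below
	b_to_c.recv_revoke_and_ack();    // ...answered by the peer's RAA
}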
- nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &messages_a.0); + nodes[0].node.handle_update_fail_htlc(node_b_id, &messages_a.0); commitment_signed_dance!(nodes[0], nodes[1], messages_a.1, false); expect_payment_failed!(nodes[0], payment_hash_1, true); - nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &send_event_b.msgs[0]); + nodes[2].node.handle_update_add_htlc(node_b_id, &send_event_b.msgs[0]); let as_cs; if test_ignore_second_cs { - nodes[2].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &send_event_b.commitment_msg); + nodes[2].node.handle_commitment_signed_batch_test(node_b_id, &send_event_b.commitment_msg); check_added_monitors!(nodes[2], 1); - let bs_revoke_and_ack = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); - nodes[2].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &raa.unwrap()); + let bs_revoke_and_ack = + get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, node_b_id); + nodes[2].node.handle_revoke_and_ack(node_b_id, &raa.unwrap()); check_added_monitors!(nodes[2], 1); - let bs_cs = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + let bs_cs = get_htlc_update_msgs!(nodes[2], node_b_id); assert!(bs_cs.update_add_htlcs.is_empty()); assert!(bs_cs.update_fail_htlcs.is_empty()); assert!(bs_cs.update_fail_malformed_htlcs.is_empty()); assert!(bs_cs.update_fulfill_htlcs.is_empty()); assert!(bs_cs.update_fee.is_none()); - nodes[1].node.handle_revoke_and_ack(nodes[2].node.get_our_node_id(), &bs_revoke_and_ack); + nodes[1].node.handle_revoke_and_ack(node_c_id, &bs_revoke_and_ack); check_added_monitors!(nodes[1], 1); - as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id()); + as_cs = get_htlc_update_msgs!(nodes[1], node_c_id); - nodes[1].node.handle_commitment_signed_batch_test(nodes[2].node.get_our_node_id(), &bs_cs.commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &bs_cs.commitment_signed); check_added_monitors!(nodes[1], 1); } else { - nodes[2].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &send_event_b.commitment_msg); + nodes[2].node.handle_commitment_signed_batch_test(node_b_id, &send_event_b.commitment_msg); check_added_monitors!(nodes[2], 1); let bs_revoke_and_commit = nodes[2].node.get_and_clear_pending_msg_events(); @@ -986,24 +1147,26 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { assert_eq!(bs_revoke_and_commit.len(), 2); match bs_revoke_and_commit[0] { MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => { - assert_eq!(*node_id, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_revoke_and_ack(nodes[2].node.get_our_node_id(), &msg); + assert_eq!(*node_id, node_b_id); + nodes[1].node.handle_revoke_and_ack(node_c_id, &msg); check_added_monitors!(nodes[1], 1); }, _ => panic!("Unexpected event"), } - as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id()); + as_cs = get_htlc_update_msgs!(nodes[1], node_c_id); match bs_revoke_and_commit[1] { MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, ref updates } => { - assert_eq!(*node_id, nodes[1].node.get_our_node_id()); + assert_eq!(*node_id, node_b_id); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fail_htlcs.is_empty()); assert!(updates.update_fail_malformed_htlcs.is_empty()); assert!(updates.update_fulfill_htlcs.is_empty()); assert!(updates.update_fee.is_none()); - 
nodes[1].node.handle_commitment_signed_batch_test(nodes[2].node.get_our_node_id(), &updates.commitment_signed); + nodes[1] + .node + .handle_commitment_signed_batch_test(node_c_id, &updates.commitment_signed); check_added_monitors!(nodes[1], 1); }, _ => panic!("Unexpected event"), @@ -1015,27 +1178,26 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { assert!(as_cs.update_fail_malformed_htlcs.is_empty()); assert!(as_cs.update_fulfill_htlcs.is_empty()); assert!(as_cs.update_fee.is_none()); - let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id()); - + let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_c_id); - nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &as_cs.update_add_htlcs[0]); - nodes[2].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &as_cs.commitment_signed); + nodes[2].node.handle_update_add_htlc(node_b_id, &as_cs.update_add_htlcs[0]); + nodes[2].node.handle_commitment_signed_batch_test(node_b_id, &as_cs.commitment_signed); check_added_monitors!(nodes[2], 1); - let bs_second_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + let bs_second_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, node_b_id); - nodes[2].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &as_raa); + nodes[2].node.handle_revoke_and_ack(node_b_id, &as_raa); check_added_monitors!(nodes[2], 1); - let bs_second_cs = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + let bs_second_cs = get_htlc_update_msgs!(nodes[2], node_b_id); - nodes[1].node.handle_revoke_and_ack(nodes[2].node.get_our_node_id(), &bs_second_raa); + nodes[1].node.handle_revoke_and_ack(node_c_id, &bs_second_raa); check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - nodes[1].node.handle_commitment_signed_batch_test(nodes[2].node.get_our_node_id(), &bs_second_cs.commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &bs_second_cs.commitment_signed); check_added_monitors!(nodes[1], 1); - let as_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id()); + let as_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_c_id); - nodes[2].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &as_second_raa); + nodes[2].node.handle_revoke_and_ack(node_b_id, &as_second_raa); check_added_monitors!(nodes[2], 1); assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty()); @@ -1044,11 +1206,15 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { let events_6 = nodes[2].node.get_and_clear_pending_events(); assert_eq!(events_6.len(), 2); match events_6[0] { - Event::PaymentClaimable { payment_hash, .. } => { assert_eq!(payment_hash, payment_hash_2); }, + Event::PaymentClaimable { payment_hash, .. } => { + assert_eq!(payment_hash, payment_hash_2); + }, _ => panic!("Unexpected event"), }; match events_6[1] { - Event::PaymentClaimable { payment_hash, .. } => { assert_eq!(payment_hash, payment_hash_3); }, + Event::PaymentClaimable { payment_hash, .. 
} => { + assert_eq!(payment_hash, payment_hash_3); + }, _ => panic!("Unexpected event"), }; @@ -1057,9 +1223,9 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { check_added_monitors!(nodes[1], 1); send_event = SendEvent::from_node(&nodes[1]); - assert_eq!(send_event.node_id, nodes[0].node.get_our_node_id()); + assert_eq!(send_event.node_id, node_a_id); assert_eq!(send_event.msgs.len(), 1); - nodes[0].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &send_event.msgs[0]); + nodes[0].node.handle_update_add_htlc(node_b_id, &send_event.msgs[0]); commitment_signed_dance!(nodes[0], nodes[1], send_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[0]); @@ -1067,7 +1233,9 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { let events_9 = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events_9.len(), 1); match events_9[0] { - Event::PaymentClaimable { payment_hash, .. } => assert_eq!(payment_hash, payment_hash_4.unwrap()), + Event::PaymentClaimable { payment_hash, .. } => { + assert_eq!(payment_hash, payment_hash_4.unwrap()) + }, _ => panic!("Unexpected event"), }; claim_payment(&nodes[2], &[&nodes[1], &nodes[0]], payment_preimage_4.unwrap()); @@ -1091,87 +1259,93 @@ fn test_monitor_update_fail_reestablish() { let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); create_announced_chan_between_nodes(&nodes, 1, 2); - let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000); + let (payment_preimage, payment_hash, ..) 
= + route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); + nodes[1].node.peer_disconnected(node_a_id); + nodes[0].node.peer_disconnected(node_b_id); nodes[2].node.claim_funds(payment_preimage); check_added_monitors!(nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash, 1_000_000); - let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + let mut updates = get_htlc_update_msgs!(nodes[2], node_b_id); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fail_htlcs.is_empty()); assert!(updates.update_fail_malformed_htlcs.is_empty()); assert!(updates.update_fee.is_none()); assert_eq!(updates.update_fulfill_htlcs.len(), 1); - nodes[1].node.handle_update_fulfill_htlc(nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); + nodes[1].node.handle_update_fulfill_htlc(node_c_id, &updates.update_fulfill_htlcs[0]); expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - nodes[0].node.peer_connected(nodes[1].node.get_our_node_id(), &msgs::Init { - features: nodes[1].node.init_features(), networks: None, remote_network_address: None - }, true).unwrap(); - nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init { - features: nodes[0].node.init_features(), networks: None, remote_network_address: None - }, false).unwrap(); + let init_msg = msgs::Init { + features: nodes[1].node.init_features(), + networks: None, + remote_network_address: None, + }; + nodes[0].node.peer_connected(node_b_id, &init_msg, true).unwrap(); + nodes[1].node.peer_connected(node_a_id, &init_msg, false).unwrap(); let as_reestablish = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(); let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(); - nodes[0].node.handle_channel_reestablish(nodes[1].node.get_our_node_id(), &bs_reestablish); + nodes[0].node.handle_channel_reestablish(node_b_id, &bs_reestablish); - nodes[1].node.handle_channel_reestablish(nodes[0].node.get_our_node_id(), &as_reestablish); - assert_eq!( - get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id()) - .contents.channel_flags & 2, 0); // The "disabled" bit should be unset as we just reconnected + nodes[1].node.handle_channel_reestablish(node_a_id, &as_reestablish); + + // The "disabled" bit should be unset as we just reconnected + let as_channel_upd = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, node_b_id); + assert_eq!(as_channel_upd.contents.channel_flags & 2, 0); nodes[1].node.get_and_clear_pending_msg_events(); // Free the holding cell check_added_monitors!(nodes[1], 1); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); + nodes[1].node.peer_disconnected(node_a_id); + nodes[0].node.peer_disconnected(node_b_id); - nodes[0].node.peer_connected(nodes[1].node.get_our_node_id(), &msgs::Init { - features: nodes[1].node.init_features(), networks: None, remote_network_address: None - }, true).unwrap(); - nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init { - 
features: nodes[0].node.init_features(), networks: None, remote_network_address: None - }, false).unwrap(); + nodes[0].node.peer_connected(node_b_id, &init_msg, true).unwrap(); + nodes[1].node.peer_connected(node_a_id, &init_msg, false).unwrap(); assert_eq!(get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(), as_reestablish); assert_eq!(get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(), bs_reestablish); - nodes[0].node.handle_channel_reestablish(nodes[1].node.get_our_node_id(), &bs_reestablish); - assert_eq!( - get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id()) - .contents.channel_flags & 2, 0); // The "disabled" bit should be unset as we just reconnected + nodes[0].node.handle_channel_reestablish(node_b_id, &bs_reestablish); - nodes[1].node.handle_channel_reestablish(nodes[0].node.get_our_node_id(), &as_reestablish); + // The "disabled" bit should be unset as we just reconnected + let as_channel_upd = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, node_b_id); + assert_eq!(as_channel_upd.contents.channel_flags & 2, 0); + + nodes[1].node.handle_channel_reestablish(node_a_id, &as_reestablish); check_added_monitors!(nodes[1], 0); - assert_eq!( - get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id()) - .contents.channel_flags & 2, 0); // The "disabled" bit should be unset as we just reconnected + + // The "disabled" bit should be unset as we just reconnected + let bs_channel_upd = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, node_a_id); + assert_eq!(bs_channel_upd.contents.channel_flags & 2, 0); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone(); + let (latest_update, _) = get_latest_mon_update_id(&nodes[1], chan_1.2); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_1.2, latest_update); check_added_monitors!(nodes[1], 0); - updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + updates = get_htlc_update_msgs!(nodes[1], node_a_id); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fail_htlcs.is_empty()); assert!(updates.update_fail_malformed_htlcs.is_empty()); assert!(updates.update_fee.is_none()); assert_eq!(updates.update_fulfill_htlcs.len(), 1); - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &updates.update_fulfill_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false); expect_payment_sent!(nodes[0], payment_preimage); } @@ -1186,106 +1360,112 @@ fn raa_no_response_awaiting_raa_state() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2; - let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); - let (payment_preimage_2, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[1]); - let (payment_preimage_3, payment_hash_3, payment_secret_3) = get_payment_preimage_hash!(nodes[1]); + let (route, 
payment_hash_1, payment_preimage_1, payment_secret_1) = + get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); + let (payment_preimage_2, payment_hash_2, payment_secret_2) = + get_payment_preimage_hash!(nodes[1]); + let (payment_preimage_3, payment_hash_3, payment_secret_3) = + get_payment_preimage_hash!(nodes[1]); // Queue up two payments - one will be delivered right away, one immediately goes into the // holding cell as nodes[0] is AwaitingRAA. Ultimately this allows us to deliver an RAA // immediately after a CS. By setting failing the monitor update failure from the CS (which // requires only an RAA response due to AwaitingRAA) we can deliver the RAA and require the CS // generation during RAA while in monitor-update-failed state. - { - nodes[0].node.send_payment_with_route(route.clone(), payment_hash_1, - RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap(); - check_added_monitors!(nodes[0], 1); - nodes[0].node.send_payment_with_route(route.clone(), payment_hash_2, - RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap(); - check_added_monitors!(nodes[0], 0); - } + let onion_1 = RecipientOnionFields::secret_only(payment_secret_1); + let id_1 = PaymentId(payment_hash_1.0); + nodes[0].node.send_payment_with_route(route.clone(), payment_hash_1, onion_1, id_1).unwrap(); + check_added_monitors!(nodes[0], 1); + let onion_2 = RecipientOnionFields::secret_only(payment_secret_2); + let id_2 = PaymentId(payment_hash_2.0); + nodes[0].node.send_payment_with_route(route.clone(), payment_hash_2, onion_2, id_2).unwrap(); + check_added_monitors!(nodes[0], 0); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &payment_event.commitment_msg); + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event.commitment_msg); check_added_monitors!(nodes[1], 1); - let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_responses.0); + let bs_responses = get_revoke_commit_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_responses.0); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_responses.1); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_responses.1); check_added_monitors!(nodes[0], 1); - let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); // Now we have a CS queued up which adds a new HTLC (which will need a RAA/CS response from // nodes[1]) followed by an RAA. Fail the monitor updating prior to the CS, deliver the RAA, // then restore channel monitor updates. 
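// Each (preimage, hash, secret) triple fetched above via
// get_route_and_payment_hash!/get_payment_preimage_hash! is plain hash-lock
// plumbing: the payment hash is SHA-256 of the preimage, and revealing the
// preimage is what settles the HTLC. A minimal sketch of that relationship;
// it assumes the `sha2` crate, which is not what LDK's own types use:

use sha2::{Digest, Sha256};

fn payment_hash(preimage: [u8; 32]) -> [u8; 32] {
	Sha256::digest(preimage).into()
}

fn main() {
	let preimage = [42u8; 32];
	let hash = payment_hash(preimage);
	// A claim is valid iff the revealed preimage hashes to the HTLC's hash.
	assert_eq!(payment_hash(preimage), hash);
}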
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &payment_event.commitment_msg); + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event.commitment_msg); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_raa); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 1); - let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); // nodes[1] should be AwaitingRAA here! check_added_monitors!(nodes[1], 0); - let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let bs_responses = get_revoke_commit_msgs!(nodes[1], node_a_id); expect_pending_htlcs_forwardable!(nodes[1]); expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 1000000); // We send a third payment here, which is somewhat of a redundant test, but the // chanmon_fail_consistency test required it to actually find the bug (by seeing out-of-sync // commitment transaction states) whereas here we can explicitly check for it. 
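// The expect_pending_htlcs_forwardable! just above drives the same two-step
// intake the rest of these tests rely on: a received HTLC is first queued as
// pending, and only a later process_pending_htlc_forwards() pass surfaces it
// as a forward or a PaymentClaimable. A toy of that batching, with invented
// types:

struct Intake {
	pending: Vec<&'static str>,
	claimable: Vec<&'static str>,
}

impl Intake {
	fn on_update_add_htlc(&mut self, payment: &'static str) {
		self.pending.push(payment); // nothing user-visible yet
	}
	fn process_pending_htlc_forwards(&mut self) {
		self.claimable.append(&mut self.pending); // the batch becomes visible
	}
}

fn main() {
	let mut node = Intake { pending: vec![], claimable: vec![] };
	node.on_update_add_htlc("payment_hash_1");
	assert!(node.claimable.is_empty()); // not claimable until processed
	node.process_pending_htlc_forwards();
	assert_eq!(node.claimable, ["payment_hash_1"]);
}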
- { - nodes[0].node.send_payment_with_route(route, payment_hash_3, - RecipientOnionFields::secret_only(payment_secret_3), PaymentId(payment_hash_3.0)).unwrap(); - check_added_monitors!(nodes[0], 0); - assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - } - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_responses.0); + let onion_3 = RecipientOnionFields::secret_only(payment_secret_3); + let id_3 = PaymentId(payment_hash_3.0); + nodes[0].node.send_payment_with_route(route, payment_hash_3, onion_3, id_3).unwrap(); + check_added_monitors!(nodes[0], 0); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_responses.0); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_responses.1); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_responses.1); check_added_monitors!(nodes[0], 1); - let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &payment_event.commitment_msg); + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event.commitment_msg); check_added_monitors!(nodes[1], 1); - let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); // Finally deliver the RAA to nodes[1] which results in a CS response to the last update - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_raa); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); check_added_monitors!(nodes[1], 1); expect_pending_htlcs_forwardable!(nodes[1]); expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000); - let bs_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let bs_update = get_htlc_update_msgs!(nodes[1], node_a_id); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_raa); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa); check_added_monitors!(nodes[0], 1); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_update.commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_update.commitment_signed); check_added_monitors!(nodes[0], 1); - let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_raa); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); check_added_monitors!(nodes[1], 1); expect_pending_htlcs_forwardable!(nodes[1]); expect_payment_claimable!(nodes[1], payment_hash_3, payment_secret_3, 1000000); @@ -1307,52 +1487,60 @@ fn claim_while_disconnected_monitor_update_fail() { let node_cfgs = create_node_cfgs(2, 
&chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2; // Forward a payment for B to claim - let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000); + let (payment_preimage_1, payment_hash_1, ..) = + route_payment(&nodes[0], &[&nodes[1]], 1_000_000); - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); + nodes[1].node.peer_disconnected(node_a_id); nodes[1].node.claim_funds(payment_preimage_1); check_added_monitors!(nodes[1], 1); expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); - nodes[0].node.peer_connected(nodes[1].node.get_our_node_id(), &msgs::Init { - features: nodes[1].node.init_features(), networks: None, remote_network_address: None - }, true).unwrap(); - nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init { - features: nodes[0].node.init_features(), networks: None, remote_network_address: None - }, false).unwrap(); + let init_msg = msgs::Init { + features: nodes[1].node.init_features(), + networks: None, + remote_network_address: None, + }; + nodes[0].node.peer_connected(node_b_id, &init_msg, true).unwrap(); + nodes[1].node.peer_connected(node_a_id, &init_msg, false).unwrap(); let as_reconnect = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(); let bs_reconnect = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(); - nodes[0].node.handle_channel_reestablish(nodes[1].node.get_our_node_id(), &bs_reconnect); - let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id()); + nodes[0].node.handle_channel_reestablish(node_b_id, &bs_reconnect); + let _as_channel_update = + get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, node_b_id); // Now deliver a's reestablish, freeing the claim from the holding cell, but fail the monitor // update. 
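// On every reconnect in these tests the peers exchange channel_reestablish
// and re-derive what was lost in the disconnect; the channel_update that
// follows is checked for channel_flags & 2 == 0 because bit 1 is BOLT 7's
// "disabled" bit, which should clear once the peers are talking again. A toy
// of the retransmission decision, loosely after BOLT 2; field and function
// names are invented:

struct Reestablish {
	next_commitment_number: u64, // the commitment number the peer expects next
}

fn must_retransmit_commitment(last_sent: u64, theirs: &Reestablish) -> bool {
	// If the peer still expects the commitment number we already sent, our
	// commitment_signed was lost in the disconnect and must be re-sent.
	theirs.next_commitment_number == last_sent
}

fn main() {
	let peer = Reestablish { next_commitment_number: 5 };
	assert!(must_retransmit_commitment(5, &peer)); // our CS #5 was lost
	let peer = Reestablish { next_commitment_number: 6 };
	assert!(!must_retransmit_commitment(5, &peer)); // peer already has #5
}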
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - nodes[1].node.handle_channel_reestablish(nodes[0].node.get_our_node_id(), &as_reconnect); - let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id()); + nodes[1].node.handle_channel_reestablish(node_a_id, &as_reconnect); + let _bs_channel_update = + get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, node_a_id); check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); // Send a second payment from A to B, resulting in a commitment update that gets swallowed with // the monitor still failed - let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); - { - nodes[0].node.send_payment_with_route(route, payment_hash_2, - RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap(); - check_added_monitors!(nodes[0], 1); - } + let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = + get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); + let onion_2 = RecipientOnionFields::secret_only(payment_secret_2); + let id_2 = PaymentId(payment_hash_2.0); + nodes[0].node.send_payment_with_route(route, payment_hash_2, onion_2, id_2).unwrap(); + check_added_monitors!(nodes[0], 1); - let as_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &as_updates.update_add_htlcs[0]); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_updates.commitment_signed); + let as_updates = get_htlc_update_msgs!(nodes[0], node_b_id); + nodes[1].node.handle_update_add_htlc(node_a_id, &as_updates.update_add_htlcs[0]); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_updates.commitment_signed); check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); // Note that nodes[1] not updating monitor here is OK - it wont take action on the new HTLC @@ -1361,7 +1549,7 @@ fn claim_while_disconnected_monitor_update_fail() { // Now un-fail the monitor, which will result in B sending its original commitment update, // receiving the commitment update from A, and the resulting commitment dances. 
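// Until the un-fail below, nodes[1] generates no messages at all: the
// fulfill it freed from the holding cell and its commitment update sit
// queued behind the pending monitor write. A toy of that
// emit-only-after-persist rule, with invented types:

enum Persist {
	Pending,
	Done,
}

struct Peer {
	persist: Persist,
	queued: Vec<&'static str>,
}

impl Peer {
	fn queue(&mut self, msg: &'static str) {
		self.queued.push(msg);
	}
	fn pending_msg_events(&mut self) -> Vec<&'static str> {
		match self.persist {
			Persist::Pending => Vec::new(), // nothing may leave the node yet
			Persist::Done => std::mem::take(&mut self.queued),
		}
	}
}

fn main() {
	let mut peer = Peer { persist: Persist::Pending, queued: vec![] };
	peer.queue("update_fulfill_htlc");
	assert!(peer.pending_msg_events().is_empty());
	peer.persist = Persist::Done; // the analogue of un-failing the monitor
	assert_eq!(peer.pending_msg_events(), ["update_fulfill_htlc"]);
}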
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors!(nodes[1], 0); @@ -1370,14 +1558,16 @@ fn claim_while_disconnected_monitor_update_fail() { match bs_msgs[0] { MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, ref updates } => { - assert_eq!(*node_id, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); + assert_eq!(*node_id, node_a_id); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &updates.update_fulfill_htlcs[0]); expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &updates.commitment_signed); + nodes[0] + .node + .handle_commitment_signed_batch_test(node_b_id, &updates.commitment_signed); check_added_monitors!(nodes[0], 1); - let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_raa); + let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); check_added_monitors!(nodes[1], 1); }, _ => panic!("Unexpected event"), @@ -1385,30 +1575,30 @@ fn claim_while_disconnected_monitor_update_fail() { match bs_msgs[1] { MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => { - assert_eq!(*node_id, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), msg); + assert_eq!(*node_id, node_a_id); + nodes[0].node.handle_revoke_and_ack(node_b_id, msg); check_added_monitors!(nodes[0], 1); }, _ => panic!("Unexpected event"), } - let as_commitment = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + let as_commitment = get_htlc_update_msgs!(nodes[0], node_b_id); - let bs_commitment = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_commitment.commitment_signed); + let bs_commitment = get_htlc_update_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_commitment.commitment_signed); check_added_monitors!(nodes[0], 1); - let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_commitment.commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_commitment.commitment_signed); check_added_monitors!(nodes[1], 1); - let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_raa); + let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); check_added_monitors!(nodes[1], 1); expect_pending_htlcs_forwardable!(nodes[1]); expect_payment_claimable!(nodes[1], payment_hash_2, 
payment_secret_2, 1000000); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_raa); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa); check_added_monitors!(nodes[0], 1); expect_payment_path_successful!(nodes[0]); @@ -1425,69 +1615,80 @@ fn monitor_failed_no_reestablish_response() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2; { - let mut node_0_per_peer_lock; - let mut node_0_peer_state_lock; - get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, channel_id).context_mut().announcement_sigs_state = AnnouncementSigsState::PeerReceived; + let mut per_peer_lock; + let mut peer_state_lock; + get_channel_ref!(nodes[0], nodes[1], per_peer_lock, peer_state_lock, channel_id) + .context_mut() + .announcement_sigs_state = AnnouncementSigsState::PeerReceived; } { - let mut node_1_per_peer_lock; - let mut node_1_peer_state_lock; - get_channel_ref!(nodes[1], nodes[0], node_1_per_peer_lock, node_1_peer_state_lock, channel_id).context_mut().announcement_sigs_state = AnnouncementSigsState::PeerReceived; + let mut per_peer_lock; + let mut peer_state_lock; + get_channel_ref!(nodes[1], nodes[0], per_peer_lock, peer_state_lock, channel_id) + .context_mut() + .announcement_sigs_state = AnnouncementSigsState::PeerReceived; } // Route the payment and deliver the initial commitment_signed (with a monitor update failure // on receipt). - let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); - { - nodes[0].node.send_payment_with_route(route, payment_hash_1, - RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap(); - check_added_monitors!(nodes[0], 1); - } + let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = + get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); + let onion = RecipientOnionFields::secret_only(payment_secret_1); + let id = PaymentId(payment_hash_1.0); + nodes[0].node.send_payment_with_route(route, payment_hash_1, onion, id).unwrap(); + check_added_monitors!(nodes[0], 1); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &payment_event.commitment_msg); + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event.commitment_msg); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 1); // Now disconnect and immediately reconnect, delivering the channel_reestablish while nodes[1] // is still failing to update monitors. 
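// A note on the reconnect below: nodes[1]'s monitor write is still pending,
// so while it can safely answer the channel_reestablish with a
// channel_update, it withholds the revoke_and_ack/commitment_signed response
// until force_channel_monitor_updated fires; the get_revoke_commit_msgs!
// call after the restore is what finally collects that response. (The
// announcement_sigs_state override above appears intended to keep
// announcement_signatures out of the reconnect's expected message set.)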
- nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); + nodes[1].node.peer_disconnected(node_a_id); - nodes[0].node.peer_connected(nodes[1].node.get_our_node_id(), &msgs::Init { - features: nodes[1].node.init_features(), networks: None, remote_network_address: None - }, true).unwrap(); - nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init { - features: nodes[0].node.init_features(), networks: None, remote_network_address: None - }, false).unwrap(); + let init_msg = msgs::Init { + features: nodes[1].node.init_features(), + networks: None, + remote_network_address: None, + }; + nodes[0].node.peer_connected(node_b_id, &init_msg, true).unwrap(); + nodes[1].node.peer_connected(node_a_id, &init_msg, false).unwrap(); let as_reconnect = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(); let bs_reconnect = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(); - nodes[1].node.handle_channel_reestablish(nodes[0].node.get_our_node_id(), &as_reconnect); - let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_channel_reestablish(nodes[1].node.get_our_node_id(), &bs_reconnect); - let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_channel_reestablish(node_a_id, &as_reconnect); + let _bs_channel_update = + get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, node_a_id); + nodes[0].node.handle_channel_reestablish(node_b_id, &bs_reconnect); + let _as_channel_update = + get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, node_b_id); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors!(nodes[1], 0); - let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let bs_responses = get_revoke_commit_msgs!(nodes[1], node_a_id); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_responses.0); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_responses.0); check_added_monitors!(nodes[0], 1); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_responses.1); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_responses.1); check_added_monitors!(nodes[0], 1); - let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_raa); + let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); check_added_monitors!(nodes[1], 1); expect_pending_htlcs_forwardable!(nodes[1]); @@ -1512,78 +1713,83 @@ fn first_message_on_recv_ordering() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = 
nodes[1].node.get_our_node_id(); + let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2; // Route the first payment outbound, holding the last RAA for B until we are set up so that we // can deliver it and fail the monitor update. - let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); - { - nodes[0].node.send_payment_with_route(route, payment_hash_1, - RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap(); - check_added_monitors!(nodes[0], 1); - } + let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = + get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); + let onion_1 = RecipientOnionFields::secret_only(payment_secret_1); + let id_1 = PaymentId(payment_hash_1.0); + nodes[0].node.send_payment_with_route(route, payment_hash_1, onion_1, id_1).unwrap(); + check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); - assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &payment_event.commitment_msg); + assert_eq!(payment_event.node_id, node_b_id); + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event.commitment_msg); check_added_monitors!(nodes[1], 1); - let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let bs_responses = get_revoke_commit_msgs!(nodes[1], node_a_id); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_responses.0); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_responses.0); check_added_monitors!(nodes[0], 1); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_responses.1); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_responses.1); check_added_monitors!(nodes[0], 1); - let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); // Route the second payment, generating an update_add_htlc/commitment_signed - let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); - { - nodes[0].node.send_payment_with_route(route, payment_hash_2, - RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap(); - check_added_monitors!(nodes[0], 1); - } + let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = + get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); + let onion_2 = RecipientOnionFields::secret_only(payment_secret_2); + let id_2 = PaymentId(payment_hash_2.0); + nodes[0].node.send_payment_with_route(route, payment_hash_2, onion_2, id_2).unwrap(); + + check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); - assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id()); + assert_eq!(payment_event.node_id, node_b_id); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); // 
Deliver the final RAA for the first payment, which does not require a response. RAAs // generally require a commitment_signed, so the fact that we're expecting an opposite response // to the next message also tests resetting the delivery order. - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_raa); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 1); // Now deliver the update_add_htlc/commitment_signed for the second payment, which does need an // RAA/CS response, which should be generated when we call channel_monitor_update (with the // appropriate HTLC acceptance). - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &payment_event.commitment_msg); + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event.commitment_msg); check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors!(nodes[1], 0); expect_pending_htlcs_forwardable!(nodes[1]); expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 1000000); - let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_responses.0); + let bs_responses = get_revoke_commit_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_responses.0); check_added_monitors!(nodes[0], 1); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_responses.1); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_responses.1); check_added_monitors!(nodes[0], 1); - let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_raa); + let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); check_added_monitors!(nodes[1], 1); expect_pending_htlcs_forwardable!(nodes[1]); @@ -1604,13 +1810,19 @@ fn test_monitor_update_fail_claim() { let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); create_announced_chan_between_nodes(&nodes, 1, 2); // Rebalance a bit so that we can send backwards from 3 to 2. send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000); - let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000); + let (payment_preimage_1, payment_hash_1, ..) 
= + route_payment(&nodes[0], &[&nodes[1]], 1_000_000); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); // As long as the preimage isn't on-chain, we shouldn't expose the `PaymentClaimed` event to @@ -1625,12 +1837,12 @@ fn test_monitor_update_fail_claim() { // already-signed commitment transaction and will instead wait for it to resolve before // forwarding the payment onwards. - let (route, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(nodes[2], nodes[0], 1_000_000); - { - nodes[2].node.send_payment_with_route(route.clone(), payment_hash_2, - RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap(); - check_added_monitors!(nodes[2], 1); - } + let (route, payment_hash_2, _, payment_secret_2) = + get_route_and_payment_hash!(nodes[2], nodes[0], 1_000_000); + let onion_2 = RecipientOnionFields::secret_only(payment_secret_2); + let id_2 = PaymentId(payment_hash_2.0); + nodes[2].node.send_payment_with_route(route.clone(), payment_hash_2, onion_2, id_2).unwrap(); + check_added_monitors!(nodes[2], 1); // Successfully update the monitor on the 1<->2 channel, but the 0<->1 channel should still be // paused, so forward shouldn't succeed until we call channel_monitor_updated(). @@ -1639,76 +1851,95 @@ fn test_monitor_update_fail_claim() { let mut events = nodes[2].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); - nodes[1].node.handle_update_add_htlc(nodes[2].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_c_id, &payment_event.msgs[0]); let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 0); commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true); expect_pending_htlcs_forwardable_ignore!(nodes[1]); let (_, payment_hash_3, payment_secret_3) = get_payment_preimage_hash!(nodes[0]); - nodes[2].node.send_payment_with_route(route, payment_hash_3, - RecipientOnionFields::secret_only(payment_secret_3), PaymentId(payment_hash_3.0)).unwrap(); + let id_3 = PaymentId(payment_hash_3.0); + let onion_3 = RecipientOnionFields::secret_only(payment_secret_3); + nodes[2].node.send_payment_with_route(route, payment_hash_3, onion_3, id_3).unwrap(); check_added_monitors!(nodes[2], 1); let mut events = nodes[2].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); - nodes[1].node.handle_update_add_htlc(nodes[2].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_c_id, &payment_event.msgs[0]); let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 0); commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true); // Now restore monitor updating on the 0<->1 channel and claim the funds on B. 
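// The claim above already succeeded from the channel's point of view, but
// expect_payment_claimed! only passes after the restore below: per the
// comment cut off by the hunk above, the PaymentClaimed event is withheld
// until the monitor update recording the preimage is durable, since
// surfacing it earlier could let a crash lose the only copy of a preimage
// the user believes is settled. A toy of gating the event on persistence,
// with invented types:

struct Claim {
	preimage_persisted: bool,
}

fn payment_claimed_event(claim: &Claim) -> Option<&'static str> {
	claim.preimage_persisted.then_some("PaymentClaimed")
}

fn main() {
	let mut claim = Claim { preimage_persisted: false };
	assert_eq!(payment_claimed_event(&claim), None); // event held back
	claim.preimage_persisted = true; // the monitor update completed
	assert_eq!(payment_claimed_event(&claim), Some("PaymentClaimed"));
}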
let channel_id = chan_1.2; - let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); check_added_monitors!(nodes[1], 0); - let bs_fulfill_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_fulfill_update.update_fulfill_htlcs[0]); + let bs_fulfill_update = get_htlc_update_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &bs_fulfill_update.update_fulfill_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], bs_fulfill_update.commitment_signed, false); expect_payment_sent!(nodes[0], payment_preimage_1); // Get the payment forwards, note that they were batched into one commitment update. nodes[1].node.process_pending_htlc_forwards(); check_added_monitors!(nodes[1], 1); - let bs_forward_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[0]); - nodes[0].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[1]); + let bs_forward_update = get_htlc_update_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_update_add_htlc(node_b_id, &bs_forward_update.update_add_htlcs[0]); + nodes[0].node.handle_update_add_htlc(node_b_id, &bs_forward_update.update_add_htlcs[1]); commitment_signed_dance!(nodes[0], nodes[1], bs_forward_update.commitment_signed, false); expect_pending_htlcs_forwardable!(nodes[0]); let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); match events[0] { - Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, ref via_channel_ids, .. } => { + Event::PaymentClaimable { + ref payment_hash, + ref purpose, + amount_msat, + receiver_node_id, + ref via_channel_ids, + .. + } => { assert_eq!(payment_hash_2, *payment_hash); assert_eq!(1_000_000, amount_msat); - assert_eq!(receiver_node_id.unwrap(), nodes[0].node.get_our_node_id()); + assert_eq!(receiver_node_id.unwrap(), node_a_id); assert_eq!(*via_channel_ids.last().unwrap(), (channel_id, Some(42))); match &purpose { - PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => { + PaymentPurpose::Bolt11InvoicePayment { + payment_preimage, payment_secret, .. + } => { assert!(payment_preimage.is_none()); assert_eq!(payment_secret_2, *payment_secret); }, - _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment") + _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment"), } }, _ => panic!("Unexpected event"), } match events[1] { - Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, ref via_channel_ids, .. } => { + Event::PaymentClaimable { + ref payment_hash, + ref purpose, + amount_msat, + receiver_node_id, + ref via_channel_ids, + .. 
+ } => { assert_eq!(payment_hash_3, *payment_hash); assert_eq!(1_000_000, amount_msat); - assert_eq!(receiver_node_id.unwrap(), nodes[0].node.get_our_node_id()); - assert_eq!(*via_channel_ids, vec![(channel_id, Some(42))]); + assert_eq!(receiver_node_id.unwrap(), node_a_id); + assert_eq!(*via_channel_ids, [(channel_id, Some(42))]); match &purpose { - PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => { + PaymentPurpose::Bolt11InvoicePayment { + payment_preimage, payment_secret, .. + } => { assert!(payment_preimage.is_none()); assert_eq!(payment_secret_3, *payment_secret); }, - _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment") + _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment"), } }, _ => panic!("Unexpected event"), @@ -1725,6 +1956,11 @@ fn test_monitor_update_on_pending_forwards() { let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); @@ -1733,39 +1969,45 @@ fn test_monitor_update_on_pending_forwards() { let (_, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000); nodes[2].node.fail_htlc_backwards(&payment_hash_1); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingFailureType::Receive { payment_hash: payment_hash_1 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[2], + [HTLCHandlingFailureType::Receive { payment_hash: payment_hash_1 }] + ); check_added_monitors!(nodes[2], 1); - let cs_fail_update = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); - nodes[1].node.handle_update_fail_htlc(nodes[2].node.get_our_node_id(), &cs_fail_update.update_fail_htlcs[0]); + let cs_fail_update = get_htlc_update_msgs!(nodes[2], node_b_id); + nodes[1].node.handle_update_fail_htlc(node_c_id, &cs_fail_update.update_fail_htlcs[0]); commitment_signed_dance!(nodes[1], nodes[2], cs_fail_update.commitment_signed, true, true); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[2], nodes[0], 1000000); - { - nodes[2].node.send_payment_with_route(route, payment_hash_2, - RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap(); - check_added_monitors!(nodes[2], 1); - } + let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = + get_route_and_payment_hash!(nodes[2], nodes[0], 1000000); + let onion = RecipientOnionFields::secret_only(payment_secret_2); + let id = PaymentId(payment_hash_2.0); + nodes[2].node.send_payment_with_route(route, payment_hash_2, onion, id).unwrap(); + check_added_monitors!(nodes[2], 1); let mut events = nodes[2].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); - nodes[1].node.handle_update_add_htlc(nodes[2].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_c_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false); 
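// Aside: the next hunks assert that a fail (payment_hash_1) and a forward
// (payment_hash_2) queued while the monitor update was pending go out batched
// in a single commitment update. A toy of that accumulation, with
// `PendingAction`/`CommitmentUpdate` as illustrative stand-ins, not LDK types:
enum PendingAction { FailHtlc(u64), AddHtlc(u64) }

struct CommitmentUpdate { fails: Vec<u64>, adds: Vec<u64> }

fn flush(pending: Vec<PendingAction>) -> CommitmentUpdate {
	let mut update = CommitmentUpdate { fails: vec![], adds: vec![] };
	for action in pending {
		match action {
			PendingAction::FailHtlc(id) => update.fails.push(id),
			PendingAction::AddHtlc(id) => update.adds.push(id),
		}
	}
	update // both actions ride one signed batch, as `bs_updates` shows below
}

fn main() {
	let update = flush(vec![PendingAction::FailHtlc(1), PendingAction::AddHtlc(2)]);
	assert_eq!((update.fails.len(), update.adds.len()), (1, 1));
}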
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[1], + [HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }] + ); check_added_monitors!(nodes[1], 1); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone(); + let (latest_update, _) = get_latest_mon_update_id(&nodes[1], chan_1.2); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_1.2, latest_update); check_added_monitors!(nodes[1], 0); - let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &bs_updates.update_fail_htlcs[0]); - nodes[0].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &bs_updates.update_add_htlcs[0]); + let bs_updates = get_htlc_update_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_update_fail_htlc(node_b_id, &bs_updates.update_fail_htlcs[0]); + nodes[0].node.handle_update_add_htlc(node_b_id, &bs_updates.update_add_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false, true); let events = nodes[0].node.get_and_clear_pending_events(); @@ -1773,7 +2015,9 @@ fn test_monitor_update_on_pending_forwards() { if let Event::PaymentPathFailed { payment_hash, payment_failed_permanently, .. } = events[1] { assert_eq!(payment_hash, payment_hash_1); assert!(payment_failed_permanently); - } else { panic!("Unexpected event!"); } + } else { + panic!("Unexpected event!"); + } match events[2] { Event::PaymentFailed { payment_hash, .. } => { assert_eq!(payment_hash, Some(payment_hash_1)); @@ -1781,7 +2025,7 @@ fn test_monitor_update_on_pending_forwards() { _ => panic!("Unexpected event"), } match events[0] { - Event::PendingHTLCsForwardable { .. } => { }, + Event::PendingHTLCsForwardable { .. } => {}, _ => panic!("Unexpected event"), }; nodes[0].node.process_pending_htlc_forwards(); @@ -1800,24 +2044,30 @@ fn monitor_update_claim_fail_no_response() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2; // Forward a payment for B to claim - let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000); + let (payment_preimage_1, payment_hash_1, ..) 
= + route_payment(&nodes[0], &[&nodes[1]], 1_000_000); // Now start forwarding a second payment, skipping the last RAA so B is in AwaitingRAA - let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); - { - nodes[0].node.send_payment_with_route(route, payment_hash_2, - RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap(); - check_added_monitors!(nodes[0], 1); - } + let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = + get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); + let onion = RecipientOnionFields::secret_only(payment_secret_2); + let id = PaymentId(payment_hash_2.0); + nodes[0].node.send_payment_with_route(route, payment_hash_2, onion, id).unwrap(); + check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - let as_raa = commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true, false, true); + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); + let commitment = payment_event.commitment_msg; + let as_raa = commitment_signed_dance!(nodes[1], nodes[0], commitment, false, true, false, true); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[1].node.claim_funds(payment_preimage_1); @@ -1826,19 +2076,19 @@ fn monitor_update_claim_fail_no_response() { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); check_added_monitors!(nodes[1], 0); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_raa); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); check_added_monitors!(nodes[1], 1); expect_pending_htlcs_forwardable!(nodes[1]); expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000); - let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]); + let bs_updates = get_htlc_update_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &bs_updates.update_fulfill_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false); expect_payment_sent!(nodes[0], payment_preimage_1); @@ -1847,7 +2097,9 @@ fn monitor_update_claim_fail_no_response() { // restore_b_before_conf has no meaning if !confirm_a_first // restore_b_before_lock has no meaning if confirm_a_first -fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf: bool, restore_b_before_lock: bool) { +fn do_during_funding_monitor_fail( + confirm_a_first: bool, restore_b_before_conf: bool, restore_b_before_lock: bool, +) { // Test that if the monitor update generated by funding_transaction_generated fails we 
continue // the channel setup happily after the update is restored. let chanmon_cfgs = create_chanmon_cfgs(2); @@ -1855,40 +2107,66 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf: let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43, None, None).unwrap(); - nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id())); - nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id())); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + nodes[0].node.create_channel(node_b_id, 100000, 10001, 43, None, None).unwrap(); + nodes[1].node.handle_open_channel( + node_a_id, + &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id), + ); + nodes[0].node.handle_accept_channel( + node_b_id, + &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id), + ); - let (temporary_channel_id, funding_tx, funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 43); + let (temporary_channel_id, funding_tx, funding_output) = + create_funding_transaction(&nodes[0], &node_b_id, 100000, 43); - nodes[0].node.funding_transaction_generated(temporary_channel_id, nodes[1].node.get_our_node_id(), funding_tx.clone()).unwrap(); + nodes[0] + .node + .funding_transaction_generated(temporary_channel_id, node_b_id, funding_tx.clone()) + .unwrap(); check_added_monitors!(nodes[0], 0); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()); - let channel_id = ChannelId::v1_from_funding_txid(funding_created_msg.funding_txid.as_byte_array(), funding_created_msg.funding_output_index); - nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created_msg); + let funding_created_msg = + get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id); + let channel_id = ChannelId::v1_from_funding_txid( + funding_created_msg.funding_txid.as_byte_array(), + funding_created_msg.funding_output_index, + ); + nodes[1].node.handle_funding_created(node_a_id, &funding_created_msg); check_added_monitors!(nodes[1], 1); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - nodes[0].node.handle_funding_signed(nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id())); + nodes[0].node.handle_funding_signed( + node_b_id, + &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id), + ); check_added_monitors!(nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + let (latest_update, _) = get_latest_mon_update_id(&nodes[0], channel_id); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); 
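// Aside: the patch keeps replacing the inline lock-get-clone chain with the
// `get_latest_mon_update_id` helper. A sketch of that helper's shape over
// plain types -- the `Mutex<HashMap<..>>` mirrors the structure of
// `latest_monitor_update_id`, but the names here are assumptions for
// illustration only:
use std::collections::HashMap;
use std::sync::Mutex;

type ChannelId = [u8; 32];

fn latest_update_id(
	map: &Mutex<HashMap<ChannelId, (u64, u64)>>, channel_id: ChannelId,
) -> (u64, u64) {
	// One lock, one lookup, one copy: the boilerplate each call site had inline.
	*map.lock().unwrap().get(&channel_id).expect("channel should have a monitor")
}

fn main() {
	let map = Mutex::new(HashMap::from([([0u8; 32], (7u64, 7u64))]));
	assert_eq!(latest_update_id(&map, [0u8; 32]), (7, 7));
}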
check_added_monitors!(nodes[0], 0); - expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id()); + expect_channel_pending_event(&nodes[0], &node_b_id); let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 0); assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); - assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0)[0].compute_txid(), funding_output.txid); + assert_eq!( + nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0)[0].compute_txid(), + funding_output.txid + ); if confirm_a_first { confirm_transaction(&nodes[0], &funding_tx); - nodes[1].node.handle_channel_ready(nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id())); + nodes[1].node.handle_channel_ready( + node_a_id, + &get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, node_b_id), + ); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); } else { @@ -1898,14 +2176,14 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf: } // Make sure nodes[1] isn't stupid enough to re-send the ChannelReady on reconnect - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); + nodes[1].node.peer_disconnected(node_a_id); let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]); reconnect_args.send_channel_ready.1 = confirm_a_first; reconnect_nodes(reconnect_args); // But we want to re-emit ChannelPending - expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id()); + expect_channel_pending_event(&nodes[1], &node_a_id); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -1916,25 +2194,39 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf: } if !confirm_a_first && !restore_b_before_lock { confirm_transaction(&nodes[0], &funding_tx); - nodes[1].node.handle_channel_ready(nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id())); + nodes[1].node.handle_channel_ready( + node_a_id, + &get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, node_b_id), + ); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); } chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors!(nodes[1], 0); let (channel_id, (announcement, as_update, bs_update)) = if !confirm_a_first { if !restore_b_before_lock { - let (channel_ready, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[0], &nodes[1]); - (channel_id, create_chan_between_nodes_with_value_b(&nodes[1], &nodes[0], &channel_ready)) + let (channel_ready, channel_id) = + create_chan_between_nodes_with_value_confirm_second(&nodes[0], &nodes[1]); + ( + channel_id, + create_chan_between_nodes_with_value_b(&nodes[1], &nodes[0], &channel_ready), 
+ ) } else { - nodes[0].node.handle_channel_ready(nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendChannelReady, nodes[0].node.get_our_node_id())); + nodes[0].node.handle_channel_ready( + node_b_id, + &get_event_msg!(nodes[1], MessageSendEvent::SendChannelReady, node_a_id), + ); confirm_transaction(&nodes[0], &funding_tx); - let (channel_ready, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[1], &nodes[0]); - (channel_id, create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready)) + let (channel_ready, channel_id) = + create_chan_between_nodes_with_value_confirm_second(&nodes[1], &nodes[0]); + ( + channel_id, + create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready), + ) } } else { if restore_b_before_conf { @@ -1942,27 +2234,32 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf: assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); confirm_transaction(&nodes[1], &funding_tx); } - let (channel_ready, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[0], &nodes[1]); + let (channel_ready, channel_id) = + create_chan_between_nodes_with_value_confirm_second(&nodes[0], &nodes[1]); (channel_id, create_chan_between_nodes_with_value_b(&nodes[1], &nodes[0], &channel_ready)) }; for (i, node) in nodes.iter().enumerate() { let counterparty_node_id = nodes[(i + 1) % 2].node.get_our_node_id(); - assert!(node.gossip_sync.handle_channel_announcement(Some(counterparty_node_id), &announcement).unwrap()); + assert!(node + .gossip_sync + .handle_channel_announcement(Some(counterparty_node_id), &announcement) + .unwrap()); node.gossip_sync.handle_channel_update(Some(counterparty_node_id), &as_update).unwrap(); node.gossip_sync.handle_channel_update(Some(counterparty_node_id), &bs_update).unwrap(); } if !restore_b_before_lock { - expect_channel_ready_event(&nodes[1], &nodes[0].node.get_our_node_id()); + expect_channel_ready_event(&nodes[1], &node_a_id); } else { - expect_channel_ready_event(&nodes[0], &nodes[1].node.get_our_node_id()); + expect_channel_ready_event(&nodes[0], &node_b_id); } - send_payment(&nodes[0], &[&nodes[1]], 8000000); close_channel(&nodes[0], &nodes[1], &channel_id, funding_tx, true); - check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000); - check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000); + let reason_a = ClosureReason::CounterpartyInitiatedCooperativeClosure; + check_closed_event!(nodes[0], 1, reason_a, [node_b_id], 100000); + let reason_b = ClosureReason::LocallyInitiatedCooperativeClosure; + check_closed_event!(nodes[1], 1, reason_b, [node_a_id], 100000); } #[test] @@ -1982,20 +2279,24 @@ fn test_path_paused_mpp() { let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id; let (chan_2_ann, _, chan_2_id, _) = create_announced_chan_between_nodes(&nodes, 0, 2); let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id; let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3).0.contents.short_channel_id; - let (mut route, payment_hash, 
payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000); + let (mut route, payment_hash, payment_preimage, payment_secret) = + get_route_and_payment_hash!(&nodes[0], nodes[3], 100000); // Set us up to take multiple routes, one 0 -> 1 -> 3 and one 0 -> 2 -> 3: let path = route.paths[0].clone(); route.paths.push(path); - route.paths[0].hops[0].pubkey = nodes[1].node.get_our_node_id(); + route.paths[0].hops[0].pubkey = node_b_id; route.paths[0].hops[0].short_channel_id = chan_1_id; route.paths[0].hops[1].short_channel_id = chan_3_id; - route.paths[1].hops[0].pubkey = nodes[2].node.get_our_node_id(); + route.paths[1].hops[0].pubkey = node_c_id; route.paths[1].hops[0].short_channel_id = chan_2_ann.contents.short_channel_id; route.paths[1].hops[1].short_channel_id = chan_4_id; @@ -2005,28 +2306,35 @@ fn test_path_paused_mpp() { chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); // The first path should have succeeded with the second getting a MonitorUpdateInProgress err. - nodes[0].node.send_payment_with_route( - route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0) - ).unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); check_added_monitors!(nodes[0], 2); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); // Pass the first HTLC of the payment along to nodes[3]. let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); - pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 0, payment_hash.clone(), Some(payment_secret), events.pop().unwrap(), false, None); + let path_1 = &[&nodes[1], &nodes[3]]; + let ev = events.pop().unwrap(); + pass_along_path(&nodes[0], path_1, 0, payment_hash, Some(payment_secret), ev, false, None); // And check that, after we successfully update the monitor for chan_2 we can pass the second // HTLC along to nodes[3] and claim the whole payment back to nodes[0]. 
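// Aside: the comment above is the heart of this test -- one MPP shard went
// out immediately while the shard on chan_2 sat paused behind an `InProgress`
// monitor write, to be released on completion. A toy of that shard state,
// with illustrative types rather than LDK ones:
#[derive(Clone, Copy, PartialEq, Debug)]
enum ShardState { Sent, PausedOnMonitor }

fn release_paused(shards: &mut [ShardState]) {
	for state in shards.iter_mut() {
		if *state == ShardState::PausedOnMonitor {
			*state = ShardState::Sent; // monitor updated: the shard may now fly
		}
	}
}

fn main() {
	// Path 0 -> 1 -> 3 succeeded at once; path 0 -> 2 -> 3 was paused.
	let mut shards = [ShardState::Sent, ShardState::PausedOnMonitor];
	release_paused(&mut shards);
	assert!(shards.iter().all(|s| *s == ShardState::Sent));
}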
- let (latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2_id).unwrap().clone(); + let (latest_update, _) = get_latest_mon_update_id(&nodes[0], chan_2_id); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_2_id, latest_update); + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); - pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 200_000, payment_hash.clone(), Some(payment_secret), events.pop().unwrap(), true, None); + let path_2 = &[&nodes[2], &nodes[3]]; + let ev = events.pop().unwrap(); + pass_along_path(&nodes[0], path_2, 200_000, payment_hash, Some(payment_secret), ev, true, None); - claim_payment_along_route( - ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], payment_preimage) - ); + claim_payment_along_route(ClaimAlongRouteArgs::new( + &nodes[0], + &[path_1, path_2], + payment_preimage, + )); } #[test] @@ -2046,14 +2354,19 @@ fn test_pending_update_fee_ack_on_reconnect() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1); send_payment(&nodes[0], &[&nodes[1]], 100_000_00); - let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[1], nodes[0], 1_000_000); - nodes[1].node.send_payment_with_route(route, payment_hash, - RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); + let (route, payment_hash, payment_preimage, payment_secret) = + get_route_and_payment_hash!(&nodes[1], nodes[0], 1_000_000); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + nodes[1].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); check_added_monitors!(nodes[1], 1); - let bs_initial_send_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let bs_initial_send_msgs = get_htlc_update_msgs!(nodes[1], node_a_id); // bs_initial_send_msgs are not delivered until they are re-generated after reconnect { @@ -2062,60 +2375,81 @@ fn test_pending_update_fee_ack_on_reconnect() { } nodes[0].node.timer_tick_occurred(); check_added_monitors!(nodes[0], 1); - let as_update_fee_msgs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + let as_update_fee_msgs = get_htlc_update_msgs!(nodes[0], node_b_id); assert!(as_update_fee_msgs.update_fee.is_some()); - nodes[1].node.handle_update_fee(nodes[0].node.get_our_node_id(), as_update_fee_msgs.update_fee.as_ref().unwrap()); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_update_fee_msgs.commitment_signed); + nodes[1].node.handle_update_fee(node_a_id, as_update_fee_msgs.update_fee.as_ref().unwrap()); + nodes[1] + .node + .handle_commitment_signed_batch_test(node_a_id, &as_update_fee_msgs.commitment_signed); check_added_monitors!(nodes[1], 1); - let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); // bs_first_raa is not delivered until it is re-generated after reconnect - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + 
nodes[0].node.peer_disconnected(node_b_id); + nodes[1].node.peer_disconnected(node_a_id); - nodes[0].node.peer_connected(nodes[1].node.get_our_node_id(), &msgs::Init { - features: nodes[1].node.init_features(), networks: None, remote_network_address: None - }, true).unwrap(); + let init_msg = msgs::Init { + features: nodes[1].node.init_features(), + networks: None, + remote_network_address: None, + }; + nodes[0].node.peer_connected(node_b_id, &init_msg, true).unwrap(); let as_connect_msg = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(); - nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init { - features: nodes[0].node.init_features(), networks: None, remote_network_address: None - }, false).unwrap(); + nodes[1].node.peer_connected(node_a_id, &init_msg, false).unwrap(); let bs_connect_msg = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(); - nodes[1].node.handle_channel_reestablish(nodes[0].node.get_our_node_id(), &as_connect_msg); + nodes[1].node.handle_channel_reestablish(node_a_id, &as_connect_msg); let bs_resend_msgs = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(bs_resend_msgs.len(), 3); if let MessageSendEvent::UpdateHTLCs { ref updates, .. } = bs_resend_msgs[0] { assert_eq!(*updates, bs_initial_send_msgs); - } else { panic!(); } + } else { + panic!(); + } if let MessageSendEvent::SendRevokeAndACK { ref msg, .. } = bs_resend_msgs[1] { assert_eq!(*msg, bs_first_raa); - } else { panic!(); } - if let MessageSendEvent::SendChannelUpdate { .. } = bs_resend_msgs[2] { } else { panic!(); } + } else { + panic!(); + } + if let MessageSendEvent::SendChannelUpdate { .. } = bs_resend_msgs[2] { + } else { + panic!(); + } - nodes[0].node.handle_channel_reestablish(nodes[1].node.get_our_node_id(), &bs_connect_msg); - get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id()); + nodes[0].node.handle_channel_reestablish(node_b_id, &bs_connect_msg); + get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, node_b_id); - nodes[0].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &bs_initial_send_msgs.update_add_htlcs[0]); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_initial_send_msgs.commitment_signed); + nodes[0].node.handle_update_add_htlc(node_b_id, &bs_initial_send_msgs.update_add_htlcs[0]); + nodes[0] + .node + .handle_commitment_signed_batch_test(node_b_id, &bs_initial_send_msgs.commitment_signed); check_added_monitors!(nodes[0], 1); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id())); + nodes[1].node.handle_revoke_and_ack( + node_a_id, + &get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id), + ); check_added_monitors!(nodes[1], 1); - let bs_second_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()).commitment_signed; + let bs_second_cs = get_htlc_update_msgs!(nodes[1], node_a_id).commitment_signed; - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_first_raa); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_first_raa); check_added_monitors!(nodes[0], 1); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()).commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test( + node_a_id, + &get_htlc_update_msgs!(nodes[0], node_b_id).commitment_signed, + ); 
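// Aside: the `bs_resend_msgs` assertions above pin down the replay order on
// reestablish -- the un-acked HTLC/fee update first, the revoke-and-ack
// second, a channel_update last. The same ordering as data, with `Resend` an
// illustrative stand-in for `MessageSendEvent`:
#[derive(Debug, PartialEq)]
enum Resend { UpdateHtlcs, RevokeAndAck, ChannelUpdate }

fn resend_order() -> [Resend; 3] {
	[Resend::UpdateHtlcs, Resend::RevokeAndAck, Resend::ChannelUpdate]
}

fn main() {
	let msgs = resend_order();
	assert_eq!(msgs[0], Resend::UpdateHtlcs); // matches bs_resend_msgs[0]
	assert_eq!(msgs[1], Resend::RevokeAndAck); // matches bs_resend_msgs[1]
	assert_eq!(msgs[2], Resend::ChannelUpdate); // matches bs_resend_msgs[2]
}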
check_added_monitors!(nodes[1], 1); - let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_second_cs); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_second_cs); check_added_monitors!(nodes[0], 1); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_third_raa); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_third_raa); check_added_monitors!(nodes[0], 1); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id())); + nodes[1].node.handle_revoke_and_ack( + node_a_id, + &get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id), + ); check_added_monitors!(nodes[1], 1); expect_pending_htlcs_forwardable!(nodes[0]); @@ -2139,10 +2473,15 @@ fn test_fail_htlc_on_broadcast_after_claim() { let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1); let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2; - let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 2000); + let (payment_preimage, payment_hash, ..) = + route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 2000); let bs_txn = get_local_commitment_txn!(nodes[2], chan_id_2); assert_eq!(bs_txn.len(), 1); @@ -2151,20 +2490,23 @@ fn test_fail_htlc_on_broadcast_after_claim() { check_added_monitors!(nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash, 2000); - let cs_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); - nodes[1].node.handle_update_fulfill_htlc(nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]); - let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let cs_updates = get_htlc_update_msgs!(nodes[2], node_b_id); + nodes[1].node.handle_update_fulfill_htlc(node_c_id, &cs_updates.update_fulfill_htlcs[0]); + let bs_updates = get_htlc_update_msgs!(nodes[1], node_a_id); check_added_monitors!(nodes[1], 1); expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); mine_transaction(&nodes[1], &bs_txn[0]); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_c_id], 100000); check_closed_broadcast!(nodes[1], true); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); check_added_monitors!(nodes[1], 1); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[1], + [HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_id_2 }] + ); - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &bs_updates.update_fulfill_htlcs[0]); 
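// Aside: the connect_blocks(ANTI_REORG_DELAY - 1) step above is the point of
// this test -- having seen the downstream commitment confirm, nodes[1] only
// fails the HTLC back upstream once enough confirmations have accrued. A toy
// of that threshold; the constant's value here is assumed for illustration:
const ANTI_REORG_DELAY: u32 = 6;

fn should_fail_back(confirmations: u32) -> bool {
	confirmations >= ANTI_REORG_DELAY
}

fn main() {
	assert!(!should_fail_back(ANTI_REORG_DELAY - 1)); // one block short: keep waiting
	assert!(should_fail_back(ANTI_REORG_DELAY)); // safe to fail back upstream
}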
expect_payment_sent(&nodes[0], payment_preimage, None, false, false); commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, true, true); expect_payment_path_successful!(nodes[0]); @@ -2179,6 +2521,9 @@ fn do_update_fee_resend_test(deliver_update: bool, parallel_updates: bool) { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1); send_payment(&nodes[0], &[&nodes[1]], 1000); @@ -2188,10 +2533,10 @@ fn do_update_fee_resend_test(deliver_update: bool, parallel_updates: bool) { } nodes[0].node.timer_tick_occurred(); check_added_monitors!(nodes[0], 1); - let update_msgs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + let update_msgs = get_htlc_update_msgs!(nodes[0], node_b_id); assert!(update_msgs.update_fee.is_some()); if deliver_update { - nodes[1].node.handle_update_fee(nodes[0].node.get_our_node_id(), update_msgs.update_fee.as_ref().unwrap()); + nodes[1].node.handle_update_fee(node_a_id, update_msgs.update_fee.as_ref().unwrap()); } if parallel_updates { @@ -2203,59 +2548,73 @@ fn do_update_fee_resend_test(deliver_update: bool, parallel_updates: bool) { assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); } - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); + nodes[1].node.peer_disconnected(node_a_id); - nodes[0].node.peer_connected(nodes[1].node.get_our_node_id(), &msgs::Init { - features: nodes[1].node.init_features(), networks: None, remote_network_address: None - }, true).unwrap(); + let init_msg = msgs::Init { + features: nodes[1].node.init_features(), + networks: None, + remote_network_address: None, + }; + nodes[0].node.peer_connected(node_b_id, &init_msg, true).unwrap(); let as_connect_msg = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(); - nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init { - features: nodes[0].node.init_features(), networks: None, remote_network_address: None - }, false).unwrap(); + nodes[1].node.peer_connected(node_a_id, &init_msg, false).unwrap(); let bs_connect_msg = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(); - nodes[1].node.handle_channel_reestablish(nodes[0].node.get_our_node_id(), &as_connect_msg); - get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id()); + nodes[1].node.handle_channel_reestablish(node_a_id, &as_connect_msg); + get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, node_a_id); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - nodes[0].node.handle_channel_reestablish(nodes[1].node.get_our_node_id(), &bs_connect_msg); + nodes[0].node.handle_channel_reestablish(node_b_id, &bs_connect_msg); let mut as_reconnect_msgs = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(as_reconnect_msgs.len(), 2); - if let MessageSendEvent::SendChannelUpdate { .. } = as_reconnect_msgs.pop().unwrap() {} else { panic!(); } - let update_msgs = if let MessageSendEvent::UpdateHTLCs { updates, .. } = as_reconnect_msgs.pop().unwrap() - { updates } else { panic!(); }; + if let MessageSendEvent::SendChannelUpdate { .. 
} = as_reconnect_msgs.pop().unwrap() { + } else { + panic!(); + } + let update_msgs = + if let MessageSendEvent::UpdateHTLCs { updates, .. } = as_reconnect_msgs.pop().unwrap() { + updates + } else { + panic!(); + }; assert!(update_msgs.update_fee.is_some()); - nodes[1].node.handle_update_fee(nodes[0].node.get_our_node_id(), update_msgs.update_fee.as_ref().unwrap()); + nodes[1].node.handle_update_fee(node_a_id, update_msgs.update_fee.as_ref().unwrap()); if parallel_updates { - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &update_msgs.commitment_signed); + nodes[1] + .node + .handle_commitment_signed_batch_test(node_a_id, &update_msgs.commitment_signed); check_added_monitors!(nodes[1], 1); - let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_first_raa); + let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_first_raa); check_added_monitors!(nodes[0], 1); - let as_second_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + let as_second_update = get_htlc_update_msgs!(nodes[0], node_b_id); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_first_cs); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_first_cs); check_added_monitors!(nodes[0], 1); - let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); - nodes[1].node.handle_update_fee(nodes[0].node.get_our_node_id(), as_second_update.update_fee.as_ref().unwrap()); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_second_update.commitment_signed); + nodes[1].node.handle_update_fee(node_a_id, as_second_update.update_fee.as_ref().unwrap()); + nodes[1] + .node + .handle_commitment_signed_batch_test(node_a_id, &as_second_update.commitment_signed); check_added_monitors!(nodes[1], 1); - let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_first_raa); - let bs_second_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_first_raa); + let bs_second_cs = get_htlc_update_msgs!(nodes[1], node_a_id); check_added_monitors!(nodes[1], 1); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_second_raa); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_raa); check_added_monitors!(nodes[0], 1); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_second_cs.commitment_signed); + nodes[0] + .node + .handle_commitment_signed_batch_test(node_b_id, &bs_second_cs.commitment_signed); check_added_monitors!(nodes[0], 1); - let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_second_raa); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_second_raa); 
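// Aside: the parallel_updates branch just walked a two-deep commitment dance.
// Each commitment_signed needs a revoke_and_ack back before the state machine
// advances; a toy transcript check of that pairing, with `Msg` an
// illustrative stand-in:
#[derive(Clone, Copy)]
enum Msg { CommitmentSigned, RevokeAndAck }

// True iff every commitment_signed is matched, in order, by a revoke_and_ack.
fn dance_balanced(transcript: &[Msg]) -> bool {
	let mut outstanding = 0i32;
	for msg in transcript {
		match msg {
			Msg::CommitmentSigned => outstanding += 1,
			Msg::RevokeAndAck => {
				outstanding -= 1;
				if outstanding < 0 { return false; } // an RAA with nothing to ack
			},
		}
	}
	outstanding == 0
}

fn main() {
	use Msg::*;
	let transcript = [CommitmentSigned, RevokeAndAck, CommitmentSigned, RevokeAndAck,
		CommitmentSigned, RevokeAndAck, CommitmentSigned, RevokeAndAck];
	assert!(dance_balanced(&transcript)); // four CS/RAA pairs, as above
}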
check_added_monitors!(nodes[1], 1); } else { commitment_signed_dance!(nodes[1], nodes[0], update_msgs.commitment_signed, false); @@ -2279,14 +2638,20 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let persister; - let new_chain_monitor; + let new_chain_mon; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let nodes_0_deserialized; + let nodes_0_reload; let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - let chan_id = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 15_000_000, 7_000_000_000).2; - let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100000); - let (payment_preimage_2, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(&nodes[1]); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + let chan_id = + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 15_000_000, 7_000_000_000).2; + let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = + get_route_and_payment_hash!(&nodes[0], nodes[1], 100000); + let (payment_preimage_2, payment_hash_2, payment_secret_2) = + get_payment_preimage_hash!(&nodes[1]); // Do a really complicated dance to get an HTLC into the holding cell, with // MonitorUpdateInProgress set but AwaitingRemoteRevoke unset. When this test was written, any @@ -2310,14 +2675,16 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { // (c) will not be freed from the holding cell. let (payment_preimage_0, payment_hash_0, ..) = route_payment(&nodes[1], &[&nodes[0]], 100_000); - nodes[0].node.send_payment_with_route(route.clone(), payment_hash_1, - RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap(); + let onion_1 = RecipientOnionFields::secret_only(payment_secret_1); + let id_1 = PaymentId(payment_hash_1.0); + nodes[0].node.send_payment_with_route(route.clone(), payment_hash_1, onion_1, id_1).unwrap(); check_added_monitors!(nodes[0], 1); let send = SendEvent::from_node(&nodes[0]); assert_eq!(send.msgs.len(), 1); - nodes[0].node.send_payment_with_route(route, payment_hash_2, - RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap(); + let onion_2 = RecipientOnionFields::secret_only(payment_secret_2); + let id_2 = PaymentId(payment_hash_2.0); + nodes[0].node.send_payment_with_route(route, payment_hash_2, onion_2, id_2).unwrap(); check_added_monitors!(nodes[0], 0); let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode(); @@ -2326,13 +2693,13 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { nodes[0].node.claim_funds(payment_preimage_0); check_added_monitors!(nodes[0], 1); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &send.msgs[0]); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &send.commitment_msg); + nodes[1].node.handle_update_add_htlc(node_a_id, &send.msgs[0]); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &send.commitment_msg); check_added_monitors!(nodes[1], 1); - let (raa, cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let (raa, cs) = get_revoke_commit_msgs!(nodes[1], node_a_id); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &raa); + nodes[0].node.handle_revoke_and_ack(node_b_id, 
&raa); check_added_monitors!(nodes[0], 1); if disconnect { @@ -2340,31 +2707,34 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { // disconnect the peers. Note that the fuzzer originally found this issue because // deserializing a ChannelManager in this state causes an assertion failure. if reload_a { - reload_node!(nodes[0], &nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized); + let node_ser = nodes[0].node.encode(); + let mons = &[&chan_0_monitor_serialized[..]]; + reload_node!(nodes[0], &node_ser, mons, persister, new_chain_mon, nodes_0_reload); persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); } else { - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); } - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + nodes[1].node.peer_disconnected(node_a_id); // Now reconnect the two - nodes[0].node.peer_connected(nodes[1].node.get_our_node_id(), &msgs::Init { - features: nodes[1].node.init_features(), networks: None, remote_network_address: None - }, true).unwrap(); + let init_msg = msgs::Init { + features: nodes[1].node.init_features(), + networks: None, + remote_network_address: None, + }; + nodes[0].node.peer_connected(node_b_id, &init_msg, true).unwrap(); let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]); assert_eq!(reestablish_1.len(), 1); - nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init { - features: nodes[0].node.init_features(), networks: None, remote_network_address: None - }, false).unwrap(); + nodes[1].node.peer_connected(node_a_id, &init_msg, false).unwrap(); let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); assert_eq!(reestablish_2.len(), 1); - nodes[1].node.handle_channel_reestablish(nodes[0].node.get_our_node_id(), &reestablish_1[0]); + nodes[1].node.handle_channel_reestablish(node_a_id, &reestablish_1[0]); let resp_1 = handle_chan_reestablish_msgs!(nodes[1], nodes[0]); check_added_monitors!(nodes[1], 0); - nodes[0].node.handle_channel_reestablish(nodes[1].node.get_our_node_id(), &reestablish_2[0]); + nodes[0].node.handle_channel_reestablish(node_b_id, &reestablish_2[0]); let resp_0 = handle_chan_reestablish_msgs!(nodes[0], nodes[1]); assert!(resp_0.0.is_none()); @@ -2380,7 +2750,9 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { assert!(pending_cs.update_fail_htlcs.is_empty()); assert!(pending_cs.update_fulfill_htlcs.is_empty()); assert_eq!(pending_cs.commitment_signed, cs); - } else { panic!(); } + } else { + panic!(); + } if reload_a { // The two pending monitor updates were replayed (but are still pending). @@ -2395,7 +2767,7 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { // If we finish updating the monitor, we should free the holding cell right away (this did // not occur prior to #756). 
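// Aside: the comment above (and #756) is exactly what the next lines verify.
// A toy of the holding cell: HTLCs queued while a monitor update is in
// flight drain the moment the update completes, with no further message
// needed. `HoldingCell` is an illustrative stand-in, not the LDK structure.
struct HoldingCell { monitor_update_pending: bool, held_htlcs: Vec<u64> }

impl HoldingCell {
	fn queue(&mut self, htlc_id: u64) {
		assert!(self.monitor_update_pending, "only queued while an update is pending");
		self.held_htlcs.push(htlc_id);
	}
	fn monitor_updated(&mut self) -> Vec<u64> {
		self.monitor_update_pending = false;
		std::mem::take(&mut self.held_htlcs) // freed right away
	}
}

fn main() {
	let mut cell = HoldingCell { monitor_update_pending: true, held_htlcs: vec![] };
	cell.queue(2);
	assert_eq!(cell.monitor_updated(), vec![2]); // no extra round-trip required
}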
chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (mon_id, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone(); + let (mon_id, _) = get_latest_mon_update_id(&nodes[0], chan_id); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_id, mon_id); expect_payment_claimed!(nodes[0], payment_hash_0, 100_000); @@ -2407,30 +2779,30 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { assert_eq!(events.len(), 1); // Deliver the pending in-flight CS - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &cs); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &cs); check_added_monitors!(nodes[0], 1); let commitment_msg = match events.pop().unwrap() { MessageSendEvent::UpdateHTLCs { node_id, channel_id: _, updates } => { - assert_eq!(node_id, nodes[1].node.get_our_node_id()); + assert_eq!(node_id, node_b_id); assert!(updates.update_fail_htlcs.is_empty()); assert!(updates.update_fail_malformed_htlcs.is_empty()); assert!(updates.update_fee.is_none()); assert_eq!(updates.update_fulfill_htlcs.len(), 1); - nodes[1].node.handle_update_fulfill_htlc(nodes[0].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); + nodes[1].node.handle_update_fulfill_htlc(node_a_id, &updates.update_fulfill_htlcs[0]); expect_payment_sent(&nodes[1], payment_preimage_0, None, false, false); assert_eq!(updates.update_add_htlcs.len(), 1); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); updates.commitment_signed }, _ => panic!("Unexpected event type!"), }; - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &commitment_msg); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &commitment_msg); check_added_monitors!(nodes[1], 1); - let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_revoke_and_ack); + let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack); expect_pending_htlcs_forwardable!(nodes[1]); expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 100000); check_added_monitors!(nodes[1], 1); @@ -2440,11 +2812,11 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); match events[0] { - Event::PendingHTLCsForwardable { .. } => { }, + Event::PendingHTLCsForwardable { .. } => {}, _ => panic!("Unexpected event"), }; match events[1] { - Event::PaymentPathSuccessful { .. } => { }, + Event::PaymentPathSuccessful { .. } => {}, _ => panic!("Unexpected event"), }; @@ -2479,80 +2851,99 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1); let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2; - let (payment_preimage, payment_hash, ..) 
= route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000); + let (payment_preimage, payment_hash, ..) = + route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000); let mut as_raa = None; if htlc_status == HTLCStatusAtDupClaim::HoldingCell { // In order to get the HTLC claim into the holding cell at nodes[1], we need nodes[1] to be // awaiting a remote revoke_and_ack from nodes[0]. - let (route, second_payment_hash, _, second_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000); - nodes[0].node.send_payment_with_route(route, second_payment_hash, - RecipientOnionFields::secret_only(second_payment_secret), PaymentId(second_payment_hash.0)).unwrap(); + let (route, second_payment_hash, _, second_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], 100_000); + let onion_2 = RecipientOnionFields::secret_only(second_payment_secret); + let id_2 = PaymentId(second_payment_hash.0); + nodes[0].node.send_payment_with_route(route, second_payment_hash, onion_2, id_2).unwrap(); check_added_monitors!(nodes[0], 1); - let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &send_event.msgs[0]); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &send_event.commitment_msg); + let send_event = + SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); + nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &send_event.commitment_msg); check_added_monitors!(nodes[1], 1); - let (bs_raa, bs_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_raa); + let (bs_raa, bs_cs) = get_revoke_commit_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa); check_added_monitors!(nodes[0], 1); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_cs); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_cs); check_added_monitors!(nodes[0], 1); - as_raa = Some(get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id())); + as_raa = Some(get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id)); } - let fulfill_msg = msgs::UpdateFulfillHTLC { - channel_id: chan_id_2, - htlc_id: 0, - payment_preimage, - }; + let fulfill_msg = + msgs::UpdateFulfillHTLC { channel_id: chan_id_2, htlc_id: 0, payment_preimage }; if second_fails { nodes[2].node.fail_htlc_backwards(&payment_hash); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingFailureType::Receive { payment_hash }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[2], + [HTLCHandlingFailureType::Receive { payment_hash }] + ); check_added_monitors!(nodes[2], 1); - get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + get_htlc_update_msgs!(nodes[2], node_b_id); } else { nodes[2].node.claim_funds(payment_preimage); check_added_monitors!(nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash, 100_000); - let cs_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + let cs_updates = get_htlc_update_msgs!(nodes[2], node_b_id); assert_eq!(cs_updates.update_fulfill_htlcs.len(), 1); // Check that the message we're about to deliver matches the one generated: 
assert_eq!(fulfill_msg, cs_updates.update_fulfill_htlcs[0]); } - nodes[1].node.handle_update_fulfill_htlc(nodes[2].node.get_our_node_id(), &fulfill_msg); + nodes[1].node.handle_update_fulfill_htlc(node_c_id, &fulfill_msg); expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); check_added_monitors!(nodes[1], 1); let mut bs_updates = None; if htlc_status != HTLCStatusAtDupClaim::HoldingCell { - bs_updates = Some(get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id())); + bs_updates = Some(get_htlc_update_msgs!(nodes[1], node_a_id)); assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1); - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0]); + nodes[0].node.handle_update_fulfill_htlc( + node_b_id, + &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0], + ); expect_payment_sent(&nodes[0], payment_preimage, None, false, false); if htlc_status == HTLCStatusAtDupClaim::Cleared { - commitment_signed_dance!(nodes[0], nodes[1], &bs_updates.as_ref().unwrap().commitment_signed, false); + commitment_signed_dance!( + nodes[0], + nodes[1], + &bs_updates.as_ref().unwrap().commitment_signed, + false + ); expect_payment_path_successful!(nodes[0]); } } else { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); } - nodes[1].node.peer_disconnected(nodes[2].node.get_our_node_id()); - nodes[2].node.peer_disconnected(nodes[1].node.get_our_node_id()); + nodes[1].node.peer_disconnected(node_c_id); + nodes[2].node.peer_disconnected(node_b_id); if second_fails { let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]); reconnect_args.pending_htlc_fails.0 = 1; reconnect_nodes(reconnect_args); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[1], + [HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_id_2 }] + ); } else { let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]); reconnect_args.pending_htlc_claims.0 = 1; @@ -2560,17 +2951,25 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f } if htlc_status == HTLCStatusAtDupClaim::HoldingCell { - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_raa.unwrap()); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa.unwrap()); check_added_monitors!(nodes[1], 1); expect_pending_htlcs_forwardable_ignore!(nodes[1]); // We finally receive the second payment, but don't claim it - bs_updates = Some(get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id())); + bs_updates = Some(get_htlc_update_msgs!(nodes[1], node_a_id)); assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1); - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0]); + nodes[0].node.handle_update_fulfill_htlc( + node_b_id, + &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0], + ); expect_payment_sent(&nodes[0], payment_preimage, None, false, false); } if htlc_status != HTLCStatusAtDupClaim::Cleared { - commitment_signed_dance!(nodes[0], nodes[1], &bs_updates.as_ref().unwrap().commitment_signed, false); + commitment_signed_dance!( + nodes[0], + nodes[1], + &bs_updates.as_ref().unwrap().commitment_signed, + false + ); expect_payment_path_successful!(nodes[0]); } } 
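// Aside: the function above ends having delivered the same fulfill twice --
// once directly and once via the reconnect replay -- and nothing breaks. The
// idempotence that relies on, in miniature, with toy types only:
use std::collections::HashSet;

struct SettledHtlcs { seen: HashSet<(u64 /* channel */, u64 /* htlc_id */)> }

impl SettledHtlcs {
	// Returns true only the first time a given HTLC is fulfilled.
	fn apply_fulfill(&mut self, channel: u64, htlc_id: u64) -> bool {
		self.seen.insert((channel, htlc_id))
	}
}

fn main() {
	let mut settled = SettledHtlcs { seen: HashSet::new() };
	assert!(settled.apply_fulfill(2, 0)); // first delivery settles the HTLC
	assert!(!settled.apply_fulfill(2, 0)); // the reconnect replay is a no-op
}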
@@ -2597,16 +2996,25 @@ fn test_temporary_error_during_shutdown() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config.clone()), Some(config)]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let (_, _, channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).unwrap(); - nodes[1].node.handle_shutdown(nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id())); + nodes[0].node.close_channel(&channel_id, &node_b_id).unwrap(); + nodes[1].node.handle_shutdown( + node_a_id, + &get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, node_b_id), + ); check_added_monitors!(nodes[1], 1); - nodes[0].node.handle_shutdown(nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id())); + nodes[0].node.handle_shutdown( + node_b_id, + &get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, node_a_id), + ); check_added_monitors!(nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -2614,30 +3022,38 @@ fn test_temporary_error_during_shutdown() { chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + let (latest_update, _) = get_latest_mon_update_id(&nodes[0], channel_id); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); - nodes[1].node.handle_closing_signed(nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id())); + nodes[1].node.handle_closing_signed( + node_a_id, + &get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, node_b_id), + ); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); - nodes[0].node.handle_closing_signed(nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id())); - let (_, closing_signed_a) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); + nodes[0].node.handle_closing_signed( + node_b_id, + &get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, node_a_id), + ); + let (_, closing_signed_a) = get_closing_signed_broadcast!(nodes[0].node, node_b_id); let txn_a = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); - nodes[1].node.handle_closing_signed(nodes[0].node.get_our_node_id(), &closing_signed_a.unwrap()); - let (_, none_b) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); + nodes[1].node.handle_closing_signed(node_a_id, 
&closing_signed_a.unwrap()); + let (_, none_b) = get_closing_signed_broadcast!(nodes[1].node, node_a_id); assert!(none_b.is_none()); let txn_b = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(txn_a, txn_b); assert_eq!(txn_a.len(), 1); check_spends!(txn_a[0], funding_tx); - check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000); - check_closed_event!(nodes[0], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000); + let reason_b = ClosureReason::CounterpartyInitiatedCooperativeClosure; + check_closed_event!(nodes[1], 1, reason_b, [node_a_id], 100000); + let reason_a = ClosureReason::LocallyInitiatedCooperativeClosure; + check_closed_event!(nodes[0], 1, reason_a, [node_b_id], 100000); } #[test] @@ -2648,16 +3064,21 @@ fn double_temp_error() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let (_, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1); - let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000); - let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000); + let (payment_preimage_1, payment_hash_1, ..) = + route_payment(&nodes[0], &[&nodes[1]], 1_000_000); + let (payment_preimage_2, payment_hash_2, ..) = + route_payment(&nodes[0], &[&nodes[1]], 1_000_000); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); // `claim_funds` results in a ChannelMonitorUpdate. nodes[1].node.claim_funds(payment_preimage_1); check_added_monitors!(nodes[1], 1); - let (latest_update_1, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + let (latest_update_1, _) = get_latest_mon_update_id(&nodes[1], channel_id); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); // Previously, this would've panicked due to a double-call to `Channel::monitor_update_failed`, @@ -2666,7 +3087,7 @@ fn double_temp_error() { check_added_monitors!(nodes[1], 1); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update_2, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + let (latest_update_2, _) = get_latest_mon_update_id(&nodes[1], channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update_1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 0); @@ -2679,18 +3100,34 @@ fn double_temp_error() { let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); match events[0] { - Event::PaymentClaimed { amount_msat: 1_000_000, payment_hash, .. } => assert_eq!(payment_hash, payment_hash_1), + Event::PaymentClaimed { amount_msat: 1_000_000, payment_hash, .. } => { + assert_eq!(payment_hash, payment_hash_1) + }, _ => panic!("Unexpected Event: {:?}", events[0]), } match events[1] { - Event::PaymentClaimed { amount_msat: 1_000_000, payment_hash, .. } => assert_eq!(payment_hash, payment_hash_2), + Event::PaymentClaimed { amount_msat: 1_000_000, payment_hash, .. 
} => { + assert_eq!(payment_hash, payment_hash_2) + }, _ => panic!("Unexpected Event: {:?}", events[1]), } assert_eq!(msg_events.len(), 1); let (update_fulfill_1, commitment_signed_b1, node_id) = { match &msg_events[0] { - &MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => { + &MessageSendEvent::UpdateHTLCs { + ref node_id, + channel_id: _, + updates: + msgs::CommitmentUpdate { + ref update_add_htlcs, + ref update_fulfill_htlcs, + ref update_fail_htlcs, + ref update_fail_malformed_htlcs, + ref update_fee, + ref commitment_signed, + }, + } => { assert!(update_add_htlcs.is_empty()); assert_eq!(update_fulfill_htlcs.len(), 1); assert!(update_fail_htlcs.is_empty()); @@ -2701,50 +3138,52 @@ fn double_temp_error() { _ => panic!("Unexpected event"), } }; - assert_eq!(node_id, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &update_fulfill_1); + assert_eq!(node_id, node_a_id); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &update_fulfill_1); check_added_monitors!(nodes[0], 0); expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &commitment_signed_b1); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &commitment_signed_b1); check_added_monitors!(nodes[0], 1); nodes[0].node.process_pending_htlc_forwards(); - let (raa_a1, commitment_signed_a1) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + let (raa_a1, commitment_signed_a1) = get_revoke_commit_msgs!(nodes[0], node_b_id); check_added_monitors!(nodes[1], 0); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &raa_a1); + nodes[1].node.handle_revoke_and_ack(node_a_id, &raa_a1); check_added_monitors!(nodes[1], 1); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &commitment_signed_a1); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &commitment_signed_a1); check_added_monitors!(nodes[1], 1); // Complete the second HTLC. 
let ((update_fulfill_2, commitment_signed_b2), raa_b2) = { let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 2); - (match &events[0] { - MessageSendEvent::UpdateHTLCs { node_id, channel_id: _, updates } => { - assert_eq!(*node_id, nodes[0].node.get_our_node_id()); - assert!(updates.update_add_htlcs.is_empty()); - assert!(updates.update_fail_htlcs.is_empty()); - assert!(updates.update_fail_malformed_htlcs.is_empty()); - assert!(updates.update_fee.is_none()); - assert_eq!(updates.update_fulfill_htlcs.len(), 1); - (updates.update_fulfill_htlcs[0].clone(), updates.commitment_signed.clone()) + ( + match &events[0] { + MessageSendEvent::UpdateHTLCs { node_id, channel_id: _, updates } => { + assert_eq!(*node_id, node_a_id); + assert!(updates.update_add_htlcs.is_empty()); + assert!(updates.update_fail_htlcs.is_empty()); + assert!(updates.update_fail_malformed_htlcs.is_empty()); + assert!(updates.update_fee.is_none()); + assert_eq!(updates.update_fulfill_htlcs.len(), 1); + (updates.update_fulfill_htlcs[0].clone(), updates.commitment_signed.clone()) + }, + _ => panic!("Unexpected event"), }, - _ => panic!("Unexpected event"), - }, - match events[1] { - MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => { - assert_eq!(*node_id, nodes[0].node.get_our_node_id()); - (*msg).clone() - }, - _ => panic!("Unexpected event"), - }) + match events[1] { + MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => { + assert_eq!(*node_id, node_a_id); + (*msg).clone() + }, + _ => panic!("Unexpected event"), + }, + ) }; - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &raa_b2); + nodes[0].node.handle_revoke_and_ack(node_b_id, &raa_b2); check_added_monitors!(nodes[0], 1); expect_payment_path_successful!(nodes[0]); - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &update_fulfill_2); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &update_fulfill_2); check_added_monitors!(nodes[0], 0); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); commitment_signed_dance!(nodes[0], nodes[1], commitment_signed_b2, false); @@ -2765,38 +3204,56 @@ fn do_test_outbound_reload_without_init_mon(use_0conf: bool) { chan_config.manually_accept_inbound_channels = true; chan_config.channel_handshake_limits.trust_own_funding_0conf = true; - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(chan_config.clone()), Some(chan_config)]); - let nodes_0_deserialized; + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(chan_config.clone()), Some(chan_config)]); + let node_a_reload; let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43, None, None).unwrap(); - nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id())); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + nodes[0].node.create_channel(node_b_id, 100000, 10001, 43, None, None).unwrap(); + nodes[1].node.handle_open_channel( + node_a_id, + &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id), + ); let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); match events[0] { - Event::OpenChannelRequest { temporary_channel_id, .. } => { + Event::OpenChannelRequest { temporary_channel_id: chan_id, .. 
} => { if use_0conf { - nodes[1].node.accept_inbound_channel_from_trusted_peer_0conf(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0, None).unwrap(); + nodes[1] + .node + .accept_inbound_channel_from_trusted_peer_0conf(&chan_id, &node_a_id, 0, None) + .unwrap(); } else { - nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0, None).unwrap(); + nodes[1].node.accept_inbound_channel(&chan_id, &node_a_id, 0, None).unwrap(); } }, _ => panic!("Unexpected event"), }; - nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id())); + nodes[0].node.handle_accept_channel( + node_b_id, + &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id), + ); - let (temporary_channel_id, funding_tx, ..) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 43); + let (temporary_channel_id, funding_tx, ..) = + create_funding_transaction(&nodes[0], &node_b_id, 100000, 43); - nodes[0].node.funding_transaction_generated(temporary_channel_id, nodes[1].node.get_our_node_id(), funding_tx.clone()).unwrap(); + nodes[0] + .node + .funding_transaction_generated(temporary_channel_id, node_b_id, funding_tx.clone()) + .unwrap(); check_added_monitors!(nodes[0], 0); - let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created_msg); + let funding_created_msg = + get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id); + nodes[1].node.handle_funding_created(node_a_id, &funding_created_msg); check_added_monitors!(nodes[1], 1); - expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id()); + expect_channel_pending_event(&nodes[1], &node_a_id); let bs_signed_locked = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(bs_signed_locked.len(), if use_0conf { 2 } else { 1 }); @@ -2804,16 +3261,16 @@ fn do_test_outbound_reload_without_init_mon(use_0conf: bool) { MessageSendEvent::SendFundingSigned { msg, .. } => { chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - nodes[0].node.handle_funding_signed(nodes[1].node.get_our_node_id(), &msg); + nodes[0].node.handle_funding_signed(node_b_id, &msg); check_added_monitors!(nodes[0], 1); - } + }, _ => panic!("Unexpected event"), } if use_0conf { match &bs_signed_locked[1] { MessageSendEvent::SendChannelReady { msg, .. 
} => { - nodes[0].node.handle_channel_ready(nodes[1].node.get_our_node_id(), &msg); - } + nodes[0].node.handle_channel_ready(node_b_id, &msg); + }, _ => panic!("Unexpected event"), } } @@ -2831,8 +3288,9 @@ fn do_test_outbound_reload_without_init_mon(use_0conf: bool) { nodes[0].chain_source.watched_txn.lock().unwrap().clear(); nodes[0].chain_source.watched_outputs.lock().unwrap().clear(); - reload_node!(nodes[0], &nodes[0].node.encode(), &[], persister, new_chain_monitor, nodes_0_deserialized); - check_closed_event!(nodes[0], 1, ClosureReason::DisconnectedPeer, [nodes[1].node.get_our_node_id()], 100000); + let node_a_ser = nodes[0].node.encode(); + reload_node!(nodes[0], &node_a_ser, &[], persister, new_chain_monitor, node_a_reload); + check_closed_event!(nodes[0], 1, ClosureReason::DisconnectedPeer, [node_b_id], 100000); assert!(nodes[0].node.list_channels().is_empty()); } @@ -2856,47 +3314,66 @@ fn do_test_inbound_reload_without_init_mon(use_0conf: bool, lock_commitment: boo chan_config.manually_accept_inbound_channels = true; chan_config.channel_handshake_limits.trust_own_funding_0conf = true; - let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(chan_config.clone()), Some(chan_config)]); - let nodes_1_deserialized; + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(chan_config.clone()), Some(chan_config)]); + let node_b_reload; let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43, None, None).unwrap(); - nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id())); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + + nodes[0].node.create_channel(node_b_id, 100000, 10001, 43, None, None).unwrap(); + nodes[1].node.handle_open_channel( + node_a_id, + &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id), + ); let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); match events[0] { - Event::OpenChannelRequest { temporary_channel_id, .. } => { + Event::OpenChannelRequest { temporary_channel_id: chan_id, .. } => { if use_0conf { - nodes[1].node.accept_inbound_channel_from_trusted_peer_0conf(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0, None).unwrap(); + nodes[1] + .node + .accept_inbound_channel_from_trusted_peer_0conf(&chan_id, &node_a_id, 0, None) + .unwrap(); } else { - nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0, None).unwrap(); + nodes[1].node.accept_inbound_channel(&chan_id, &node_a_id, 0, None).unwrap(); } }, _ => panic!("Unexpected event"), }; - nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id())); + nodes[0].node.handle_accept_channel( + node_b_id, + &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id), + ); - let (temporary_channel_id, funding_tx, ..) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 43); + let (temporary_channel_id, funding_tx, ..) 
= + create_funding_transaction(&nodes[0], &node_b_id, 100000, 43); - nodes[0].node.funding_transaction_generated(temporary_channel_id, nodes[1].node.get_our_node_id(), funding_tx.clone()).unwrap(); + nodes[0] + .node + .funding_transaction_generated(temporary_channel_id, node_b_id, funding_tx.clone()) + .unwrap(); check_added_monitors!(nodes[0], 0); - let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()); + let funding_created_msg = + get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created_msg); + nodes[1].node.handle_funding_created(node_a_id, &funding_created_msg); check_added_monitors!(nodes[1], 1); // nodes[1] happily sends its funding_signed even though it's awaiting the persistence of the // initial ChannelMonitor, but it will decline to send its channel_ready even if the funding // transaction is confirmed. - let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()); + let funding_signed_msg = + get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id); - nodes[0].node.handle_funding_signed(nodes[1].node.get_our_node_id(), &funding_signed_msg); + nodes[0].node.handle_funding_signed(node_b_id, &funding_signed_msg); check_added_monitors!(nodes[0], 1); - expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id()); + expect_channel_pending_event(&nodes[0], &node_b_id); let as_funding_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); if lock_commitment { @@ -2904,8 +3381,8 @@ fn do_test_inbound_reload_without_init_mon(use_0conf: bool, lock_commitment: boo confirm_transaction(&nodes[1], &as_funding_tx[0]); } if use_0conf || lock_commitment { - let as_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_channel_ready(nodes[0].node.get_our_node_id(), &as_ready); + let as_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, node_b_id); + nodes[1].node.handle_channel_ready(node_a_id, &as_ready); } assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -2918,9 +3395,10 @@ fn do_test_inbound_reload_without_init_mon(use_0conf: bool, lock_commitment: boo nodes[1].chain_source.watched_txn.lock().unwrap().clear(); nodes[1].chain_source.watched_outputs.lock().unwrap().clear(); - reload_node!(nodes[1], &nodes[1].node.encode(), &[], persister, new_chain_monitor, nodes_1_deserialized); + let node_b_ser = nodes[1].node.encode(); + reload_node!(nodes[1], &node_b_ser, &[], persister, new_chain_monitor, node_b_reload); - check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer, [nodes[0].node.get_our_node_id()], 100000); + check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer, [node_a_id], 100000); assert!(nodes[1].node.list_channels().is_empty()); } @@ -2941,6 +3419,10 @@ fn test_blocked_chan_preimage_release() { let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1); let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1,
2).2; @@ -2948,17 +3430,22 @@ fn test_blocked_chan_preimage_release() { // Tee up two payments in opposite directions across nodes[1], one it sent to generate a // PaymentSent event and one it forwards. - let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[1], &[&nodes[2]], 1_000_000); - let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[2], &[&nodes[1], &nodes[0]], 1_000_000); + let (payment_preimage_1, payment_hash_1, ..) = + route_payment(&nodes[1], &[&nodes[2]], 1_000_000); + let (payment_preimage_2, payment_hash_2, ..) = + route_payment(&nodes[2], &[&nodes[1], &nodes[0]], 1_000_000); // Claim the first payment to get a `PaymentSent` event (but don't handle it yet). nodes[2].node.claim_funds(payment_preimage_1); check_added_monitors(&nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash_1, 1_000_000); - let cs_htlc_fulfill_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); - nodes[1].node.handle_update_fulfill_htlc(nodes[2].node.get_our_node_id(), &cs_htlc_fulfill_updates.update_fulfill_htlcs[0]); - do_commitment_signed_dance(&nodes[1], &nodes[2], &cs_htlc_fulfill_updates.commitment_signed, false, false); + let cs_htlc_fulfill_updates = get_htlc_update_msgs!(nodes[2], node_b_id); + nodes[1] + .node + .handle_update_fulfill_htlc(node_c_id, &cs_htlc_fulfill_updates.update_fulfill_htlcs[0]); + let commitment = cs_htlc_fulfill_updates.commitment_signed; + do_commitment_signed_dance(&nodes[1], &nodes[2], &commitment, false, false); check_added_monitors(&nodes[1], 0); // Now claim the second payment on nodes[0], which will ultimately result in nodes[1] trying to @@ -2968,8 +3455,10 @@ fn test_blocked_chan_preimage_release() { check_added_monitors(&nodes[0], 1); expect_payment_claimed!(nodes[0], payment_hash_2, 1_000_000); - let as_htlc_fulfill_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - nodes[1].node.handle_update_fulfill_htlc(nodes[0].node.get_our_node_id(), &as_htlc_fulfill_updates.update_fulfill_htlcs[0]); + let as_htlc_fulfill_updates = get_htlc_update_msgs!(nodes[0], node_b_id); + nodes[1] + .node + .handle_update_fulfill_htlc(node_a_id, &as_htlc_fulfill_updates.update_fulfill_htlcs[0]); check_added_monitors(&nodes[1], 1); // We generate only a preimage monitor update assert!(get_monitor!(nodes[1], chan_id_2).get_stored_preimages().contains_key(&payment_hash_2)); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -2977,35 +3466,51 @@ fn test_blocked_chan_preimage_release() { // Finish the CS dance between nodes[0] and nodes[1]. Note that until the event handling, the // update_fulfill_htlc + CS is held, even though the preimage is already on disk for the // channel. - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_htlc_fulfill_updates.commitment_signed); + nodes[1] + .node + .handle_commitment_signed_batch_test(node_a_id, &as_htlc_fulfill_updates.commitment_signed); check_added_monitors(&nodes[1], 1); let (a, raa) = do_main_commitment_signed_dance(&nodes[1], &nodes[0], false); assert!(a.is_none()); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &raa); + nodes[1].node.handle_revoke_and_ack(node_a_id, &raa); check_added_monitors(&nodes[1], 0); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), 3); - if let Event::PaymentSent { .. 
} = events[0] {} else { panic!(); } - if let Event::PaymentPathSuccessful { .. } = events[2] {} else { panic!(); } - if let Event::PaymentForwarded { .. } = events[1] {} else { panic!(); } + if let Event::PaymentSent { .. } = events[0] { + } else { + panic!(); + } + if let Event::PaymentPathSuccessful { .. } = events[2] { + } else { + panic!(); + } + if let Event::PaymentForwarded { .. } = events[1] { + } else { + panic!(); + } // The event processing should release the last RAA updates on both channels. check_added_monitors(&nodes[1], 2); // When we fetch the next update the message getter will generate the next update for nodes[2], // generating a further monitor update. - let bs_htlc_fulfill_updates = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id()); + let bs_htlc_fulfill_updates = get_htlc_update_msgs!(nodes[1], node_c_id); check_added_monitors(&nodes[1], 1); - nodes[2].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_htlc_fulfill_updates.update_fulfill_htlcs[0]); - do_commitment_signed_dance(&nodes[2], &nodes[1], &bs_htlc_fulfill_updates.commitment_signed, false, false); + nodes[2] + .node + .handle_update_fulfill_htlc(node_b_id, &bs_htlc_fulfill_updates.update_fulfill_htlcs[0]); + let commitment = bs_htlc_fulfill_updates.commitment_signed; + do_commitment_signed_dance(&nodes[2], &nodes[1], &commitment, false, false); expect_payment_sent(&nodes[2], payment_preimage_2, None, true, true); } -fn do_test_inverted_mon_completion_order(with_latest_manager: bool, complete_bc_commitment_dance: bool) { +fn do_test_inverted_mon_completion_order( + with_latest_manager: bool, complete_bc_commitment_dance: bool, +) { // When we forward a payment and receive `update_fulfill_htlc`+`commitment_signed` messages // from the downstream channel, we immediately claim the HTLC on the upstream channel, before // even doing a `commitment_signed` dance on the downstream channel. This implies that our @@ -3018,19 +3523,24 @@ fn do_test_inverted_mon_completion_order(with_latest_manager: bool, complete_bc_ let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let persister; - let new_chain_monitor; - let nodes_1_deserialized; + let chain_mon; + let node_b_reload; let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let chan_id_ab = create_announced_chan_between_nodes(&nodes, 0, 1).2; let chan_id_bc = create_announced_chan_between_nodes(&nodes, 1, 2).2; // Route a payment from A, through B, to C, then claim it on C. Once we pass B the // `update_fulfill_htlc` we have a monitor update for both of B's channels. We complete the one // on the B<->C channel but leave the A<->B monitor update pending, then reload B. - let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000); + let (payment_preimage, payment_hash, ..) 
= + route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000); let mon_ab = get_monitor!(nodes[1], chan_id_ab).encode(); let mut manager_b = Vec::new(); @@ -3043,8 +3553,8 @@ fn do_test_inverted_mon_completion_order(with_latest_manager: bool, complete_bc_ expect_payment_claimed!(nodes[2], payment_hash, 100_000); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - let cs_updates = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id()); - nodes[1].node.handle_update_fulfill_htlc(nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]); + let cs_updates = get_htlc_update_msgs(&nodes[2], &node_b_id); + nodes[1].node.handle_update_fulfill_htlc(node_c_id, &cs_updates.update_fulfill_htlcs[0]); // B generates a new monitor update for the A <-> B channel, but doesn't send the new messages // for it since the monitor update is marked in-progress. @@ -3054,20 +3564,21 @@ fn do_test_inverted_mon_completion_order(with_latest_manager: bool, complete_bc_ // Now step the Commitment Signed Dance between B and C forward a bit (or fully), ensuring we // won't get the preimage when the nodes reconnect and we have to get it from the // ChannelMonitor. - nodes[1].node.handle_commitment_signed_batch_test(nodes[2].node.get_our_node_id(), &cs_updates.commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &cs_updates.commitment_signed); check_added_monitors(&nodes[1], 1); if complete_bc_commitment_dance { - let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[2].node.get_our_node_id()); - nodes[2].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_revoke_and_ack); + let (bs_revoke_and_ack, bs_commitment_signed) = + get_revoke_commit_msgs!(nodes[1], node_c_id); + nodes[2].node.handle_revoke_and_ack(node_b_id, &bs_revoke_and_ack); check_added_monitors(&nodes[2], 1); - nodes[2].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_commitment_signed); + nodes[2].node.handle_commitment_signed_batch_test(node_b_id, &bs_commitment_signed); check_added_monitors(&nodes[2], 1); - let cs_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + let cs_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, node_b_id); // At this point node B still hasn't persisted the `ChannelMonitorUpdate` with the // preimage in the A <-> B channel, which will prevent it from persisting the // `ChannelMonitorUpdate` for the B<->C channel here to avoid "losing" the preimage. 
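The hold described in the comment above is driven entirely by the test persister: returning `ChannelMonitorUpdateStatus::InProgress` stands in for an asynchronous persister that has not finished writing yet. A minimal sketch of the idiom, using only helpers that already appear in this file:

    // Subsequent monitor updates report InProgress and stay pending, exactly
    // as they would behind a slow asynchronous persister.
    chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);

    // ...drive the protocol; B holds back messages for the pending updates...

    // Completing the A <-> B update is what finally lets the held B <-> C
    // ChannelMonitorUpdate (and B's queued messages) fly.
    let (_, ab_update_id) = get_latest_mon_update_id(&nodes[1], chan_id_ab);
    nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(chan_id_ab, ab_update_id).unwrap();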
- nodes[1].node.handle_revoke_and_ack(nodes[2].node.get_our_node_id(), &cs_raa); + nodes[1].node.handle_revoke_and_ack(node_c_id, &cs_raa); check_added_monitors(&nodes[1], 0); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); } @@ -3078,10 +3589,10 @@ fn do_test_inverted_mon_completion_order(with_latest_manager: bool, complete_bc_ } let mon_bc = get_monitor!(nodes[1], chan_id_bc).encode(); - reload_node!(nodes[1], &manager_b, &[&mon_ab, &mon_bc], persister, new_chain_monitor, nodes_1_deserialized); + reload_node!(nodes[1], &manager_b, &[&mon_ab, &mon_bc], persister, chain_mon, node_b_reload); - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[2].node.peer_disconnected(nodes[1].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); + nodes[2].node.peer_disconnected(node_b_id); if with_latest_manager { // If we used the latest ChannelManager to reload from, we should have both channels still @@ -3113,12 +3624,16 @@ fn do_test_inverted_mon_completion_order(with_latest_manager: bool, complete_bc_ // (Finally) complete the A <-> B ChannelMonitorUpdate, ensuring the preimage is durably on // disk in the proper ChannelMonitor, unblocking the B <-> C ChannelMonitor updating // process. - let (_, ab_update_id) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone(); - nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(chan_id_ab, ab_update_id).unwrap(); - - // When we fetch B's HTLC update messages next (now that the ChannelMonitorUpdate has - // completed), it will also release the final RAA ChannelMonitorUpdate on the B <-> C - // channel. + let (_, ab_update_id) = get_latest_mon_update_id(&nodes[1], chan_id_ab); + nodes[1] + .chain_monitor + .chain_monitor + .channel_monitor_updated(chan_id_ab, ab_update_id) + .unwrap(); + + // When we fetch B's HTLC update messages next (now that the ChannelMonitorUpdate has + // completed), it will also release the final RAA ChannelMonitorUpdate on the B <-> C + // channel. } else { // If the ChannelManager used in the reload was stale, check that the B <-> C channel was // closed. @@ -3130,7 +3645,8 @@ fn do_test_inverted_mon_completion_order(with_latest_manager: bool, complete_bc_ check_added_monitors(&nodes[1], 0); persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - check_closed_event(&nodes[1], 1, ClosureReason::OutdatedChannelManager, false, &[nodes[2].node.get_our_node_id()], 100_000); + let reason = ClosureReason::OutdatedChannelManager; + check_closed_event(&nodes[1], 1, reason, false, &[node_c_id], 100_000); check_added_monitors(&nodes[1], 2); nodes[1].node.timer_tick_occurred(); @@ -3141,8 +3657,12 @@ fn do_test_inverted_mon_completion_order(with_latest_manager: bool, complete_bc_ // ChannelMonitorUpdate hasn't yet completed. 
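Reconnection expectations in these tests are declared up front on `ReconnectArgs` rather than asserted after the fact; `reconnect_nodes` then verifies that exactly the declared claims, fails, and RAAs get replayed. The pattern as it appears elsewhere in this file:

    let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]);
    // Expect exactly one HTLC claim to be replayed from the ChannelMonitor.
    reconnect_args.pending_htlc_claims.0 = 1;
    reconnect_nodes(reconnect_args);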
reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); - let (_, ab_update_id) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone(); - nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(chan_id_ab, ab_update_id).unwrap(); + let (_, ab_update_id) = get_latest_mon_update_id(&nodes[1], chan_id_ab); + nodes[1] + .chain_monitor + .chain_monitor + .channel_monitor_updated(chan_id_ab, ab_update_id) + .unwrap(); // The ChannelMonitorUpdate which was completed prior to the reconnect only contained the // preimage (as it was a replay of the original ChannelMonitorUpdate from before we @@ -3152,13 +3672,20 @@ fn do_test_inverted_mon_completion_order(with_latest_manager: bool, complete_bc_ // node A. } - let bs_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); + let bs_updates = get_htlc_update_msgs(&nodes[1], &node_a_id); check_added_monitors(&nodes[1], 1); - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &bs_updates.update_fulfill_htlcs[0]); do_commitment_signed_dance(&nodes[0], &nodes[1], &bs_updates.commitment_signed, false, false); - expect_payment_forwarded!(nodes[1], &nodes[0], &nodes[2], Some(1_000), false, !with_latest_manager); + expect_payment_forwarded!( + nodes[1], + &nodes[0], + &nodes[2], + Some(1_000), + false, + !with_latest_manager + ); // Finally, check that the payment was, ultimately, seen as sent by node A. expect_payment_sent(&nodes[0], payment_preimage, None, true, true); @@ -3172,7 +3699,9 @@ fn test_inverted_mon_completion_order() { do_test_inverted_mon_completion_order(false, false); } -fn do_test_durable_preimages_on_closed_channel(close_chans_before_reload: bool, close_only_a: bool, hold_post_reload_mon_update: bool) { +fn do_test_durable_preimages_on_closed_channel( + close_chans_before_reload: bool, close_only_a: bool, hold_post_reload_mon_update: bool, +) { // Test that we can apply a `ChannelMonitorUpdate` with a payment preimage even if the channel // is force-closed between when we generate the update on reload and when we go to handle the // update or prior to generating the update at all. @@ -3186,19 +3715,24 @@ fn do_test_durable_preimages_on_closed_channel(close_chans_before_reload: bool, let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let persister; - let new_chain_monitor; - let nodes_1_deserialized; + let chain_mon; + let node_b_reload; let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let chan_id_ab = create_announced_chan_between_nodes(&nodes, 0, 1).2; let chan_id_bc = create_announced_chan_between_nodes(&nodes, 1, 2).2; // Route a payment from A, through B, to C, then claim it on C. Once we pass B the // `update_fulfill_htlc` we have a monitor update for both of B's channels. We complete the one // on the B<->C channel but leave the A<->B monitor update pending, then reload B. - let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000); + let (payment_preimage, payment_hash, ..) 
= + route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000); let mon_ab = get_monitor!(nodes[1], chan_id_ab).encode(); @@ -3207,8 +3741,8 @@ fn do_test_durable_preimages_on_closed_channel(close_chans_before_reload: bool, expect_payment_claimed!(nodes[2], payment_hash, 1_000_000); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - let cs_updates = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id()); - nodes[1].node.handle_update_fulfill_htlc(nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]); + let cs_updates = get_htlc_update_msgs(&nodes[2], &node_b_id); + nodes[1].node.handle_update_fulfill_htlc(node_c_id, &cs_updates.update_fulfill_htlcs[0]); // B generates a new monitor update for the A <-> B channel, but doesn't send the new messages // for it since the monitor update is marked in-progress. @@ -3218,33 +3752,41 @@ fn do_test_durable_preimages_on_closed_channel(close_chans_before_reload: bool, // Now step the Commitment Signed Dance between B and C forward a bit, ensuring we won't get // the preimage when the nodes reconnect, at which point we have to ensure we get it from the // ChannelMonitor. - nodes[1].node.handle_commitment_signed_batch_test(nodes[2].node.get_our_node_id(), &cs_updates.commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &cs_updates.commitment_signed); check_added_monitors(&nodes[1], 1); - let _ = get_revoke_commit_msgs!(nodes[1], nodes[2].node.get_our_node_id()); + let _ = get_revoke_commit_msgs!(nodes[1], node_c_id); let mon_bc = get_monitor!(nodes[1], chan_id_bc).encode(); - let error_message = "Channel force-closed"; + let err_msg = "Channel force-closed".to_owned(); if close_chans_before_reload { if !close_only_a { chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - nodes[1].node.force_close_broadcasting_latest_txn(&chan_id_bc, &nodes[2].node.get_our_node_id(), error_message.to_string()).unwrap(); + nodes[1] + .node + .force_close_broadcasting_latest_txn(&chan_id_bc, &node_c_id, err_msg.clone()) + .unwrap(); check_closed_broadcast(&nodes[1], 1, true); - check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false, &[nodes[2].node.get_our_node_id()], 100000); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + check_closed_event(&nodes[1], 1, reason, false, &[node_c_id], 100000); } chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - nodes[1].node.force_close_broadcasting_latest_txn(&chan_id_ab, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap(); + nodes[1] + .node + .force_close_broadcasting_latest_txn(&chan_id_ab, &node_a_id, err_msg) + .unwrap(); check_closed_broadcast(&nodes[1], 1, true); - check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false, &[nodes[0].node.get_our_node_id()], 100000); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100000); } // Now reload node B let manager_b = nodes[1].node.encode(); - reload_node!(nodes[1], &manager_b, &[&mon_ab, &mon_bc], persister, new_chain_monitor, nodes_1_deserialized); + reload_node!(nodes[1], &manager_b, &[&mon_ab, &mon_bc], persister, chain_mon, node_b_reload); - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - 
nodes[2].node.peer_disconnected(nodes[1].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); + nodes[2].node.peer_disconnected(node_b_id); if close_chans_before_reload { // If the channels were already closed, B will rebroadcast its closing transactions here. @@ -3255,10 +3797,11 @@ fn do_test_durable_preimages_on_closed_channel(close_chans_before_reload: bool, assert_eq!(bs_close_txn.len(), 3); } } - let error_message = "Channel force-closed"; - nodes[0].node.force_close_broadcasting_latest_txn(&chan_id_ab, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap(); - check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false, &[nodes[1].node.get_our_node_id()], 100000); + let err_msg = "Channel force-closed".to_owned(); + nodes[0].node.force_close_broadcasting_latest_txn(&chan_id_ab, &node_b_id, err_msg).unwrap(); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 100000); let as_closing_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(as_closing_tx.len(), 1); @@ -3270,7 +3813,15 @@ fn do_test_durable_preimages_on_closed_channel(close_chans_before_reload: bool, // After a timer tick a payment preimage ChannelMonitorUpdate is applied to the A<->B // ChannelMonitor (possibly twice), even though the channel has since been closed. check_added_monitors(&nodes[1], 0); - let mons_added = if close_chans_before_reload { if !close_only_a { 4 } else { 3 } } else { 2 }; + let mons_added = if close_chans_before_reload { + if !close_only_a { + 4 + } else { + 3 + } + } else { + 2 + }; if hold_post_reload_mon_update { for _ in 0..mons_added { persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); } } @@ -3282,12 +3833,16 @@ fn do_test_durable_preimages_on_closed_channel(close_chans_before_reload: bool, // Finally, check that B created a payment preimage transaction and close out the payment. let bs_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(bs_txn.len(), if close_chans_before_reload && !close_only_a { 2 } else { 1 }); - let bs_preimage_tx = bs_txn.iter().find(|tx| tx.input[0].previous_output.txid == as_closing_tx[0].compute_txid()).unwrap(); + let bs_preimage_tx = bs_txn + .iter() + .find(|tx| tx.input[0].previous_output.txid == as_closing_tx[0].compute_txid()) + .unwrap(); check_spends!(bs_preimage_tx, as_closing_tx[0]); if !close_chans_before_reload { check_closed_broadcast(&nodes[1], 1, true); - check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false, &[nodes[0].node.get_our_node_id()], 100000); + let reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100000); } mine_transactions(&nodes[0], &[&as_closing_tx[0], bs_preimage_tx]); @@ -3305,12 +3860,11 @@ fn do_test_durable_preimages_on_closed_channel(close_chans_before_reload: bool, reconnect_args.pending_raa.1 = true; reconnect_nodes(reconnect_args); - } // Once the blocked `ChannelMonitorUpdate` *finally* completes, the pending // `PaymentForwarded` event will finally be released.
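Two completion entry points appear in these hunks and are easy to conflate. Both calls below are lifted from this file; the difference is that `channel_monitor_updated` is the fallible path a (possibly asynchronous) persister drives when one update finishes, while `force_channel_monitor_updated` returns nothing and the tests use it to simply mark the update complete:

    // Fallible completion, as a real persister would report it:
    nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(chan_id_ab, ab_update_id).unwrap();

    // Unconditional completion, when the test just needs to unblock:
    nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_id_ab, ab_update_id);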
- let (ab_update_id, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone(); + let (_, ab_update_id) = get_latest_mon_update_id(&nodes[1], chan_id_ab); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_id_ab, ab_update_id); // If the A<->B channel was closed before we reload, we'll replay the claim against it on @@ -3318,8 +3872,8 @@ fn do_test_durable_preimages_on_closed_channel(close_chans_before_reload: bool, let evs = nodes[1].node.get_and_clear_pending_events(); assert_eq!(evs.len(), if close_chans_before_reload { 2 } else { 1 }); for ev in evs { - if let Event::PaymentForwarded { .. } = ev { } - else { + if let Event::PaymentForwarded { .. } = ev { + } else { panic!(); } } @@ -3350,12 +3904,15 @@ fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) { let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let persister; - let new_chain_monitor; - let nodes_1_deserialized; + let chain_mon; + let node_b_reload; let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let chan_id_ab = create_announced_chan_between_nodes(&nodes, 0, 1).2; let chan_id_bc = create_announced_chan_between_nodes(&nodes, 1, 2).2; @@ -3364,15 +3921,16 @@ fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) { // We complete the commitment signed dance on the B<->C channel but leave the A<->B monitor // update pending, then reload B. At that point, the final monitor update on the B<->C channel // is still pending because it can't fly until the preimage is persisted on the A<->B monitor. - let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000); + let (payment_preimage, payment_hash, ..) = + route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000); nodes[2].node.claim_funds(payment_preimage); check_added_monitors(&nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash, 1_000_000); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - let cs_updates = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id()); - nodes[1].node.handle_update_fulfill_htlc(nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]); + let cs_updates = get_htlc_update_msgs(&nodes[2], &node_b_id); + nodes[1].node.handle_update_fulfill_htlc(node_c_id, &cs_updates.update_fulfill_htlcs[0]); // B generates a new monitor update for the A <-> B channel, but doesn't send the new messages // for it since the monitor update is marked in-progress. @@ -3381,17 +3939,17 @@ fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) { // Now step the Commitment Signed Dance between B and C and check that after the final RAA B // doesn't let the preimage-removing monitor update fly. 
- nodes[1].node.handle_commitment_signed_batch_test(nodes[2].node.get_our_node_id(), &cs_updates.commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &cs_updates.commitment_signed); check_added_monitors(&nodes[1], 1); - let (bs_raa, bs_cs) = get_revoke_commit_msgs!(nodes[1], nodes[2].node.get_our_node_id()); + let (bs_raa, bs_cs) = get_revoke_commit_msgs!(nodes[1], node_c_id); - nodes[2].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_raa); + nodes[2].node.handle_revoke_and_ack(node_b_id, &bs_raa); check_added_monitors(&nodes[2], 1); - nodes[2].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_cs); + nodes[2].node.handle_commitment_signed_batch_test(node_b_id, &bs_cs); check_added_monitors(&nodes[2], 1); - let cs_final_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_revoke_and_ack(nodes[2].node.get_our_node_id(), &cs_final_raa); + let cs_final_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, node_b_id); + nodes[1].node.handle_revoke_and_ack(node_c_id, &cs_final_raa); check_added_monitors(&nodes[1], 0); // Finally, reload node B and check that after we call `process_pending_events` once we realize @@ -3400,25 +3958,35 @@ fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) { let mon_ab = get_monitor!(nodes[1], chan_id_ab).encode(); let mon_bc = get_monitor!(nodes[1], chan_id_bc).encode(); let manager_b = nodes[1].node.encode(); - reload_node!(nodes[1], &manager_b, &[&mon_ab, &mon_bc], persister, new_chain_monitor, nodes_1_deserialized); + reload_node!(nodes[1], &manager_b, &[&mon_ab, &mon_bc], persister, chain_mon, node_b_reload); - let error_message = "Channel force-closed"; + let msg = "Channel force-closed".to_owned(); if close_during_reload { // Test that we still free the B<->C channel if the A<->B channel closed while we reloaded // (as learned about during the on-reload block connection). - nodes[0].node.force_close_broadcasting_latest_txn(&chan_id_ab, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap(); + nodes[0].node.force_close_broadcasting_latest_txn(&chan_id_ab, &node_b_id, msg).unwrap(); check_added_monitors!(nodes[0], 1); check_closed_broadcast!(nodes[0], true); - check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false, &[nodes[1].node.get_our_node_id()], 100_000); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 100_000); let as_closing_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); mine_transaction_without_consistency_checks(&nodes[1], &as_closing_tx[0]); } - let bc_update_id = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_bc).unwrap().1; + let (_, bc_update_id) = get_latest_mon_update_id(&nodes[1], chan_id_bc); let mut events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), if close_during_reload { 2 } else { 1 }); - expect_payment_forwarded(events.pop().unwrap(), &nodes[1], &nodes[0], &nodes[2], Some(1000), - None, close_during_reload, false, false); + expect_payment_forwarded( + events.pop().unwrap(), + &nodes[1], + &nodes[0], + &nodes[2], + Some(1000), + None, + close_during_reload, + false, + false, + ); if close_during_reload { match events[0] { Event::ChannelClosed { .. 
} => {}, @@ -3430,12 +3998,12 @@ fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) { // Once we run event processing the monitor should free, check that it was indeed the B<->C // channel which was updated. check_added_monitors(&nodes[1], if close_during_reload { 2 } else { 1 }); - let post_ev_bc_update_id = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_bc).unwrap().1; + let (_, post_ev_bc_update_id) = get_latest_mon_update_id(&nodes[1], chan_id_bc); assert!(bc_update_id != post_ev_bc_update_id); // Finally, check that there's nothing left to do on B<->C reconnect and the channel operates // fine. - nodes[2].node.peer_disconnected(nodes[1].node.get_our_node_id()); + nodes[2].node.peer_disconnected(node_b_id); reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[2])); send_payment(&nodes[1], &[&nodes[2]], 100_000); } @@ -3459,6 +4027,7 @@ fn do_test_glacial_peer_cant_hang(hold_chan_a: bool) { let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); let node_c_id = nodes[2].node.get_our_node_id(); let chan_id_ab = create_announced_chan_between_nodes(&nodes, 0, 1).2; @@ -3466,29 +4035,30 @@ fn do_test_glacial_peer_cant_hang(hold_chan_a: bool) { // Route a payment from A, through B, to C, then claim it on C. Replay the // `update_fulfill_htlc` twice on B to check that B doesn't hang. - let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000); + let (payment_preimage, payment_hash, ..) = + route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000); nodes[2].node.claim_funds(payment_preimage); check_added_monitors(&nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash, 1_000_000); - let cs_updates = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id()); + let cs_updates = get_htlc_update_msgs(&nodes[2], &node_b_id); if hold_chan_a { // The first update will be on the A <-> B channel, which we optionally allow to complete. chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); } - nodes[1].node.handle_update_fulfill_htlc(nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]); + nodes[1].node.handle_update_fulfill_htlc(node_c_id, &cs_updates.update_fulfill_htlcs[0]); check_added_monitors(&nodes[1], 1); if !hold_chan_a { - let bs_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]); + let bs_updates = get_htlc_update_msgs(&nodes[1], &node_a_id); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &bs_updates.update_fulfill_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false); expect_payment_sent!(&nodes[0], payment_preimage); } - nodes[1].node.peer_disconnected(nodes[2].node.get_our_node_id()); - nodes[2].node.peer_disconnected(nodes[1].node.get_our_node_id()); + nodes[1].node.peer_disconnected(node_c_id); + nodes[2].node.peer_disconnected(node_b_id); let mut reconnect = ReconnectArgs::new(&nodes[1], &nodes[2]); reconnect.pending_htlc_claims = (1, 0); @@ -3506,8 +4076,9 @@ fn do_test_glacial_peer_cant_hang(hold_chan_a: bool) { // With the A<->B preimage persistence not yet complete, the B<->C channel is stuck // waiting. 
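Concretely, "stuck waiting" in the lines below means the send still returns `Ok` (the new HTLC is simply held back), so the blockage is only observable as an absence of side effects until the A<->B update completes. Condensed from the surrounding code:

    // Sending succeeds at the API level even while the channel is blocked...
    let onion = RecipientOnionFields::secret_only(payment_secret_2);
    let id = PaymentId(payment_hash_2.0);
    nodes[1].node.send_payment_with_route(route, payment_hash_2, onion, id).unwrap();
    // ...but no monitor update or event appears until A<->B persistence completes:
    check_added_monitors(&nodes[1], 0);
    let (ab_update_id, _) = get_latest_mon_update_id(&nodes[1], chan_id_ab);
    assert!(nodes[1]
        .chain_monitor
        .chain_monitor
        .channel_monitor_updated(chan_id_ab, ab_update_id)
        .is_ok());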
- nodes[1].node.send_payment_with_route(route, payment_hash_2, - RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap(); + let onion_2 = RecipientOnionFields::secret_only(payment_secret_2); + let id_2 = PaymentId(payment_hash_2.0); + nodes[1].node.send_payment_with_route(route, payment_hash_2, onion_2, id_2).unwrap(); check_added_monitors(&nodes[1], 0); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); @@ -3515,36 +4086,55 @@ fn do_test_glacial_peer_cant_hang(hold_chan_a: bool) { // ...but once we complete the A<->B channel preimage persistence, the B<->C channel // unlocks and we send both peers commitment updates. - let (ab_update_id, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone(); - assert!(nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(chan_id_ab, ab_update_id).is_ok()); + let (ab_update_id, _) = get_latest_mon_update_id(&nodes[1], chan_id_ab); + assert!(nodes[1] + .chain_monitor + .chain_monitor + .channel_monitor_updated(chan_id_ab, ab_update_id) + .is_ok()); let mut msg_events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(msg_events.len(), 2); check_added_monitors(&nodes[1], 2); - let mut c_update = msg_events.iter() - .filter(|ev| matches!(ev, MessageSendEvent::UpdateHTLCs { node_id, .. } if *node_id == node_c_id)) - .cloned().collect::<Vec<_>>(); - let a_filtermap = |ev| if let MessageSendEvent::UpdateHTLCs { node_id, channel_id: _, updates } = ev { - if node_id == node_a_id { - Some(updates) + let mut c_update = msg_events + .iter() + .filter( + |ev| matches!(ev, MessageSendEvent::UpdateHTLCs { node_id, .. } if *node_id == node_c_id), + ) + .cloned() + .collect::<Vec<_>>(); + let a_filtermap = |ev| { + if let MessageSendEvent::UpdateHTLCs { node_id, channel_id: _, updates } = ev { + if node_id == node_a_id { + Some(updates) + } else { + None + } } else { None } - } else { - None }; let a_update = msg_events.drain(..).filter_map(|ev| a_filtermap(ev)).collect::<Vec<_>>(); assert_eq!(a_update.len(), 1); assert_eq!(c_update.len(), 1); - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &a_update[0].update_fulfill_htlcs[0]); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &a_update[0].update_fulfill_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], a_update[0].commitment_signed, false); expect_payment_sent(&nodes[0], payment_preimage, None, true, true); expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); - pass_along_path(&nodes[1], &[&nodes[2]], 1_000_000, payment_hash_2, Some(payment_secret_2), c_update.pop().unwrap(), true, None); + pass_along_path( + &nodes[1], + &[&nodes[2]], + 1_000_000, + payment_hash_2, + Some(payment_secret_2), + c_update.pop().unwrap(), + true, + None, + ); claim_payment(&nodes[1], &[&nodes[2]], payment_preimage_2); } } @@ -3565,6 +4155,11 @@ fn test_partial_claim_mon_update_compl_actions() { let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let node_d_id = nodes[3].node.get_our_node_id(); + let chan_1_scid = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id; let chan_2_scid = create_announced_chan_between_nodes(&nodes, 0, 2).0.contents.short_channel_id; let (chan_3_update, _, chan_3_id, ..)
@@ -3572,16 +4167,18 @@ fn test_partial_claim_mon_update_compl_actions() {
 	let (chan_4_update, _, chan_4_id, ..) = create_announced_chan_between_nodes(&nodes, 2, 3);
 	let chan_4_scid = chan_4_update.contents.short_channel_id;
-	let (mut route, payment_hash, preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000);
+	let (mut route, payment_hash, preimage, payment_secret) =
+		get_route_and_payment_hash!(&nodes[0], nodes[3], 100000);
 	let path = route.paths[0].clone();
 	route.paths.push(path);
-	route.paths[0].hops[0].pubkey = nodes[1].node.get_our_node_id();
+	route.paths[0].hops[0].pubkey = node_b_id;
 	route.paths[0].hops[0].short_channel_id = chan_1_scid;
 	route.paths[0].hops[1].short_channel_id = chan_3_scid;
-	route.paths[1].hops[0].pubkey = nodes[2].node.get_our_node_id();
+	route.paths[1].hops[0].pubkey = node_c_id;
 	route.paths[1].hops[0].short_channel_id = chan_2_scid;
 	route.paths[1].hops[1].short_channel_id = chan_4_scid;
-	send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], 200_000, payment_hash, payment_secret);
+	let paths = &[&[&nodes[1], &nodes[3]][..], &[&nodes[2], &nodes[3]][..]];
+	send_along_route_with_secret(&nodes[0], route, paths, 200_000, payment_hash, payment_secret);
 	// Claim along both paths, but only complete one of the two monitor updates.
 	chanmon_cfgs[3].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
@@ -3595,21 +4192,21 @@
 	// blocks.
 	nodes[3].chain_monitor.complete_sole_pending_chan_update(&chan_3_id);
 	expect_payment_claimed!(&nodes[3], payment_hash, 200_000);
-	let updates = get_htlc_update_msgs(&nodes[3], &nodes[1].node.get_our_node_id());
+	let updates = get_htlc_update_msgs(&nodes[3], &node_b_id);
-	nodes[1].node.handle_update_fulfill_htlc(nodes[3].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+	nodes[1].node.handle_update_fulfill_htlc(node_d_id, &updates.update_fulfill_htlcs[0]);
 	check_added_monitors(&nodes[1], 1);
 	expect_payment_forwarded!(nodes[1], nodes[0], nodes[3], Some(1000), false, false);
-	let _bs_updates_for_a = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id());
+	let _bs_updates_for_a = get_htlc_update_msgs(&nodes[1], &node_a_id);
-	nodes[1].node.handle_commitment_signed_batch_test(nodes[3].node.get_our_node_id(), &updates.commitment_signed);
+	nodes[1].node.handle_commitment_signed_batch_test(node_d_id, &updates.commitment_signed);
 	check_added_monitors(&nodes[1], 1);
-	let (bs_raa, bs_cs) = get_revoke_commit_msgs(&nodes[1], &nodes[3].node.get_our_node_id());
+	let (bs_raa, bs_cs) = get_revoke_commit_msgs(&nodes[1], &node_d_id);
-	nodes[3].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_raa);
+	nodes[3].node.handle_revoke_and_ack(node_b_id, &bs_raa);
 	check_added_monitors(&nodes[3], 0);
-	nodes[3].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_cs);
+	nodes[3].node.handle_commitment_signed_batch_test(node_b_id, &bs_cs);
 	check_added_monitors(&nodes[3], 0);
 	assert!(nodes[3].node.get_and_clear_pending_msg_events().is_empty());
@@ -3624,37 +4221,39 @@ fn test_partial_claim_mon_update_compl_actions() {
 	assert_eq!(ds_msgs.len(), 2);
 	check_added_monitors(&nodes[3], 2);
-	match remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut ds_msgs) {
+	match remove_first_msg_event_to_node(&node_b_id, &mut ds_msgs) {
 		MessageSendEvent::SendRevokeAndACK { msg, .. } => {
-			nodes[1].node.handle_revoke_and_ack(nodes[3].node.get_our_node_id(), &msg);
+			nodes[1].node.handle_revoke_and_ack(node_d_id, &msg);
 			check_added_monitors(&nodes[1], 1);
-		}
+		},
 		_ => panic!(),
 	}
-	match remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut ds_msgs) {
+	match remove_first_msg_event_to_node(&node_c_id, &mut ds_msgs) {
 		MessageSendEvent::UpdateHTLCs { updates, .. } => {
-			nodes[2].node.handle_update_fulfill_htlc(nodes[3].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+			nodes[2].node.handle_update_fulfill_htlc(node_d_id, &updates.update_fulfill_htlcs[0]);
 			check_added_monitors(&nodes[2], 1);
 			expect_payment_forwarded!(nodes[2], nodes[0], nodes[3], Some(1000), false, false);
-			let _cs_updates_for_a = get_htlc_update_msgs(&nodes[2], &nodes[0].node.get_our_node_id());
+			let _cs_updates_for_a = get_htlc_update_msgs(&nodes[2], &node_a_id);
-			nodes[2].node.handle_commitment_signed_batch_test(nodes[3].node.get_our_node_id(), &updates.commitment_signed);
+			nodes[2]
+				.node
+				.handle_commitment_signed_batch_test(node_d_id, &updates.commitment_signed);
 			check_added_monitors(&nodes[2], 1);
 		},
 		_ => panic!(),
 	}
-	let (cs_raa, cs_cs) = get_revoke_commit_msgs(&nodes[2], &nodes[3].node.get_our_node_id());
+	let (cs_raa, cs_cs) = get_revoke_commit_msgs(&nodes[2], &node_d_id);
-	nodes[3].node.handle_revoke_and_ack(nodes[2].node.get_our_node_id(), &cs_raa);
+	nodes[3].node.handle_revoke_and_ack(node_c_id, &cs_raa);
 	check_added_monitors(&nodes[3], 1);
-	nodes[3].node.handle_commitment_signed_batch_test(nodes[2].node.get_our_node_id(), &cs_cs);
+	nodes[3].node.handle_commitment_signed_batch_test(node_c_id, &cs_cs);
 	check_added_monitors(&nodes[3], 1);
-	let ds_raa = get_event_msg!(nodes[3], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id());
-	nodes[2].node.handle_revoke_and_ack(nodes[3].node.get_our_node_id(), &ds_raa);
+	let ds_raa = get_event_msg!(nodes[3], MessageSendEvent::SendRevokeAndACK, node_c_id);
+	nodes[2].node.handle_revoke_and_ack(node_d_id, &ds_raa);
 	check_added_monitors(&nodes[2], 1);
 	// Our current `ChannelMonitor`s store preimages one RAA longer than they need to. That's nice
@@ -3669,7 +4268,6 @@ fn test_partial_claim_mon_update_compl_actions() {
 	assert!(!get_monitor!(nodes[3], chan_4_id).get_stored_preimages().contains_key(&payment_hash));
 }
-
 #[test]
 fn test_claim_to_closed_channel_blocks_forwarded_preimage_removal() {
 	// One of the last features for async persistence we implemented was the correct blocking of
 	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
 	let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
-	// First open channels, route a payment, and force-close the first hop.
-	let chan_a = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000);
-	let chan_b = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 500_000_000);
-
-	let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
+	let node_a_id = nodes[0].node.get_our_node_id();
+	let node_b_id = nodes[1].node.get_our_node_id();
+	let node_c_id = nodes[2].node.get_our_node_id();
-	nodes[0].node.force_close_broadcasting_latest_txn(&chan_a.2, &nodes[1].node.get_our_node_id(), String::new()).unwrap();
+	// First open channels, route a payment, and force-close the first hop.
+	let chan_a =
+		create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000);
+	let chan_b =
+		create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 500_000_000);
+
+	let (payment_preimage, payment_hash, ..) =
+		route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
+
+	nodes[0]
+		.node
+		.force_close_broadcasting_latest_txn(&chan_a.2, &node_b_id, String::new())
+		.unwrap();
 	check_added_monitors!(nodes[0], 1);
 	let a_reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) };
-	check_closed_event!(nodes[0], 1, a_reason, [nodes[1].node.get_our_node_id()], 1000000);
+	check_closed_event!(nodes[0], 1, a_reason, [node_b_id], 1000000);
 	check_closed_broadcast!(nodes[0], true);
 	let as_commit_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
@@ -3698,7 +4306,7 @@ fn test_claim_to_closed_channel_blocks_forwarded_preimage_removal() {
 	mine_transaction(&nodes[1], &as_commit_tx[0]);
 	check_added_monitors!(nodes[1], 1);
-	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 1000000);
+	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 1000000);
 	check_closed_broadcast!(nodes[1], true);
 	// Now that B has a pending forwarded payment across it with the inbound edge on-chain, claim
@@ -3707,16 +4315,19 @@ fn test_claim_to_closed_channel_blocks_forwarded_preimage_removal() {
 	check_added_monitors!(nodes[2], 1);
 	expect_payment_claimed!(nodes[2], payment_hash, 1_000_000);
-	let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
+	let updates = get_htlc_update_msgs!(nodes[2], node_b_id);
 	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
-	nodes[1].node.handle_update_fulfill_htlc(nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+	nodes[1].node.handle_update_fulfill_htlc(node_c_id, &updates.update_fulfill_htlcs[0]);
 	check_added_monitors!(nodes[1], 1);
 	commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false);
 	// At this point nodes[1] has the preimage and is waiting for the `ChannelMonitorUpdate` for
 	// channel A to hit disk. Until it does so, it shouldn't ever let the preimage disappear from
 	// channel B's `ChannelMonitor`
-	assert!(get_monitor!(nodes[1], chan_b.2).get_all_current_outbound_htlcs().iter().any(|(_, (_, preimage))| *preimage == Some(payment_preimage)));
+	assert!(get_monitor!(nodes[1], chan_b.2)
+		.get_all_current_outbound_htlcs()
+		.iter()
+		.any(|(_, (_, preimage))| *preimage == Some(payment_preimage)));
 	// Once we complete the `ChannelMonitorUpdate` on channel A, and the `ChannelManager` processes
 	// background events (via `get_and_clear_pending_msg_events`), the final `ChannelMonitorUpdate`
@@ -3729,7 +4340,10 @@ fn test_claim_to_closed_channel_blocks_forwarded_preimage_removal() {
 	nodes[1].chain_monitor.complete_sole_pending_chan_update(&chan_a.2);
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 	check_added_monitors!(nodes[1], 1);
-	assert!(!get_monitor!(nodes[1], chan_b.2).get_all_current_outbound_htlcs().iter().any(|(_, (_, preimage))| *preimage == Some(payment_preimage)));
+	assert!(!get_monitor!(nodes[1], chan_b.2)
+		.get_all_current_outbound_htlcs()
+		.iter()
+		.any(|(_, (_, preimage))| *preimage == Some(payment_preimage)));
 	expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], None, true, false);
 }
@@ -3744,15 +4358,22 @@ fn test_claim_to_closed_channel_blocks_claimed_event() {
 	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
 	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+	let node_a_id = nodes[0].node.get_our_node_id();
+	let node_b_id = nodes[1].node.get_our_node_id();
+
 	// First open channels, route a payment, and force-close the first hop.
-	let chan_a = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000);
+	let chan_a =
+		create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000);
 	let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
-	nodes[0].node.force_close_broadcasting_latest_txn(&chan_a.2, &nodes[1].node.get_our_node_id(), String::new()).unwrap();
+	nodes[0]
+		.node
+		.force_close_broadcasting_latest_txn(&chan_a.2, &node_b_id, String::new())
+		.unwrap();
 	check_added_monitors!(nodes[0], 1);
 	let a_reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) };
-	check_closed_event!(nodes[0], 1, a_reason, [nodes[1].node.get_our_node_id()], 1000000);
+	check_closed_event!(nodes[0], 1, a_reason, [node_b_id], 1000000);
 	check_closed_broadcast!(nodes[0], true);
 	let as_commit_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
@@ -3760,7 +4381,7 @@ fn test_claim_to_closed_channel_blocks_claimed_event() {
 	mine_transaction(&nodes[1], &as_commit_tx[0]);
 	check_added_monitors!(nodes[1], 1);
-	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 1000000);
+	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 1000000);
 	check_closed_broadcast!(nodes[1], true);
 	// Now that B has a pending payment with the inbound HTLC on a closed channel, claim the
@@ -3795,8 +4416,14 @@ fn test_single_channel_multiple_mpp() {
 	let node_chanmgrs = create_node_chanmgrs(9, &node_cfgs, &configs);
 	let mut nodes = create_network(9, &node_cfgs, &node_chanmgrs);
-	let node_7_id = nodes[7].node.get_our_node_id();
-	let node_8_id = nodes[8].node.get_our_node_id();
+	let node_b_id = nodes[1].node.get_our_node_id();
+	let node_c_id = nodes[2].node.get_our_node_id();
+	let node_d_id = nodes[3].node.get_our_node_id();
+	let node_e_id = nodes[4].node.get_our_node_id();
+	let node_f_id = nodes[5].node.get_our_node_id();
+	let node_g_id = nodes[6].node.get_our_node_id();
+	let node_h_id = nodes[7].node.get_our_node_id();
+	let node_i_id = nodes[8].node.get_our_node_id();
 	// Send an MPP payment in six parts along the path shown from top to bottom
 	// 0
@@ -3841,9 +4468,24 @@ fn test_single_channel_multiple_mpp() {
 	create_announced_chan_between_nodes_with_value(&nodes, 6, 7, 100_000, 0);
 	create_announced_chan_between_nodes_with_value(&nodes, 7, 8, 1_000_000, 0);
-	let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[8], 50_000_000);
-
-	send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[7], &nodes[8]], &[&nodes[2], &nodes[7], &nodes[8]], &[&nodes[3], &nodes[7], &nodes[8]], &[&nodes[4], &nodes[7], &nodes[8]], &[&nodes[5], &nodes[7], &nodes[8]], &[&nodes[6], &nodes[7], &nodes[8]]], 50_000_000, payment_hash, payment_secret);
+	let (mut route, payment_hash, payment_preimage, payment_secret) =
+		get_route_and_payment_hash!(&nodes[0], nodes[8], 50_000_000);
+
+	send_along_route_with_secret(
+		&nodes[0],
+		route,
+		&[
+			&[&nodes[1], &nodes[7], &nodes[8]],
+			&[&nodes[2], &nodes[7], &nodes[8]],
+			&[&nodes[3], &nodes[7], &nodes[8]],
+			&[&nodes[4], &nodes[7], &nodes[8]],
+			&[&nodes[5], &nodes[7], &nodes[8]],
+			&[&nodes[6], &nodes[7], &nodes[8]],
+		],
+		50_000_000,
+		payment_hash,
+		payment_secret,
+	);
 	let (do_a_write, blocker) = std::sync::mpsc::sync_channel(0);
 	*nodes[8].chain_monitor.write_blocker.lock().unwrap() = Some(blocker);
@@ -3883,23 +4525,23 @@ fn test_single_channel_multiple_mpp() {
 		do_a_write_background.send(()).unwrap();
 	});
 	block_thrd2.store(false, Ordering::Release);
-	let first_updates = get_htlc_update_msgs(&nodes[8], &nodes[7].node.get_our_node_id());
+	let first_updates = get_htlc_update_msgs(&nodes[8], &node_h_id);
 	thrd2.join().unwrap();
 	// Disconnect node 7 from all its peers so it doesn't bother to fail the HTLCs back
-	nodes[7].node.peer_disconnected(nodes[1].node.get_our_node_id());
-	nodes[7].node.peer_disconnected(nodes[2].node.get_our_node_id());
-	nodes[7].node.peer_disconnected(nodes[3].node.get_our_node_id());
-	nodes[7].node.peer_disconnected(nodes[4].node.get_our_node_id());
-	nodes[7].node.peer_disconnected(nodes[5].node.get_our_node_id());
-	nodes[7].node.peer_disconnected(nodes[6].node.get_our_node_id());
-
-	nodes[7].node.handle_update_fulfill_htlc(node_8_id, &first_updates.update_fulfill_htlcs[0]);
+	nodes[7].node.peer_disconnected(node_b_id);
+	nodes[7].node.peer_disconnected(node_c_id);
+	nodes[7].node.peer_disconnected(node_d_id);
+	nodes[7].node.peer_disconnected(node_e_id);
+	nodes[7].node.peer_disconnected(node_f_id);
+	nodes[7].node.peer_disconnected(node_g_id);
+
+	nodes[7].node.handle_update_fulfill_htlc(node_i_id, &first_updates.update_fulfill_htlcs[0]);
 	check_added_monitors(&nodes[7], 1);
 	expect_payment_forwarded!(nodes[7], nodes[1], nodes[8], Some(1000), false, false);
-	nodes[7].node.handle_commitment_signed_batch_test(node_8_id, &first_updates.commitment_signed);
+	nodes[7].node.handle_commitment_signed_batch_test(node_i_id, &first_updates.commitment_signed);
 	check_added_monitors(&nodes[7], 1);
-	let (raa, cs) = get_revoke_commit_msgs(&nodes[7], &node_8_id);
+	let (raa, cs) = get_revoke_commit_msgs(&nodes[7], &node_i_id);
 	// Now, handle the `revoke_and_ack` from node 7. Note that `claim_funds` is still blocked on
 	// our peer lock, so we have to release a write to let it process.
@@ -3918,7 +4560,7 @@ fn test_single_channel_multiple_mpp() {
 		do_a_write_background.send(()).unwrap();
 	});
 	block_thrd3.store(false, Ordering::Release);
-	nodes[8].node.handle_revoke_and_ack(node_7_id, &raa);
+	nodes[8].node.handle_revoke_and_ack(node_h_id, &raa);
 	thrd3.join().unwrap();
 	assert!(!thrd.is_finished());
@@ -3938,63 +4580,63 @@ fn test_single_channel_multiple_mpp() {
 	// Now drive everything to the end, at least as far as node 7 is concerned...
 	*nodes[8].chain_monitor.write_blocker.lock().unwrap() = None;
-	nodes[8].node.handle_commitment_signed_batch_test(node_7_id, &cs);
+	nodes[8].node.handle_commitment_signed_batch_test(node_h_id, &cs);
 	check_added_monitors(&nodes[8], 1);
-	let (updates, raa) = get_updates_and_revoke(&nodes[8], &nodes[7].node.get_our_node_id());
+	let (updates, raa) = get_updates_and_revoke(&nodes[8], &node_h_id);
-	nodes[7].node.handle_update_fulfill_htlc(node_8_id, &updates.update_fulfill_htlcs[0]);
+	nodes[7].node.handle_update_fulfill_htlc(node_i_id, &updates.update_fulfill_htlcs[0]);
 	expect_payment_forwarded!(nodes[7], nodes[2], nodes[8], Some(1000), false, false);
-	nodes[7].node.handle_update_fulfill_htlc(node_8_id, &updates.update_fulfill_htlcs[1]);
+	nodes[7].node.handle_update_fulfill_htlc(node_i_id, &updates.update_fulfill_htlcs[1]);
 	expect_payment_forwarded!(nodes[7], nodes[3], nodes[8], Some(1000), false, false);
 	let mut next_source = 4;
 	if let Some(update) = updates.update_fulfill_htlcs.get(2) {
-		nodes[7].node.handle_update_fulfill_htlc(node_8_id, update);
+		nodes[7].node.handle_update_fulfill_htlc(node_i_id, update);
 		expect_payment_forwarded!(nodes[7], nodes[4], nodes[8], Some(1000), false, false);
 		next_source += 1;
 	}
-	nodes[7].node.handle_commitment_signed_batch_test(node_8_id, &updates.commitment_signed);
-	nodes[7].node.handle_revoke_and_ack(node_8_id, &raa);
+	nodes[7].node.handle_commitment_signed_batch_test(node_i_id, &updates.commitment_signed);
+	nodes[7].node.handle_revoke_and_ack(node_i_id, &raa);
 	if updates.update_fulfill_htlcs.get(2).is_some() {
 		check_added_monitors(&nodes[7], 5);
 	} else {
 		check_added_monitors(&nodes[7], 4);
 	}
-	let (raa, cs) = get_revoke_commit_msgs(&nodes[7], &node_8_id);
+	let (raa, cs) = get_revoke_commit_msgs(&nodes[7], &node_i_id);
-	nodes[8].node.handle_revoke_and_ack(node_7_id, &raa);
-	nodes[8].node.handle_commitment_signed_batch_test(node_7_id, &cs);
+	nodes[8].node.handle_revoke_and_ack(node_h_id, &raa);
+	nodes[8].node.handle_commitment_signed_batch_test(node_h_id, &cs);
 	check_added_monitors(&nodes[8], 2);
-	let (updates, raa) = get_updates_and_revoke(&nodes[8], &node_7_id);
+	let (updates, raa) = get_updates_and_revoke(&nodes[8], &node_h_id);
-	nodes[7].node.handle_update_fulfill_htlc(node_8_id, &updates.update_fulfill_htlcs[0]);
+	nodes[7].node.handle_update_fulfill_htlc(node_i_id, &updates.update_fulfill_htlcs[0]);
 	expect_payment_forwarded!(nodes[7], nodes[next_source], nodes[8], Some(1000), false, false);
 	next_source += 1;
-	nodes[7].node.handle_update_fulfill_htlc(node_8_id, &updates.update_fulfill_htlcs[1]);
+	nodes[7].node.handle_update_fulfill_htlc(node_i_id, &updates.update_fulfill_htlcs[1]);
 	expect_payment_forwarded!(nodes[7], nodes[next_source], nodes[8], Some(1000), false, false);
 	next_source += 1;
 	if let Some(update) = updates.update_fulfill_htlcs.get(2) {
-		nodes[7].node.handle_update_fulfill_htlc(node_8_id, update);
+		nodes[7].node.handle_update_fulfill_htlc(node_i_id, update);
 		expect_payment_forwarded!(nodes[7], nodes[next_source], nodes[8], Some(1000), false, false);
 	}
-	nodes[7].node.handle_commitment_signed_batch_test(node_8_id, &updates.commitment_signed);
-	nodes[7].node.handle_revoke_and_ack(node_8_id, &raa);
+	nodes[7].node.handle_commitment_signed_batch_test(node_i_id, &updates.commitment_signed);
+	nodes[7].node.handle_revoke_and_ack(node_i_id, &raa);
 	if updates.update_fulfill_htlcs.get(2).is_some() {
 		check_added_monitors(&nodes[7], 5);
 	} else {
 		check_added_monitors(&nodes[7], 4);
 	}
-	let (raa, cs) = get_revoke_commit_msgs(&nodes[7], &node_8_id);
+	let (raa, cs) = get_revoke_commit_msgs(&nodes[7], &node_i_id);
-	nodes[8].node.handle_revoke_and_ack(node_7_id, &raa);
-	nodes[8].node.handle_commitment_signed_batch_test(node_7_id, &cs);
+	nodes[8].node.handle_revoke_and_ack(node_h_id, &raa);
+	nodes[8].node.handle_commitment_signed_batch_test(node_h_id, &cs);
 	check_added_monitors(&nodes[8], 2);
-	let raa = get_event_msg!(nodes[8], MessageSendEvent::SendRevokeAndACK, node_7_id);
-	nodes[7].node.handle_revoke_and_ack(node_8_id, &raa);
+	let raa = get_event_msg!(nodes[8], MessageSendEvent::SendRevokeAndACK, node_h_id);
+	nodes[7].node.handle_revoke_and_ack(node_i_id, &raa);
 	check_added_monitors(&nodes[7], 1);
 }
diff --git a/lightning/src/ln/max_payment_path_len_tests.rs b/lightning/src/ln/max_payment_path_len_tests.rs
index 95a1fbaaa10..1fb948ffd28 100644
--- a/lightning/src/ln/max_payment_path_len_tests.rs
+++ b/lightning/src/ln/max_payment_path_len_tests.rs
@@ -10,14 +10,14 @@
 //! Tests for calculating the maximum length of a path based on the payment metadata, custom TLVs,
 //! and/or blinded paths present.
-use bitcoin::secp256k1::{Secp256k1, PublicKey};
+use crate::blinded_path::payment::{
+	BlindedPayInfo, BlindedPaymentPath, Bolt12RefundContext, PaymentConstraints, PaymentContext,
+	UnauthenticatedReceiveTlvs,
+};
 use crate::blinded_path::BlindedHop;
-use crate::blinded_path::payment::{BlindedPayInfo, BlindedPaymentPath, Bolt12RefundContext, PaymentConstraints, PaymentContext, UnauthenticatedReceiveTlvs};
 use crate::events::Event;
-use crate::types::payment::PaymentSecret;
 use crate::ln::blinded_payment_tests::get_blinded_route_parameters;
 use crate::ln::channelmanager::PaymentId;
-use crate::types::features::BlindedHopFeatures;
 use crate::ln::functional_test_utils::*;
 use crate::ln::msgs;
 use crate::ln::msgs::{BaseMessageHandler, OnionMessageHandler};
@@ -26,11 +26,16 @@ use crate::ln::onion_utils::MIN_FINAL_VALUE_ESTIMATE_WITH_OVERPAY;
 use crate::ln::outbound_payment::{RecipientOnionFields, Retry, RetryableSendFailure};
 use crate::offers::nonce::Nonce;
 use crate::prelude::*;
-use crate::routing::router::{PaymentParameters, RouteParameters, RouteParametersConfig, DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA};
+use crate::routing::router::{
+	PaymentParameters, RouteParameters, RouteParametersConfig, DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA,
+};
 use crate::sign::NodeSigner;
+use crate::types::features::BlindedHopFeatures;
+use crate::types::payment::PaymentSecret;
 use crate::util::errors::APIError;
 use crate::util::ser::Writeable;
 use crate::util::test_utils;
+use bitcoin::secp256k1::{PublicKey, Secp256k1};
 // 3+32 (payload length and HMAC) + 2+8 (amt_to_forward) +
 // 2+4 (outgoing_cltv_value) + 2+8 (short_channel_id)
@@ -56,14 +61,16 @@ fn large_payment_metadata() {
 	// without exceeding the max onion packet size.
 	let final_payload_len_without_metadata = msgs::OutboundOnionPayload::Receive {
 		payment_data: Some(msgs::FinalOnionHopData {
-			payment_secret: PaymentSecret([0; 32]), total_msat: MIN_FINAL_VALUE_ESTIMATE_WITH_OVERPAY
+			payment_secret: PaymentSecret([0; 32]),
+			total_msat: MIN_FINAL_VALUE_ESTIMATE_WITH_OVERPAY,
 		}),
 		payment_metadata: None,
 		keysend_preimage: None,
 		custom_tlvs: &Vec::new(),
 		sender_intended_htlc_amt_msat: MIN_FINAL_VALUE_ESTIMATE_WITH_OVERPAY,
 		cltv_expiry_height: nodes[0].best_block_info().1 + DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA,
-	}.serialized_length();
+	}
+	.serialized_length();
 	let max_metadata_len = 1300
 		- 1 // metadata type
 		- crate::util::ser::BigSize(1200).serialized_length() // metadata length
@@ -73,46 +80,71 @@ fn large_payment_metadata() {
 	let mut payment_metadata = vec![42; max_metadata_len];
 	// Check that the maximum-size metadata is sendable.
-	let (mut route_0_1, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], &nodes[1], amt_msat);
-	let mut recipient_onion_max_md_size = RecipientOnionFields {
+	let (mut route_0_1, payment_hash, payment_preimage, payment_secret) =
+		get_route_and_payment_hash!(&nodes[0], &nodes[1], amt_msat);
+	let mut max_sized_onion = RecipientOnionFields {
 		payment_secret: Some(payment_secret),
 		payment_metadata: Some(payment_metadata.clone()),
 		custom_tlvs: Vec::new(),
 	};
-	nodes[0].node.send_payment(payment_hash, recipient_onion_max_md_size.clone(), PaymentId(payment_hash.0), route_0_1.route_params.clone().unwrap(), Retry::Attempts(0)).unwrap();
+	let route_params = route_0_1.route_params.clone().unwrap();
+	let id = PaymentId(payment_hash.0);
+	nodes[0]
+		.node
+		.send_payment(payment_hash, max_sized_onion.clone(), id, route_params, Retry::Attempts(0))
+		.unwrap();
 	check_added_monitors!(nodes[0], 1);
 	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
 	let path = &[&nodes[1]];
-	let args = PassAlongPathArgs::new(&nodes[0], path, amt_msat, payment_hash, events.pop().unwrap())
-		.with_payment_secret(payment_secret)
-		.with_payment_metadata(payment_metadata.clone());
+	let args =
+		PassAlongPathArgs::new(&nodes[0], path, amt_msat, payment_hash, events.pop().unwrap())
+			.with_payment_secret(payment_secret)
+			.with_payment_metadata(payment_metadata.clone());
 	do_pass_along_path(args);
-	claim_payment_along_route(
-		ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1]]], payment_preimage)
-	);
+	claim_payment_along_route(ClaimAlongRouteArgs::new(
+		&nodes[0],
+		&[&[&nodes[1]]],
+		payment_preimage,
+	));
 	// Check that the payment parameter for max path length will prevent us from routing past our
 	// next-hop peer given the payment_metadata size.
-	let (mut route_0_2, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(&nodes[0], &nodes[2], amt_msat);
+	let (mut route_0_2, payment_hash_2, payment_preimage_2, payment_secret_2) =
+		get_route_and_payment_hash!(&nodes[0], &nodes[2], amt_msat);
 	let mut route_params_0_2 = route_0_2.route_params.clone().unwrap();
 	route_params_0_2.payment_params.max_path_length = 1;
 	nodes[0].router.expect_find_route_query(route_params_0_2);
-	let err = nodes[0].node.send_payment(payment_hash_2, recipient_onion_max_md_size.clone(), PaymentId(payment_hash_2.0), route_0_2.route_params.clone().unwrap(), Retry::Attempts(0)).unwrap_err();
+
+	let id = PaymentId(payment_hash_2.0);
+	let route_params = route_0_2.route_params.clone().unwrap();
+	let err = nodes[0]
+		.node
+		.send_payment(payment_hash_2, max_sized_onion.clone(), id, route_params, Retry::Attempts(0))
+		.unwrap_err();
 	assert_eq!(err, RetryableSendFailure::RouteNotFound);
 	// If our payment_metadata contains 1 additional byte, we'll fail prior to pathfinding.
-	let mut recipient_onion_too_large_md = recipient_onion_max_md_size.clone();
-	recipient_onion_too_large_md.payment_metadata.as_mut().map(|mut md| md.push(42));
-	let err = nodes[0].node.send_payment(payment_hash, recipient_onion_too_large_md.clone(), PaymentId(payment_hash.0), route_0_1.route_params.clone().unwrap(), Retry::Attempts(0)).unwrap_err();
-	assert_eq!(err, RetryableSendFailure::OnionPacketSizeExceeded);
+	let mut too_large_onion = max_sized_onion.clone();
+	too_large_onion.payment_metadata.as_mut().map(|mut md| md.push(42));
-	// Confirm that we'll fail to construct an onion packet given this payment_metadata that's too
-	// large for even a 1-hop path.
+	// First confirm we'll fail to create the onion packet directly.
 	let secp_ctx = Secp256k1::signing_only();
 	route_0_1.paths[0].hops[0].fee_msat = MIN_FINAL_VALUE_ESTIMATE_WITH_OVERPAY;
 	route_0_1.paths[0].hops[0].cltv_expiry_delta = DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA;
-	let err = onion_utils::create_payment_onion(&secp_ctx, &route_0_1.paths[0], &test_utils::privkey(42), MIN_FINAL_VALUE_ESTIMATE_WITH_OVERPAY, &recipient_onion_too_large_md, nodes[0].best_block_info().1 + DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA, &payment_hash, &None, None, [0; 32]).unwrap_err();
+	let err = onion_utils::create_payment_onion(
+		&secp_ctx,
+		&route_0_1.paths[0],
+		&test_utils::privkey(42),
+		MIN_FINAL_VALUE_ESTIMATE_WITH_OVERPAY,
+		&too_large_onion,
+		nodes[0].best_block_info().1 + DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA,
+		&payment_hash,
+		&None,
+		None,
+		[0; 32],
+	)
+	.unwrap_err();
 	match err {
 		APIError::InvalidRoute { err } => {
 			assert_eq!(err, "Route size too large considering onion data");
@@ -120,28 +152,43 @@ fn large_payment_metadata() {
 		_ => panic!(),
 	}
+	let route_params = route_0_1.route_params.clone().unwrap();
+	let err = nodes[0]
+		.node
+		.send_payment(payment_hash_2, too_large_onion, id, route_params, Retry::Attempts(0))
+		.unwrap_err();
+	assert_eq!(err, RetryableSendFailure::OnionPacketSizeExceeded);
 	// If we remove enough payment_metadata bytes to allow for 2 hops, we're now able to send to
 	// nodes[2].
-	let mut recipient_onion_allows_2_hops = RecipientOnionFields {
+	let two_hop_metadata = vec![42; max_metadata_len - INTERMED_PAYLOAD_LEN_ESTIMATE];
+	let mut onion_allowing_2_hops = RecipientOnionFields {
 		payment_secret: Some(payment_secret_2),
-		payment_metadata: Some(vec![42; max_metadata_len - INTERMED_PAYLOAD_LEN_ESTIMATE]),
+		payment_metadata: Some(two_hop_metadata.clone()),
 		custom_tlvs: Vec::new(),
 	};
 	let mut route_params_0_2 = route_0_2.route_params.clone().unwrap();
 	route_params_0_2.payment_params.max_path_length = 2;
 	nodes[0].router.expect_find_route_query(route_params_0_2);
-	nodes[0].node.send_payment(payment_hash_2, recipient_onion_allows_2_hops.clone(), PaymentId(payment_hash_2.0), route_0_2.route_params.unwrap(), Retry::Attempts(0)).unwrap();
+	let route_params = route_0_2.route_params.unwrap();
+	nodes[0]
+		.node
+		.send_payment(payment_hash_2, onion_allowing_2_hops, id, route_params, Retry::Attempts(0))
+		.unwrap();
 	check_added_monitors!(nodes[0], 1);
 	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
 	let path = &[&nodes[1], &nodes[2]];
-	let args = PassAlongPathArgs::new(&nodes[0], path, amt_msat, payment_hash_2, events.pop().unwrap())
-		.with_payment_secret(payment_secret_2)
-		.with_payment_metadata(recipient_onion_allows_2_hops.payment_metadata.unwrap());
+	let args =
+		PassAlongPathArgs::new(&nodes[0], path, amt_msat, payment_hash_2, events.pop().unwrap())
+			.with_payment_secret(payment_secret_2)
+			.with_payment_metadata(two_hop_metadata);
 	do_pass_along_path(args);
-	claim_payment_along_route(
-		ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[2]]], payment_preimage_2)
-	);
+	claim_payment_along_route(ClaimAlongRouteArgs::new(
+		&nodes[0],
+		&[&[&nodes[1], &nodes[2]]],
+		payment_preimage_2,
+	));
 }
 #[test]
@@ -154,16 +201,18 @@ fn one_hop_blinded_path_with_custom_tlv() {
 	let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]);
 	let nodes = create_network(3, &node_cfgs, &node_chanmgrs);
 	create_announced_chan_between_nodes(&nodes, 0, 1);
-	let chan_upd_1_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0).0.contents;
+	let chan_upd_1_2 =
+		create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0).0.contents;
 	// Start with all nodes at the same height
-	connect_blocks(&nodes[0], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1);
-	connect_blocks(&nodes[1], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1);
-	connect_blocks(&nodes[2], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1);
+	connect_blocks(&nodes[0], 2 * CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1);
+	connect_blocks(&nodes[1], 2 * CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1);
+	connect_blocks(&nodes[2], 2 * CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1);
 	// Construct the route parameters for sending to nodes[2]'s 1-hop blinded path.
 	let amt_msat = 100_000;
-	let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[2], Some(amt_msat), None);
+	let (payment_preimage, payment_hash, payment_secret) =
+		get_payment_preimage_hash(&nodes[2], Some(amt_msat), None);
 	let payee_tlvs = UnauthenticatedReceiveTlvs {
 		payment_secret,
 		payment_constraints: PaymentConstraints {
@@ -177,9 +226,15 @@ fn one_hop_blinded_path_with_custom_tlv() {
 	let payee_tlvs = payee_tlvs.authenticate(nonce, &expanded_key);
 	let mut secp_ctx = Secp256k1::new();
 	let blinded_path = BlindedPaymentPath::new(
-		&[], nodes[2].node.get_our_node_id(), payee_tlvs, u64::MAX, TEST_FINAL_CLTV as u16,
-		&chanmon_cfgs[2].keys_manager, &secp_ctx
-	).unwrap();
+		&[],
+		nodes[2].node.get_our_node_id(),
+		payee_tlvs,
+		u64::MAX,
+		TEST_FINAL_CLTV as u16,
+		&chanmon_cfgs[2].keys_manager,
+		&secp_ctx,
+	)
+	.unwrap();
 	let route_params = RouteParameters::from_payment_params_and_value(
 		PaymentParameters::blinded(vec![blinded_path.clone()]),
 		amt_msat,
@@ -195,8 +250,9 @@ fn one_hop_blinded_path_with_custom_tlv() {
 		intro_node_blinding_point: Some(blinded_path.blinding_point()),
 		keysend_preimage: None,
 		invoice_request: None,
-		custom_tlvs: &Vec::new()
-	}.serialized_length();
+		custom_tlvs: &Vec::new(),
+	}
+	.serialized_length();
 	let max_custom_tlv_len = 1300
 		- crate::util::ser::BigSize(CUSTOM_TLV_TYPE).serialized_length() // custom TLV type
 		- crate::util::ser::BigSize(1200).serialized_length() // custom TLV length
@@ -205,52 +261,70 @@ fn one_hop_blinded_path_with_custom_tlv() {
 		- final_payload_len_without_custom_tlv;
 	// Check that we can send the maximum custom TLV with 1 blinded hop.
-	let recipient_onion_max_custom_tlv_size = RecipientOnionFields::spontaneous_empty()
+	let max_sized_onion = RecipientOnionFields::spontaneous_empty()
 		.with_custom_tlvs(vec![(CUSTOM_TLV_TYPE, vec![42; max_custom_tlv_len])])
 		.unwrap();
-	nodes[1].node.send_payment(payment_hash, recipient_onion_max_custom_tlv_size.clone(), PaymentId(payment_hash.0), route_params.clone(), Retry::Attempts(0)).unwrap();
+	let id = PaymentId(payment_hash.0);
+	let no_retry = Retry::Attempts(0);
+	nodes[1]
+		.node
+		.send_payment(payment_hash, max_sized_onion.clone(), id, route_params.clone(), no_retry)
+		.unwrap();
 	check_added_monitors(&nodes[1], 1);
 	let mut events = nodes[1].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
 	let path = &[&nodes[2]];
-	let args = PassAlongPathArgs::new(&nodes[1], path, amt_msat, payment_hash, events.pop().unwrap())
-		.with_payment_secret(payment_secret)
-		.with_custom_tlvs(recipient_onion_max_custom_tlv_size.custom_tlvs.clone());
+	let args =
+		PassAlongPathArgs::new(&nodes[1], path, amt_msat, payment_hash, events.pop().unwrap())
+			.with_payment_secret(payment_secret)
+			.with_custom_tlvs(max_sized_onion.custom_tlvs.clone());
 	do_pass_along_path(args);
 	claim_payment_along_route(
 		ClaimAlongRouteArgs::new(&nodes[1], &[&[&nodes[2]]], payment_preimage)
-			.with_custom_tlvs(recipient_onion_max_custom_tlv_size.custom_tlvs.clone())
+			.with_custom_tlvs(max_sized_onion.custom_tlvs.clone()),
 	);
 	// If 1 byte is added to the custom TLV value, we'll fail to send prior to pathfinding.
-	let mut recipient_onion_too_large_custom_tlv = recipient_onion_max_custom_tlv_size.clone();
-	recipient_onion_too_large_custom_tlv.custom_tlvs[0].1.push(42);
-	let err = nodes[1].node.send_payment(payment_hash, recipient_onion_too_large_custom_tlv, PaymentId(payment_hash.0), route_params.clone(), Retry::Attempts(0)).unwrap_err();
+	let mut too_large_custom_tlv_onion = max_sized_onion.clone();
+	too_large_custom_tlv_onion.custom_tlvs[0].1.push(42);
+	let err = nodes[1]
+		.node
+		.send_payment(payment_hash, too_large_custom_tlv_onion, id, route_params.clone(), no_retry)
+		.unwrap_err();
 	assert_eq!(err, RetryableSendFailure::OnionPacketSizeExceeded);
 	// With the maximum-size custom TLV, our max path length is limited to 1, so attempting to route
 	// nodes[0] -> nodes[2] will fail.
-	let err = nodes[0].node.send_payment(payment_hash, recipient_onion_max_custom_tlv_size.clone(), PaymentId(payment_hash.0), route_params.clone(), Retry::Attempts(0)).unwrap_err();
+	let err = nodes[0]
+		.node
+		.send_payment(payment_hash, max_sized_onion.clone(), id, route_params.clone(), no_retry)
+		.unwrap_err();
 	assert_eq!(err, RetryableSendFailure::RouteNotFound);
 	// If we remove enough custom TLV bytes to allow for 1 intermediate unblinded hop, we're now able
 	// to send nodes[0] -> nodes[2].
-	let mut recipient_onion_allows_2_hops = recipient_onion_max_custom_tlv_size.clone();
-	recipient_onion_allows_2_hops.custom_tlvs[0].1.resize(max_custom_tlv_len - INTERMED_PAYLOAD_LEN_ESTIMATE, 0);
-	nodes[0].node.send_payment(payment_hash, recipient_onion_allows_2_hops.clone(), PaymentId(payment_hash.0), route_params.clone(), Retry::Attempts(0)).unwrap();
+	let mut onion_allows_2_hops = max_sized_onion.clone();
+	onion_allows_2_hops.custom_tlvs[0]
+		.1
+		.resize(max_custom_tlv_len - INTERMED_PAYLOAD_LEN_ESTIMATE, 0);
+	nodes[0]
+		.node
+		.send_payment(payment_hash, onion_allows_2_hops.clone(), id, route_params.clone(), no_retry)
+		.unwrap();
 	check_added_monitors(&nodes[0], 1);
 	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
 	let path = &[&nodes[1], &nodes[2]];
-	let args = PassAlongPathArgs::new(&nodes[0], path, amt_msat, payment_hash, events.pop().unwrap())
-		.with_payment_secret(payment_secret)
-		.with_custom_tlvs(recipient_onion_allows_2_hops.custom_tlvs.clone());
+	let args =
+		PassAlongPathArgs::new(&nodes[0], path, amt_msat, payment_hash, events.pop().unwrap())
+			.with_payment_secret(payment_secret)
+			.with_custom_tlvs(onion_allows_2_hops.custom_tlvs.clone());
 	do_pass_along_path(args);
 	claim_payment_along_route(
 		ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[2]]], payment_preimage)
-			.with_custom_tlvs(recipient_onion_allows_2_hops.custom_tlvs)
+			.with_custom_tlvs(onion_allows_2_hops.custom_tlvs),
 	);
 }
@@ -264,10 +338,12 @@ fn blinded_path_with_custom_tlv() {
 	let nodes = create_network(4, &node_cfgs, &node_chanmgrs);
 	create_announced_chan_between_nodes(&nodes, 0, 1);
 	create_announced_chan_between_nodes(&nodes, 1, 2);
-	let chan_upd_2_3 = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 1_000_000, 0).0.contents;
+	let chan_upd_2_3 =
+		create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 1_000_000, 0).0.contents;
 	// Ensure all nodes are at the same height
-	let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32;
+	let node_max_height =
+		nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32;
 	connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1);
 	connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1);
 	connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1);
@@ -275,25 +351,35 @@ fn blinded_path_with_custom_tlv() {
 	// Construct the route parameters for sending to nodes[3]'s blinded path.
 	let amt_msat = 100_000;
-	let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[3], Some(amt_msat), None);
-	let route_params = get_blinded_route_parameters(amt_msat, payment_secret, 1, 1_0000_0000,
-		nodes.iter().skip(2).map(|n| n.node.get_our_node_id()).collect(), &[&chan_upd_2_3],
-		&chanmon_cfgs[3].keys_manager);
+	let (payment_preimage, payment_hash, payment_secret) =
+		get_payment_preimage_hash(&nodes[3], Some(amt_msat), None);
+	let route_params = get_blinded_route_parameters(
+		amt_msat,
+		payment_secret,
+		1,
+		1_0000_0000,
+		nodes.iter().skip(2).map(|n| n.node.get_our_node_id()).collect(),
+		&[&chan_upd_2_3],
+		&chanmon_cfgs[3].keys_manager,
+	);
 	// Calculate the maximum custom TLV value size where a valid onion packet is still possible.
 	const CUSTOM_TLV_TYPE: u64 = 65537;
 	let mut route = get_route(&nodes[1], &route_params).unwrap();
 	let reserved_packet_bytes_without_custom_tlv: usize = onion_utils::build_onion_payloads(
-		&route.paths[0], MIN_FINAL_VALUE_ESTIMATE_WITH_OVERPAY,
+		&route.paths[0],
+		MIN_FINAL_VALUE_ESTIMATE_WITH_OVERPAY,
 		&RecipientOnionFields::spontaneous_empty(),
-		nodes[0].best_block_info().1 + DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA, &None,
-		None, None
+		nodes[0].best_block_info().1 + DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA,
+		&None,
+		None,
+		None,
 	)
-	.unwrap()
-	.0
-	.iter()
-	.map(|payload| payload.serialized_length() + PAYLOAD_HMAC_LEN)
-	.sum();
+	.unwrap()
+	.0
+	.iter()
+	.map(|payload| payload.serialized_length() + PAYLOAD_HMAC_LEN)
+	.sum();
 	let max_custom_tlv_len = 1300
 		- crate::util::ser::BigSize(CUSTOM_TLV_TYPE).serialized_length() // custom TLV type
 		- crate::util::ser::BigSize(1200).serialized_length() // custom TLV length
@@ -301,35 +387,56 @@ fn blinded_path_with_custom_tlv() {
 		- reserved_packet_bytes_without_custom_tlv;
 	// Check that we can send the maximum custom TLV size with 0 intermediate unblinded hops.
-	let recipient_onion_max_custom_tlv_size = RecipientOnionFields::spontaneous_empty()
+	let max_sized_onion = RecipientOnionFields::spontaneous_empty()
 		.with_custom_tlvs(vec![(CUSTOM_TLV_TYPE, vec![42; max_custom_tlv_len])])
 		.unwrap();
-	nodes[1].node.send_payment(payment_hash, recipient_onion_max_custom_tlv_size.clone(), PaymentId(payment_hash.0), route_params.clone(), Retry::Attempts(0)).unwrap();
+	let no_retry = Retry::Attempts(0);
+	let id = PaymentId(payment_hash.0);
+	nodes[1]
+		.node
+		.send_payment(payment_hash, max_sized_onion.clone(), id, route_params.clone(), no_retry)
+		.unwrap();
 	check_added_monitors(&nodes[1], 1);
 	let mut events = nodes[1].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
 	let path = &[&nodes[2], &nodes[3]];
-	let args = PassAlongPathArgs::new(&nodes[1], path, amt_msat, payment_hash, events.pop().unwrap())
-		.with_payment_secret(payment_secret)
-		.with_custom_tlvs(recipient_onion_max_custom_tlv_size.custom_tlvs.clone());
+	let args =
+		PassAlongPathArgs::new(&nodes[1], path, amt_msat, payment_hash, events.pop().unwrap())
+			.with_payment_secret(payment_secret)
+			.with_custom_tlvs(max_sized_onion.custom_tlvs.clone());
 	do_pass_along_path(args);
 	claim_payment_along_route(
 		ClaimAlongRouteArgs::new(&nodes[1], &[&[&nodes[2], &nodes[3]]], payment_preimage)
-			.with_custom_tlvs(recipient_onion_max_custom_tlv_size.custom_tlvs.clone())
+			.with_custom_tlvs(max_sized_onion.custom_tlvs.clone()),
 	);
 	// If 1 byte is added to the custom TLV value, we'll fail to send prior to pathfinding.
-	let mut recipient_onion_too_large_custom_tlv = recipient_onion_max_custom_tlv_size.clone();
-	recipient_onion_too_large_custom_tlv.custom_tlvs[0].1.push(42);
-	let err = nodes[1].node.send_payment(payment_hash, recipient_onion_too_large_custom_tlv.clone(), PaymentId(payment_hash.0), route_params.clone(), Retry::Attempts(0)).unwrap_err();
+	let mut too_large_onion = max_sized_onion.clone();
+	too_large_onion.custom_tlvs[0].1.push(42);
+	let err = nodes[1]
+		.node
+		.send_payment(payment_hash, too_large_onion.clone(), id, route_params.clone(), no_retry)
+		.unwrap_err();
 	assert_eq!(err, RetryableSendFailure::OnionPacketSizeExceeded);
 	// Confirm that we can't construct an onion packet given this too-large custom TLV.
 	let secp_ctx = Secp256k1::signing_only();
 	route.paths[0].hops[0].fee_msat = MIN_FINAL_VALUE_ESTIMATE_WITH_OVERPAY;
 	route.paths[0].hops[0].cltv_expiry_delta = DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA;
-	let err = onion_utils::create_payment_onion(&secp_ctx, &route.paths[0], &test_utils::privkey(42), MIN_FINAL_VALUE_ESTIMATE_WITH_OVERPAY, &recipient_onion_too_large_custom_tlv, nodes[0].best_block_info().1 + DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA, &payment_hash, &None, None, [0; 32]).unwrap_err();
+	let err = onion_utils::create_payment_onion(
+		&secp_ctx,
+		&route.paths[0],
+		&test_utils::privkey(42),
+		MIN_FINAL_VALUE_ESTIMATE_WITH_OVERPAY,
+		&too_large_onion,
+		nodes[0].best_block_info().1 + DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA,
+		&payment_hash,
+		&None,
+		None,
+		[0; 32],
+	)
+	.unwrap_err();
 	match err {
 		APIError::InvalidRoute { err } => {
 			assert_eq!(err, "Route size too large considering onion data");
@@ -339,26 +446,39 @@ fn blinded_path_with_custom_tlv() {
 	// With the maximum-size custom TLV, we can't have any intermediate unblinded hops, so attempting
 	// to route nodes[0] -> nodes[3] will fail.
-	let err = nodes[0].node.send_payment(payment_hash, recipient_onion_max_custom_tlv_size.clone(), PaymentId(payment_hash.0), route_params.clone(), Retry::Attempts(0)).unwrap_err();
+	let err = nodes[0]
+		.node
+		.send_payment(payment_hash, max_sized_onion.clone(), id, route_params.clone(), no_retry)
+		.unwrap_err();
 	assert_eq!(err, RetryableSendFailure::RouteNotFound);
 	// If we remove enough custom TLV bytes to allow for 1 intermediate unblinded hop, we're now able
 	// to send nodes[0] -> nodes[3].
-	let mut recipient_onion_allows_2_hops = recipient_onion_max_custom_tlv_size.clone();
-	recipient_onion_allows_2_hops.custom_tlvs[0].1.resize(max_custom_tlv_len - INTERMED_PAYLOAD_LEN_ESTIMATE, 0);
-	nodes[0].node.send_payment(payment_hash, recipient_onion_allows_2_hops.clone(), PaymentId(payment_hash.0), route_params.clone(), Retry::Attempts(0)).unwrap();
+	let mut onion_allowing_2_hops = max_sized_onion.clone();
+	onion_allowing_2_hops.custom_tlvs[0]
+		.1
+		.resize(max_custom_tlv_len - INTERMED_PAYLOAD_LEN_ESTIMATE, 0);
+	nodes[0]
+		.node
+		.send_payment(payment_hash, onion_allowing_2_hops.clone(), id, route_params, no_retry)
+		.unwrap();
 	check_added_monitors(&nodes[0], 1);
 	let mut events = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(events.len(), 1);
 	let path = &[&nodes[1], &nodes[2], &nodes[3]];
-	let args = PassAlongPathArgs::new(&nodes[0], path, amt_msat, payment_hash, events.pop().unwrap())
-		.with_payment_secret(payment_secret)
-		.with_custom_tlvs(recipient_onion_allows_2_hops.custom_tlvs.clone());
+	let args =
+		PassAlongPathArgs::new(&nodes[0], path, amt_msat, payment_hash, events.pop().unwrap())
+			.with_payment_secret(payment_secret)
+			.with_custom_tlvs(onion_allowing_2_hops.custom_tlvs.clone());
 	do_pass_along_path(args);
 	claim_payment_along_route(
-		ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[2], &nodes[3]]], payment_preimage)
-			.with_custom_tlvs(recipient_onion_allows_2_hops.custom_tlvs)
+		ClaimAlongRouteArgs::new(
+			&nodes[0],
+			&[&[&nodes[1], &nodes[2], &nodes[3]]],
+			payment_preimage,
+		)
+		.with_custom_tlvs(onion_allowing_2_hops.custom_tlvs),
 	);
 }
@@ -372,40 +492,53 @@ fn bolt12_invoice_too_large_blinded_paths() {
 	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 	create_announced_chan_between_nodes(&nodes, 0, 1);
-	nodes[1].router.expect_blinded_payment_paths(vec![
-		BlindedPaymentPath::from_raw(
-			PublicKey::from_slice(&[2; 33]).unwrap(), PublicKey::from_slice(&[2; 33]).unwrap(),
-			vec![
-				BlindedHop {
-					blinded_node_id: PublicKey::from_slice(&[2; 33]).unwrap(),
-					encrypted_payload: vec![42; 1300],
-				},
-				BlindedHop {
-					blinded_node_id: PublicKey::from_slice(&[2; 33]).unwrap(),
-					encrypted_payload: vec![42; 1300],
-				},
-			],
-			BlindedPayInfo {
-				fee_base_msat: 42,
-				fee_proportional_millionths: 42,
-				cltv_expiry_delta: 42,
-				htlc_minimum_msat: 42,
-				htlc_maximum_msat: 42_000_000,
-				features: BlindedHopFeatures::empty(),
-			}
-		)
-	]);
+	nodes[1].router.expect_blinded_payment_paths(vec![BlindedPaymentPath::from_raw(
+		PublicKey::from_slice(&[2; 33]).unwrap(),
+		PublicKey::from_slice(&[2; 33]).unwrap(),
+		vec![
+			BlindedHop {
+				blinded_node_id: PublicKey::from_slice(&[2; 33]).unwrap(),
+				encrypted_payload: vec![42; 1300],
+			},
+			BlindedHop {
+				blinded_node_id: PublicKey::from_slice(&[2; 33]).unwrap(),
+				encrypted_payload: vec![42; 1300],
+			},
+		],
+		BlindedPayInfo {
+			fee_base_msat: 42,
+			fee_proportional_millionths: 42,
+			cltv_expiry_delta: 42,
+			htlc_minimum_msat: 42,
+			htlc_maximum_msat: 42_000_000,
+			features: BlindedHopFeatures::empty(),
+		},
+	)]);
 	let offer = nodes[1].node.create_offer_builder(None).unwrap().build().unwrap();
 	let payment_id = PaymentId([1; 32]);
-	nodes[0].node.pay_for_offer(&offer, None, Some(5000), None, payment_id, Retry::Attempts(0), RouteParametersConfig::default()).unwrap();
-	let invreq_om = nodes[0].onion_messenger.next_onion_message_for_peer(nodes[1].node.get_our_node_id()).unwrap();
+	let route_config = RouteParametersConfig::default();
+	nodes[0]
+		.node
+		.pay_for_offer(&offer, None, Some(5000), None, payment_id, Retry::Attempts(0), route_config)
+		.unwrap();
+	let invreq_om = nodes[0]
+		.onion_messenger
+		.next_onion_message_for_peer(nodes[1].node.get_our_node_id())
+		.unwrap();
 	nodes[1].onion_messenger.handle_onion_message(nodes[0].node.get_our_node_id(), &invreq_om);
-	let invoice_om = nodes[1].onion_messenger.next_onion_message_for_peer(nodes[0].node.get_our_node_id()).unwrap();
+	let invoice_om = nodes[1]
+		.onion_messenger
+		.next_onion_message_for_peer(nodes[0].node.get_our_node_id())
+		.unwrap();
 	nodes[0].onion_messenger.handle_onion_message(nodes[1].node.get_our_node_id(), &invoice_om);
 	// TODO: assert on the invoice error once we support replying to invoice OMs with failure info
-	nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Failed paying invoice: OnionPacketSizeExceeded", 1);
+	nodes[0].logger.assert_log_contains(
+		"lightning::ln::channelmanager",
+		"Failed paying invoice: OnionPacketSizeExceeded",
+		1,
+	);
 	let events = nodes[0].node.get_and_clear_pending_events();
 	assert_eq!(events.len(), 1);
diff --git a/rustfmt_excluded_files b/rustfmt_excluded_files
index 34316d2c2f9..fabf324c84b 100644
--- a/rustfmt_excluded_files
+++ b/rustfmt_excluded_files
@@ -1,19 +1,15 @@
-lightning/src/chain/chaininterface.rs
 lightning/src/chain/chainmonitor.rs
 lightning/src/chain/channelmonitor.rs
 lightning/src/chain/mod.rs
 lightning/src/chain/onchaintx.rs
 lightning/src/chain/package.rs
-lightning/src/chain/transaction.rs
 lightning/src/lib.rs
 lightning/src/ln/async_signer_tests.rs
 lightning/src/ln/blinded_payment_tests.rs
 lightning/src/ln/chan_utils.rs
-lightning/src/ln/chanmon_update_fail_tests.rs
 lightning/src/ln/channel.rs
 lightning/src/ln/channelmanager.rs
 lightning/src/ln/functional_test_utils.rs
-lightning/src/ln/max_payment_path_len_tests.rs
 lightning/src/ln/mod.rs
 lightning/src/ln/monitor_tests.rs
 lightning/src/ln/offers_tests.rs
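A note for readers following the max_payment_path_len_tests.rs hunks above: every test there budgets a `payment_metadata` or custom-TLV size against the fixed 1300-byte onion hop-data area, subtracting the TLV type and length overhead plus one estimated intermediate-hop payload per extra unblinded hop. The following is a minimal, self-contained sketch of that arithmetic under stated assumptions; the helper names, the standalone BigSize length table, and the example figures are illustrative, not LDK API.

```rust
// Illustrative sketch (assumed constants/names, not LDK API) of how the tests
// above size a metadata or custom-TLV field against the onion packet limit.

/// The fixed size of the onion packet's hop-data area, per BOLT 4.
const ONION_HOP_DATA_LEN: usize = 1300;

/// Estimated size of one intermediate (unblinded) hop payload, mirroring the
/// comment in max_payment_path_len_tests.rs: 3+32 (payload length and HMAC)
/// + 2+8 (amt_to_forward) + 2+4 (outgoing_cltv_value) + 2+8 (short_channel_id).
const INTERMED_PAYLOAD_LEN_ESTIMATE: usize = 3 + 32 + 2 + 8 + 2 + 4 + 2 + 8; // 61

/// Serialized length in bytes of a BOLT "BigSize" integer.
fn bigsize_len(val: u64) -> usize {
	match val {
		0..=0xfc => 1,
		0xfd..=0xffff => 3,
		0x1_0000..=0xffff_ffff => 5,
		_ => 9,
	}
}

/// Largest TLV value that still fits in a single-hop onion, given the final
/// payload's size without that TLV (`base_len`) and the TLV's type number.
fn max_tlv_value_len(tlv_type: u64, base_len: usize) -> usize {
	ONION_HOP_DATA_LEN
		- bigsize_len(tlv_type) // TLV type
		- bigsize_len(1200) // worst-case TLV length prefix, as in the tests
		- base_len
}

fn main() {
	// Hypothetical 100-byte final payload and a custom TLV of type 65537, as
	// in the tests. One unblinded hop leaves this much room for the value...
	let one_hop = max_tlv_value_len(65537, 100);
	// ...and each additional unblinded hop costs one intermediate payload.
	let two_hops = one_hop - INTERMED_PAYLOAD_LEN_ESTIMATE;
	println!("1 hop budget: {one_hop} bytes; 2 hop budget: {two_hops} bytes");
}
```

The real tests additionally account for the onion payload's own length prefix growing when the payload crosses a BigSize boundary, so the exact byte counts there differ slightly from this sketch.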