From d4c66812596c169e1e6e89eb2fdd5527d858b8bb Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sun, 18 May 2025 16:04:23 +0000 Subject: [PATCH 01/12] Clean up `chain/transaction.rs` tests marginally before rustfmt --- lightning/src/chain/transaction.rs | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/lightning/src/chain/transaction.rs b/lightning/src/chain/transaction.rs index f7cd9783589..745efd27099 100644 --- a/lightning/src/chain/transaction.rs +++ b/lightning/src/chain/transaction.rs @@ -92,20 +92,24 @@ mod tests { use crate::chain::transaction::OutPoint; use crate::ln::types::ChannelId; - use bitcoin::transaction::Transaction; use bitcoin::consensus::encode; use bitcoin::hex::FromHex; + use bitcoin::transaction::Transaction; #[test] fn test_channel_id_calculation() { - let tx: Transaction = encode::deserialize(&>::from_hex("020000000001010e0adef48412e4361325ac1c6e36411299ab09d4f083b9d8ddb55fbc06e1b0c00000000000feffffff0220a1070000000000220020f81d95e040bd0a493e38bae27bff52fe2bb58b93b293eb579c01c31b05c5af1dc072cfee54a3000016001434b1d6211af5551905dc2642d05f5b04d25a8fe80247304402207f570e3f0de50546aad25a872e3df059d277e776dda4269fa0d2cc8c2ee6ec9a022054e7fae5ca94d47534c86705857c24ceea3ad51c69dd6051c5850304880fc43a012103cb11a1bacc223d98d91f1946c6752e358a5eb1a1c983b3e6fb15378f453b76bd00000000").unwrap()[..]).unwrap(); - assert_eq!(&ChannelId::v1_from_funding_outpoint(OutPoint { - txid: tx.compute_txid(), - index: 0 - }).0[..], &>::from_hex("3e88dd7165faf7be58b3c5bb2c9c452aebef682807ea57080f62e6f6e113c25e").unwrap()[..]); - assert_eq!(&ChannelId::v1_from_funding_outpoint(OutPoint { - txid: tx.compute_txid(), - index: 1 - }).0[..], &>::from_hex("3e88dd7165faf7be58b3c5bb2c9c452aebef682807ea57080f62e6f6e113c25f").unwrap()[..]); + let tx_hex = "020000000001010e0adef48412e4361325ac1c6e36411299ab09d4f083b9d8ddb55fbc06e1b0c00000000000feffffff0220a1070000000000220020f81d95e040bd0a493e38bae27bff52fe2bb58b93b293eb579c01c31b05c5af1dc072cfee54a3000016001434b1d6211af5551905dc2642d05f5b04d25a8fe80247304402207f570e3f0de50546aad25a872e3df059d277e776dda4269fa0d2cc8c2ee6ec9a022054e7fae5ca94d47534c86705857c24ceea3ad51c69dd6051c5850304880fc43a012103cb11a1bacc223d98d91f1946c6752e358a5eb1a1c983b3e6fb15378f453b76bd00000000"; + let tx: Transaction = + encode::deserialize(&>::from_hex(tx_hex).unwrap()[..]).unwrap(); + + let txid = tx.compute_txid(); + + let id_0 = ChannelId::v1_from_funding_outpoint(OutPoint { txid, index: 0 }); + let expected_0 = "3e88dd7165faf7be58b3c5bb2c9c452aebef682807ea57080f62e6f6e113c25e"; + assert_eq!(&id_0.0[..], &Vec::::from_hex(expected_0).unwrap()[..]); + + let id_1 = ChannelId::v1_from_funding_outpoint(OutPoint { txid, index: 1 }); + let expected_1 = "3e88dd7165faf7be58b3c5bb2c9c452aebef682807ea57080f62e6f6e113c25f"; + assert_eq!(&id_1.0[..], &Vec::::from_hex(expected_1).unwrap()[..]); } } From 1faaab6193c4ddad26bbd87e01dbc68cdc53fe3d Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sun, 18 May 2025 15:47:59 +0000 Subject: [PATCH 02/12] Run `rustfmt` on `chain/transaction.rs` --- lightning/src/chain/transaction.rs | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/lightning/src/chain/transaction.rs b/lightning/src/chain/transaction.rs index 745efd27099..cde4256be40 100644 --- a/lightning/src/chain/transaction.rs +++ b/lightning/src/chain/transaction.rs @@ -1,5 +1,3 @@ -#![cfg_attr(rustfmt, rustfmt_skip)] - // This file is Copyright its original authors, visible in version control // history. 
// @@ -63,10 +61,7 @@ impl OutPoint { /// This is not exported to bindings users as the same type is used universally in the C bindings /// for all outpoints pub fn into_bitcoin_outpoint(self) -> BitcoinOutPoint { - BitcoinOutPoint { - txid: self.txid, - vout: self.index as u32, - } + BitcoinOutPoint { txid: self.txid, vout: self.index as u32 } } } From 4349898de967cfd62c5e8150d7fe834ee7fac5e3 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sun, 18 May 2025 15:50:03 +0000 Subject: [PATCH 03/12] Run rustfmt on `chain/chaininterface.rs` --- lightning/src/chain/chaininterface.rs | 32 +++++++++++++++++---------- 1 file changed, 20 insertions(+), 12 deletions(-) diff --git a/lightning/src/chain/chaininterface.rs b/lightning/src/chain/chaininterface.rs index 5da9f4a22ae..b6198215bcb 100644 --- a/lightning/src/chain/chaininterface.rs +++ b/lightning/src/chain/chaininterface.rs @@ -1,5 +1,3 @@ -#![cfg_attr(rustfmt, rustfmt_skip)] - // This file is Copyright its original authors, visible in version control // history. // @@ -189,25 +187,29 @@ pub const FEERATE_FLOOR_SATS_PER_KW: u32 = 253; /// /// Note that this does *not* implement [`FeeEstimator`] to make it harder to accidentally mix the /// two. -pub(crate) struct LowerBoundedFeeEstimator(pub F) where F::Target: FeeEstimator; - -impl LowerBoundedFeeEstimator where F::Target: FeeEstimator { +pub(crate) struct LowerBoundedFeeEstimator(pub F) +where + F::Target: FeeEstimator; + +impl LowerBoundedFeeEstimator +where + F::Target: FeeEstimator, +{ /// Creates a new `LowerBoundedFeeEstimator` which wraps the provided fee_estimator pub fn new(fee_estimator: F) -> Self { LowerBoundedFeeEstimator(fee_estimator) } pub fn bounded_sat_per_1000_weight(&self, confirmation_target: ConfirmationTarget) -> u32 { - cmp::max( - self.0.get_est_sat_per_1000_weight(confirmation_target), - FEERATE_FLOOR_SATS_PER_KW, - ) + cmp::max(self.0.get_est_sat_per_1000_weight(confirmation_target), FEERATE_FLOOR_SATS_PER_KW) } } #[cfg(test)] mod tests { - use super::{FEERATE_FLOOR_SATS_PER_KW, LowerBoundedFeeEstimator, ConfirmationTarget, FeeEstimator}; + use super::{ + ConfirmationTarget, FeeEstimator, LowerBoundedFeeEstimator, FEERATE_FLOOR_SATS_PER_KW, + }; struct TestFeeEstimator { sat_per_kw: u32, @@ -225,7 +227,10 @@ mod tests { let test_fee_estimator = &TestFeeEstimator { sat_per_kw }; let fee_estimator = LowerBoundedFeeEstimator::new(test_fee_estimator); - assert_eq!(fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::AnchorChannelFee), FEERATE_FLOOR_SATS_PER_KW); + assert_eq!( + fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::AnchorChannelFee), + FEERATE_FLOOR_SATS_PER_KW + ); } #[test] @@ -234,6 +239,9 @@ mod tests { let test_fee_estimator = &TestFeeEstimator { sat_per_kw }; let fee_estimator = LowerBoundedFeeEstimator::new(test_fee_estimator); - assert_eq!(fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::AnchorChannelFee), sat_per_kw); + assert_eq!( + fee_estimator.bounded_sat_per_1000_weight(ConfirmationTarget::AnchorChannelFee), + sat_per_kw + ); } } From 6d0c940412d4c7f21647a331229ead5d43a3f46a Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sun, 18 May 2025 18:26:28 +0000 Subject: [PATCH 04/12] Run `rustfmt` on `max_payment_path_len_tests.rs` --- .../src/ln/max_payment_path_len_tests.rs | 374 ++++++++++++++---- 1 file changed, 287 insertions(+), 87 deletions(-) diff --git a/lightning/src/ln/max_payment_path_len_tests.rs b/lightning/src/ln/max_payment_path_len_tests.rs index d83f1800e1e..2755bbbd38b 100644 --- 
a/lightning/src/ln/max_payment_path_len_tests.rs +++ b/lightning/src/ln/max_payment_path_len_tests.rs @@ -1,5 +1,3 @@ -#![cfg_attr(rustfmt, rustfmt_skip)] - // This file is Copyright its original authors, visible in version control // history. // @@ -12,14 +10,14 @@ //! Tests for calculating the maximum length of a path based on the payment metadata, custom TLVs, //! and/or blinded paths present. -use bitcoin::secp256k1::{Secp256k1, PublicKey}; +use crate::blinded_path::payment::{ + BlindedPayInfo, BlindedPaymentPath, Bolt12RefundContext, PaymentConstraints, PaymentContext, + UnauthenticatedReceiveTlvs, +}; use crate::blinded_path::BlindedHop; -use crate::blinded_path::payment::{BlindedPayInfo, BlindedPaymentPath, Bolt12RefundContext, PaymentConstraints, PaymentContext, UnauthenticatedReceiveTlvs}; use crate::events::Event; -use crate::types::payment::PaymentSecret; use crate::ln::blinded_payment_tests::get_blinded_route_parameters; use crate::ln::channelmanager::PaymentId; -use crate::types::features::BlindedHopFeatures; use crate::ln::functional_test_utils::*; use crate::ln::msgs; use crate::ln::msgs::{BaseMessageHandler, OnionMessageHandler}; @@ -28,11 +26,16 @@ use crate::ln::onion_utils::MIN_FINAL_VALUE_ESTIMATE_WITH_OVERPAY; use crate::ln::outbound_payment::{RecipientOnionFields, Retry, RetryableSendFailure}; use crate::offers::nonce::Nonce; use crate::prelude::*; -use crate::routing::router::{PaymentParameters, RouteParameters, RouteParametersConfig, DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA}; +use crate::routing::router::{ + PaymentParameters, RouteParameters, RouteParametersConfig, DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA, +}; use crate::sign::NodeSigner; +use crate::types::features::BlindedHopFeatures; +use crate::types::payment::PaymentSecret; use crate::util::errors::APIError; use crate::util::ser::Writeable; use crate::util::test_utils; +use bitcoin::secp256k1::{PublicKey, Secp256k1}; // 3+32 (payload length and HMAC) + 2+8 (amt_to_forward) + // 2+4 (outgoing_cltv_value) + 2+8 (short_channel_id) @@ -58,14 +61,16 @@ fn large_payment_metadata() { // without exceeding the max onion packet size. let final_payload_len_without_metadata = msgs::OutboundOnionPayload::Receive { payment_data: Some(msgs::FinalOnionHopData { - payment_secret: PaymentSecret([0; 32]), total_msat: MIN_FINAL_VALUE_ESTIMATE_WITH_OVERPAY + payment_secret: PaymentSecret([0; 32]), + total_msat: MIN_FINAL_VALUE_ESTIMATE_WITH_OVERPAY, }), payment_metadata: None, keysend_preimage: None, custom_tlvs: &Vec::new(), sender_intended_htlc_amt_msat: MIN_FINAL_VALUE_ESTIMATE_WITH_OVERPAY, cltv_expiry_height: nodes[0].best_block_info().1 + DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA, - }.serialized_length(); + } + .serialized_length(); let max_metadata_len = 1300 - 1 // metadata type - crate::util::ser::BigSize(1200).serialized_length() // metadata length @@ -75,38 +80,70 @@ fn large_payment_metadata() { let mut payment_metadata = vec![42; max_metadata_len]; // Check that the maximum-size metadata is sendable. 
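 // (A note on the budget above: the payment_metadata TLV costs one byte for its
 // type plus a BigSize-encoded length, and a BigSize in the 253..=65535 range,
 // which a roughly-1200-byte length is, always serializes to three bytes (an
 // 0xfd marker plus a big-endian u16):
 //     assert_eq!(crate::util::ser::BigSize(1200).serialized_length(), 3);
 // so the metadata value may fill whatever remains of the fixed 1300-byte onion
 // hop-data area.)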
- let (mut route_0_1, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], &nodes[1], amt_msat); + let (mut route_0_1, payment_hash, payment_preimage, payment_secret) = + get_route_and_payment_hash!(&nodes[0], &nodes[1], amt_msat); let mut recipient_onion_max_md_size = RecipientOnionFields { payment_secret: Some(payment_secret), payment_metadata: Some(payment_metadata.clone()), custom_tlvs: Vec::new(), }; - nodes[0].node.send_payment(payment_hash, recipient_onion_max_md_size.clone(), PaymentId(payment_hash.0), route_0_1.route_params.clone().unwrap(), Retry::Attempts(0)).unwrap(); + nodes[0] + .node + .send_payment( + payment_hash, + recipient_onion_max_md_size.clone(), + PaymentId(payment_hash.0), + route_0_1.route_params.clone().unwrap(), + Retry::Attempts(0), + ) + .unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let path = &[&nodes[1]]; - let args = PassAlongPathArgs::new(&nodes[0], path, amt_msat, payment_hash, events.pop().unwrap()) - .with_payment_secret(payment_secret) - .with_payment_metadata(payment_metadata.clone()); + let args = + PassAlongPathArgs::new(&nodes[0], path, amt_msat, payment_hash, events.pop().unwrap()) + .with_payment_secret(payment_secret) + .with_payment_metadata(payment_metadata.clone()); do_pass_along_path(args); - claim_payment_along_route( - ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1]]], payment_preimage) - ); + claim_payment_along_route(ClaimAlongRouteArgs::new( + &nodes[0], + &[&[&nodes[1]]], + payment_preimage, + )); // Check that the payment parameter for max path length will prevent us from routing past our // next-hop peer given the payment_metadata size. - let (mut route_0_2, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(&nodes[0], &nodes[2], amt_msat); + let (mut route_0_2, payment_hash_2, payment_preimage_2, payment_secret_2) = + get_route_and_payment_hash!(&nodes[0], &nodes[2], amt_msat); let mut route_params_0_2 = route_0_2.route_params.clone().unwrap(); route_params_0_2.payment_params.max_path_length = 1; nodes[0].router.expect_find_route_query(route_params_0_2); - let err = nodes[0].node.send_payment(payment_hash_2, recipient_onion_max_md_size.clone(), PaymentId(payment_hash_2.0), route_0_2.route_params.clone().unwrap(), Retry::Attempts(0)).unwrap_err(); + let err = nodes[0] + .node + .send_payment( + payment_hash_2, + recipient_onion_max_md_size.clone(), + PaymentId(payment_hash_2.0), + route_0_2.route_params.clone().unwrap(), + Retry::Attempts(0), + ) + .unwrap_err(); assert_eq!(err, RetryableSendFailure::RouteNotFound); // If our payment_metadata contains 1 additional byte, we'll fail prior to pathfinding. 
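 // (With maximum-size metadata the onion above is exactly full, so a single
 // extra byte makes the packet unbuildable for any route at all; as checked
 // below, the send then fails with `OnionPacketSizeExceeded` before the router
 // is ever queried.)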
let mut recipient_onion_too_large_md = recipient_onion_max_md_size.clone(); recipient_onion_too_large_md.payment_metadata.as_mut().map(|mut md| md.push(42)); - let err = nodes[0].node.send_payment(payment_hash, recipient_onion_too_large_md.clone(), PaymentId(payment_hash.0), route_0_1.route_params.clone().unwrap(), Retry::Attempts(0)).unwrap_err(); + let err = nodes[0] + .node + .send_payment( + payment_hash, + recipient_onion_too_large_md.clone(), + PaymentId(payment_hash.0), + route_0_1.route_params.clone().unwrap(), + Retry::Attempts(0), + ) + .unwrap_err(); assert_eq!(err, RetryableSendFailure::OnionPacketSizeExceeded); // Confirm that we'll fail to construct an onion packet given this payment_metadata that's too @@ -114,7 +151,19 @@ fn large_payment_metadata() { let secp_ctx = Secp256k1::signing_only(); route_0_1.paths[0].hops[0].fee_msat = MIN_FINAL_VALUE_ESTIMATE_WITH_OVERPAY; route_0_1.paths[0].hops[0].cltv_expiry_delta = DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA; - let err = onion_utils::create_payment_onion(&secp_ctx, &route_0_1.paths[0], &test_utils::privkey(42), MIN_FINAL_VALUE_ESTIMATE_WITH_OVERPAY, &recipient_onion_too_large_md, nodes[0].best_block_info().1 + DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA, &payment_hash, &None, None, [0; 32]).unwrap_err(); + let err = onion_utils::create_payment_onion( + &secp_ctx, + &route_0_1.paths[0], + &test_utils::privkey(42), + MIN_FINAL_VALUE_ESTIMATE_WITH_OVERPAY, + &recipient_onion_too_large_md, + nodes[0].best_block_info().1 + DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA, + &payment_hash, + &None, + None, + [0; 32], + ) + .unwrap_err(); match err { APIError::InvalidRoute { err } => { assert_eq!(err, "Route size too large considering onion data"); @@ -132,18 +181,30 @@ fn large_payment_metadata() { let mut route_params_0_2 = route_0_2.route_params.clone().unwrap(); route_params_0_2.payment_params.max_path_length = 2; nodes[0].router.expect_find_route_query(route_params_0_2); - nodes[0].node.send_payment(payment_hash_2, recipient_onion_allows_2_hops.clone(), PaymentId(payment_hash_2.0), route_0_2.route_params.unwrap(), Retry::Attempts(0)).unwrap(); + nodes[0] + .node + .send_payment( + payment_hash_2, + recipient_onion_allows_2_hops.clone(), + PaymentId(payment_hash_2.0), + route_0_2.route_params.unwrap(), + Retry::Attempts(0), + ) + .unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let path = &[&nodes[1], &nodes[2]]; - let args = PassAlongPathArgs::new(&nodes[0], path, amt_msat, payment_hash_2, events.pop().unwrap()) - .with_payment_secret(payment_secret_2) - .with_payment_metadata(recipient_onion_allows_2_hops.payment_metadata.unwrap()); + let args = + PassAlongPathArgs::new(&nodes[0], path, amt_msat, payment_hash_2, events.pop().unwrap()) + .with_payment_secret(payment_secret_2) + .with_payment_metadata(recipient_onion_allows_2_hops.payment_metadata.unwrap()); do_pass_along_path(args); - claim_payment_along_route( - ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[2]]], payment_preimage_2) - ); + claim_payment_along_route(ClaimAlongRouteArgs::new( + &nodes[0], + &[&[&nodes[1], &nodes[2]]], + payment_preimage_2, + )); } #[test] @@ -156,16 +217,18 @@ fn one_hop_blinded_path_with_custom_tlv() { let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); create_announced_chan_between_nodes(&nodes, 0, 1); - let chan_upd_1_2 = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 
1_000_000, 0).0.contents; + let chan_upd_1_2 = + create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 0).0.contents; // Start with all nodes at the same height - connect_blocks(&nodes[0], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1); - connect_blocks(&nodes[1], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1); - connect_blocks(&nodes[2], 2*CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1); + connect_blocks(&nodes[0], 2 * CHAN_CONFIRM_DEPTH + 1 - nodes[0].best_block_info().1); + connect_blocks(&nodes[1], 2 * CHAN_CONFIRM_DEPTH + 1 - nodes[1].best_block_info().1); + connect_blocks(&nodes[2], 2 * CHAN_CONFIRM_DEPTH + 1 - nodes[2].best_block_info().1); // Construct the route parameters for sending to nodes[2]'s 1-hop blinded path. let amt_msat = 100_000; - let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[2], Some(amt_msat), None); + let (payment_preimage, payment_hash, payment_secret) = + get_payment_preimage_hash(&nodes[2], Some(amt_msat), None); let payee_tlvs = UnauthenticatedReceiveTlvs { payment_secret, payment_constraints: PaymentConstraints { @@ -179,9 +242,15 @@ fn one_hop_blinded_path_with_custom_tlv() { let payee_tlvs = payee_tlvs.authenticate(nonce, &expanded_key); let mut secp_ctx = Secp256k1::new(); let blinded_path = BlindedPaymentPath::new( - &[], nodes[2].node.get_our_node_id(), payee_tlvs, u64::MAX, TEST_FINAL_CLTV as u16, - &chanmon_cfgs[2].keys_manager, &secp_ctx - ).unwrap(); + &[], + nodes[2].node.get_our_node_id(), + payee_tlvs, + u64::MAX, + TEST_FINAL_CLTV as u16, + &chanmon_cfgs[2].keys_manager, + &secp_ctx, + ) + .unwrap(); let route_params = RouteParameters::from_payment_params_and_value( PaymentParameters::blinded(vec![blinded_path.clone()]), amt_msat, @@ -197,8 +266,9 @@ fn one_hop_blinded_path_with_custom_tlv() { intro_node_blinding_point: Some(blinded_path.blinding_point()), keysend_preimage: None, invoice_request: None, - custom_tlvs: &Vec::new() - }.serialized_length(); + custom_tlvs: &Vec::new(), + } + .serialized_length(); let max_custom_tlv_len = 1300 - crate::util::ser::BigSize(CUSTOM_TLV_TYPE).serialized_length() // custom TLV type - crate::util::ser::BigSize(1200).serialized_length() // custom TLV length @@ -210,49 +280,89 @@ fn one_hop_blinded_path_with_custom_tlv() { let recipient_onion_max_custom_tlv_size = RecipientOnionFields::spontaneous_empty() .with_custom_tlvs(vec![(CUSTOM_TLV_TYPE, vec![42; max_custom_tlv_len])]) .unwrap(); - nodes[1].node.send_payment(payment_hash, recipient_onion_max_custom_tlv_size.clone(), PaymentId(payment_hash.0), route_params.clone(), Retry::Attempts(0)).unwrap(); + nodes[1] + .node + .send_payment( + payment_hash, + recipient_onion_max_custom_tlv_size.clone(), + PaymentId(payment_hash.0), + route_params.clone(), + Retry::Attempts(0), + ) + .unwrap(); check_added_monitors(&nodes[1], 1); let mut events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let path = &[&nodes[2]]; - let args = PassAlongPathArgs::new(&nodes[1], path, amt_msat, payment_hash, events.pop().unwrap()) - .with_payment_secret(payment_secret) - .with_custom_tlvs(recipient_onion_max_custom_tlv_size.custom_tlvs.clone()); + let args = + PassAlongPathArgs::new(&nodes[1], path, amt_msat, payment_hash, events.pop().unwrap()) + .with_payment_secret(payment_secret) + .with_custom_tlvs(recipient_onion_max_custom_tlv_size.custom_tlvs.clone()); do_pass_along_path(args); claim_payment_along_route( ClaimAlongRouteArgs::new(&nodes[1], &[&[&nodes[2]]], 
payment_preimage) - .with_custom_tlvs(recipient_onion_max_custom_tlv_size.custom_tlvs.clone()) + .with_custom_tlvs(recipient_onion_max_custom_tlv_size.custom_tlvs.clone()), ); // If 1 byte is added to the custom TLV value, we'll fail to send prior to pathfinding. let mut recipient_onion_too_large_custom_tlv = recipient_onion_max_custom_tlv_size.clone(); recipient_onion_too_large_custom_tlv.custom_tlvs[0].1.push(42); - let err = nodes[1].node.send_payment(payment_hash, recipient_onion_too_large_custom_tlv, PaymentId(payment_hash.0), route_params.clone(), Retry::Attempts(0)).unwrap_err(); + let err = nodes[1] + .node + .send_payment( + payment_hash, + recipient_onion_too_large_custom_tlv, + PaymentId(payment_hash.0), + route_params.clone(), + Retry::Attempts(0), + ) + .unwrap_err(); assert_eq!(err, RetryableSendFailure::OnionPacketSizeExceeded); // With the maximum-size custom TLV, our max path length is limited to 1, so attempting to route // nodes[0] -> nodes[2] will fail. - let err = nodes[0].node.send_payment(payment_hash, recipient_onion_max_custom_tlv_size.clone(), PaymentId(payment_hash.0), route_params.clone(), Retry::Attempts(0)).unwrap_err(); + let err = nodes[0] + .node + .send_payment( + payment_hash, + recipient_onion_max_custom_tlv_size.clone(), + PaymentId(payment_hash.0), + route_params.clone(), + Retry::Attempts(0), + ) + .unwrap_err(); assert_eq!(err, RetryableSendFailure::RouteNotFound); // If we remove enough custom TLV bytes to allow for 1 intermediate unblinded hop, we're now able // to send nodes[0] -> nodes[2]. let mut recipient_onion_allows_2_hops = recipient_onion_max_custom_tlv_size.clone(); - recipient_onion_allows_2_hops.custom_tlvs[0].1.resize(max_custom_tlv_len - INTERMED_PAYLOAD_LEN_ESTIMATE, 0); - nodes[0].node.send_payment(payment_hash, recipient_onion_allows_2_hops.clone(), PaymentId(payment_hash.0), route_params.clone(), Retry::Attempts(0)).unwrap(); + recipient_onion_allows_2_hops.custom_tlvs[0] + .1 + .resize(max_custom_tlv_len - INTERMED_PAYLOAD_LEN_ESTIMATE, 0); + nodes[0] + .node + .send_payment( + payment_hash, + recipient_onion_allows_2_hops.clone(), + PaymentId(payment_hash.0), + route_params.clone(), + Retry::Attempts(0), + ) + .unwrap(); check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let path = &[&nodes[1], &nodes[2]]; - let args = PassAlongPathArgs::new(&nodes[0], path, amt_msat, payment_hash, events.pop().unwrap()) - .with_payment_secret(payment_secret) - .with_custom_tlvs(recipient_onion_allows_2_hops.custom_tlvs.clone()); + let args = + PassAlongPathArgs::new(&nodes[0], path, amt_msat, payment_hash, events.pop().unwrap()) + .with_payment_secret(payment_secret) + .with_custom_tlvs(recipient_onion_allows_2_hops.custom_tlvs.clone()); do_pass_along_path(args); claim_payment_along_route( ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[2]]], payment_preimage) - .with_custom_tlvs(recipient_onion_allows_2_hops.custom_tlvs) + .with_custom_tlvs(recipient_onion_allows_2_hops.custom_tlvs), ); } @@ -266,10 +376,12 @@ fn blinded_path_with_custom_tlv() { let nodes = create_network(4, &node_cfgs, &node_chanmgrs); create_announced_chan_between_nodes(&nodes, 0, 1); create_announced_chan_between_nodes(&nodes, 1, 2); - let chan_upd_2_3 = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 1_000_000, 0).0.contents; + let chan_upd_2_3 = + create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 1_000_000, 0).0.contents; // Ensure all nodes are at the same 
height - let node_max_height = nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32; + let node_max_height = + nodes.iter().map(|node| node.blocks.lock().unwrap().len()).max().unwrap() as u32; connect_blocks(&nodes[0], node_max_height - nodes[0].best_block_info().1); connect_blocks(&nodes[1], node_max_height - nodes[1].best_block_info().1); connect_blocks(&nodes[2], node_max_height - nodes[2].best_block_info().1); @@ -277,25 +389,35 @@ fn blinded_path_with_custom_tlv() { // Construct the route parameters for sending to nodes[3]'s blinded path. let amt_msat = 100_000; - let (payment_preimage, payment_hash, payment_secret) = get_payment_preimage_hash(&nodes[3], Some(amt_msat), None); - let route_params = get_blinded_route_parameters(amt_msat, payment_secret, 1, 1_0000_0000, - nodes.iter().skip(2).map(|n| n.node.get_our_node_id()).collect(), &[&chan_upd_2_3], - &chanmon_cfgs[3].keys_manager); + let (payment_preimage, payment_hash, payment_secret) = + get_payment_preimage_hash(&nodes[3], Some(amt_msat), None); + let route_params = get_blinded_route_parameters( + amt_msat, + payment_secret, + 1, + 1_0000_0000, + nodes.iter().skip(2).map(|n| n.node.get_our_node_id()).collect(), + &[&chan_upd_2_3], + &chanmon_cfgs[3].keys_manager, + ); // Calculate the maximum custom TLV value size where a valid onion packet is still possible. const CUSTOM_TLV_TYPE: u64 = 65537; let mut route = get_route(&nodes[1], &route_params).unwrap(); let reserved_packet_bytes_without_custom_tlv: usize = onion_utils::build_onion_payloads( - &route.paths[0], MIN_FINAL_VALUE_ESTIMATE_WITH_OVERPAY, + &route.paths[0], + MIN_FINAL_VALUE_ESTIMATE_WITH_OVERPAY, &RecipientOnionFields::spontaneous_empty(), - nodes[0].best_block_info().1 + DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA, &None, - None, None + nodes[0].best_block_info().1 + DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA, + &None, + None, + None, ) - .unwrap() - .0 - .iter() - .map(|payload| payload.serialized_length() + PAYLOAD_HMAC_LEN) - .sum(); + .unwrap() + .0 + .iter() + .map(|payload| payload.serialized_length() + PAYLOAD_HMAC_LEN) + .sum(); let max_custom_tlv_len = 1300 - crate::util::ser::BigSize(CUSTOM_TLV_TYPE).serialized_length() // custom TLV type - crate::util::ser::BigSize(1200).serialized_length() // custom TLV length @@ -306,32 +428,63 @@ fn blinded_path_with_custom_tlv() { let recipient_onion_max_custom_tlv_size = RecipientOnionFields::spontaneous_empty() .with_custom_tlvs(vec![(CUSTOM_TLV_TYPE, vec![42; max_custom_tlv_len])]) .unwrap(); - nodes[1].node.send_payment(payment_hash, recipient_onion_max_custom_tlv_size.clone(), PaymentId(payment_hash.0), route_params.clone(), Retry::Attempts(0)).unwrap(); + nodes[1] + .node + .send_payment( + payment_hash, + recipient_onion_max_custom_tlv_size.clone(), + PaymentId(payment_hash.0), + route_params.clone(), + Retry::Attempts(0), + ) + .unwrap(); check_added_monitors(&nodes[1], 1); let mut events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let path = &[&nodes[2], &nodes[3]]; - let args = PassAlongPathArgs::new(&nodes[1], path, amt_msat, payment_hash, events.pop().unwrap()) - .with_payment_secret(payment_secret) - .with_custom_tlvs(recipient_onion_max_custom_tlv_size.custom_tlvs.clone()); + let args = + PassAlongPathArgs::new(&nodes[1], path, amt_msat, payment_hash, events.pop().unwrap()) + .with_payment_secret(payment_secret) + .with_custom_tlvs(recipient_onion_max_custom_tlv_size.custom_tlvs.clone()); do_pass_along_path(args); claim_payment_along_route( 
ClaimAlongRouteArgs::new(&nodes[1], &[&[&nodes[2], &nodes[3]]], payment_preimage) - .with_custom_tlvs(recipient_onion_max_custom_tlv_size.custom_tlvs.clone()) + .with_custom_tlvs(recipient_onion_max_custom_tlv_size.custom_tlvs.clone()), ); // If 1 byte is added to the custom TLV value, we'll fail to send prior to pathfinding. let mut recipient_onion_too_large_custom_tlv = recipient_onion_max_custom_tlv_size.clone(); recipient_onion_too_large_custom_tlv.custom_tlvs[0].1.push(42); - let err = nodes[1].node.send_payment(payment_hash, recipient_onion_too_large_custom_tlv.clone(), PaymentId(payment_hash.0), route_params.clone(), Retry::Attempts(0)).unwrap_err(); + let err = nodes[1] + .node + .send_payment( + payment_hash, + recipient_onion_too_large_custom_tlv.clone(), + PaymentId(payment_hash.0), + route_params.clone(), + Retry::Attempts(0), + ) + .unwrap_err(); assert_eq!(err, RetryableSendFailure::OnionPacketSizeExceeded); // Confirm that we can't construct an onion packet given this too-large custom TLV. let secp_ctx = Secp256k1::signing_only(); route.paths[0].hops[0].fee_msat = MIN_FINAL_VALUE_ESTIMATE_WITH_OVERPAY; route.paths[0].hops[0].cltv_expiry_delta = DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA; - let err = onion_utils::create_payment_onion(&secp_ctx, &route.paths[0], &test_utils::privkey(42), MIN_FINAL_VALUE_ESTIMATE_WITH_OVERPAY, &recipient_onion_too_large_custom_tlv, nodes[0].best_block_info().1 + DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA, &payment_hash, &None, None, [0; 32]).unwrap_err(); + let err = onion_utils::create_payment_onion( + &secp_ctx, + &route.paths[0], + &test_utils::privkey(42), + MIN_FINAL_VALUE_ESTIMATE_WITH_OVERPAY, + &recipient_onion_too_large_custom_tlv, + nodes[0].best_block_info().1 + DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA, + &payment_hash, + &None, + None, + [0; 32], + ) + .unwrap_err(); match err { APIError::InvalidRoute { err } => { assert_eq!(err, "Route size too large considering onion data"); @@ -341,26 +494,51 @@ fn blinded_path_with_custom_tlv() { // With the maximum-size custom TLV, we can't have any intermediate unblinded hops, so attempting // to route nodes[0] -> nodes[3] will fail. - let err = nodes[0].node.send_payment(payment_hash, recipient_onion_max_custom_tlv_size.clone(), PaymentId(payment_hash.0), route_params.clone(), Retry::Attempts(0)).unwrap_err(); + let err = nodes[0] + .node + .send_payment( + payment_hash, + recipient_onion_max_custom_tlv_size.clone(), + PaymentId(payment_hash.0), + route_params.clone(), + Retry::Attempts(0), + ) + .unwrap_err(); assert_eq!(err, RetryableSendFailure::RouteNotFound); // If we remove enough custom TLV bytes to allow for 1 intermediate unblinded hop, we're now able // to send nodes[0] -> nodes[3]. 
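 // (Shrinking the custom TLV value by `INTERMED_PAYLOAD_LEN_ESTIMATE` frees
 // exactly one unblinded hop's worth of onion space; per the estimate at the
 // top of this file that is 3+32 (payload length and HMAC) + 2+8
 // (amt_to_forward) + 2+4 (outgoing_cltv_value) + 2+8 (short_channel_id),
 // i.e. 61 bytes.)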
let mut recipient_onion_allows_2_hops = recipient_onion_max_custom_tlv_size.clone(); - recipient_onion_allows_2_hops.custom_tlvs[0].1.resize(max_custom_tlv_len - INTERMED_PAYLOAD_LEN_ESTIMATE, 0); - nodes[0].node.send_payment(payment_hash, recipient_onion_allows_2_hops.clone(), PaymentId(payment_hash.0), route_params.clone(), Retry::Attempts(0)).unwrap(); + recipient_onion_allows_2_hops.custom_tlvs[0] + .1 + .resize(max_custom_tlv_len - INTERMED_PAYLOAD_LEN_ESTIMATE, 0); + nodes[0] + .node + .send_payment( + payment_hash, + recipient_onion_allows_2_hops.clone(), + PaymentId(payment_hash.0), + route_params.clone(), + Retry::Attempts(0), + ) + .unwrap(); check_added_monitors(&nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let path = &[&nodes[1], &nodes[2], &nodes[3]]; - let args = PassAlongPathArgs::new(&nodes[0], path, amt_msat, payment_hash, events.pop().unwrap()) - .with_payment_secret(payment_secret) - .with_custom_tlvs(recipient_onion_allows_2_hops.custom_tlvs.clone()); + let args = + PassAlongPathArgs::new(&nodes[0], path, amt_msat, payment_hash, events.pop().unwrap()) + .with_payment_secret(payment_secret) + .with_custom_tlvs(recipient_onion_allows_2_hops.custom_tlvs.clone()); do_pass_along_path(args); claim_payment_along_route( - ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[2], &nodes[3]]], payment_preimage) - .with_custom_tlvs(recipient_onion_allows_2_hops.custom_tlvs) + ClaimAlongRouteArgs::new( + &nodes[0], + &[&[&nodes[1], &nodes[2], &nodes[3]]], + payment_preimage, + ) + .with_custom_tlvs(recipient_onion_allows_2_hops.custom_tlvs), ); } @@ -376,7 +554,8 @@ fn bolt12_invoice_too_large_blinded_paths() { nodes[1].router.expect_blinded_payment_paths(vec![ BlindedPaymentPath::from_blinded_path_and_payinfo( - PublicKey::from_slice(&[2; 33]).unwrap(), PublicKey::from_slice(&[2; 33]).unwrap(), + PublicKey::from_slice(&[2; 33]).unwrap(), + PublicKey::from_slice(&[2; 33]).unwrap(), vec![ BlindedHop { blinded_node_id: PublicKey::from_slice(&[2; 33]).unwrap(), @@ -394,20 +573,41 @@ fn bolt12_invoice_too_large_blinded_paths() { htlc_minimum_msat: 42, htlc_maximum_msat: 42_000_000, features: BlindedHopFeatures::empty(), - } - ) + }, + ), ]); let offer = nodes[1].node.create_offer_builder(None).unwrap().build().unwrap(); let payment_id = PaymentId([1; 32]); - nodes[0].node.pay_for_offer(&offer, None, Some(5000), None, payment_id, Retry::Attempts(0), RouteParametersConfig::default()).unwrap(); - let invreq_om = nodes[0].onion_messenger.next_onion_message_for_peer(nodes[1].node.get_our_node_id()).unwrap(); + nodes[0] + .node + .pay_for_offer( + &offer, + None, + Some(5000), + None, + payment_id, + Retry::Attempts(0), + RouteParametersConfig::default(), + ) + .unwrap(); + let invreq_om = nodes[0] + .onion_messenger + .next_onion_message_for_peer(nodes[1].node.get_our_node_id()) + .unwrap(); nodes[1].onion_messenger.handle_onion_message(nodes[0].node.get_our_node_id(), &invreq_om); - let invoice_om = nodes[1].onion_messenger.next_onion_message_for_peer(nodes[0].node.get_our_node_id()).unwrap(); + let invoice_om = nodes[1] + .onion_messenger + .next_onion_message_for_peer(nodes[0].node.get_our_node_id()) + .unwrap(); nodes[0].onion_messenger.handle_onion_message(nodes[1].node.get_our_node_id(), &invoice_om); // TODO: assert on the invoice error once we support replying to invoice OMs with failure info - nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Failed paying invoice: OnionPacketSizeExceeded", 1); 
+ nodes[0].logger.assert_log_contains( + "lightning::ln::channelmanager", + "Failed paying invoice: OnionPacketSizeExceeded", + 1, + ); let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); From cd135f63ee3f836c5fcd941d334d8df99194c7cd Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sun, 18 May 2025 18:29:28 +0000 Subject: [PATCH 05/12] Cleanup `max_payment_path_len_tests` somewhat after `rustfmt`ing it --- .../src/ln/max_payment_path_len_tests.rs | 183 ++++++------------ 1 file changed, 58 insertions(+), 125 deletions(-) diff --git a/lightning/src/ln/max_payment_path_len_tests.rs b/lightning/src/ln/max_payment_path_len_tests.rs index 2755bbbd38b..ff5053644d8 100644 --- a/lightning/src/ln/max_payment_path_len_tests.rs +++ b/lightning/src/ln/max_payment_path_len_tests.rs @@ -82,20 +82,16 @@ fn large_payment_metadata() { // Check that the maximum-size metadata is sendable. let (mut route_0_1, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], &nodes[1], amt_msat); - let mut recipient_onion_max_md_size = RecipientOnionFields { + let mut max_sized_onion = RecipientOnionFields { payment_secret: Some(payment_secret), payment_metadata: Some(payment_metadata.clone()), custom_tlvs: Vec::new(), }; + let route_params = route_0_1.route_params.clone().unwrap(); + let id = PaymentId(payment_hash.0); nodes[0] .node - .send_payment( - payment_hash, - recipient_onion_max_md_size.clone(), - PaymentId(payment_hash.0), - route_0_1.route_params.clone().unwrap(), - Retry::Attempts(0), - ) + .send_payment(payment_hash, max_sized_onion.clone(), id, route_params, Retry::Attempts(0)) .unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -119,35 +115,20 @@ fn large_payment_metadata() { let mut route_params_0_2 = route_0_2.route_params.clone().unwrap(); route_params_0_2.payment_params.max_path_length = 1; nodes[0].router.expect_find_route_query(route_params_0_2); + + let id = PaymentId(payment_hash_2.0); + let route_params = route_0_2.route_params.clone().unwrap(); let err = nodes[0] .node - .send_payment( - payment_hash_2, - recipient_onion_max_md_size.clone(), - PaymentId(payment_hash_2.0), - route_0_2.route_params.clone().unwrap(), - Retry::Attempts(0), - ) + .send_payment(payment_hash_2, max_sized_onion.clone(), id, route_params, Retry::Attempts(0)) .unwrap_err(); assert_eq!(err, RetryableSendFailure::RouteNotFound); // If our payment_metadata contains 1 additional byte, we'll fail prior to pathfinding. - let mut recipient_onion_too_large_md = recipient_onion_max_md_size.clone(); - recipient_onion_too_large_md.payment_metadata.as_mut().map(|mut md| md.push(42)); - let err = nodes[0] - .node - .send_payment( - payment_hash, - recipient_onion_too_large_md.clone(), - PaymentId(payment_hash.0), - route_0_1.route_params.clone().unwrap(), - Retry::Attempts(0), - ) - .unwrap_err(); - assert_eq!(err, RetryableSendFailure::OnionPacketSizeExceeded); + let mut too_large_onion = max_sized_onion.clone(); + too_large_onion.payment_metadata.as_mut().map(|mut md| md.push(42)); - // Confirm that we'll fail to construct an onion packet given this payment_metadata that's too - // large for even a 1-hop path. + // First confirm we'll fail to create the onion packet directly. 
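 // (Onion construction enforces the same fixed 1300-byte onion data area as
 // the pre-send check; conceptually, with hypothetical names, the guard in
 // `onion_utils` amounts to:
 //     if serialized_payloads_len > 1300 {
 //         return Err(APIError::InvalidRoute {
 //             err: "Route size too large considering onion data".to_owned(),
 //         });
 //     }
 // which is the error matched on below.)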
let secp_ctx = Secp256k1::signing_only(); route_0_1.paths[0].hops[0].fee_msat = MIN_FINAL_VALUE_ESTIMATE_WITH_OVERPAY; route_0_1.paths[0].hops[0].cltv_expiry_delta = DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA; @@ -156,7 +137,7 @@ fn large_payment_metadata() { &route_0_1.paths[0], &test_utils::privkey(42), MIN_FINAL_VALUE_ESTIMATE_WITH_OVERPAY, - &recipient_onion_too_large_md, + &too_large_onion, nodes[0].best_block_info().1 + DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA, &payment_hash, &None, @@ -171,25 +152,28 @@ fn large_payment_metadata() { _ => panic!(), } + let route_params = route_0_1.route_params.clone().unwrap(); + let err = nodes[0] + .node + .send_payment(payment_hash_2, too_large_onion, id, route_params, Retry::Attempts(0)) + .unwrap_err(); + assert_eq!(err, RetryableSendFailure::OnionPacketSizeExceeded); + // If we remove enough payment_metadata bytes to allow for 2 hops, we're now able to send to // nodes[2]. - let mut recipient_onion_allows_2_hops = RecipientOnionFields { + let two_hop_metadata = vec![42; max_metadata_len - INTERMED_PAYLOAD_LEN_ESTIMATE]; + let mut onion_allowing_2_hops = RecipientOnionFields { payment_secret: Some(payment_secret_2), - payment_metadata: Some(vec![42; max_metadata_len - INTERMED_PAYLOAD_LEN_ESTIMATE]), + payment_metadata: Some(two_hop_metadata.clone()), custom_tlvs: Vec::new(), }; let mut route_params_0_2 = route_0_2.route_params.clone().unwrap(); route_params_0_2.payment_params.max_path_length = 2; nodes[0].router.expect_find_route_query(route_params_0_2); + let route_params = route_0_2.route_params.unwrap(); nodes[0] .node - .send_payment( - payment_hash_2, - recipient_onion_allows_2_hops.clone(), - PaymentId(payment_hash_2.0), - route_0_2.route_params.unwrap(), - Retry::Attempts(0), - ) + .send_payment(payment_hash_2, onion_allowing_2_hops, id, route_params, Retry::Attempts(0)) .unwrap(); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -198,7 +182,7 @@ fn large_payment_metadata() { let args = PassAlongPathArgs::new(&nodes[0], path, amt_msat, payment_hash_2, events.pop().unwrap()) .with_payment_secret(payment_secret_2) - .with_payment_metadata(recipient_onion_allows_2_hops.payment_metadata.unwrap()); + .with_payment_metadata(two_hop_metadata); do_pass_along_path(args); claim_payment_along_route(ClaimAlongRouteArgs::new( &nodes[0], @@ -277,18 +261,14 @@ fn one_hop_blinded_path_with_custom_tlv() { - final_payload_len_without_custom_tlv; // Check that we can send the maximum custom TLV with 1 blinded hop. 
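 // (Concretely, the TLV overhead subtracted in the formula above comes to
 // eight bytes: `BigSize(65537)` encodes to five bytes (an 0xfe marker plus a
 // big-endian u32) and a roughly-1200-byte length to three bytes (an 0xfd
 // marker plus a u16).)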
- let recipient_onion_max_custom_tlv_size = RecipientOnionFields::spontaneous_empty() + let max_sized_onion = RecipientOnionFields::spontaneous_empty() .with_custom_tlvs(vec![(CUSTOM_TLV_TYPE, vec![42; max_custom_tlv_len])]) .unwrap(); + let id = PaymentId(payment_hash.0); + let no_retry = Retry::Attempts(0); nodes[1] .node - .send_payment( - payment_hash, - recipient_onion_max_custom_tlv_size.clone(), - PaymentId(payment_hash.0), - route_params.clone(), - Retry::Attempts(0), - ) + .send_payment(payment_hash, max_sized_onion.clone(), id, route_params.clone(), no_retry) .unwrap(); check_added_monitors(&nodes[1], 1); @@ -298,25 +278,19 @@ fn one_hop_blinded_path_with_custom_tlv() { let args = PassAlongPathArgs::new(&nodes[1], path, amt_msat, payment_hash, events.pop().unwrap()) .with_payment_secret(payment_secret) - .with_custom_tlvs(recipient_onion_max_custom_tlv_size.custom_tlvs.clone()); + .with_custom_tlvs(max_sized_onion.custom_tlvs.clone()); do_pass_along_path(args); claim_payment_along_route( ClaimAlongRouteArgs::new(&nodes[1], &[&[&nodes[2]]], payment_preimage) - .with_custom_tlvs(recipient_onion_max_custom_tlv_size.custom_tlvs.clone()), + .with_custom_tlvs(max_sized_onion.custom_tlvs.clone()), ); // If 1 byte is added to the custom TLV value, we'll fail to send prior to pathfinding. - let mut recipient_onion_too_large_custom_tlv = recipient_onion_max_custom_tlv_size.clone(); - recipient_onion_too_large_custom_tlv.custom_tlvs[0].1.push(42); + let mut too_large_custom_tlv_onion = max_sized_onion.clone(); + too_large_custom_tlv_onion.custom_tlvs[0].1.push(42); let err = nodes[1] .node - .send_payment( - payment_hash, - recipient_onion_too_large_custom_tlv, - PaymentId(payment_hash.0), - route_params.clone(), - Retry::Attempts(0), - ) + .send_payment(payment_hash, too_large_custom_tlv_onion, id, route_params.clone(), no_retry) .unwrap_err(); assert_eq!(err, RetryableSendFailure::OnionPacketSizeExceeded); @@ -324,31 +298,19 @@ fn one_hop_blinded_path_with_custom_tlv() { // nodes[0] -> nodes[2] will fail. let err = nodes[0] .node - .send_payment( - payment_hash, - recipient_onion_max_custom_tlv_size.clone(), - PaymentId(payment_hash.0), - route_params.clone(), - Retry::Attempts(0), - ) + .send_payment(payment_hash, max_sized_onion.clone(), id, route_params.clone(), no_retry) .unwrap_err(); assert_eq!(err, RetryableSendFailure::RouteNotFound); // If we remove enough custom TLV bytes to allow for 1 intermediate unblinded hop, we're now able // to send nodes[0] -> nodes[2]. 
- let mut recipient_onion_allows_2_hops = recipient_onion_max_custom_tlv_size.clone(); - recipient_onion_allows_2_hops.custom_tlvs[0] + let mut onion_allows_2_hops = max_sized_onion.clone(); + onion_allows_2_hops.custom_tlvs[0] .1 .resize(max_custom_tlv_len - INTERMED_PAYLOAD_LEN_ESTIMATE, 0); nodes[0] .node - .send_payment( - payment_hash, - recipient_onion_allows_2_hops.clone(), - PaymentId(payment_hash.0), - route_params.clone(), - Retry::Attempts(0), - ) + .send_payment(payment_hash, onion_allows_2_hops.clone(), id, route_params.clone(), no_retry) .unwrap(); check_added_monitors(&nodes[0], 1); @@ -358,11 +320,11 @@ fn one_hop_blinded_path_with_custom_tlv() { let args = PassAlongPathArgs::new(&nodes[0], path, amt_msat, payment_hash, events.pop().unwrap()) .with_payment_secret(payment_secret) - .with_custom_tlvs(recipient_onion_allows_2_hops.custom_tlvs.clone()); + .with_custom_tlvs(onion_allows_2_hops.custom_tlvs.clone()); do_pass_along_path(args); claim_payment_along_route( ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[2]]], payment_preimage) - .with_custom_tlvs(recipient_onion_allows_2_hops.custom_tlvs), + .with_custom_tlvs(onion_allows_2_hops.custom_tlvs), ); } @@ -425,18 +387,14 @@ fn blinded_path_with_custom_tlv() { - reserved_packet_bytes_without_custom_tlv; // Check that we can send the maximum custom TLV size with 0 intermediate unblinded hops. - let recipient_onion_max_custom_tlv_size = RecipientOnionFields::spontaneous_empty() + let max_sized_onion = RecipientOnionFields::spontaneous_empty() .with_custom_tlvs(vec![(CUSTOM_TLV_TYPE, vec![42; max_custom_tlv_len])]) .unwrap(); + let no_retry = Retry::Attempts(0); + let id = PaymentId(payment_hash.0); nodes[1] .node - .send_payment( - payment_hash, - recipient_onion_max_custom_tlv_size.clone(), - PaymentId(payment_hash.0), - route_params.clone(), - Retry::Attempts(0), - ) + .send_payment(payment_hash, max_sized_onion.clone(), id, route_params.clone(), no_retry) .unwrap(); check_added_monitors(&nodes[1], 1); @@ -446,25 +404,19 @@ fn blinded_path_with_custom_tlv() { let args = PassAlongPathArgs::new(&nodes[1], path, amt_msat, payment_hash, events.pop().unwrap()) .with_payment_secret(payment_secret) - .with_custom_tlvs(recipient_onion_max_custom_tlv_size.custom_tlvs.clone()); + .with_custom_tlvs(max_sized_onion.custom_tlvs.clone()); do_pass_along_path(args); claim_payment_along_route( ClaimAlongRouteArgs::new(&nodes[1], &[&[&nodes[2], &nodes[3]]], payment_preimage) - .with_custom_tlvs(recipient_onion_max_custom_tlv_size.custom_tlvs.clone()), + .with_custom_tlvs(max_sized_onion.custom_tlvs.clone()), ); // If 1 byte is added to the custom TLV value, we'll fail to send prior to pathfinding. 
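 // (Two distinct failure modes are exercised here: `OnionPacketSizeExceeded`
 // means the final payload alone can no longer fit in the 1300-byte onion and
 // is caught before pathfinding, while `RouteNotFound` further below means the
 // onion would fit, but only over a shorter path than the router can return.)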
- let mut recipient_onion_too_large_custom_tlv = recipient_onion_max_custom_tlv_size.clone(); - recipient_onion_too_large_custom_tlv.custom_tlvs[0].1.push(42); + let mut too_large_onion = max_sized_onion.clone(); + too_large_onion.custom_tlvs[0].1.push(42); let err = nodes[1] .node - .send_payment( - payment_hash, - recipient_onion_too_large_custom_tlv.clone(), - PaymentId(payment_hash.0), - route_params.clone(), - Retry::Attempts(0), - ) + .send_payment(payment_hash, too_large_onion.clone(), id, route_params.clone(), no_retry) .unwrap_err(); assert_eq!(err, RetryableSendFailure::OnionPacketSizeExceeded); @@ -477,7 +429,7 @@ fn blinded_path_with_custom_tlv() { &route.paths[0], &test_utils::privkey(42), MIN_FINAL_VALUE_ESTIMATE_WITH_OVERPAY, - &recipient_onion_too_large_custom_tlv, + &too_large_onion, nodes[0].best_block_info().1 + DEFAULT_MAX_TOTAL_CLTV_EXPIRY_DELTA, &payment_hash, &None, @@ -496,31 +448,19 @@ fn blinded_path_with_custom_tlv() { // to route nodes[0] -> nodes[3] will fail. let err = nodes[0] .node - .send_payment( - payment_hash, - recipient_onion_max_custom_tlv_size.clone(), - PaymentId(payment_hash.0), - route_params.clone(), - Retry::Attempts(0), - ) + .send_payment(payment_hash, max_sized_onion.clone(), id, route_params.clone(), no_retry) .unwrap_err(); assert_eq!(err, RetryableSendFailure::RouteNotFound); // If we remove enough custom TLV bytes to allow for 1 intermediate unblinded hop, we're now able // to send nodes[0] -> nodes[3]. - let mut recipient_onion_allows_2_hops = recipient_onion_max_custom_tlv_size.clone(); - recipient_onion_allows_2_hops.custom_tlvs[0] + let mut onion_allowing_2_hops = max_sized_onion.clone(); + onion_allowing_2_hops.custom_tlvs[0] .1 .resize(max_custom_tlv_len - INTERMED_PAYLOAD_LEN_ESTIMATE, 0); nodes[0] .node - .send_payment( - payment_hash, - recipient_onion_allows_2_hops.clone(), - PaymentId(payment_hash.0), - route_params.clone(), - Retry::Attempts(0), - ) + .send_payment(payment_hash, onion_allowing_2_hops.clone(), id, route_params, no_retry) .unwrap(); check_added_monitors(&nodes[0], 1); @@ -530,7 +470,7 @@ fn blinded_path_with_custom_tlv() { let args = PassAlongPathArgs::new(&nodes[0], path, amt_msat, payment_hash, events.pop().unwrap()) .with_payment_secret(payment_secret) - .with_custom_tlvs(recipient_onion_allows_2_hops.custom_tlvs.clone()); + .with_custom_tlvs(onion_allowing_2_hops.custom_tlvs.clone()); do_pass_along_path(args); claim_payment_along_route( ClaimAlongRouteArgs::new( @@ -538,7 +478,7 @@ fn blinded_path_with_custom_tlv() { &[&[&nodes[1], &nodes[2], &nodes[3]]], payment_preimage, ) - .with_custom_tlvs(recipient_onion_allows_2_hops.custom_tlvs), + .with_custom_tlvs(onion_allowing_2_hops.custom_tlvs), ); } @@ -579,17 +519,10 @@ fn bolt12_invoice_too_large_blinded_paths() { let offer = nodes[1].node.create_offer_builder(None).unwrap().build().unwrap(); let payment_id = PaymentId([1; 32]); + let route_config = RouteParametersConfig::default(); nodes[0] .node - .pay_for_offer( - &offer, - None, - Some(5000), - None, - payment_id, - Retry::Attempts(0), - RouteParametersConfig::default(), - ) + .pay_for_offer(&offer, None, Some(5000), None, payment_id, Retry::Attempts(0), route_config) .unwrap(); let invreq_om = nodes[0] .onion_messenger From 1d36d1d4d712d1992b1757de8544d04a7ca50253 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sun, 18 May 2025 19:17:14 +0000 Subject: [PATCH 06/12] Run `rustfmt` on `chanmon_update_fail_tests.rs` --- lightning/src/ln/chanmon_update_fail_tests.rs | 3053 +++++++++++++---- 1 file changed, 
2437 insertions(+), 616 deletions(-) diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index d58934b4bcb..3bf713db1bc 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -1,5 +1,3 @@ -#![cfg_attr(rustfmt, rustfmt_skip)] - // This file is Copyright its original authors, visible in version control // history. // @@ -14,29 +12,31 @@ //! There are a bunch of these as their handling is relatively error-prone so they are split out //! here. See also the chanmon_fail_consistency fuzz test. -use bitcoin::constants::genesis_block; -use bitcoin::hash_types::BlockHash; -use bitcoin::network::Network; -use crate::chain::channelmonitor::{ANTI_REORG_DELAY, ChannelMonitor}; +use crate::chain::channelmonitor::{ChannelMonitor, ANTI_REORG_DELAY}; use crate::chain::{ChannelMonitorUpdateStatus, Listen, Watch}; -use crate::events::{Event, PaymentPurpose, ClosureReason, HTLCHandlingFailureType}; -use crate::ln::channelmanager::{PaymentId, RAACommitmentOrder, RecipientOnionFields}; +use crate::events::{ClosureReason, Event, HTLCHandlingFailureType, PaymentPurpose}; use crate::ln::channel::AnnouncementSigsState; +use crate::ln::channelmanager::{PaymentId, RAACommitmentOrder, RecipientOnionFields}; use crate::ln::msgs; +use crate::ln::msgs::{ + BaseMessageHandler, ChannelMessageHandler, MessageSendEvent, RoutingMessageHandler, +}; use crate::ln::types::ChannelId; -use crate::ln::msgs::{BaseMessageHandler, ChannelMessageHandler, RoutingMessageHandler, MessageSendEvent}; -use crate::util::test_channel_signer::TestChannelSigner; use crate::util::ser::{ReadableArgs, Writeable}; +use crate::util::test_channel_signer::TestChannelSigner; use crate::util::test_utils::TestBroadcaster; +use bitcoin::constants::genesis_block; +use bitcoin::hash_types::BlockHash; +use bitcoin::network::Network; use crate::ln::functional_test_utils::*; use crate::util::test_utils; use crate::io; -use bitcoin::hashes::Hash; use crate::prelude::*; use crate::sync::{Arc, Mutex}; +use bitcoin::hashes::Hash; #[test] fn test_monitor_and_persister_update_fail() { @@ -52,7 +52,7 @@ fn test_monitor_and_persister_update_fail() { let chan = create_announced_chan_between_nodes(&nodes, 0, 1); // Rebalance the network to generate htlc in the two directions - send_payment(&nodes[0], &vec!(&nodes[1])[..], 10_000_000); + send_payment(&nodes[0], &vec![&nodes[1]][..], 10_000_000); // Route an HTLC from node 0 to node 1 (but don't settle) let (preimage, payment_hash, ..) 
= route_payment(&nodes[0], &[&nodes[1]], 9_000_000); @@ -75,15 +75,31 @@ fn test_monitor_and_persister_update_fail() { let new_monitor = { let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(chan.2).unwrap(); let new_monitor = <(BlockHash, ChannelMonitor)>::read( - &mut io::Cursor::new(&monitor.encode()), (nodes[0].keys_manager, nodes[0].keys_manager)).unwrap().1; + &mut io::Cursor::new(&monitor.encode()), + (nodes[0].keys_manager, nodes[0].keys_manager), + ) + .unwrap() + .1; assert!(new_monitor == *monitor); new_monitor }; - let chain_mon = test_utils::TestChainMonitor::new(Some(&chain_source), &tx_broadcaster, &logger, &chanmon_cfgs[0].fee_estimator, &persister, &node_cfgs[0].keys_manager); - assert_eq!(chain_mon.watch_channel(chan.2, new_monitor), Ok(ChannelMonitorUpdateStatus::Completed)); + let chain_mon = test_utils::TestChainMonitor::new( + Some(&chain_source), + &tx_broadcaster, + &logger, + &chanmon_cfgs[0].fee_estimator, + &persister, + &node_cfgs[0].keys_manager, + ); + assert_eq!( + chain_mon.watch_channel(chan.2, new_monitor), + Ok(ChannelMonitorUpdateStatus::Completed) + ); chain_mon }; - chain_mon.chain_monitor.block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), 200); + chain_mon + .chain_monitor + .block_connected(&create_dummy_block(BlockHash::all_zeros(), 42, Vec::new()), 200); // Try to update ChannelMonitor nodes[1].node.claim_funds(preimage); @@ -92,24 +108,51 @@ fn test_monitor_and_persister_update_fail() { let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert_eq!(updates.update_fulfill_htlcs.len(), 1); - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); + nodes[0].node.handle_update_fulfill_htlc( + nodes[1].node.get_our_node_id(), + &updates.update_fulfill_htlcs[0], + ); { let mut node_0_per_peer_lock; let mut node_0_peer_state_lock; - if let Some(channel) = get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, chan.2).as_funded_mut() { + if let Some(channel) = get_channel_ref!( + nodes[0], + nodes[1], + node_0_per_peer_lock, + node_0_peer_state_lock, + chan.2 + ) + .as_funded_mut() + { assert_eq!(updates.commitment_signed.len(), 1); - if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed[0], &node_cfgs[0].logger) { + if let Ok(Some(update)) = + channel.commitment_signed(&updates.commitment_signed[0], &node_cfgs[0].logger) + { // Check that the persister returns InProgress (and will never actually complete) // as the monitor update errors. - if let ChannelMonitorUpdateStatus::InProgress = chain_mon.chain_monitor.update_channel(chan.2, &update) {} else { panic!("Expected monitor paused"); } - logger.assert_log_regex("lightning::chain::chainmonitor", regex::Regex::new("Failed to update ChannelMonitor for channel [0-9a-f]*.").unwrap(), 1); + if let ChannelMonitorUpdateStatus::InProgress = + chain_mon.chain_monitor.update_channel(chan.2, &update) + { + } else { + panic!("Expected monitor paused"); + } + logger.assert_log_regex( + "lightning::chain::chainmonitor", + regex::Regex::new("Failed to update ChannelMonitor for channel [0-9a-f]*.") + .unwrap(), + 1, + ); // Apply the monitor update to the original ChainMonitor, ensuring the // ChannelManager and ChannelMonitor aren't out of sync. 
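 // (This is the payoff of the duplicated-`ChainMonitor` setup above: the copy
 // created via the encode/read round-trip sees its persister stall with
 // `InProgress`, while the original accepts the same update with `Completed`,
 // keeping the `ChannelManager` and its `ChannelMonitor` in sync.)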
- assert_eq!(nodes[0].chain_monitor.update_channel(chan.2, &update), - ChannelMonitorUpdateStatus::Completed); - } else { assert!(false); } + assert_eq!( + nodes[0].chain_monitor.update_channel(chan.2, &update), + ChannelMonitorUpdateStatus::Completed + ); + } else { + assert!(false); + } } else { assert!(false); } @@ -129,14 +172,21 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2; let user_channel_id = nodes[1].node.list_channels()[0].user_channel_id; - let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000); + let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = + get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); { - nodes[0].node.send_payment_with_route(route, payment_hash_1, - RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0) - ).unwrap(); + nodes[0] + .node + .send_payment_with_route( + route, + payment_hash_1, + RecipientOnionFields::secret_only(payment_secret_1), + PaymentId(payment_hash_1.0), + ) + .unwrap(); check_added_monitors!(nodes[0], 1); } @@ -153,7 +203,14 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { } chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + let (latest_update, _) = nodes[0] + .chain_monitor + .latest_monitor_update_id + .lock() + .unwrap() + .get(&channel_id) + .unwrap() + .clone(); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors!(nodes[0], 0); @@ -169,17 +226,26 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { let events_3 = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events_3.len(), 1); match events_3[0] { - Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, ref via_channel_ids, .. } => { + Event::PaymentClaimable { + ref payment_hash, + ref purpose, + amount_msat, + receiver_node_id, + ref via_channel_ids, + .. + } => { assert_eq!(payment_hash_1, *payment_hash); assert_eq!(amount_msat, 1_000_000); assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id()); assert_eq!(*via_channel_ids, vec![(channel_id, Some(user_channel_id))]); match &purpose { - PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => { + PaymentPurpose::Bolt11InvoicePayment { + payment_preimage, payment_secret, .. + } => { assert!(payment_preimage.is_none()); assert_eq!(payment_secret_1, *payment_secret); }, - _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment") + _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment"), } }, _ => panic!("Unexpected event"), @@ -188,12 +254,19 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_1); // Now set it to failed again... 
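 // (This repeats the file's recurring test pattern: force the persister to
 // return `InProgress` so the channel freezes at a monitor update, exercise
 // the protocol while frozen, then release it as above with:
 //     chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
 //     nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update);
 // after which the pending events and messages are released.)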
- let (route, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000); + let (route, payment_hash_2, _, payment_secret_2) = + get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000); { chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - nodes[0].node.send_payment_with_route(route, payment_hash_2, - RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0) - ).unwrap(); + nodes[0] + .node + .send_payment_with_route( + route, + payment_hash_2, + RecipientOnionFields::secret_only(payment_secret_2), + PaymentId(payment_hash_2.0), + ) + .unwrap(); check_added_monitors!(nodes[0], 1); } @@ -209,7 +282,14 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { // ...and make sure we can force-close a frozen channel let error_message = "Channel force-closed"; - nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap(); + nodes[0] + .node + .force_close_broadcasting_latest_txn( + &channel_id, + &nodes[1].node.get_our_node_id(), + error_message.to_string(), + ) + .unwrap(); check_added_monitors!(nodes[0], 1); check_closed_broadcast!(nodes[0], true); @@ -217,7 +297,13 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { // PaymentPathFailed event assert_eq!(nodes[0].node.list_channels().len(), 0); - check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, [nodes[1].node.get_our_node_id()], 100000); + check_closed_event!( + nodes[0], + 1, + ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, + [nodes[1].node.get_our_node_id()], + 100000 + ); } #[test] @@ -253,15 +339,23 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2; let user_channel_id = nodes[1].node.list_channels()[0].user_channel_id; - let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000); + let (payment_preimage_1, payment_hash_1, ..) 
= + route_payment(&nodes[0], &[&nodes[1]], 1_000_000); // Now try to send a second payment which will fail to send - let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); + let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = + get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); { chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - nodes[0].node.send_payment_with_route(route, payment_hash_2, - RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0) - ).unwrap(); + nodes[0] + .node + .send_payment_with_route( + route, + payment_hash_2, + RecipientOnionFields::secret_only(payment_secret_2), + PaymentId(payment_hash_2.0), + ) + .unwrap(); check_added_monitors!(nodes[0], 1); } @@ -278,7 +372,19 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { let events_2 = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events_2.len(), 1); let (bs_initial_fulfill, bs_initial_commitment_signed) = match events_2[0] { - MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => { + MessageSendEvent::UpdateHTLCs { + ref node_id, + channel_id: _, + updates: + msgs::CommitmentUpdate { + ref update_add_htlcs, + ref update_fulfill_htlcs, + ref update_fail_htlcs, + ref update_fail_malformed_htlcs, + ref update_fee, + ref commitment_signed, + }, + } => { assert_eq!(*node_id, nodes[0].node.get_our_node_id()); assert!(update_add_htlcs.is_empty()); assert_eq!(update_fulfill_htlcs.len(), 1); @@ -287,7 +393,10 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { assert!(update_fee.is_none()); if (disconnect_count & 16) == 0 { - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &update_fulfill_htlcs[0]); + nodes[0].node.handle_update_fulfill_htlc( + nodes[1].node.get_our_node_id(), + &update_fulfill_htlcs[0], + ); let events_3 = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events_3.len(), 1); match events_3[0] { @@ -298,7 +407,10 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { _ => panic!("Unexpected event"), } - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test( + nodes[1].node.get_our_node_id(), + commitment_signed, + ); check_added_monitors!(nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); } @@ -315,55 +427,108 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { // Now fix monitor updating... chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + let (latest_update, _) = nodes[0] + .chain_monitor + .latest_monitor_update_id + .lock() + .unwrap() + .get(&channel_id) + .unwrap() + .clone(); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors!(nodes[0], 0); - macro_rules! 
disconnect_reconnect_peers { () => { { - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); - - nodes[0].node.peer_connected(nodes[1].node.get_our_node_id(), &msgs::Init { - features: nodes[1].node.init_features(), networks: None, remote_network_address: None - }, true).unwrap(); - let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]); - assert_eq!(reestablish_1.len(), 1); - nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init { - features: nodes[0].node.init_features(), networks: None, remote_network_address: None - }, false).unwrap(); - let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); - assert_eq!(reestablish_2.len(), 1); - - nodes[0].node.handle_channel_reestablish(nodes[1].node.get_our_node_id(), &reestablish_2[0]); - let as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]); - nodes[1].node.handle_channel_reestablish(nodes[0].node.get_our_node_id(), &reestablish_1[0]); - let bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]); - - assert!(as_resp.0.is_none()); - assert!(bs_resp.0.is_none()); - - (reestablish_1, reestablish_2, as_resp, bs_resp) - } } } + macro_rules! disconnect_reconnect_peers { + () => {{ + nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); + nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + + nodes[0] + .node + .peer_connected( + nodes[1].node.get_our_node_id(), + &msgs::Init { + features: nodes[1].node.init_features(), + networks: None, + remote_network_address: None, + }, + true, + ) + .unwrap(); + let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]); + assert_eq!(reestablish_1.len(), 1); + nodes[1] + .node + .peer_connected( + nodes[0].node.get_our_node_id(), + &msgs::Init { + features: nodes[0].node.init_features(), + networks: None, + remote_network_address: None, + }, + false, + ) + .unwrap(); + let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); + assert_eq!(reestablish_2.len(), 1); + + nodes[0] + .node + .handle_channel_reestablish(nodes[1].node.get_our_node_id(), &reestablish_2[0]); + let as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]); + nodes[1] + .node + .handle_channel_reestablish(nodes[0].node.get_our_node_id(), &reestablish_1[0]); + let bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]); + + assert!(as_resp.0.is_none()); + assert!(bs_resp.0.is_none()); + + (reestablish_1, reestablish_2, as_resp, bs_resp) + }}; + } let (payment_event, initial_revoke_and_ack) = if disconnect_count & !disconnect_flags > 0 { assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - nodes[0].node.peer_connected(nodes[1].node.get_our_node_id(), &msgs::Init { - features: nodes[1].node.init_features(), networks: None, remote_network_address: None - }, true).unwrap(); + nodes[0] + .node + .peer_connected( + nodes[1].node.get_our_node_id(), + &msgs::Init { + features: nodes[1].node.init_features(), + networks: None, + remote_network_address: None, + }, + true, + ) + .unwrap(); let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]); assert_eq!(reestablish_1.len(), 1); - nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init { - features: nodes[0].node.init_features(), networks: None, remote_network_address: None - }, false).unwrap(); + nodes[1] + .node + .peer_connected( + nodes[0].node.get_our_node_id(), + &msgs::Init { + features: 
nodes[0].node.init_features(), + networks: None, + remote_network_address: None, + }, + false, + ) + .unwrap(); let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); assert_eq!(reestablish_2.len(), 1); - nodes[0].node.handle_channel_reestablish(nodes[1].node.get_our_node_id(), &reestablish_2[0]); + nodes[0] + .node + .handle_channel_reestablish(nodes[1].node.get_our_node_id(), &reestablish_2[0]); check_added_monitors!(nodes[0], 0); let mut as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]); - nodes[1].node.handle_channel_reestablish(nodes[0].node.get_our_node_id(), &reestablish_1[0]); + nodes[1] + .node + .handle_channel_reestablish(nodes[0].node.get_our_node_id(), &reestablish_1[0]); check_added_monitors!(nodes[1], 0); let mut bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]); @@ -387,7 +552,10 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { assert!(as_resp.1.is_none()); - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_resp.2.as_ref().unwrap().update_fulfill_htlcs[0]); + nodes[0].node.handle_update_fulfill_htlc( + nodes[1].node.get_our_node_id(), + &bs_resp.2.as_ref().unwrap().update_fulfill_htlcs[0], + ); let events_3 = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events_3.len(), 1); match events_3[0] { @@ -398,8 +566,15 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { _ => panic!("Unexpected event"), } - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_resp.2.as_ref().unwrap().commitment_signed); - let as_resp_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + nodes[0].node.handle_commitment_signed_batch_test( + nodes[1].node.get_our_node_id(), + &bs_resp.2.as_ref().unwrap().commitment_signed, + ); + let as_resp_raa = get_event_msg!( + nodes[0], + MessageSendEvent::SendRevokeAndACK, + nodes[1].node.get_our_node_id() + ); // No commitment_signed so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[0], 1); @@ -408,7 +583,8 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { } if disconnect_count & !disconnect_flags > 1 { - let (second_reestablish_1, second_reestablish_2, second_as_resp, second_bs_resp) = disconnect_reconnect_peers!(); + let (second_reestablish_1, second_reestablish_2, second_as_resp, second_bs_resp) = + disconnect_reconnect_peers!(); if (disconnect_count & 16) == 0 { assert!(reestablish_1 == second_reestablish_1); @@ -418,24 +594,41 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { assert!(bs_resp == second_bs_resp); } - (SendEvent::from_commitment_update(nodes[1].node.get_our_node_id(), channel_id, as_resp.2.unwrap()), as_resp.1.unwrap()) + ( + SendEvent::from_commitment_update( + nodes[1].node.get_our_node_id(), + channel_id, + as_resp.2.unwrap(), + ), + as_resp.1.unwrap(), + ) } else { let mut events_4 = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events_4.len(), 2); - (SendEvent::from_event(events_4.remove(0)), match events_4[0] { - MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => { - assert_eq!(*node_id, nodes[1].node.get_our_node_id()); - msg.clone() + ( + SendEvent::from_event(events_4.remove(0)), + match events_4[0] { + MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => { + assert_eq!(*node_id, nodes[1].node.get_our_node_id()); + msg.clone() + }, + _ => panic!("Unexpected event"), }, - _ => panic!("Unexpected event"), - }) + ) }; 
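// The reconnect flow above walks LDK's commitment "dance" by hand: a
// commitment_signed covering new updates is answered with a revoke_and_ack,
// plus the receiver's own commitment_signed if it has queued updates of its
// own. A minimal, self-contained sketch of that ordering (the names below
// are invented for illustration and are not LDK's API):

#[derive(Debug, PartialEq)]
enum DanceMsg {
	CommitmentSigned,
	RevokeAndAck,
}

// What one peer sends back on receiving a commitment_signed.
fn respond_to_commitment(have_own_updates: bool) -> Vec<DanceMsg> {
	if have_own_updates {
		vec![DanceMsg::RevokeAndAck, DanceMsg::CommitmentSigned]
	} else {
		// An RAA alone -- hence the repeated note in these tests that "No
		// commitment_signed so get_event_msg's assert(len == 1) passes".
		vec![DanceMsg::RevokeAndAck]
	}
}

fn main() {
	let both = respond_to_commitment(true);
	assert_eq!(both, vec![DanceMsg::RevokeAndAck, DanceMsg::CommitmentSigned]);
	assert_eq!(respond_to_commitment(false), vec![DanceMsg::RevokeAndAck]);
}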
assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id()); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &payment_event.commitment_msg); - let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + nodes[1].node.handle_commitment_signed_batch_test( + nodes[0].node.get_our_node_id(), + &payment_event.commitment_msg, + ); + let bs_revoke_and_ack = get_event_msg!( + nodes[1], + MessageSendEvent::SendRevokeAndACK, + nodes[0].node.get_our_node_id() + ); // nodes[1] is awaiting an RAA from nodes[0] still so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[1], 1); @@ -452,27 +645,36 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { let as_commitment_update; let bs_second_commitment_update; - macro_rules! handle_bs_raa { () => { - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_revoke_and_ack); - as_commitment_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - assert!(as_commitment_update.update_add_htlcs.is_empty()); - assert!(as_commitment_update.update_fulfill_htlcs.is_empty()); - assert!(as_commitment_update.update_fail_htlcs.is_empty()); - assert!(as_commitment_update.update_fail_malformed_htlcs.is_empty()); - assert!(as_commitment_update.update_fee.is_none()); - check_added_monitors!(nodes[0], 1); - } } - - macro_rules! handle_initial_raa { () => { - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &initial_revoke_and_ack); - bs_second_commitment_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - assert!(bs_second_commitment_update.update_add_htlcs.is_empty()); - assert!(bs_second_commitment_update.update_fulfill_htlcs.is_empty()); - assert!(bs_second_commitment_update.update_fail_htlcs.is_empty()); - assert!(bs_second_commitment_update.update_fail_malformed_htlcs.is_empty()); - assert!(bs_second_commitment_update.update_fee.is_none()); - check_added_monitors!(nodes[1], 1); - } } + macro_rules! handle_bs_raa { + () => { + nodes[0] + .node + .handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_revoke_and_ack); + as_commitment_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + assert!(as_commitment_update.update_add_htlcs.is_empty()); + assert!(as_commitment_update.update_fulfill_htlcs.is_empty()); + assert!(as_commitment_update.update_fail_htlcs.is_empty()); + assert!(as_commitment_update.update_fail_malformed_htlcs.is_empty()); + assert!(as_commitment_update.update_fee.is_none()); + check_added_monitors!(nodes[0], 1); + }; + } + + macro_rules! 
handle_initial_raa { + () => { + nodes[1] + .node + .handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &initial_revoke_and_ack); + bs_second_commitment_update = + get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + assert!(bs_second_commitment_update.update_add_htlcs.is_empty()); + assert!(bs_second_commitment_update.update_fulfill_htlcs.is_empty()); + assert!(bs_second_commitment_update.update_fail_htlcs.is_empty()); + assert!(bs_second_commitment_update.update_fail_malformed_htlcs.is_empty()); + assert!(bs_second_commitment_update.update_fee.is_none()); + check_added_monitors!(nodes[1], 1); + }; + } if (disconnect_count & 8) == 0 { handle_bs_raa!(); @@ -528,13 +730,27 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { } } - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_second_commitment_update.commitment_signed); - let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + nodes[0].node.handle_commitment_signed_batch_test( + nodes[1].node.get_our_node_id(), + &bs_second_commitment_update.commitment_signed, + ); + let as_revoke_and_ack = get_event_msg!( + nodes[0], + MessageSendEvent::SendRevokeAndACK, + nodes[1].node.get_our_node_id() + ); // No commitment_signed so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[0], 1); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_commitment_update.commitment_signed); - let bs_second_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + nodes[1].node.handle_commitment_signed_batch_test( + nodes[0].node.get_our_node_id(), + &as_commitment_update.commitment_signed, + ); + let bs_second_revoke_and_ack = get_event_msg!( + nodes[1], + MessageSendEvent::SendRevokeAndACK, + nodes[0].node.get_our_node_id() + ); // No commitment_signed so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[1], 1); @@ -552,17 +768,26 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { let events_5 = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events_5.len(), 1); match events_5[0] { - Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, ref via_channel_ids, .. } => { + Event::PaymentClaimable { + ref payment_hash, + ref purpose, + amount_msat, + receiver_node_id, + ref via_channel_ids, + .. + } => { assert_eq!(payment_hash_2, *payment_hash); assert_eq!(amount_msat, 1_000_000); assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id()); assert_eq!(*via_channel_ids, vec![(channel_id, Some(user_channel_id))]); match &purpose { - PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => { + PaymentPurpose::Bolt11InvoicePayment { + payment_preimage, payment_secret, .. 
+ } => { assert!(payment_preimage.is_none()); assert_eq!(payment_secret_2, *payment_secret); }, - _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment") + _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment"), } }, _ => panic!("Unexpected event"), @@ -608,24 +833,43 @@ fn test_monitor_update_fail_cs() { let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2; let user_channel_id = nodes[1].node.list_channels()[0].user_channel_id; - let (route, our_payment_hash, payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); + let (route, our_payment_hash, payment_preimage, our_payment_secret) = + get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); { - nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), PaymentId(our_payment_hash.0)).unwrap(); + nodes[0] + .node + .send_payment_with_route( + route, + our_payment_hash, + RecipientOnionFields::secret_only(our_payment_secret), + PaymentId(our_payment_hash.0), + ) + .unwrap(); check_added_monitors!(nodes[0], 1); } - let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); + let send_event = + SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &send_event.msgs[0]); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &send_event.commitment_msg); + nodes[1].node.handle_commitment_signed_batch_test( + nodes[0].node.get_our_node_id(), + &send_event.commitment_msg, + ); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + let (latest_update, _) = nodes[1] + .chain_monitor + .latest_monitor_update_id + .lock() + .unwrap() + .get(&channel_id) + .unwrap() + .clone(); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors!(nodes[1], 0); let responses = nodes[1].node.get_and_clear_pending_msg_events(); @@ -649,7 +893,10 @@ fn test_monitor_update_fail_cs() { assert_eq!(*node_id, nodes[0].node.get_our_node_id()); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &updates.commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test( + nodes[1].node.get_our_node_id(), + &updates.commitment_signed, + ); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -658,11 +905,22 @@ fn test_monitor_update_fail_cs() { } chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + let (latest_update, _) = nodes[0] + .chain_monitor + .latest_monitor_update_id + .lock() + .unwrap() + .get(&channel_id) + .unwrap() + .clone(); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, 
latest_update); check_added_monitors!(nodes[0], 0); - let final_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + let final_raa = get_event_msg!( + nodes[0], + MessageSendEvent::SendRevokeAndACK, + nodes[1].node.get_our_node_id() + ); nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &final_raa); check_added_monitors!(nodes[1], 1); @@ -671,17 +929,26 @@ fn test_monitor_update_fail_cs() { let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); match events[0] { - Event::PaymentClaimable { payment_hash, ref purpose, amount_msat, receiver_node_id, ref via_channel_ids, .. } => { + Event::PaymentClaimable { + payment_hash, + ref purpose, + amount_msat, + receiver_node_id, + ref via_channel_ids, + .. + } => { assert_eq!(payment_hash, our_payment_hash); assert_eq!(amount_msat, 1_000_000); assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id()); assert_eq!(*via_channel_ids, vec![(channel_id, Some(user_channel_id))]); match &purpose { - PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => { + PaymentPurpose::Bolt11InvoicePayment { + payment_preimage, payment_secret, .. + } => { assert!(payment_preimage.is_none()); assert_eq!(our_payment_secret, *payment_secret); }, - _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment") + _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment"), } }, _ => panic!("Unexpected event"), @@ -701,16 +968,33 @@ fn test_monitor_update_fail_no_rebroadcast() { let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2; - let (route, our_payment_hash, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); + let (route, our_payment_hash, payment_preimage_1, payment_secret_1) = + get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); { - nodes[0].node.send_payment_with_route(route, our_payment_hash, - RecipientOnionFields::secret_only(payment_secret_1), PaymentId(our_payment_hash.0)).unwrap(); + nodes[0] + .node + .send_payment_with_route( + route, + our_payment_hash, + RecipientOnionFields::secret_only(payment_secret_1), + PaymentId(our_payment_hash.0), + ) + .unwrap(); check_added_monitors!(nodes[0], 1); } - let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); + let send_event = + SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &send_event.msgs[0]); - let bs_raa = commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true, false, true); + let bs_raa = commitment_signed_dance!( + nodes[1], + nodes[0], + send_event.commitment_msg, + false, + true, + false, + true + ); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &bs_raa); @@ -720,7 +1004,14 @@ fn test_monitor_update_fail_no_rebroadcast() { check_added_monitors!(nodes[1], 1); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + let (latest_update, _) = nodes[1] + .chain_monitor + .latest_monitor_update_id + .lock() + .unwrap() + .get(&channel_id) + .unwrap() + .clone(); 
nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 0); @@ -749,31 +1040,59 @@ fn test_monitor_update_raa_while_paused() { let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2; send_payment(&nodes[0], &[&nodes[1]], 5000000); - let (route, our_payment_hash_1, payment_preimage_1, our_payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); + let (route, our_payment_hash_1, payment_preimage_1, our_payment_secret_1) = + get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); { - nodes[0].node.send_payment_with_route(route, our_payment_hash_1, - RecipientOnionFields::secret_only(our_payment_secret_1), PaymentId(our_payment_hash_1.0)).unwrap(); + nodes[0] + .node + .send_payment_with_route( + route, + our_payment_hash_1, + RecipientOnionFields::secret_only(our_payment_secret_1), + PaymentId(our_payment_hash_1.0), + ) + .unwrap(); check_added_monitors!(nodes[0], 1); } - let send_event_1 = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); + let send_event_1 = + SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); - let (route, our_payment_hash_2, payment_preimage_2, our_payment_secret_2) = get_route_and_payment_hash!(nodes[1], nodes[0], 1000000); + let (route, our_payment_hash_2, payment_preimage_2, our_payment_secret_2) = + get_route_and_payment_hash!(nodes[1], nodes[0], 1000000); { - nodes[1].node.send_payment_with_route(route, our_payment_hash_2, - RecipientOnionFields::secret_only(our_payment_secret_2), PaymentId(our_payment_hash_2.0)).unwrap(); + nodes[1] + .node + .send_payment_with_route( + route, + our_payment_hash_2, + RecipientOnionFields::secret_only(our_payment_secret_2), + PaymentId(our_payment_hash_2.0), + ) + .unwrap(); check_added_monitors!(nodes[1], 1); } - let send_event_2 = SendEvent::from_event(nodes[1].node.get_and_clear_pending_msg_events().remove(0)); + let send_event_2 = + SendEvent::from_event(nodes[1].node.get_and_clear_pending_msg_events().remove(0)); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &send_event_1.msgs[0]); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &send_event_1.commitment_msg); + nodes[1].node.handle_commitment_signed_batch_test( + nodes[0].node.get_our_node_id(), + &send_event_1.commitment_msg, + ); check_added_monitors!(nodes[1], 1); - let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + let bs_raa = get_event_msg!( + nodes[1], + MessageSendEvent::SendRevokeAndACK, + nodes[0].node.get_our_node_id() + ); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[0].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &send_event_2.msgs[0]); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &send_event_2.commitment_msg); + nodes[0].node.handle_commitment_signed_batch_test( + nodes[1].node.get_our_node_id(), + &send_event_2.commitment_msg, + ); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -782,7 +1101,14 @@ fn test_monitor_update_raa_while_paused() { 
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[0], 1); - let (latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + let (latest_update, _) = nodes[0] + .chain_monitor + .latest_monitor_update_id + .lock() + .unwrap() + .get(&channel_id) + .unwrap() + .clone(); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors!(nodes[0], 0); @@ -791,13 +1117,26 @@ fn test_monitor_update_raa_while_paused() { check_added_monitors!(nodes[1], 1); let bs_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_update_raa.1); + nodes[1] + .node + .handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_update_raa.1); check_added_monitors!(nodes[1], 1); - let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + let bs_second_raa = get_event_msg!( + nodes[1], + MessageSendEvent::SendRevokeAndACK, + nodes[0].node.get_our_node_id() + ); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test( + nodes[1].node.get_our_node_id(), + &bs_cs.commitment_signed, + ); check_added_monitors!(nodes[0], 1); - let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + let as_second_raa = get_event_msg!( + nodes[0], + MessageSendEvent::SendRevokeAndACK, + nodes[1].node.get_our_node_id() + ); nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_second_raa); check_added_monitors!(nodes[0], 1); @@ -830,7 +1169,10 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { // Fail the payment backwards, failing the monitor update on nodes[1]'s receipt of the RAA nodes[2].node.fail_htlc_backwards(&payment_hash_1); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingFailureType::Receive { payment_hash: payment_hash_1 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[2], + vec![HTLCHandlingFailureType::Receive { payment_hash: payment_hash_1 }] + ); check_added_monitors!(nodes[2], 1); let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); @@ -839,21 +1181,40 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { assert_eq!(updates.update_fail_htlcs.len(), 1); assert!(updates.update_fail_malformed_htlcs.is_empty()); assert!(updates.update_fee.is_none()); - nodes[1].node.handle_update_fail_htlc(nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]); - - let bs_revoke_and_ack = commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false, true, false, true); + nodes[1] + .node + .handle_update_fail_htlc(nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]); + + let bs_revoke_and_ack = commitment_signed_dance!( + nodes[1], + nodes[2], + updates.commitment_signed, + false, + true, + false, + true + ); check_added_monitors!(nodes[0], 0); // While the second channel is AwaitingRAA, forward a second payment to get it into the // holding cell. 
- let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[2], 1000000); + let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = + get_route_and_payment_hash!(nodes[0], nodes[2], 1000000); { - nodes[0].node.send_payment_with_route(route, payment_hash_2, - RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap(); + nodes[0] + .node + .send_payment_with_route( + route, + payment_hash_2, + RecipientOnionFields::secret_only(payment_secret_2), + PaymentId(payment_hash_2.0), + ) + .unwrap(); check_added_monitors!(nodes[0], 1); } - let mut send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); + let mut send_event = + SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &send_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false); @@ -871,10 +1232,18 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { // Forward a third payment which will also be added to the holding cell, despite the channel // being paused waiting a monitor update. - let (route, payment_hash_3, _, payment_secret_3) = get_route_and_payment_hash!(nodes[0], nodes[2], 1000000); + let (route, payment_hash_3, _, payment_secret_3) = + get_route_and_payment_hash!(nodes[0], nodes[2], 1000000); { - nodes[0].node.send_payment_with_route(route, payment_hash_3, - RecipientOnionFields::secret_only(payment_secret_3), PaymentId(payment_hash_3.0)).unwrap(); + nodes[0] + .node + .send_payment_with_route( + route, + payment_hash_3, + RecipientOnionFields::secret_only(payment_secret_3), + PaymentId(payment_hash_3.0), + ) + .unwrap(); check_added_monitors!(nodes[0], 1); } @@ -892,26 +1261,53 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { let (payment_preimage_4, payment_hash_4) = if test_ignore_second_cs { // Try to route another payment backwards from 2 to make sure 1 holds off on responding - let (route, payment_hash_4, payment_preimage_4, payment_secret_4) = get_route_and_payment_hash!(nodes[2], nodes[0], 1000000); - nodes[2].node.send_payment_with_route(route, payment_hash_4, - RecipientOnionFields::secret_only(payment_secret_4), PaymentId(payment_hash_4.0)).unwrap(); + let (route, payment_hash_4, payment_preimage_4, payment_secret_4) = + get_route_and_payment_hash!(nodes[2], nodes[0], 1000000); + nodes[2] + .node + .send_payment_with_route( + route, + payment_hash_4, + RecipientOnionFields::secret_only(payment_secret_4), + PaymentId(payment_hash_4.0), + ) + .unwrap(); check_added_monitors!(nodes[2], 1); - send_event = SendEvent::from_event(nodes[2].node.get_and_clear_pending_msg_events().remove(0)); + send_event = + SendEvent::from_event(nodes[2].node.get_and_clear_pending_msg_events().remove(0)); nodes[1].node.handle_update_add_htlc(nodes[2].node.get_our_node_id(), &send_event.msgs[0]); - nodes[1].node.handle_commitment_signed_batch_test(nodes[2].node.get_our_node_id(), &send_event.commitment_msg); + nodes[1].node.handle_commitment_signed_batch_test( + nodes[2].node.get_our_node_id(), + &send_event.commitment_msg, + ); check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); (Some(payment_preimage_4), Some(payment_hash_4)) - } else { (None, None) }; + } else { + (None, None) + }; // Restore monitor updating, ensuring we immediately get a fail-back update and a // update_add 
update. chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2.2).unwrap().clone(); + let (latest_update, _) = nodes[1] + .chain_monitor + .latest_monitor_update_id + .lock() + .unwrap() + .get(&chan_2.2) + .unwrap() + .clone(); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_2.2, latest_update); check_added_monitors!(nodes[1], 0); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[1], + vec![HTLCHandlingFailureType::Forward { + node_id: Some(nodes[2].node.get_our_node_id()), + channel_id: chan_2.2 + }] + ); check_added_monitors!(nodes[1], 1); let mut events_3 = nodes[1].node.get_and_clear_pending_msg_events(); @@ -923,7 +1319,8 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { // Note that the ordering of the events for different nodes is non-prescriptive, though the // ordering of the two events that both go to nodes[2] have to stay in the same order. - let nodes_0_event = remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut events_3); + let nodes_0_event = + remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut events_3); let messages_a = match nodes_0_event { MessageSendEvent::UpdateHTLCs { node_id, mut updates, channel_id: _ } => { assert_eq!(node_id, nodes[0].node.get_our_node_id()); @@ -937,12 +1334,14 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { _ => panic!("Unexpected event type!"), }; - let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events_3); + let nodes_2_event = + remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events_3); let send_event_b = SendEvent::from_event(nodes_2_event); assert_eq!(send_event_b.node_id, nodes[2].node.get_our_node_id()); let raa = if test_ignore_second_cs { - let nodes_2_event = remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events_3); + let nodes_2_event = + remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events_3); match nodes_2_event { MessageSendEvent::SendRevokeAndACK { node_id, msg } => { assert_eq!(node_id, nodes[2].node.get_our_node_id()); @@ -950,7 +1349,9 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { }, _ => panic!("Unexpected event"), } - } else { None }; + } else { + None + }; // Now deliver the new messages... 
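// A few lines above, completing the stalled persist meant reading the
// channel's highest in-flight update id out of `latest_monitor_update_id`
// and handing exactly that id to `force_channel_monitor_updated`. A hedged,
// self-contained sketch of that bookkeeping (the types below are invented
// for illustration, not LDK's):

use std::collections::HashMap;

#[derive(Default)]
struct MonitorUpdateTracker {
	// channel id -> (latest update id issued, latest update id persisted)
	ids: HashMap<u32, (u64, u64)>,
}

impl MonitorUpdateTracker {
	// Called when an update is handed to a persister that may complete async.
	fn update_issued(&mut self, chan: u32) -> u64 {
		let entry = self.ids.entry(chan).or_insert((0, 0));
		entry.0 += 1;
		entry.0
	}

	// Analogue of `force_channel_monitor_updated`: mark `update_id` durable.
	fn update_persisted(&mut self, chan: u32, update_id: u64) {
		if let Some(entry) = self.ids.get_mut(&chan) {
			entry.1 = entry.1.max(update_id);
		}
	}

	// The channel is unblocked only once everything issued is also persisted.
	fn unblocked(&self, chan: u32) -> bool {
		self.ids.get(&chan).map_or(true, |&(issued, persisted)| issued == persisted)
	}
}

fn main() {
	let mut tracker = MonitorUpdateTracker::default();
	let latest = tracker.update_issued(42);
	assert!(!tracker.unblocked(42)); // persister still "InProgress"
	tracker.update_persisted(42, latest); // force-complete the latest update
	assert!(tracker.unblocked(42));
}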
@@ -961,9 +1362,16 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &send_event_b.msgs[0]); let as_cs; if test_ignore_second_cs { - nodes[2].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &send_event_b.commitment_msg); + nodes[2].node.handle_commitment_signed_batch_test( + nodes[1].node.get_our_node_id(), + &send_event_b.commitment_msg, + ); check_added_monitors!(nodes[2], 1); - let bs_revoke_and_ack = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + let bs_revoke_and_ack = get_event_msg!( + nodes[2], + MessageSendEvent::SendRevokeAndACK, + nodes[1].node.get_our_node_id() + ); nodes[2].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &raa.unwrap()); check_added_monitors!(nodes[2], 1); let bs_cs = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); @@ -977,10 +1385,16 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { check_added_monitors!(nodes[1], 1); as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id()); - nodes[1].node.handle_commitment_signed_batch_test(nodes[2].node.get_our_node_id(), &bs_cs.commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test( + nodes[2].node.get_our_node_id(), + &bs_cs.commitment_signed, + ); check_added_monitors!(nodes[1], 1); } else { - nodes[2].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &send_event_b.commitment_msg); + nodes[2].node.handle_commitment_signed_batch_test( + nodes[1].node.get_our_node_id(), + &send_event_b.commitment_msg, + ); check_added_monitors!(nodes[2], 1); let bs_revoke_and_commit = nodes[2].node.get_and_clear_pending_msg_events(); @@ -1005,7 +1419,10 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { assert!(updates.update_fail_malformed_htlcs.is_empty()); assert!(updates.update_fulfill_htlcs.is_empty()); assert!(updates.update_fee.is_none()); - nodes[1].node.handle_commitment_signed_batch_test(nodes[2].node.get_our_node_id(), &updates.commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test( + nodes[2].node.get_our_node_id(), + &updates.commitment_signed, + ); check_added_monitors!(nodes[1], 1); }, _ => panic!("Unexpected event"), @@ -1017,13 +1434,25 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { assert!(as_cs.update_fail_malformed_htlcs.is_empty()); assert!(as_cs.update_fulfill_htlcs.is_empty()); assert!(as_cs.update_fee.is_none()); - let as_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id()); - + let as_raa = get_event_msg!( + nodes[1], + MessageSendEvent::SendRevokeAndACK, + nodes[2].node.get_our_node_id() + ); - nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &as_cs.update_add_htlcs[0]); - nodes[2].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &as_cs.commitment_signed); + nodes[2] + .node + .handle_update_add_htlc(nodes[1].node.get_our_node_id(), &as_cs.update_add_htlcs[0]); + nodes[2].node.handle_commitment_signed_batch_test( + nodes[1].node.get_our_node_id(), + &as_cs.commitment_signed, + ); check_added_monitors!(nodes[2], 1); - let bs_second_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + let bs_second_raa = get_event_msg!( + nodes[2], + MessageSendEvent::SendRevokeAndACK, + nodes[1].node.get_our_node_id() + ); 
nodes[2].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &as_raa); check_added_monitors!(nodes[2], 1); @@ -1033,9 +1462,16 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - nodes[1].node.handle_commitment_signed_batch_test(nodes[2].node.get_our_node_id(), &bs_second_cs.commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test( + nodes[2].node.get_our_node_id(), + &bs_second_cs.commitment_signed, + ); check_added_monitors!(nodes[1], 1); - let as_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id()); + let as_second_raa = get_event_msg!( + nodes[1], + MessageSendEvent::SendRevokeAndACK, + nodes[2].node.get_our_node_id() + ); nodes[2].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &as_second_raa); check_added_monitors!(nodes[2], 1); @@ -1046,11 +1482,15 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { let events_6 = nodes[2].node.get_and_clear_pending_events(); assert_eq!(events_6.len(), 2); match events_6[0] { - Event::PaymentClaimable { payment_hash, .. } => { assert_eq!(payment_hash, payment_hash_2); }, + Event::PaymentClaimable { payment_hash, .. } => { + assert_eq!(payment_hash, payment_hash_2); + }, _ => panic!("Unexpected event"), }; match events_6[1] { - Event::PaymentClaimable { payment_hash, .. } => { assert_eq!(payment_hash, payment_hash_3); }, + Event::PaymentClaimable { payment_hash, .. } => { + assert_eq!(payment_hash, payment_hash_3); + }, _ => panic!("Unexpected event"), }; @@ -1069,7 +1509,9 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { let events_9 = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events_9.len(), 1); match events_9[0] { - Event::PaymentClaimable { payment_hash, .. } => assert_eq!(payment_hash, payment_hash_4.unwrap()), + Event::PaymentClaimable { payment_hash, .. } => { + assert_eq!(payment_hash, payment_hash_4.unwrap()) + }, _ => panic!("Unexpected event"), }; claim_payment(&nodes[2], &[&nodes[1], &nodes[0]], payment_preimage_4.unwrap()); @@ -1096,7 +1538,8 @@ fn test_monitor_update_fail_reestablish() { let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); create_announced_chan_between_nodes(&nodes, 1, 2); - let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000); + let (payment_preimage, payment_hash, ..) 
= + route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000); nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); @@ -1111,19 +1554,40 @@ fn test_monitor_update_fail_reestablish() { assert!(updates.update_fail_malformed_htlcs.is_empty()); assert!(updates.update_fee.is_none()); assert_eq!(updates.update_fulfill_htlcs.len(), 1); - nodes[1].node.handle_update_fulfill_htlc(nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); + nodes[1].node.handle_update_fulfill_htlc( + nodes[2].node.get_our_node_id(), + &updates.update_fulfill_htlcs[0], + ); expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - nodes[0].node.peer_connected(nodes[1].node.get_our_node_id(), &msgs::Init { - features: nodes[1].node.init_features(), networks: None, remote_network_address: None - }, true).unwrap(); - nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init { - features: nodes[0].node.init_features(), networks: None, remote_network_address: None - }, false).unwrap(); + nodes[0] + .node + .peer_connected( + nodes[1].node.get_our_node_id(), + &msgs::Init { + features: nodes[1].node.init_features(), + networks: None, + remote_network_address: None, + }, + true, + ) + .unwrap(); + nodes[1] + .node + .peer_connected( + nodes[0].node.get_our_node_id(), + &msgs::Init { + features: nodes[0].node.init_features(), + networks: None, + remote_network_address: None, + }, + false, + ) + .unwrap(); let as_reestablish = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(); let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(); @@ -1132,8 +1596,16 @@ fn test_monitor_update_fail_reestablish() { nodes[1].node.handle_channel_reestablish(nodes[0].node.get_our_node_id(), &as_reestablish); assert_eq!( - get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id()) - .contents.channel_flags & 2, 0); // The "disabled" bit should be unset as we just reconnected + get_event_msg!( + nodes[0], + MessageSendEvent::SendChannelUpdate, + nodes[1].node.get_our_node_id() + ) + .contents + .channel_flags + & 2, + 0 + ); // The "disabled" bit should be unset as we just reconnected nodes[1].node.get_and_clear_pending_msg_events(); // Free the holding cell check_added_monitors!(nodes[1], 1); @@ -1141,29 +1613,70 @@ fn test_monitor_update_fail_reestablish() { nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[0].node.peer_connected(nodes[1].node.get_our_node_id(), &msgs::Init { - features: nodes[1].node.init_features(), networks: None, remote_network_address: None - }, true).unwrap(); - nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init { - features: nodes[0].node.init_features(), networks: None, remote_network_address: None - }, false).unwrap(); + nodes[0] + .node + .peer_connected( + nodes[1].node.get_our_node_id(), + &msgs::Init { + features: nodes[1].node.init_features(), + networks: None, + remote_network_address: None, + }, + true, + ) + .unwrap(); + nodes[1] + .node + .peer_connected( + nodes[0].node.get_our_node_id(), + &msgs::Init { + features: 
nodes[0].node.init_features(), + networks: None, + remote_network_address: None, + }, + false, + ) + .unwrap(); assert_eq!(get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(), as_reestablish); assert_eq!(get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(), bs_reestablish); nodes[0].node.handle_channel_reestablish(nodes[1].node.get_our_node_id(), &bs_reestablish); assert_eq!( - get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id()) - .contents.channel_flags & 2, 0); // The "disabled" bit should be unset as we just reconnected + get_event_msg!( + nodes[0], + MessageSendEvent::SendChannelUpdate, + nodes[1].node.get_our_node_id() + ) + .contents + .channel_flags + & 2, + 0 + ); // The "disabled" bit should be unset as we just reconnected nodes[1].node.handle_channel_reestablish(nodes[0].node.get_our_node_id(), &as_reestablish); check_added_monitors!(nodes[1], 0); assert_eq!( - get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id()) - .contents.channel_flags & 2, 0); // The "disabled" bit should be unset as we just reconnected + get_event_msg!( + nodes[1], + MessageSendEvent::SendChannelUpdate, + nodes[0].node.get_our_node_id() + ) + .contents + .channel_flags + & 2, + 0 + ); // The "disabled" bit should be unset as we just reconnected chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone(); + let (latest_update, _) = nodes[1] + .chain_monitor + .latest_monitor_update_id + .lock() + .unwrap() + .get(&chan_1.2) + .unwrap() + .clone(); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_1.2, latest_update); check_added_monitors!(nodes[1], 0); @@ -1173,7 +1686,10 @@ fn test_monitor_update_fail_reestablish() { assert!(updates.update_fail_malformed_htlcs.is_empty()); assert!(updates.update_fee.is_none()); assert_eq!(updates.update_fulfill_htlcs.len(), 1); - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); + nodes[0].node.handle_update_fulfill_htlc( + nodes[1].node.get_our_node_id(), + &updates.update_fulfill_htlcs[0], + ); commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false); expect_payment_sent!(nodes[0], payment_preimage); } @@ -1190,9 +1706,12 @@ fn raa_no_response_awaiting_raa_state() { let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2; - let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); - let (payment_preimage_2, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(nodes[1]); - let (payment_preimage_3, payment_hash_3, payment_secret_3) = get_payment_preimage_hash!(nodes[1]); + let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = + get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); + let (payment_preimage_2, payment_hash_2, payment_secret_2) = + get_payment_preimage_hash!(nodes[1]); + let (payment_preimage_3, payment_hash_3, payment_secret_3) = + get_payment_preimage_hash!(nodes[1]); // Queue up two payments - one will be delivered right away, one immediately goes into the // holding cell as nodes[0] is AwaitingRAA. 
Ultimately this allows us to deliver an RAA @@ -1200,11 +1719,25 @@ fn raa_no_response_awaiting_raa_state() { // requires only an RAA response due to AwaitingRAA) we can deliver the RAA and require the CS // generation during RAA while in monitor-update-failed state. { - nodes[0].node.send_payment_with_route(route.clone(), payment_hash_1, - RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap(); + nodes[0] + .node + .send_payment_with_route( + route.clone(), + payment_hash_1, + RecipientOnionFields::secret_only(payment_secret_1), + PaymentId(payment_hash_1.0), + ) + .unwrap(); check_added_monitors!(nodes[0], 1); - nodes[0].node.send_payment_with_route(route.clone(), payment_hash_2, - RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap(); + nodes[0] + .node + .send_payment_with_route( + route.clone(), + payment_hash_2, + RecipientOnionFields::secret_only(payment_secret_2), + PaymentId(payment_hash_2.0), + ) + .unwrap(); check_added_monitors!(nodes[0], 0); } @@ -1212,7 +1745,10 @@ fn raa_no_response_awaiting_raa_state() { assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &payment_event.commitment_msg); + nodes[1].node.handle_commitment_signed_batch_test( + nodes[0].node.get_our_node_id(), + &payment_event.commitment_msg, + ); check_added_monitors!(nodes[1], 1); let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); @@ -1222,9 +1758,15 @@ fn raa_no_response_awaiting_raa_state() { assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_responses.1); + nodes[0] + .node + .handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_responses.1); check_added_monitors!(nodes[0], 1); - let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + let as_raa = get_event_msg!( + nodes[0], + MessageSendEvent::SendRevokeAndACK, + nodes[1].node.get_our_node_id() + ); // Now we have a CS queued up which adds a new HTLC (which will need a RAA/CS response from // nodes[1]) followed by an RAA. 
Fail the monitor updating prior to the CS, deliver the RAA, @@ -1232,7 +1774,10 @@ fn raa_no_response_awaiting_raa_state() { chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &payment_event.commitment_msg); + nodes[1].node.handle_commitment_signed_batch_test( + nodes[0].node.get_our_node_id(), + &payment_event.commitment_msg, + ); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -1241,7 +1786,14 @@ fn raa_no_response_awaiting_raa_state() { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 1); - let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + let (latest_update, _) = nodes[1] + .chain_monitor + .latest_monitor_update_id + .lock() + .unwrap() + .get(&channel_id) + .unwrap() + .clone(); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); // nodes[1] should be AwaitingRAA here! check_added_monitors!(nodes[1], 0); @@ -1253,8 +1805,15 @@ fn raa_no_response_awaiting_raa_state() { // chanmon_fail_consistency test required it to actually find the bug (by seeing out-of-sync // commitment transaction states) whereas here we can explicitly check for it. { - nodes[0].node.send_payment_with_route(route, payment_hash_3, - RecipientOnionFields::secret_only(payment_secret_3), PaymentId(payment_hash_3.0)).unwrap(); + nodes[0] + .node + .send_payment_with_route( + route, + payment_hash_3, + RecipientOnionFields::secret_only(payment_secret_3), + PaymentId(payment_hash_3.0), + ) + .unwrap(); check_added_monitors!(nodes[0], 0); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); } @@ -1264,14 +1823,27 @@ fn raa_no_response_awaiting_raa_state() { assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_responses.1); + nodes[0] + .node + .handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_responses.1); check_added_monitors!(nodes[0], 1); - let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + let as_raa = get_event_msg!( + nodes[0], + MessageSendEvent::SendRevokeAndACK, + nodes[1].node.get_our_node_id() + ); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &payment_event.commitment_msg); + nodes[1].node.handle_commitment_signed_batch_test( + nodes[0].node.get_our_node_id(), + &payment_event.commitment_msg, + ); check_added_monitors!(nodes[1], 1); - let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + let bs_raa = get_event_msg!( + nodes[1], + MessageSendEvent::SendRevokeAndACK, + nodes[0].node.get_our_node_id() + ); // Finally deliver the RAA to nodes[1] which results in a CS response to the last update nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_raa); @@ -1283,9 +1855,16 @@ fn 
raa_no_response_awaiting_raa_state() { nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_raa); check_added_monitors!(nodes[0], 1); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_update.commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test( + nodes[1].node.get_our_node_id(), + &bs_update.commitment_signed, + ); check_added_monitors!(nodes[0], 1); - let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + let as_raa = get_event_msg!( + nodes[0], + MessageSendEvent::SendRevokeAndACK, + nodes[1].node.get_our_node_id() + ); nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_raa); check_added_monitors!(nodes[1], 1); @@ -1312,7 +1891,8 @@ fn claim_while_disconnected_monitor_update_fail() { let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2; // Forward a payment for B to claim - let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000); + let (payment_preimage_1, payment_hash_1, ..) = + route_payment(&nodes[0], &[&nodes[1]], 1_000_000); nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); @@ -1321,40 +1901,79 @@ fn claim_while_disconnected_monitor_update_fail() { check_added_monitors!(nodes[1], 1); expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); - nodes[0].node.peer_connected(nodes[1].node.get_our_node_id(), &msgs::Init { - features: nodes[1].node.init_features(), networks: None, remote_network_address: None - }, true).unwrap(); - nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init { - features: nodes[0].node.init_features(), networks: None, remote_network_address: None - }, false).unwrap(); + nodes[0] + .node + .peer_connected( + nodes[1].node.get_our_node_id(), + &msgs::Init { + features: nodes[1].node.init_features(), + networks: None, + remote_network_address: None, + }, + true, + ) + .unwrap(); + nodes[1] + .node + .peer_connected( + nodes[0].node.get_our_node_id(), + &msgs::Init { + features: nodes[0].node.init_features(), + networks: None, + remote_network_address: None, + }, + false, + ) + .unwrap(); let as_reconnect = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(); let bs_reconnect = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(); nodes[0].node.handle_channel_reestablish(nodes[1].node.get_our_node_id(), &bs_reconnect); - let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id()); + let _as_channel_update = get_event_msg!( + nodes[0], + MessageSendEvent::SendChannelUpdate, + nodes[1].node.get_our_node_id() + ); // Now deliver a's reestablish, freeing the claim from the holding cell, but fail the monitor // update. 
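To restate what the comment above sets up: the fulfill sits in the holding cell because the claim happened while the peers were disconnected, so there was no live connection to send the update_fulfill_htlc over. A minimal sketch of that sequence, reusing the harness calls from the surrounding hunks (an illustrative sketch assuming the usual functional_test_utils context):

    // Claiming while disconnected queues the fulfill rather than sending it.
    nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id());
    nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
    nodes[1].node.claim_funds(payment_preimage_1);
    check_added_monitors!(nodes[1], 1);
    expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000);
    // On reconnect, delivering channel_reestablish frees the queued fulfill;
    // if the monitor update that triggers returns InProgress, the message is
    // held again until the update completes, which the lines below exercise.
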
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[1].node.handle_channel_reestablish(nodes[0].node.get_our_node_id(), &as_reconnect); - let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id()); + let _bs_channel_update = get_event_msg!( + nodes[1], + MessageSendEvent::SendChannelUpdate, + nodes[0].node.get_our_node_id() + ); check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); // Send a second payment from A to B, resulting in a commitment update that gets swallowed with // the monitor still failed - let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); + let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = + get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); { - nodes[0].node.send_payment_with_route(route, payment_hash_2, - RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap(); + nodes[0] + .node + .send_payment_with_route( + route, + payment_hash_2, + RecipientOnionFields::secret_only(payment_secret_2), + PaymentId(payment_hash_2.0), + ) + .unwrap(); check_added_monitors!(nodes[0], 1); } let as_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &as_updates.update_add_htlcs[0]); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_updates.commitment_signed); + nodes[1] + .node + .handle_update_add_htlc(nodes[0].node.get_our_node_id(), &as_updates.update_add_htlcs[0]); + nodes[1].node.handle_commitment_signed_batch_test( + nodes[0].node.get_our_node_id(), + &as_updates.commitment_signed, + ); check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); // Note that nodes[1] not updating monitor here is OK - it wont take action on the new HTLC @@ -1363,7 +1982,14 @@ fn claim_while_disconnected_monitor_update_fail() { // Now un-fail the monitor, which will result in B sending its original commitment update, // receiving the commitment update from A, and the resulting commitment dances. 
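The un-fail step the comment describes is the same three-part dance the tests in this file all lean on: flag persistence as in-flight, later flag it complete, then poke the ChainMonitor with the last update id it handed out. Distilled into a standalone skeleton (an illustrative sketch built only from the functional_test_utils helpers already used throughout this file):

    #[test]
    fn monitor_update_completion_sketch() {
        let chanmon_cfgs = create_chanmon_cfgs(2);
        let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
        let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
        let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
        let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2;

        // 1) Report monitor persistence as still in flight. Messages delivered
        //    now are absorbed and the node's responses are queued internally.
        chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);

        // 2) Let future persistence calls succeed again...
        chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);

        // 3) ...and mark the highest in-flight update for this channel as
        //    persisted, which releases the queued messages.
        let (latest_update, _) = nodes[1]
            .chain_monitor
            .latest_monitor_update_id
            .lock()
            .unwrap()
            .get(&channel_id)
            .unwrap()
            .clone();
        nodes[1]
            .chain_monitor
            .chain_monitor
            .force_channel_monitor_updated(channel_id, latest_update);
        check_added_monitors!(nodes[1], 0);
    }
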
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + let (latest_update, _) = nodes[1] + .chain_monitor + .latest_monitor_update_id + .lock() + .unwrap() + .get(&channel_id) + .unwrap() + .clone(); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors!(nodes[1], 0); @@ -1373,12 +1999,22 @@ fn claim_while_disconnected_monitor_update_fail() { match bs_msgs[0] { MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, ref updates } => { assert_eq!(*node_id, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); + nodes[0].node.handle_update_fulfill_htlc( + nodes[1].node.get_our_node_id(), + &updates.update_fulfill_htlcs[0], + ); expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &updates.commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test( + nodes[1].node.get_our_node_id(), + &updates.commitment_signed, + ); check_added_monitors!(nodes[0], 1); - let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + let as_raa = get_event_msg!( + nodes[0], + MessageSendEvent::SendRevokeAndACK, + nodes[1].node.get_our_node_id() + ); nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_raa); check_added_monitors!(nodes[1], 1); }, @@ -1397,13 +2033,27 @@ fn claim_while_disconnected_monitor_update_fail() { let as_commitment = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); let bs_commitment = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_commitment.commitment_signed); + nodes[0].node.handle_commitment_signed_batch_test( + nodes[1].node.get_our_node_id(), + &bs_commitment.commitment_signed, + ); check_added_monitors!(nodes[0], 1); - let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + let as_raa = get_event_msg!( + nodes[0], + MessageSendEvent::SendRevokeAndACK, + nodes[1].node.get_our_node_id() + ); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_commitment.commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test( + nodes[0].node.get_our_node_id(), + &as_commitment.commitment_signed, + ); check_added_monitors!(nodes[1], 1); - let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + let bs_raa = get_event_msg!( + nodes[1], + MessageSendEvent::SendRevokeAndACK, + nodes[0].node.get_our_node_id() + ); nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_raa); check_added_monitors!(nodes[1], 1); @@ -1431,20 +2081,44 @@ fn monitor_failed_no_reestablish_response() { { let mut node_0_per_peer_lock; let mut node_0_peer_state_lock; - get_channel_ref!(nodes[0], nodes[1], node_0_per_peer_lock, node_0_peer_state_lock, channel_id).context_mut().announcement_sigs_state = AnnouncementSigsState::PeerReceived; + get_channel_ref!( + nodes[0], + nodes[1], + node_0_per_peer_lock, + node_0_peer_state_lock, + channel_id + ) + .context_mut() + .announcement_sigs_state = AnnouncementSigsState::PeerReceived; } { let mut 
node_1_per_peer_lock; let mut node_1_peer_state_lock; - get_channel_ref!(nodes[1], nodes[0], node_1_per_peer_lock, node_1_peer_state_lock, channel_id).context_mut().announcement_sigs_state = AnnouncementSigsState::PeerReceived; + get_channel_ref!( + nodes[1], + nodes[0], + node_1_per_peer_lock, + node_1_peer_state_lock, + channel_id + ) + .context_mut() + .announcement_sigs_state = AnnouncementSigsState::PeerReceived; } // Route the payment and deliver the initial commitment_signed (with a monitor update failure // on receipt). - let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); + let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = + get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); { - nodes[0].node.send_payment_with_route(route, payment_hash_1, - RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap(); + nodes[0] + .node + .send_payment_with_route( + route, + payment_hash_1, + RecipientOnionFields::secret_only(payment_secret_1), + PaymentId(payment_hash_1.0), + ) + .unwrap(); check_added_monitors!(nodes[0], 1); } @@ -1453,7 +2127,10 @@ fn monitor_failed_no_reestablish_response() { assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &payment_event.commitment_msg); + nodes[1].node.handle_commitment_signed_batch_test( + nodes[0].node.get_our_node_id(), + &payment_event.commitment_msg, + ); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 1); @@ -1462,33 +2139,72 @@ fn monitor_failed_no_reestablish_response() { nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); - nodes[0].node.peer_connected(nodes[1].node.get_our_node_id(), &msgs::Init { - features: nodes[1].node.init_features(), networks: None, remote_network_address: None - }, true).unwrap(); - nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init { - features: nodes[0].node.init_features(), networks: None, remote_network_address: None - }, false).unwrap(); + nodes[0] + .node + .peer_connected( + nodes[1].node.get_our_node_id(), + &msgs::Init { + features: nodes[1].node.init_features(), + networks: None, + remote_network_address: None, + }, + true, + ) + .unwrap(); + nodes[1] + .node + .peer_connected( + nodes[0].node.get_our_node_id(), + &msgs::Init { + features: nodes[0].node.init_features(), + networks: None, + remote_network_address: None, + }, + false, + ) + .unwrap(); let as_reconnect = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(); let bs_reconnect = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(); nodes[1].node.handle_channel_reestablish(nodes[0].node.get_our_node_id(), &as_reconnect); - let _bs_channel_update = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id()); + let _bs_channel_update = get_event_msg!( + nodes[1], + MessageSendEvent::SendChannelUpdate, + nodes[0].node.get_our_node_id() + ); nodes[0].node.handle_channel_reestablish(nodes[1].node.get_our_node_id(), &bs_reconnect); - let _as_channel_update = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id()); + let _as_channel_update = get_event_msg!( + nodes[0], 
+ MessageSendEvent::SendChannelUpdate, + nodes[1].node.get_our_node_id() + ); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + let (latest_update, _) = nodes[1] + .chain_monitor + .latest_monitor_update_id + .lock() + .unwrap() + .get(&channel_id) + .unwrap() + .clone(); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors!(nodes[1], 0); let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_responses.0); check_added_monitors!(nodes[0], 1); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_responses.1); + nodes[0] + .node + .handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_responses.1); check_added_monitors!(nodes[0], 1); - let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + let as_raa = get_event_msg!( + nodes[0], + MessageSendEvent::SendRevokeAndACK, + nodes[1].node.get_our_node_id() + ); nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_raa); check_added_monitors!(nodes[1], 1); @@ -1518,10 +2234,18 @@ fn first_message_on_recv_ordering() { // Route the first payment outbound, holding the last RAA for B until we are set up so that we // can deliver it and fail the monitor update. - let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); + let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = + get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); { - nodes[0].node.send_payment_with_route(route, payment_hash_1, - RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap(); + nodes[0] + .node + .send_payment_with_route( + route, + payment_hash_1, + RecipientOnionFields::secret_only(payment_secret_1), + PaymentId(payment_hash_1.0), + ) + .unwrap(); check_added_monitors!(nodes[0], 1); } @@ -1530,22 +2254,39 @@ fn first_message_on_recv_ordering() { let payment_event = SendEvent::from_event(events.pop().unwrap()); assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id()); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &payment_event.commitment_msg); + nodes[1].node.handle_commitment_signed_batch_test( + nodes[0].node.get_our_node_id(), + &payment_event.commitment_msg, + ); check_added_monitors!(nodes[1], 1); let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_responses.0); check_added_monitors!(nodes[0], 1); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_responses.1); + nodes[0] + .node + .handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_responses.1); check_added_monitors!(nodes[0], 1); - let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + let as_raa = get_event_msg!( + nodes[0], + MessageSendEvent::SendRevokeAndACK, + nodes[1].node.get_our_node_id() + ); // Route the second payment, generating an update_add_htlc/commitment_signed - let (route, 
payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); + let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = + get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); { - nodes[0].node.send_payment_with_route(route, payment_hash_2, - RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap(); + nodes[0] + .node + .send_payment_with_route( + route, + payment_hash_2, + RecipientOnionFields::secret_only(payment_secret_2), + PaymentId(payment_hash_2.0), + ) + .unwrap(); check_added_monitors!(nodes[0], 1); } let mut events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -1566,12 +2307,22 @@ fn first_message_on_recv_ordering() { // RAA/CS response, which should be generated when we call channel_monitor_update (with the // appropriate HTLC acceptance). nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &payment_event.commitment_msg); + nodes[1].node.handle_commitment_signed_batch_test( + nodes[0].node.get_our_node_id(), + &payment_event.commitment_msg, + ); check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + let (latest_update, _) = nodes[1] + .chain_monitor + .latest_monitor_update_id + .lock() + .unwrap() + .get(&channel_id) + .unwrap() + .clone(); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors!(nodes[1], 0); @@ -1581,10 +2332,16 @@ fn first_message_on_recv_ordering() { let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_responses.0); check_added_monitors!(nodes[0], 1); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_responses.1); + nodes[0] + .node + .handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_responses.1); check_added_monitors!(nodes[0], 1); - let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); + let as_raa = get_event_msg!( + nodes[0], + MessageSendEvent::SendRevokeAndACK, + nodes[1].node.get_our_node_id() + ); nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_raa); check_added_monitors!(nodes[1], 1); @@ -1612,7 +2369,8 @@ fn test_monitor_update_fail_claim() { // Rebalance a bit so that we can send backwards from 3 to 2. send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5000000); - let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000); + let (payment_preimage_1, payment_hash_1, ..) = + route_payment(&nodes[0], &[&nodes[1]], 1_000_000); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); // As long as the preimage isn't on-chain, we shouldn't expose the `PaymentClaimed` event to @@ -1627,10 +2385,18 @@ fn test_monitor_update_fail_claim() { // already-signed commitment transaction and will instead wait for it to resolve before // forwarding the payment onwards. 
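In other words, "forwardable" and "forwarded" are deliberately decoupled while an update is pending: the HTLC is accepted and queued, but nothing goes out until the monitor catches up. The shape of the check, condensed from the calls in the hunks below (an illustrative sketch, same harness context as above):

    // The HTLC reaches nodes[1] and is marked forwardable, but no forward is
    // generated while the earlier claim's monitor update is still InProgress.
    expect_pending_htlcs_forwardable_ignore!(nodes[1]);
    assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
    // Once force_channel_monitor_updated(...) completes the update, an explicit
    // pass over the pending forwards emits the held update_add_htlcs.
    nodes[1].node.process_pending_htlc_forwards();
    check_added_monitors!(nodes[1], 1);
    let bs_forward_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
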
- let (route, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(nodes[2], nodes[0], 1_000_000); + let (route, payment_hash_2, _, payment_secret_2) = + get_route_and_payment_hash!(nodes[2], nodes[0], 1_000_000); { - nodes[2].node.send_payment_with_route(route.clone(), payment_hash_2, - RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap(); + nodes[2] + .node + .send_payment_with_route( + route.clone(), + payment_hash_2, + RecipientOnionFields::secret_only(payment_secret_2), + PaymentId(payment_hash_2.0), + ) + .unwrap(); check_added_monitors!(nodes[2], 1); } @@ -1648,8 +2414,15 @@ fn test_monitor_update_fail_claim() { expect_pending_htlcs_forwardable_ignore!(nodes[1]); let (_, payment_hash_3, payment_secret_3) = get_payment_preimage_hash!(nodes[0]); - nodes[2].node.send_payment_with_route(route, payment_hash_3, - RecipientOnionFields::secret_only(payment_secret_3), PaymentId(payment_hash_3.0)).unwrap(); + nodes[2] + .node + .send_payment_with_route( + route, + payment_hash_3, + RecipientOnionFields::secret_only(payment_secret_3), + PaymentId(payment_hash_3.0), + ) + .unwrap(); check_added_monitors!(nodes[2], 1); let mut events = nodes[2].node.get_and_clear_pending_msg_events(); @@ -1662,13 +2435,23 @@ fn test_monitor_update_fail_claim() { // Now restore monitor updating on the 0<->1 channel and claim the funds on B. let channel_id = chan_1.2; - let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + let (latest_update, _) = nodes[1] + .chain_monitor + .latest_monitor_update_id + .lock() + .unwrap() + .get(&channel_id) + .unwrap() + .clone(); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); check_added_monitors!(nodes[1], 0); let bs_fulfill_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_fulfill_update.update_fulfill_htlcs[0]); + nodes[0].node.handle_update_fulfill_htlc( + nodes[1].node.get_our_node_id(), + &bs_fulfill_update.update_fulfill_htlcs[0], + ); commitment_signed_dance!(nodes[0], nodes[1], bs_fulfill_update.commitment_signed, false); expect_payment_sent!(nodes[0], payment_preimage_1); @@ -1676,41 +2459,65 @@ fn test_monitor_update_fail_claim() { nodes[1].node.process_pending_htlc_forwards(); check_added_monitors!(nodes[1], 1); let bs_forward_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[0]); - nodes[0].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &bs_forward_update.update_add_htlcs[1]); + nodes[0].node.handle_update_add_htlc( + nodes[1].node.get_our_node_id(), + &bs_forward_update.update_add_htlcs[0], + ); + nodes[0].node.handle_update_add_htlc( + nodes[1].node.get_our_node_id(), + &bs_forward_update.update_add_htlcs[1], + ); commitment_signed_dance!(nodes[0], nodes[1], bs_forward_update.commitment_signed, false); expect_pending_htlcs_forwardable!(nodes[0]); let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); match events[0] { - Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, ref via_channel_ids, .. 
} => { + Event::PaymentClaimable { + ref payment_hash, + ref purpose, + amount_msat, + receiver_node_id, + ref via_channel_ids, + .. + } => { assert_eq!(payment_hash_2, *payment_hash); assert_eq!(1_000_000, amount_msat); assert_eq!(receiver_node_id.unwrap(), nodes[0].node.get_our_node_id()); assert_eq!(*via_channel_ids.last().unwrap(), (channel_id, Some(42))); match &purpose { - PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => { + PaymentPurpose::Bolt11InvoicePayment { + payment_preimage, payment_secret, .. + } => { assert!(payment_preimage.is_none()); assert_eq!(payment_secret_2, *payment_secret); }, - _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment") + _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment"), } }, _ => panic!("Unexpected event"), } match events[1] { - Event::PaymentClaimable { ref payment_hash, ref purpose, amount_msat, receiver_node_id, ref via_channel_ids, .. } => { + Event::PaymentClaimable { + ref payment_hash, + ref purpose, + amount_msat, + receiver_node_id, + ref via_channel_ids, + .. + } => { assert_eq!(payment_hash_3, *payment_hash); assert_eq!(1_000_000, amount_msat); assert_eq!(receiver_node_id.unwrap(), nodes[0].node.get_our_node_id()); assert_eq!(*via_channel_ids, vec![(channel_id, Some(42))]); match &purpose { - PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. } => { + PaymentPurpose::Bolt11InvoicePayment { + payment_preimage, payment_secret, .. + } => { assert!(payment_preimage.is_none()); assert_eq!(payment_secret_3, *payment_secret); }, - _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment") + _ => panic!("expected PaymentPurpose::Bolt11InvoicePayment"), } }, _ => panic!("Unexpected event"), @@ -1735,18 +2542,32 @@ fn test_monitor_update_on_pending_forwards() { let (_, payment_hash_1, ..) 
= route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1000000); nodes[2].node.fail_htlc_backwards(&payment_hash_1); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingFailureType::Receive { payment_hash: payment_hash_1 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[2], + vec![HTLCHandlingFailureType::Receive { payment_hash: payment_hash_1 }] + ); check_added_monitors!(nodes[2], 1); let cs_fail_update = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); - nodes[1].node.handle_update_fail_htlc(nodes[2].node.get_our_node_id(), &cs_fail_update.update_fail_htlcs[0]); + nodes[1].node.handle_update_fail_htlc( + nodes[2].node.get_our_node_id(), + &cs_fail_update.update_fail_htlcs[0], + ); commitment_signed_dance!(nodes[1], nodes[2], cs_fail_update.commitment_signed, true, true); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[2], nodes[0], 1000000); + let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = + get_route_and_payment_hash!(nodes[2], nodes[0], 1000000); { - nodes[2].node.send_payment_with_route(route, payment_hash_2, - RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap(); + nodes[2] + .node + .send_payment_with_route( + route, + payment_hash_2, + RecipientOnionFields::secret_only(payment_secret_2), + PaymentId(payment_hash_2.0), + ) + .unwrap(); check_added_monitors!(nodes[2], 1); } @@ -1757,17 +2578,34 @@ fn test_monitor_update_on_pending_forwards() { commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[1], + vec![HTLCHandlingFailureType::Forward { + node_id: Some(nodes[2].node.get_our_node_id()), + channel_id: chan_2.2 + }] + ); check_added_monitors!(nodes[1], 1); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_1.2).unwrap().clone(); + let (latest_update, _) = nodes[1] + .chain_monitor + .latest_monitor_update_id + .lock() + .unwrap() + .get(&chan_1.2) + .unwrap() + .clone(); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_1.2, latest_update); check_added_monitors!(nodes[1], 0); let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &bs_updates.update_fail_htlcs[0]); - nodes[0].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &bs_updates.update_add_htlcs[0]); + nodes[0] + .node + .handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &bs_updates.update_fail_htlcs[0]); + nodes[0] + .node + .handle_update_add_htlc(nodes[1].node.get_our_node_id(), &bs_updates.update_add_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false, true); let events = nodes[0].node.get_and_clear_pending_events(); @@ -1775,7 +2613,9 @@ fn test_monitor_update_on_pending_forwards() { if let Event::PaymentPathFailed { payment_hash, payment_failed_permanently, .. 
} = events[1] { assert_eq!(payment_hash, payment_hash_1); assert!(payment_failed_permanently); - } else { panic!("Unexpected event!"); } + } else { + panic!("Unexpected event!"); + } match events[2] { Event::PaymentFailed { payment_hash, .. } => { assert_eq!(payment_hash, Some(payment_hash_1)); @@ -1783,7 +2623,7 @@ fn test_monitor_update_on_pending_forwards() { _ => panic!("Unexpected event"), } match events[0] { - Event::PendingHTLCsForwardable { .. } => { }, + Event::PendingHTLCsForwardable { .. } => {}, _ => panic!("Unexpected event"), }; nodes[0].node.process_pending_htlc_forwards(); @@ -1805,13 +2645,22 @@ fn monitor_update_claim_fail_no_response() { let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2; // Forward a payment for B to claim - let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000); + let (payment_preimage_1, payment_hash_1, ..) = + route_payment(&nodes[0], &[&nodes[1]], 1_000_000); // Now start forwarding a second payment, skipping the last RAA so B is in AwaitingRAA - let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); + let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = + get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); { - nodes[0].node.send_payment_with_route(route, payment_hash_2, - RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap(); + nodes[0] + .node + .send_payment_with_route( + route, + payment_hash_2, + RecipientOnionFields::secret_only(payment_secret_2), + PaymentId(payment_hash_2.0), + ) + .unwrap(); check_added_monitors!(nodes[0], 1); } @@ -1819,7 +2668,15 @@ fn monitor_update_claim_fail_no_response() { assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); - let as_raa = commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false, true, false, true); + let as_raa = commitment_signed_dance!( + nodes[1], + nodes[0], + payment_event.commitment_msg, + false, + true, + false, + true + ); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[1].node.claim_funds(payment_preimage_1); @@ -1828,7 +2685,14 @@ fn monitor_update_claim_fail_no_response() { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + let (latest_update, _) = nodes[1] + .chain_monitor + .latest_monitor_update_id + .lock() + .unwrap() + .get(&channel_id) + .unwrap() + .clone(); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); check_added_monitors!(nodes[1], 0); @@ -1840,7 +2704,10 @@ fn monitor_update_claim_fail_no_response() { expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000); let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]); + nodes[0].node.handle_update_fulfill_htlc( + nodes[1].node.get_our_node_id(), + &bs_updates.update_fulfill_htlcs[0], + ); commitment_signed_dance!(nodes[0], nodes[1], 
bs_updates.commitment_signed, false); expect_payment_sent!(nodes[0], payment_preimage_1); @@ -1849,7 +2716,9 @@ fn monitor_update_claim_fail_no_response() { // restore_b_before_conf has no meaning if !confirm_a_first // restore_b_before_lock has no meaning if confirm_a_first -fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf: bool, restore_b_before_lock: bool) { +fn do_during_funding_monitor_fail( + confirm_a_first: bool, restore_b_before_conf: bool, restore_b_before_lock: bool, +) { // Test that if the monitor update generated by funding_transaction_generated fails we continue // the channel setup happily after the update is restored. let chanmon_cfgs = create_chanmon_cfgs(2); @@ -1857,28 +2726,74 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf: let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); - nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43, None, None).unwrap(); - nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id())); - nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id())); - - let (temporary_channel_id, funding_tx, funding_output) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 43); + nodes[0] + .node + .create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43, None, None) + .unwrap(); + nodes[1].node.handle_open_channel( + nodes[0].node.get_our_node_id(), + &get_event_msg!( + nodes[0], + MessageSendEvent::SendOpenChannel, + nodes[1].node.get_our_node_id() + ), + ); + nodes[0].node.handle_accept_channel( + nodes[1].node.get_our_node_id(), + &get_event_msg!( + nodes[1], + MessageSendEvent::SendAcceptChannel, + nodes[0].node.get_our_node_id() + ), + ); - nodes[0].node.funding_transaction_generated(temporary_channel_id, nodes[1].node.get_our_node_id(), funding_tx.clone()).unwrap(); + let (temporary_channel_id, funding_tx, funding_output) = + create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 43); + + nodes[0] + .node + .funding_transaction_generated( + temporary_channel_id, + nodes[1].node.get_our_node_id(), + funding_tx.clone(), + ) + .unwrap(); check_added_monitors!(nodes[0], 0); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()); - let channel_id = ChannelId::v1_from_funding_txid(funding_created_msg.funding_txid.as_byte_array(), funding_created_msg.funding_output_index); + let funding_created_msg = get_event_msg!( + nodes[0], + MessageSendEvent::SendFundingCreated, + nodes[1].node.get_our_node_id() + ); + let channel_id = ChannelId::v1_from_funding_txid( + funding_created_msg.funding_txid.as_byte_array(), + funding_created_msg.funding_output_index, + ); nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created_msg); check_added_monitors!(nodes[1], 1); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - nodes[0].node.handle_funding_signed(nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id())); + nodes[0].node.handle_funding_signed( + 
nodes[1].node.get_our_node_id(), + &get_event_msg!( + nodes[1], + MessageSendEvent::SendFundingSigned, + nodes[0].node.get_our_node_id() + ), + ); check_added_monitors!(nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + let (latest_update, _) = nodes[0] + .chain_monitor + .latest_monitor_update_id + .lock() + .unwrap() + .get(&channel_id) + .unwrap() + .clone(); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors!(nodes[0], 0); expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id()); @@ -1886,11 +2801,21 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf: let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 0); assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().len(), 1); - assert_eq!(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0)[0].compute_txid(), funding_output.txid); + assert_eq!( + nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0)[0].compute_txid(), + funding_output.txid + ); if confirm_a_first { confirm_transaction(&nodes[0], &funding_tx); - nodes[1].node.handle_channel_ready(nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id())); + nodes[1].node.handle_channel_ready( + nodes[0].node.get_our_node_id(), + &get_event_msg!( + nodes[0], + MessageSendEvent::SendChannelReady, + nodes[1].node.get_our_node_id() + ), + ); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); } else { @@ -1918,25 +2843,54 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf: } if !confirm_a_first && !restore_b_before_lock { confirm_transaction(&nodes[0], &funding_tx); - nodes[1].node.handle_channel_ready(nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id())); + nodes[1].node.handle_channel_ready( + nodes[0].node.get_our_node_id(), + &get_event_msg!( + nodes[0], + MessageSendEvent::SendChannelReady, + nodes[1].node.get_our_node_id() + ), + ); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); } chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone(); + let (latest_update, _) = nodes[1] + .chain_monitor + .latest_monitor_update_id + .lock() + .unwrap() + .get(&channel_id) + .unwrap() + .clone(); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors!(nodes[1], 0); let (channel_id, (announcement, as_update, bs_update)) = if !confirm_a_first { if !restore_b_before_lock { - let (channel_ready, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[0], &nodes[1]); - (channel_id, create_chan_between_nodes_with_value_b(&nodes[1], &nodes[0], &channel_ready)) + let (channel_ready, channel_id) = + create_chan_between_nodes_with_value_confirm_second(&nodes[0], 
&nodes[1]); + ( + channel_id, + create_chan_between_nodes_with_value_b(&nodes[1], &nodes[0], &channel_ready), + ) } else { - nodes[0].node.handle_channel_ready(nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendChannelReady, nodes[0].node.get_our_node_id())); + nodes[0].node.handle_channel_ready( + nodes[1].node.get_our_node_id(), + &get_event_msg!( + nodes[1], + MessageSendEvent::SendChannelReady, + nodes[0].node.get_our_node_id() + ), + ); confirm_transaction(&nodes[0], &funding_tx); - let (channel_ready, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[1], &nodes[0]); - (channel_id, create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready)) + let (channel_ready, channel_id) = + create_chan_between_nodes_with_value_confirm_second(&nodes[1], &nodes[0]); + ( + channel_id, + create_chan_between_nodes_with_value_b(&nodes[0], &nodes[1], &channel_ready), + ) } } else { if restore_b_before_conf { @@ -1944,12 +2898,16 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf: assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); confirm_transaction(&nodes[1], &funding_tx); } - let (channel_ready, channel_id) = create_chan_between_nodes_with_value_confirm_second(&nodes[0], &nodes[1]); + let (channel_ready, channel_id) = + create_chan_between_nodes_with_value_confirm_second(&nodes[0], &nodes[1]); (channel_id, create_chan_between_nodes_with_value_b(&nodes[1], &nodes[0], &channel_ready)) }; for (i, node) in nodes.iter().enumerate() { let counterparty_node_id = nodes[(i + 1) % 2].node.get_our_node_id(); - assert!(node.gossip_sync.handle_channel_announcement(Some(counterparty_node_id), &announcement).unwrap()); + assert!(node + .gossip_sync + .handle_channel_announcement(Some(counterparty_node_id), &announcement) + .unwrap()); node.gossip_sync.handle_channel_update(Some(counterparty_node_id), &as_update).unwrap(); node.gossip_sync.handle_channel_update(Some(counterparty_node_id), &bs_update).unwrap(); } @@ -1960,11 +2918,22 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf: expect_channel_ready_event(&nodes[0], &nodes[1].node.get_our_node_id()); } - send_payment(&nodes[0], &[&nodes[1]], 8000000); close_channel(&nodes[0], &nodes[1], &channel_id, funding_tx, true); - check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000); - check_closed_event!(nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000); + check_closed_event!( + nodes[0], + 1, + ClosureReason::CounterpartyInitiatedCooperativeClosure, + [nodes[1].node.get_our_node_id()], + 100000 + ); + check_closed_event!( + nodes[1], + 1, + ClosureReason::LocallyInitiatedCooperativeClosure, + [nodes[0].node.get_our_node_id()], + 100000 + ); } #[test] @@ -1989,7 +2958,8 @@ fn test_path_paused_mpp() { let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id; let chan_4_id = create_announced_chan_between_nodes(&nodes, 2, 3).0.contents.short_channel_id; - let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000); + let (mut route, payment_hash, payment_preimage, payment_secret) = + get_route_and_payment_hash!(&nodes[0], nodes[3], 100000); // Set us up to take multiple routes, one 0 -> 1 -> 3 and one 0 -> 2 -> 3: let path = route.paths[0].clone(); @@ -2007,28 +2977,61 @@ fn test_path_paused_mpp() { 
chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); // The first path should have succeeded with the second getting a MonitorUpdateInProgress err. - nodes[0].node.send_payment_with_route( - route, payment_hash, RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0) - ).unwrap(); + nodes[0] + .node + .send_payment_with_route( + route, + payment_hash, + RecipientOnionFields::secret_only(payment_secret), + PaymentId(payment_hash.0), + ) + .unwrap(); check_added_monitors!(nodes[0], 2); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); // Pass the first HTLC of the payment along to nodes[3]. let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); - pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 0, payment_hash.clone(), Some(payment_secret), events.pop().unwrap(), false, None); + pass_along_path( + &nodes[0], + &[&nodes[1], &nodes[3]], + 0, + payment_hash.clone(), + Some(payment_secret), + events.pop().unwrap(), + false, + None, + ); // And check that, after we successfully update the monitor for chan_2 we can pass the second // HTLC along to nodes[3] and claim the whole payment back to nodes[0]. - let (latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_2_id).unwrap().clone(); + let (latest_update, _) = nodes[0] + .chain_monitor + .latest_monitor_update_id + .lock() + .unwrap() + .get(&chan_2_id) + .unwrap() + .clone(); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_2_id, latest_update); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); - pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 200_000, payment_hash.clone(), Some(payment_secret), events.pop().unwrap(), true, None); - - claim_payment_along_route( - ClaimAlongRouteArgs::new(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], payment_preimage) + pass_along_path( + &nodes[0], + &[&nodes[2], &nodes[3]], + 200_000, + payment_hash.clone(), + Some(payment_secret), + events.pop().unwrap(), + true, + None, ); + + claim_payment_along_route(ClaimAlongRouteArgs::new( + &nodes[0], + &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], + payment_preimage, + )); } #[test] @@ -2051,9 +3054,17 @@ fn test_pending_update_fee_ack_on_reconnect() { create_announced_chan_between_nodes(&nodes, 0, 1); send_payment(&nodes[0], &[&nodes[1]], 100_000_00); - let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[1], nodes[0], 1_000_000); - nodes[1].node.send_payment_with_route(route, payment_hash, - RecipientOnionFields::secret_only(payment_secret), PaymentId(payment_hash.0)).unwrap(); + let (route, payment_hash, payment_preimage, payment_secret) = + get_route_and_payment_hash!(&nodes[1], nodes[0], 1_000_000); + nodes[1] + .node + .send_payment_with_route( + route, + payment_hash, + RecipientOnionFields::secret_only(payment_secret), + PaymentId(payment_hash.0), + ) + .unwrap(); check_added_monitors!(nodes[1], 1); let bs_initial_send_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); // bs_initial_send_msgs are not delivered until they are re-generated after reconnect @@ -2067,22 +3078,50 @@ fn test_pending_update_fee_ack_on_reconnect() { let as_update_fee_msgs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); assert!(as_update_fee_msgs.update_fee.is_some()); - nodes[1].node.handle_update_fee(nodes[0].node.get_our_node_id(), 
as_update_fee_msgs.update_fee.as_ref().unwrap()); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_update_fee_msgs.commitment_signed); + nodes[1].node.handle_update_fee( + nodes[0].node.get_our_node_id(), + as_update_fee_msgs.update_fee.as_ref().unwrap(), + ); + nodes[1].node.handle_commitment_signed_batch_test( + nodes[0].node.get_our_node_id(), + &as_update_fee_msgs.commitment_signed, + ); check_added_monitors!(nodes[1], 1); - let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + let bs_first_raa = get_event_msg!( + nodes[1], + MessageSendEvent::SendRevokeAndACK, + nodes[0].node.get_our_node_id() + ); // bs_first_raa is not delivered until it is re-generated after reconnect nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); - nodes[0].node.peer_connected(nodes[1].node.get_our_node_id(), &msgs::Init { - features: nodes[1].node.init_features(), networks: None, remote_network_address: None - }, true).unwrap(); + nodes[0] + .node + .peer_connected( + nodes[1].node.get_our_node_id(), + &msgs::Init { + features: nodes[1].node.init_features(), + networks: None, + remote_network_address: None, + }, + true, + ) + .unwrap(); let as_connect_msg = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(); - nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init { - features: nodes[0].node.init_features(), networks: None, remote_network_address: None - }, false).unwrap(); + nodes[1] + .node + .peer_connected( + nodes[0].node.get_our_node_id(), + &msgs::Init { + features: nodes[0].node.init_features(), + networks: None, + remote_network_address: None, + }, + false, + ) + .unwrap(); let bs_connect_msg = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(); nodes[1].node.handle_channel_reestablish(nodes[0].node.get_our_node_id(), &as_connect_msg); @@ -2090,34 +3129,71 @@ fn test_pending_update_fee_ack_on_reconnect() { assert_eq!(bs_resend_msgs.len(), 3); if let MessageSendEvent::UpdateHTLCs { ref updates, .. } = bs_resend_msgs[0] { assert_eq!(*updates, bs_initial_send_msgs); - } else { panic!(); } + } else { + panic!(); + } if let MessageSendEvent::SendRevokeAndACK { ref msg, .. } = bs_resend_msgs[1] { assert_eq!(*msg, bs_first_raa); - } else { panic!(); } - if let MessageSendEvent::SendChannelUpdate { .. } = bs_resend_msgs[2] { } else { panic!(); } + } else { + panic!(); + } + if let MessageSendEvent::SendChannelUpdate { .. 
} = bs_resend_msgs[2] { + } else { + panic!(); + } nodes[0].node.handle_channel_reestablish(nodes[1].node.get_our_node_id(), &bs_connect_msg); get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id()); - nodes[0].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &bs_initial_send_msgs.update_add_htlcs[0]); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_initial_send_msgs.commitment_signed); + nodes[0].node.handle_update_add_htlc( + nodes[1].node.get_our_node_id(), + &bs_initial_send_msgs.update_add_htlcs[0], + ); + nodes[0].node.handle_commitment_signed_batch_test( + nodes[1].node.get_our_node_id(), + &bs_initial_send_msgs.commitment_signed, + ); check_added_monitors!(nodes[0], 1); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id())); + nodes[1].node.handle_revoke_and_ack( + nodes[0].node.get_our_node_id(), + &get_event_msg!( + nodes[0], + MessageSendEvent::SendRevokeAndACK, + nodes[1].node.get_our_node_id() + ), + ); check_added_monitors!(nodes[1], 1); - let bs_second_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()).commitment_signed; + let bs_second_cs = + get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()).commitment_signed; nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_first_raa); check_added_monitors!(nodes[0], 1); - nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()).commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test( + nodes[0].node.get_our_node_id(), + &get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()).commitment_signed, + ); check_added_monitors!(nodes[1], 1); - let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id()); + let bs_third_raa = get_event_msg!( + nodes[1], + MessageSendEvent::SendRevokeAndACK, + nodes[0].node.get_our_node_id() + ); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_second_cs); + nodes[0] + .node + .handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_second_cs); check_added_monitors!(nodes[0], 1); nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_third_raa); check_added_monitors!(nodes[0], 1); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id())); + nodes[1].node.handle_revoke_and_ack( + nodes[0].node.get_our_node_id(), + &get_event_msg!( + nodes[0], + MessageSendEvent::SendRevokeAndACK, + nodes[1].node.get_our_node_id() + ), + ); check_added_monitors!(nodes[1], 1); expect_pending_htlcs_forwardable!(nodes[0]); @@ -2144,7 +3220,8 @@ fn test_fail_htlc_on_broadcast_after_claim() { create_announced_chan_between_nodes(&nodes, 0, 1); let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2; - let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 2000); + let (payment_preimage, payment_hash, ..) 
= + route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 2000); let bs_txn = get_local_commitment_txn!(nodes[2], chan_id_2); assert_eq!(bs_txn.len(), 1); @@ -2154,19 +3231,37 @@ fn test_fail_htlc_on_broadcast_after_claim() { expect_payment_claimed!(nodes[2], payment_hash, 2000); let cs_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); - nodes[1].node.handle_update_fulfill_htlc(nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]); + nodes[1].node.handle_update_fulfill_htlc( + nodes[2].node.get_our_node_id(), + &cs_updates.update_fulfill_htlcs[0], + ); let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); check_added_monitors!(nodes[1], 1); expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); mine_transaction(&nodes[1], &bs_txn[0]); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000); + check_closed_event!( + nodes[1], + 1, + ClosureReason::CommitmentTxConfirmed, + [nodes[2].node.get_our_node_id()], + 100000 + ); check_closed_broadcast!(nodes[1], true); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); check_added_monitors!(nodes[1], 1); - expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); + expect_pending_htlcs_forwardable_and_htlc_handling_failed!( + nodes[1], + vec![HTLCHandlingFailureType::Forward { + node_id: Some(nodes[2].node.get_our_node_id()), + channel_id: chan_id_2 + }] + ); - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]); + nodes[0].node.handle_update_fulfill_htlc( + nodes[1].node.get_our_node_id(), + &bs_updates.update_fulfill_htlcs[0], + ); expect_payment_sent(&nodes[0], payment_preimage, None, false, false); commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, true, true); expect_payment_path_successful!(nodes[0]); @@ -2193,7 +3288,10 @@ fn do_update_fee_resend_test(deliver_update: bool, parallel_updates: bool) { let update_msgs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); assert!(update_msgs.update_fee.is_some()); if deliver_update { - nodes[1].node.handle_update_fee(nodes[0].node.get_our_node_id(), update_msgs.update_fee.as_ref().unwrap()); + nodes[1].node.handle_update_fee( + nodes[0].node.get_our_node_id(), + update_msgs.update_fee.as_ref().unwrap(), + ); } if parallel_updates { @@ -2208,13 +3306,31 @@ fn do_update_fee_resend_test(deliver_update: bool, parallel_updates: bool) { nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); - nodes[0].node.peer_connected(nodes[1].node.get_our_node_id(), &msgs::Init { - features: nodes[1].node.init_features(), networks: None, remote_network_address: None - }, true).unwrap(); + nodes[0] + .node + .peer_connected( + nodes[1].node.get_our_node_id(), + &msgs::Init { + features: nodes[1].node.init_features(), + networks: None, + remote_network_address: None, + }, + true, + ) + .unwrap(); let as_connect_msg = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(); - nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init { - features: nodes[0].node.init_features(), networks: None, remote_network_address: None - }, false).unwrap(); + nodes[1] + .node + .peer_connected( + nodes[0].node.get_our_node_id(), + &msgs::Init { + features: 
nodes[0].node.init_features(),
+				networks: None,
+				remote_network_address: None,
+			},
+			false,
+		)
+		.unwrap();
 	let bs_connect_msg = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap();
 
 	nodes[1].node.handle_channel_reestablish(nodes[0].node.get_our_node_id(), &as_connect_msg);
@@ -2224,27 +3340,57 @@ fn do_update_fee_resend_test(deliver_update: bool, parallel_updates: bool) {
 	nodes[0].node.handle_channel_reestablish(nodes[1].node.get_our_node_id(), &bs_connect_msg);
 	let mut as_reconnect_msgs = nodes[0].node.get_and_clear_pending_msg_events();
 	assert_eq!(as_reconnect_msgs.len(), 2);
-	if let MessageSendEvent::SendChannelUpdate { .. } = as_reconnect_msgs.pop().unwrap() {} else { panic!(); }
-	let update_msgs = if let MessageSendEvent::UpdateHTLCs { updates, .. } = as_reconnect_msgs.pop().unwrap()
-		{ updates } else { panic!(); };
+	if let MessageSendEvent::SendChannelUpdate { .. } = as_reconnect_msgs.pop().unwrap() {
+	} else {
+		panic!();
+	}
+	let update_msgs =
+		if let MessageSendEvent::UpdateHTLCs { updates, .. } = as_reconnect_msgs.pop().unwrap() {
+			updates
+		} else {
+			panic!();
+		};
 	assert!(update_msgs.update_fee.is_some());
-	nodes[1].node.handle_update_fee(nodes[0].node.get_our_node_id(), update_msgs.update_fee.as_ref().unwrap());
+	nodes[1].node.handle_update_fee(
+		nodes[0].node.get_our_node_id(),
+		update_msgs.update_fee.as_ref().unwrap(),
+	);
 	if parallel_updates {
-		nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &update_msgs.commitment_signed);
+		nodes[1].node.handle_commitment_signed_batch_test(
+			nodes[0].node.get_our_node_id(),
+			&update_msgs.commitment_signed,
+		);
 		check_added_monitors!(nodes[1], 1);
-		let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
+		let (bs_first_raa, bs_first_cs) =
+			get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 		nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_first_raa);
 		check_added_monitors!(nodes[0], 1);
 		let as_second_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
 
-		nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_first_cs);
+		nodes[0]
+			.node
+			.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_first_cs);
 		check_added_monitors!(nodes[0], 1);
-		let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
-
-		nodes[1].node.handle_update_fee(nodes[0].node.get_our_node_id(), as_second_update.update_fee.as_ref().unwrap());
-		nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_second_update.commitment_signed);
+		let as_first_raa = get_event_msg!(
+			nodes[0],
+			MessageSendEvent::SendRevokeAndACK,
+			nodes[1].node.get_our_node_id()
+		);
+
+		nodes[1].node.handle_update_fee(
+			nodes[0].node.get_our_node_id(),
+			as_second_update.update_fee.as_ref().unwrap(),
+		);
+		nodes[1].node.handle_commitment_signed_batch_test(
+			nodes[0].node.get_our_node_id(),
+			&as_second_update.commitment_signed,
+		);
 		check_added_monitors!(nodes[1], 1);
-		let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
+		let bs_second_raa = get_event_msg!(
+			nodes[1],
+			MessageSendEvent::SendRevokeAndACK,
+			nodes[0].node.get_our_node_id()
+		);
 		nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_first_raa);
 		let bs_second_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
 
@@ -2253,9 +3399,16 @@ fn do_update_fee_resend_test(deliver_update: bool, parallel_updates: bool) {
 		nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_second_raa);
 		check_added_monitors!(nodes[0], 1);
 
-		nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_second_cs.commitment_signed);
+		nodes[0].node.handle_commitment_signed_batch_test(
+			nodes[1].node.get_our_node_id(),
+			&bs_second_cs.commitment_signed,
+		);
 		check_added_monitors!(nodes[0], 1);
-		let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+		let as_second_raa = get_event_msg!(
+			nodes[0],
+			MessageSendEvent::SendRevokeAndACK,
+			nodes[1].node.get_our_node_id()
+		);
 
 		nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_second_raa);
 		check_added_monitors!(nodes[1], 1);
@@ -2286,9 +3439,12 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
 	let nodes_0_deserialized;
 	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
-	let chan_id = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 15_000_000, 7_000_000_000).2;
-	let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(&nodes[0], nodes[1], 100000);
-	let (payment_preimage_2, payment_hash_2, payment_secret_2) = get_payment_preimage_hash!(&nodes[1]);
+	let chan_id =
+		create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 15_000_000, 7_000_000_000).2;
+	let (route, payment_hash_1, payment_preimage_1, payment_secret_1) =
+		get_route_and_payment_hash!(&nodes[0], nodes[1], 100000);
+	let (payment_preimage_2, payment_hash_2, payment_secret_2) =
+		get_payment_preimage_hash!(&nodes[1]);
 
 	// Do a really complicated dance to get an HTLC into the holding cell, with
 	// MonitorUpdateInProgress set but AwaitingRemoteRevoke unset. When this test was written, any
@@ -2312,14 +3468,28 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
 	// (c) will not be freed from the holding cell.
 	let (payment_preimage_0, payment_hash_0, ..) = route_payment(&nodes[1], &[&nodes[0]], 100_000);
 
-	nodes[0].node.send_payment_with_route(route.clone(), payment_hash_1,
-		RecipientOnionFields::secret_only(payment_secret_1), PaymentId(payment_hash_1.0)).unwrap();
+	nodes[0]
+		.node
+		.send_payment_with_route(
+			route.clone(),
+			payment_hash_1,
+			RecipientOnionFields::secret_only(payment_secret_1),
+			PaymentId(payment_hash_1.0),
+		)
+		.unwrap();
 	check_added_monitors!(nodes[0], 1);
 	let send = SendEvent::from_node(&nodes[0]);
 	assert_eq!(send.msgs.len(), 1);
 
-	nodes[0].node.send_payment_with_route(route, payment_hash_2,
-		RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
+	nodes[0]
+		.node
+		.send_payment_with_route(
+			route,
+			payment_hash_2,
+			RecipientOnionFields::secret_only(payment_secret_2),
+			PaymentId(payment_hash_2.0),
+		)
+		.unwrap();
 	check_added_monitors!(nodes[0], 0);
 
 	let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode();
@@ -2329,7 +3499,9 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
 	check_added_monitors!(nodes[0], 1);
 
 	nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &send.msgs[0]);
-	nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &send.commitment_msg);
+	nodes[1]
+		.node
+		.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &send.commitment_msg);
 	check_added_monitors!(nodes[1], 1);
 
 	let (raa, cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
@@ -2342,7 +3514,14 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
 	// disconnect the peers. Note that the fuzzer originally found this issue because
 	// deserializing a ChannelManager in this state causes an assertion failure.
 	if reload_a {
-		reload_node!(nodes[0], &nodes[0].node.encode(), &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_0_deserialized);
+		reload_node!(
+			nodes[0],
+			&nodes[0].node.encode(),
+			&[&chan_0_monitor_serialized],
+			persister,
+			new_chain_monitor,
+			nodes_0_deserialized
+		);
 		persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 		persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 	} else {
@@ -2351,22 +3530,44 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
 	nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id());
 
 	// Now reconnect the two
-	nodes[0].node.peer_connected(nodes[1].node.get_our_node_id(), &msgs::Init {
-		features: nodes[1].node.init_features(), networks: None, remote_network_address: None
-	}, true).unwrap();
+	nodes[0]
+		.node
+		.peer_connected(
+			nodes[1].node.get_our_node_id(),
+			&msgs::Init {
+				features: nodes[1].node.init_features(),
+				networks: None,
+				remote_network_address: None,
+			},
+			true,
+		)
+		.unwrap();
 	let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]);
 	assert_eq!(reestablish_1.len(), 1);
-	nodes[1].node.peer_connected(nodes[0].node.get_our_node_id(), &msgs::Init {
-		features: nodes[0].node.init_features(), networks: None, remote_network_address: None
-	}, false).unwrap();
+	nodes[1]
+		.node
+		.peer_connected(
+			nodes[0].node.get_our_node_id(),
+			&msgs::Init {
+				features: nodes[0].node.init_features(),
+				networks: None,
+				remote_network_address: None,
+			},
+			false,
+		)
+		.unwrap();
 	let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]);
 	assert_eq!(reestablish_2.len(), 1);
 
-	nodes[1].node.handle_channel_reestablish(nodes[0].node.get_our_node_id(), &reestablish_1[0]);
+	nodes[1]
+		.node
+		.handle_channel_reestablish(nodes[0].node.get_our_node_id(), &reestablish_1[0]);
 	let resp_1 = handle_chan_reestablish_msgs!(nodes[1], nodes[0]);
 	check_added_monitors!(nodes[1], 0);
-	nodes[0].node.handle_channel_reestablish(nodes[1].node.get_our_node_id(), &reestablish_2[0]);
+	nodes[0]
+		.node
+		.handle_channel_reestablish(nodes[1].node.get_our_node_id(), &reestablish_2[0]);
 	let resp_0 = handle_chan_reestablish_msgs!(nodes[0], nodes[1]);
 
 	assert!(resp_0.0.is_none());
@@ -2382,7 +3583,9 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
 		assert!(pending_cs.update_fail_htlcs.is_empty());
 		assert!(pending_cs.update_fulfill_htlcs.is_empty());
 		assert_eq!(pending_cs.commitment_signed, cs);
-	} else { panic!(); }
+	} else {
+		panic!();
+	}
 
 	if reload_a {
 		// The two pending monitor updates were replayed (but are still pending).
@@ -2397,7 +3600,14 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
 	// If we finish updating the monitor, we should free the holding cell right away (this did
 	// not occur prior to #756).
 	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
-	let (mon_id, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id).unwrap().clone();
+	let (mon_id, _) = nodes[0]
+		.chain_monitor
+		.latest_monitor_update_id
+		.lock()
+		.unwrap()
+		.get(&chan_id)
+		.unwrap()
+		.clone();
 	nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_id, mon_id);
 	expect_payment_claimed!(nodes[0], payment_hash_0, 100_000);
 
@@ -2419,19 +3629,31 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
 			assert!(updates.update_fail_malformed_htlcs.is_empty());
 			assert!(updates.update_fee.is_none());
 			assert_eq!(updates.update_fulfill_htlcs.len(), 1);
-			nodes[1].node.handle_update_fulfill_htlc(nodes[0].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+			nodes[1].node.handle_update_fulfill_htlc(
+				nodes[0].node.get_our_node_id(),
+				&updates.update_fulfill_htlcs[0],
+			);
 			expect_payment_sent(&nodes[1], payment_preimage_0, None, false, false);
 			assert_eq!(updates.update_add_htlcs.len(), 1);
-			nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
+			nodes[1].node.handle_update_add_htlc(
+				nodes[0].node.get_our_node_id(),
+				&updates.update_add_htlcs[0],
+			);
 			updates.commitment_signed
 		},
 		_ => panic!("Unexpected event type!"),
 	};
 
-	nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &commitment_msg);
+	nodes[1]
+		.node
+		.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &commitment_msg);
 	check_added_monitors!(nodes[1], 1);
 
-	let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+	let as_revoke_and_ack = get_event_msg!(
+		nodes[0],
+		MessageSendEvent::SendRevokeAndACK,
+		nodes[1].node.get_our_node_id()
+	);
 	nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_revoke_and_ack);
 	expect_pending_htlcs_forwardable!(nodes[1]);
 	expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 100000);
@@ -2442,11 +3664,11 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
 	let events = nodes[1].node.get_and_clear_pending_events();
 	assert_eq!(events.len(), 2);
 	match events[0] {
-		Event::PendingHTLCsForwardable { .. } => { },
+		Event::PendingHTLCsForwardable { .. } => {},
 		_ => panic!("Unexpected event"),
 	};
 	match events[1] {
-		Event::PaymentPathSuccessful { .. } => { },
+		Event::PaymentPathSuccessful { .. } => {},
 		_ => panic!("Unexpected event"),
 	};
 
@@ -2484,20 +3706,33 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f
 	create_announced_chan_between_nodes(&nodes, 0, 1);
 	let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2;
 
-	let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000);
+	let (payment_preimage, payment_hash, ..) =
+		route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000);
 
 	let mut as_raa = None;
 	if htlc_status == HTLCStatusAtDupClaim::HoldingCell {
 		// In order to get the HTLC claim into the holding cell at nodes[1], we need nodes[1] to be
 		// awaiting a remote revoke_and_ack from nodes[0].
-		let (route, second_payment_hash, _, second_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
-		nodes[0].node.send_payment_with_route(route, second_payment_hash,
-			RecipientOnionFields::secret_only(second_payment_secret), PaymentId(second_payment_hash.0)).unwrap();
+		let (route, second_payment_hash, _, second_payment_secret) =
+			get_route_and_payment_hash!(nodes[0], nodes[1], 100_000);
+		nodes[0]
+			.node
+			.send_payment_with_route(
+				route,
+				second_payment_hash,
+				RecipientOnionFields::secret_only(second_payment_secret),
+				PaymentId(second_payment_hash.0),
+			)
+			.unwrap();
 		check_added_monitors!(nodes[0], 1);
 
-		let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
+		let send_event =
+			SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0));
 		nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &send_event.msgs[0]);
-		nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &send_event.commitment_msg);
+		nodes[1].node.handle_commitment_signed_batch_test(
+			nodes[0].node.get_our_node_id(),
+			&send_event.commitment_msg,
+		);
 		check_added_monitors!(nodes[1], 1);
 
 		let (bs_raa, bs_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id());
@@ -2506,17 +3741,21 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f
 		nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_cs);
 		check_added_monitors!(nodes[0], 1);
 
-		as_raa = Some(get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()));
+		as_raa = Some(get_event_msg!(
+			nodes[0],
+			MessageSendEvent::SendRevokeAndACK,
+			nodes[1].node.get_our_node_id()
+		));
 	}
 
-	let fulfill_msg = msgs::UpdateFulfillHTLC {
-		channel_id: chan_id_2,
-		htlc_id: 0,
-		payment_preimage,
-	};
+	let fulfill_msg =
+		msgs::UpdateFulfillHTLC { channel_id: chan_id_2, htlc_id: 0, payment_preimage };
 	if second_fails {
 		nodes[2].node.fail_htlc_backwards(&payment_hash);
-		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[2], vec![HTLCHandlingFailureType::Receive { payment_hash }]);
+		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(
+			nodes[2],
+			vec![HTLCHandlingFailureType::Receive { payment_hash }]
+		);
 		check_added_monitors!(nodes[2], 1);
 		get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
 	} else {
@@ -2537,10 +3776,18 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f
 	if htlc_status != HTLCStatusAtDupClaim::HoldingCell {
 		bs_updates = Some(get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()));
 		assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1);
-		nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0]);
+		nodes[0].node.handle_update_fulfill_htlc(
+			nodes[1].node.get_our_node_id(),
+			&bs_updates.as_ref().unwrap().update_fulfill_htlcs[0],
+		);
 		expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
 		if htlc_status == HTLCStatusAtDupClaim::Cleared {
-			commitment_signed_dance!(nodes[0], nodes[1], &bs_updates.as_ref().unwrap().commitment_signed, false);
+			commitment_signed_dance!(
+				nodes[0],
+				nodes[1],
+				&bs_updates.as_ref().unwrap().commitment_signed,
+				false
+			);
 			expect_payment_path_successful!(nodes[0]);
 		}
 	} else {
@@ -2554,7 +3801,13 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f
 		let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]);
 		reconnect_args.pending_htlc_fails.0 = 1;
 		reconnect_nodes(reconnect_args);
-		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);
+		expect_pending_htlcs_forwardable_and_htlc_handling_failed!(
+			nodes[1],
+			vec![HTLCHandlingFailureType::Forward {
+				node_id: Some(nodes[2].node.get_our_node_id()),
+				channel_id: chan_id_2
+			}]
+		);
 	} else {
 		let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]);
 		reconnect_args.pending_htlc_claims.0 = 1;
@@ -2568,11 +3821,19 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f
 		bs_updates = Some(get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()));
 		assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1);
-		nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0]);
+		nodes[0].node.handle_update_fulfill_htlc(
+			nodes[1].node.get_our_node_id(),
+			&bs_updates.as_ref().unwrap().update_fulfill_htlcs[0],
+		);
 		expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
 	}
 	if htlc_status != HTLCStatusAtDupClaim::Cleared {
-		commitment_signed_dance!(nodes[0], nodes[1], &bs_updates.as_ref().unwrap().commitment_signed, false);
+		commitment_signed_dance!(
+			nodes[0],
+			nodes[1],
+			&bs_updates.as_ref().unwrap().commitment_signed,
+			false
+		);
 		expect_payment_path_successful!(nodes[0]);
 	}
 }
@@ -2605,10 +3866,16 @@ fn test_temporary_error_during_shutdown() {
 	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 
 	nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).unwrap();
-	nodes[1].node.handle_shutdown(nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()));
+	nodes[1].node.handle_shutdown(
+		nodes[0].node.get_our_node_id(),
+		&get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()),
+	);
 	check_added_monitors!(nodes[1], 1);
 
-	nodes[0].node.handle_shutdown(nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()));
+	nodes[0].node.handle_shutdown(
+		nodes[1].node.get_our_node_id(),
+		&get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()),
+	);
 	check_added_monitors!(nodes[0], 1);
 
 	assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());
@@ -2616,21 +3883,52 @@ fn test_temporary_error_during_shutdown() {
 	chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
 	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
 
-	let (latest_update, _) = nodes[0].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+	let (latest_update, _) = nodes[0]
+		.chain_monitor
+		.latest_monitor_update_id
+		.lock()
+		.unwrap()
+		.get(&channel_id)
+		.unwrap()
+		.clone();
 	nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update);
-	nodes[1].node.handle_closing_signed(nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, nodes[1].node.get_our_node_id()));
+	nodes[1].node.handle_closing_signed(
+		nodes[0].node.get_our_node_id(),
+		&get_event_msg!(
+			nodes[0],
+			MessageSendEvent::SendClosingSigned,
+			nodes[1].node.get_our_node_id()
+		),
+	);
 
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
 
-	let (latest_update, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+	let (latest_update, _) = nodes[1]
+		.chain_monitor
+		.latest_monitor_update_id
+		.lock()
+		.unwrap()
+		.get(&channel_id)
+		.unwrap()
+		.clone();
 	nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update);
 
-	nodes[0].node.handle_closing_signed(nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, nodes[0].node.get_our_node_id()));
-	let (_, closing_signed_a) = get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
+	nodes[0].node.handle_closing_signed(
+		nodes[1].node.get_our_node_id(),
+		&get_event_msg!(
+			nodes[1],
+			MessageSendEvent::SendClosingSigned,
+			nodes[0].node.get_our_node_id()
+		),
+	);
+	let (_, closing_signed_a) =
+		get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id());
 	let txn_a = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
 
-	nodes[1].node.handle_closing_signed(nodes[0].node.get_our_node_id(), &closing_signed_a.unwrap());
+	nodes[1]
+		.node
+		.handle_closing_signed(nodes[0].node.get_our_node_id(), &closing_signed_a.unwrap());
 	let (_, none_b) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());
 	assert!(none_b.is_none());
 	let txn_b = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
@@ -2638,8 +3936,20 @@ fn test_temporary_error_during_shutdown() {
 	assert_eq!(txn_a, txn_b);
 	assert_eq!(txn_a.len(), 1);
 	check_spends!(txn_a[0], funding_tx);
-	check_closed_event!(nodes[1], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
-	check_closed_event!(nodes[0], 1, ClosureReason::LocallyInitiatedCooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+	check_closed_event!(
+		nodes[1],
+		1,
+		ClosureReason::CounterpartyInitiatedCooperativeClosure,
+		[nodes[0].node.get_our_node_id()],
+		100000
+	);
+	check_closed_event!(
+		nodes[0],
+		1,
+		ClosureReason::LocallyInitiatedCooperativeClosure,
+		[nodes[1].node.get_our_node_id()],
+		100000
+	);
 }
 
 #[test]
@@ -2652,14 +3962,23 @@ fn double_temp_error() {
 
 	let (_, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1);
 
-	let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
-	let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+	let (payment_preimage_1, payment_hash_1, ..) =
+		route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
+	let (payment_preimage_2, payment_hash_2, ..) =
+		route_payment(&nodes[0], &[&nodes[1]], 1_000_000);
 
 	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 	// `claim_funds` results in a ChannelMonitorUpdate.
 	nodes[1].node.claim_funds(payment_preimage_1);
 	check_added_monitors!(nodes[1], 1);
-	let (latest_update_1, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+	let (latest_update_1, _) = nodes[1]
+		.chain_monitor
+		.latest_monitor_update_id
+		.lock()
+		.unwrap()
+		.get(&channel_id)
+		.unwrap()
+		.clone();
 
 	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 	// Previously, this would've panicked due to a double-call to `Channel::monitor_update_failed`,
@@ -2668,7 +3987,14 @@ fn double_temp_error() {
 	check_added_monitors!(nodes[1], 1);
 	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
 
-	let (latest_update_2, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&channel_id).unwrap().clone();
+	let (latest_update_2, _) = nodes[1]
+		.chain_monitor
+		.latest_monitor_update_id
+		.lock()
+		.unwrap()
+		.get(&channel_id)
+		.unwrap()
+		.clone();
 	nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update_1);
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 	check_added_monitors!(nodes[1], 0);
@@ -2681,18 +4007,34 @@ fn double_temp_error() {
 	let events = nodes[1].node.get_and_clear_pending_events();
 	assert_eq!(events.len(), 2);
 	match events[0] {
-		Event::PaymentClaimed { amount_msat: 1_000_000, payment_hash, .. } => assert_eq!(payment_hash, payment_hash_1),
+		Event::PaymentClaimed { amount_msat: 1_000_000, payment_hash, .. } => {
+			assert_eq!(payment_hash, payment_hash_1)
+		},
 		_ => panic!("Unexpected Event: {:?}", events[0]),
 	}
 	match events[1] {
-		Event::PaymentClaimed { amount_msat: 1_000_000, payment_hash, .. } => assert_eq!(payment_hash, payment_hash_2),
+		Event::PaymentClaimed { amount_msat: 1_000_000, payment_hash, .. } => {
+			assert_eq!(payment_hash, payment_hash_2)
+		},
 		_ => panic!("Unexpected Event: {:?}", events[1]),
 	}
 	assert_eq!(msg_events.len(), 1);
 	let (update_fulfill_1, commitment_signed_b1, node_id) = {
 		match &msg_events[0] {
-			&MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, updates: msgs::CommitmentUpdate { ref update_add_htlcs, ref update_fulfill_htlcs, ref update_fail_htlcs, ref update_fail_malformed_htlcs, ref update_fee, ref commitment_signed } } => {
+			&MessageSendEvent::UpdateHTLCs {
+				ref node_id,
+				channel_id: _,
+				updates:
+					msgs::CommitmentUpdate {
+						ref update_add_htlcs,
+						ref update_fulfill_htlcs,
+						ref update_fail_htlcs,
+						ref update_fail_malformed_htlcs,
+						ref update_fee,
+						ref commitment_signed,
+					},
+			} => {
 				assert!(update_add_htlcs.is_empty());
 				assert_eq!(update_fulfill_htlcs.len(), 1);
 				assert!(update_fail_htlcs.is_empty());
@@ -2707,40 +4049,49 @@ fn double_temp_error() {
 	nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &update_fulfill_1);
 	check_added_monitors!(nodes[0], 0);
 	expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false);
-	nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &commitment_signed_b1);
+	nodes[0].node.handle_commitment_signed_batch_test(
+		nodes[1].node.get_our_node_id(),
+		&commitment_signed_b1,
+	);
 	check_added_monitors!(nodes[0], 1);
 	nodes[0].node.process_pending_htlc_forwards();
-	let (raa_a1, commitment_signed_a1) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+	let (raa_a1, commitment_signed_a1) =
+		get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
 	check_added_monitors!(nodes[1], 0);
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
 	nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &raa_a1);
 	check_added_monitors!(nodes[1], 1);
-	nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &commitment_signed_a1);
+	nodes[1].node.handle_commitment_signed_batch_test(
+		nodes[0].node.get_our_node_id(),
+		&commitment_signed_a1,
+	);
 	check_added_monitors!(nodes[1], 1);
 
 	// Complete the second HTLC.
 	let ((update_fulfill_2, commitment_signed_b2), raa_b2) = {
 		let events = nodes[1].node.get_and_clear_pending_msg_events();
 		assert_eq!(events.len(), 2);
-		(match &events[0] {
-			MessageSendEvent::UpdateHTLCs { node_id, channel_id: _, updates } => {
-				assert_eq!(*node_id, nodes[0].node.get_our_node_id());
-				assert!(updates.update_add_htlcs.is_empty());
-				assert!(updates.update_fail_htlcs.is_empty());
-				assert!(updates.update_fail_malformed_htlcs.is_empty());
-				assert!(updates.update_fee.is_none());
-				assert_eq!(updates.update_fulfill_htlcs.len(), 1);
-				(updates.update_fulfill_htlcs[0].clone(), updates.commitment_signed.clone())
+		(
+			match &events[0] {
+				MessageSendEvent::UpdateHTLCs { node_id, channel_id: _, updates } => {
+					assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+					assert!(updates.update_add_htlcs.is_empty());
+					assert!(updates.update_fail_htlcs.is_empty());
+					assert!(updates.update_fail_malformed_htlcs.is_empty());
+					assert!(updates.update_fee.is_none());
+					assert_eq!(updates.update_fulfill_htlcs.len(), 1);
+					(updates.update_fulfill_htlcs[0].clone(), updates.commitment_signed.clone())
+				},
+				_ => panic!("Unexpected event"),
 			},
-			_ => panic!("Unexpected event"),
-		},
-		match events[1] {
-			MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
-				assert_eq!(*node_id, nodes[0].node.get_our_node_id());
-				(*msg).clone()
-			},
-			_ => panic!("Unexpected event"),
-		})
+			match events[1] {
+				MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => {
+					assert_eq!(*node_id, nodes[0].node.get_our_node_id());
+					(*msg).clone()
+				},
+				_ => panic!("Unexpected event"),
+			},
+		)
 	};
 	nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &raa_b2);
 	check_added_monitors!(nodes[0], 1);
@@ -2767,35 +4118,81 @@ fn do_test_outbound_reload_without_init_mon(use_0conf: bool) {
 	chan_config.manually_accept_inbound_channels = true;
 	chan_config.channel_handshake_limits.trust_own_funding_0conf = true;
 
-	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(chan_config.clone()), Some(chan_config)]);
+	let node_chanmgrs =
+		create_node_chanmgrs(2, &node_cfgs, &[Some(chan_config.clone()), Some(chan_config)]);
 	let nodes_0_deserialized;
 
 	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
-	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43, None, None).unwrap();
-	nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
+	nodes[0]
+		.node
+		.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43, None, None)
+		.unwrap();
+	nodes[1].node.handle_open_channel(
+		nodes[0].node.get_our_node_id(),
+		&get_event_msg!(
+			nodes[0],
+			MessageSendEvent::SendOpenChannel,
+			nodes[1].node.get_our_node_id()
+		),
+	);
 
 	let events = nodes[1].node.get_and_clear_pending_events();
 	assert_eq!(events.len(), 1);
 	match events[0] {
 		Event::OpenChannelRequest { temporary_channel_id, .. } => {
 			if use_0conf {
-				nodes[1].node.accept_inbound_channel_from_trusted_peer_0conf(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0, None).unwrap();
+				nodes[1]
+					.node
+					.accept_inbound_channel_from_trusted_peer_0conf(
+						&temporary_channel_id,
+						&nodes[0].node.get_our_node_id(),
+						0,
+						None,
+					)
+					.unwrap();
 			} else {
-				nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0, None).unwrap();
+				nodes[1]
+					.node
+					.accept_inbound_channel(
+						&temporary_channel_id,
+						&nodes[0].node.get_our_node_id(),
+						0,
+						None,
+					)
+					.unwrap();
 			}
 		},
 		_ => panic!("Unexpected event"),
 	};
 
-	nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
-
-	let (temporary_channel_id, funding_tx, ..) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 43);
+	nodes[0].node.handle_accept_channel(
+		nodes[1].node.get_our_node_id(),
+		&get_event_msg!(
+			nodes[1],
+			MessageSendEvent::SendAcceptChannel,
+			nodes[0].node.get_our_node_id()
+		),
+	);
 
-	nodes[0].node.funding_transaction_generated(temporary_channel_id, nodes[1].node.get_our_node_id(), funding_tx.clone()).unwrap();
+	let (temporary_channel_id, funding_tx, ..) =
+		create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 43);
+
+	nodes[0]
+		.node
+		.funding_transaction_generated(
+			temporary_channel_id,
+			nodes[1].node.get_our_node_id(),
+			funding_tx.clone(),
+		)
+		.unwrap();
 	check_added_monitors!(nodes[0], 0);
 
-	let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
+	let funding_created_msg = get_event_msg!(
+		nodes[0],
+		MessageSendEvent::SendFundingCreated,
+		nodes[1].node.get_our_node_id()
+	);
 	nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created_msg);
 	check_added_monitors!(nodes[1], 1);
 	expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id());
@@ -2808,14 +4205,14 @@ fn do_test_outbound_reload_without_init_mon(use_0conf: bool) {
 
 			nodes[0].node.handle_funding_signed(nodes[1].node.get_our_node_id(), &msg);
 			check_added_monitors!(nodes[0], 1);
-		}
+		},
 		_ => panic!("Unexpected event"),
 	}
 	if use_0conf {
 		match &bs_signed_locked[1] {
 			MessageSendEvent::SendChannelReady { msg, .. } => {
 				nodes[0].node.handle_channel_ready(nodes[1].node.get_our_node_id(), &msg);
-			}
+			},
 			_ => panic!("Unexpected event"),
 		}
 	}
@@ -2833,8 +4230,21 @@ fn do_test_outbound_reload_without_init_mon(use_0conf: bool) {
 	nodes[0].chain_source.watched_txn.lock().unwrap().clear();
 	nodes[0].chain_source.watched_outputs.lock().unwrap().clear();
 
-	reload_node!(nodes[0], &nodes[0].node.encode(), &[], persister, new_chain_monitor, nodes_0_deserialized);
-	check_closed_event!(nodes[0], 1, ClosureReason::DisconnectedPeer, [nodes[1].node.get_our_node_id()], 100000);
+	reload_node!(
+		nodes[0],
+		&nodes[0].node.encode(),
+		&[],
+		persister,
+		new_chain_monitor,
+		nodes_0_deserialized
+	);
+	check_closed_event!(
+		nodes[0],
+		1,
+		ClosureReason::DisconnectedPeer,
+		[nodes[1].node.get_our_node_id()],
+		100000
+	);
 	assert!(nodes[0].node.list_channels().is_empty());
 }
 
@@ -2858,35 +4268,81 @@ fn do_test_inbound_reload_without_init_mon(use_0conf: bool, lock_commitment: boo
 	chan_config.manually_accept_inbound_channels = true;
 	chan_config.channel_handshake_limits.trust_own_funding_0conf = true;
 
-	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(chan_config.clone()), Some(chan_config)]);
+	let node_chanmgrs =
+		create_node_chanmgrs(2, &node_cfgs, &[Some(chan_config.clone()), Some(chan_config)]);
	let nodes_1_deserialized;
 
 	let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 
-	nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43, None, None).unwrap();
-	nodes[1].node.handle_open_channel(nodes[0].node.get_our_node_id(), &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()));
+	nodes[0]
+		.node
+		.create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43, None, None)
+		.unwrap();
+	nodes[1].node.handle_open_channel(
+		nodes[0].node.get_our_node_id(),
+		&get_event_msg!(
+			nodes[0],
+			MessageSendEvent::SendOpenChannel,
+			nodes[1].node.get_our_node_id()
+		),
+	);
 
 	let events = nodes[1].node.get_and_clear_pending_events();
 	assert_eq!(events.len(), 1);
 	match events[0] {
 		Event::OpenChannelRequest { temporary_channel_id, .. } => {
 			if use_0conf {
-				nodes[1].node.accept_inbound_channel_from_trusted_peer_0conf(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0, None).unwrap();
+				nodes[1]
+					.node
+					.accept_inbound_channel_from_trusted_peer_0conf(
+						&temporary_channel_id,
+						&nodes[0].node.get_our_node_id(),
+						0,
+						None,
+					)
+					.unwrap();
 			} else {
-				nodes[1].node.accept_inbound_channel(&temporary_channel_id, &nodes[0].node.get_our_node_id(), 0, None).unwrap();
+				nodes[1]
+					.node
+					.accept_inbound_channel(
+						&temporary_channel_id,
+						&nodes[0].node.get_our_node_id(),
+						0,
+						None,
+					)
+					.unwrap();
 			}
 		},
 		_ => panic!("Unexpected event"),
 	};
 
-	nodes[0].node.handle_accept_channel(nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id()));
-
-	let (temporary_channel_id, funding_tx, ..) = create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 43);
+	nodes[0].node.handle_accept_channel(
+		nodes[1].node.get_our_node_id(),
+		&get_event_msg!(
+			nodes[1],
+			MessageSendEvent::SendAcceptChannel,
+			nodes[0].node.get_our_node_id()
+		),
+	);
 
-	nodes[0].node.funding_transaction_generated(temporary_channel_id, nodes[1].node.get_our_node_id(), funding_tx.clone()).unwrap();
+	let (temporary_channel_id, funding_tx, ..) =
+		create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 43);
+
+	nodes[0]
+		.node
+		.funding_transaction_generated(
+			temporary_channel_id,
+			nodes[1].node.get_our_node_id(),
+			funding_tx.clone(),
+		)
+		.unwrap();
 	check_added_monitors!(nodes[0], 0);
 
-	let funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id());
+	let funding_created_msg = get_event_msg!(
+		nodes[0],
+		MessageSendEvent::SendFundingCreated,
+		nodes[1].node.get_our_node_id()
+	);
 	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 	nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created_msg);
 	check_added_monitors!(nodes[1], 1);
@@ -2894,7 +4350,11 @@ fn do_test_inbound_reload_without_init_mon(use_0conf: bool, lock_commitment: boo
 	// nodes[1] happily sends its funding_signed even though its awaiting the persistence of the
 	// initial ChannelMonitor, but it will decline to send its channel_ready even if the funding
 	// transaction is confirmed.
-	let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id());
+	let funding_signed_msg = get_event_msg!(
+		nodes[1],
+		MessageSendEvent::SendFundingSigned,
+		nodes[0].node.get_our_node_id()
+	);
 	nodes[0].node.handle_funding_signed(nodes[1].node.get_our_node_id(), &funding_signed_msg);
 	check_added_monitors!(nodes[0], 1);
@@ -2906,7 +4366,11 @@ fn do_test_inbound_reload_without_init_mon(use_0conf: bool, lock_commitment: boo
 		confirm_transaction(&nodes[1], &as_funding_tx[0]);
 	}
 	if use_0conf || lock_commitment {
-		let as_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, nodes[1].node.get_our_node_id());
+		let as_ready = get_event_msg!(
+			nodes[0],
+			MessageSendEvent::SendChannelReady,
+			nodes[1].node.get_our_node_id()
+		);
 		nodes[1].node.handle_channel_ready(nodes[0].node.get_our_node_id(), &as_ready);
 	}
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
@@ -2920,9 +4384,22 @@ fn do_test_inbound_reload_without_init_mon(use_0conf: bool, lock_commitment: boo
 	nodes[1].chain_source.watched_txn.lock().unwrap().clear();
 	nodes[1].chain_source.watched_outputs.lock().unwrap().clear();
 
-	reload_node!(nodes[1], &nodes[1].node.encode(), &[], persister, new_chain_monitor, nodes_1_deserialized);
+	reload_node!(
+		nodes[1],
+		&nodes[1].node.encode(),
+		&[],
+		persister,
+		new_chain_monitor,
+		nodes_1_deserialized
+	);
 
-	check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer, [nodes[0].node.get_our_node_id()], 100000);
+	check_closed_event!(
+		nodes[1],
+		1,
+		ClosureReason::DisconnectedPeer,
+		[nodes[0].node.get_our_node_id()],
+		100000
+	);
 	assert!(nodes[1].node.list_channels().is_empty());
 }
 
@@ -2950,8 +4427,10 @@ fn test_blocked_chan_preimage_release() {
 
 	// Tee up two payments in opposite directions across nodes[1], one it sent to generate a
 	// PaymentSent event and one it forwards.
-	let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[1], &[&nodes[2]], 1_000_000);
-	let (payment_preimage_2, payment_hash_2, ..) = route_payment(&nodes[2], &[&nodes[1], &nodes[0]], 1_000_000);
+	let (payment_preimage_1, payment_hash_1, ..) =
+		route_payment(&nodes[1], &[&nodes[2]], 1_000_000);
+	let (payment_preimage_2, payment_hash_2, ..) =
+		route_payment(&nodes[2], &[&nodes[1], &nodes[0]], 1_000_000);
 
 	// Claim the first payment to get a `PaymentSent` event (but don't handle it yet).
 	nodes[2].node.claim_funds(payment_preimage_1);
@@ -2959,8 +4438,17 @@ fn test_blocked_chan_preimage_release() {
 	expect_payment_claimed!(nodes[2], payment_hash_1, 1_000_000);
 
 	let cs_htlc_fulfill_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id());
-	nodes[1].node.handle_update_fulfill_htlc(nodes[2].node.get_our_node_id(), &cs_htlc_fulfill_updates.update_fulfill_htlcs[0]);
-	do_commitment_signed_dance(&nodes[1], &nodes[2], &cs_htlc_fulfill_updates.commitment_signed, false, false);
+	nodes[1].node.handle_update_fulfill_htlc(
+		nodes[2].node.get_our_node_id(),
+		&cs_htlc_fulfill_updates.update_fulfill_htlcs[0],
+	);
+	do_commitment_signed_dance(
+		&nodes[1],
+		&nodes[2],
+		&cs_htlc_fulfill_updates.commitment_signed,
+		false,
+		false,
+	);
 	check_added_monitors(&nodes[1], 0);
 
 	// Now claim the second payment on nodes[0], which will ultimately result in nodes[1] trying to
@@ -2971,7 +4459,10 @@ fn test_blocked_chan_preimage_release() {
 	expect_payment_claimed!(nodes[0], payment_hash_2, 1_000_000);
 
 	let as_htlc_fulfill_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
-	nodes[1].node.handle_update_fulfill_htlc(nodes[0].node.get_our_node_id(), &as_htlc_fulfill_updates.update_fulfill_htlcs[0]);
+	nodes[1].node.handle_update_fulfill_htlc(
+		nodes[0].node.get_our_node_id(),
+		&as_htlc_fulfill_updates.update_fulfill_htlcs[0],
+	);
 	check_added_monitors(&nodes[1], 1); // We generate only a preimage monitor update
 	assert!(get_monitor!(nodes[1], chan_id_2).get_stored_preimages().contains_key(&payment_hash_2));
 	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
@@ -2979,7 +4470,10 @@ fn test_blocked_chan_preimage_release() {
 	// Finish the CS dance between nodes[0] and nodes[1]. Note that until the event handling, the
 	// update_fulfill_htlc + CS is held, even though the preimage is already on disk for the
 	// channel.
-	nodes[1].node.handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_htlc_fulfill_updates.commitment_signed);
+	nodes[1].node.handle_commitment_signed_batch_test(
+		nodes[0].node.get_our_node_id(),
+		&as_htlc_fulfill_updates.commitment_signed,
+	);
 	check_added_monitors(&nodes[1], 1);
 	let (a, raa) = do_main_commitment_signed_dance(&nodes[1], &nodes[0], false);
 	assert!(a.is_none());
@@ -2990,9 +4484,18 @@ fn test_blocked_chan_preimage_release() {
 	let events = nodes[1].node.get_and_clear_pending_events();
 	assert_eq!(events.len(), 3);
-	if let Event::PaymentSent { .. } = events[0] {} else { panic!(); }
-	if let Event::PaymentPathSuccessful { .. } = events[2] {} else { panic!(); }
-	if let Event::PaymentForwarded { .. } = events[1] {} else { panic!(); }
+	if let Event::PaymentSent { .. } = events[0] {
+	} else {
+		panic!();
+	}
+	if let Event::PaymentPathSuccessful { .. } = events[2] {
+	} else {
+		panic!();
+	}
+	if let Event::PaymentForwarded { .. } = events[1] {
+	} else {
+		panic!();
+	}
 
 	// The event processing should release the last RAA updates on both channels.
 	check_added_monitors(&nodes[1], 2);
@@ -3002,12 +4505,23 @@ fn test_blocked_chan_preimage_release() {
 	let bs_htlc_fulfill_updates = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id());
 	check_added_monitors(&nodes[1], 1);
-	nodes[2].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_htlc_fulfill_updates.update_fulfill_htlcs[0]);
-	do_commitment_signed_dance(&nodes[2], &nodes[1], &bs_htlc_fulfill_updates.commitment_signed, false, false);
+	nodes[2].node.handle_update_fulfill_htlc(
+		nodes[1].node.get_our_node_id(),
+		&bs_htlc_fulfill_updates.update_fulfill_htlcs[0],
+	);
+	do_commitment_signed_dance(
+		&nodes[2],
+		&nodes[1],
+		&bs_htlc_fulfill_updates.commitment_signed,
+		false,
+		false,
+	);
 	expect_payment_sent(&nodes[2], payment_preimage_2, None, true, true);
 }
 
-fn do_test_inverted_mon_completion_order(with_latest_manager: bool, complete_bc_commitment_dance: bool) {
+fn do_test_inverted_mon_completion_order(
+	with_latest_manager: bool, complete_bc_commitment_dance: bool,
+) {
 	// When we forward a payment and receive `update_fulfill_htlc`+`commitment_signed` messages
 	// from the downstream channel, we immediately claim the HTLC on the upstream channel, before
 	// even doing a `commitment_signed` dance on the downstream channel. This implies that our
@@ -3032,7 +4546,8 @@ fn do_test_inverted_mon_completion_order(with_latest_manager: bool, complete_bc_
 	// Route a payment from A, through B, to C, then claim it on C. Once we pass B the
 	// `update_fulfill_htlc` we have a monitor update for both of B's channels. We complete the one
 	// on the B<->C channel but leave the A<->B monitor update pending, then reload B.
-	let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000);
+	let (payment_preimage, payment_hash, ..) =
+		route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 100_000);
 
 	let mon_ab = get_monitor!(nodes[1], chan_id_ab).encode();
 	let mut manager_b = Vec::new();
@@ -3046,7 +4561,10 @@ fn do_test_inverted_mon_completion_order(with_latest_manager: bool, complete_bc_
 	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 	let cs_updates = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id());
-	nodes[1].node.handle_update_fulfill_htlc(nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]);
+	nodes[1].node.handle_update_fulfill_htlc(
+		nodes[2].node.get_our_node_id(),
+		&cs_updates.update_fulfill_htlcs[0],
+	);
 
 	// B generates a new monitor update for the A <-> B channel, but doesn't send the new messages
 	// for it since the monitor update is marked in-progress.
@@ -3056,15 +4574,26 @@ fn do_test_inverted_mon_completion_order(with_latest_manager: bool, complete_bc_
 	// Now step the Commitment Signed Dance between B and C forward a bit (or fully), ensuring we
 	// won't get the preimage when the nodes reconnect and we have to get it from the
 	// ChannelMonitor.
-	nodes[1].node.handle_commitment_signed_batch_test(nodes[2].node.get_our_node_id(), &cs_updates.commitment_signed);
+	nodes[1].node.handle_commitment_signed_batch_test(
+		nodes[2].node.get_our_node_id(),
+		&cs_updates.commitment_signed,
+	);
 	check_added_monitors(&nodes[1], 1);
 	if complete_bc_commitment_dance {
-		let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], nodes[2].node.get_our_node_id());
+		let (bs_revoke_and_ack, bs_commitment_signed) =
+			get_revoke_commit_msgs!(nodes[1], nodes[2].node.get_our_node_id());
 		nodes[2].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_revoke_and_ack);
 		check_added_monitors(&nodes[2], 1);
-		nodes[2].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_commitment_signed);
+		nodes[2].node.handle_commitment_signed_batch_test(
+			nodes[1].node.get_our_node_id(),
+			&bs_commitment_signed,
+		);
 		check_added_monitors(&nodes[2], 1);
-		let cs_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+		let cs_raa = get_event_msg!(
+			nodes[2],
+			MessageSendEvent::SendRevokeAndACK,
+			nodes[1].node.get_our_node_id()
+		);
 
 		// At this point node B still hasn't persisted the `ChannelMonitorUpdate` with the
 		// preimage in the A <-> B channel, which will prevent it from persisting the
@@ -3080,7 +4609,14 @@ fn do_test_inverted_mon_completion_order(with_latest_manager: bool, complete_bc_
 	}
 
 	let mon_bc = get_monitor!(nodes[1], chan_id_bc).encode();
-	reload_node!(nodes[1], &manager_b, &[&mon_ab, &mon_bc], persister, new_chain_monitor, nodes_1_deserialized);
+	reload_node!(
+		nodes[1],
+		&manager_b,
+		&[&mon_ab, &mon_bc],
+		persister,
+		new_chain_monitor,
+		nodes_1_deserialized
+	);
 
 	nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id());
 	nodes[2].node.peer_disconnected(nodes[1].node.get_our_node_id());
@@ -3115,12 +4651,23 @@ fn do_test_inverted_mon_completion_order(with_latest_manager: bool, complete_bc_
 		// (Finally) complete the A <-> B ChannelMonitorUpdate, ensuring the preimage is durably on
 		// disk in the proper ChannelMonitor, unblocking the B <-> C ChannelMonitor updating
 		// process.
-		let (_, ab_update_id) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone();
-		nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(chan_id_ab, ab_update_id).unwrap();
-
-		// When we fetch B's HTLC update messages next (now that the ChannelMonitorUpdate has
-		// completed), it will also release the final RAA ChannelMonitorUpdate on the B <-> C
-		// channel.
+		let (_, ab_update_id) = nodes[1]
+			.chain_monitor
+			.latest_monitor_update_id
+			.lock()
+			.unwrap()
+			.get(&chan_id_ab)
+			.unwrap()
+			.clone();
+		nodes[1]
+			.chain_monitor
+			.chain_monitor
+			.channel_monitor_updated(chan_id_ab, ab_update_id)
+			.unwrap();
+
+		// When we fetch B's HTLC update messages next (now that the ChannelMonitorUpdate has
+		// completed), it will also release the final RAA ChannelMonitorUpdate on the B <-> C
+		// channel.
 	} else {
 		// If the ChannelManager used in the reload was stale, check that the B <-> C channel was
 		// closed.
@@ -3132,7 +4679,14 @@ fn do_test_inverted_mon_completion_order(with_latest_manager: bool, complete_bc_
 		check_added_monitors(&nodes[1], 0);
 		persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 		persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
-		check_closed_event(&nodes[1], 1, ClosureReason::OutdatedChannelManager, false, &[nodes[2].node.get_our_node_id()], 100_000);
+		check_closed_event(
+			&nodes[1],
+			1,
+			ClosureReason::OutdatedChannelManager,
+			false,
+			&[nodes[2].node.get_our_node_id()],
+			100_000,
+		);
 		check_added_monitors(&nodes[1], 2);
 
 		nodes[1].node.timer_tick_occurred();
@@ -3143,8 +4697,19 @@ fn do_test_inverted_mon_completion_order(with_latest_manager: bool, complete_bc_
 		// ChannelMonitorUpdate hasn't yet completed.
 		reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1]));
 
-		let (_, ab_update_id) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone();
-		nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(chan_id_ab, ab_update_id).unwrap();
+		let (_, ab_update_id) = nodes[1]
+			.chain_monitor
+			.latest_monitor_update_id
+			.lock()
+			.unwrap()
+			.get(&chan_id_ab)
+			.unwrap()
+			.clone();
+		nodes[1]
+			.chain_monitor
+			.chain_monitor
+			.channel_monitor_updated(chan_id_ab, ab_update_id)
+			.unwrap();
 
 		// The ChannelMonitorUpdate which was completed prior to the reconnect only contained the
 		// preimage (as it was a replay of the original ChannelMonitorUpdate from before we
@@ -3157,10 +4722,20 @@ fn do_test_inverted_mon_completion_order(with_latest_manager: bool, complete_bc_
 	let bs_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id());
 	check_added_monitors(&nodes[1], 1);
 
-	nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
+	nodes[0].node.handle_update_fulfill_htlc(
+		nodes[1].node.get_our_node_id(),
+		&bs_updates.update_fulfill_htlcs[0],
+	);
 	do_commitment_signed_dance(&nodes[0], &nodes[1], &bs_updates.commitment_signed, false, false);
 
-	expect_payment_forwarded!(nodes[1], &nodes[0], &nodes[2], Some(1_000), false, !with_latest_manager);
+	expect_payment_forwarded!(
+		nodes[1],
+		&nodes[0],
+		&nodes[2],
+		Some(1_000),
+		false,
+		!with_latest_manager
+	);
 
 	// Finally, check that the payment was, ultimately, seen as sent by node A.
 	expect_payment_sent(&nodes[0], payment_preimage, None, true, true);
@@ -3174,7 +4749,9 @@ fn test_inverted_mon_completion_order() {
 	do_test_inverted_mon_completion_order(false, false);
 }
 
-fn do_test_durable_preimages_on_closed_channel(close_chans_before_reload: bool, close_only_a: bool, hold_post_reload_mon_update: bool) {
+fn do_test_durable_preimages_on_closed_channel(
+	close_chans_before_reload: bool, close_only_a: bool, hold_post_reload_mon_update: bool,
+) {
 	// Test that we can apply a `ChannelMonitorUpdate` with a payment preimage even if the channel
 	// is force-closed between when we generate the update on reload and when we go to handle the
 	// update or prior to generating the update at all.
@@ -3200,7 +4777,8 @@ fn do_test_durable_preimages_on_closed_channel(close_chans_before_reload: bool,
 	// Route a payment from A, through B, to C, then claim it on C. Once we pass B the
 	// `update_fulfill_htlc` we have a monitor update for both of B's channels. We complete the one
 	// on the B<->C channel but leave the A<->B monitor update pending, then reload B.
-	let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
+	let (payment_preimage, payment_hash, ..) =
+		route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
 
 	let mon_ab = get_monitor!(nodes[1], chan_id_ab).encode();
 
@@ -3210,7 +4788,10 @@ fn do_test_durable_preimages_on_closed_channel(close_chans_before_reload: bool,
 	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 	let cs_updates = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id());
-	nodes[1].node.handle_update_fulfill_htlc(nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]);
+	nodes[1].node.handle_update_fulfill_htlc(
+		nodes[2].node.get_our_node_id(),
+		&cs_updates.update_fulfill_htlcs[0],
+	);
 
 	// B generates a new monitor update for the A <-> B channel, but doesn't send the new messages
 	// for it since the monitor update is marked in-progress.
@@ -3220,7 +4801,10 @@ fn do_test_durable_preimages_on_closed_channel(close_chans_before_reload: bool,
 	// Now step the Commitment Signed Dance between B and C forward a bit, ensuring we won't get
 	// the preimage when the nodes reconnect, at which point we have to ensure we get it from the
 	// ChannelMonitor.
-	nodes[1].node.handle_commitment_signed_batch_test(nodes[2].node.get_our_node_id(), &cs_updates.commitment_signed);
+	nodes[1].node.handle_commitment_signed_batch_test(
+		nodes[2].node.get_our_node_id(),
+		&cs_updates.commitment_signed,
+	);
 	check_added_monitors(&nodes[1], 1);
 	let _ = get_revoke_commit_msgs!(nodes[1], nodes[2].node.get_our_node_id());
@@ -3230,20 +4814,55 @@ fn do_test_durable_preimages_on_closed_channel(close_chans_before_reload: bool,
 	if close_chans_before_reload {
 		if !close_only_a {
 			chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
-			nodes[1].node.force_close_broadcasting_latest_txn(&chan_id_bc, &nodes[2].node.get_our_node_id(), error_message.to_string()).unwrap();
+			nodes[1]
+				.node
+				.force_close_broadcasting_latest_txn(
+					&chan_id_bc,
+					&nodes[2].node.get_our_node_id(),
+					error_message.to_string(),
+				)
+				.unwrap();
 			check_closed_broadcast(&nodes[1], 1, true);
-			check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false, &[nodes[2].node.get_our_node_id()], 100000);
+			check_closed_event(
+				&nodes[1],
+				1,
+				ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) },
+				false,
+				&[nodes[2].node.get_our_node_id()],
+				100000,
+			);
 		}
 
 		chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
-		nodes[1].node.force_close_broadcasting_latest_txn(&chan_id_ab, &nodes[0].node.get_our_node_id(), error_message.to_string()).unwrap();
+		nodes[1]
+			.node
+			.force_close_broadcasting_latest_txn(
+				&chan_id_ab,
+				&nodes[0].node.get_our_node_id(),
+				error_message.to_string(),
+			)
+			.unwrap();
 		check_closed_broadcast(&nodes[1], 1, true);
-		check_closed_event(&nodes[1], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false, &[nodes[0].node.get_our_node_id()], 100000);
+		check_closed_event(
+			&nodes[1],
+			1,
+			ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) },
+			false,
+			&[nodes[0].node.get_our_node_id()],
+			100000,
+		);
 	}
 
 	// Now reload node B
 	let manager_b = nodes[1].node.encode();
-	reload_node!(nodes[1], &manager_b, &[&mon_ab, &mon_bc], persister, new_chain_monitor, nodes_1_deserialized);
+	reload_node!(
+		nodes[1],
+		&manager_b,
+		&[&mon_ab, &mon_bc],
+		persister,
+		new_chain_monitor,
+		nodes_1_deserialized
+	);
 
 	nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id());
 	nodes[2].node.peer_disconnected(nodes[1].node.get_our_node_id());
@@ -3259,8 +4878,22 @@ fn do_test_durable_preimages_on_closed_channel(close_chans_before_reload: bool,
 	}
 
 	let error_message = "Channel force-closed";
-	nodes[0].node.force_close_broadcasting_latest_txn(&chan_id_ab, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
-	check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false, &[nodes[1].node.get_our_node_id()], 100000);
+	nodes[0]
+		.node
+		.force_close_broadcasting_latest_txn(
+			&chan_id_ab,
+			&nodes[1].node.get_our_node_id(),
+			error_message.to_string(),
+		)
+		.unwrap();
+	check_closed_event(
+		&nodes[0],
+		1,
+		ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) },
+		false,
+		&[nodes[1].node.get_our_node_id()],
+		100000,
+	);
 	let as_closing_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
 	assert_eq!(as_closing_tx.len(), 1);
 
@@ -3272,7 +4905,15 @@ fn do_test_durable_preimages_on_closed_channel(close_chans_before_reload: bool,
 	// After a timer tick a payment preimage ChannelMonitorUpdate is applied to the A<->B
 	// ChannelMonitor (possible twice), even though the channel has since been closed.
 	check_added_monitors(&nodes[1], 0);
-	let mons_added = if close_chans_before_reload { if !close_only_a { 4 } else { 3 } } else { 2 };
+	let mons_added = if close_chans_before_reload {
+		if !close_only_a {
+			4
+		} else {
+			3
+		}
+	} else {
+		2
+	};
 	if hold_post_reload_mon_update {
 		for _ in 0..mons_added {
 			persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
@@ -3284,12 +4925,22 @@ fn do_test_durable_preimages_on_closed_channel(close_chans_before_reload: bool,
 	// Finally, check that B created a payment preimage transaction and close out the payment.
 	let bs_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
 	assert_eq!(bs_txn.len(), if close_chans_before_reload && !close_only_a { 2 } else { 1 });
-	let bs_preimage_tx = bs_txn.iter().find(|tx| tx.input[0].previous_output.txid == as_closing_tx[0].compute_txid()).unwrap();
+	let bs_preimage_tx = bs_txn
+		.iter()
+		.find(|tx| tx.input[0].previous_output.txid == as_closing_tx[0].compute_txid())
+		.unwrap();
 	check_spends!(bs_preimage_tx, as_closing_tx[0]);
 
 	if !close_chans_before_reload {
 		check_closed_broadcast(&nodes[1], 1, true);
-		check_closed_event(&nodes[1], 1, ClosureReason::CommitmentTxConfirmed, false, &[nodes[0].node.get_our_node_id()], 100000);
+		check_closed_event(
+			&nodes[1],
+			1,
+			ClosureReason::CommitmentTxConfirmed,
+			false,
+			&[nodes[0].node.get_our_node_id()],
+			100000,
+		);
 	}
 
 	mine_transactions(&nodes[0], &[&as_closing_tx[0], bs_preimage_tx]);
@@ -3307,12 +4958,18 @@ fn do_test_durable_preimages_on_closed_channel(close_chans_before_reload: bool,
 			reconnect_args.pending_raa.1 = true;
 			reconnect_nodes(reconnect_args);
-
 		}
 
 		// Once the blocked `ChannelMonitorUpdate` *finally* completes, the pending
 		// `PaymentForwarded` event will finally be released.
-		let (ab_update_id, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone();
+		let (ab_update_id, _) = nodes[1]
+			.chain_monitor
+			.latest_monitor_update_id
+			.lock()
+			.unwrap()
+			.get(&chan_id_ab)
+			.unwrap()
+			.clone();
 		nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_id_ab, ab_update_id);
 
 		// If the A<->B channel was closed before we reload, we'll replay the claim against it on
@@ -3320,8 +4977,8 @@ fn do_test_durable_preimages_on_closed_channel(close_chans_before_reload: bool,
 		let evs = nodes[1].node.get_and_clear_pending_events();
 		assert_eq!(evs.len(), if close_chans_before_reload { 2 } else { 1 });
 		for ev in evs {
-			if let Event::PaymentForwarded { .. } = ev { }
-			else {
+			if let Event::PaymentForwarded { .. } = ev {
+			} else {
 				panic!();
 			}
 		}
@@ -3366,7 +5023,8 @@ fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) {
 	// We complete the commitment signed dance on the B<->C channel but leave the A<->B monitor
 	// update pending, then reload B. At that point, the final monitor update on the B<->C channel
 	// is still pending because it can't fly until the preimage is persisted on the A<->B monitor.
-	let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
+	let (payment_preimage, payment_hash, ..) =
+		route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
 
 	nodes[2].node.claim_funds(payment_preimage);
 	check_added_monitors(&nodes[2], 1);
@@ -3374,7 +5032,10 @@ fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) {
 	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 	let cs_updates = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id());
-	nodes[1].node.handle_update_fulfill_htlc(nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]);
+	nodes[1].node.handle_update_fulfill_htlc(
+		nodes[2].node.get_our_node_id(),
+		&cs_updates.update_fulfill_htlcs[0],
+	);
 
 	// B generates a new monitor update for the A <-> B channel, but doesn't send the new messages
 	// for it since the monitor update is marked in-progress.
@@ -3383,7 +5044,10 @@ fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) {
 
 	// Now step the Commitment Signed Dance between B and C and check that after the final RAA B
 	// doesn't let the preimage-removing monitor update fly.
-	nodes[1].node.handle_commitment_signed_batch_test(nodes[2].node.get_our_node_id(), &cs_updates.commitment_signed);
+	nodes[1].node.handle_commitment_signed_batch_test(
+		nodes[2].node.get_our_node_id(),
+		&cs_updates.commitment_signed,
+	);
 	check_added_monitors(&nodes[1], 1);
 	let (bs_raa, bs_cs) = get_revoke_commit_msgs!(nodes[1], nodes[2].node.get_our_node_id());
@@ -3392,7 +5056,11 @@ fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) {
 	nodes[2].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_cs);
 	check_added_monitors(&nodes[2], 1);
 
-	let cs_final_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id());
+	let cs_final_raa = get_event_msg!(
+		nodes[2],
+		MessageSendEvent::SendRevokeAndACK,
+		nodes[1].node.get_our_node_id()
+	);
 	nodes[1].node.handle_revoke_and_ack(nodes[2].node.get_our_node_id(), &cs_final_raa);
 	check_added_monitors(&nodes[1], 0);
@@ -3402,25 +5070,56 @@ fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) {
 	let mon_ab = get_monitor!(nodes[1], chan_id_ab).encode();
 	let mon_bc = get_monitor!(nodes[1], chan_id_bc).encode();
 	let manager_b = nodes[1].node.encode();
-	reload_node!(nodes[1], &manager_b, &[&mon_ab, &mon_bc], persister, new_chain_monitor, nodes_1_deserialized);
+	reload_node!(
+		nodes[1],
+		&manager_b,
+		&[&mon_ab, &mon_bc],
+		persister,
+		new_chain_monitor,
+		nodes_1_deserialized
+	);
 
 	let error_message = "Channel force-closed";
 	if close_during_reload {
 		// Test that we still free the B<->C channel if the A<->B channel closed while we reloaded
 		// (as learned about during the on-reload block connection).
-		nodes[0].node.force_close_broadcasting_latest_txn(&chan_id_ab, &nodes[1].node.get_our_node_id(), error_message.to_string()).unwrap();
+		nodes[0]
+			.node
+			.force_close_broadcasting_latest_txn(
+				&chan_id_ab,
+				&nodes[1].node.get_our_node_id(),
+				error_message.to_string(),
+			)
+			.unwrap();
 		check_added_monitors!(nodes[0], 1);
 		check_closed_broadcast!(nodes[0], true);
-		check_closed_event(&nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false, &[nodes[1].node.get_our_node_id()], 100_000);
+		check_closed_event(
+			&nodes[0],
+			1,
+			ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) },
+			false,
+			&[nodes[1].node.get_our_node_id()],
+			100_000,
+		);
 		let as_closing_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
 		mine_transaction_without_consistency_checks(&nodes[1], &as_closing_tx[0]);
 	}
 
-	let bc_update_id = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_bc).unwrap().1;
+	let bc_update_id =
+		nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_bc).unwrap().1;
 	let mut events = nodes[1].node.get_and_clear_pending_events();
 	assert_eq!(events.len(), if close_during_reload { 2 } else { 1 });
-	expect_payment_forwarded(events.pop().unwrap(), &nodes[1], &nodes[0], &nodes[2], Some(1000),
-		None, close_during_reload, false, false);
+	expect_payment_forwarded(
+		events.pop().unwrap(),
+		&nodes[1],
+		&nodes[0],
+		&nodes[2],
+		Some(1000),
+		None,
+		close_during_reload,
+		false,
+		false,
+	);
 	if close_during_reload {
 		match events[0] {
 			Event::ChannelClosed { .. } => {},
@@ -3432,7 +5131,8 @@ fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) {
 
 	// Once we run event processing the monitor should free, check that it was indeed the B<->C
 	// channel which was updated.
 	check_added_monitors(&nodes[1], if close_during_reload { 2 } else { 1 });
-	let post_ev_bc_update_id = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_bc).unwrap().1;
+	let post_ev_bc_update_id =
+		nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_bc).unwrap().1;
 	assert!(bc_update_id != post_ev_bc_update_id);
 
 	// Finally, check that there's nothing left to do on B<->C reconnect and the channel operates
@@ -3468,7 +5168,8 @@ fn do_test_glacial_peer_cant_hang(hold_chan_a: bool) {
 
 	// Route a payment from A, through B, to C, then claim it on C. Replay the
 	// `update_fulfill_htlc` twice on B to check that B doesn't hang.
-	let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
+	let (payment_preimage, payment_hash, ..) =
+		route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000);
 
 	nodes[2].node.claim_funds(payment_preimage);
 	check_added_monitors(&nodes[2], 1);
@@ -3479,12 +5180,18 @@ fn do_test_glacial_peer_cant_hang(hold_chan_a: bool) {
 		// The first update will be on the A <-> B channel, which we optionally allow to complete.
 		chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
 	}
-	nodes[1].node.handle_update_fulfill_htlc(nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]);
+	nodes[1].node.handle_update_fulfill_htlc(
+		nodes[2].node.get_our_node_id(),
+		&cs_updates.update_fulfill_htlcs[0],
+	);
 	check_added_monitors(&nodes[1], 1);
 
 	if !hold_chan_a {
 		let bs_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id());
-		nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
+		nodes[0].node.handle_update_fulfill_htlc(
+			nodes[1].node.get_our_node_id(),
+			&bs_updates.update_fulfill_htlcs[0],
+		);
 		commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false);
 		expect_payment_sent!(&nodes[0], payment_preimage);
 	}
@@ -3508,8 +5215,15 @@ fn do_test_glacial_peer_cant_hang(hold_chan_a: bool) {
 
 		// With the A<->B preimage persistence not yet complete, the B<->C channel is stuck
 		// waiting.
-		nodes[1].node.send_payment_with_route(route, payment_hash_2,
-			RecipientOnionFields::secret_only(payment_secret_2), PaymentId(payment_hash_2.0)).unwrap();
+		nodes[1]
+			.node
+			.send_payment_with_route(
+				route,
+				payment_hash_2,
+				RecipientOnionFields::secret_only(payment_secret_2),
+				PaymentId(payment_hash_2.0),
+			)
+			.unwrap();
 		check_added_monitors(&nodes[1], 0);
 
 		assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
@@ -3517,36 +5231,65 @@ fn do_test_glacial_peer_cant_hang(hold_chan_a: bool) {
 
 		// ...but once we complete the A<->B channel preimage persistence, the B<->C channel
 		// unlocks and we send both peers commitment updates.
-		let (ab_update_id, _) = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_ab).unwrap().clone();
-		assert!(nodes[1].chain_monitor.chain_monitor.channel_monitor_updated(chan_id_ab, ab_update_id).is_ok());
+		let (ab_update_id, _) = nodes[1]
+			.chain_monitor
+			.latest_monitor_update_id
+			.lock()
+			.unwrap()
+			.get(&chan_id_ab)
+			.unwrap()
+			.clone();
+		assert!(nodes[1]
+			.chain_monitor
+			.chain_monitor
+			.channel_monitor_updated(chan_id_ab, ab_update_id)
+			.is_ok());
 
 		let mut msg_events = nodes[1].node.get_and_clear_pending_msg_events();
 		assert_eq!(msg_events.len(), 2);
 		check_added_monitors(&nodes[1], 2);
 
-		let mut c_update = msg_events.iter()
-			.filter(|ev| matches!(ev, MessageSendEvent::UpdateHTLCs { node_id, ..
} if *node_id == node_c_id)) - .cloned().collect::>(); - let a_filtermap = |ev| if let MessageSendEvent::UpdateHTLCs { node_id, channel_id: _, updates } = ev { - if node_id == node_a_id { - Some(updates) + let mut c_update = msg_events + .iter() + .filter( + |ev| matches!(ev, MessageSendEvent::UpdateHTLCs { node_id, .. } if *node_id == node_c_id), + ) + .cloned() + .collect::>(); + let a_filtermap = |ev| { + if let MessageSendEvent::UpdateHTLCs { node_id, channel_id: _, updates } = ev { + if node_id == node_a_id { + Some(updates) + } else { + None + } } else { None } - } else { - None }; let a_update = msg_events.drain(..).filter_map(|ev| a_filtermap(ev)).collect::>(); assert_eq!(a_update.len(), 1); assert_eq!(c_update.len(), 1); - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &a_update[0].update_fulfill_htlcs[0]); + nodes[0].node.handle_update_fulfill_htlc( + nodes[1].node.get_our_node_id(), + &a_update[0].update_fulfill_htlcs[0], + ); commitment_signed_dance!(nodes[0], nodes[1], a_update[0].commitment_signed, false); expect_payment_sent(&nodes[0], payment_preimage, None, true, true); expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); - pass_along_path(&nodes[1], &[&nodes[2]], 1_000_000, payment_hash_2, Some(payment_secret_2), c_update.pop().unwrap(), true, None); + pass_along_path( + &nodes[1], + &[&nodes[2]], + 1_000_000, + payment_hash_2, + Some(payment_secret_2), + c_update.pop().unwrap(), + true, + None, + ); claim_payment(&nodes[1], &[&nodes[2]], payment_preimage_2); } } @@ -3574,7 +5317,8 @@ fn test_partial_claim_mon_update_compl_actions() { let (chan_4_update, _, chan_4_id, ..) = create_announced_chan_between_nodes(&nodes, 2, 3); let chan_4_scid = chan_4_update.contents.short_channel_id; - let (mut route, payment_hash, preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[3], 100000); + let (mut route, payment_hash, preimage, payment_secret) = + get_route_and_payment_hash!(&nodes[0], nodes[3], 100000); let path = route.paths[0].clone(); route.paths.push(path); route.paths[0].hops[0].pubkey = nodes[1].node.get_our_node_id(); @@ -3583,7 +5327,14 @@ fn test_partial_claim_mon_update_compl_actions() { route.paths[1].hops[0].pubkey = nodes[2].node.get_our_node_id(); route.paths[1].hops[0].short_channel_id = chan_2_scid; route.paths[1].hops[1].short_channel_id = chan_4_scid; - send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], 200_000, payment_hash, payment_secret); + send_along_route_with_secret( + &nodes[0], + route, + &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], + 200_000, + payment_hash, + payment_secret, + ); // Claim along both paths, but only complete one of the two monitor updates. 
chanmon_cfgs[3].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); @@ -3599,12 +5350,18 @@ fn test_partial_claim_mon_update_compl_actions() { expect_payment_claimed!(&nodes[3], payment_hash, 200_000); let updates = get_htlc_update_msgs(&nodes[3], &nodes[1].node.get_our_node_id()); - nodes[1].node.handle_update_fulfill_htlc(nodes[3].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); + nodes[1].node.handle_update_fulfill_htlc( + nodes[3].node.get_our_node_id(), + &updates.update_fulfill_htlcs[0], + ); check_added_monitors(&nodes[1], 1); expect_payment_forwarded!(nodes[1], nodes[0], nodes[3], Some(1000), false, false); let _bs_updates_for_a = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); - nodes[1].node.handle_commitment_signed_batch_test(nodes[3].node.get_our_node_id(), &updates.commitment_signed); + nodes[1].node.handle_commitment_signed_batch_test( + nodes[3].node.get_our_node_id(), + &updates.commitment_signed, + ); check_added_monitors(&nodes[1], 1); let (bs_raa, bs_cs) = get_revoke_commit_msgs(&nodes[1], &nodes[3].node.get_our_node_id()); @@ -3630,18 +5387,25 @@ fn test_partial_claim_mon_update_compl_actions() { MessageSendEvent::SendRevokeAndACK { msg, .. } => { nodes[1].node.handle_revoke_and_ack(nodes[3].node.get_our_node_id(), &msg); check_added_monitors(&nodes[1], 1); - } + }, _ => panic!(), } match remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut ds_msgs) { MessageSendEvent::UpdateHTLCs { updates, .. } => { - nodes[2].node.handle_update_fulfill_htlc(nodes[3].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); + nodes[2].node.handle_update_fulfill_htlc( + nodes[3].node.get_our_node_id(), + &updates.update_fulfill_htlcs[0], + ); check_added_monitors(&nodes[2], 1); expect_payment_forwarded!(nodes[2], nodes[0], nodes[3], Some(1000), false, false); - let _cs_updates_for_a = get_htlc_update_msgs(&nodes[2], &nodes[0].node.get_our_node_id()); + let _cs_updates_for_a = + get_htlc_update_msgs(&nodes[2], &nodes[0].node.get_our_node_id()); - nodes[2].node.handle_commitment_signed_batch_test(nodes[3].node.get_our_node_id(), &updates.commitment_signed); + nodes[2].node.handle_commitment_signed_batch_test( + nodes[3].node.get_our_node_id(), + &updates.commitment_signed, + ); check_added_monitors(&nodes[2], 1); }, _ => panic!(), @@ -3655,7 +5419,11 @@ fn test_partial_claim_mon_update_compl_actions() { nodes[3].node.handle_commitment_signed_batch_test(nodes[2].node.get_our_node_id(), &cs_cs); check_added_monitors(&nodes[3], 1); - let ds_raa = get_event_msg!(nodes[3], MessageSendEvent::SendRevokeAndACK, nodes[2].node.get_our_node_id()); + let ds_raa = get_event_msg!( + nodes[3], + MessageSendEvent::SendRevokeAndACK, + nodes[2].node.get_our_node_id() + ); nodes[2].node.handle_revoke_and_ack(nodes[3].node.get_our_node_id(), &ds_raa); check_added_monitors(&nodes[2], 1); @@ -3671,7 +5439,6 @@ fn test_partial_claim_mon_update_compl_actions() { assert!(!get_monitor!(nodes[3], chan_4_id).get_stored_preimages().contains_key(&payment_hash)); } - #[test] fn test_claim_to_closed_channel_blocks_forwarded_preimage_removal() { // One of the last features for async persistence we implemented was the correct blocking of @@ -3684,12 +5451,22 @@ fn test_claim_to_closed_channel_blocks_forwarded_preimage_removal() { let nodes = create_network(3, &node_cfgs, &node_chanmgrs); // First open channels, route a payment, and force-close the first hop. 
- let chan_a = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000); - let chan_b = create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 500_000_000); - - let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000); - - nodes[0].node.force_close_broadcasting_latest_txn(&chan_a.2, &nodes[1].node.get_our_node_id(), String::new()).unwrap(); + let chan_a = + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000); + let chan_b = + create_announced_chan_between_nodes_with_value(&nodes, 1, 2, 1_000_000, 500_000_000); + + let (payment_preimage, payment_hash, ..) = + route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000); + + nodes[0] + .node + .force_close_broadcasting_latest_txn( + &chan_a.2, + &nodes[1].node.get_our_node_id(), + String::new(), + ) + .unwrap(); check_added_monitors!(nodes[0], 1); let a_reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; check_closed_event!(nodes[0], 1, a_reason, [nodes[1].node.get_our_node_id()], 1000000); @@ -3700,7 +5477,13 @@ fn test_claim_to_closed_channel_blocks_forwarded_preimage_removal() { mine_transaction(&nodes[1], &as_commit_tx[0]); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 1000000); + check_closed_event!( + nodes[1], + 1, + ClosureReason::CommitmentTxConfirmed, + [nodes[0].node.get_our_node_id()], + 1000000 + ); check_closed_broadcast!(nodes[1], true); // Now that B has a pending forwarded payment across it with the inbound edge on-chain, claim @@ -3711,14 +5494,20 @@ fn test_claim_to_closed_channel_blocks_forwarded_preimage_removal() { let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - nodes[1].node.handle_update_fulfill_htlc(nodes[2].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); + nodes[1].node.handle_update_fulfill_htlc( + nodes[2].node.get_our_node_id(), + &updates.update_fulfill_htlcs[0], + ); check_added_monitors!(nodes[1], 1); commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false); // At this point nodes[1] has the preimage and is waiting for the `ChannelMonitorUpdate` for // channel A to hit disk. 
Until it does so, it shouldn't ever let the preimage dissapear from // channel B's `ChannelMonitor` - assert!(get_monitor!(nodes[1], chan_b.2).get_all_current_outbound_htlcs().iter().any(|(_, (_, preimage))| *preimage == Some(payment_preimage))); + assert!(get_monitor!(nodes[1], chan_b.2) + .get_all_current_outbound_htlcs() + .iter() + .any(|(_, (_, preimage))| *preimage == Some(payment_preimage))); // Once we complete the `ChannelMonitorUpdate` on channel A, and the `ChannelManager` processes // background events (via `get_and_clear_pending_msg_events`), the final `ChannelMonitorUpdate` @@ -3731,7 +5520,10 @@ fn test_claim_to_closed_channel_blocks_forwarded_preimage_removal() { nodes[1].chain_monitor.complete_sole_pending_chan_update(&chan_a.2); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 1); - assert!(!get_monitor!(nodes[1], chan_b.2).get_all_current_outbound_htlcs().iter().any(|(_, (_, preimage))| *preimage == Some(payment_preimage))); + assert!(!get_monitor!(nodes[1], chan_b.2) + .get_all_current_outbound_htlcs() + .iter() + .any(|(_, (_, preimage))| *preimage == Some(payment_preimage))); expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], None, true, false); } @@ -3747,11 +5539,19 @@ fn test_claim_to_closed_channel_blocks_claimed_event() { let nodes = create_network(2, &node_cfgs, &node_chanmgrs); // First open channels, route a payment, and force-close the first hop. - let chan_a = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000); + let chan_a = + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000); let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000); - nodes[0].node.force_close_broadcasting_latest_txn(&chan_a.2, &nodes[1].node.get_our_node_id(), String::new()).unwrap(); + nodes[0] + .node + .force_close_broadcasting_latest_txn( + &chan_a.2, + &nodes[1].node.get_our_node_id(), + String::new(), + ) + .unwrap(); check_added_monitors!(nodes[0], 1); let a_reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; check_closed_event!(nodes[0], 1, a_reason, [nodes[1].node.get_our_node_id()], 1000000); @@ -3762,7 +5562,13 @@ fn test_claim_to_closed_channel_blocks_claimed_event() { mine_transaction(&nodes[1], &as_commit_tx[0]); check_added_monitors!(nodes[1], 1); - check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 1000000); + check_closed_event!( + nodes[1], + 1, + ClosureReason::CommitmentTxConfirmed, + [nodes[0].node.get_our_node_id()], + 1000000 + ); check_closed_broadcast!(nodes[1], true); // Now that B has a pending payment with the inbound HTLC on a closed channel, claim the @@ -3843,9 +5649,24 @@ fn test_single_channel_multiple_mpp() { create_announced_chan_between_nodes_with_value(&nodes, 6, 7, 100_000, 0); create_announced_chan_between_nodes_with_value(&nodes, 7, 8, 1_000_000, 0); - let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[0], nodes[8], 50_000_000); - - send_along_route_with_secret(&nodes[0], route, &[&[&nodes[1], &nodes[7], &nodes[8]], &[&nodes[2], &nodes[7], &nodes[8]], &[&nodes[3], &nodes[7], &nodes[8]], &[&nodes[4], &nodes[7], &nodes[8]], &[&nodes[5], &nodes[7], &nodes[8]], &[&nodes[6], &nodes[7], &nodes[8]]], 50_000_000, payment_hash, payment_secret); + let (mut route, payment_hash, payment_preimage, payment_secret) = + get_route_and_payment_hash!(&nodes[0], nodes[8], 
50_000_000); + + send_along_route_with_secret( + &nodes[0], + route, + &[ + &[&nodes[1], &nodes[7], &nodes[8]], + &[&nodes[2], &nodes[7], &nodes[8]], + &[&nodes[3], &nodes[7], &nodes[8]], + &[&nodes[4], &nodes[7], &nodes[8]], + &[&nodes[5], &nodes[7], &nodes[8]], + &[&nodes[6], &nodes[7], &nodes[8]], + ], + 50_000_000, + payment_hash, + payment_secret, + ); let (do_a_write, blocker) = std::sync::mpsc::sync_channel(0); *nodes[8].chain_monitor.write_blocker.lock().unwrap() = Some(blocker); From dd30a77c112bd2d0c0837bfa3dea8e42c18df201 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sun, 18 May 2025 19:25:00 +0000 Subject: [PATCH 07/12] Remove some unnecessary `vec`s from `chanmon_update_fail_tests` --- lightning/src/ln/chanmon_update_fail_tests.rs | 26 +++++++++---------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index 3bf713db1bc..260d9e20044 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -52,7 +52,7 @@ fn test_monitor_and_persister_update_fail() { let chan = create_announced_chan_between_nodes(&nodes, 0, 1); // Rebalance the network to generate htlc in the two directions - send_payment(&nodes[0], &vec![&nodes[1]][..], 10_000_000); + send_payment(&nodes[0], &[&nodes[1]], 10_000_000); // Route an HTLC from node 0 to node 1 (but don't settle) let (preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1]], 9_000_000); @@ -237,7 +237,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { assert_eq!(payment_hash_1, *payment_hash); assert_eq!(amount_msat, 1_000_000); assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id()); - assert_eq!(*via_channel_ids, vec![(channel_id, Some(user_channel_id))]); + assert_eq!(*via_channel_ids, &[(channel_id, Some(user_channel_id))]); match &purpose { PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. @@ -547,7 +547,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { assert!(bs_resp.2.as_ref().unwrap().update_fail_htlcs.is_empty()); assert!(bs_resp.2.as_ref().unwrap().update_fail_malformed_htlcs.is_empty()); assert!(bs_resp.2.as_ref().unwrap().update_fee.is_none()); - assert!(bs_resp.2.as_ref().unwrap().update_fulfill_htlcs == vec![bs_initial_fulfill]); + assert_eq!(bs_resp.2.as_ref().unwrap().update_fulfill_htlcs, [bs_initial_fulfill]); assert!(bs_resp.2.as_ref().unwrap().commitment_signed == bs_initial_commitment_signed); assert!(as_resp.1.is_none()); @@ -779,7 +779,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { assert_eq!(payment_hash_2, *payment_hash); assert_eq!(amount_msat, 1_000_000); assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id()); - assert_eq!(*via_channel_ids, vec![(channel_id, Some(user_channel_id))]); + assert_eq!(via_channel_ids, [(channel_id, Some(user_channel_id))]); match &purpose { PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. @@ -940,7 +940,7 @@ fn test_monitor_update_fail_cs() { assert_eq!(payment_hash, our_payment_hash); assert_eq!(amount_msat, 1_000_000); assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id()); - assert_eq!(*via_channel_ids, vec![(channel_id, Some(user_channel_id))]); + assert_eq!(via_channel_ids, [(channel_id, Some(user_channel_id))]); match &purpose { PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. 
@@ -1171,7 +1171,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { nodes[2].node.fail_htlc_backwards(&payment_hash_1); expect_pending_htlcs_forwardable_and_htlc_handling_failed!( nodes[2], - vec![HTLCHandlingFailureType::Receive { payment_hash: payment_hash_1 }] + [HTLCHandlingFailureType::Receive { payment_hash: payment_hash_1 }] ); check_added_monitors!(nodes[2], 1); @@ -1303,7 +1303,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { check_added_monitors!(nodes[1], 0); expect_pending_htlcs_forwardable_and_htlc_handling_failed!( nodes[1], - vec![HTLCHandlingFailureType::Forward { + [HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }] @@ -2509,7 +2509,7 @@ fn test_monitor_update_fail_claim() { assert_eq!(payment_hash_3, *payment_hash); assert_eq!(1_000_000, amount_msat); assert_eq!(receiver_node_id.unwrap(), nodes[0].node.get_our_node_id()); - assert_eq!(*via_channel_ids, vec![(channel_id, Some(42))]); + assert_eq!(via_channel_ids, [(channel_id, Some(42))]); match &purpose { PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. @@ -2544,7 +2544,7 @@ fn test_monitor_update_on_pending_forwards() { nodes[2].node.fail_htlc_backwards(&payment_hash_1); expect_pending_htlcs_forwardable_and_htlc_handling_failed!( nodes[2], - vec![HTLCHandlingFailureType::Receive { payment_hash: payment_hash_1 }] + [HTLCHandlingFailureType::Receive { payment_hash: payment_hash_1 }] ); check_added_monitors!(nodes[2], 1); @@ -2580,7 +2580,7 @@ fn test_monitor_update_on_pending_forwards() { chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); expect_pending_htlcs_forwardable_and_htlc_handling_failed!( nodes[1], - vec![HTLCHandlingFailureType::Forward { + [HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_2.2 }] @@ -3252,7 +3252,7 @@ fn test_fail_htlc_on_broadcast_after_claim() { check_added_monitors!(nodes[1], 1); expect_pending_htlcs_forwardable_and_htlc_handling_failed!( nodes[1], - vec![HTLCHandlingFailureType::Forward { + [HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }] @@ -3754,7 +3754,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f nodes[2].node.fail_htlc_backwards(&payment_hash); expect_pending_htlcs_forwardable_and_htlc_handling_failed!( nodes[2], - vec![HTLCHandlingFailureType::Receive { payment_hash }] + [HTLCHandlingFailureType::Receive { payment_hash }] ); check_added_monitors!(nodes[2], 1); get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); @@ -3803,7 +3803,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f reconnect_nodes(reconnect_args); expect_pending_htlcs_forwardable_and_htlc_handling_failed!( nodes[1], - vec![HTLCHandlingFailureType::Forward { + [HTLCHandlingFailureType::Forward { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }] From a521e15f1a6d912a33e238954c9dfc35decccb86 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sun, 18 May 2025 19:25:26 +0000 Subject: [PATCH 08/12] Avoid `assert!(a == b)` in `chanmon_update_fail_tests` `assert_eq` is generally much better than `assert` for equality tests as it provides debug output of `a` and `b`. 
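
For example (an illustrative sketch, not code from this patch; the exact
panic text varies by Rust version), a failing `assert!` only echoes the
asserted expression, while `assert_eq!` also prints both operands:

    let a = 1;
    let b = 2;
    // assert!(a == b) panics with just:
    //     assertion failed: a == b
    // assert_eq!(a, b) panics and additionally prints the values, e.g.:
    //     assertion `left == right` failed
    //       left: 1
    //      right: 2
    assert_eq!(a, b);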
--- lightning/src/ln/chanmon_update_fail_tests.rs | 42 +++++++++---------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index 260d9e20044..5e1ddb07710 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -541,14 +541,14 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { assert!(as_resp.1.is_some()); assert!(as_resp.2.is_some()); - assert!(as_resp.3 == RAACommitmentOrder::CommitmentFirst); + assert_eq!(as_resp.3, RAACommitmentOrder::CommitmentFirst); } else { assert!(bs_resp.2.as_ref().unwrap().update_add_htlcs.is_empty()); assert!(bs_resp.2.as_ref().unwrap().update_fail_htlcs.is_empty()); assert!(bs_resp.2.as_ref().unwrap().update_fail_malformed_htlcs.is_empty()); assert!(bs_resp.2.as_ref().unwrap().update_fee.is_none()); assert_eq!(bs_resp.2.as_ref().unwrap().update_fulfill_htlcs, [bs_initial_fulfill]); - assert!(bs_resp.2.as_ref().unwrap().commitment_signed == bs_initial_commitment_signed); + assert_eq!(bs_resp.2.as_ref().unwrap().commitment_signed, bs_initial_commitment_signed); assert!(as_resp.1.is_none()); @@ -587,11 +587,11 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { disconnect_reconnect_peers!(); if (disconnect_count & 16) == 0 { - assert!(reestablish_1 == second_reestablish_1); - assert!(reestablish_2 == second_reestablish_2); + assert_eq!(reestablish_1, second_reestablish_1); + assert_eq!(reestablish_2, second_reestablish_2); } - assert!(as_resp == second_as_resp); - assert!(bs_resp == second_bs_resp); + assert_eq!(as_resp, second_as_resp); + assert_eq!(bs_resp, second_bs_resp); } ( @@ -635,8 +635,8 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { if disconnect_count & !disconnect_flags > 2 { let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!(); - assert!(as_resp.1.unwrap() == initial_revoke_and_ack); - assert!(bs_resp.1.unwrap() == bs_revoke_and_ack); + assert_eq!(as_resp.1.unwrap(), initial_revoke_and_ack); + assert_eq!(bs_resp.1.unwrap(), bs_revoke_and_ack); assert!(as_resp.2.is_none()); assert!(bs_resp.2.is_none()); @@ -682,13 +682,13 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { if disconnect_count & !disconnect_flags > 3 { let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!(); - assert!(as_resp.1.unwrap() == initial_revoke_and_ack); + assert_eq!(as_resp.1.unwrap(), initial_revoke_and_ack); assert!(bs_resp.1.is_none()); - assert!(as_resp.2.unwrap() == as_commitment_update); + assert_eq!(as_resp.2.unwrap(), as_commitment_update); assert!(bs_resp.2.is_none()); - assert!(as_resp.3 == RAACommitmentOrder::RevokeAndACKFirst); + assert_eq!(as_resp.3, RAACommitmentOrder::RevokeAndACKFirst); } handle_initial_raa!(); @@ -699,8 +699,8 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { assert!(as_resp.1.is_none()); assert!(bs_resp.1.is_none()); - assert!(as_resp.2.unwrap() == as_commitment_update); - assert!(bs_resp.2.unwrap() == bs_second_commitment_update); + assert_eq!(as_resp.2.unwrap(), as_commitment_update); + assert_eq!(bs_resp.2.unwrap(), bs_second_commitment_update); } } else { handle_initial_raa!(); @@ -709,12 +709,12 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { let (_, _, as_resp, bs_resp) = disconnect_reconnect_peers!(); assert!(as_resp.1.is_none()); - assert!(bs_resp.1.unwrap() == bs_revoke_and_ack); + assert_eq!(bs_resp.1.unwrap(), bs_revoke_and_ack); 
assert!(as_resp.2.is_none()); - assert!(bs_resp.2.unwrap() == bs_second_commitment_update); + assert_eq!(bs_resp.2.unwrap(), bs_second_commitment_update); - assert!(bs_resp.3 == RAACommitmentOrder::RevokeAndACKFirst); + assert_eq!(bs_resp.3, RAACommitmentOrder::RevokeAndACKFirst); } handle_bs_raa!(); @@ -725,8 +725,8 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { assert!(as_resp.1.is_none()); assert!(bs_resp.1.is_none()); - assert!(as_resp.2.unwrap() == as_commitment_update); - assert!(bs_resp.2.unwrap() == bs_second_commitment_update); + assert_eq!(as_resp.2.unwrap(), as_commitment_update); + assert_eq!(bs_resp.2.unwrap(), bs_second_commitment_update); } } @@ -779,7 +779,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { assert_eq!(payment_hash_2, *payment_hash); assert_eq!(amount_msat, 1_000_000); assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id()); - assert_eq!(via_channel_ids, [(channel_id, Some(user_channel_id))]); + assert_eq!(*via_channel_ids, [(channel_id, Some(user_channel_id))]); match &purpose { PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. @@ -940,7 +940,7 @@ fn test_monitor_update_fail_cs() { assert_eq!(payment_hash, our_payment_hash); assert_eq!(amount_msat, 1_000_000); assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id()); - assert_eq!(via_channel_ids, [(channel_id, Some(user_channel_id))]); + assert_eq!(*via_channel_ids, [(channel_id, Some(user_channel_id))]); match &purpose { PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. @@ -2509,7 +2509,7 @@ fn test_monitor_update_fail_claim() { assert_eq!(payment_hash_3, *payment_hash); assert_eq!(1_000_000, amount_msat); assert_eq!(receiver_node_id.unwrap(), nodes[0].node.get_our_node_id()); - assert_eq!(via_channel_ids, [(channel_id, Some(42))]); + assert_eq!(*via_channel_ids, [(channel_id, Some(42))]); match &purpose { PaymentPurpose::Bolt11InvoicePayment { payment_preimage, payment_secret, .. 
From b0de677049c6ec130a0514ecf0cfbe20a653e55a Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sun, 18 May 2025 20:02:50 +0000 Subject: [PATCH 09/12] Use `node_*_id` variables in chanmon_update_fail_tests --- lightning/src/ln/chanmon_update_fail_tests.rs | 1169 +++++++++-------- 1 file changed, 650 insertions(+), 519 deletions(-) diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index 5e1ddb07710..de25c4a8127 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -48,6 +48,9 @@ fn test_monitor_and_persister_update_fail() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + // Create some initial channel let chan = create_announced_chan_between_nodes(&nodes, 0, 1); @@ -106,10 +109,10 @@ fn test_monitor_and_persister_update_fail() { expect_payment_claimed!(nodes[1], payment_hash, 9_000_000); check_added_monitors!(nodes[1], 1); - let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let updates = get_htlc_update_msgs!(nodes[1], node_a_id); assert_eq!(updates.update_fulfill_htlcs.len(), 1); nodes[0].node.handle_update_fulfill_htlc( - nodes[1].node.get_our_node_id(), + node_b_id, &updates.update_fulfill_htlcs[0], ); @@ -169,6 +172,10 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2; let user_channel_id = nodes[1].node.list_channels()[0].user_channel_id; @@ -195,8 +202,8 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { assert_eq!(nodes[0].node.list_channels().len(), 1); if disconnect { - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); + nodes[1].node.peer_disconnected(node_a_id); let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]); reconnect_args.send_channel_ready = (true, true); reconnect_nodes(reconnect_args); @@ -217,8 +224,8 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { let mut events_2 = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events_2.len(), 1); let payment_event = SendEvent::from_event(events_2.pop().unwrap()); - assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + assert_eq!(payment_event.node_id, node_b_id); + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], payment_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); @@ -236,7 +243,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { } => { assert_eq!(payment_hash_1, *payment_hash); assert_eq!(amount_msat, 1_000_000); - assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id()); + assert_eq!(receiver_node_id.unwrap(), node_b_id); assert_eq!(*via_channel_ids, &[(channel_id, 
Some(user_channel_id))]); match &purpose { PaymentPurpose::Bolt11InvoicePayment { @@ -275,8 +282,8 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { assert_eq!(nodes[0].node.list_channels().len(), 1); if disconnect { - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); + nodes[1].node.peer_disconnected(node_a_id); reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); } @@ -286,7 +293,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { .node .force_close_broadcasting_latest_txn( &channel_id, - &nodes[1].node.get_our_node_id(), + &node_b_id, error_message.to_string(), ) .unwrap(); @@ -301,7 +308,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { nodes[0], 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, - [nodes[1].node.get_our_node_id()], + [node_b_id], 100000 ); } @@ -336,6 +343,10 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2; let user_channel_id = nodes[1].node.list_channels()[0].user_channel_id; @@ -385,7 +396,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { ref commitment_signed, }, } => { - assert_eq!(*node_id, nodes[0].node.get_our_node_id()); + assert_eq!(*node_id, node_a_id); assert!(update_add_htlcs.is_empty()); assert_eq!(update_fulfill_htlcs.len(), 1); assert!(update_fail_htlcs.is_empty()); @@ -394,7 +405,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { if (disconnect_count & 16) == 0 { nodes[0].node.handle_update_fulfill_htlc( - nodes[1].node.get_our_node_id(), + node_b_id, &update_fulfill_htlcs[0], ); let events_3 = nodes[0].node.get_and_clear_pending_events(); @@ -408,7 +419,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { } nodes[0].node.handle_commitment_signed_batch_test( - nodes[1].node.get_our_node_id(), + node_b_id, commitment_signed, ); check_added_monitors!(nodes[0], 1); @@ -421,8 +432,8 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { }; if disconnect_count & !disconnect_flags > 0 { - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); + nodes[1].node.peer_disconnected(node_a_id); } // Now fix monitor updating... @@ -440,13 +451,13 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { macro_rules! 
disconnect_reconnect_peers { () => {{ - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); + nodes[1].node.peer_disconnected(node_a_id); nodes[0] .node .peer_connected( - nodes[1].node.get_our_node_id(), + node_b_id, &msgs::Init { features: nodes[1].node.init_features(), networks: None, @@ -460,7 +471,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { nodes[1] .node .peer_connected( - nodes[0].node.get_our_node_id(), + node_a_id, &msgs::Init { features: nodes[0].node.init_features(), networks: None, @@ -474,11 +485,11 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { nodes[0] .node - .handle_channel_reestablish(nodes[1].node.get_our_node_id(), &reestablish_2[0]); + .handle_channel_reestablish(node_b_id, &reestablish_2[0]); let as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]); nodes[1] .node - .handle_channel_reestablish(nodes[0].node.get_our_node_id(), &reestablish_1[0]); + .handle_channel_reestablish(node_a_id, &reestablish_1[0]); let bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]); assert!(as_resp.0.is_none()); @@ -495,7 +506,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { nodes[0] .node .peer_connected( - nodes[1].node.get_our_node_id(), + node_b_id, &msgs::Init { features: nodes[1].node.init_features(), networks: None, @@ -509,7 +520,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { nodes[1] .node .peer_connected( - nodes[0].node.get_our_node_id(), + node_a_id, &msgs::Init { features: nodes[0].node.init_features(), networks: None, @@ -523,12 +534,12 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { nodes[0] .node - .handle_channel_reestablish(nodes[1].node.get_our_node_id(), &reestablish_2[0]); + .handle_channel_reestablish(node_b_id, &reestablish_2[0]); check_added_monitors!(nodes[0], 0); let mut as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]); nodes[1] .node - .handle_channel_reestablish(nodes[0].node.get_our_node_id(), &reestablish_1[0]); + .handle_channel_reestablish(node_a_id, &reestablish_1[0]); check_added_monitors!(nodes[1], 0); let mut bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]); @@ -553,7 +564,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { assert!(as_resp.1.is_none()); nodes[0].node.handle_update_fulfill_htlc( - nodes[1].node.get_our_node_id(), + node_b_id, &bs_resp.2.as_ref().unwrap().update_fulfill_htlcs[0], ); let events_3 = nodes[0].node.get_and_clear_pending_events(); @@ -567,13 +578,13 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { } nodes[0].node.handle_commitment_signed_batch_test( - nodes[1].node.get_our_node_id(), + node_b_id, &bs_resp.2.as_ref().unwrap().commitment_signed, ); let as_resp_raa = get_event_msg!( nodes[0], MessageSendEvent::SendRevokeAndACK, - nodes[1].node.get_our_node_id() + node_b_id ); // No commitment_signed so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[0], 1); @@ -596,7 +607,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { ( SendEvent::from_commitment_update( - nodes[1].node.get_our_node_id(), + node_b_id, channel_id, as_resp.2.unwrap(), ), @@ -609,7 +620,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { SendEvent::from_event(events_4.remove(0)), match events_4[0] { MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => { - 
assert_eq!(*node_id, nodes[1].node.get_our_node_id()); + assert_eq!(*node_id, node_b_id); msg.clone() }, _ => panic!("Unexpected event"), @@ -617,17 +628,17 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { ) }; - assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id()); + assert_eq!(payment_event.node_id, node_b_id); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test( - nodes[0].node.get_our_node_id(), + node_a_id, &payment_event.commitment_msg, ); let bs_revoke_and_ack = get_event_msg!( nodes[1], MessageSendEvent::SendRevokeAndACK, - nodes[0].node.get_our_node_id() + node_a_id ); // nodes[1] is awaiting an RAA from nodes[0] still so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[1], 1); @@ -649,8 +660,8 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { () => { nodes[0] .node - .handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_revoke_and_ack); - as_commitment_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + .handle_revoke_and_ack(node_b_id, &bs_revoke_and_ack); + as_commitment_update = get_htlc_update_msgs!(nodes[0], node_b_id); assert!(as_commitment_update.update_add_htlcs.is_empty()); assert!(as_commitment_update.update_fulfill_htlcs.is_empty()); assert!(as_commitment_update.update_fail_htlcs.is_empty()); @@ -664,9 +675,9 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { () => { nodes[1] .node - .handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &initial_revoke_and_ack); + .handle_revoke_and_ack(node_a_id, &initial_revoke_and_ack); bs_second_commitment_update = - get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + get_htlc_update_msgs!(nodes[1], node_a_id); assert!(bs_second_commitment_update.update_add_htlcs.is_empty()); assert!(bs_second_commitment_update.update_fulfill_htlcs.is_empty()); assert!(bs_second_commitment_update.update_fail_htlcs.is_empty()); @@ -731,34 +742,34 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { } nodes[0].node.handle_commitment_signed_batch_test( - nodes[1].node.get_our_node_id(), + node_b_id, &bs_second_commitment_update.commitment_signed, ); let as_revoke_and_ack = get_event_msg!( nodes[0], MessageSendEvent::SendRevokeAndACK, - nodes[1].node.get_our_node_id() + node_b_id ); // No commitment_signed so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[0], 1); nodes[1].node.handle_commitment_signed_batch_test( - nodes[0].node.get_our_node_id(), + node_a_id, &as_commitment_update.commitment_signed, ); let bs_second_revoke_and_ack = get_event_msg!( nodes[1], MessageSendEvent::SendRevokeAndACK, - nodes[0].node.get_our_node_id() + node_a_id ); // No commitment_signed so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[1], 1); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_revoke_and_ack); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 1); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_second_revoke_and_ack); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_revoke_and_ack); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[0], 1); 
expect_payment_path_successful!(nodes[0]); @@ -778,7 +789,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { } => { assert_eq!(payment_hash_2, *payment_hash); assert_eq!(amount_msat, 1_000_000); - assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id()); + assert_eq!(receiver_node_id.unwrap(), node_b_id); assert_eq!(*via_channel_ids, [(channel_id, Some(user_channel_id))]); match &purpose { PaymentPurpose::Bolt11InvoicePayment { @@ -830,6 +841,10 @@ fn test_monitor_update_fail_cs() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2; let user_channel_id = nodes[1].node.list_channels()[0].user_channel_id; @@ -850,11 +865,11 @@ fn test_monitor_update_fail_cs() { let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &send_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[1].node.handle_commitment_signed_batch_test( - nodes[0].node.get_our_node_id(), + node_a_id, &send_event.commitment_msg, ); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -877,8 +892,8 @@ fn test_monitor_update_fail_cs() { match responses[0] { MessageSendEvent::SendRevokeAndACK { ref msg, ref node_id } => { - assert_eq!(*node_id, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &msg); + assert_eq!(*node_id, node_a_id); + nodes[0].node.handle_revoke_and_ack(node_b_id, &msg); check_added_monitors!(nodes[0], 1); }, _ => panic!("Unexpected event"), @@ -890,11 +905,11 @@ fn test_monitor_update_fail_cs() { assert!(updates.update_fail_htlcs.is_empty()); assert!(updates.update_fail_malformed_htlcs.is_empty()); assert!(updates.update_fee.is_none()); - assert_eq!(*node_id, nodes[0].node.get_our_node_id()); + assert_eq!(*node_id, node_a_id); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[0].node.handle_commitment_signed_batch_test( - nodes[1].node.get_our_node_id(), + node_b_id, &updates.commitment_signed, ); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -919,9 +934,9 @@ fn test_monitor_update_fail_cs() { let final_raa = get_event_msg!( nodes[0], MessageSendEvent::SendRevokeAndACK, - nodes[1].node.get_our_node_id() + node_b_id ); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &final_raa); + nodes[1].node.handle_revoke_and_ack(node_a_id, &final_raa); check_added_monitors!(nodes[1], 1); expect_pending_htlcs_forwardable!(nodes[1]); @@ -939,7 +954,7 @@ fn test_monitor_update_fail_cs() { } => { assert_eq!(payment_hash, our_payment_hash); assert_eq!(amount_msat, 1_000_000); - assert_eq!(receiver_node_id.unwrap(), nodes[1].node.get_our_node_id()); + assert_eq!(receiver_node_id.unwrap(), node_b_id); assert_eq!(*via_channel_ids, [(channel_id, Some(user_channel_id))]); match &purpose { PaymentPurpose::Bolt11InvoicePayment { @@ -966,6 +981,9 @@ fn test_monitor_update_fail_no_rebroadcast() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, 
&[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2; let (route, our_payment_hash, payment_preimage_1, payment_secret_1) = @@ -985,7 +1003,7 @@ fn test_monitor_update_fail_no_rebroadcast() { let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &send_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); let bs_raa = commitment_signed_dance!( nodes[1], nodes[0], @@ -997,7 +1015,7 @@ fn test_monitor_update_fail_no_rebroadcast() { ); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &bs_raa); + nodes[1].node.handle_revoke_and_ack(node_a_id, &bs_raa); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); @@ -1037,6 +1055,10 @@ fn test_monitor_update_raa_while_paused() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2; send_payment(&nodes[0], &[&nodes[1]], 5000000); @@ -1074,30 +1096,30 @@ fn test_monitor_update_raa_while_paused() { let send_event_2 = SendEvent::from_event(nodes[1].node.get_and_clear_pending_msg_events().remove(0)); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &send_event_1.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &send_event_1.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test( - nodes[0].node.get_our_node_id(), + node_a_id, &send_event_1.commitment_msg, ); check_added_monitors!(nodes[1], 1); let bs_raa = get_event_msg!( nodes[1], MessageSendEvent::SendRevokeAndACK, - nodes[0].node.get_our_node_id() + node_a_id ); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - nodes[0].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &send_event_2.msgs[0]); + nodes[0].node.handle_update_add_htlc(node_b_id, &send_event_2.msgs[0]); nodes[0].node.handle_commitment_signed_batch_test( - nodes[1].node.get_our_node_id(), + node_b_id, &send_event_2.commitment_msg, ); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_raa); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[0], 1); @@ -1112,38 +1134,38 @@ fn test_monitor_update_raa_while_paused() { nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors!(nodes[0], 0); - let as_update_raa = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_update_raa.0); + let 
as_update_raa = get_revoke_commit_msgs!(nodes[0], node_b_id); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_update_raa.0); check_added_monitors!(nodes[1], 1); - let bs_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let bs_cs = get_htlc_update_msgs!(nodes[1], node_a_id); nodes[1] .node - .handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &as_update_raa.1); + .handle_commitment_signed_batch_test(node_a_id, &as_update_raa.1); check_added_monitors!(nodes[1], 1); let bs_second_raa = get_event_msg!( nodes[1], MessageSendEvent::SendRevokeAndACK, - nodes[0].node.get_our_node_id() + node_a_id ); nodes[0].node.handle_commitment_signed_batch_test( - nodes[1].node.get_our_node_id(), + node_b_id, &bs_cs.commitment_signed, ); check_added_monitors!(nodes[0], 1); let as_second_raa = get_event_msg!( nodes[0], MessageSendEvent::SendRevokeAndACK, - nodes[1].node.get_our_node_id() + node_b_id ); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_second_raa); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_raa); check_added_monitors!(nodes[0], 1); expect_pending_htlcs_forwardable!(nodes[0]); expect_payment_claimable!(nodes[0], our_payment_hash_2, our_payment_secret_2, 1000000); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_second_raa); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_second_raa); check_added_monitors!(nodes[1], 1); expect_pending_htlcs_forwardable!(nodes[1]); expect_payment_claimable!(nodes[1], our_payment_hash_1, our_payment_secret_1, 1000000); @@ -1158,6 +1180,11 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1); let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); @@ -1175,7 +1202,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { ); check_added_monitors!(nodes[2], 1); - let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + let updates = get_htlc_update_msgs!(nodes[2], node_b_id); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fulfill_htlcs.is_empty()); assert_eq!(updates.update_fail_htlcs.len(), 1); @@ -1183,7 +1210,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { assert!(updates.update_fee.is_none()); nodes[1] .node - .handle_update_fail_htlc(nodes[2].node.get_our_node_id(), &updates.update_fail_htlcs[0]); + .handle_update_fail_htlc(node_c_id, &updates.update_fail_htlcs[0]); let bs_revoke_and_ack = commitment_signed_dance!( nodes[1], @@ -1215,7 +1242,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { let mut send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &send_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[1]); @@ -1224,7 +1251,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { // Now fail monitor updating. 
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - nodes[1].node.handle_revoke_and_ack(nodes[2].node.get_our_node_id(), &bs_revoke_and_ack); + nodes[1].node.handle_revoke_and_ack(node_c_id, &bs_revoke_and_ack); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -1249,7 +1276,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); // We succeed in updating the monitor for the first channel send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &send_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[0], send_event.commitment_msg, false, true); check_added_monitors!(nodes[1], 0); @@ -1276,9 +1303,9 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { send_event = SendEvent::from_event(nodes[2].node.get_and_clear_pending_msg_events().remove(0)); - nodes[1].node.handle_update_add_htlc(nodes[2].node.get_our_node_id(), &send_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_c_id, &send_event.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test( - nodes[2].node.get_our_node_id(), + node_c_id, &send_event.commitment_msg, ); check_added_monitors!(nodes[1], 1); @@ -1304,7 +1331,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { expect_pending_htlcs_forwardable_and_htlc_handling_failed!( nodes[1], [HTLCHandlingFailureType::Forward { - node_id: Some(nodes[2].node.get_our_node_id()), + node_id: Some(node_c_id), channel_id: chan_2.2 }] ); @@ -1320,10 +1347,10 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { // Note that the ordering of the events for different nodes is non-prescriptive, though the // ordering of the two events that both go to nodes[2] have to stay in the same order. 
let nodes_0_event = - remove_first_msg_event_to_node(&nodes[0].node.get_our_node_id(), &mut events_3); + remove_first_msg_event_to_node(&node_a_id, &mut events_3); let messages_a = match nodes_0_event { MessageSendEvent::UpdateHTLCs { node_id, mut updates, channel_id: _ } => { - assert_eq!(node_id, nodes[0].node.get_our_node_id()); + assert_eq!(node_id, node_a_id); assert!(updates.update_fulfill_htlcs.is_empty()); assert_eq!(updates.update_fail_htlcs.len(), 1); assert!(updates.update_fail_malformed_htlcs.is_empty()); @@ -1335,16 +1362,16 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { }; let nodes_2_event = - remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events_3); + remove_first_msg_event_to_node(&node_c_id, &mut events_3); let send_event_b = SendEvent::from_event(nodes_2_event); - assert_eq!(send_event_b.node_id, nodes[2].node.get_our_node_id()); + assert_eq!(send_event_b.node_id, node_c_id); let raa = if test_ignore_second_cs { let nodes_2_event = - remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut events_3); + remove_first_msg_event_to_node(&node_c_id, &mut events_3); match nodes_2_event { MessageSendEvent::SendRevokeAndACK { node_id, msg } => { - assert_eq!(node_id, nodes[2].node.get_our_node_id()); + assert_eq!(node_id, node_c_id); Some(msg.clone()) }, _ => panic!("Unexpected event"), @@ -1355,44 +1382,44 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { // Now deliver the new messages... - nodes[0].node.handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &messages_a.0); + nodes[0].node.handle_update_fail_htlc(node_b_id, &messages_a.0); commitment_signed_dance!(nodes[0], nodes[1], messages_a.1, false); expect_payment_failed!(nodes[0], payment_hash_1, true); - nodes[2].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &send_event_b.msgs[0]); + nodes[2].node.handle_update_add_htlc(node_b_id, &send_event_b.msgs[0]); let as_cs; if test_ignore_second_cs { nodes[2].node.handle_commitment_signed_batch_test( - nodes[1].node.get_our_node_id(), + node_b_id, &send_event_b.commitment_msg, ); check_added_monitors!(nodes[2], 1); let bs_revoke_and_ack = get_event_msg!( nodes[2], MessageSendEvent::SendRevokeAndACK, - nodes[1].node.get_our_node_id() + node_b_id ); - nodes[2].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &raa.unwrap()); + nodes[2].node.handle_revoke_and_ack(node_b_id, &raa.unwrap()); check_added_monitors!(nodes[2], 1); - let bs_cs = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + let bs_cs = get_htlc_update_msgs!(nodes[2], node_b_id); assert!(bs_cs.update_add_htlcs.is_empty()); assert!(bs_cs.update_fail_htlcs.is_empty()); assert!(bs_cs.update_fail_malformed_htlcs.is_empty()); assert!(bs_cs.update_fulfill_htlcs.is_empty()); assert!(bs_cs.update_fee.is_none()); - nodes[1].node.handle_revoke_and_ack(nodes[2].node.get_our_node_id(), &bs_revoke_and_ack); + nodes[1].node.handle_revoke_and_ack(node_c_id, &bs_revoke_and_ack); check_added_monitors!(nodes[1], 1); - as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id()); + as_cs = get_htlc_update_msgs!(nodes[1], node_c_id); nodes[1].node.handle_commitment_signed_batch_test( - nodes[2].node.get_our_node_id(), + node_c_id, &bs_cs.commitment_signed, ); check_added_monitors!(nodes[1], 1); } else { nodes[2].node.handle_commitment_signed_batch_test( - nodes[1].node.get_our_node_id(), + node_b_id, &send_event_b.commitment_msg, ); check_added_monitors!(nodes[2], 1); @@ -1402,25 +1429,25 @@ fn 
do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { assert_eq!(bs_revoke_and_commit.len(), 2); match bs_revoke_and_commit[0] { MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => { - assert_eq!(*node_id, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_revoke_and_ack(nodes[2].node.get_our_node_id(), &msg); + assert_eq!(*node_id, node_b_id); + nodes[1].node.handle_revoke_and_ack(node_c_id, &msg); check_added_monitors!(nodes[1], 1); }, _ => panic!("Unexpected event"), } - as_cs = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id()); + as_cs = get_htlc_update_msgs!(nodes[1], node_c_id); match bs_revoke_and_commit[1] { MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, ref updates } => { - assert_eq!(*node_id, nodes[1].node.get_our_node_id()); + assert_eq!(*node_id, node_b_id); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fail_htlcs.is_empty()); assert!(updates.update_fail_malformed_htlcs.is_empty()); assert!(updates.update_fulfill_htlcs.is_empty()); assert!(updates.update_fee.is_none()); nodes[1].node.handle_commitment_signed_batch_test( - nodes[2].node.get_our_node_id(), + node_c_id, &updates.commitment_signed, ); check_added_monitors!(nodes[1], 1); @@ -1437,43 +1464,43 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { let as_raa = get_event_msg!( nodes[1], MessageSendEvent::SendRevokeAndACK, - nodes[2].node.get_our_node_id() + node_c_id ); nodes[2] .node - .handle_update_add_htlc(nodes[1].node.get_our_node_id(), &as_cs.update_add_htlcs[0]); + .handle_update_add_htlc(node_b_id, &as_cs.update_add_htlcs[0]); nodes[2].node.handle_commitment_signed_batch_test( - nodes[1].node.get_our_node_id(), + node_b_id, &as_cs.commitment_signed, ); check_added_monitors!(nodes[2], 1); let bs_second_raa = get_event_msg!( nodes[2], MessageSendEvent::SendRevokeAndACK, - nodes[1].node.get_our_node_id() + node_b_id ); - nodes[2].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &as_raa); + nodes[2].node.handle_revoke_and_ack(node_b_id, &as_raa); check_added_monitors!(nodes[2], 1); - let bs_second_cs = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + let bs_second_cs = get_htlc_update_msgs!(nodes[2], node_b_id); - nodes[1].node.handle_revoke_and_ack(nodes[2].node.get_our_node_id(), &bs_second_raa); + nodes[1].node.handle_revoke_and_ack(node_c_id, &bs_second_raa); check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); nodes[1].node.handle_commitment_signed_batch_test( - nodes[2].node.get_our_node_id(), + node_c_id, &bs_second_cs.commitment_signed, ); check_added_monitors!(nodes[1], 1); let as_second_raa = get_event_msg!( nodes[1], MessageSendEvent::SendRevokeAndACK, - nodes[2].node.get_our_node_id() + node_c_id ); - nodes[2].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &as_second_raa); + nodes[2].node.handle_revoke_and_ack(node_b_id, &as_second_raa); check_added_monitors!(nodes[2], 1); assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty()); @@ -1499,9 +1526,9 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { check_added_monitors!(nodes[1], 1); send_event = SendEvent::from_node(&nodes[1]); - assert_eq!(send_event.node_id, nodes[0].node.get_our_node_id()); + assert_eq!(send_event.node_id, node_a_id); assert_eq!(send_event.msgs.len(), 1); - nodes[0].node.handle_update_add_htlc(nodes[1].node.get_our_node_id(), &send_event.msgs[0]); + nodes[0].node.handle_update_add_htlc(node_b_id, &send_event.msgs[0]); 
commitment_signed_dance!(nodes[0], nodes[1], send_event.commitment_msg, false); expect_pending_htlcs_forwardable!(nodes[0]); @@ -1535,27 +1562,32 @@ fn test_monitor_update_fail_reestablish() { let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); create_announced_chan_between_nodes(&nodes, 1, 2); let (payment_preimage, payment_hash, ..) = route_payment(&nodes[0], &[&nodes[1], &nodes[2]], 1_000_000); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); + nodes[1].node.peer_disconnected(node_a_id); + nodes[0].node.peer_disconnected(node_b_id); nodes[2].node.claim_funds(payment_preimage); check_added_monitors!(nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash, 1_000_000); - let mut updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + let mut updates = get_htlc_update_msgs!(nodes[2], node_b_id); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fail_htlcs.is_empty()); assert!(updates.update_fail_malformed_htlcs.is_empty()); assert!(updates.update_fee.is_none()); assert_eq!(updates.update_fulfill_htlcs.len(), 1); nodes[1].node.handle_update_fulfill_htlc( - nodes[2].node.get_our_node_id(), + node_c_id, &updates.update_fulfill_htlcs[0], ); expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); @@ -1567,7 +1599,7 @@ fn test_monitor_update_fail_reestablish() { nodes[0] .node .peer_connected( - nodes[1].node.get_our_node_id(), + node_b_id, &msgs::Init { features: nodes[1].node.init_features(), networks: None, @@ -1579,7 +1611,7 @@ fn test_monitor_update_fail_reestablish() { nodes[1] .node .peer_connected( - nodes[0].node.get_our_node_id(), + node_a_id, &msgs::Init { features: nodes[0].node.init_features(), networks: None, @@ -1592,14 +1624,14 @@ fn test_monitor_update_fail_reestablish() { let as_reestablish = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(); let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(); - nodes[0].node.handle_channel_reestablish(nodes[1].node.get_our_node_id(), &bs_reestablish); + nodes[0].node.handle_channel_reestablish(node_b_id, &bs_reestablish); - nodes[1].node.handle_channel_reestablish(nodes[0].node.get_our_node_id(), &as_reestablish); + nodes[1].node.handle_channel_reestablish(node_a_id, &as_reestablish); assert_eq!( get_event_msg!( nodes[0], MessageSendEvent::SendChannelUpdate, - nodes[1].node.get_our_node_id() + node_b_id ) .contents .channel_flags @@ -1610,13 +1642,13 @@ fn test_monitor_update_fail_reestablish() { nodes[1].node.get_and_clear_pending_msg_events(); // Free the holding cell check_added_monitors!(nodes[1], 1); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); + nodes[1].node.peer_disconnected(node_a_id); + nodes[0].node.peer_disconnected(node_b_id); nodes[0] .node .peer_connected( - nodes[1].node.get_our_node_id(), + node_b_id, &msgs::Init { features: nodes[1].node.init_features(), networks: None, @@ -1628,7 +1660,7 @@ fn test_monitor_update_fail_reestablish() { nodes[1] .node .peer_connected( - 
nodes[0].node.get_our_node_id(), + node_a_id, &msgs::Init { features: nodes[0].node.init_features(), networks: None, @@ -1641,12 +1673,12 @@ fn test_monitor_update_fail_reestablish() { assert_eq!(get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(), as_reestablish); assert_eq!(get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(), bs_reestablish); - nodes[0].node.handle_channel_reestablish(nodes[1].node.get_our_node_id(), &bs_reestablish); + nodes[0].node.handle_channel_reestablish(node_b_id, &bs_reestablish); assert_eq!( get_event_msg!( nodes[0], MessageSendEvent::SendChannelUpdate, - nodes[1].node.get_our_node_id() + node_b_id ) .contents .channel_flags @@ -1654,13 +1686,13 @@ fn test_monitor_update_fail_reestablish() { 0 ); // The "disabled" bit should be unset as we just reconnected - nodes[1].node.handle_channel_reestablish(nodes[0].node.get_our_node_id(), &as_reestablish); + nodes[1].node.handle_channel_reestablish(node_a_id, &as_reestablish); check_added_monitors!(nodes[1], 0); assert_eq!( get_event_msg!( nodes[1], MessageSendEvent::SendChannelUpdate, - nodes[0].node.get_our_node_id() + node_a_id ) .contents .channel_flags @@ -1680,14 +1712,14 @@ fn test_monitor_update_fail_reestablish() { nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_1.2, latest_update); check_added_monitors!(nodes[1], 0); - updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + updates = get_htlc_update_msgs!(nodes[1], node_a_id); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fail_htlcs.is_empty()); assert!(updates.update_fail_malformed_htlcs.is_empty()); assert!(updates.update_fee.is_none()); assert_eq!(updates.update_fulfill_htlcs.len(), 1); nodes[0].node.handle_update_fulfill_htlc( - nodes[1].node.get_our_node_id(), + node_b_id, &updates.update_fulfill_htlcs[0], ); commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false); @@ -1704,6 +1736,10 @@ fn raa_no_response_awaiting_raa_state() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2; let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = @@ -1744,15 +1780,15 @@ fn raa_no_response_awaiting_raa_state() { let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test( - nodes[0].node.get_our_node_id(), + node_a_id, &payment_event.commitment_msg, ); check_added_monitors!(nodes[1], 1); - let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_responses.0); + let bs_responses = get_revoke_commit_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_responses.0); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -1760,12 +1796,12 @@ fn raa_no_response_awaiting_raa_state() { nodes[0] .node - 
.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_responses.1); + .handle_commitment_signed_batch_test(node_b_id, &bs_responses.1); check_added_monitors!(nodes[0], 1); let as_raa = get_event_msg!( nodes[0], MessageSendEvent::SendRevokeAndACK, - nodes[1].node.get_our_node_id() + node_b_id ); // Now we have a CS queued up which adds a new HTLC (which will need a RAA/CS response from @@ -1773,16 +1809,16 @@ fn raa_no_response_awaiting_raa_state() { // then restore channel monitor updates. chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test( - nodes[0].node.get_our_node_id(), + node_a_id, &payment_event.commitment_msg, ); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_raa); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 1); @@ -1797,7 +1833,7 @@ fn raa_no_response_awaiting_raa_state() { nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); // nodes[1] should be AwaitingRAA here! check_added_monitors!(nodes[1], 0); - let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let bs_responses = get_revoke_commit_msgs!(nodes[1], node_a_id); expect_pending_htlcs_forwardable!(nodes[1]); expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 1000000); @@ -1817,7 +1853,7 @@ fn raa_no_response_awaiting_raa_state() { check_added_monitors!(nodes[0], 0); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); } - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_responses.0); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_responses.0); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -1825,48 +1861,48 @@ fn raa_no_response_awaiting_raa_state() { nodes[0] .node - .handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_responses.1); + .handle_commitment_signed_batch_test(node_b_id, &bs_responses.1); check_added_monitors!(nodes[0], 1); let as_raa = get_event_msg!( nodes[0], MessageSendEvent::SendRevokeAndACK, - nodes[1].node.get_our_node_id() + node_b_id ); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test( - nodes[0].node.get_our_node_id(), + node_a_id, &payment_event.commitment_msg, ); check_added_monitors!(nodes[1], 1); let bs_raa = get_event_msg!( nodes[1], MessageSendEvent::SendRevokeAndACK, - nodes[0].node.get_our_node_id() + node_a_id ); // Finally deliver the RAA to nodes[1] which results in a CS response to the last update - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_raa); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); check_added_monitors!(nodes[1], 1); 
expect_pending_htlcs_forwardable!(nodes[1]); expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000); - let bs_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let bs_update = get_htlc_update_msgs!(nodes[1], node_a_id); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_raa); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa); check_added_monitors!(nodes[0], 1); nodes[0].node.handle_commitment_signed_batch_test( - nodes[1].node.get_our_node_id(), + node_b_id, &bs_update.commitment_signed, ); check_added_monitors!(nodes[0], 1); let as_raa = get_event_msg!( nodes[0], MessageSendEvent::SendRevokeAndACK, - nodes[1].node.get_our_node_id() + node_b_id ); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_raa); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); check_added_monitors!(nodes[1], 1); expect_pending_htlcs_forwardable!(nodes[1]); expect_payment_claimable!(nodes[1], payment_hash_3, payment_secret_3, 1000000); @@ -1888,14 +1924,18 @@ fn claim_while_disconnected_monitor_update_fail() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2; // Forward a payment for B to claim let (payment_preimage_1, payment_hash_1, ..) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000); - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); + nodes[1].node.peer_disconnected(node_a_id); nodes[1].node.claim_funds(payment_preimage_1); check_added_monitors!(nodes[1], 1); @@ -1904,7 +1944,7 @@ fn claim_while_disconnected_monitor_update_fail() { nodes[0] .node .peer_connected( - nodes[1].node.get_our_node_id(), + node_b_id, &msgs::Init { features: nodes[1].node.init_features(), networks: None, @@ -1916,7 +1956,7 @@ fn claim_while_disconnected_monitor_update_fail() { nodes[1] .node .peer_connected( - nodes[0].node.get_our_node_id(), + node_a_id, &msgs::Init { features: nodes[0].node.init_features(), networks: None, @@ -1929,22 +1969,22 @@ fn claim_while_disconnected_monitor_update_fail() { let as_reconnect = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(); let bs_reconnect = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(); - nodes[0].node.handle_channel_reestablish(nodes[1].node.get_our_node_id(), &bs_reconnect); + nodes[0].node.handle_channel_reestablish(node_b_id, &bs_reconnect); let _as_channel_update = get_event_msg!( nodes[0], MessageSendEvent::SendChannelUpdate, - nodes[1].node.get_our_node_id() + node_b_id ); // Now deliver a's reestablish, freeing the claim from the holding cell, but fail the monitor // update. 
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - nodes[1].node.handle_channel_reestablish(nodes[0].node.get_our_node_id(), &as_reconnect); + nodes[1].node.handle_channel_reestablish(node_a_id, &as_reconnect); let _bs_channel_update = get_event_msg!( nodes[1], MessageSendEvent::SendChannelUpdate, - nodes[0].node.get_our_node_id() + node_a_id ); check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -1966,12 +2006,12 @@ fn claim_while_disconnected_monitor_update_fail() { check_added_monitors!(nodes[0], 1); } - let as_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + let as_updates = get_htlc_update_msgs!(nodes[0], node_b_id); nodes[1] .node - .handle_update_add_htlc(nodes[0].node.get_our_node_id(), &as_updates.update_add_htlcs[0]); + .handle_update_add_htlc(node_a_id, &as_updates.update_add_htlcs[0]); nodes[1].node.handle_commitment_signed_batch_test( - nodes[0].node.get_our_node_id(), + node_a_id, &as_updates.commitment_signed, ); check_added_monitors!(nodes[1], 1); @@ -1998,14 +2038,14 @@ fn claim_while_disconnected_monitor_update_fail() { match bs_msgs[0] { MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, ref updates } => { - assert_eq!(*node_id, nodes[0].node.get_our_node_id()); + assert_eq!(*node_id, node_a_id); nodes[0].node.handle_update_fulfill_htlc( - nodes[1].node.get_our_node_id(), + node_b_id, &updates.update_fulfill_htlcs[0], ); expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false); nodes[0].node.handle_commitment_signed_batch_test( - nodes[1].node.get_our_node_id(), + node_b_id, &updates.commitment_signed, ); check_added_monitors!(nodes[0], 1); @@ -2013,9 +2053,9 @@ fn claim_while_disconnected_monitor_update_fail() { let as_raa = get_event_msg!( nodes[0], MessageSendEvent::SendRevokeAndACK, - nodes[1].node.get_our_node_id() + node_b_id ); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_raa); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); check_added_monitors!(nodes[1], 1); }, _ => panic!("Unexpected event"), @@ -2023,44 +2063,44 @@ fn claim_while_disconnected_monitor_update_fail() { match bs_msgs[1] { MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => { - assert_eq!(*node_id, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), msg); + assert_eq!(*node_id, node_a_id); + nodes[0].node.handle_revoke_and_ack(node_b_id, msg); check_added_monitors!(nodes[0], 1); }, _ => panic!("Unexpected event"), } - let as_commitment = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + let as_commitment = get_htlc_update_msgs!(nodes[0], node_b_id); - let bs_commitment = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let bs_commitment = get_htlc_update_msgs!(nodes[1], node_a_id); nodes[0].node.handle_commitment_signed_batch_test( - nodes[1].node.get_our_node_id(), + node_b_id, &bs_commitment.commitment_signed, ); check_added_monitors!(nodes[0], 1); let as_raa = get_event_msg!( nodes[0], MessageSendEvent::SendRevokeAndACK, - nodes[1].node.get_our_node_id() + node_b_id ); nodes[1].node.handle_commitment_signed_batch_test( - nodes[0].node.get_our_node_id(), + node_a_id, &as_commitment.commitment_signed, ); check_added_monitors!(nodes[1], 1); let bs_raa = get_event_msg!( nodes[1], MessageSendEvent::SendRevokeAndACK, - nodes[0].node.get_our_node_id() + node_a_id ); - 
nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_raa); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); check_added_monitors!(nodes[1], 1); expect_pending_htlcs_forwardable!(nodes[1]); expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_raa); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa); check_added_monitors!(nodes[0], 1); expect_payment_path_successful!(nodes[0]); @@ -2077,6 +2117,10 @@ fn monitor_failed_no_reestablish_response() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2; { let mut node_0_per_peer_lock; @@ -2126,9 +2170,9 @@ fn monitor_failed_no_reestablish_response() { let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test( - nodes[0].node.get_our_node_id(), + node_a_id, &payment_event.commitment_msg, ); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -2136,13 +2180,13 @@ fn monitor_failed_no_reestablish_response() { // Now disconnect and immediately reconnect, delivering the channel_reestablish while nodes[1] // is still failing to update monitors. 
- nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); + nodes[1].node.peer_disconnected(node_a_id); nodes[0] .node .peer_connected( - nodes[1].node.get_our_node_id(), + node_b_id, &msgs::Init { features: nodes[1].node.init_features(), networks: None, @@ -2154,7 +2198,7 @@ fn monitor_failed_no_reestablish_response() { nodes[1] .node .peer_connected( - nodes[0].node.get_our_node_id(), + node_a_id, &msgs::Init { features: nodes[0].node.init_features(), networks: None, @@ -2167,17 +2211,17 @@ fn monitor_failed_no_reestablish_response() { let as_reconnect = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(); let bs_reconnect = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(); - nodes[1].node.handle_channel_reestablish(nodes[0].node.get_our_node_id(), &as_reconnect); + nodes[1].node.handle_channel_reestablish(node_a_id, &as_reconnect); let _bs_channel_update = get_event_msg!( nodes[1], MessageSendEvent::SendChannelUpdate, - nodes[0].node.get_our_node_id() + node_a_id ); - nodes[0].node.handle_channel_reestablish(nodes[1].node.get_our_node_id(), &bs_reconnect); + nodes[0].node.handle_channel_reestablish(node_b_id, &bs_reconnect); let _as_channel_update = get_event_msg!( nodes[0], MessageSendEvent::SendChannelUpdate, - nodes[1].node.get_our_node_id() + node_b_id ); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); @@ -2191,21 +2235,21 @@ fn monitor_failed_no_reestablish_response() { .clone(); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors!(nodes[1], 0); - let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let bs_responses = get_revoke_commit_msgs!(nodes[1], node_a_id); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_responses.0); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_responses.0); check_added_monitors!(nodes[0], 1); nodes[0] .node - .handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_responses.1); + .handle_commitment_signed_batch_test(node_b_id, &bs_responses.1); check_added_monitors!(nodes[0], 1); let as_raa = get_event_msg!( nodes[0], MessageSendEvent::SendRevokeAndACK, - nodes[1].node.get_our_node_id() + node_b_id ); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_raa); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); check_added_monitors!(nodes[1], 1); expect_pending_htlcs_forwardable!(nodes[1]); @@ -2230,6 +2274,10 @@ fn first_message_on_recv_ordering() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2; // Route the first payment outbound, holding the last RAA for B until we are set up so that we @@ -2252,26 +2300,26 @@ fn first_message_on_recv_ordering() { let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); - assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id()); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + 
assert_eq!(payment_event.node_id, node_b_id); + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test( - nodes[0].node.get_our_node_id(), + node_a_id, &payment_event.commitment_msg, ); check_added_monitors!(nodes[1], 1); - let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let bs_responses = get_revoke_commit_msgs!(nodes[1], node_a_id); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_responses.0); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_responses.0); check_added_monitors!(nodes[0], 1); nodes[0] .node - .handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_responses.1); + .handle_commitment_signed_batch_test(node_b_id, &bs_responses.1); check_added_monitors!(nodes[0], 1); let as_raa = get_event_msg!( nodes[0], MessageSendEvent::SendRevokeAndACK, - nodes[1].node.get_our_node_id() + node_b_id ); // Route the second payment, generating an update_add_htlc/commitment_signed @@ -2292,23 +2340,23 @@ fn first_message_on_recv_ordering() { let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); - assert_eq!(payment_event.node_id, nodes[1].node.get_our_node_id()); + assert_eq!(payment_event.node_id, node_b_id); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); // Deliver the final RAA for the first payment, which does not require a response. RAAs // generally require a commitment_signed, so the fact that we're expecting an opposite response // to the next message also tests resetting the delivery order. - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_raa); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 1); // Now deliver the update_add_htlc/commitment_signed for the second payment, which does need an // RAA/CS response, which should be generated when we call channel_monitor_update (with the // appropriate HTLC acceptance). 
- nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test( - nodes[0].node.get_our_node_id(), + node_a_id, &payment_event.commitment_msg, ); check_added_monitors!(nodes[1], 1); @@ -2329,20 +2377,20 @@ fn first_message_on_recv_ordering() { expect_pending_htlcs_forwardable!(nodes[1]); expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 1000000); - let bs_responses = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_responses.0); + let bs_responses = get_revoke_commit_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_responses.0); check_added_monitors!(nodes[0], 1); nodes[0] .node - .handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_responses.1); + .handle_commitment_signed_batch_test(node_b_id, &bs_responses.1); check_added_monitors!(nodes[0], 1); let as_raa = get_event_msg!( nodes[0], MessageSendEvent::SendRevokeAndACK, - nodes[1].node.get_our_node_id() + node_b_id ); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_raa); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); check_added_monitors!(nodes[1], 1); expect_pending_htlcs_forwardable!(nodes[1]); @@ -2363,6 +2411,11 @@ fn test_monitor_update_fail_claim() { let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); create_announced_chan_between_nodes(&nodes, 1, 2); @@ -2407,7 +2460,7 @@ fn test_monitor_update_fail_claim() { let mut events = nodes[2].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); - nodes[1].node.handle_update_add_htlc(nodes[2].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_c_id, &payment_event.msgs[0]); let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 0); commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true); @@ -2428,7 +2481,7 @@ fn test_monitor_update_fail_claim() { let mut events = nodes[2].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); - nodes[1].node.handle_update_add_htlc(nodes[2].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_c_id, &payment_event.msgs[0]); let events = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 0); commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false, true); @@ -2447,9 +2500,9 @@ fn test_monitor_update_fail_claim() { expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); check_added_monitors!(nodes[1], 0); - let bs_fulfill_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let bs_fulfill_update = get_htlc_update_msgs!(nodes[1], node_a_id); nodes[0].node.handle_update_fulfill_htlc( - nodes[1].node.get_our_node_id(), + node_b_id, 
&bs_fulfill_update.update_fulfill_htlcs[0], ); commitment_signed_dance!(nodes[0], nodes[1], bs_fulfill_update.commitment_signed, false); @@ -2458,13 +2511,13 @@ fn test_monitor_update_fail_claim() { // Get the payment forwards, note that they were batched into one commitment update. nodes[1].node.process_pending_htlc_forwards(); check_added_monitors!(nodes[1], 1); - let bs_forward_update = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let bs_forward_update = get_htlc_update_msgs!(nodes[1], node_a_id); nodes[0].node.handle_update_add_htlc( - nodes[1].node.get_our_node_id(), + node_b_id, &bs_forward_update.update_add_htlcs[0], ); nodes[0].node.handle_update_add_htlc( - nodes[1].node.get_our_node_id(), + node_b_id, &bs_forward_update.update_add_htlcs[1], ); commitment_signed_dance!(nodes[0], nodes[1], bs_forward_update.commitment_signed, false); @@ -2483,7 +2536,7 @@ fn test_monitor_update_fail_claim() { } => { assert_eq!(payment_hash_2, *payment_hash); assert_eq!(1_000_000, amount_msat); - assert_eq!(receiver_node_id.unwrap(), nodes[0].node.get_our_node_id()); + assert_eq!(receiver_node_id.unwrap(), node_a_id); assert_eq!(*via_channel_ids.last().unwrap(), (channel_id, Some(42))); match &purpose { PaymentPurpose::Bolt11InvoicePayment { @@ -2508,7 +2561,7 @@ fn test_monitor_update_fail_claim() { } => { assert_eq!(payment_hash_3, *payment_hash); assert_eq!(1_000_000, amount_msat); - assert_eq!(receiver_node_id.unwrap(), nodes[0].node.get_our_node_id()); + assert_eq!(receiver_node_id.unwrap(), node_a_id); assert_eq!(*via_channel_ids, [(channel_id, Some(42))]); match &purpose { PaymentPurpose::Bolt11InvoicePayment { @@ -2534,6 +2587,11 @@ fn test_monitor_update_on_pending_forwards() { let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1); let chan_2 = create_announced_chan_between_nodes(&nodes, 1, 2); @@ -2548,9 +2606,9 @@ fn test_monitor_update_on_pending_forwards() { ); check_added_monitors!(nodes[2], 1); - let cs_fail_update = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + let cs_fail_update = get_htlc_update_msgs!(nodes[2], node_b_id); nodes[1].node.handle_update_fail_htlc( - nodes[2].node.get_our_node_id(), + node_c_id, &cs_fail_update.update_fail_htlcs[0], ); commitment_signed_dance!(nodes[1], nodes[2], cs_fail_update.commitment_signed, true, true); @@ -2574,14 +2632,14 @@ fn test_monitor_update_on_pending_forwards() { let mut events = nodes[2].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); - nodes[1].node.handle_update_add_htlc(nodes[2].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_c_id, &payment_event.msgs[0]); commitment_signed_dance!(nodes[1], nodes[2], payment_event.commitment_msg, false); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); expect_pending_htlcs_forwardable_and_htlc_handling_failed!( nodes[1], [HTLCHandlingFailureType::Forward { - node_id: Some(nodes[2].node.get_our_node_id()), + node_id: Some(node_c_id), channel_id: chan_2.2 }] ); @@ -2599,13 +2657,13 @@ fn test_monitor_update_on_pending_forwards() { 
nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_1.2, latest_update); check_added_monitors!(nodes[1], 0); - let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let bs_updates = get_htlc_update_msgs!(nodes[1], node_a_id); nodes[0] .node - .handle_update_fail_htlc(nodes[1].node.get_our_node_id(), &bs_updates.update_fail_htlcs[0]); + .handle_update_fail_htlc(node_b_id, &bs_updates.update_fail_htlcs[0]); nodes[0] .node - .handle_update_add_htlc(nodes[1].node.get_our_node_id(), &bs_updates.update_add_htlcs[0]); + .handle_update_add_htlc(node_b_id, &bs_updates.update_add_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false, true); let events = nodes[0].node.get_and_clear_pending_events(); @@ -2642,6 +2700,10 @@ fn monitor_update_claim_fail_no_response() { let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2; // Forward a payment for B to claim @@ -2667,7 +2729,7 @@ fn monitor_update_claim_fail_no_response() { let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &payment_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); let as_raa = commitment_signed_dance!( nodes[1], nodes[0], @@ -2698,14 +2760,14 @@ fn monitor_update_claim_fail_no_response() { check_added_monitors!(nodes[1], 0); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_raa); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); check_added_monitors!(nodes[1], 1); expect_pending_htlcs_forwardable!(nodes[1]); expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000); - let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let bs_updates = get_htlc_update_msgs!(nodes[1], node_a_id); nodes[0].node.handle_update_fulfill_htlc( - nodes[1].node.get_our_node_id(), + node_b_id, &bs_updates.update_fulfill_htlcs[0], ); commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false); @@ -2726,35 +2788,38 @@ fn do_during_funding_monitor_fail( let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + nodes[0] .node - .create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43, None, None) + .create_channel(node_b_id, 100000, 10001, 43, None, None) .unwrap(); nodes[1].node.handle_open_channel( - nodes[0].node.get_our_node_id(), + node_a_id, &get_event_msg!( nodes[0], MessageSendEvent::SendOpenChannel, - nodes[1].node.get_our_node_id() + node_b_id ), ); nodes[0].node.handle_accept_channel( - nodes[1].node.get_our_node_id(), + node_b_id, &get_event_msg!( nodes[1], MessageSendEvent::SendAcceptChannel, - nodes[0].node.get_our_node_id() + node_a_id ), ); let (temporary_channel_id, funding_tx, funding_output) = - create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 
43); + create_funding_transaction(&nodes[0], &node_b_id, 100000, 43); nodes[0] .node .funding_transaction_generated( temporary_channel_id, - nodes[1].node.get_our_node_id(), + node_b_id, funding_tx.clone(), ) .unwrap(); @@ -2764,22 +2829,22 @@ fn do_during_funding_monitor_fail( let funding_created_msg = get_event_msg!( nodes[0], MessageSendEvent::SendFundingCreated, - nodes[1].node.get_our_node_id() + node_b_id ); let channel_id = ChannelId::v1_from_funding_txid( funding_created_msg.funding_txid.as_byte_array(), funding_created_msg.funding_output_index, ); - nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created_msg); + nodes[1].node.handle_funding_created(node_a_id, &funding_created_msg); check_added_monitors!(nodes[1], 1); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[0].node.handle_funding_signed( - nodes[1].node.get_our_node_id(), + node_b_id, &get_event_msg!( nodes[1], MessageSendEvent::SendFundingSigned, - nodes[0].node.get_our_node_id() + node_a_id ), ); check_added_monitors!(nodes[0], 1); @@ -2796,7 +2861,7 @@ fn do_during_funding_monitor_fail( .clone(); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors!(nodes[0], 0); - expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id()); + expect_channel_pending_event(&nodes[0], &node_b_id); let events = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events.len(), 0); @@ -2809,11 +2874,11 @@ fn do_during_funding_monitor_fail( if confirm_a_first { confirm_transaction(&nodes[0], &funding_tx); nodes[1].node.handle_channel_ready( - nodes[0].node.get_our_node_id(), + node_a_id, &get_event_msg!( nodes[0], MessageSendEvent::SendChannelReady, - nodes[1].node.get_our_node_id() + node_b_id ), ); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -2825,14 +2890,14 @@ fn do_during_funding_monitor_fail( } // Make sure nodes[1] isn't stupid enough to re-send the ChannelReady on reconnect - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); + nodes[1].node.peer_disconnected(node_a_id); let mut reconnect_args = ReconnectArgs::new(&nodes[0], &nodes[1]); reconnect_args.send_channel_ready.1 = confirm_a_first; reconnect_nodes(reconnect_args); // But we want to re-emit ChannelPending - expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id()); + expect_channel_pending_event(&nodes[1], &node_a_id); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -2844,11 +2909,11 @@ fn do_during_funding_monitor_fail( if !confirm_a_first && !restore_b_before_lock { confirm_transaction(&nodes[0], &funding_tx); nodes[1].node.handle_channel_ready( - nodes[0].node.get_our_node_id(), + node_a_id, &get_event_msg!( nodes[0], MessageSendEvent::SendChannelReady, - nodes[1].node.get_our_node_id() + node_b_id ), ); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -2877,11 +2942,11 @@ fn do_during_funding_monitor_fail( ) } else { nodes[0].node.handle_channel_ready( - nodes[1].node.get_our_node_id(), + node_b_id, &get_event_msg!( nodes[1], MessageSendEvent::SendChannelReady, - nodes[0].node.get_our_node_id() + node_a_id ), ); confirm_transaction(&nodes[0], &funding_tx); @@ -2913,9 +2978,9 @@ fn do_during_funding_monitor_fail( } if 
!restore_b_before_lock { - expect_channel_ready_event(&nodes[1], &nodes[0].node.get_our_node_id()); + expect_channel_ready_event(&nodes[1], &node_a_id); } else { - expect_channel_ready_event(&nodes[0], &nodes[1].node.get_our_node_id()); + expect_channel_ready_event(&nodes[0], &node_b_id); } send_payment(&nodes[0], &[&nodes[1]], 8000000); @@ -2924,14 +2989,14 @@ fn do_during_funding_monitor_fail( nodes[0], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, - [nodes[1].node.get_our_node_id()], + [node_b_id], 100000 ); check_closed_event!( nodes[1], 1, ClosureReason::LocallyInitiatedCooperativeClosure, - [nodes[0].node.get_our_node_id()], + [node_a_id], 100000 ); } @@ -2953,6 +3018,9 @@ fn test_path_paused_mpp() { let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let chan_1_id = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id; let (chan_2_ann, _, chan_2_id, _) = create_announced_chan_between_nodes(&nodes, 0, 2); let chan_3_id = create_announced_chan_between_nodes(&nodes, 1, 3).0.contents.short_channel_id; @@ -2964,10 +3032,10 @@ fn test_path_paused_mpp() { // Set us up to take multiple routes, one 0 -> 1 -> 3 and one 0 -> 2 -> 3: let path = route.paths[0].clone(); route.paths.push(path); - route.paths[0].hops[0].pubkey = nodes[1].node.get_our_node_id(); + route.paths[0].hops[0].pubkey = node_b_id; route.paths[0].hops[0].short_channel_id = chan_1_id; route.paths[0].hops[1].short_channel_id = chan_3_id; - route.paths[1].hops[0].pubkey = nodes[2].node.get_our_node_id(); + route.paths[1].hops[0].pubkey = node_c_id; route.paths[1].hops[0].short_channel_id = chan_2_ann.contents.short_channel_id; route.paths[1].hops[1].short_channel_id = chan_4_id; @@ -3051,6 +3119,9 @@ fn test_pending_update_fee_ack_on_reconnect() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1); send_payment(&nodes[0], &[&nodes[1]], 100_000_00); @@ -3066,7 +3137,7 @@ fn test_pending_update_fee_ack_on_reconnect() { ) .unwrap(); check_added_monitors!(nodes[1], 1); - let bs_initial_send_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let bs_initial_send_msgs = get_htlc_update_msgs!(nodes[1], node_a_id); // bs_initial_send_msgs are not delivered until they are re-generated after reconnect { @@ -3075,32 +3146,32 @@ fn test_pending_update_fee_ack_on_reconnect() { } nodes[0].node.timer_tick_occurred(); check_added_monitors!(nodes[0], 1); - let as_update_fee_msgs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + let as_update_fee_msgs = get_htlc_update_msgs!(nodes[0], node_b_id); assert!(as_update_fee_msgs.update_fee.is_some()); nodes[1].node.handle_update_fee( - nodes[0].node.get_our_node_id(), + node_a_id, as_update_fee_msgs.update_fee.as_ref().unwrap(), ); nodes[1].node.handle_commitment_signed_batch_test( - nodes[0].node.get_our_node_id(), + node_a_id, &as_update_fee_msgs.commitment_signed, ); check_added_monitors!(nodes[1], 1); let bs_first_raa = get_event_msg!( nodes[1], MessageSendEvent::SendRevokeAndACK, - nodes[0].node.get_our_node_id() + node_a_id ); // bs_first_raa is not delivered until it is re-generated 
after reconnect - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); + nodes[1].node.peer_disconnected(node_a_id); nodes[0] .node .peer_connected( - nodes[1].node.get_our_node_id(), + node_b_id, &msgs::Init { features: nodes[1].node.init_features(), networks: None, @@ -3113,7 +3184,7 @@ fn test_pending_update_fee_ack_on_reconnect() { nodes[1] .node .peer_connected( - nodes[0].node.get_our_node_id(), + node_a_id, &msgs::Init { features: nodes[0].node.init_features(), networks: None, @@ -3124,7 +3195,7 @@ fn test_pending_update_fee_ack_on_reconnect() { .unwrap(); let bs_connect_msg = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(); - nodes[1].node.handle_channel_reestablish(nodes[0].node.get_our_node_id(), &as_connect_msg); + nodes[1].node.handle_channel_reestablish(node_a_id, &as_connect_msg); let bs_resend_msgs = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(bs_resend_msgs.len(), 3); if let MessageSendEvent::UpdateHTLCs { ref updates, .. } = bs_resend_msgs[0] { @@ -3142,56 +3213,56 @@ fn test_pending_update_fee_ack_on_reconnect() { panic!(); } - nodes[0].node.handle_channel_reestablish(nodes[1].node.get_our_node_id(), &bs_connect_msg); - get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, nodes[1].node.get_our_node_id()); + nodes[0].node.handle_channel_reestablish(node_b_id, &bs_connect_msg); + get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, node_b_id); nodes[0].node.handle_update_add_htlc( - nodes[1].node.get_our_node_id(), + node_b_id, &bs_initial_send_msgs.update_add_htlcs[0], ); nodes[0].node.handle_commitment_signed_batch_test( - nodes[1].node.get_our_node_id(), + node_b_id, &bs_initial_send_msgs.commitment_signed, ); check_added_monitors!(nodes[0], 1); nodes[1].node.handle_revoke_and_ack( - nodes[0].node.get_our_node_id(), + node_a_id, &get_event_msg!( nodes[0], MessageSendEvent::SendRevokeAndACK, - nodes[1].node.get_our_node_id() + node_b_id ), ); check_added_monitors!(nodes[1], 1); let bs_second_cs = - get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()).commitment_signed; + get_htlc_update_msgs!(nodes[1], node_a_id).commitment_signed; - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_first_raa); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_first_raa); check_added_monitors!(nodes[0], 1); nodes[1].node.handle_commitment_signed_batch_test( - nodes[0].node.get_our_node_id(), - &get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()).commitment_signed, + node_a_id, + &get_htlc_update_msgs!(nodes[0], node_b_id).commitment_signed, ); check_added_monitors!(nodes[1], 1); let bs_third_raa = get_event_msg!( nodes[1], MessageSendEvent::SendRevokeAndACK, - nodes[0].node.get_our_node_id() + node_a_id ); nodes[0] .node - .handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_second_cs); + .handle_commitment_signed_batch_test(node_b_id, &bs_second_cs); check_added_monitors!(nodes[0], 1); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_third_raa); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_third_raa); check_added_monitors!(nodes[0], 1); nodes[1].node.handle_revoke_and_ack( - nodes[0].node.get_our_node_id(), + node_a_id, &get_event_msg!( nodes[0], MessageSendEvent::SendRevokeAndACK, - nodes[1].node.get_our_node_id() + node_b_id ), ); check_added_monitors!(nodes[1], 1); @@ -3217,6 +3288,10 @@ fn 
test_fail_htlc_on_broadcast_after_claim() { let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1); let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2; @@ -3230,12 +3305,12 @@ fn test_fail_htlc_on_broadcast_after_claim() { check_added_monitors!(nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash, 2000); - let cs_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + let cs_updates = get_htlc_update_msgs!(nodes[2], node_b_id); nodes[1].node.handle_update_fulfill_htlc( - nodes[2].node.get_our_node_id(), + node_c_id, &cs_updates.update_fulfill_htlcs[0], ); - let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let bs_updates = get_htlc_update_msgs!(nodes[1], node_a_id); check_added_monitors!(nodes[1], 1); expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); @@ -3244,7 +3319,7 @@ fn test_fail_htlc_on_broadcast_after_claim() { nodes[1], 1, ClosureReason::CommitmentTxConfirmed, - [nodes[2].node.get_our_node_id()], + [node_c_id], 100000 ); check_closed_broadcast!(nodes[1], true); @@ -3253,13 +3328,13 @@ fn test_fail_htlc_on_broadcast_after_claim() { expect_pending_htlcs_forwardable_and_htlc_handling_failed!( nodes[1], [HTLCHandlingFailureType::Forward { - node_id: Some(nodes[2].node.get_our_node_id()), + node_id: Some(node_c_id), channel_id: chan_id_2 }] ); nodes[0].node.handle_update_fulfill_htlc( - nodes[1].node.get_our_node_id(), + node_b_id, &bs_updates.update_fulfill_htlcs[0], ); expect_payment_sent(&nodes[0], payment_preimage, None, false, false); @@ -3276,6 +3351,9 @@ fn do_update_fee_resend_test(deliver_update: bool, parallel_updates: bool) { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1); send_payment(&nodes[0], &[&nodes[1]], 1000); @@ -3285,11 +3363,11 @@ fn do_update_fee_resend_test(deliver_update: bool, parallel_updates: bool) { } nodes[0].node.timer_tick_occurred(); check_added_monitors!(nodes[0], 1); - let update_msgs = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + let update_msgs = get_htlc_update_msgs!(nodes[0], node_b_id); assert!(update_msgs.update_fee.is_some()); if deliver_update { nodes[1].node.handle_update_fee( - nodes[0].node.get_our_node_id(), + node_a_id, update_msgs.update_fee.as_ref().unwrap(), ); } @@ -3303,13 +3381,13 @@ fn do_update_fee_resend_test(deliver_update: bool, parallel_updates: bool) { assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); } - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); + nodes[1].node.peer_disconnected(node_a_id); nodes[0] .node .peer_connected( - nodes[1].node.get_our_node_id(), + node_b_id, &msgs::Init { features: nodes[1].node.init_features(), networks: None, @@ -3322,7 +3400,7 @@ fn do_update_fee_resend_test(deliver_update: bool, parallel_updates: bool) { nodes[1] .node .peer_connected( - nodes[0].node.get_our_node_id(), + node_a_id, 
&msgs::Init { features: nodes[0].node.init_features(), networks: None, @@ -3333,11 +3411,11 @@ fn do_update_fee_resend_test(deliver_update: bool, parallel_updates: bool) { .unwrap(); let bs_connect_msg = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(); - nodes[1].node.handle_channel_reestablish(nodes[0].node.get_our_node_id(), &as_connect_msg); - get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, nodes[0].node.get_our_node_id()); + nodes[1].node.handle_channel_reestablish(node_a_id, &as_connect_msg); + get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, node_a_id); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - nodes[0].node.handle_channel_reestablish(nodes[1].node.get_our_node_id(), &bs_connect_msg); + nodes[0].node.handle_channel_reestablish(node_b_id, &bs_connect_msg); let mut as_reconnect_msgs = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(as_reconnect_msgs.len(), 2); if let MessageSendEvent::SendChannelUpdate { .. } = as_reconnect_msgs.pop().unwrap() { @@ -3352,65 +3430,65 @@ fn do_update_fee_resend_test(deliver_update: bool, parallel_updates: bool) { }; assert!(update_msgs.update_fee.is_some()); nodes[1].node.handle_update_fee( - nodes[0].node.get_our_node_id(), + node_a_id, update_msgs.update_fee.as_ref().unwrap(), ); if parallel_updates { nodes[1].node.handle_commitment_signed_batch_test( - nodes[0].node.get_our_node_id(), + node_a_id, &update_msgs.commitment_signed, ); check_added_monitors!(nodes[1], 1); let (bs_first_raa, bs_first_cs) = - get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_first_raa); + get_revoke_commit_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_first_raa); check_added_monitors!(nodes[0], 1); - let as_second_update = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + let as_second_update = get_htlc_update_msgs!(nodes[0], node_b_id); nodes[0] .node - .handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_first_cs); + .handle_commitment_signed_batch_test(node_b_id, &bs_first_cs); check_added_monitors!(nodes[0], 1); let as_first_raa = get_event_msg!( nodes[0], MessageSendEvent::SendRevokeAndACK, - nodes[1].node.get_our_node_id() + node_b_id ); nodes[1].node.handle_update_fee( - nodes[0].node.get_our_node_id(), + node_a_id, as_second_update.update_fee.as_ref().unwrap(), ); nodes[1].node.handle_commitment_signed_batch_test( - nodes[0].node.get_our_node_id(), + node_a_id, &as_second_update.commitment_signed, ); check_added_monitors!(nodes[1], 1); let bs_second_raa = get_event_msg!( nodes[1], MessageSendEvent::SendRevokeAndACK, - nodes[0].node.get_our_node_id() + node_a_id ); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_first_raa); - let bs_second_cs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_first_raa); + let bs_second_cs = get_htlc_update_msgs!(nodes[1], node_a_id); check_added_monitors!(nodes[1], 1); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_second_raa); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_raa); check_added_monitors!(nodes[0], 1); nodes[0].node.handle_commitment_signed_batch_test( - nodes[1].node.get_our_node_id(), + node_b_id, &bs_second_cs.commitment_signed, ); check_added_monitors!(nodes[0], 1); let as_second_raa = get_event_msg!( nodes[0], 
MessageSendEvent::SendRevokeAndACK, - nodes[1].node.get_our_node_id() + node_b_id ); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_second_raa); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_second_raa); check_added_monitors!(nodes[1], 1); } else { commitment_signed_dance!(nodes[1], nodes[0], update_msgs.commitment_signed, false); @@ -3439,6 +3517,9 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { let nodes_0_deserialized; let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let chan_id = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 15_000_000, 7_000_000_000).2; let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = @@ -3498,15 +3579,15 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { nodes[0].node.claim_funds(payment_preimage_0); check_added_monitors!(nodes[0], 1); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &send.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &send.msgs[0]); nodes[1] .node - .handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &send.commitment_msg); + .handle_commitment_signed_batch_test(node_a_id, &send.commitment_msg); check_added_monitors!(nodes[1], 1); - let (raa, cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + let (raa, cs) = get_revoke_commit_msgs!(nodes[1], node_a_id); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &raa); + nodes[0].node.handle_revoke_and_ack(node_b_id, &raa); check_added_monitors!(nodes[0], 1); if disconnect { @@ -3525,15 +3606,15 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); } else { - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); } - nodes[1].node.peer_disconnected(nodes[0].node.get_our_node_id()); + nodes[1].node.peer_disconnected(node_a_id); // Now reconnect the two nodes[0] .node .peer_connected( - nodes[1].node.get_our_node_id(), + node_b_id, &msgs::Init { features: nodes[1].node.init_features(), networks: None, @@ -3547,7 +3628,7 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { nodes[1] .node .peer_connected( - nodes[0].node.get_our_node_id(), + node_a_id, &msgs::Init { features: nodes[0].node.init_features(), networks: None, @@ -3561,13 +3642,13 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { nodes[1] .node - .handle_channel_reestablish(nodes[0].node.get_our_node_id(), &reestablish_1[0]); + .handle_channel_reestablish(node_a_id, &reestablish_1[0]); let resp_1 = handle_chan_reestablish_msgs!(nodes[1], nodes[0]); check_added_monitors!(nodes[1], 0); nodes[0] .node - .handle_channel_reestablish(nodes[1].node.get_our_node_id(), &reestablish_2[0]); + .handle_channel_reestablish(node_b_id, &reestablish_2[0]); let resp_0 = handle_chan_reestablish_msgs!(nodes[0], nodes[1]); assert!(resp_0.0.is_none()); @@ -3619,24 +3700,24 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { assert_eq!(events.len(), 1); // Deliver the pending in-flight CS - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &cs); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &cs); 
check_added_monitors!(nodes[0], 1); let commitment_msg = match events.pop().unwrap() { MessageSendEvent::UpdateHTLCs { node_id, channel_id: _, updates } => { - assert_eq!(node_id, nodes[1].node.get_our_node_id()); + assert_eq!(node_id, node_b_id); assert!(updates.update_fail_htlcs.is_empty()); assert!(updates.update_fail_malformed_htlcs.is_empty()); assert!(updates.update_fee.is_none()); assert_eq!(updates.update_fulfill_htlcs.len(), 1); nodes[1].node.handle_update_fulfill_htlc( - nodes[0].node.get_our_node_id(), + node_a_id, &updates.update_fulfill_htlcs[0], ); expect_payment_sent(&nodes[1], payment_preimage_0, None, false, false); assert_eq!(updates.update_add_htlcs.len(), 1); nodes[1].node.handle_update_add_htlc( - nodes[0].node.get_our_node_id(), + node_a_id, &updates.update_add_htlcs[0], ); updates.commitment_signed @@ -3646,15 +3727,15 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { nodes[1] .node - .handle_commitment_signed_batch_test(nodes[0].node.get_our_node_id(), &commitment_msg); + .handle_commitment_signed_batch_test(node_a_id, &commitment_msg); check_added_monitors!(nodes[1], 1); let as_revoke_and_ack = get_event_msg!( nodes[0], MessageSendEvent::SendRevokeAndACK, - nodes[1].node.get_our_node_id() + node_b_id ); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_revoke_and_ack); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack); expect_pending_htlcs_forwardable!(nodes[1]); expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 100000); check_added_monitors!(nodes[1], 1); @@ -3703,6 +3784,10 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1); let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2; @@ -3728,23 +3813,23 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); - nodes[1].node.handle_update_add_htlc(nodes[0].node.get_our_node_id(), &send_event.msgs[0]); + nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); nodes[1].node.handle_commitment_signed_batch_test( - nodes[0].node.get_our_node_id(), + node_a_id, &send_event.commitment_msg, ); check_added_monitors!(nodes[1], 1); - let (bs_raa, bs_cs) = get_revoke_commit_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_raa); + let (bs_raa, bs_cs) = get_revoke_commit_msgs!(nodes[1], node_a_id); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa); check_added_monitors!(nodes[0], 1); - nodes[0].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_cs); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_cs); check_added_monitors!(nodes[0], 1); as_raa = Some(get_event_msg!( nodes[0], MessageSendEvent::SendRevokeAndACK, - nodes[1].node.get_our_node_id() + node_b_id )); } @@ -3757,27 +3842,27 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f [HTLCHandlingFailureType::Receive { payment_hash }] ); check_added_monitors!(nodes[2], 1); - get_htlc_update_msgs!(nodes[2], 
nodes[1].node.get_our_node_id()); + get_htlc_update_msgs!(nodes[2], node_b_id); } else { nodes[2].node.claim_funds(payment_preimage); check_added_monitors!(nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash, 100_000); - let cs_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + let cs_updates = get_htlc_update_msgs!(nodes[2], node_b_id); assert_eq!(cs_updates.update_fulfill_htlcs.len(), 1); // Check that the message we're about to deliver matches the one generated: assert_eq!(fulfill_msg, cs_updates.update_fulfill_htlcs[0]); } - nodes[1].node.handle_update_fulfill_htlc(nodes[2].node.get_our_node_id(), &fulfill_msg); + nodes[1].node.handle_update_fulfill_htlc(node_c_id, &fulfill_msg); expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); check_added_monitors!(nodes[1], 1); let mut bs_updates = None; if htlc_status != HTLCStatusAtDupClaim::HoldingCell { - bs_updates = Some(get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id())); + bs_updates = Some(get_htlc_update_msgs!(nodes[1], node_a_id)); assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1); nodes[0].node.handle_update_fulfill_htlc( - nodes[1].node.get_our_node_id(), + node_b_id, &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0], ); expect_payment_sent(&nodes[0], payment_preimage, None, false, false); @@ -3794,8 +3879,8 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); } - nodes[1].node.peer_disconnected(nodes[2].node.get_our_node_id()); - nodes[2].node.peer_disconnected(nodes[1].node.get_our_node_id()); + nodes[1].node.peer_disconnected(node_c_id); + nodes[2].node.peer_disconnected(node_b_id); if second_fails { let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]); @@ -3804,7 +3889,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f expect_pending_htlcs_forwardable_and_htlc_handling_failed!( nodes[1], [HTLCHandlingFailureType::Forward { - node_id: Some(nodes[2].node.get_our_node_id()), + node_id: Some(node_c_id), channel_id: chan_id_2 }] ); @@ -3815,14 +3900,14 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f } if htlc_status == HTLCStatusAtDupClaim::HoldingCell { - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &as_raa.unwrap()); + nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa.unwrap()); check_added_monitors!(nodes[1], 1); expect_pending_htlcs_forwardable_ignore!(nodes[1]); // We finally receive the second payment, but don't claim it - bs_updates = Some(get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id())); + bs_updates = Some(get_htlc_update_msgs!(nodes[1], node_a_id)); assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1); nodes[0].node.handle_update_fulfill_htlc( - nodes[1].node.get_our_node_id(), + node_b_id, &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0], ); expect_payment_sent(&nodes[0], payment_preimage, None, false, false); @@ -3860,21 +3945,24 @@ fn test_temporary_error_during_shutdown() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config.clone()), Some(config)]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let (_, _, channel_id, funding_tx) = create_announced_chan_between_nodes(&nodes, 0, 1); 
chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - nodes[0].node.close_channel(&channel_id, &nodes[1].node.get_our_node_id()).unwrap(); + nodes[0].node.close_channel(&channel_id, &node_b_id).unwrap(); nodes[1].node.handle_shutdown( - nodes[0].node.get_our_node_id(), - &get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, nodes[1].node.get_our_node_id()), + node_a_id, + &get_event_msg!(nodes[0], MessageSendEvent::SendShutdown, node_b_id), ); check_added_monitors!(nodes[1], 1); nodes[0].node.handle_shutdown( - nodes[1].node.get_our_node_id(), - &get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, nodes[0].node.get_our_node_id()), + node_b_id, + &get_event_msg!(nodes[1], MessageSendEvent::SendShutdown, node_a_id), ); check_added_monitors!(nodes[0], 1); @@ -3893,11 +3981,11 @@ fn test_temporary_error_during_shutdown() { .clone(); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); nodes[1].node.handle_closing_signed( - nodes[0].node.get_our_node_id(), + node_a_id, &get_event_msg!( nodes[0], MessageSendEvent::SendClosingSigned, - nodes[1].node.get_our_node_id() + node_b_id ), ); @@ -3915,21 +4003,21 @@ fn test_temporary_error_during_shutdown() { nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); nodes[0].node.handle_closing_signed( - nodes[1].node.get_our_node_id(), + node_b_id, &get_event_msg!( nodes[1], MessageSendEvent::SendClosingSigned, - nodes[0].node.get_our_node_id() + node_a_id ), ); let (_, closing_signed_a) = - get_closing_signed_broadcast!(nodes[0].node, nodes[1].node.get_our_node_id()); + get_closing_signed_broadcast!(nodes[0].node, node_b_id); let txn_a = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); nodes[1] .node - .handle_closing_signed(nodes[0].node.get_our_node_id(), &closing_signed_a.unwrap()); - let (_, none_b) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id()); + .handle_closing_signed(node_a_id, &closing_signed_a.unwrap()); + let (_, none_b) = get_closing_signed_broadcast!(nodes[1].node, node_a_id); assert!(none_b.is_none()); let txn_b = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); @@ -3940,14 +4028,14 @@ fn test_temporary_error_during_shutdown() { nodes[1], 1, ClosureReason::CounterpartyInitiatedCooperativeClosure, - [nodes[0].node.get_our_node_id()], + [node_a_id], 100000 ); check_closed_event!( nodes[0], 1, ClosureReason::LocallyInitiatedCooperativeClosure, - [nodes[1].node.get_our_node_id()], + [node_b_id], 100000 ); } @@ -3960,6 +4048,9 @@ fn double_temp_error() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let (_, _, channel_id, _) = create_announced_chan_between_nodes(&nodes, 0, 1); let (payment_preimage_1, payment_hash_1, ..) 
= @@ -4045,24 +4136,24 @@ fn double_temp_error() { _ => panic!("Unexpected event"), } }; - assert_eq!(node_id, nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &update_fulfill_1); + assert_eq!(node_id, node_a_id); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &update_fulfill_1); check_added_monitors!(nodes[0], 0); expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false); nodes[0].node.handle_commitment_signed_batch_test( - nodes[1].node.get_our_node_id(), + node_b_id, &commitment_signed_b1, ); check_added_monitors!(nodes[0], 1); nodes[0].node.process_pending_htlc_forwards(); let (raa_a1, commitment_signed_a1) = - get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + get_revoke_commit_msgs!(nodes[0], node_b_id); check_added_monitors!(nodes[1], 0); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &raa_a1); + nodes[1].node.handle_revoke_and_ack(node_a_id, &raa_a1); check_added_monitors!(nodes[1], 1); nodes[1].node.handle_commitment_signed_batch_test( - nodes[0].node.get_our_node_id(), + node_a_id, &commitment_signed_a1, ); check_added_monitors!(nodes[1], 1); @@ -4074,7 +4165,7 @@ fn double_temp_error() { ( match &events[0] { MessageSendEvent::UpdateHTLCs { node_id, channel_id: _, updates } => { - assert_eq!(*node_id, nodes[0].node.get_our_node_id()); + assert_eq!(*node_id, node_a_id); assert!(updates.update_add_htlcs.is_empty()); assert!(updates.update_fail_htlcs.is_empty()); assert!(updates.update_fail_malformed_htlcs.is_empty()); @@ -4086,18 +4177,18 @@ fn double_temp_error() { }, match events[1] { MessageSendEvent::SendRevokeAndACK { ref node_id, ref msg } => { - assert_eq!(*node_id, nodes[0].node.get_our_node_id()); + assert_eq!(*node_id, node_a_id); (*msg).clone() }, _ => panic!("Unexpected event"), }, ) }; - nodes[0].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &raa_b2); + nodes[0].node.handle_revoke_and_ack(node_b_id, &raa_b2); check_added_monitors!(nodes[0], 1); expect_payment_path_successful!(nodes[0]); - nodes[0].node.handle_update_fulfill_htlc(nodes[1].node.get_our_node_id(), &update_fulfill_2); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &update_fulfill_2); check_added_monitors!(nodes[0], 0); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); commitment_signed_dance!(nodes[0], nodes[1], commitment_signed_b2, false); @@ -4124,16 +4215,19 @@ fn do_test_outbound_reload_without_init_mon(use_0conf: bool) { let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + nodes[0] .node - .create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43, None, None) + .create_channel(node_b_id, 100000, 10001, 43, None, None) .unwrap(); nodes[1].node.handle_open_channel( - nodes[0].node.get_our_node_id(), + node_a_id, &get_event_msg!( nodes[0], MessageSendEvent::SendOpenChannel, - nodes[1].node.get_our_node_id() + node_b_id ), ); @@ -4146,7 +4240,7 @@ fn do_test_outbound_reload_without_init_mon(use_0conf: bool) { .node .accept_inbound_channel_from_trusted_peer_0conf( &temporary_channel_id, - &nodes[0].node.get_our_node_id(), + &node_a_id, 0, None, ) @@ -4156,7 +4250,7 @@ fn do_test_outbound_reload_without_init_mon(use_0conf: bool) { .node .accept_inbound_channel( &temporary_channel_id, - &nodes[0].node.get_our_node_id(), + &node_a_id, 0, None, ) @@ -4167,22 
+4261,22 @@ fn do_test_outbound_reload_without_init_mon(use_0conf: bool) { }; nodes[0].node.handle_accept_channel( - nodes[1].node.get_our_node_id(), + node_b_id, &get_event_msg!( nodes[1], MessageSendEvent::SendAcceptChannel, - nodes[0].node.get_our_node_id() + node_a_id ), ); let (temporary_channel_id, funding_tx, ..) = - create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 43); + create_funding_transaction(&nodes[0], &node_b_id, 100000, 43); nodes[0] .node .funding_transaction_generated( temporary_channel_id, - nodes[1].node.get_our_node_id(), + node_b_id, funding_tx.clone(), ) .unwrap(); @@ -4191,11 +4285,11 @@ fn do_test_outbound_reload_without_init_mon(use_0conf: bool) { let funding_created_msg = get_event_msg!( nodes[0], MessageSendEvent::SendFundingCreated, - nodes[1].node.get_our_node_id() + node_b_id ); - nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created_msg); + nodes[1].node.handle_funding_created(node_a_id, &funding_created_msg); check_added_monitors!(nodes[1], 1); - expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id()); + expect_channel_pending_event(&nodes[1], &node_a_id); let bs_signed_locked = nodes[1].node.get_and_clear_pending_msg_events(); assert_eq!(bs_signed_locked.len(), if use_0conf { 2 } else { 1 }); @@ -4203,7 +4297,7 @@ fn do_test_outbound_reload_without_init_mon(use_0conf: bool) { MessageSendEvent::SendFundingSigned { msg, .. } => { chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - nodes[0].node.handle_funding_signed(nodes[1].node.get_our_node_id(), &msg); + nodes[0].node.handle_funding_signed(node_b_id, &msg); check_added_monitors!(nodes[0], 1); }, _ => panic!("Unexpected event"), @@ -4211,7 +4305,7 @@ fn do_test_outbound_reload_without_init_mon(use_0conf: bool) { if use_0conf { match &bs_signed_locked[1] { MessageSendEvent::SendChannelReady { msg, .. 
} => { - nodes[0].node.handle_channel_ready(nodes[1].node.get_our_node_id(), &msg); + nodes[0].node.handle_channel_ready(node_b_id, &msg); }, _ => panic!("Unexpected event"), } @@ -4242,7 +4336,7 @@ fn do_test_outbound_reload_without_init_mon(use_0conf: bool) { nodes[0], 1, ClosureReason::DisconnectedPeer, - [nodes[1].node.get_our_node_id()], + [node_b_id], 100000 ); assert!(nodes[0].node.list_channels().is_empty()); @@ -4274,16 +4368,19 @@ fn do_test_inbound_reload_without_init_mon(use_0conf: bool, lock_commitment: boo let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + nodes[0] .node - .create_channel(nodes[1].node.get_our_node_id(), 100000, 10001, 43, None, None) + .create_channel(node_b_id, 100000, 10001, 43, None, None) .unwrap(); nodes[1].node.handle_open_channel( - nodes[0].node.get_our_node_id(), + node_a_id, &get_event_msg!( nodes[0], MessageSendEvent::SendOpenChannel, - nodes[1].node.get_our_node_id() + node_b_id ), ); @@ -4296,7 +4393,7 @@ fn do_test_inbound_reload_without_init_mon(use_0conf: bool, lock_commitment: boo .node .accept_inbound_channel_from_trusted_peer_0conf( &temporary_channel_id, - &nodes[0].node.get_our_node_id(), + &node_a_id, 0, None, ) @@ -4306,7 +4403,7 @@ fn do_test_inbound_reload_without_init_mon(use_0conf: bool, lock_commitment: boo .node .accept_inbound_channel( &temporary_channel_id, - &nodes[0].node.get_our_node_id(), + &node_a_id, 0, None, ) @@ -4317,22 +4414,22 @@ fn do_test_inbound_reload_without_init_mon(use_0conf: bool, lock_commitment: boo }; nodes[0].node.handle_accept_channel( - nodes[1].node.get_our_node_id(), + node_b_id, &get_event_msg!( nodes[1], MessageSendEvent::SendAcceptChannel, - nodes[0].node.get_our_node_id() + node_a_id ), ); let (temporary_channel_id, funding_tx, ..) 
= - create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 43); + create_funding_transaction(&nodes[0], &node_b_id, 100000, 43); nodes[0] .node .funding_transaction_generated( temporary_channel_id, - nodes[1].node.get_our_node_id(), + node_b_id, funding_tx.clone(), ) .unwrap(); @@ -4341,10 +4438,10 @@ fn do_test_inbound_reload_without_init_mon(use_0conf: bool, lock_commitment: boo let funding_created_msg = get_event_msg!( nodes[0], MessageSendEvent::SendFundingCreated, - nodes[1].node.get_our_node_id() + node_b_id ); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - nodes[1].node.handle_funding_created(nodes[0].node.get_our_node_id(), &funding_created_msg); + nodes[1].node.handle_funding_created(node_a_id, &funding_created_msg); check_added_monitors!(nodes[1], 1); // nodes[1] happily sends its funding_signed even though its awaiting the persistence of the @@ -4353,12 +4450,12 @@ fn do_test_inbound_reload_without_init_mon(use_0conf: bool, lock_commitment: boo let funding_signed_msg = get_event_msg!( nodes[1], MessageSendEvent::SendFundingSigned, - nodes[0].node.get_our_node_id() + node_a_id ); - nodes[0].node.handle_funding_signed(nodes[1].node.get_our_node_id(), &funding_signed_msg); + nodes[0].node.handle_funding_signed(node_b_id, &funding_signed_msg); check_added_monitors!(nodes[0], 1); - expect_channel_pending_event(&nodes[0], &nodes[1].node.get_our_node_id()); + expect_channel_pending_event(&nodes[0], &node_b_id); let as_funding_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); if lock_commitment { @@ -4369,9 +4466,9 @@ fn do_test_inbound_reload_without_init_mon(use_0conf: bool, lock_commitment: boo let as_ready = get_event_msg!( nodes[0], MessageSendEvent::SendChannelReady, - nodes[1].node.get_our_node_id() + node_b_id ); - nodes[1].node.handle_channel_ready(nodes[0].node.get_our_node_id(), &as_ready); + nodes[1].node.handle_channel_ready(node_a_id, &as_ready); } assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -4397,7 +4494,7 @@ fn do_test_inbound_reload_without_init_mon(use_0conf: bool, lock_commitment: boo nodes[1], 1, ClosureReason::DisconnectedPeer, - [nodes[0].node.get_our_node_id()], + [node_a_id], 100000 ); assert!(nodes[1].node.list_channels().is_empty()); @@ -4420,6 +4517,10 @@ fn test_blocked_chan_preimage_release() { let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + create_announced_chan_between_nodes(&nodes, 0, 1); let chan_id_2 = create_announced_chan_between_nodes(&nodes, 1, 2).2; @@ -4437,9 +4538,9 @@ fn test_blocked_chan_preimage_release() { check_added_monitors(&nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash_1, 1_000_000); - let cs_htlc_fulfill_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + let cs_htlc_fulfill_updates = get_htlc_update_msgs!(nodes[2], node_b_id); nodes[1].node.handle_update_fulfill_htlc( - nodes[2].node.get_our_node_id(), + node_c_id, &cs_htlc_fulfill_updates.update_fulfill_htlcs[0], ); do_commitment_signed_dance( @@ -4458,9 +4559,9 @@ fn test_blocked_chan_preimage_release() { check_added_monitors(&nodes[0], 1); expect_payment_claimed!(nodes[0], payment_hash_2, 1_000_000); - let as_htlc_fulfill_updates = get_htlc_update_msgs!(nodes[0], 
nodes[1].node.get_our_node_id()); + let as_htlc_fulfill_updates = get_htlc_update_msgs!(nodes[0], node_b_id); nodes[1].node.handle_update_fulfill_htlc( - nodes[0].node.get_our_node_id(), + node_a_id, &as_htlc_fulfill_updates.update_fulfill_htlcs[0], ); check_added_monitors(&nodes[1], 1); // We generate only a preimage monitor update @@ -4471,14 +4572,14 @@ fn test_blocked_chan_preimage_release() { // update_fulfill_htlc + CS is held, even though the preimage is already on disk for the // channel. nodes[1].node.handle_commitment_signed_batch_test( - nodes[0].node.get_our_node_id(), + node_a_id, &as_htlc_fulfill_updates.commitment_signed, ); check_added_monitors(&nodes[1], 1); let (a, raa) = do_main_commitment_signed_dance(&nodes[1], &nodes[0], false); assert!(a.is_none()); - nodes[1].node.handle_revoke_and_ack(nodes[0].node.get_our_node_id(), &raa); + nodes[1].node.handle_revoke_and_ack(node_a_id, &raa); check_added_monitors(&nodes[1], 0); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -4502,11 +4603,11 @@ fn test_blocked_chan_preimage_release() { // When we fetch the next update the message getter will generate the next update for nodes[2], // generating a further monitor update. - let bs_htlc_fulfill_updates = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id()); + let bs_htlc_fulfill_updates = get_htlc_update_msgs!(nodes[1], node_c_id); check_added_monitors(&nodes[1], 1); nodes[2].node.handle_update_fulfill_htlc( - nodes[1].node.get_our_node_id(), + node_b_id, &bs_htlc_fulfill_updates.update_fulfill_htlcs[0], ); do_commitment_signed_dance( @@ -4540,6 +4641,10 @@ fn do_test_inverted_mon_completion_order( let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let chan_id_ab = create_announced_chan_between_nodes(&nodes, 0, 1).2; let chan_id_bc = create_announced_chan_between_nodes(&nodes, 1, 2).2; @@ -4560,9 +4665,9 @@ fn do_test_inverted_mon_completion_order( expect_payment_claimed!(nodes[2], payment_hash, 100_000); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - let cs_updates = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id()); + let cs_updates = get_htlc_update_msgs(&nodes[2], &node_b_id); nodes[1].node.handle_update_fulfill_htlc( - nodes[2].node.get_our_node_id(), + node_c_id, &cs_updates.update_fulfill_htlcs[0], ); @@ -4575,30 +4680,30 @@ fn do_test_inverted_mon_completion_order( // won't get the preimage when the nodes reconnect and we have to get it from the // ChannelMonitor. 
nodes[1].node.handle_commitment_signed_batch_test( - nodes[2].node.get_our_node_id(), + node_c_id, &cs_updates.commitment_signed, ); check_added_monitors(&nodes[1], 1); if complete_bc_commitment_dance { let (bs_revoke_and_ack, bs_commitment_signed) = - get_revoke_commit_msgs!(nodes[1], nodes[2].node.get_our_node_id()); - nodes[2].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_revoke_and_ack); + get_revoke_commit_msgs!(nodes[1], node_c_id); + nodes[2].node.handle_revoke_and_ack(node_b_id, &bs_revoke_and_ack); check_added_monitors(&nodes[2], 1); nodes[2].node.handle_commitment_signed_batch_test( - nodes[1].node.get_our_node_id(), + node_b_id, &bs_commitment_signed, ); check_added_monitors(&nodes[2], 1); let cs_raa = get_event_msg!( nodes[2], MessageSendEvent::SendRevokeAndACK, - nodes[1].node.get_our_node_id() + node_b_id ); // At this point node B still hasn't persisted the `ChannelMonitorUpdate` with the // preimage in the A <-> B channel, which will prevent it from persisting the // `ChannelMonitorUpdate` for the B<->C channel here to avoid "losing" the preimage. - nodes[1].node.handle_revoke_and_ack(nodes[2].node.get_our_node_id(), &cs_raa); + nodes[1].node.handle_revoke_and_ack(node_c_id, &cs_raa); check_added_monitors(&nodes[1], 0); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); } @@ -4618,8 +4723,8 @@ fn do_test_inverted_mon_completion_order( nodes_1_deserialized ); - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[2].node.peer_disconnected(nodes[1].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); + nodes[2].node.peer_disconnected(node_b_id); if with_latest_manager { // If we used the latest ChannelManager to reload from, we should have both channels still @@ -4684,7 +4789,7 @@ fn do_test_inverted_mon_completion_order( 1, ClosureReason::OutdatedChannelManager, false, - &[nodes[2].node.get_our_node_id()], + &[node_c_id], 100_000, ); check_added_monitors(&nodes[1], 2); @@ -4719,11 +4824,11 @@ fn do_test_inverted_mon_completion_order( // node A. 
} - let bs_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); + let bs_updates = get_htlc_update_msgs(&nodes[1], &node_a_id); check_added_monitors(&nodes[1], 1); nodes[0].node.handle_update_fulfill_htlc( - nodes[1].node.get_our_node_id(), + node_b_id, &bs_updates.update_fulfill_htlcs[0], ); do_commitment_signed_dance(&nodes[0], &nodes[1], &bs_updates.commitment_signed, false, false); @@ -4771,6 +4876,10 @@ fn do_test_durable_preimages_on_closed_channel( let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let chan_id_ab = create_announced_chan_between_nodes(&nodes, 0, 1).2; let chan_id_bc = create_announced_chan_between_nodes(&nodes, 1, 2).2; @@ -4787,9 +4896,9 @@ fn do_test_durable_preimages_on_closed_channel( expect_payment_claimed!(nodes[2], payment_hash, 1_000_000); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - let cs_updates = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id()); + let cs_updates = get_htlc_update_msgs(&nodes[2], &node_b_id); nodes[1].node.handle_update_fulfill_htlc( - nodes[2].node.get_our_node_id(), + node_c_id, &cs_updates.update_fulfill_htlcs[0], ); @@ -4802,11 +4911,11 @@ fn do_test_durable_preimages_on_closed_channel( // the preimage when the nodes reconnect, at which point we have to ensure we get it from the // ChannelMonitor. nodes[1].node.handle_commitment_signed_batch_test( - nodes[2].node.get_our_node_id(), + node_c_id, &cs_updates.commitment_signed, ); check_added_monitors(&nodes[1], 1); - let _ = get_revoke_commit_msgs!(nodes[1], nodes[2].node.get_our_node_id()); + let _ = get_revoke_commit_msgs!(nodes[1], node_c_id); let mon_bc = get_monitor!(nodes[1], chan_id_bc).encode(); let error_message = "Channel force-closed"; @@ -4818,7 +4927,7 @@ fn do_test_durable_preimages_on_closed_channel( .node .force_close_broadcasting_latest_txn( &chan_id_bc, - &nodes[2].node.get_our_node_id(), + &node_c_id, error_message.to_string(), ) .unwrap(); @@ -4828,7 +4937,7 @@ fn do_test_durable_preimages_on_closed_channel( 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false, - &[nodes[2].node.get_our_node_id()], + &[node_c_id], 100000, ); } @@ -4838,7 +4947,7 @@ fn do_test_durable_preimages_on_closed_channel( .node .force_close_broadcasting_latest_txn( &chan_id_ab, - &nodes[0].node.get_our_node_id(), + &node_a_id, error_message.to_string(), ) .unwrap(); @@ -4848,7 +4957,7 @@ fn do_test_durable_preimages_on_closed_channel( 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false, - &[nodes[0].node.get_our_node_id()], + &[node_a_id], 100000, ); } @@ -4864,8 +4973,8 @@ fn do_test_durable_preimages_on_closed_channel( nodes_1_deserialized ); - nodes[0].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[2].node.peer_disconnected(nodes[1].node.get_our_node_id()); + nodes[0].node.peer_disconnected(node_b_id); + nodes[2].node.peer_disconnected(node_b_id); if close_chans_before_reload { // If the channels were already closed, B will rebroadcast its closing transactions here. 
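(Editorial aside: the tests in this file, including `do_test_inverted_mon_completion_order` above and `do_test_durable_preimages_on_closed_channel` here, all lean on the same async-persistence pattern: `set_update_ret(ChannelMonitorUpdateStatus::InProgress)` makes the test persister report a `ChannelMonitorUpdate` as still in flight, which holds back any messages that depend on it until `force_channel_monitor_updated` marks the write complete. The sketch below is a toy model of that gating only, written for illustration; the type and method names echo the helpers used in these tests but none of it is LDK's real API.)

	// Toy model of monitor-update gating: messages that depend on a
	// `ChannelMonitorUpdate` are held while persistence is in progress
	// and released only once the update is reported complete.
	#[derive(Clone, Copy, PartialEq, Debug)]
	enum UpdateStatus {
		Completed,
		InProgress,
	}

	#[derive(Default)]
	struct ToyChannel {
		pending_update_ids: Vec<u64>,
		held_msgs: Vec<&'static str>,
	}

	impl ToyChannel {
		// Apply an update; if the persister reports `InProgress`, hold
		// the resulting message instead of handing it to the peer.
		fn apply_update(
			&mut self, id: u64, status: UpdateStatus, msg: &'static str,
		) -> Option<&'static str> {
			match status {
				UpdateStatus::Completed => Some(msg),
				UpdateStatus::InProgress => {
					self.pending_update_ids.push(id);
					self.held_msgs.push(msg);
					None
				},
			}
		}

		// Rough analogue of `force_channel_monitor_updated`: once the
		// update is durably persisted, held messages are released.
		fn update_persisted(&mut self, id: u64) -> Vec<&'static str> {
			self.pending_update_ids.retain(|&p| p != id);
			if self.pending_update_ids.is_empty() {
				std::mem::take(&mut self.held_msgs)
			} else {
				Vec::new()
			}
		}
	}

	fn main() {
		let mut chan = ToyChannel::default();
		// Persistence is marked in-flight, so the fulfill is held...
		assert_eq!(chan.apply_update(1, UpdateStatus::InProgress, "update_fulfill_htlc"), None);
		// ...and released only once the monitor write completes.
		assert_eq!(chan.update_persisted(1), vec!["update_fulfill_htlc"]);
	}

In the real tests, that release step is what finally lets a held `update_fulfill_htlc` flow onward (e.g. from node B to node A), which is exactly the ordering these reload tests pin down.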
@@ -4882,7 +4991,7 @@ fn do_test_durable_preimages_on_closed_channel( .node .force_close_broadcasting_latest_txn( &chan_id_ab, - &nodes[1].node.get_our_node_id(), + &node_b_id, error_message.to_string(), ) .unwrap(); @@ -4891,7 +5000,7 @@ fn do_test_durable_preimages_on_closed_channel( 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false, - &[nodes[1].node.get_our_node_id()], + &[node_b_id], 100000, ); let as_closing_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); @@ -4938,7 +5047,7 @@ fn do_test_durable_preimages_on_closed_channel( 1, ClosureReason::CommitmentTxConfirmed, false, - &[nodes[0].node.get_our_node_id()], + &[node_a_id], 100000, ); } @@ -5015,6 +5124,9 @@ fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) { let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let chan_id_ab = create_announced_chan_between_nodes(&nodes, 0, 1).2; let chan_id_bc = create_announced_chan_between_nodes(&nodes, 1, 2).2; @@ -5031,9 +5143,9 @@ fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) { expect_payment_claimed!(nodes[2], payment_hash, 1_000_000); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - let cs_updates = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id()); + let cs_updates = get_htlc_update_msgs(&nodes[2], &node_b_id); nodes[1].node.handle_update_fulfill_htlc( - nodes[2].node.get_our_node_id(), + node_c_id, &cs_updates.update_fulfill_htlcs[0], ); @@ -5045,23 +5157,23 @@ fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) { // Now step the Commitment Signed Dance between B and C and check that after the final RAA B // doesn't let the preimage-removing monitor update fly. 
nodes[1].node.handle_commitment_signed_batch_test( - nodes[2].node.get_our_node_id(), + node_c_id, &cs_updates.commitment_signed, ); check_added_monitors(&nodes[1], 1); - let (bs_raa, bs_cs) = get_revoke_commit_msgs!(nodes[1], nodes[2].node.get_our_node_id()); + let (bs_raa, bs_cs) = get_revoke_commit_msgs!(nodes[1], node_c_id); - nodes[2].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_raa); + nodes[2].node.handle_revoke_and_ack(node_b_id, &bs_raa); check_added_monitors(&nodes[2], 1); - nodes[2].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_cs); + nodes[2].node.handle_commitment_signed_batch_test(node_b_id, &bs_cs); check_added_monitors(&nodes[2], 1); let cs_final_raa = get_event_msg!( nodes[2], MessageSendEvent::SendRevokeAndACK, - nodes[1].node.get_our_node_id() + node_b_id ); - nodes[1].node.handle_revoke_and_ack(nodes[2].node.get_our_node_id(), &cs_final_raa); + nodes[1].node.handle_revoke_and_ack(node_c_id, &cs_final_raa); check_added_monitors(&nodes[1], 0); // Finally, reload node B and check that after we call `process_pending_events` once we realize @@ -5087,7 +5199,7 @@ fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) { .node .force_close_broadcasting_latest_txn( &chan_id_ab, - &nodes[1].node.get_our_node_id(), + &node_b_id, error_message.to_string(), ) .unwrap(); @@ -5098,7 +5210,7 @@ fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) { 1, ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, false, - &[nodes[1].node.get_our_node_id()], + &[node_b_id], 100_000, ); let as_closing_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); @@ -5137,7 +5249,7 @@ fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) { // Finally, check that there's nothing left to do on B<->C reconnect and the channel operates // fine. - nodes[2].node.peer_disconnected(nodes[1].node.get_our_node_id()); + nodes[2].node.peer_disconnected(node_b_id); reconnect_nodes(ReconnectArgs::new(&nodes[1], &nodes[2])); send_payment(&nodes[1], &[&nodes[2]], 100_000); } @@ -5161,6 +5273,7 @@ fn do_test_glacial_peer_cant_hang(hold_chan_a: bool) { let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); let node_c_id = nodes[2].node.get_our_node_id(); let chan_id_ab = create_announced_chan_between_nodes(&nodes, 0, 1).2; @@ -5175,29 +5288,29 @@ fn do_test_glacial_peer_cant_hang(hold_chan_a: bool) { check_added_monitors(&nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash, 1_000_000); - let cs_updates = get_htlc_update_msgs(&nodes[2], &nodes[1].node.get_our_node_id()); + let cs_updates = get_htlc_update_msgs(&nodes[2], &node_b_id); if hold_chan_a { // The first update will be on the A <-> B channel, which we optionally allow to complete. 
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); } nodes[1].node.handle_update_fulfill_htlc( - nodes[2].node.get_our_node_id(), + node_c_id, &cs_updates.update_fulfill_htlcs[0], ); check_added_monitors(&nodes[1], 1); if !hold_chan_a { - let bs_updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); + let bs_updates = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[0].node.handle_update_fulfill_htlc( - nodes[1].node.get_our_node_id(), + node_b_id, &bs_updates.update_fulfill_htlcs[0], ); commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false); expect_payment_sent!(&nodes[0], payment_preimage); } - nodes[1].node.peer_disconnected(nodes[2].node.get_our_node_id()); - nodes[2].node.peer_disconnected(nodes[1].node.get_our_node_id()); + nodes[1].node.peer_disconnected(node_c_id); + nodes[2].node.peer_disconnected(node_b_id); let mut reconnect = ReconnectArgs::new(&nodes[1], &nodes[2]); reconnect.pending_htlc_claims = (1, 0); @@ -5273,7 +5386,7 @@ fn do_test_glacial_peer_cant_hang(hold_chan_a: bool) { assert_eq!(c_update.len(), 1); nodes[0].node.handle_update_fulfill_htlc( - nodes[1].node.get_our_node_id(), + node_b_id, &a_update[0].update_fulfill_htlcs[0], ); commitment_signed_dance!(nodes[0], nodes[1], a_update[0].commitment_signed, false); @@ -5310,6 +5423,11 @@ fn test_partial_claim_mon_update_compl_actions() { let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let node_d_id = nodes[3].node.get_our_node_id(); + let chan_1_scid = create_announced_chan_between_nodes(&nodes, 0, 1).0.contents.short_channel_id; let chan_2_scid = create_announced_chan_between_nodes(&nodes, 0, 2).0.contents.short_channel_id; let (chan_3_update, _, chan_3_id, ..) = create_announced_chan_between_nodes(&nodes, 1, 3); @@ -5321,10 +5439,10 @@ fn test_partial_claim_mon_update_compl_actions() { get_route_and_payment_hash!(&nodes[0], nodes[3], 100000); let path = route.paths[0].clone(); route.paths.push(path); - route.paths[0].hops[0].pubkey = nodes[1].node.get_our_node_id(); + route.paths[0].hops[0].pubkey = node_b_id; route.paths[0].hops[0].short_channel_id = chan_1_scid; route.paths[0].hops[1].short_channel_id = chan_3_scid; - route.paths[1].hops[0].pubkey = nodes[2].node.get_our_node_id(); + route.paths[1].hops[0].pubkey = node_c_id; route.paths[1].hops[0].short_channel_id = chan_2_scid; route.paths[1].hops[1].short_channel_id = chan_4_scid; send_along_route_with_secret( @@ -5348,27 +5466,27 @@ fn test_partial_claim_mon_update_compl_actions() { // blocks. 
nodes[3].chain_monitor.complete_sole_pending_chan_update(&chan_3_id); expect_payment_claimed!(&nodes[3], payment_hash, 200_000); - let updates = get_htlc_update_msgs(&nodes[3], &nodes[1].node.get_our_node_id()); + let updates = get_htlc_update_msgs(&nodes[3], &node_b_id); nodes[1].node.handle_update_fulfill_htlc( - nodes[3].node.get_our_node_id(), + node_d_id, &updates.update_fulfill_htlcs[0], ); check_added_monitors(&nodes[1], 1); expect_payment_forwarded!(nodes[1], nodes[0], nodes[3], Some(1000), false, false); - let _bs_updates_for_a = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); + let _bs_updates_for_a = get_htlc_update_msgs(&nodes[1], &node_a_id); nodes[1].node.handle_commitment_signed_batch_test( - nodes[3].node.get_our_node_id(), + node_d_id, &updates.commitment_signed, ); check_added_monitors(&nodes[1], 1); - let (bs_raa, bs_cs) = get_revoke_commit_msgs(&nodes[1], &nodes[3].node.get_our_node_id()); + let (bs_raa, bs_cs) = get_revoke_commit_msgs(&nodes[1], &node_d_id); - nodes[3].node.handle_revoke_and_ack(nodes[1].node.get_our_node_id(), &bs_raa); + nodes[3].node.handle_revoke_and_ack(node_b_id, &bs_raa); check_added_monitors(&nodes[3], 0); - nodes[3].node.handle_commitment_signed_batch_test(nodes[1].node.get_our_node_id(), &bs_cs); + nodes[3].node.handle_commitment_signed_batch_test(node_b_id, &bs_cs); check_added_monitors(&nodes[3], 0); assert!(nodes[3].node.get_and_clear_pending_msg_events().is_empty()); @@ -5383,27 +5501,27 @@ fn test_partial_claim_mon_update_compl_actions() { assert_eq!(ds_msgs.len(), 2); check_added_monitors(&nodes[3], 2); - match remove_first_msg_event_to_node(&nodes[1].node.get_our_node_id(), &mut ds_msgs) { + match remove_first_msg_event_to_node(&node_b_id, &mut ds_msgs) { MessageSendEvent::SendRevokeAndACK { msg, .. } => { - nodes[1].node.handle_revoke_and_ack(nodes[3].node.get_our_node_id(), &msg); + nodes[1].node.handle_revoke_and_ack(node_d_id, &msg); check_added_monitors(&nodes[1], 1); }, _ => panic!(), } - match remove_first_msg_event_to_node(&nodes[2].node.get_our_node_id(), &mut ds_msgs) { + match remove_first_msg_event_to_node(&node_c_id, &mut ds_msgs) { MessageSendEvent::UpdateHTLCs { updates, .. 
} => { nodes[2].node.handle_update_fulfill_htlc( - nodes[3].node.get_our_node_id(), + node_d_id, &updates.update_fulfill_htlcs[0], ); check_added_monitors(&nodes[2], 1); expect_payment_forwarded!(nodes[2], nodes[0], nodes[3], Some(1000), false, false); let _cs_updates_for_a = - get_htlc_update_msgs(&nodes[2], &nodes[0].node.get_our_node_id()); + get_htlc_update_msgs(&nodes[2], &node_a_id); nodes[2].node.handle_commitment_signed_batch_test( - nodes[3].node.get_our_node_id(), + node_d_id, &updates.commitment_signed, ); check_added_monitors(&nodes[2], 1); @@ -5411,20 +5529,20 @@ fn test_partial_claim_mon_update_compl_actions() { _ => panic!(), } - let (cs_raa, cs_cs) = get_revoke_commit_msgs(&nodes[2], &nodes[3].node.get_our_node_id()); + let (cs_raa, cs_cs) = get_revoke_commit_msgs(&nodes[2], &node_d_id); - nodes[3].node.handle_revoke_and_ack(nodes[2].node.get_our_node_id(), &cs_raa); + nodes[3].node.handle_revoke_and_ack(node_c_id, &cs_raa); check_added_monitors(&nodes[3], 1); - nodes[3].node.handle_commitment_signed_batch_test(nodes[2].node.get_our_node_id(), &cs_cs); + nodes[3].node.handle_commitment_signed_batch_test(node_c_id, &cs_cs); check_added_monitors(&nodes[3], 1); let ds_raa = get_event_msg!( nodes[3], MessageSendEvent::SendRevokeAndACK, - nodes[2].node.get_our_node_id() + node_c_id ); - nodes[2].node.handle_revoke_and_ack(nodes[3].node.get_our_node_id(), &ds_raa); + nodes[2].node.handle_revoke_and_ack(node_d_id, &ds_raa); check_added_monitors(&nodes[2], 1); // Our current `ChannelMonitor`s store preimages one RAA longer than they need to. That's nice @@ -5450,6 +5568,10 @@ fn test_claim_to_closed_channel_blocks_forwarded_preimage_removal() { let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + // First open channels, route a payment, and force-close the first hop. 
let chan_a = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000); @@ -5463,13 +5585,13 @@ fn test_claim_to_closed_channel_blocks_forwarded_preimage_removal() { .node .force_close_broadcasting_latest_txn( &chan_a.2, - &nodes[1].node.get_our_node_id(), + &node_b_id, String::new(), ) .unwrap(); check_added_monitors!(nodes[0], 1); let a_reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; - check_closed_event!(nodes[0], 1, a_reason, [nodes[1].node.get_our_node_id()], 1000000); + check_closed_event!(nodes[0], 1, a_reason, [node_b_id], 1000000); check_closed_broadcast!(nodes[0], true); let as_commit_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); @@ -5481,7 +5603,7 @@ fn test_claim_to_closed_channel_blocks_forwarded_preimage_removal() { nodes[1], 1, ClosureReason::CommitmentTxConfirmed, - [nodes[0].node.get_our_node_id()], + [node_a_id], 1000000 ); check_closed_broadcast!(nodes[1], true); @@ -5492,10 +5614,10 @@ fn test_claim_to_closed_channel_blocks_forwarded_preimage_removal() { check_added_monitors!(nodes[2], 1); expect_payment_claimed!(nodes[2], payment_hash, 1_000_000); - let updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + let updates = get_htlc_update_msgs!(nodes[2], node_b_id); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[1].node.handle_update_fulfill_htlc( - nodes[2].node.get_our_node_id(), + node_c_id, &updates.update_fulfill_htlcs[0], ); check_added_monitors!(nodes[1], 1); @@ -5538,6 +5660,9 @@ fn test_claim_to_closed_channel_blocks_claimed_event() { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let node_a_id = nodes[0].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + // First open channels, route a payment, and force-close the first hop. 
let chan_a = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000); @@ -5548,13 +5673,13 @@ fn test_claim_to_closed_channel_blocks_claimed_event() { .node .force_close_broadcasting_latest_txn( &chan_a.2, - &nodes[1].node.get_our_node_id(), + &node_b_id, String::new(), ) .unwrap(); check_added_monitors!(nodes[0], 1); let a_reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; - check_closed_event!(nodes[0], 1, a_reason, [nodes[1].node.get_our_node_id()], 1000000); + check_closed_event!(nodes[0], 1, a_reason, [node_b_id], 1000000); check_closed_broadcast!(nodes[0], true); let as_commit_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); @@ -5566,7 +5691,7 @@ fn test_claim_to_closed_channel_blocks_claimed_event() { nodes[1], 1, ClosureReason::CommitmentTxConfirmed, - [nodes[0].node.get_our_node_id()], + [node_a_id], 1000000 ); check_closed_broadcast!(nodes[1], true); @@ -5603,8 +5728,14 @@ fn test_single_channel_multiple_mpp() { let node_chanmgrs = create_node_chanmgrs(9, &node_cfgs, &configs); let mut nodes = create_network(9, &node_cfgs, &node_chanmgrs); - let node_7_id = nodes[7].node.get_our_node_id(); - let node_8_id = nodes[8].node.get_our_node_id(); + let node_b_id = nodes[1].node.get_our_node_id(); + let node_c_id = nodes[2].node.get_our_node_id(); + let node_d_id = nodes[3].node.get_our_node_id(); + let node_e_id = nodes[4].node.get_our_node_id(); + let node_f_id = nodes[5].node.get_our_node_id(); + let node_g_id = nodes[6].node.get_our_node_id(); + let node_h_id = nodes[7].node.get_our_node_id(); + let node_i_id = nodes[8].node.get_our_node_id(); // Send an MPP payment in six parts along the path shown from top to bottom // 0 @@ -5706,23 +5837,23 @@ fn test_single_channel_multiple_mpp() { do_a_write_background.send(()).unwrap(); }); block_thrd2.store(false, Ordering::Release); - let first_updates = get_htlc_update_msgs(&nodes[8], &nodes[7].node.get_our_node_id()); + let first_updates = get_htlc_update_msgs(&nodes[8], &node_h_id); thrd2.join().unwrap(); // Disconnect node 6 from all its peers so it doesn't bother to fail the HTLCs back - nodes[7].node.peer_disconnected(nodes[1].node.get_our_node_id()); - nodes[7].node.peer_disconnected(nodes[2].node.get_our_node_id()); - nodes[7].node.peer_disconnected(nodes[3].node.get_our_node_id()); - nodes[7].node.peer_disconnected(nodes[4].node.get_our_node_id()); - nodes[7].node.peer_disconnected(nodes[5].node.get_our_node_id()); - nodes[7].node.peer_disconnected(nodes[6].node.get_our_node_id()); - - nodes[7].node.handle_update_fulfill_htlc(node_8_id, &first_updates.update_fulfill_htlcs[0]); + nodes[7].node.peer_disconnected(node_b_id); + nodes[7].node.peer_disconnected(node_c_id); + nodes[7].node.peer_disconnected(node_d_id); + nodes[7].node.peer_disconnected(node_e_id); + nodes[7].node.peer_disconnected(node_f_id); + nodes[7].node.peer_disconnected(node_g_id); + + nodes[7].node.handle_update_fulfill_htlc(node_i_id, &first_updates.update_fulfill_htlcs[0]); check_added_monitors(&nodes[7], 1); expect_payment_forwarded!(nodes[7], nodes[1], nodes[8], Some(1000), false, false); - nodes[7].node.handle_commitment_signed_batch_test(node_8_id, &first_updates.commitment_signed); + nodes[7].node.handle_commitment_signed_batch_test(node_i_id, &first_updates.commitment_signed); check_added_monitors(&nodes[7], 1); - let (raa, cs) = get_revoke_commit_msgs(&nodes[7], &node_8_id); + let (raa, cs) = get_revoke_commit_msgs(&nodes[7], &node_i_id); // Now, handle the `revoke_and_ack` 
from node 5. Note that `claim_funds` is still blocked on // our peer lock, so we have to release a write to let it process. @@ -5741,7 +5872,7 @@ fn test_single_channel_multiple_mpp() { do_a_write_background.send(()).unwrap(); }); block_thrd3.store(false, Ordering::Release); - nodes[8].node.handle_revoke_and_ack(node_7_id, &raa); + nodes[8].node.handle_revoke_and_ack(node_h_id, &raa); thrd3.join().unwrap(); assert!(!thrd.is_finished()); @@ -5761,63 +5892,63 @@ fn test_single_channel_multiple_mpp() { // Now drive everything to the end, at least as far as node 7 is concerned... *nodes[8].chain_monitor.write_blocker.lock().unwrap() = None; - nodes[8].node.handle_commitment_signed_batch_test(node_7_id, &cs); + nodes[8].node.handle_commitment_signed_batch_test(node_h_id, &cs); check_added_monitors(&nodes[8], 1); - let (updates, raa) = get_updates_and_revoke(&nodes[8], &nodes[7].node.get_our_node_id()); + let (updates, raa) = get_updates_and_revoke(&nodes[8], &node_h_id); - nodes[7].node.handle_update_fulfill_htlc(node_8_id, &updates.update_fulfill_htlcs[0]); + nodes[7].node.handle_update_fulfill_htlc(node_i_id, &updates.update_fulfill_htlcs[0]); expect_payment_forwarded!(nodes[7], nodes[2], nodes[8], Some(1000), false, false); - nodes[7].node.handle_update_fulfill_htlc(node_8_id, &updates.update_fulfill_htlcs[1]); + nodes[7].node.handle_update_fulfill_htlc(node_i_id, &updates.update_fulfill_htlcs[1]); expect_payment_forwarded!(nodes[7], nodes[3], nodes[8], Some(1000), false, false); let mut next_source = 4; if let Some(update) = updates.update_fulfill_htlcs.get(2) { - nodes[7].node.handle_update_fulfill_htlc(node_8_id, update); + nodes[7].node.handle_update_fulfill_htlc(node_i_id, update); expect_payment_forwarded!(nodes[7], nodes[4], nodes[8], Some(1000), false, false); next_source += 1; } - nodes[7].node.handle_commitment_signed_batch_test(node_8_id, &updates.commitment_signed); - nodes[7].node.handle_revoke_and_ack(node_8_id, &raa); + nodes[7].node.handle_commitment_signed_batch_test(node_i_id, &updates.commitment_signed); + nodes[7].node.handle_revoke_and_ack(node_i_id, &raa); if updates.update_fulfill_htlcs.get(2).is_some() { check_added_monitors(&nodes[7], 5); } else { check_added_monitors(&nodes[7], 4); } - let (raa, cs) = get_revoke_commit_msgs(&nodes[7], &node_8_id); + let (raa, cs) = get_revoke_commit_msgs(&nodes[7], &node_i_id); - nodes[8].node.handle_revoke_and_ack(node_7_id, &raa); - nodes[8].node.handle_commitment_signed_batch_test(node_7_id, &cs); + nodes[8].node.handle_revoke_and_ack(node_h_id, &raa); + nodes[8].node.handle_commitment_signed_batch_test(node_h_id, &cs); check_added_monitors(&nodes[8], 2); - let (updates, raa) = get_updates_and_revoke(&nodes[8], &node_7_id); + let (updates, raa) = get_updates_and_revoke(&nodes[8], &node_h_id); - nodes[7].node.handle_update_fulfill_htlc(node_8_id, &updates.update_fulfill_htlcs[0]); + nodes[7].node.handle_update_fulfill_htlc(node_i_id, &updates.update_fulfill_htlcs[0]); expect_payment_forwarded!(nodes[7], nodes[next_source], nodes[8], Some(1000), false, false); next_source += 1; - nodes[7].node.handle_update_fulfill_htlc(node_8_id, &updates.update_fulfill_htlcs[1]); + nodes[7].node.handle_update_fulfill_htlc(node_i_id, &updates.update_fulfill_htlcs[1]); expect_payment_forwarded!(nodes[7], nodes[next_source], nodes[8], Some(1000), false, false); next_source += 1; if let Some(update) = updates.update_fulfill_htlcs.get(2) { - nodes[7].node.handle_update_fulfill_htlc(node_8_id, update); + 
nodes[7].node.handle_update_fulfill_htlc(node_i_id, update); expect_payment_forwarded!(nodes[7], nodes[next_source], nodes[8], Some(1000), false, false); } - nodes[7].node.handle_commitment_signed_batch_test(node_8_id, &updates.commitment_signed); - nodes[7].node.handle_revoke_and_ack(node_8_id, &raa); + nodes[7].node.handle_commitment_signed_batch_test(node_i_id, &updates.commitment_signed); + nodes[7].node.handle_revoke_and_ack(node_i_id, &raa); if updates.update_fulfill_htlcs.get(2).is_some() { check_added_monitors(&nodes[7], 5); } else { check_added_monitors(&nodes[7], 4); } - let (raa, cs) = get_revoke_commit_msgs(&nodes[7], &node_8_id); - nodes[8].node.handle_revoke_and_ack(node_7_id, &raa); - nodes[8].node.handle_commitment_signed_batch_test(node_7_id, &cs); + let (raa, cs) = get_revoke_commit_msgs(&nodes[7], &node_i_id); + nodes[8].node.handle_revoke_and_ack(node_h_id, &raa); + nodes[8].node.handle_commitment_signed_batch_test(node_h_id, &cs); check_added_monitors(&nodes[8], 2); - let raa = get_event_msg!(nodes[8], MessageSendEvent::SendRevokeAndACK, node_7_id); - nodes[7].node.handle_revoke_and_ack(node_8_id, &raa); + let raa = get_event_msg!(nodes[8], MessageSendEvent::SendRevokeAndACK, node_h_id); + nodes[7].node.handle_revoke_and_ack(node_i_id, &raa); check_added_monitors(&nodes[7], 1); } From df1802a48a13c810bb47774bcfcaef0ab5c5e5c2 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sun, 18 May 2025 20:04:02 +0000 Subject: [PATCH 10/12] Re-`rustfmt` `chanmon_update_fail_tests` --- lightning/src/ln/chanmon_update_fail_tests.rs | 1036 ++++------------- 1 file changed, 233 insertions(+), 803 deletions(-) diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index de25c4a8127..c9798e914e3 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -111,10 +111,7 @@ fn test_monitor_and_persister_update_fail() { let updates = get_htlc_update_msgs!(nodes[1], node_a_id); assert_eq!(updates.update_fulfill_htlcs.len(), 1); - nodes[0].node.handle_update_fulfill_htlc( - node_b_id, - &updates.update_fulfill_htlcs[0], - ); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &updates.update_fulfill_htlcs[0]); { let mut node_0_per_peer_lock; @@ -291,11 +288,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { let error_message = "Channel force-closed"; nodes[0] .node - .force_close_broadcasting_latest_txn( - &channel_id, - &node_b_id, - error_message.to_string(), - ) + .force_close_broadcasting_latest_txn(&channel_id, &node_b_id, error_message.to_string()) .unwrap(); check_added_monitors!(nodes[0], 1); check_closed_broadcast!(nodes[0], true); @@ -404,10 +397,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { assert!(update_fee.is_none()); if (disconnect_count & 16) == 0 { - nodes[0].node.handle_update_fulfill_htlc( - node_b_id, - &update_fulfill_htlcs[0], - ); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &update_fulfill_htlcs[0]); let events_3 = nodes[0].node.get_and_clear_pending_events(); assert_eq!(events_3.len(), 1); match events_3[0] { @@ -418,10 +408,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { _ => panic!("Unexpected event"), } - nodes[0].node.handle_commitment_signed_batch_test( - node_b_id, - commitment_signed, - ); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, commitment_signed); check_added_monitors!(nodes[0], 1); 
assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); } @@ -483,13 +470,9 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); assert_eq!(reestablish_2.len(), 1); - nodes[0] - .node - .handle_channel_reestablish(node_b_id, &reestablish_2[0]); + nodes[0].node.handle_channel_reestablish(node_b_id, &reestablish_2[0]); let as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]); - nodes[1] - .node - .handle_channel_reestablish(node_a_id, &reestablish_1[0]); + nodes[1].node.handle_channel_reestablish(node_a_id, &reestablish_1[0]); let bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]); assert!(as_resp.0.is_none()); @@ -532,14 +515,10 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); assert_eq!(reestablish_2.len(), 1); - nodes[0] - .node - .handle_channel_reestablish(node_b_id, &reestablish_2[0]); + nodes[0].node.handle_channel_reestablish(node_b_id, &reestablish_2[0]); check_added_monitors!(nodes[0], 0); let mut as_resp = handle_chan_reestablish_msgs!(nodes[0], nodes[1]); - nodes[1] - .node - .handle_channel_reestablish(node_a_id, &reestablish_1[0]); + nodes[1].node.handle_channel_reestablish(node_a_id, &reestablish_1[0]); check_added_monitors!(nodes[1], 0); let mut bs_resp = handle_chan_reestablish_msgs!(nodes[1], nodes[0]); @@ -581,11 +560,8 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { node_b_id, &bs_resp.2.as_ref().unwrap().commitment_signed, ); - let as_resp_raa = get_event_msg!( - nodes[0], - MessageSendEvent::SendRevokeAndACK, - node_b_id - ); + let as_resp_raa = + get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); // No commitment_signed so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[0], 1); @@ -606,11 +582,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { } ( - SendEvent::from_commitment_update( - node_b_id, - channel_id, - as_resp.2.unwrap(), - ), + SendEvent::from_commitment_update(node_b_id, channel_id, as_resp.2.unwrap()), as_resp.1.unwrap(), ) } else { @@ -631,15 +603,8 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { assert_eq!(payment_event.node_id, node_b_id); nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); - nodes[1].node.handle_commitment_signed_batch_test( - node_a_id, - &payment_event.commitment_msg, - ); - let bs_revoke_and_ack = get_event_msg!( - nodes[1], - MessageSendEvent::SendRevokeAndACK, - node_a_id - ); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event.commitment_msg); + let bs_revoke_and_ack = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); // nodes[1] is awaiting an RAA from nodes[0] still so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[1], 1); @@ -658,9 +623,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { macro_rules! handle_bs_raa { () => { - nodes[0] - .node - .handle_revoke_and_ack(node_b_id, &bs_revoke_and_ack); + nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_revoke_and_ack); as_commitment_update = get_htlc_update_msgs!(nodes[0], node_b_id); assert!(as_commitment_update.update_add_htlcs.is_empty()); assert!(as_commitment_update.update_fulfill_htlcs.is_empty()); @@ -673,11 +636,8 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { macro_rules! 
handle_initial_raa { () => { - nodes[1] - .node - .handle_revoke_and_ack(node_a_id, &initial_revoke_and_ack); - bs_second_commitment_update = - get_htlc_update_msgs!(nodes[1], node_a_id); + nodes[1].node.handle_revoke_and_ack(node_a_id, &initial_revoke_and_ack); + bs_second_commitment_update = get_htlc_update_msgs!(nodes[1], node_a_id); assert!(bs_second_commitment_update.update_add_htlcs.is_empty()); assert!(bs_second_commitment_update.update_fulfill_htlcs.is_empty()); assert!(bs_second_commitment_update.update_fail_htlcs.is_empty()); @@ -745,23 +705,15 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { node_b_id, &bs_second_commitment_update.commitment_signed, ); - let as_revoke_and_ack = get_event_msg!( - nodes[0], - MessageSendEvent::SendRevokeAndACK, - node_b_id - ); + let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); // No commitment_signed so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[0], 1); - nodes[1].node.handle_commitment_signed_batch_test( - node_a_id, - &as_commitment_update.commitment_signed, - ); - let bs_second_revoke_and_ack = get_event_msg!( - nodes[1], - MessageSendEvent::SendRevokeAndACK, - node_a_id - ); + nodes[1] + .node + .handle_commitment_signed_batch_test(node_a_id, &as_commitment_update.commitment_signed); + let bs_second_revoke_and_ack = + get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); // No commitment_signed so get_event_msg's assert(len == 1) passes check_added_monitors!(nodes[1], 1); @@ -868,10 +820,7 @@ fn test_monitor_update_fail_cs() { nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - nodes[1].node.handle_commitment_signed_batch_test( - node_a_id, - &send_event.commitment_msg, - ); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &send_event.commitment_msg); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -908,10 +857,9 @@ fn test_monitor_update_fail_cs() { assert_eq!(*node_id, node_a_id); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - nodes[0].node.handle_commitment_signed_batch_test( - node_b_id, - &updates.commitment_signed, - ); + nodes[0] + .node + .handle_commitment_signed_batch_test(node_b_id, &updates.commitment_signed); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -931,11 +879,7 @@ fn test_monitor_update_fail_cs() { nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors!(nodes[0], 0); - let final_raa = get_event_msg!( - nodes[0], - MessageSendEvent::SendRevokeAndACK, - node_b_id - ); + let final_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &final_raa); check_added_monitors!(nodes[1], 1); @@ -1097,24 +1041,14 @@ fn test_monitor_update_raa_while_paused() { SendEvent::from_event(nodes[1].node.get_and_clear_pending_msg_events().remove(0)); nodes[1].node.handle_update_add_htlc(node_a_id, &send_event_1.msgs[0]); - nodes[1].node.handle_commitment_signed_batch_test( - node_a_id, - &send_event_1.commitment_msg, - ); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, 
&send_event_1.commitment_msg); check_added_monitors!(nodes[1], 1); - let bs_raa = get_event_msg!( - nodes[1], - MessageSendEvent::SendRevokeAndACK, - node_a_id - ); + let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[0].node.handle_update_add_htlc(node_b_id, &send_event_2.msgs[0]); - nodes[0].node.handle_commitment_signed_batch_test( - node_b_id, - &send_event_2.commitment_msg, - ); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &send_event_2.commitment_msg); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -1139,26 +1073,13 @@ fn test_monitor_update_raa_while_paused() { check_added_monitors!(nodes[1], 1); let bs_cs = get_htlc_update_msgs!(nodes[1], node_a_id); - nodes[1] - .node - .handle_commitment_signed_batch_test(node_a_id, &as_update_raa.1); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_update_raa.1); check_added_monitors!(nodes[1], 1); - let bs_second_raa = get_event_msg!( - nodes[1], - MessageSendEvent::SendRevokeAndACK, - node_a_id - ); + let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); - nodes[0].node.handle_commitment_signed_batch_test( - node_b_id, - &bs_cs.commitment_signed, - ); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_cs.commitment_signed); check_added_monitors!(nodes[0], 1); - let as_second_raa = get_event_msg!( - nodes[0], - MessageSendEvent::SendRevokeAndACK, - node_b_id - ); + let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_raa); check_added_monitors!(nodes[0], 1); @@ -1208,9 +1129,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { assert_eq!(updates.update_fail_htlcs.len(), 1); assert!(updates.update_fail_malformed_htlcs.is_empty()); assert!(updates.update_fee.is_none()); - nodes[1] - .node - .handle_update_fail_htlc(node_c_id, &updates.update_fail_htlcs[0]); + nodes[1].node.handle_update_fail_htlc(node_c_id, &updates.update_fail_htlcs[0]); let bs_revoke_and_ack = commitment_signed_dance!( nodes[1], @@ -1304,10 +1223,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { send_event = SendEvent::from_event(nodes[2].node.get_and_clear_pending_msg_events().remove(0)); nodes[1].node.handle_update_add_htlc(node_c_id, &send_event.msgs[0]); - nodes[1].node.handle_commitment_signed_batch_test( - node_c_id, - &send_event.commitment_msg, - ); + nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &send_event.commitment_msg); check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); (Some(payment_preimage_4), Some(payment_hash_4)) @@ -1330,10 +1246,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { check_added_monitors!(nodes[1], 0); expect_pending_htlcs_forwardable_and_htlc_handling_failed!( nodes[1], - [HTLCHandlingFailureType::Forward { - node_id: Some(node_c_id), - channel_id: chan_2.2 - }] + [HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }] ); check_added_monitors!(nodes[1], 1); @@ -1346,8 +1259,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { // Note that the ordering of 
the events for different nodes is non-prescriptive, though the // ordering of the two events that both go to nodes[2] have to stay in the same order. - let nodes_0_event = - remove_first_msg_event_to_node(&node_a_id, &mut events_3); + let nodes_0_event = remove_first_msg_event_to_node(&node_a_id, &mut events_3); let messages_a = match nodes_0_event { MessageSendEvent::UpdateHTLCs { node_id, mut updates, channel_id: _ } => { assert_eq!(node_id, node_a_id); @@ -1361,14 +1273,12 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { _ => panic!("Unexpected event type!"), }; - let nodes_2_event = - remove_first_msg_event_to_node(&node_c_id, &mut events_3); + let nodes_2_event = remove_first_msg_event_to_node(&node_c_id, &mut events_3); let send_event_b = SendEvent::from_event(nodes_2_event); assert_eq!(send_event_b.node_id, node_c_id); let raa = if test_ignore_second_cs { - let nodes_2_event = - remove_first_msg_event_to_node(&node_c_id, &mut events_3); + let nodes_2_event = remove_first_msg_event_to_node(&node_c_id, &mut events_3); match nodes_2_event { MessageSendEvent::SendRevokeAndACK { node_id, msg } => { assert_eq!(node_id, node_c_id); @@ -1389,16 +1299,10 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { nodes[2].node.handle_update_add_htlc(node_b_id, &send_event_b.msgs[0]); let as_cs; if test_ignore_second_cs { - nodes[2].node.handle_commitment_signed_batch_test( - node_b_id, - &send_event_b.commitment_msg, - ); + nodes[2].node.handle_commitment_signed_batch_test(node_b_id, &send_event_b.commitment_msg); check_added_monitors!(nodes[2], 1); - let bs_revoke_and_ack = get_event_msg!( - nodes[2], - MessageSendEvent::SendRevokeAndACK, - node_b_id - ); + let bs_revoke_and_ack = + get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[2].node.handle_revoke_and_ack(node_b_id, &raa.unwrap()); check_added_monitors!(nodes[2], 1); let bs_cs = get_htlc_update_msgs!(nodes[2], node_b_id); @@ -1412,16 +1316,10 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { check_added_monitors!(nodes[1], 1); as_cs = get_htlc_update_msgs!(nodes[1], node_c_id); - nodes[1].node.handle_commitment_signed_batch_test( - node_c_id, - &bs_cs.commitment_signed, - ); + nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &bs_cs.commitment_signed); check_added_monitors!(nodes[1], 1); } else { - nodes[2].node.handle_commitment_signed_batch_test( - node_b_id, - &send_event_b.commitment_msg, - ); + nodes[2].node.handle_commitment_signed_batch_test(node_b_id, &send_event_b.commitment_msg); check_added_monitors!(nodes[2], 1); let bs_revoke_and_commit = nodes[2].node.get_and_clear_pending_msg_events(); @@ -1446,10 +1344,9 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { assert!(updates.update_fail_malformed_htlcs.is_empty()); assert!(updates.update_fulfill_htlcs.is_empty()); assert!(updates.update_fee.is_none()); - nodes[1].node.handle_commitment_signed_batch_test( - node_c_id, - &updates.commitment_signed, - ); + nodes[1] + .node + .handle_commitment_signed_batch_test(node_c_id, &updates.commitment_signed); check_added_monitors!(nodes[1], 1); }, _ => panic!("Unexpected event"), @@ -1461,25 +1358,12 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { assert!(as_cs.update_fail_malformed_htlcs.is_empty()); assert!(as_cs.update_fulfill_htlcs.is_empty()); assert!(as_cs.update_fee.is_none()); - let as_raa = get_event_msg!( - nodes[1], - MessageSendEvent::SendRevokeAndACK, - node_c_id - ); + let as_raa = 
get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_c_id); - nodes[2] - .node - .handle_update_add_htlc(node_b_id, &as_cs.update_add_htlcs[0]); - nodes[2].node.handle_commitment_signed_batch_test( - node_b_id, - &as_cs.commitment_signed, - ); + nodes[2].node.handle_update_add_htlc(node_b_id, &as_cs.update_add_htlcs[0]); + nodes[2].node.handle_commitment_signed_batch_test(node_b_id, &as_cs.commitment_signed); check_added_monitors!(nodes[2], 1); - let bs_second_raa = get_event_msg!( - nodes[2], - MessageSendEvent::SendRevokeAndACK, - node_b_id - ); + let bs_second_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[2].node.handle_revoke_and_ack(node_b_id, &as_raa); check_added_monitors!(nodes[2], 1); @@ -1489,16 +1373,9 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); - nodes[1].node.handle_commitment_signed_batch_test( - node_c_id, - &bs_second_cs.commitment_signed, - ); + nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &bs_second_cs.commitment_signed); check_added_monitors!(nodes[1], 1); - let as_second_raa = get_event_msg!( - nodes[1], - MessageSendEvent::SendRevokeAndACK, - node_c_id - ); + let as_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_c_id); nodes[2].node.handle_revoke_and_ack(node_b_id, &as_second_raa); check_added_monitors!(nodes[2], 1); @@ -1586,10 +1463,7 @@ fn test_monitor_update_fail_reestablish() { assert!(updates.update_fail_malformed_htlcs.is_empty()); assert!(updates.update_fee.is_none()); assert_eq!(updates.update_fulfill_htlcs.len(), 1); - nodes[1].node.handle_update_fulfill_htlc( - node_c_id, - &updates.update_fulfill_htlcs[0], - ); + nodes[1].node.handle_update_fulfill_htlc(node_c_id, &updates.update_fulfill_htlcs[0]); expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -1628,13 +1502,9 @@ fn test_monitor_update_fail_reestablish() { nodes[1].node.handle_channel_reestablish(node_a_id, &as_reestablish); assert_eq!( - get_event_msg!( - nodes[0], - MessageSendEvent::SendChannelUpdate, - node_b_id - ) - .contents - .channel_flags + get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, node_b_id) + .contents + .channel_flags & 2, 0 ); // The "disabled" bit should be unset as we just reconnected @@ -1675,13 +1545,9 @@ fn test_monitor_update_fail_reestablish() { nodes[0].node.handle_channel_reestablish(node_b_id, &bs_reestablish); assert_eq!( - get_event_msg!( - nodes[0], - MessageSendEvent::SendChannelUpdate, - node_b_id - ) - .contents - .channel_flags + get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, node_b_id) + .contents + .channel_flags & 2, 0 ); // The "disabled" bit should be unset as we just reconnected @@ -1689,13 +1555,9 @@ fn test_monitor_update_fail_reestablish() { nodes[1].node.handle_channel_reestablish(node_a_id, &as_reestablish); check_added_monitors!(nodes[1], 0); assert_eq!( - get_event_msg!( - nodes[1], - MessageSendEvent::SendChannelUpdate, - node_a_id - ) - .contents - .channel_flags + get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, node_a_id) + .contents + .channel_flags & 2, 0 ); // The "disabled" bit should be unset as we just reconnected @@ -1718,10 +1580,7 @@ fn test_monitor_update_fail_reestablish() { assert!(updates.update_fail_malformed_htlcs.is_empty()); 
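	// For orientation, the reestablish check this test repeats, sketched with the
	// same helpers (the reestablish message variable is a placeholder for the one
	// captured above):
	nodes[0].node.handle_channel_reestablish(node_b_id, &bs_reestablish);
	let upd = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, node_b_id);
	// Bit 1 of channel_flags is the "disabled" bit; it must be unset after reconnecting:
	assert_eq!(upd.contents.channel_flags & 2, 0);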
assert!(updates.update_fee.is_none()); assert_eq!(updates.update_fulfill_htlcs.len(), 1); - nodes[0].node.handle_update_fulfill_htlc( - node_b_id, - &updates.update_fulfill_htlcs[0], - ); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &updates.update_fulfill_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], updates.commitment_signed, false); expect_payment_sent!(nodes[0], payment_preimage); } @@ -1781,10 +1640,7 @@ fn raa_no_response_awaiting_raa_state() { assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); - nodes[1].node.handle_commitment_signed_batch_test( - node_a_id, - &payment_event.commitment_msg, - ); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event.commitment_msg); check_added_monitors!(nodes[1], 1); let bs_responses = get_revoke_commit_msgs!(nodes[1], node_a_id); @@ -1794,15 +1650,9 @@ fn raa_no_response_awaiting_raa_state() { assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); - nodes[0] - .node - .handle_commitment_signed_batch_test(node_b_id, &bs_responses.1); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_responses.1); check_added_monitors!(nodes[0], 1); - let as_raa = get_event_msg!( - nodes[0], - MessageSendEvent::SendRevokeAndACK, - node_b_id - ); + let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); // Now we have a CS queued up which adds a new HTLC (which will need a RAA/CS response from // nodes[1]) followed by an RAA. Fail the monitor updating prior to the CS, deliver the RAA, @@ -1810,10 +1660,7 @@ fn raa_no_response_awaiting_raa_state() { chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); - nodes[1].node.handle_commitment_signed_batch_test( - node_a_id, - &payment_event.commitment_msg, - ); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event.commitment_msg); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -1859,27 +1706,14 @@ fn raa_no_response_awaiting_raa_state() { assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); - nodes[0] - .node - .handle_commitment_signed_batch_test(node_b_id, &bs_responses.1); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_responses.1); check_added_monitors!(nodes[0], 1); - let as_raa = get_event_msg!( - nodes[0], - MessageSendEvent::SendRevokeAndACK, - node_b_id - ); + let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); - nodes[1].node.handle_commitment_signed_batch_test( - node_a_id, - &payment_event.commitment_msg, - ); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event.commitment_msg); check_added_monitors!(nodes[1], 1); - let bs_raa = get_event_msg!( - nodes[1], - MessageSendEvent::SendRevokeAndACK, - node_a_id - ); + let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); // Finally deliver the RAA to nodes[1] which results in a CS response to the last update nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); @@ 
-1891,16 +1725,9 @@ fn raa_no_response_awaiting_raa_state() { nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_raa); check_added_monitors!(nodes[0], 1); - nodes[0].node.handle_commitment_signed_batch_test( - node_b_id, - &bs_update.commitment_signed, - ); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_update.commitment_signed); check_added_monitors!(nodes[0], 1); - let as_raa = get_event_msg!( - nodes[0], - MessageSendEvent::SendRevokeAndACK, - node_b_id - ); + let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); check_added_monitors!(nodes[1], 1); @@ -1970,22 +1797,16 @@ fn claim_while_disconnected_monitor_update_fail() { let bs_reconnect = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(); nodes[0].node.handle_channel_reestablish(node_b_id, &bs_reconnect); - let _as_channel_update = get_event_msg!( - nodes[0], - MessageSendEvent::SendChannelUpdate, - node_b_id - ); + let _as_channel_update = + get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, node_b_id); // Now deliver a's reestablish, freeing the claim from the holding cell, but fail the monitor // update. chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[1].node.handle_channel_reestablish(node_a_id, &as_reconnect); - let _bs_channel_update = get_event_msg!( - nodes[1], - MessageSendEvent::SendChannelUpdate, - node_a_id - ); + let _bs_channel_update = + get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, node_a_id); check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -2007,13 +1828,8 @@ fn claim_while_disconnected_monitor_update_fail() { } let as_updates = get_htlc_update_msgs!(nodes[0], node_b_id); - nodes[1] - .node - .handle_update_add_htlc(node_a_id, &as_updates.update_add_htlcs[0]); - nodes[1].node.handle_commitment_signed_batch_test( - node_a_id, - &as_updates.commitment_signed, - ); + nodes[1].node.handle_update_add_htlc(node_a_id, &as_updates.update_add_htlcs[0]); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_updates.commitment_signed); check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); // Note that nodes[1] not updating monitor here is OK - it wont take action on the new HTLC @@ -2039,22 +1855,14 @@ fn claim_while_disconnected_monitor_update_fail() { match bs_msgs[0] { MessageSendEvent::UpdateHTLCs { ref node_id, channel_id: _, ref updates } => { assert_eq!(*node_id, node_a_id); - nodes[0].node.handle_update_fulfill_htlc( - node_b_id, - &updates.update_fulfill_htlcs[0], - ); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &updates.update_fulfill_htlcs[0]); expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false); - nodes[0].node.handle_commitment_signed_batch_test( - node_b_id, - &updates.commitment_signed, - ); + nodes[0] + .node + .handle_commitment_signed_batch_test(node_b_id, &updates.commitment_signed); check_added_monitors!(nodes[0], 1); - let as_raa = get_event_msg!( - nodes[0], - MessageSendEvent::SendRevokeAndACK, - node_b_id - ); + let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); check_added_monitors!(nodes[1], 1); }, @@ -2073,27 +1881,13 @@ fn claim_while_disconnected_monitor_update_fail() { let as_commitment = get_htlc_update_msgs!(nodes[0], node_b_id); let bs_commitment = 
get_htlc_update_msgs!(nodes[1], node_a_id); - nodes[0].node.handle_commitment_signed_batch_test( - node_b_id, - &bs_commitment.commitment_signed, - ); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_commitment.commitment_signed); check_added_monitors!(nodes[0], 1); - let as_raa = get_event_msg!( - nodes[0], - MessageSendEvent::SendRevokeAndACK, - node_b_id - ); + let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); - nodes[1].node.handle_commitment_signed_batch_test( - node_a_id, - &as_commitment.commitment_signed, - ); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &as_commitment.commitment_signed); check_added_monitors!(nodes[1], 1); - let bs_raa = get_event_msg!( - nodes[1], - MessageSendEvent::SendRevokeAndACK, - node_a_id - ); + let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); check_added_monitors!(nodes[1], 1); @@ -2171,10 +1965,7 @@ fn monitor_failed_no_reestablish_response() { assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); - nodes[1].node.handle_commitment_signed_batch_test( - node_a_id, - &payment_event.commitment_msg, - ); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event.commitment_msg); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 1); @@ -2212,17 +2003,11 @@ fn monitor_failed_no_reestablish_response() { let bs_reconnect = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(); nodes[1].node.handle_channel_reestablish(node_a_id, &as_reconnect); - let _bs_channel_update = get_event_msg!( - nodes[1], - MessageSendEvent::SendChannelUpdate, - node_a_id - ); + let _bs_channel_update = + get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, node_a_id); nodes[0].node.handle_channel_reestablish(node_b_id, &bs_reconnect); - let _as_channel_update = get_event_msg!( - nodes[0], - MessageSendEvent::SendChannelUpdate, - node_b_id - ); + let _as_channel_update = + get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, node_b_id); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (latest_update, _) = nodes[1] @@ -2239,16 +2024,10 @@ fn monitor_failed_no_reestablish_response() { nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_responses.0); check_added_monitors!(nodes[0], 1); - nodes[0] - .node - .handle_commitment_signed_batch_test(node_b_id, &bs_responses.1); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_responses.1); check_added_monitors!(nodes[0], 1); - let as_raa = get_event_msg!( - nodes[0], - MessageSendEvent::SendRevokeAndACK, - node_b_id - ); + let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); check_added_monitors!(nodes[1], 1); @@ -2302,25 +2081,16 @@ fn first_message_on_recv_ordering() { let payment_event = SendEvent::from_event(events.pop().unwrap()); assert_eq!(payment_event.node_id, node_b_id); nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); - nodes[1].node.handle_commitment_signed_batch_test( - node_a_id, - &payment_event.commitment_msg, - ); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event.commitment_msg); check_added_monitors!(nodes[1], 1); let bs_responses = get_revoke_commit_msgs!(nodes[1], 
node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_responses.0); check_added_monitors!(nodes[0], 1); - nodes[0] - .node - .handle_commitment_signed_batch_test(node_b_id, &bs_responses.1); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_responses.1); check_added_monitors!(nodes[0], 1); - let as_raa = get_event_msg!( - nodes[0], - MessageSendEvent::SendRevokeAndACK, - node_b_id - ); + let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); // Route the second payment, generating an update_add_htlc/commitment_signed let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = @@ -2355,10 +2125,7 @@ fn first_message_on_recv_ordering() { // RAA/CS response, which should be generated when we call channel_monitor_update (with the // appropriate HTLC acceptance). nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); - nodes[1].node.handle_commitment_signed_batch_test( - node_a_id, - &payment_event.commitment_msg, - ); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &payment_event.commitment_msg); check_added_monitors!(nodes[1], 1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -2380,16 +2147,10 @@ fn first_message_on_recv_ordering() { let bs_responses = get_revoke_commit_msgs!(nodes[1], node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_responses.0); check_added_monitors!(nodes[0], 1); - nodes[0] - .node - .handle_commitment_signed_batch_test(node_b_id, &bs_responses.1); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_responses.1); check_added_monitors!(nodes[0], 1); - let as_raa = get_event_msg!( - nodes[0], - MessageSendEvent::SendRevokeAndACK, - node_b_id - ); + let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_raa); check_added_monitors!(nodes[1], 1); @@ -2501,10 +2262,7 @@ fn test_monitor_update_fail_claim() { check_added_monitors!(nodes[1], 0); let bs_fulfill_update = get_htlc_update_msgs!(nodes[1], node_a_id); - nodes[0].node.handle_update_fulfill_htlc( - node_b_id, - &bs_fulfill_update.update_fulfill_htlcs[0], - ); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &bs_fulfill_update.update_fulfill_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], bs_fulfill_update.commitment_signed, false); expect_payment_sent!(nodes[0], payment_preimage_1); @@ -2512,14 +2270,8 @@ fn test_monitor_update_fail_claim() { nodes[1].node.process_pending_htlc_forwards(); check_added_monitors!(nodes[1], 1); let bs_forward_update = get_htlc_update_msgs!(nodes[1], node_a_id); - nodes[0].node.handle_update_add_htlc( - node_b_id, - &bs_forward_update.update_add_htlcs[0], - ); - nodes[0].node.handle_update_add_htlc( - node_b_id, - &bs_forward_update.update_add_htlcs[1], - ); + nodes[0].node.handle_update_add_htlc(node_b_id, &bs_forward_update.update_add_htlcs[0]); + nodes[0].node.handle_update_add_htlc(node_b_id, &bs_forward_update.update_add_htlcs[1]); commitment_signed_dance!(nodes[0], nodes[1], bs_forward_update.commitment_signed, false); expect_pending_htlcs_forwardable!(nodes[0]); @@ -2607,10 +2359,7 @@ fn test_monitor_update_on_pending_forwards() { check_added_monitors!(nodes[2], 1); let cs_fail_update = get_htlc_update_msgs!(nodes[2], node_b_id); - nodes[1].node.handle_update_fail_htlc( - node_c_id, - &cs_fail_update.update_fail_htlcs[0], - ); + nodes[1].node.handle_update_fail_htlc(node_c_id, &cs_fail_update.update_fail_htlcs[0]); 
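	// The `commitment_signed_dance!` invoked next is, roughly, this message exchange
	// (a sketch only; the macro additionally asserts monitor-update counts and acts
	// on the failure flags passed to it):
	nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &cs_fail_update.commitment_signed);
	let (raa, cs) = get_revoke_commit_msgs!(nodes[1], node_c_id);
	nodes[2].node.handle_revoke_and_ack(node_b_id, &raa);
	nodes[2].node.handle_commitment_signed_batch_test(node_b_id, &cs);
	let raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, node_b_id);
	nodes[1].node.handle_revoke_and_ack(node_c_id, &raa);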
commitment_signed_dance!(nodes[1], nodes[2], cs_fail_update.commitment_signed, true, true); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -2638,10 +2387,7 @@ fn test_monitor_update_on_pending_forwards() { chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); expect_pending_htlcs_forwardable_and_htlc_handling_failed!( nodes[1], - [HTLCHandlingFailureType::Forward { - node_id: Some(node_c_id), - channel_id: chan_2.2 - }] + [HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_2.2 }] ); check_added_monitors!(nodes[1], 1); @@ -2658,12 +2404,8 @@ fn test_monitor_update_on_pending_forwards() { check_added_monitors!(nodes[1], 0); let bs_updates = get_htlc_update_msgs!(nodes[1], node_a_id); - nodes[0] - .node - .handle_update_fail_htlc(node_b_id, &bs_updates.update_fail_htlcs[0]); - nodes[0] - .node - .handle_update_add_htlc(node_b_id, &bs_updates.update_add_htlcs[0]); + nodes[0].node.handle_update_fail_htlc(node_b_id, &bs_updates.update_fail_htlcs[0]); + nodes[0].node.handle_update_add_htlc(node_b_id, &bs_updates.update_add_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false, true); let events = nodes[0].node.get_and_clear_pending_events(); @@ -2766,10 +2508,7 @@ fn monitor_update_claim_fail_no_response() { expect_payment_claimable!(nodes[1], payment_hash_2, payment_secret_2, 1000000); let bs_updates = get_htlc_update_msgs!(nodes[1], node_a_id); - nodes[0].node.handle_update_fulfill_htlc( - node_b_id, - &bs_updates.update_fulfill_htlcs[0], - ); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &bs_updates.update_fulfill_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false); expect_payment_sent!(nodes[0], payment_preimage_1); @@ -2791,25 +2530,14 @@ fn do_during_funding_monitor_fail( let node_a_id = nodes[0].node.get_our_node_id(); let node_b_id = nodes[1].node.get_our_node_id(); - nodes[0] - .node - .create_channel(node_b_id, 100000, 10001, 43, None, None) - .unwrap(); + nodes[0].node.create_channel(node_b_id, 100000, 10001, 43, None, None).unwrap(); nodes[1].node.handle_open_channel( node_a_id, - &get_event_msg!( - nodes[0], - MessageSendEvent::SendOpenChannel, - node_b_id - ), + &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id), ); nodes[0].node.handle_accept_channel( node_b_id, - &get_event_msg!( - nodes[1], - MessageSendEvent::SendAcceptChannel, - node_a_id - ), + &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id), ); let (temporary_channel_id, funding_tx, funding_output) = @@ -2817,20 +2545,13 @@ fn do_during_funding_monitor_fail( nodes[0] .node - .funding_transaction_generated( - temporary_channel_id, - node_b_id, - funding_tx.clone(), - ) + .funding_transaction_generated(temporary_channel_id, node_b_id, funding_tx.clone()) .unwrap(); check_added_monitors!(nodes[0], 0); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - let funding_created_msg = get_event_msg!( - nodes[0], - MessageSendEvent::SendFundingCreated, - node_b_id - ); + let funding_created_msg = + get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id); let channel_id = ChannelId::v1_from_funding_txid( funding_created_msg.funding_txid.as_byte_array(), funding_created_msg.funding_output_index, @@ -2841,11 +2562,7 @@ fn do_during_funding_monitor_fail( chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[0].node.handle_funding_signed( 
node_b_id, - &get_event_msg!( - nodes[1], - MessageSendEvent::SendFundingSigned, - node_a_id - ), + &get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id), ); check_added_monitors!(nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -2875,11 +2592,7 @@ fn do_during_funding_monitor_fail( confirm_transaction(&nodes[0], &funding_tx); nodes[1].node.handle_channel_ready( node_a_id, - &get_event_msg!( - nodes[0], - MessageSendEvent::SendChannelReady, - node_b_id - ), + &get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, node_b_id), ); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); @@ -2910,11 +2623,7 @@ fn do_during_funding_monitor_fail( confirm_transaction(&nodes[0], &funding_tx); nodes[1].node.handle_channel_ready( node_a_id, - &get_event_msg!( - nodes[0], - MessageSendEvent::SendChannelReady, - node_b_id - ), + &get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, node_b_id), ); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); @@ -2943,11 +2652,7 @@ fn do_during_funding_monitor_fail( } else { nodes[0].node.handle_channel_ready( node_b_id, - &get_event_msg!( - nodes[1], - MessageSendEvent::SendChannelReady, - node_a_id - ), + &get_event_msg!(nodes[1], MessageSendEvent::SendChannelReady, node_a_id), ); confirm_transaction(&nodes[0], &funding_tx); let (channel_ready, channel_id) = @@ -3149,20 +2854,12 @@ fn test_pending_update_fee_ack_on_reconnect() { let as_update_fee_msgs = get_htlc_update_msgs!(nodes[0], node_b_id); assert!(as_update_fee_msgs.update_fee.is_some()); - nodes[1].node.handle_update_fee( - node_a_id, - as_update_fee_msgs.update_fee.as_ref().unwrap(), - ); - nodes[1].node.handle_commitment_signed_batch_test( - node_a_id, - &as_update_fee_msgs.commitment_signed, - ); + nodes[1].node.handle_update_fee(node_a_id, as_update_fee_msgs.update_fee.as_ref().unwrap()); + nodes[1] + .node + .handle_commitment_signed_batch_test(node_a_id, &as_update_fee_msgs.commitment_signed); check_added_monitors!(nodes[1], 1); - let bs_first_raa = get_event_msg!( - nodes[1], - MessageSendEvent::SendRevokeAndACK, - node_a_id - ); + let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); // bs_first_raa is not delivered until it is re-generated after reconnect nodes[0].node.peer_disconnected(node_b_id); @@ -3216,26 +2913,17 @@ fn test_pending_update_fee_ack_on_reconnect() { nodes[0].node.handle_channel_reestablish(node_b_id, &bs_connect_msg); get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, node_b_id); - nodes[0].node.handle_update_add_htlc( - node_b_id, - &bs_initial_send_msgs.update_add_htlcs[0], - ); - nodes[0].node.handle_commitment_signed_batch_test( - node_b_id, - &bs_initial_send_msgs.commitment_signed, - ); + nodes[0].node.handle_update_add_htlc(node_b_id, &bs_initial_send_msgs.update_add_htlcs[0]); + nodes[0] + .node + .handle_commitment_signed_batch_test(node_b_id, &bs_initial_send_msgs.commitment_signed); check_added_monitors!(nodes[0], 1); nodes[1].node.handle_revoke_and_ack( node_a_id, - &get_event_msg!( - nodes[0], - MessageSendEvent::SendRevokeAndACK, - node_b_id - ), + &get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id), ); check_added_monitors!(nodes[1], 1); - let bs_second_cs = - get_htlc_update_msgs!(nodes[1], node_a_id).commitment_signed; + let bs_second_cs = 
get_htlc_update_msgs!(nodes[1], node_a_id).commitment_signed; nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_first_raa); check_added_monitors!(nodes[0], 1); @@ -3244,26 +2932,16 @@ fn test_pending_update_fee_ack_on_reconnect() { &get_htlc_update_msgs!(nodes[0], node_b_id).commitment_signed, ); check_added_monitors!(nodes[1], 1); - let bs_third_raa = get_event_msg!( - nodes[1], - MessageSendEvent::SendRevokeAndACK, - node_a_id - ); + let bs_third_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); - nodes[0] - .node - .handle_commitment_signed_batch_test(node_b_id, &bs_second_cs); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_second_cs); check_added_monitors!(nodes[0], 1); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_third_raa); check_added_monitors!(nodes[0], 1); nodes[1].node.handle_revoke_and_ack( node_a_id, - &get_event_msg!( - nodes[0], - MessageSendEvent::SendRevokeAndACK, - node_b_id - ), + &get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id), ); check_added_monitors!(nodes[1], 1); @@ -3306,37 +2984,22 @@ fn test_fail_htlc_on_broadcast_after_claim() { expect_payment_claimed!(nodes[2], payment_hash, 2000); let cs_updates = get_htlc_update_msgs!(nodes[2], node_b_id); - nodes[1].node.handle_update_fulfill_htlc( - node_c_id, - &cs_updates.update_fulfill_htlcs[0], - ); + nodes[1].node.handle_update_fulfill_htlc(node_c_id, &cs_updates.update_fulfill_htlcs[0]); let bs_updates = get_htlc_update_msgs!(nodes[1], node_a_id); check_added_monitors!(nodes[1], 1); expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); mine_transaction(&nodes[1], &bs_txn[0]); - check_closed_event!( - nodes[1], - 1, - ClosureReason::CommitmentTxConfirmed, - [node_c_id], - 100000 - ); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_c_id], 100000); check_closed_broadcast!(nodes[1], true); connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1); check_added_monitors!(nodes[1], 1); expect_pending_htlcs_forwardable_and_htlc_handling_failed!( nodes[1], - [HTLCHandlingFailureType::Forward { - node_id: Some(node_c_id), - channel_id: chan_id_2 - }] + [HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_id_2 }] ); - nodes[0].node.handle_update_fulfill_htlc( - node_b_id, - &bs_updates.update_fulfill_htlcs[0], - ); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &bs_updates.update_fulfill_htlcs[0]); expect_payment_sent(&nodes[0], payment_preimage, None, false, false); commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, true, true); expect_payment_path_successful!(nodes[0]); @@ -3366,10 +3029,7 @@ fn do_update_fee_resend_test(deliver_update: bool, parallel_updates: bool) { let update_msgs = get_htlc_update_msgs!(nodes[0], node_b_id); assert!(update_msgs.update_fee.is_some()); if deliver_update { - nodes[1].node.handle_update_fee( - node_a_id, - update_msgs.update_fee.as_ref().unwrap(), - ); + nodes[1].node.handle_update_fee(node_a_id, update_msgs.update_fee.as_ref().unwrap()); } if parallel_updates { @@ -3429,46 +3089,27 @@ fn do_update_fee_resend_test(deliver_update: bool, parallel_updates: bool) { panic!(); }; assert!(update_msgs.update_fee.is_some()); - nodes[1].node.handle_update_fee( - node_a_id, - update_msgs.update_fee.as_ref().unwrap(), - ); + nodes[1].node.handle_update_fee(node_a_id, update_msgs.update_fee.as_ref().unwrap()); if parallel_updates { - nodes[1].node.handle_commitment_signed_batch_test( - node_a_id, - 
&update_msgs.commitment_signed, - ); + nodes[1] + .node + .handle_commitment_signed_batch_test(node_a_id, &update_msgs.commitment_signed); check_added_monitors!(nodes[1], 1); - let (bs_first_raa, bs_first_cs) = - get_revoke_commit_msgs!(nodes[1], node_a_id); + let (bs_first_raa, bs_first_cs) = get_revoke_commit_msgs!(nodes[1], node_a_id); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_first_raa); check_added_monitors!(nodes[0], 1); let as_second_update = get_htlc_update_msgs!(nodes[0], node_b_id); - nodes[0] - .node - .handle_commitment_signed_batch_test(node_b_id, &bs_first_cs); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_first_cs); check_added_monitors!(nodes[0], 1); - let as_first_raa = get_event_msg!( - nodes[0], - MessageSendEvent::SendRevokeAndACK, - node_b_id - ); + let as_first_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); - nodes[1].node.handle_update_fee( - node_a_id, - as_second_update.update_fee.as_ref().unwrap(), - ); - nodes[1].node.handle_commitment_signed_batch_test( - node_a_id, - &as_second_update.commitment_signed, - ); + nodes[1].node.handle_update_fee(node_a_id, as_second_update.update_fee.as_ref().unwrap()); + nodes[1] + .node + .handle_commitment_signed_batch_test(node_a_id, &as_second_update.commitment_signed); check_added_monitors!(nodes[1], 1); - let bs_second_raa = get_event_msg!( - nodes[1], - MessageSendEvent::SendRevokeAndACK, - node_a_id - ); + let bs_second_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, node_a_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_first_raa); let bs_second_cs = get_htlc_update_msgs!(nodes[1], node_a_id); @@ -3477,16 +3118,11 @@ fn do_update_fee_resend_test(deliver_update: bool, parallel_updates: bool) { nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_second_raa); check_added_monitors!(nodes[0], 1); - nodes[0].node.handle_commitment_signed_batch_test( - node_b_id, - &bs_second_cs.commitment_signed, - ); + nodes[0] + .node + .handle_commitment_signed_batch_test(node_b_id, &bs_second_cs.commitment_signed); check_added_monitors!(nodes[0], 1); - let as_second_raa = get_event_msg!( - nodes[0], - MessageSendEvent::SendRevokeAndACK, - node_b_id - ); + let as_second_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_second_raa); check_added_monitors!(nodes[1], 1); @@ -3580,9 +3216,7 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { check_added_monitors!(nodes[0], 1); nodes[1].node.handle_update_add_htlc(node_a_id, &send.msgs[0]); - nodes[1] - .node - .handle_commitment_signed_batch_test(node_a_id, &send.commitment_msg); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &send.commitment_msg); check_added_monitors!(nodes[1], 1); let (raa, cs) = get_revoke_commit_msgs!(nodes[1], node_a_id); @@ -3640,15 +3274,11 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); assert_eq!(reestablish_2.len(), 1); - nodes[1] - .node - .handle_channel_reestablish(node_a_id, &reestablish_1[0]); + nodes[1].node.handle_channel_reestablish(node_a_id, &reestablish_1[0]); let resp_1 = handle_chan_reestablish_msgs!(nodes[1], nodes[0]); check_added_monitors!(nodes[1], 0); - nodes[0] - .node - .handle_channel_reestablish(node_b_id, &reestablish_2[0]); + nodes[0].node.handle_channel_reestablish(node_b_id, &reestablish_2[0]); let resp_0 = 
handle_chan_reestablish_msgs!(nodes[0], nodes[1]); assert!(resp_0.0.is_none()); @@ -3710,31 +3340,19 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { assert!(updates.update_fail_malformed_htlcs.is_empty()); assert!(updates.update_fee.is_none()); assert_eq!(updates.update_fulfill_htlcs.len(), 1); - nodes[1].node.handle_update_fulfill_htlc( - node_a_id, - &updates.update_fulfill_htlcs[0], - ); + nodes[1].node.handle_update_fulfill_htlc(node_a_id, &updates.update_fulfill_htlcs[0]); expect_payment_sent(&nodes[1], payment_preimage_0, None, false, false); assert_eq!(updates.update_add_htlcs.len(), 1); - nodes[1].node.handle_update_add_htlc( - node_a_id, - &updates.update_add_htlcs[0], - ); + nodes[1].node.handle_update_add_htlc(node_a_id, &updates.update_add_htlcs[0]); updates.commitment_signed }, _ => panic!("Unexpected event type!"), }; - nodes[1] - .node - .handle_commitment_signed_batch_test(node_a_id, &commitment_msg); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &commitment_msg); check_added_monitors!(nodes[1], 1); - let as_revoke_and_ack = get_event_msg!( - nodes[0], - MessageSendEvent::SendRevokeAndACK, - node_b_id - ); + let as_revoke_and_ack = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_revoke_and_ack(node_a_id, &as_revoke_and_ack); expect_pending_htlcs_forwardable!(nodes[1]); expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 100000); @@ -3814,10 +3432,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); - nodes[1].node.handle_commitment_signed_batch_test( - node_a_id, - &send_event.commitment_msg, - ); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &send_event.commitment_msg); check_added_monitors!(nodes[1], 1); let (bs_raa, bs_cs) = get_revoke_commit_msgs!(nodes[1], node_a_id); @@ -3826,11 +3441,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &bs_cs); check_added_monitors!(nodes[0], 1); - as_raa = Some(get_event_msg!( - nodes[0], - MessageSendEvent::SendRevokeAndACK, - node_b_id - )); + as_raa = Some(get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, node_b_id)); } let fulfill_msg = @@ -3888,10 +3499,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f reconnect_nodes(reconnect_args); expect_pending_htlcs_forwardable_and_htlc_handling_failed!( nodes[1], - [HTLCHandlingFailureType::Forward { - node_id: Some(node_c_id), - channel_id: chan_id_2 - }] + [HTLCHandlingFailureType::Forward { node_id: Some(node_c_id), channel_id: chan_id_2 }] ); } else { let mut reconnect_args = ReconnectArgs::new(&nodes[1], &nodes[2]); @@ -3982,11 +3590,7 @@ fn test_temporary_error_during_shutdown() { nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); nodes[1].node.handle_closing_signed( node_a_id, - &get_event_msg!( - nodes[0], - MessageSendEvent::SendClosingSigned, - node_b_id - ), + &get_event_msg!(nodes[0], MessageSendEvent::SendClosingSigned, node_b_id), ); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -4004,19 +3608,12 @@ fn test_temporary_error_during_shutdown() { nodes[0].node.handle_closing_signed( node_b_id, - &get_event_msg!( - nodes[1], - 
MessageSendEvent::SendClosingSigned, - node_a_id - ), + &get_event_msg!(nodes[1], MessageSendEvent::SendClosingSigned, node_a_id), ); - let (_, closing_signed_a) = - get_closing_signed_broadcast!(nodes[0].node, node_b_id); + let (_, closing_signed_a) = get_closing_signed_broadcast!(nodes[0].node, node_b_id); let txn_a = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); - nodes[1] - .node - .handle_closing_signed(node_a_id, &closing_signed_a.unwrap()); + nodes[1].node.handle_closing_signed(node_a_id, &closing_signed_a.unwrap()); let (_, none_b) = get_closing_signed_broadcast!(nodes[1].node, node_a_id); assert!(none_b.is_none()); let txn_b = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); @@ -4140,22 +3737,15 @@ fn double_temp_error() { nodes[0].node.handle_update_fulfill_htlc(node_b_id, &update_fulfill_1); check_added_monitors!(nodes[0], 0); expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false); - nodes[0].node.handle_commitment_signed_batch_test( - node_b_id, - &commitment_signed_b1, - ); + nodes[0].node.handle_commitment_signed_batch_test(node_b_id, &commitment_signed_b1); check_added_monitors!(nodes[0], 1); nodes[0].node.process_pending_htlc_forwards(); - let (raa_a1, commitment_signed_a1) = - get_revoke_commit_msgs!(nodes[0], node_b_id); + let (raa_a1, commitment_signed_a1) = get_revoke_commit_msgs!(nodes[0], node_b_id); check_added_monitors!(nodes[1], 0); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); nodes[1].node.handle_revoke_and_ack(node_a_id, &raa_a1); check_added_monitors!(nodes[1], 1); - nodes[1].node.handle_commitment_signed_batch_test( - node_a_id, - &commitment_signed_a1, - ); + nodes[1].node.handle_commitment_signed_batch_test(node_a_id, &commitment_signed_a1); check_added_monitors!(nodes[1], 1); // Complete the second HTLC. @@ -4218,17 +3808,10 @@ fn do_test_outbound_reload_without_init_mon(use_0conf: bool) { let node_a_id = nodes[0].node.get_our_node_id(); let node_b_id = nodes[1].node.get_our_node_id(); - nodes[0] - .node - .create_channel(node_b_id, 100000, 10001, 43, None, None) - .unwrap(); + nodes[0].node.create_channel(node_b_id, 100000, 10001, 43, None, None).unwrap(); nodes[1].node.handle_open_channel( node_a_id, - &get_event_msg!( - nodes[0], - MessageSendEvent::SendOpenChannel, - node_b_id - ), + &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id), ); let events = nodes[1].node.get_and_clear_pending_events(); @@ -4248,12 +3831,7 @@ fn do_test_outbound_reload_without_init_mon(use_0conf: bool) { } else { nodes[1] .node - .accept_inbound_channel( - &temporary_channel_id, - &node_a_id, - 0, - None, - ) + .accept_inbound_channel(&temporary_channel_id, &node_a_id, 0, None) .unwrap(); } }, @@ -4262,11 +3840,7 @@ fn do_test_outbound_reload_without_init_mon(use_0conf: bool) { nodes[0].node.handle_accept_channel( node_b_id, - &get_event_msg!( - nodes[1], - MessageSendEvent::SendAcceptChannel, - node_a_id - ), + &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id), ); let (temporary_channel_id, funding_tx, ..) 
= @@ -4274,19 +3848,12 @@ fn do_test_outbound_reload_without_init_mon(use_0conf: bool) { nodes[0] .node - .funding_transaction_generated( - temporary_channel_id, - node_b_id, - funding_tx.clone(), - ) + .funding_transaction_generated(temporary_channel_id, node_b_id, funding_tx.clone()) .unwrap(); check_added_monitors!(nodes[0], 0); - let funding_created_msg = get_event_msg!( - nodes[0], - MessageSendEvent::SendFundingCreated, - node_b_id - ); + let funding_created_msg = + get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id); nodes[1].node.handle_funding_created(node_a_id, &funding_created_msg); check_added_monitors!(nodes[1], 1); expect_channel_pending_event(&nodes[1], &node_a_id); @@ -4332,13 +3899,7 @@ fn do_test_outbound_reload_without_init_mon(use_0conf: bool) { new_chain_monitor, nodes_0_deserialized ); - check_closed_event!( - nodes[0], - 1, - ClosureReason::DisconnectedPeer, - [node_b_id], - 100000 - ); + check_closed_event!(nodes[0], 1, ClosureReason::DisconnectedPeer, [node_b_id], 100000); assert!(nodes[0].node.list_channels().is_empty()); } @@ -4371,17 +3932,10 @@ fn do_test_inbound_reload_without_init_mon(use_0conf: bool, lock_commitment: boo let node_a_id = nodes[0].node.get_our_node_id(); let node_b_id = nodes[1].node.get_our_node_id(); - nodes[0] - .node - .create_channel(node_b_id, 100000, 10001, 43, None, None) - .unwrap(); + nodes[0].node.create_channel(node_b_id, 100000, 10001, 43, None, None).unwrap(); nodes[1].node.handle_open_channel( node_a_id, - &get_event_msg!( - nodes[0], - MessageSendEvent::SendOpenChannel, - node_b_id - ), + &get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, node_b_id), ); let events = nodes[1].node.get_and_clear_pending_events(); @@ -4401,12 +3955,7 @@ fn do_test_inbound_reload_without_init_mon(use_0conf: bool, lock_commitment: boo } else { nodes[1] .node - .accept_inbound_channel( - &temporary_channel_id, - &node_a_id, - 0, - None, - ) + .accept_inbound_channel(&temporary_channel_id, &node_a_id, 0, None) .unwrap(); } }, @@ -4415,11 +3964,7 @@ fn do_test_inbound_reload_without_init_mon(use_0conf: bool, lock_commitment: boo nodes[0].node.handle_accept_channel( node_b_id, - &get_event_msg!( - nodes[1], - MessageSendEvent::SendAcceptChannel, - node_a_id - ), + &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, node_a_id), ); let (temporary_channel_id, funding_tx, ..) = @@ -4427,19 +3972,12 @@ fn do_test_inbound_reload_without_init_mon(use_0conf: bool, lock_commitment: boo nodes[0] .node - .funding_transaction_generated( - temporary_channel_id, - node_b_id, - funding_tx.clone(), - ) + .funding_transaction_generated(temporary_channel_id, node_b_id, funding_tx.clone()) .unwrap(); check_added_monitors!(nodes[0], 0); - let funding_created_msg = get_event_msg!( - nodes[0], - MessageSendEvent::SendFundingCreated, - node_b_id - ); + let funding_created_msg = + get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, node_b_id); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[1].node.handle_funding_created(node_a_id, &funding_created_msg); check_added_monitors!(nodes[1], 1); @@ -4447,11 +3985,8 @@ fn do_test_inbound_reload_without_init_mon(use_0conf: bool, lock_commitment: boo // nodes[1] happily sends its funding_signed even though its awaiting the persistence of the // initial ChannelMonitor, but it will decline to send its channel_ready even if the funding // transaction is confirmed. 
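	// Sketch of the gating described above, using the calls bound in this test:
	chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
	nodes[1].node.handle_funding_created(node_a_id, &funding_created_msg);
	// funding_signed still goes out despite the pending initial monitor persist...
	let funding_signed_msg =
		get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id);
	confirm_transaction(&nodes[1], &as_funding_tx[0]);
	// ...but channel_ready is withheld until the monitor persist completes:
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());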
- let funding_signed_msg = get_event_msg!( - nodes[1], - MessageSendEvent::SendFundingSigned, - node_a_id - ); + let funding_signed_msg = + get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, node_a_id); nodes[0].node.handle_funding_signed(node_b_id, &funding_signed_msg); check_added_monitors!(nodes[0], 1); @@ -4463,11 +3998,7 @@ fn do_test_inbound_reload_without_init_mon(use_0conf: bool, lock_commitment: boo confirm_transaction(&nodes[1], &as_funding_tx[0]); } if use_0conf || lock_commitment { - let as_ready = get_event_msg!( - nodes[0], - MessageSendEvent::SendChannelReady, - node_b_id - ); + let as_ready = get_event_msg!(nodes[0], MessageSendEvent::SendChannelReady, node_b_id); nodes[1].node.handle_channel_ready(node_a_id, &as_ready); } assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -4490,13 +4021,7 @@ fn do_test_inbound_reload_without_init_mon(use_0conf: bool, lock_commitment: boo nodes_1_deserialized ); - check_closed_event!( - nodes[1], - 1, - ClosureReason::DisconnectedPeer, - [node_a_id], - 100000 - ); + check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer, [node_a_id], 100000); assert!(nodes[1].node.list_channels().is_empty()); } @@ -4539,10 +4064,9 @@ fn test_blocked_chan_preimage_release() { expect_payment_claimed!(nodes[2], payment_hash_1, 1_000_000); let cs_htlc_fulfill_updates = get_htlc_update_msgs!(nodes[2], node_b_id); - nodes[1].node.handle_update_fulfill_htlc( - node_c_id, - &cs_htlc_fulfill_updates.update_fulfill_htlcs[0], - ); + nodes[1] + .node + .handle_update_fulfill_htlc(node_c_id, &cs_htlc_fulfill_updates.update_fulfill_htlcs[0]); do_commitment_signed_dance( &nodes[1], &nodes[2], @@ -4560,10 +4084,9 @@ fn test_blocked_chan_preimage_release() { expect_payment_claimed!(nodes[0], payment_hash_2, 1_000_000); let as_htlc_fulfill_updates = get_htlc_update_msgs!(nodes[0], node_b_id); - nodes[1].node.handle_update_fulfill_htlc( - node_a_id, - &as_htlc_fulfill_updates.update_fulfill_htlcs[0], - ); + nodes[1] + .node + .handle_update_fulfill_htlc(node_a_id, &as_htlc_fulfill_updates.update_fulfill_htlcs[0]); check_added_monitors(&nodes[1], 1); // We generate only a preimage monitor update assert!(get_monitor!(nodes[1], chan_id_2).get_stored_preimages().contains_key(&payment_hash_2)); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); @@ -4571,10 +4094,9 @@ fn test_blocked_chan_preimage_release() { // Finish the CS dance between nodes[0] and nodes[1]. Note that until the event handling, the // update_fulfill_htlc + CS is held, even though the preimage is already on disk for the // channel. 
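	// The invariant just described, reduced to the two asserts it rests on (both
	// appear in this test): the preimage is already durable in the ChannelMonitor
	// even while the peer-facing update_fulfill_htlc + commitment_signed are held:
	assert!(get_monitor!(nodes[1], chan_id_2).get_stored_preimages().contains_key(&payment_hash_2));
	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());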
- nodes[1].node.handle_commitment_signed_batch_test( - node_a_id, - &as_htlc_fulfill_updates.commitment_signed, - ); + nodes[1] + .node + .handle_commitment_signed_batch_test(node_a_id, &as_htlc_fulfill_updates.commitment_signed); check_added_monitors(&nodes[1], 1); let (a, raa) = do_main_commitment_signed_dance(&nodes[1], &nodes[0], false); assert!(a.is_none()); @@ -4606,10 +4128,9 @@ fn test_blocked_chan_preimage_release() { let bs_htlc_fulfill_updates = get_htlc_update_msgs!(nodes[1], node_c_id); check_added_monitors(&nodes[1], 1); - nodes[2].node.handle_update_fulfill_htlc( - node_b_id, - &bs_htlc_fulfill_updates.update_fulfill_htlcs[0], - ); + nodes[2] + .node + .handle_update_fulfill_htlc(node_b_id, &bs_htlc_fulfill_updates.update_fulfill_htlcs[0]); do_commitment_signed_dance( &nodes[2], &nodes[1], @@ -4666,10 +4187,7 @@ fn do_test_inverted_mon_completion_order( chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); let cs_updates = get_htlc_update_msgs(&nodes[2], &node_b_id); - nodes[1].node.handle_update_fulfill_htlc( - node_c_id, - &cs_updates.update_fulfill_htlcs[0], - ); + nodes[1].node.handle_update_fulfill_htlc(node_c_id, &cs_updates.update_fulfill_htlcs[0]); // B generates a new monitor update for the A <-> B channel, but doesn't send the new messages // for it since the monitor update is marked in-progress. @@ -4679,26 +4197,16 @@ fn do_test_inverted_mon_completion_order( // Now step the Commitment Signed Dance between B and C forward a bit (or fully), ensuring we // won't get the preimage when the nodes reconnect and we have to get it from the // ChannelMonitor. - nodes[1].node.handle_commitment_signed_batch_test( - node_c_id, - &cs_updates.commitment_signed, - ); + nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &cs_updates.commitment_signed); check_added_monitors(&nodes[1], 1); if complete_bc_commitment_dance { let (bs_revoke_and_ack, bs_commitment_signed) = get_revoke_commit_msgs!(nodes[1], node_c_id); nodes[2].node.handle_revoke_and_ack(node_b_id, &bs_revoke_and_ack); check_added_monitors(&nodes[2], 1); - nodes[2].node.handle_commitment_signed_batch_test( - node_b_id, - &bs_commitment_signed, - ); + nodes[2].node.handle_commitment_signed_batch_test(node_b_id, &bs_commitment_signed); check_added_monitors(&nodes[2], 1); - let cs_raa = get_event_msg!( - nodes[2], - MessageSendEvent::SendRevokeAndACK, - node_b_id - ); + let cs_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, node_b_id); // At this point node B still hasn't persisted the `ChannelMonitorUpdate` with the // preimage in the A <-> B channel, which will prevent it from persisting the @@ -4827,10 +4335,7 @@ fn do_test_inverted_mon_completion_order( let bs_updates = get_htlc_update_msgs(&nodes[1], &node_a_id); check_added_monitors(&nodes[1], 1); - nodes[0].node.handle_update_fulfill_htlc( - node_b_id, - &bs_updates.update_fulfill_htlcs[0], - ); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &bs_updates.update_fulfill_htlcs[0]); do_commitment_signed_dance(&nodes[0], &nodes[1], &bs_updates.commitment_signed, false, false); expect_payment_forwarded!( @@ -4897,10 +4402,7 @@ fn do_test_durable_preimages_on_closed_channel( chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); let cs_updates = get_htlc_update_msgs(&nodes[2], &node_b_id); - nodes[1].node.handle_update_fulfill_htlc( - node_c_id, - &cs_updates.update_fulfill_htlcs[0], - ); + nodes[1].node.handle_update_fulfill_htlc(node_c_id, &cs_updates.update_fulfill_htlcs[0]); // 
B generates a new monitor update for the A <-> B channel, but doesn't send the new messages // for it since the monitor update is marked in-progress. @@ -4910,10 +4412,7 @@ fn do_test_durable_preimages_on_closed_channel( // Now step the Commitment Signed Dance between B and C forward a bit, ensuring we won't get // the preimage when the nodes reconnect, at which point we have to ensure we get it from the // ChannelMonitor. - nodes[1].node.handle_commitment_signed_batch_test( - node_c_id, - &cs_updates.commitment_signed, - ); + nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &cs_updates.commitment_signed); check_added_monitors(&nodes[1], 1); let _ = get_revoke_commit_msgs!(nodes[1], node_c_id); @@ -4945,11 +4444,7 @@ fn do_test_durable_preimages_on_closed_channel( chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[1] .node - .force_close_broadcasting_latest_txn( - &chan_id_ab, - &node_a_id, - error_message.to_string(), - ) + .force_close_broadcasting_latest_txn(&chan_id_ab, &node_a_id, error_message.to_string()) .unwrap(); check_closed_broadcast(&nodes[1], 1, true); check_closed_event( @@ -4989,11 +4484,7 @@ fn do_test_durable_preimages_on_closed_channel( nodes[0] .node - .force_close_broadcasting_latest_txn( - &chan_id_ab, - &node_b_id, - error_message.to_string(), - ) + .force_close_broadcasting_latest_txn(&chan_id_ab, &node_b_id, error_message.to_string()) .unwrap(); check_closed_event( &nodes[0], @@ -5144,10 +4635,7 @@ fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) { chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); let cs_updates = get_htlc_update_msgs(&nodes[2], &node_b_id); - nodes[1].node.handle_update_fulfill_htlc( - node_c_id, - &cs_updates.update_fulfill_htlcs[0], - ); + nodes[1].node.handle_update_fulfill_htlc(node_c_id, &cs_updates.update_fulfill_htlcs[0]); // B generates a new monitor update for the A <-> B channel, but doesn't send the new messages // for it since the monitor update is marked in-progress. @@ -5156,10 +4644,7 @@ fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) { // Now step the Commitment Signed Dance between B and C and check that after the final RAA B // doesn't let the preimage-removing monitor update fly. - nodes[1].node.handle_commitment_signed_batch_test( - node_c_id, - &cs_updates.commitment_signed, - ); + nodes[1].node.handle_commitment_signed_batch_test(node_c_id, &cs_updates.commitment_signed); check_added_monitors(&nodes[1], 1); let (bs_raa, bs_cs) = get_revoke_commit_msgs!(nodes[1], node_c_id); @@ -5168,11 +4653,7 @@ fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) { nodes[2].node.handle_commitment_signed_batch_test(node_b_id, &bs_cs); check_added_monitors(&nodes[2], 1); - let cs_final_raa = get_event_msg!( - nodes[2], - MessageSendEvent::SendRevokeAndACK, - node_b_id - ); + let cs_final_raa = get_event_msg!(nodes[2], MessageSendEvent::SendRevokeAndACK, node_b_id); nodes[1].node.handle_revoke_and_ack(node_c_id, &cs_final_raa); check_added_monitors(&nodes[1], 0); @@ -5197,11 +4678,7 @@ fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) { // (as learned about during the on-reload block connection). 
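// A sketch of the force-close bookkeeping the next hunk compresses (the calls
// below are taken verbatim from this patch):
//
//     nodes[0]
//         .node
//         .force_close_broadcasting_latest_txn(&chan_id_ab, &node_b_id, error_message.to_string())
//         .unwrap();
//     check_added_monitors!(nodes[0], 1);
//     check_closed_broadcast!(nodes[0], true);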
nodes[0] .node - .force_close_broadcasting_latest_txn( - &chan_id_ab, - &node_b_id, - error_message.to_string(), - ) + .force_close_broadcasting_latest_txn(&chan_id_ab, &node_b_id, error_message.to_string()) .unwrap(); check_added_monitors!(nodes[0], 1); check_closed_broadcast!(nodes[0], true); @@ -5293,18 +4770,12 @@ fn do_test_glacial_peer_cant_hang(hold_chan_a: bool) { // The first update will be on the A <-> B channel, which we optionally allow to complete. chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); } - nodes[1].node.handle_update_fulfill_htlc( - node_c_id, - &cs_updates.update_fulfill_htlcs[0], - ); + nodes[1].node.handle_update_fulfill_htlc(node_c_id, &cs_updates.update_fulfill_htlcs[0]); check_added_monitors(&nodes[1], 1); if !hold_chan_a { let bs_updates = get_htlc_update_msgs(&nodes[1], &node_a_id); - nodes[0].node.handle_update_fulfill_htlc( - node_b_id, - &bs_updates.update_fulfill_htlcs[0], - ); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &bs_updates.update_fulfill_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, false); expect_payment_sent!(&nodes[0], payment_preimage); } @@ -5385,10 +4856,7 @@ fn do_test_glacial_peer_cant_hang(hold_chan_a: bool) { assert_eq!(a_update.len(), 1); assert_eq!(c_update.len(), 1); - nodes[0].node.handle_update_fulfill_htlc( - node_b_id, - &a_update[0].update_fulfill_htlcs[0], - ); + nodes[0].node.handle_update_fulfill_htlc(node_b_id, &a_update[0].update_fulfill_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], a_update[0].commitment_signed, false); expect_payment_sent(&nodes[0], payment_preimage, None, true, true); expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false); @@ -5468,18 +4936,12 @@ fn test_partial_claim_mon_update_compl_actions() { expect_payment_claimed!(&nodes[3], payment_hash, 200_000); let updates = get_htlc_update_msgs(&nodes[3], &node_b_id); - nodes[1].node.handle_update_fulfill_htlc( - node_d_id, - &updates.update_fulfill_htlcs[0], - ); + nodes[1].node.handle_update_fulfill_htlc(node_d_id, &updates.update_fulfill_htlcs[0]); check_added_monitors(&nodes[1], 1); expect_payment_forwarded!(nodes[1], nodes[0], nodes[3], Some(1000), false, false); let _bs_updates_for_a = get_htlc_update_msgs(&nodes[1], &node_a_id); - nodes[1].node.handle_commitment_signed_batch_test( - node_d_id, - &updates.commitment_signed, - ); + nodes[1].node.handle_commitment_signed_batch_test(node_d_id, &updates.commitment_signed); check_added_monitors(&nodes[1], 1); let (bs_raa, bs_cs) = get_revoke_commit_msgs(&nodes[1], &node_d_id); @@ -5511,19 +4973,14 @@ fn test_partial_claim_mon_update_compl_actions() { match remove_first_msg_event_to_node(&node_c_id, &mut ds_msgs) { MessageSendEvent::UpdateHTLCs { updates, .. 
} => { - nodes[2].node.handle_update_fulfill_htlc( - node_d_id, - &updates.update_fulfill_htlcs[0], - ); + nodes[2].node.handle_update_fulfill_htlc(node_d_id, &updates.update_fulfill_htlcs[0]); check_added_monitors(&nodes[2], 1); expect_payment_forwarded!(nodes[2], nodes[0], nodes[3], Some(1000), false, false); - let _cs_updates_for_a = - get_htlc_update_msgs(&nodes[2], &node_a_id); + let _cs_updates_for_a = get_htlc_update_msgs(&nodes[2], &node_a_id); - nodes[2].node.handle_commitment_signed_batch_test( - node_d_id, - &updates.commitment_signed, - ); + nodes[2] + .node + .handle_commitment_signed_batch_test(node_d_id, &updates.commitment_signed); check_added_monitors(&nodes[2], 1); }, _ => panic!(), @@ -5537,11 +4994,7 @@ fn test_partial_claim_mon_update_compl_actions() { nodes[3].node.handle_commitment_signed_batch_test(node_c_id, &cs_cs); check_added_monitors(&nodes[3], 1); - let ds_raa = get_event_msg!( - nodes[3], - MessageSendEvent::SendRevokeAndACK, - node_c_id - ); + let ds_raa = get_event_msg!(nodes[3], MessageSendEvent::SendRevokeAndACK, node_c_id); nodes[2].node.handle_revoke_and_ack(node_d_id, &ds_raa); check_added_monitors(&nodes[2], 1); @@ -5583,11 +5036,7 @@ fn test_claim_to_closed_channel_blocks_forwarded_preimage_removal() { nodes[0] .node - .force_close_broadcasting_latest_txn( - &chan_a.2, - &node_b_id, - String::new(), - ) + .force_close_broadcasting_latest_txn(&chan_a.2, &node_b_id, String::new()) .unwrap(); check_added_monitors!(nodes[0], 1); let a_reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; @@ -5599,13 +5048,7 @@ fn test_claim_to_closed_channel_blocks_forwarded_preimage_removal() { mine_transaction(&nodes[1], &as_commit_tx[0]); check_added_monitors!(nodes[1], 1); - check_closed_event!( - nodes[1], - 1, - ClosureReason::CommitmentTxConfirmed, - [node_a_id], - 1000000 - ); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 1000000); check_closed_broadcast!(nodes[1], true); // Now that B has a pending forwarded payment across it with the inbound edge on-chain, claim @@ -5616,10 +5059,7 @@ fn test_claim_to_closed_channel_blocks_forwarded_preimage_removal() { let updates = get_htlc_update_msgs!(nodes[2], node_b_id); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - nodes[1].node.handle_update_fulfill_htlc( - node_c_id, - &updates.update_fulfill_htlcs[0], - ); + nodes[1].node.handle_update_fulfill_htlc(node_c_id, &updates.update_fulfill_htlcs[0]); check_added_monitors!(nodes[1], 1); commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false); @@ -5671,11 +5111,7 @@ fn test_claim_to_closed_channel_blocks_claimed_event() { nodes[0] .node - .force_close_broadcasting_latest_txn( - &chan_a.2, - &node_b_id, - String::new(), - ) + .force_close_broadcasting_latest_txn(&chan_a.2, &node_b_id, String::new()) .unwrap(); check_added_monitors!(nodes[0], 1); let a_reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; @@ -5687,13 +5123,7 @@ fn test_claim_to_closed_channel_blocks_claimed_event() { mine_transaction(&nodes[1], &as_commit_tx[0]); check_added_monitors!(nodes[1], 1); - check_closed_event!( - nodes[1], - 1, - ClosureReason::CommitmentTxConfirmed, - [node_a_id], - 1000000 - ); + check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [node_a_id], 1000000); check_closed_broadcast!(nodes[1], true); // Now that B has a pending payment with the inbound HTLC on a closed channel, claim the From 
8ca43a16b6f9a17a38b9702d856be51480fa4a39 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sun, 18 May 2025 20:13:31 +0000 Subject: [PATCH 11/12] Introduce a fn to fetch the monitor update id in chanmon tests --- lightning/src/ln/chanmon_update_fail_tests.rs | 256 +++--------------- 1 file changed, 36 insertions(+), 220 deletions(-) diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index c9798e914e3..4216322e0d2 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -38,6 +38,13 @@ use crate::prelude::*; use crate::sync::{Arc, Mutex}; use bitcoin::hashes::Hash; +fn get_latest_mon_update_id<'a, 'b, 'c>( + node: &Node<'a, 'b, 'c>, channel_id: ChannelId, +) -> (u64, u64) { + let monitor_id_state = node.chain_monitor.latest_monitor_update_id.lock().unwrap(); + monitor_id_state.get(&channel_id).unwrap().clone() +} + #[test] fn test_monitor_and_persister_update_fail() { // Test that if both updating the `ChannelMonitor` and persisting the updated @@ -207,14 +214,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { } chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = nodes[0] - .chain_monitor - .latest_monitor_update_id - .lock() - .unwrap() - .get(&channel_id) - .unwrap() - .clone(); + let (latest_update, _) = get_latest_mon_update_id(&nodes[0], channel_id); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors!(nodes[0], 0); @@ -425,14 +425,7 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { // Now fix monitor updating... chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = nodes[0] - .chain_monitor - .latest_monitor_update_id - .lock() - .unwrap() - .get(&channel_id) - .unwrap() - .clone(); + let (latest_update, _) = get_latest_mon_update_id(&nodes[0], channel_id); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors!(nodes[0], 0); @@ -826,14 +819,7 @@ fn test_monitor_update_fail_cs() { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = nodes[1] - .chain_monitor - .latest_monitor_update_id - .lock() - .unwrap() - .get(&channel_id) - .unwrap() - .clone(); + let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors!(nodes[1], 0); let responses = nodes[1].node.get_and_clear_pending_msg_events(); @@ -868,14 +854,7 @@ fn test_monitor_update_fail_cs() { } chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = nodes[0] - .chain_monitor - .latest_monitor_update_id - .lock() - .unwrap() - .get(&channel_id) - .unwrap() - .clone(); + let (latest_update, _) = get_latest_mon_update_id(&nodes[0], channel_id); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors!(nodes[0], 0); @@ -966,14 +945,7 @@ fn test_monitor_update_fail_no_rebroadcast() { check_added_monitors!(nodes[1], 1); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = nodes[1] - .chain_monitor - .latest_monitor_update_id - .lock() - 
.unwrap() - .get(&channel_id) - .unwrap() - .clone(); + let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 0); @@ -1057,14 +1029,7 @@ fn test_monitor_update_raa_while_paused() { assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[0], 1); - let (latest_update, _) = nodes[0] - .chain_monitor - .latest_monitor_update_id - .lock() - .unwrap() - .get(&channel_id) - .unwrap() - .clone(); + let (latest_update, _) = get_latest_mon_update_id(&nodes[0], channel_id); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors!(nodes[0], 0); @@ -1234,14 +1199,7 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { // Restore monitor updating, ensuring we immediately get a fail-back update and a // update_add update. chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = nodes[1] - .chain_monitor - .latest_monitor_update_id - .lock() - .unwrap() - .get(&chan_2.2) - .unwrap() - .clone(); + let (latest_update, _) = get_latest_mon_update_id(&nodes[1], chan_2.2); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_2.2, latest_update); check_added_monitors!(nodes[1], 0); expect_pending_htlcs_forwardable_and_htlc_handling_failed!( @@ -1563,14 +1521,7 @@ fn test_monitor_update_fail_reestablish() { ); // The "disabled" bit should be unset as we just reconnected chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = nodes[1] - .chain_monitor - .latest_monitor_update_id - .lock() - .unwrap() - .get(&chan_1.2) - .unwrap() - .clone(); + let (latest_update, _) = get_latest_mon_update_id(&nodes[1], chan_1.2); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_1.2, latest_update); check_added_monitors!(nodes[1], 0); @@ -1669,14 +1620,7 @@ fn raa_no_response_awaiting_raa_state() { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 1); - let (latest_update, _) = nodes[1] - .chain_monitor - .latest_monitor_update_id - .lock() - .unwrap() - .get(&channel_id) - .unwrap() - .clone(); + let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); // nodes[1] should be AwaitingRAA here! check_added_monitors!(nodes[1], 0); @@ -1838,14 +1782,7 @@ fn claim_while_disconnected_monitor_update_fail() { // Now un-fail the monitor, which will result in B sending its original commitment update, // receiving the commitment update from A, and the resulting commitment dances. 
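// This is the recurring "un-fail" sequence that patch 11's helper compresses;
// a sketch assembled from calls that appear verbatim in these hunks:
//
//     chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed);
//     let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id);
//     nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update);
//     check_added_monitors!(nodes[1], 0);
//
// get_latest_mon_update_id simply clones the (u64, u64) entry kept in the
// test chain monitor's latest_monitor_update_id map for the channel.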
chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = nodes[1] - .chain_monitor - .latest_monitor_update_id - .lock() - .unwrap() - .get(&channel_id) - .unwrap() - .clone(); + let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors!(nodes[1], 0); @@ -2010,14 +1947,7 @@ fn monitor_failed_no_reestablish_response() { get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, node_b_id); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = nodes[1] - .chain_monitor - .latest_monitor_update_id - .lock() - .unwrap() - .get(&channel_id) - .unwrap() - .clone(); + let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors!(nodes[1], 0); let bs_responses = get_revoke_commit_msgs!(nodes[1], node_a_id); @@ -2130,14 +2060,7 @@ fn first_message_on_recv_ordering() { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = nodes[1] - .chain_monitor - .latest_monitor_update_id - .lock() - .unwrap() - .get(&channel_id) - .unwrap() - .clone(); + let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors!(nodes[1], 0); @@ -2249,14 +2172,7 @@ fn test_monitor_update_fail_claim() { // Now restore monitor updating on the 0<->1 channel and claim the funds on B. 
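// Note in the next hunk that completing the paused update is what releases
// the claim: expect_payment_claimed! fires only after
// force_channel_monitor_updated, and no new monitor update is added
// (check_added_monitors!(nodes[1], 0)).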
let channel_id = chan_1.2; - let (latest_update, _) = nodes[1] - .chain_monitor - .latest_monitor_update_id - .lock() - .unwrap() - .get(&channel_id) - .unwrap() - .clone(); + let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); check_added_monitors!(nodes[1], 0); @@ -2392,14 +2308,7 @@ fn test_monitor_update_on_pending_forwards() { check_added_monitors!(nodes[1], 1); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = nodes[1] - .chain_monitor - .latest_monitor_update_id - .lock() - .unwrap() - .get(&chan_1.2) - .unwrap() - .clone(); + let (latest_update, _) = get_latest_mon_update_id(&nodes[1], chan_1.2); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_1.2, latest_update); check_added_monitors!(nodes[1], 0); @@ -2489,14 +2398,7 @@ fn monitor_update_claim_fail_no_response() { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = nodes[1] - .chain_monitor - .latest_monitor_update_id - .lock() - .unwrap() - .get(&channel_id) - .unwrap() - .clone(); + let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); check_added_monitors!(nodes[1], 0); @@ -2568,14 +2470,7 @@ fn do_during_funding_monitor_fail( assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = nodes[0] - .chain_monitor - .latest_monitor_update_id - .lock() - .unwrap() - .get(&channel_id) - .unwrap() - .clone(); + let (latest_update, _) = get_latest_mon_update_id(&nodes[0], channel_id); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors!(nodes[0], 0); expect_channel_pending_event(&nodes[0], &node_b_id); @@ -2630,14 +2525,7 @@ fn do_during_funding_monitor_fail( } chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = nodes[1] - .chain_monitor - .latest_monitor_update_id - .lock() - .unwrap() - .get(&channel_id) - .unwrap() - .clone(); + let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); check_added_monitors!(nodes[1], 0); @@ -2778,14 +2666,7 @@ fn test_path_paused_mpp() { // And check that, after we successfully update the monitor for chan_2 we can pass the second // HTLC along to nodes[3] and claim the whole payment back to nodes[0]. 
- let (latest_update, _) = nodes[0] - .chain_monitor - .latest_monitor_update_id - .lock() - .unwrap() - .get(&chan_2_id) - .unwrap() - .clone(); + let (latest_update, _) = get_latest_mon_update_id(&nodes[0], chan_2_id); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_2_id, latest_update); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -3311,14 +3192,7 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { // If we finish updating the monitor, we should free the holding cell right away (this did // not occur prior to #756). chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (mon_id, _) = nodes[0] - .chain_monitor - .latest_monitor_update_id - .lock() - .unwrap() - .get(&chan_id) - .unwrap() - .clone(); + let (mon_id, _) = get_latest_mon_update_id(&nodes[0], chan_id); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_id, mon_id); expect_payment_claimed!(nodes[0], payment_hash_0, 100_000); @@ -3579,14 +3453,7 @@ fn test_temporary_error_during_shutdown() { chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = nodes[0] - .chain_monitor - .latest_monitor_update_id - .lock() - .unwrap() - .get(&channel_id) - .unwrap() - .clone(); + let (latest_update, _) = get_latest_mon_update_id(&nodes[0], channel_id); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); nodes[1].node.handle_closing_signed( node_a_id, @@ -3596,14 +3463,7 @@ fn test_temporary_error_during_shutdown() { assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update, _) = nodes[1] - .chain_monitor - .latest_monitor_update_id - .lock() - .unwrap() - .get(&channel_id) - .unwrap() - .clone(); + let (latest_update, _) = get_latest_mon_update_id(&nodes[1], channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update); nodes[0].node.handle_closing_signed( @@ -3659,14 +3519,7 @@ fn double_temp_error() { // `claim_funds` results in a ChannelMonitorUpdate. 
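// Ordering note for the hunk below: each claim queues its own
// ChannelMonitorUpdate, and the test captures the update id after each claim
// (latest_update_1, then latest_update_2) so the two in-flight updates can be
// force-completed one at a time, oldest first.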
nodes[1].node.claim_funds(payment_preimage_1); check_added_monitors!(nodes[1], 1); - let (latest_update_1, _) = nodes[1] - .chain_monitor - .latest_monitor_update_id - .lock() - .unwrap() - .get(&channel_id) - .unwrap() - .clone(); + let (latest_update_1, _) = get_latest_mon_update_id(&nodes[1], channel_id); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); // Previously, this would've panicked due to a double-call to `Channel::monitor_update_failed`, @@ -3675,14 +3528,7 @@ fn double_temp_error() { check_added_monitors!(nodes[1], 1); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); - let (latest_update_2, _) = nodes[1] - .chain_monitor - .latest_monitor_update_id - .lock() - .unwrap() - .get(&channel_id) - .unwrap() - .clone(); + let (latest_update_2, _) = get_latest_mon_update_id(&nodes[1], channel_id); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(channel_id, latest_update_1); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); check_added_monitors!(nodes[1], 0); @@ -4264,14 +4110,7 @@ fn do_test_inverted_mon_completion_order( // (Finally) complete the A <-> B ChannelMonitorUpdate, ensuring the preimage is durably on // disk in the proper ChannelMonitor, unblocking the B <-> C ChannelMonitor updating // process. - let (_, ab_update_id) = nodes[1] - .chain_monitor - .latest_monitor_update_id - .lock() - .unwrap() - .get(&chan_id_ab) - .unwrap() - .clone(); + let (_, ab_update_id) = get_latest_mon_update_id(&nodes[1], chan_id_ab); nodes[1] .chain_monitor .chain_monitor @@ -4310,14 +4149,7 @@ fn do_test_inverted_mon_completion_order( // ChannelMonitorUpdate hasn't yet completed. reconnect_nodes(ReconnectArgs::new(&nodes[0], &nodes[1])); - let (_, ab_update_id) = nodes[1] - .chain_monitor - .latest_monitor_update_id - .lock() - .unwrap() - .get(&chan_id_ab) - .unwrap() - .clone(); + let (_, ab_update_id) = get_latest_mon_update_id(&nodes[1], chan_id_ab); nodes[1] .chain_monitor .chain_monitor @@ -4562,14 +4394,7 @@ fn do_test_durable_preimages_on_closed_channel( // Once the blocked `ChannelMonitorUpdate` *finally* completes, the pending // `PaymentForwarded` event will finally be released. - let (ab_update_id, _) = nodes[1] - .chain_monitor - .latest_monitor_update_id - .lock() - .unwrap() - .get(&chan_id_ab) - .unwrap() - .clone(); + let (_, ab_update_id) = get_latest_mon_update_id(&nodes[1], chan_id_ab); nodes[1].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_id_ab, ab_update_id); // If the A<->B channel was closed before we reload, we'll replay the claim against it on @@ -4694,8 +4519,7 @@ fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) { mine_transaction_without_consistency_checks(&nodes[1], &as_closing_tx[0]); } - let bc_update_id = - nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_bc).unwrap().1; + let (_, bc_update_id) = get_latest_mon_update_id(&nodes[1], chan_id_bc); let mut events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), if close_during_reload { 2 } else { 1 }); expect_payment_forwarded( @@ -4720,8 +4544,7 @@ fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) { // Once we run event processing the monitor should free, check that it was indeed the B<->C // channel which was updated. 
check_added_monitors(&nodes[1], if close_during_reload { 2 } else { 1 }); - let post_ev_bc_update_id = - nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap().get(&chan_id_bc).unwrap().1; + let (_, post_ev_bc_update_id) = get_latest_mon_update_id(&nodes[1], chan_id_bc); assert!(bc_update_id != post_ev_bc_update_id); // Finally, check that there's nothing left to do on B<->C reconnect and the channel operates @@ -4815,14 +4638,7 @@ fn do_test_glacial_peer_cant_hang(hold_chan_a: bool) { // ...but once we complete the A<->B channel preimage persistence, the B<->C channel // unlocks and we send both peers commitment updates. - let (ab_update_id, _) = nodes[1] - .chain_monitor - .latest_monitor_update_id - .lock() - .unwrap() - .get(&chan_id_ab) - .unwrap() - .clone(); + let (ab_update_id, _) = get_latest_mon_update_id(&nodes[1], chan_id_ab); assert!(nodes[1] .chain_monitor .chain_monitor From b9d0d607aa56b2141ec5b7a05553743bd1e0c0ce Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Sun, 18 May 2025 21:12:58 +0000 Subject: [PATCH 12/12] Clean up various overly vertical nonsense in chanmon fail tests --- lightning/src/ln/chanmon_update_fail_tests.rs | 1104 ++++------------- 1 file changed, 273 insertions(+), 831 deletions(-) diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs index 4216322e0d2..ef8f256ed5e 100644 --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs @@ -33,7 +33,6 @@ use crate::ln::functional_test_utils::*; use crate::util::test_utils; -use crate::io; use crate::prelude::*; use crate::sync::{Arc, Mutex}; use bitcoin::hashes::Hash; @@ -84,12 +83,11 @@ fn test_monitor_and_persister_update_fail() { let chain_mon = { let new_monitor = { let monitor = nodes[0].chain_monitor.chain_monitor.get_monitor(chan.2).unwrap(); - let new_monitor = <(BlockHash, ChannelMonitor)>::read( - &mut io::Cursor::new(&monitor.encode()), + let (_, new_monitor) = <(BlockHash, ChannelMonitor)>::read( + &mut &monitor.encode()[..], (nodes[0].keys_manager, nodes[0].keys_manager), ) - .unwrap() - .1; + .unwrap(); assert!(new_monitor == *monitor); new_monitor }; @@ -121,17 +119,10 @@ fn test_monitor_and_persister_update_fail() { nodes[0].node.handle_update_fulfill_htlc(node_b_id, &updates.update_fulfill_htlcs[0]); { - let mut node_0_per_peer_lock; - let mut node_0_peer_state_lock; - if let Some(channel) = get_channel_ref!( - nodes[0], - nodes[1], - node_0_per_peer_lock, - node_0_peer_state_lock, - chan.2 - ) - .as_funded_mut() - { + let mut per_peer_lock; + let mut peer_state_lock; + let chan_opt = get_channel_ref!(nodes[0], nodes[1], per_peer_lock, peer_state_lock, chan.2); + if let Some(channel) = chan_opt.as_funded_mut() { assert_eq!(updates.commitment_signed.len(), 1); if let Ok(Some(update)) = channel.commitment_signed(&updates.commitment_signed[0], &node_cfgs[0].logger) @@ -188,18 +179,10 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - { - nodes[0] - .node - .send_payment_with_route( - route, - payment_hash_1, - RecipientOnionFields::secret_only(payment_secret_1), - PaymentId(payment_hash_1.0), - ) - .unwrap(); - check_added_monitors!(nodes[0], 1); - } + let onion = RecipientOnionFields::secret_only(payment_secret_1); + let id = PaymentId(payment_hash_1.0); + nodes[0].node.send_payment_with_route(route, payment_hash_1, onion, id).unwrap(); + check_added_monitors!(nodes[0], 1); 
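// This is patch 12's recurring rewrite, sketched once for reference: hoist
// the onion fields and PaymentId into locals so the send collapses onto a
// single line (taken verbatim from the hunks in this patch):
//
//     let onion = RecipientOnionFields::secret_only(payment_secret_1);
//     let id = PaymentId(payment_hash_1.0);
//     nodes[0].node.send_payment_with_route(route, payment_hash_1, onion, id).unwrap();
//     check_added_monitors!(nodes[0], 1);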
assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -260,19 +243,12 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { // Now set it to failed again... let (route, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(&nodes[0], nodes[1], 1000000); - { - chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - nodes[0] - .node - .send_payment_with_route( - route, - payment_hash_2, - RecipientOnionFields::secret_only(payment_secret_2), - PaymentId(payment_hash_2.0), - ) - .unwrap(); - check_added_monitors!(nodes[0], 1); - } + chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); + + let onion = RecipientOnionFields::secret_only(payment_secret_2); + let id = PaymentId(payment_hash_2.0); + nodes[0].node.send_payment_with_route(route, payment_hash_2, onion, id).unwrap(); + check_added_monitors!(nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -285,11 +261,8 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { } // ...and make sure we can force-close a frozen channel - let error_message = "Channel force-closed"; - nodes[0] - .node - .force_close_broadcasting_latest_txn(&channel_id, &node_b_id, error_message.to_string()) - .unwrap(); + let err_msg = "Channel force-closed".to_owned(); + nodes[0].node.force_close_broadcasting_latest_txn(&channel_id, &node_b_id, err_msg).unwrap(); check_added_monitors!(nodes[0], 1); check_closed_broadcast!(nodes[0], true); @@ -297,13 +270,8 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) { // PaymentPathFailed event assert_eq!(nodes[0].node.list_channels().len(), 0); - check_closed_event!( - nodes[0], - 1, - ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, - [node_b_id], - 100000 - ); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + check_closed_event!(nodes[0], 1, reason, [node_b_id], 100000); } #[test] @@ -349,19 +317,11 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { // Now try to send a second payment which will fail to send let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); - { - chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - nodes[0] - .node - .send_payment_with_route( - route, - payment_hash_2, - RecipientOnionFields::secret_only(payment_secret_2), - PaymentId(payment_hash_2.0), - ) - .unwrap(); - check_added_monitors!(nodes[0], 1); - } + chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); + let onion = RecipientOnionFields::secret_only(payment_secret_2); + let id = PaymentId(payment_hash_2.0); + nodes[0].node.send_payment_with_route(route, payment_hash_2, onion, id).unwrap(); + check_added_monitors!(nodes[0], 1); assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); @@ -434,32 +394,16 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { nodes[0].node.peer_disconnected(node_b_id); nodes[1].node.peer_disconnected(node_a_id); - nodes[0] - .node - .peer_connected( - node_b_id, - &msgs::Init { - features: nodes[1].node.init_features(), - networks: None, - remote_network_address: None, - }, - true, - ) - .unwrap(); + let 
init_msg = msgs::Init { + features: nodes[1].node.init_features(), + networks: None, + remote_network_address: None, + }; + + nodes[0].node.peer_connected(node_b_id, &init_msg, true).unwrap(); let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]); assert_eq!(reestablish_1.len(), 1); - nodes[1] - .node - .peer_connected( - node_a_id, - &msgs::Init { - features: nodes[0].node.init_features(), - networks: None, - remote_network_address: None, - }, - false, - ) - .unwrap(); + nodes[1].node.peer_connected(node_a_id, &init_msg, false).unwrap(); let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); assert_eq!(reestablish_2.len(), 1); @@ -479,32 +423,15 @@ fn do_test_monitor_temporary_update_fail(disconnect_count: usize) { assert!(nodes[0].node.get_and_clear_pending_events().is_empty()); assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - nodes[0] - .node - .peer_connected( - node_b_id, - &msgs::Init { - features: nodes[1].node.init_features(), - networks: None, - remote_network_address: None, - }, - true, - ) - .unwrap(); + let init_msg = msgs::Init { + features: nodes[1].node.init_features(), + networks: None, + remote_network_address: None, + }; + nodes[0].node.peer_connected(node_b_id, &init_msg, true).unwrap(); let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]); assert_eq!(reestablish_1.len(), 1); - nodes[1] - .node - .peer_connected( - node_a_id, - &msgs::Init { - features: nodes[0].node.init_features(), - networks: None, - remote_network_address: None, - }, - false, - ) - .unwrap(); + nodes[1].node.peer_connected(node_a_id, &init_msg, false).unwrap(); let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); assert_eq!(reestablish_2.len(), 1); @@ -795,18 +722,10 @@ fn test_monitor_update_fail_cs() { let (route, our_payment_hash, payment_preimage, our_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); - { - nodes[0] - .node - .send_payment_with_route( - route, - our_payment_hash, - RecipientOnionFields::secret_only(our_payment_secret), - PaymentId(our_payment_hash.0), - ) - .unwrap(); - check_added_monitors!(nodes[0], 1); - } + let onion = RecipientOnionFields::secret_only(our_payment_secret); + let id = PaymentId(our_payment_hash.0); + nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); + check_added_monitors!(nodes[0], 1); let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); @@ -911,31 +830,16 @@ fn test_monitor_update_fail_no_rebroadcast() { let (route, our_payment_hash, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); - { - nodes[0] - .node - .send_payment_with_route( - route, - our_payment_hash, - RecipientOnionFields::secret_only(payment_secret_1), - PaymentId(our_payment_hash.0), - ) - .unwrap(); - check_added_monitors!(nodes[0], 1); - } + let onion = RecipientOnionFields::secret_only(payment_secret_1); + let id = PaymentId(our_payment_hash.0); + nodes[0].node.send_payment_with_route(route, our_payment_hash, onion, id).unwrap(); + check_added_monitors!(nodes[0], 1); let send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); nodes[1].node.handle_update_add_htlc(node_a_id, &send_event.msgs[0]); - let bs_raa = commitment_signed_dance!( - nodes[1], - nodes[0], - send_event.commitment_msg, - false, - true, - false, - true - ); + let commitment = send_event.commitment_msg; + let bs_raa = commitment_signed_dance!(nodes[1], 
nodes[0], commitment, false, true, false, true); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[1].node.handle_revoke_and_ack(node_a_id, &bs_raa); @@ -980,35 +884,21 @@ fn test_monitor_update_raa_while_paused() { send_payment(&nodes[0], &[&nodes[1]], 5000000); let (route, our_payment_hash_1, payment_preimage_1, our_payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); - { - nodes[0] - .node - .send_payment_with_route( - route, - our_payment_hash_1, - RecipientOnionFields::secret_only(our_payment_secret_1), - PaymentId(our_payment_hash_1.0), - ) - .unwrap(); - check_added_monitors!(nodes[0], 1); - } + let onion = RecipientOnionFields::secret_only(our_payment_secret_1); + let id = PaymentId(our_payment_hash_1.0); + nodes[0].node.send_payment_with_route(route, our_payment_hash_1, onion, id).unwrap(); + + check_added_monitors!(nodes[0], 1); let send_event_1 = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); let (route, our_payment_hash_2, payment_preimage_2, our_payment_secret_2) = get_route_and_payment_hash!(nodes[1], nodes[0], 1000000); - { - nodes[1] - .node - .send_payment_with_route( - route, - our_payment_hash_2, - RecipientOnionFields::secret_only(our_payment_secret_2), - PaymentId(our_payment_hash_2.0), - ) - .unwrap(); - check_added_monitors!(nodes[1], 1); - } + let onion_2 = RecipientOnionFields::secret_only(our_payment_secret_2); + let id_2 = PaymentId(our_payment_hash_2.0); + nodes[1].node.send_payment_with_route(route, our_payment_hash_2, onion_2, id_2).unwrap(); + + check_added_monitors!(nodes[1], 1); let send_event_2 = SendEvent::from_event(nodes[1].node.get_and_clear_pending_msg_events().remove(0)); @@ -1096,33 +986,19 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { assert!(updates.update_fee.is_none()); nodes[1].node.handle_update_fail_htlc(node_c_id, &updates.update_fail_htlcs[0]); - let bs_revoke_and_ack = commitment_signed_dance!( - nodes[1], - nodes[2], - updates.commitment_signed, - false, - true, - false, - true - ); + let commitment = updates.commitment_signed; + let bs_revoke_and_ack = + commitment_signed_dance!(nodes[1], nodes[2], commitment, false, true, false, true); check_added_monitors!(nodes[0], 0); // While the second channel is AwaitingRAA, forward a second payment to get it into the // holding cell. let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[2], 1000000); - { - nodes[0] - .node - .send_payment_with_route( - route, - payment_hash_2, - RecipientOnionFields::secret_only(payment_secret_2), - PaymentId(payment_hash_2.0), - ) - .unwrap(); - check_added_monitors!(nodes[0], 1); - } + let onion_2 = RecipientOnionFields::secret_only(payment_secret_2); + let id_2 = PaymentId(payment_hash_2.0); + nodes[0].node.send_payment_with_route(route, payment_hash_2, onion_2, id_2).unwrap(); + check_added_monitors!(nodes[0], 1); let mut send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); @@ -1145,18 +1021,10 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { // being paused waiting a monitor update. 
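// nodes[0]'s own channel is healthy, so the send below still records one
// monitor update on nodes[0]; it is the paused nodes[1] <-> nodes[2] channel
// that parks this HTLC in the holding cell.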
let (route, payment_hash_3, _, payment_secret_3) = get_route_and_payment_hash!(nodes[0], nodes[2], 1000000); - { - nodes[0] - .node - .send_payment_with_route( - route, - payment_hash_3, - RecipientOnionFields::secret_only(payment_secret_3), - PaymentId(payment_hash_3.0), - ) - .unwrap(); - check_added_monitors!(nodes[0], 1); - } + let onion_3 = RecipientOnionFields::secret_only(payment_secret_3); + let id_3 = PaymentId(payment_hash_3.0); + nodes[0].node.send_payment_with_route(route, payment_hash_3, onion_3, id_3).unwrap(); + check_added_monitors!(nodes[0], 1); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); // We succeed in updating the monitor for the first channel send_event = SendEvent::from_event(nodes[0].node.get_and_clear_pending_msg_events().remove(0)); @@ -1174,15 +1042,9 @@ fn do_test_monitor_update_fail_raa(test_ignore_second_cs: bool) { // Try to route another payment backwards from 2 to make sure 1 holds off on responding let (route, payment_hash_4, payment_preimage_4, payment_secret_4) = get_route_and_payment_hash!(nodes[2], nodes[0], 1000000); - nodes[2] - .node - .send_payment_with_route( - route, - payment_hash_4, - RecipientOnionFields::secret_only(payment_secret_4), - PaymentId(payment_hash_4.0), - ) - .unwrap(); + let onion_4 = RecipientOnionFields::secret_only(payment_secret_4); + let id_4 = PaymentId(payment_hash_4.0); + nodes[2].node.send_payment_with_route(route, payment_hash_4, onion_4, id_4).unwrap(); check_added_monitors!(nodes[2], 1); send_event = @@ -1428,30 +1290,13 @@ fn test_monitor_update_fail_reestablish() { commitment_signed_dance!(nodes[1], nodes[2], updates.commitment_signed, false); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - nodes[0] - .node - .peer_connected( - node_b_id, - &msgs::Init { - features: nodes[1].node.init_features(), - networks: None, - remote_network_address: None, - }, - true, - ) - .unwrap(); - nodes[1] - .node - .peer_connected( - node_a_id, - &msgs::Init { - features: nodes[0].node.init_features(), - networks: None, - remote_network_address: None, - }, - false, - ) - .unwrap(); + let init_msg = msgs::Init { + features: nodes[1].node.init_features(), + networks: None, + remote_network_address: None, + }; + nodes[0].node.peer_connected(node_b_id, &init_msg, true).unwrap(); + nodes[1].node.peer_connected(node_a_id, &init_msg, false).unwrap(); let as_reestablish = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(); let bs_reestablish = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(); @@ -1459,13 +1304,10 @@ fn test_monitor_update_fail_reestablish() { nodes[0].node.handle_channel_reestablish(node_b_id, &bs_reestablish); nodes[1].node.handle_channel_reestablish(node_a_id, &as_reestablish); - assert_eq!( - get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, node_b_id) - .contents - .channel_flags - & 2, - 0 - ); // The "disabled" bit should be unset as we just reconnected + + // The "disabled" bit should be unset as we just reconnected + let as_channel_upd = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, node_b_id); + assert_eq!(as_channel_upd.contents.channel_flags & 2, 0); nodes[1].node.get_and_clear_pending_msg_events(); // Free the holding cell check_added_monitors!(nodes[1], 1); @@ -1473,52 +1315,24 @@ fn test_monitor_update_fail_reestablish() { nodes[1].node.peer_disconnected(node_a_id); nodes[0].node.peer_disconnected(node_b_id); - nodes[0] - .node - .peer_connected( - node_b_id, - &msgs::Init { - features: 
nodes[1].node.init_features(), - networks: None, - remote_network_address: None, - }, - true, - ) - .unwrap(); - nodes[1] - .node - .peer_connected( - node_a_id, - &msgs::Init { - features: nodes[0].node.init_features(), - networks: None, - remote_network_address: None, - }, - false, - ) - .unwrap(); + nodes[0].node.peer_connected(node_b_id, &init_msg, true).unwrap(); + nodes[1].node.peer_connected(node_a_id, &init_msg, false).unwrap(); assert_eq!(get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(), as_reestablish); assert_eq!(get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(), bs_reestablish); nodes[0].node.handle_channel_reestablish(node_b_id, &bs_reestablish); - assert_eq!( - get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, node_b_id) - .contents - .channel_flags - & 2, - 0 - ); // The "disabled" bit should be unset as we just reconnected + + // The "disabled" bit should be unset as we just reconnected + let as_channel_upd = get_event_msg!(nodes[0], MessageSendEvent::SendChannelUpdate, node_b_id); + assert_eq!(as_channel_upd.contents.channel_flags & 2, 0); nodes[1].node.handle_channel_reestablish(node_a_id, &as_reestablish); check_added_monitors!(nodes[1], 0); - assert_eq!( - get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, node_a_id) - .contents - .channel_flags - & 2, - 0 - ); // The "disabled" bit should be unset as we just reconnected + + // The "disabled" bit should be unset as we just reconnected + let bs_channel_upd = get_event_msg!(nodes[1], MessageSendEvent::SendChannelUpdate, node_a_id); + assert_eq!(bs_channel_upd.contents.channel_flags & 2, 0); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); let (latest_update, _) = get_latest_mon_update_id(&nodes[1], chan_1.2); @@ -1564,28 +1378,14 @@ fn raa_no_response_awaiting_raa_state() { // immediately after a CS. By setting failing the monitor update failure from the CS (which // requires only an RAA response due to AwaitingRAA) we can deliver the RAA and require the CS // generation during RAA while in monitor-update-failed state. - { - nodes[0] - .node - .send_payment_with_route( - route.clone(), - payment_hash_1, - RecipientOnionFields::secret_only(payment_secret_1), - PaymentId(payment_hash_1.0), - ) - .unwrap(); - check_added_monitors!(nodes[0], 1); - nodes[0] - .node - .send_payment_with_route( - route.clone(), - payment_hash_2, - RecipientOnionFields::secret_only(payment_secret_2), - PaymentId(payment_hash_2.0), - ) - .unwrap(); - check_added_monitors!(nodes[0], 0); - } + let onion_1 = RecipientOnionFields::secret_only(payment_secret_1); + let id_1 = PaymentId(payment_hash_1.0); + nodes[0].node.send_payment_with_route(route.clone(), payment_hash_1, onion_1, id_1).unwrap(); + check_added_monitors!(nodes[0], 1); + let onion_2 = RecipientOnionFields::secret_only(payment_secret_2); + let id_2 = PaymentId(payment_hash_2.0); + nodes[0].node.send_payment_with_route(route.clone(), payment_hash_2, onion_2, id_2).unwrap(); + check_added_monitors!(nodes[0], 0); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -1631,19 +1431,11 @@ fn raa_no_response_awaiting_raa_state() { // We send a third payment here, which is somewhat of a redundant test, but the // chanmon_fail_consistency test required it to actually find the bug (by seeing out-of-sync // commitment transaction states) whereas here we can explicitly check for it. 
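// "Explicitly check for it" concretely means asserting that the third send
// neither adds a monitor update nor emits messages, i.e. the HTLC sits in the
// holding cell; the next hunk carries exactly these two asserts:
//
//     check_added_monitors!(nodes[0], 0);
//     assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty());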
- { - nodes[0] - .node - .send_payment_with_route( - route, - payment_hash_3, - RecipientOnionFields::secret_only(payment_secret_3), - PaymentId(payment_hash_3.0), - ) - .unwrap(); - check_added_monitors!(nodes[0], 0); - assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); - } + let onion_3 = RecipientOnionFields::secret_only(payment_secret_3); + let id_3 = PaymentId(payment_hash_3.0); + nodes[0].node.send_payment_with_route(route, payment_hash_3, onion_3, id_3).unwrap(); + check_added_monitors!(nodes[0], 0); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); nodes[0].node.handle_revoke_and_ack(node_b_id, &bs_responses.0); check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -1712,30 +1504,13 @@ fn claim_while_disconnected_monitor_update_fail() { check_added_monitors!(nodes[1], 1); expect_payment_claimed!(nodes[1], payment_hash_1, 1_000_000); - nodes[0] - .node - .peer_connected( - node_b_id, - &msgs::Init { - features: nodes[1].node.init_features(), - networks: None, - remote_network_address: None, - }, - true, - ) - .unwrap(); - nodes[1] - .node - .peer_connected( - node_a_id, - &msgs::Init { - features: nodes[0].node.init_features(), - networks: None, - remote_network_address: None, - }, - false, - ) - .unwrap(); + let init_msg = msgs::Init { + features: nodes[1].node.init_features(), + networks: None, + remote_network_address: None, + }; + nodes[0].node.peer_connected(node_b_id, &init_msg, true).unwrap(); + nodes[1].node.peer_connected(node_a_id, &init_msg, false).unwrap(); let as_reconnect = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(); let bs_reconnect = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(); @@ -1758,18 +1533,10 @@ fn claim_while_disconnected_monitor_update_fail() { // the monitor still failed let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); - { - nodes[0] - .node - .send_payment_with_route( - route, - payment_hash_2, - RecipientOnionFields::secret_only(payment_secret_2), - PaymentId(payment_hash_2.0), - ) - .unwrap(); - check_added_monitors!(nodes[0], 1); - } + let onion_2 = RecipientOnionFields::secret_only(payment_secret_2); + let id_2 = PaymentId(payment_hash_2.0); + nodes[0].node.send_payment_with_route(route, payment_hash_2, onion_2, id_2).unwrap(); + check_added_monitors!(nodes[0], 1); let as_updates = get_htlc_update_msgs!(nodes[0], node_b_id); nodes[1].node.handle_update_add_htlc(node_a_id, &as_updates.update_add_htlcs[0]); @@ -1854,48 +1621,28 @@ fn monitor_failed_no_reestablish_response() { let channel_id = create_announced_chan_between_nodes(&nodes, 0, 1).2; { - let mut node_0_per_peer_lock; - let mut node_0_peer_state_lock; - get_channel_ref!( - nodes[0], - nodes[1], - node_0_per_peer_lock, - node_0_peer_state_lock, - channel_id - ) - .context_mut() - .announcement_sigs_state = AnnouncementSigsState::PeerReceived; + let mut per_peer_lock; + let mut peer_state_lock; + get_channel_ref!(nodes[0], nodes[1], per_peer_lock, peer_state_lock, channel_id) + .context_mut() + .announcement_sigs_state = AnnouncementSigsState::PeerReceived; } { - let mut node_1_per_peer_lock; - let mut node_1_peer_state_lock; - get_channel_ref!( - nodes[1], - nodes[0], - node_1_per_peer_lock, - node_1_peer_state_lock, - channel_id - ) - .context_mut() - .announcement_sigs_state = AnnouncementSigsState::PeerReceived; + let mut per_peer_lock; + let mut peer_state_lock; + 
get_channel_ref!(nodes[1], nodes[0], per_peer_lock, peer_state_lock, channel_id) + .context_mut() + .announcement_sigs_state = AnnouncementSigsState::PeerReceived; } // Route the payment and deliver the initial commitment_signed (with a monitor update failure // on receipt). let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); - { - nodes[0] - .node - .send_payment_with_route( - route, - payment_hash_1, - RecipientOnionFields::secret_only(payment_secret_1), - PaymentId(payment_hash_1.0), - ) - .unwrap(); - check_added_monitors!(nodes[0], 1); - } + let onion = RecipientOnionFields::secret_only(payment_secret_1); + let id = PaymentId(payment_hash_1.0); + nodes[0].node.send_payment_with_route(route, payment_hash_1, onion, id).unwrap(); + check_added_monitors!(nodes[0], 1); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); @@ -1911,30 +1658,13 @@ fn monitor_failed_no_reestablish_response() { nodes[0].node.peer_disconnected(node_b_id); nodes[1].node.peer_disconnected(node_a_id); - nodes[0] - .node - .peer_connected( - node_b_id, - &msgs::Init { - features: nodes[1].node.init_features(), - networks: None, - remote_network_address: None, - }, - true, - ) - .unwrap(); - nodes[1] - .node - .peer_connected( - node_a_id, - &msgs::Init { - features: nodes[0].node.init_features(), - networks: None, - remote_network_address: None, - }, - false, - ) - .unwrap(); + let init_msg = msgs::Init { + features: nodes[1].node.init_features(), + networks: None, + remote_network_address: None, + }; + nodes[0].node.peer_connected(node_b_id, &init_msg, true).unwrap(); + nodes[1].node.peer_connected(node_a_id, &init_msg, false).unwrap(); let as_reconnect = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(); let bs_reconnect = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(); @@ -1993,18 +1723,10 @@ fn first_message_on_recv_ordering() { // can deliver it and fail the monitor update. 
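// The "failure on receipt" is again persister-driven: after the payment is
// routed, the test flips nodes[1]'s persister to InProgress (the call appears
// verbatim below) so the inbound commitment_signed is accepted but the
// channel pauses until the monitor update completes:
//
//     chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);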
let (route, payment_hash_1, payment_preimage_1, payment_secret_1) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); - { - nodes[0] - .node - .send_payment_with_route( - route, - payment_hash_1, - RecipientOnionFields::secret_only(payment_secret_1), - PaymentId(payment_hash_1.0), - ) - .unwrap(); - check_added_monitors!(nodes[0], 1); - } + let onion_1 = RecipientOnionFields::secret_only(payment_secret_1); + let id_1 = PaymentId(payment_hash_1.0); + nodes[0].node.send_payment_with_route(route, payment_hash_1, onion_1, id_1).unwrap(); + check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -2025,18 +1747,11 @@ fn first_message_on_recv_ordering() { // Route the second payment, generating an update_add_htlc/commitment_signed let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); - { - nodes[0] - .node - .send_payment_with_route( - route, - payment_hash_2, - RecipientOnionFields::secret_only(payment_secret_2), - PaymentId(payment_hash_2.0), - ) - .unwrap(); - check_added_monitors!(nodes[0], 1); - } + let onion_2 = RecipientOnionFields::secret_only(payment_secret_2); + let id_2 = PaymentId(payment_hash_2.0); + nodes[0].node.send_payment_with_route(route, payment_hash_2, onion_2, id_2).unwrap(); + + check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); @@ -2124,18 +1839,10 @@ fn test_monitor_update_fail_claim() { let (route, payment_hash_2, _, payment_secret_2) = get_route_and_payment_hash!(nodes[2], nodes[0], 1_000_000); - { - nodes[2] - .node - .send_payment_with_route( - route.clone(), - payment_hash_2, - RecipientOnionFields::secret_only(payment_secret_2), - PaymentId(payment_hash_2.0), - ) - .unwrap(); - check_added_monitors!(nodes[2], 1); - } + let onion_2 = RecipientOnionFields::secret_only(payment_secret_2); + let id_2 = PaymentId(payment_hash_2.0); + nodes[2].node.send_payment_with_route(route.clone(), payment_hash_2, onion_2, id_2).unwrap(); + check_added_monitors!(nodes[2], 1); // Successfully update the monitor on the 1<->2 channel, but the 0<->1 channel should still be // paused, so forward shouldn't succeed until we call channel_monitor_updated(). 
@@ -2151,15 +1858,9 @@ fn test_monitor_update_fail_claim() { expect_pending_htlcs_forwardable_ignore!(nodes[1]); let (_, payment_hash_3, payment_secret_3) = get_payment_preimage_hash!(nodes[0]); - nodes[2] - .node - .send_payment_with_route( - route, - payment_hash_3, - RecipientOnionFields::secret_only(payment_secret_3), - PaymentId(payment_hash_3.0), - ) - .unwrap(); + let id_3 = PaymentId(payment_hash_3.0); + let onion_3 = RecipientOnionFields::secret_only(payment_secret_3); + nodes[2].node.send_payment_with_route(route, payment_hash_3, onion_3, id_3).unwrap(); check_added_monitors!(nodes[2], 1); let mut events = nodes[2].node.get_and_clear_pending_msg_events(); @@ -2281,18 +1982,10 @@ fn test_monitor_update_on_pending_forwards() { let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[2], nodes[0], 1000000); - { - nodes[2] - .node - .send_payment_with_route( - route, - payment_hash_2, - RecipientOnionFields::secret_only(payment_secret_2), - PaymentId(payment_hash_2.0), - ) - .unwrap(); - check_added_monitors!(nodes[2], 1); - } + let onion = RecipientOnionFields::secret_only(payment_secret_2); + let id = PaymentId(payment_hash_2.0); + nodes[2].node.send_payment_with_route(route, payment_hash_2, onion, id).unwrap(); + check_added_monitors!(nodes[2], 1); let mut events = nodes[2].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); @@ -2364,32 +2057,17 @@ fn monitor_update_claim_fail_no_response() { // Now start forwarding a second payment, skipping the last RAA so B is in AwaitingRAA let (route, payment_hash_2, payment_preimage_2, payment_secret_2) = get_route_and_payment_hash!(nodes[0], nodes[1], 1000000); - { - nodes[0] - .node - .send_payment_with_route( - route, - payment_hash_2, - RecipientOnionFields::secret_only(payment_secret_2), - PaymentId(payment_hash_2.0), - ) - .unwrap(); - check_added_monitors!(nodes[0], 1); - } + let onion = RecipientOnionFields::secret_only(payment_secret_2); + let id = PaymentId(payment_hash_2.0); + nodes[0].node.send_payment_with_route(route, payment_hash_2, onion, id).unwrap(); + check_added_monitors!(nodes[0], 1); let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); let payment_event = SendEvent::from_event(events.pop().unwrap()); nodes[1].node.handle_update_add_htlc(node_a_id, &payment_event.msgs[0]); - let as_raa = commitment_signed_dance!( - nodes[1], - nodes[0], - payment_event.commitment_msg, - false, - true, - false, - true - ); + let commitment = payment_event.commitment_msg; + let as_raa = commitment_signed_dance!(nodes[1], nodes[0], commitment, false, true, false, true); chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[1].node.claim_funds(payment_preimage_1); @@ -2578,20 +2256,10 @@ fn do_during_funding_monitor_fail( send_payment(&nodes[0], &[&nodes[1]], 8000000); close_channel(&nodes[0], &nodes[1], &channel_id, funding_tx, true); - check_closed_event!( - nodes[0], - 1, - ClosureReason::CounterpartyInitiatedCooperativeClosure, - [node_b_id], - 100000 - ); - check_closed_event!( - nodes[1], - 1, - ClosureReason::LocallyInitiatedCooperativeClosure, - [node_a_id], - 100000 - ); + let reason_a = ClosureReason::CounterpartyInitiatedCooperativeClosure; + check_closed_event!(nodes[0], 1, reason_a, [node_b_id], 100000); + let reason_b = ClosureReason::LocallyInitiatedCooperativeClosure; + check_closed_event!(nodes[1], 1, reason_b, [node_a_id], 100000); } #[test] @@ -2638,52 +2306,33 @@ fn 
test_path_paused_mpp() { chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); // The first path should have succeeded with the second getting a MonitorUpdateInProgress err. - nodes[0] - .node - .send_payment_with_route( - route, - payment_hash, - RecipientOnionFields::secret_only(payment_secret), - PaymentId(payment_hash.0), - ) - .unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + nodes[0].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); check_added_monitors!(nodes[0], 2); chanmon_cfgs[0].persister.set_update_ret(ChannelMonitorUpdateStatus::Completed); // Pass the first HTLC of the payment along to nodes[3]. let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); - pass_along_path( - &nodes[0], - &[&nodes[1], &nodes[3]], - 0, - payment_hash.clone(), - Some(payment_secret), - events.pop().unwrap(), - false, - None, - ); + let path_1 = &[&nodes[1], &nodes[3]]; + let ev = events.pop().unwrap(); + pass_along_path(&nodes[0], path_1, 0, payment_hash, Some(payment_secret), ev, false, None); // And check that, after we successfully update the monitor for chan_2 we can pass the second // HTLC along to nodes[3] and claim the whole payment back to nodes[0]. let (latest_update, _) = get_latest_mon_update_id(&nodes[0], chan_2_id); nodes[0].chain_monitor.chain_monitor.force_channel_monitor_updated(chan_2_id, latest_update); + let mut events = nodes[0].node.get_and_clear_pending_msg_events(); assert_eq!(events.len(), 1); - pass_along_path( - &nodes[0], - &[&nodes[2], &nodes[3]], - 200_000, - payment_hash.clone(), - Some(payment_secret), - events.pop().unwrap(), - true, - None, - ); + let path_2 = &[&nodes[2], &nodes[3]]; + let ev = events.pop().unwrap(); + pass_along_path(&nodes[0], path_2, 200_000, payment_hash, Some(payment_secret), ev, true, None); claim_payment_along_route(ClaimAlongRouteArgs::new( &nodes[0], - &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], + &[path_1, path_2], payment_preimage, )); } @@ -2713,15 +2362,9 @@ fn test_pending_update_fee_ack_on_reconnect() { let (route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(&nodes[1], nodes[0], 1_000_000); - nodes[1] - .node - .send_payment_with_route( - route, - payment_hash, - RecipientOnionFields::secret_only(payment_secret), - PaymentId(payment_hash.0), - ) - .unwrap(); + let onion = RecipientOnionFields::secret_only(payment_secret); + let id = PaymentId(payment_hash.0); + nodes[1].node.send_payment_with_route(route, payment_hash, onion, id).unwrap(); check_added_monitors!(nodes[1], 1); let bs_initial_send_msgs = get_htlc_update_msgs!(nodes[1], node_a_id); // bs_initial_send_msgs are not delivered until they are re-generated after reconnect @@ -2746,31 +2389,14 @@ fn test_pending_update_fee_ack_on_reconnect() { nodes[0].node.peer_disconnected(node_b_id); nodes[1].node.peer_disconnected(node_a_id); - nodes[0] - .node - .peer_connected( - node_b_id, - &msgs::Init { - features: nodes[1].node.init_features(), - networks: None, - remote_network_address: None, - }, - true, - ) - .unwrap(); + let init_msg = msgs::Init { + features: nodes[1].node.init_features(), + networks: None, + remote_network_address: None, + }; + nodes[0].node.peer_connected(node_b_id, &init_msg, true).unwrap(); let as_connect_msg = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(); - nodes[1] - .node - .peer_connected( - node_a_id, - &msgs::Init { - features: 
nodes[0].node.init_features(), - networks: None, - remote_network_address: None, - }, - false, - ) - .unwrap(); + nodes[1].node.peer_connected(node_a_id, &init_msg, false).unwrap(); let bs_connect_msg = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(); nodes[1].node.handle_channel_reestablish(node_a_id, &as_connect_msg); @@ -2925,31 +2551,14 @@ fn do_update_fee_resend_test(deliver_update: bool, parallel_updates: bool) { nodes[0].node.peer_disconnected(node_b_id); nodes[1].node.peer_disconnected(node_a_id); - nodes[0] - .node - .peer_connected( - node_b_id, - &msgs::Init { - features: nodes[1].node.init_features(), - networks: None, - remote_network_address: None, - }, - true, - ) - .unwrap(); + let init_msg = msgs::Init { + features: nodes[1].node.init_features(), + networks: None, + remote_network_address: None, + }; + nodes[0].node.peer_connected(node_b_id, &init_msg, true).unwrap(); let as_connect_msg = get_chan_reestablish_msgs!(nodes[0], nodes[1]).pop().unwrap(); - nodes[1] - .node - .peer_connected( - node_a_id, - &msgs::Init { - features: nodes[0].node.init_features(), - networks: None, - remote_network_address: None, - }, - false, - ) - .unwrap(); + nodes[1].node.peer_connected(node_a_id, &init_msg, false).unwrap(); let bs_connect_msg = get_chan_reestablish_msgs!(nodes[1], nodes[0]).pop().unwrap(); nodes[1].node.handle_channel_reestablish(node_a_id, &as_connect_msg); @@ -3029,9 +2638,9 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); let persister; - let new_chain_monitor; + let new_chain_mon; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); - let nodes_0_deserialized; + let nodes_0_reload; let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let node_a_id = nodes[0].node.get_our_node_id(); @@ -3066,28 +2675,16 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { // (c) will not be freed from the holding cell. let (payment_preimage_0, payment_hash_0, ..) = route_payment(&nodes[1], &[&nodes[0]], 100_000); - nodes[0] - .node - .send_payment_with_route( - route.clone(), - payment_hash_1, - RecipientOnionFields::secret_only(payment_secret_1), - PaymentId(payment_hash_1.0), - ) - .unwrap(); + let onion_1 = RecipientOnionFields::secret_only(payment_secret_1); + let id_1 = PaymentId(payment_hash_1.0); + nodes[0].node.send_payment_with_route(route.clone(), payment_hash_1, onion_1, id_1).unwrap(); check_added_monitors!(nodes[0], 1); let send = SendEvent::from_node(&nodes[0]); assert_eq!(send.msgs.len(), 1); - nodes[0] - .node - .send_payment_with_route( - route, - payment_hash_2, - RecipientOnionFields::secret_only(payment_secret_2), - PaymentId(payment_hash_2.0), - ) - .unwrap(); + let onion_2 = RecipientOnionFields::secret_only(payment_secret_2); + let id_2 = PaymentId(payment_hash_2.0); + nodes[0].node.send_payment_with_route(route, payment_hash_2, onion_2, id_2).unwrap(); check_added_monitors!(nodes[0], 0); let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode(); @@ -3110,14 +2707,9 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { // disconnect the peers. Note that the fuzzer originally found this issue because // deserializing a ChannelManager in this state causes an assertion failure. 
if reload_a { - reload_node!( - nodes[0], - &nodes[0].node.encode(), - &[&chan_0_monitor_serialized], - persister, - new_chain_monitor, - nodes_0_deserialized - ); + let node_ser = nodes[0].node.encode(); + let mons = &[&chan_0_monitor_serialized[..]]; + reload_node!(nodes[0], &node_ser, mons, persister, new_chain_mon, nodes_0_reload); persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); } else { @@ -3126,32 +2718,15 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { nodes[1].node.peer_disconnected(node_a_id); // Now reconnect the two - nodes[0] - .node - .peer_connected( - node_b_id, - &msgs::Init { - features: nodes[1].node.init_features(), - networks: None, - remote_network_address: None, - }, - true, - ) - .unwrap(); + let init_msg = msgs::Init { + features: nodes[1].node.init_features(), + networks: None, + remote_network_address: None, + }; + nodes[0].node.peer_connected(node_b_id, &init_msg, true).unwrap(); let reestablish_1 = get_chan_reestablish_msgs!(nodes[0], nodes[1]); assert_eq!(reestablish_1.len(), 1); - nodes[1] - .node - .peer_connected( - node_a_id, - &msgs::Init { - features: nodes[0].node.init_features(), - networks: None, - remote_network_address: None, - }, - false, - ) - .unwrap(); + nodes[1].node.peer_connected(node_a_id, &init_msg, false).unwrap(); let reestablish_2 = get_chan_reestablish_msgs!(nodes[1], nodes[0]); assert_eq!(reestablish_2.len(), 1); @@ -3292,15 +2867,9 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f // awaiting a remote revoke_and_ack from nodes[0]. let (route, second_payment_hash, _, second_payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[1], 100_000); - nodes[0] - .node - .send_payment_with_route( - route, - second_payment_hash, - RecipientOnionFields::secret_only(second_payment_secret), - PaymentId(second_payment_hash.0), - ) - .unwrap(); + let onion_2 = RecipientOnionFields::secret_only(second_payment_secret); + let id_2 = PaymentId(second_payment_hash.0); + nodes[0].node.send_payment_with_route(route, second_payment_hash, onion_2, id_2).unwrap(); check_added_monitors!(nodes[0], 1); let send_event = @@ -3481,20 +3050,10 @@ fn test_temporary_error_during_shutdown() { assert_eq!(txn_a, txn_b); assert_eq!(txn_a.len(), 1); check_spends!(txn_a[0], funding_tx); - check_closed_event!( - nodes[1], - 1, - ClosureReason::CounterpartyInitiatedCooperativeClosure, - [node_a_id], - 100000 - ); - check_closed_event!( - nodes[0], - 1, - ClosureReason::LocallyInitiatedCooperativeClosure, - [node_b_id], - 100000 - ); + let reason_b = ClosureReason::CounterpartyInitiatedCooperativeClosure; + check_closed_event!(nodes[1], 1, reason_b, [node_a_id], 100000); + let reason_a = ClosureReason::LocallyInitiatedCooperativeClosure; + check_closed_event!(nodes[0], 1, reason_a, [node_b_id], 100000); } #[test] @@ -3647,7 +3206,7 @@ fn do_test_outbound_reload_without_init_mon(use_0conf: bool) { let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(chan_config.clone()), Some(chan_config)]); - let nodes_0_deserialized; + let node_a_reload; let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -3663,22 +3222,14 @@ fn do_test_outbound_reload_without_init_mon(use_0conf: bool) { let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); match events[0] { - Event::OpenChannelRequest { temporary_channel_id, .. } => { + Event::OpenChannelRequest { temporary_channel_id: chan_id, .. 
} => { if use_0conf { nodes[1] .node - .accept_inbound_channel_from_trusted_peer_0conf( - &temporary_channel_id, - &node_a_id, - 0, - None, - ) + .accept_inbound_channel_from_trusted_peer_0conf(&chan_id, &node_a_id, 0, None) .unwrap(); } else { - nodes[1] - .node - .accept_inbound_channel(&temporary_channel_id, &node_a_id, 0, None) - .unwrap(); + nodes[1].node.accept_inbound_channel(&chan_id, &node_a_id, 0, None).unwrap(); } }, _ => panic!("Unexpected event"), @@ -3737,14 +3288,8 @@ fn do_test_outbound_reload_without_init_mon(use_0conf: bool) { nodes[0].chain_source.watched_txn.lock().unwrap().clear(); nodes[0].chain_source.watched_outputs.lock().unwrap().clear(); - reload_node!( - nodes[0], - &nodes[0].node.encode(), - &[], - persister, - new_chain_monitor, - nodes_0_deserialized - ); + let node_a_ser = nodes[0].node.encode(); + reload_node!(nodes[0], &node_a_ser, &[], persister, new_chain_monitor, node_a_reload); check_closed_event!(nodes[0], 1, ClosureReason::DisconnectedPeer, [node_b_id], 100000); assert!(nodes[0].node.list_channels().is_empty()); } @@ -3771,7 +3316,7 @@ fn do_test_inbound_reload_without_init_mon(use_0conf: bool, lock_commitment: boo let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(chan_config.clone()), Some(chan_config)]); - let nodes_1_deserialized; + let node_b_reload; let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); @@ -3787,22 +3332,14 @@ fn do_test_inbound_reload_without_init_mon(use_0conf: bool, lock_commitment: boo let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), 1); match events[0] { - Event::OpenChannelRequest { temporary_channel_id, .. } => { + Event::OpenChannelRequest { temporary_channel_id: chan_id, .. } => { if use_0conf { nodes[1] .node - .accept_inbound_channel_from_trusted_peer_0conf( - &temporary_channel_id, - &node_a_id, - 0, - None, - ) + .accept_inbound_channel_from_trusted_peer_0conf(&chan_id, &node_a_id, 0, None) .unwrap(); } else { - nodes[1] - .node - .accept_inbound_channel(&temporary_channel_id, &node_a_id, 0, None) - .unwrap(); + nodes[1].node.accept_inbound_channel(&chan_id, &node_a_id, 0, None).unwrap(); } }, _ => panic!("Unexpected event"), @@ -3858,14 +3395,8 @@ fn do_test_inbound_reload_without_init_mon(use_0conf: bool, lock_commitment: boo nodes[1].chain_source.watched_txn.lock().unwrap().clear(); nodes[1].chain_source.watched_outputs.lock().unwrap().clear(); - reload_node!( - nodes[1], - &nodes[1].node.encode(), - &[], - persister, - new_chain_monitor, - nodes_1_deserialized - ); + let node_b_ser = nodes[1].node.encode(); + reload_node!(nodes[1], &node_b_ser, &[], persister, new_chain_monitor, node_b_reload); check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer, [node_a_id], 100000); assert!(nodes[1].node.list_channels().is_empty()); @@ -3913,13 +3444,8 @@ fn test_blocked_chan_preimage_release() { nodes[1] .node .handle_update_fulfill_htlc(node_c_id, &cs_htlc_fulfill_updates.update_fulfill_htlcs[0]); - do_commitment_signed_dance( - &nodes[1], - &nodes[2], - &cs_htlc_fulfill_updates.commitment_signed, - false, - false, - ); + let commitment = cs_htlc_fulfill_updates.commitment_signed; + do_commitment_signed_dance(&nodes[1], &nodes[2], &commitment, false, false); check_added_monitors(&nodes[1], 0); // Now claim the second payment on nodes[0], which will ultimately result in nodes[1] trying to @@ -3977,13 +3503,8 @@ fn test_blocked_chan_preimage_release() { nodes[2] .node .handle_update_fulfill_htlc(node_b_id, &bs_htlc_fulfill_updates.update_fulfill_htlcs[0]); - 
do_commitment_signed_dance( - &nodes[2], - &nodes[1], - &bs_htlc_fulfill_updates.commitment_signed, - false, - false, - ); + let commitment = bs_htlc_fulfill_updates.commitment_signed; + do_commitment_signed_dance(&nodes[2], &nodes[1], &commitment, false, false); expect_payment_sent(&nodes[2], payment_preimage_2, None, true, true); } @@ -4002,8 +3523,8 @@ fn do_test_inverted_mon_completion_order( let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let persister; - let new_chain_monitor; - let nodes_1_deserialized; + let chain_mon; + let node_b_reload; let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); @@ -4068,14 +3589,7 @@ fn do_test_inverted_mon_completion_order( } let mon_bc = get_monitor!(nodes[1], chan_id_bc).encode(); - reload_node!( - nodes[1], - &manager_b, - &[&mon_ab, &mon_bc], - persister, - new_chain_monitor, - nodes_1_deserialized - ); + reload_node!(nodes[1], &manager_b, &[&mon_ab, &mon_bc], persister, chain_mon, node_b_reload); nodes[0].node.peer_disconnected(node_b_id); nodes[2].node.peer_disconnected(node_b_id); @@ -4131,14 +3645,8 @@ fn do_test_inverted_mon_completion_order( check_added_monitors(&nodes[1], 0); persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); - check_closed_event( - &nodes[1], - 1, - ClosureReason::OutdatedChannelManager, - false, - &[node_c_id], - 100_000, - ); + let reason = ClosureReason::OutdatedChannelManager; + check_closed_event(&nodes[1], 1, reason, false, &[node_c_id], 100_000); check_added_monitors(&nodes[1], 2); nodes[1].node.timer_tick_occurred(); @@ -4207,8 +3715,8 @@ fn do_test_durable_preimages_on_closed_channel( let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let persister; - let new_chain_monitor; - let nodes_1_deserialized; + let chain_mon; + let node_b_reload; let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); @@ -4249,56 +3757,33 @@ fn do_test_durable_preimages_on_closed_channel( let _ = get_revoke_commit_msgs!(nodes[1], node_c_id); let mon_bc = get_monitor!(nodes[1], chan_id_bc).encode(); - let error_message = "Channel force-closed"; + let err_msg = "Channel force-closed".to_owned(); if close_chans_before_reload { if !close_only_a { chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[1] .node - .force_close_broadcasting_latest_txn( - &chan_id_bc, - &node_c_id, - error_message.to_string(), - ) + .force_close_broadcasting_latest_txn(&chan_id_bc, &node_c_id, err_msg.clone()) .unwrap(); check_closed_broadcast(&nodes[1], 1, true); - check_closed_event( - &nodes[1], - 1, - ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, - false, - &[node_c_id], - 100000, - ); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + check_closed_event(&nodes[1], 1, reason, false, &[node_c_id], 100000); } chanmon_cfgs[1].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress); nodes[1] .node - .force_close_broadcasting_latest_txn(&chan_id_ab, &node_a_id, error_message.to_string()) + .force_close_broadcasting_latest_txn(&chan_id_ab, &node_a_id, err_msg) .unwrap(); check_closed_broadcast(&nodes[1], 1, true); - check_closed_event( - &nodes[1], - 1, - ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, - false, - &[node_a_id], - 100000, - ); + let reason = 
ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100000); } // Now reload node B let manager_b = nodes[1].node.encode(); - reload_node!( - nodes[1], - &manager_b, - &[&mon_ab, &mon_bc], - persister, - new_chain_monitor, - nodes_1_deserialized - ); + reload_node!(nodes[1], &manager_b, &[&mon_ab, &mon_bc], persister, chain_mon, node_b_reload); nodes[0].node.peer_disconnected(node_b_id); nodes[2].node.peer_disconnected(node_b_id); @@ -4312,20 +3797,11 @@ fn do_test_durable_preimages_on_closed_channel( assert_eq!(bs_close_txn.len(), 3); } } - let error_message = "Channel force-closed"; - nodes[0] - .node - .force_close_broadcasting_latest_txn(&chan_id_ab, &node_b_id, error_message.to_string()) - .unwrap(); - check_closed_event( - &nodes[0], - 1, - ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, - false, - &[node_b_id], - 100000, - ); + let err_msg = "Channel force-closed".to_owned(); + nodes[0].node.force_close_broadcasting_latest_txn(&chan_id_ab, &node_b_id, err_msg).unwrap(); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 100000); let as_closing_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); assert_eq!(as_closing_tx.len(), 1); @@ -4365,14 +3841,8 @@ fn do_test_durable_preimages_on_closed_channel( if !close_chans_before_reload { check_closed_broadcast(&nodes[1], 1, true); - check_closed_event( - &nodes[1], - 1, - ClosureReason::CommitmentTxConfirmed, - false, - &[node_a_id], - 100000, - ); + let reason = ClosureReason::CommitmentTxConfirmed; + check_closed_event(&nodes[1], 1, reason, false, &[node_a_id], 100000); } mine_transactions(&nodes[0], &[&as_closing_tx[0], bs_preimage_tx]); @@ -4434,8 +3904,8 @@ fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) { let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); let persister; - let new_chain_monitor; - let nodes_1_deserialized; + let chain_mon; + let node_b_reload; let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); @@ -4488,33 +3958,17 @@ fn do_test_reload_mon_update_completion_actions(close_during_reload: bool) { let mon_ab = get_monitor!(nodes[1], chan_id_ab).encode(); let mon_bc = get_monitor!(nodes[1], chan_id_bc).encode(); let manager_b = nodes[1].node.encode(); - reload_node!( - nodes[1], - &manager_b, - &[&mon_ab, &mon_bc], - persister, - new_chain_monitor, - nodes_1_deserialized - ); + reload_node!(nodes[1], &manager_b, &[&mon_ab, &mon_bc], persister, chain_mon, node_b_reload); - let error_message = "Channel force-closed"; + let msg = "Channel force-closed".to_owned(); if close_during_reload { // Test that we still free the B<->C channel if the A<->B channel closed while we reloaded // (as learned about during the on-reload block connection). 
- nodes[0] - .node - .force_close_broadcasting_latest_txn(&chan_id_ab, &node_b_id, error_message.to_string()) - .unwrap(); + nodes[0].node.force_close_broadcasting_latest_txn(&chan_id_ab, &node_b_id, msg).unwrap(); check_added_monitors!(nodes[0], 1); check_closed_broadcast!(nodes[0], true); - check_closed_event( - &nodes[0], - 1, - ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }, - false, - &[node_b_id], - 100_000, - ); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true) }; + check_closed_event(&nodes[0], 1, reason, false, &[node_b_id], 100_000); let as_closing_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); mine_transaction_without_consistency_checks(&nodes[1], &as_closing_tx[0]); } @@ -4622,15 +4076,9 @@ fn do_test_glacial_peer_cant_hang(hold_chan_a: bool) { // With the A<->B preimage persistence not yet complete, the B<->C channel is stuck // waiting. - nodes[1] - .node - .send_payment_with_route( - route, - payment_hash_2, - RecipientOnionFields::secret_only(payment_secret_2), - PaymentId(payment_hash_2.0), - ) - .unwrap(); + let onion_2 = RecipientOnionFields::secret_only(payment_secret_2); + let id_2 = PaymentId(payment_hash_2.0); + nodes[1].node.send_payment_with_route(route, payment_hash_2, onion_2, id_2).unwrap(); check_added_monitors(&nodes[1], 0); assert!(nodes[1].node.get_and_clear_pending_events().is_empty()); @@ -4729,14 +4177,8 @@ fn test_partial_claim_mon_update_compl_actions() { route.paths[1].hops[0].pubkey = node_c_id; route.paths[1].hops[0].short_channel_id = chan_2_scid; route.paths[1].hops[1].short_channel_id = chan_4_scid; - send_along_route_with_secret( - &nodes[0], - route, - &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], - 200_000, - payment_hash, - payment_secret, - ); + let paths = &[&[&nodes[1], &nodes[3]][..], &[&nodes[2], &nodes[3]][..]]; + send_along_route_with_secret(&nodes[0], route, paths, 200_000, payment_hash, payment_secret); // Claim along both paths, but only complete one of the two monitor updates. chanmon_cfgs[3].persister.set_update_ret(ChannelMonitorUpdateStatus::InProgress);
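One reconnection change in the hunks above deserves a closer look than the purely mechanical ones: the old code built two distinct `msgs::Init` values, using `nodes[1]`'s init features when connecting on `nodes[0]` and `nodes[0]`'s features when connecting on `nodes[1]`, while the new code reuses a single `init_msg` for both directions. That is only equivalent because these tests spin up every node with the same default feature set. A sketch of the invariant being relied on, reusing the harness names from the tests above (the leading assert is illustrative and not part of the patch):

    // Both test nodes advertise identical default init features, so the
    // two Init messages the old code built were always equal and a single
    // value can safely serve both directions of the reconnect.
    assert_eq!(nodes[0].node.init_features(), nodes[1].node.init_features());
    let init_msg = msgs::Init {
        features: nodes[1].node.init_features(),
        networks: None,
        remote_network_address: None,
    };
    nodes[0].node.peer_connected(node_b_id, &init_msg, true).unwrap();
    nodes[1].node.peer_connected(node_a_id, &init_msg, false).unwrap();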