@@ -9562,6 +9562,186 @@ fn test_keysend_payments_to_private_node() {
 	claim_payment(&nodes[0], &path, test_preimage);
 }
 
+fn do_test_partial_claim_before_restart(persist_both_monitors: bool) {
+	// Test what happens if a node receives an MPP payment, claims it, but crashes before
+	// persisting the ChannelManager. If `persist_both_monitors` is false, also crash after only
+	// updating one of the two channels' ChannelMonitors. As a result, on startup, we'll (a) still
+	// have the pending `PaymentReceived` event, (b) have one (or two) channel(s) that go on chain
+	// with the HTLC preimage in them, and (c) optionally have one channel that is live off-chain
+	// but does not have the preimage tied to the still-pending HTLC.
+	//
+	// To get to the correct state, on startup we should propagate the preimage to the
+	// still-off-chain channel, claiming the HTLC as soon as the peer reconnects, with the monitor
+	// receiving the preimage without a state update.
+	let chanmon_cfgs = create_chanmon_cfgs(4);
+	let node_cfgs = create_node_cfgs(4, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]);
+
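+	// These are declared before `nodes` so they outlive it: after the simulated restart,
+	// nodes[3] holds references to the new chain monitor and the deserialized ChannelManager.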
+	let persister: test_utils::TestPersister;
+	let new_chain_monitor: test_utils::TestChainMonitor;
+	let nodes_3_deserialized: ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>;
+
+	let mut nodes = create_network(4, &node_cfgs, &node_chanmgrs);
+
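+	// Build a diamond: disjoint paths 0 -> 1 -> 3 and 0 -> 2 -> 3 over which to split the MPP payment.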
+	create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 100_000, 0, InitFeatures::known(), InitFeatures::known());
+	create_announced_chan_between_nodes_with_value(&nodes, 0, 2, 100_000, 0, InitFeatures::known(), InitFeatures::known());
+	let chan_id_persisted = create_announced_chan_between_nodes_with_value(&nodes, 1, 3, 100_000, 0, InitFeatures::known(), InitFeatures::known()).2;
+	let chan_id_not_persisted = create_announced_chan_between_nodes_with_value(&nodes, 2, 3, 100_000, 0, InitFeatures::known(), InitFeatures::known()).2;
+
+	// Create an MPP route for 15k sats, more than the default htlc-max of 10%
+	let (mut route, payment_hash, payment_preimage, payment_secret) = get_route_and_payment_hash!(nodes[0], nodes[3], 15_000_000);
+	assert_eq!(route.paths.len(), 2);
+	route.paths.sort_by(|path_a, _| {
+		// Sort the path so that the path through nodes[1] comes first
+		if path_a[0].pubkey == nodes[1].node.get_our_node_id() {
+			core::cmp::Ordering::Less } else { core::cmp::Ordering::Greater }
+	});
+
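+	// Each MPP part adds an HTLC on a different outbound channel, hence the two monitor updates.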
+	nodes[0].node.send_payment(&route, payment_hash, &Some(payment_secret)).unwrap();
+	check_added_monitors!(nodes[0], 2);
+
+	// Send the payment through to nodes[3] *without* clearing the PaymentReceived event
+	let mut send_events = nodes[0].node.get_and_clear_pending_msg_events();
+	assert_eq!(send_events.len(), 2);
+	do_pass_along_path(&nodes[0], &[&nodes[1], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), send_events[0].clone(), true, false, None);
+	do_pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 15_000_000, payment_hash, Some(payment_secret), send_events[1].clone(), true, false, None);
+
+	// Now that we have an MPP payment pending, get the latest encoded copies of nodes[3]'s
+	// monitors and ChannelManager, for use later, if we don't want to persist both monitors.
+	let mut original_monitor = test_utils::TestVecWriter(Vec::new());
+	if !persist_both_monitors {
+		for outpoint in nodes[3].chain_monitor.chain_monitor.list_monitors() {
+			if outpoint.to_channel_id() == chan_id_not_persisted {
+				assert!(original_monitor.0.is_empty());
+				nodes[3].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut original_monitor).unwrap();
+			}
+		}
+	}
+
+	let mut original_manager = test_utils::TestVecWriter(Vec::new());
+	nodes[3].node.write(&mut original_manager).unwrap();
+
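+	// The manager snapshot above was taken before this event is handled, so the snapshot still
+	// contains the pending PaymentReceived event and will replay it on reload.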
+	expect_payment_received!(nodes[3], payment_hash, payment_secret, 15_000_000);
+
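+	// Claim the payment, writing the preimage into both channels' ChannelMonitors.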
+	nodes[3].node.claim_funds(payment_preimage);
+	check_added_monitors!(nodes[3], 2);
+
+	// Now fetch one of the two updated ChannelMonitors from nodes[3], and restart pretending we
+	// crashed in between the two persistence calls - using one old ChannelMonitor and one new one,
+	// with the old ChannelManager.
+	let mut updated_monitor = test_utils::TestVecWriter(Vec::new());
+	for outpoint in nodes[3].chain_monitor.chain_monitor.list_monitors() {
+		if outpoint.to_channel_id() == chan_id_persisted {
+			assert!(updated_monitor.0.is_empty());
+			nodes[3].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut updated_monitor).unwrap();
+		}
+	}
+	// If `persist_both_monitors` is set, get the second monitor here as well
+	if persist_both_monitors {
+		for outpoint in nodes[3].chain_monitor.chain_monitor.list_monitors() {
+			if outpoint.to_channel_id() == chan_id_not_persisted {
+				assert!(original_monitor.0.is_empty());
+				nodes[3].chain_monitor.chain_monitor.get_monitor(outpoint).unwrap().write(&mut original_monitor).unwrap();
+			}
+		}
+	}
+
+	// Now restart nodes[3].
+	persister = test_utils::TestPersister::new();
+	let keys_manager = &chanmon_cfgs[3].keys_manager;
+	new_chain_monitor = test_utils::TestChainMonitor::new(Some(nodes[3].chain_source), nodes[3].tx_broadcaster.clone(), nodes[3].logger, node_cfgs[3].fee_estimator, &persister, keys_manager);
+	nodes[3].chain_monitor = &new_chain_monitor;
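+	// Deserialize both monitor snapshots; unless persist_both_monitors is set, the copy for
+	// chan_id_not_persisted is the stale, pre-claim one.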
+	let mut monitors = Vec::new();
+	for mut monitor_data in [original_monitor, updated_monitor] {
+		let (_, mut deserialized_monitor) = <(BlockHash, ChannelMonitor<EnforcingSigner>)>::read(&mut &monitor_data.0[..], keys_manager).unwrap();
+		monitors.push(deserialized_monitor);
+	}
+
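+	// Reload the ChannelManager from the pre-claim snapshot, handing it the deserialized monitors.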
+	let config = UserConfig::default();
+	nodes_3_deserialized = {
+		let mut channel_monitors = HashMap::new();
+		for monitor in monitors.iter_mut() {
+			channel_monitors.insert(monitor.get_funding_txo().0, monitor);
+		}
+		<(BlockHash, ChannelManager<EnforcingSigner, &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, &test_utils::TestLogger>)>::read(&mut &original_manager.0[..], ChannelManagerReadArgs {
+			default_config: config,
+			keys_manager,
+			fee_estimator: node_cfgs[3].fee_estimator,
+			chain_monitor: nodes[3].chain_monitor,
+			tx_broadcaster: nodes[3].tx_broadcaster.clone(),
+			logger: nodes[3].logger,
+			channel_monitors,
+		}).unwrap().1
+	};
+	nodes[3].node = &nodes_3_deserialized;
+
+	for monitor in monitors {
+		// On startup the preimage should have been copied into the non-persisted monitor:
+		assert!(monitor.get_stored_preimages().contains_key(&payment_hash));
+		nodes[3].chain_monitor.watch_channel(monitor.get_funding_txo().0.clone(), monitor).unwrap();
+	}
+	check_added_monitors!(nodes[3], 2);
+
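+	// The reload severed nodes[3]'s connections; update its peers' view to match.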
+	nodes[1].node.peer_disconnected(&nodes[3].node.get_our_node_id(), false);
+	nodes[2].node.peer_disconnected(&nodes[3].node.get_our_node_id(), false);
+
+	// During deserialization, we should have closed one channel (both, if persist_both_monitors
+	// is set) and broadcast its latest commitment transaction. We should also still have the
+	// original PaymentReceived event we never finished processing.
+	let events = nodes[3].node.get_and_clear_pending_events();
+	assert_eq!(events.len(), if persist_both_monitors { 3 } else { 2 });
+	if let Event::PaymentReceived { amt: 15_000_000, .. } = events[0] { } else { panic!(); }
+	if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[1] { } else { panic!(); }
+	if persist_both_monitors {
+		if let Event::ChannelClosed { reason: ClosureReason::OutdatedChannelManager, .. } = events[2] { } else { panic!(); }
+	}
+
+	assert_eq!(nodes[3].node.list_channels().len(), if persist_both_monitors { 0 } else { 1 });
+	if !persist_both_monitors {
+		// If one of the two channels is still live, reveal the payment preimage over it.
+
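+		// Reconnect nodes[2] and nodes[3] and exchange channel_reestablish messages.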
+		nodes[3].node.peer_connected(&nodes[2].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
+		let reestablish_1 = get_chan_reestablish_msgs!(nodes[3], nodes[2]);
+		nodes[2].node.peer_connected(&nodes[3].node.get_our_node_id(), &msgs::Init { features: InitFeatures::empty(), remote_network_address: None });
+		let reestablish_2 = get_chan_reestablish_msgs!(nodes[2], nodes[3]);
+
+		nodes[2].node.handle_channel_reestablish(&nodes[3].node.get_our_node_id(), &reestablish_1[0]);
+		get_event_msg!(nodes[2], MessageSendEvent::SendChannelUpdate, nodes[3].node.get_our_node_id());
+		assert!(nodes[2].node.get_and_clear_pending_msg_events().is_empty());
+
+		nodes[3].node.handle_channel_reestablish(&nodes[2].node.get_our_node_id(), &reestablish_2[0]);
+
+		// Once we call `get_and_clear_pending_msg_events` the holding cell is cleared and the HTLC
+		// claim should fly.
+		let ds_msgs = nodes[3].node.get_and_clear_pending_msg_events();
+		check_added_monitors!(nodes[3], 1);
+		assert_eq!(ds_msgs.len(), 2);
+		if let MessageSendEvent::SendChannelUpdate { .. } = ds_msgs[1] {} else { panic!(); }
+
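+		// Relay the update_fulfill_htlc from nodes[3] to nodes[2] and collect the HTLC updates
+		// nodes[2] generates for nodes[0] in response.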
+		let cs_updates = match ds_msgs[0] {
+			MessageSendEvent::UpdateHTLCs { ref updates, .. } => {
+				nodes[2].node.handle_update_fulfill_htlc(&nodes[3].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
+				check_added_monitors!(nodes[2], 1);
+				let cs_updates = get_htlc_update_msgs!(nodes[2], nodes[0].node.get_our_node_id());
+				expect_payment_forwarded!(nodes[2], Some(1000), false);
+				commitment_signed_dance!(nodes[2], nodes[3], updates.commitment_signed, false, true);
+				cs_updates
+			}
+			_ => panic!(),
+		};
+
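+		// Complete the claim back at the sender, which should now see the payment as sent.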
+		nodes[0].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &cs_updates.update_fulfill_htlcs[0]);
+		commitment_signed_dance!(nodes[0], nodes[2], cs_updates.commitment_signed, false, true);
+		expect_payment_sent!(nodes[0], payment_preimage);
+	}
+}
+
+#[test]
+fn test_partial_claim_before_restart() {
+	do_test_partial_claim_before_restart(false);
+	do_test_partial_claim_before_restart(true);
+}
+
 
 /// The possible events which may trigger a `max_dust_htlc_exposure` breach
 #[derive(Clone, Copy, PartialEq)]
 enum ExposureEvent {