diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs
index 5128600163a..3f7a1055b6d 100644
--- a/lightning/src/chain/channelmonitor.rs
+++ b/lightning/src/chain/channelmonitor.rs
@@ -869,6 +869,9 @@ impl<Signer: Sign> Writeable for ChannelMonitorImpl<Signer> {
 			writer.write_all(&txid[..])?;
 			writer.write_all(&byte_utils::be64_to_array(htlc_infos.len() as u64))?;
 			for &(ref htlc_output, ref htlc_source) in htlc_infos.iter() {
+				debug_assert!(htlc_source.is_none() || Some(**txid) == self.current_counterparty_commitment_txid
+						|| Some(**txid) == self.prev_counterparty_commitment_txid,
+					"HTLC Sources for all revoked commitment transactions should be none!");
 				serialize_htlc_in_commitment!(htlc_output);
 				htlc_source.as_ref().map(|b| b.as_ref()).write(writer)?;
 			}
@@ -1659,7 +1662,8 @@ impl<Signer: Sign> ChannelMonitor<Signer> {
 /// as long as we examine both the current counterparty commitment transaction and, if it hasn't
 /// been revoked yet, the previous one, we we will never "forget" to resolve an HTLC.
 macro_rules! fail_unbroadcast_htlcs {
-	($self: expr, $commitment_tx_type: expr, $commitment_tx_conf_height: expr, $confirmed_htlcs_list: expr, $logger: expr) => { {
+	($self: expr, $commitment_tx_type: expr, $commitment_txid_confirmed: expr,
+	 $commitment_tx_conf_height: expr, $confirmed_htlcs_list: expr, $logger: expr) => { {
 		macro_rules! check_htlc_fails {
 			($txid: expr, $commitment_tx: expr) => {
 				if let Some(ref latest_outpoints) = $self.counterparty_claimable_outpoints.get($txid) {
@@ -1675,9 +1679,14 @@ macro_rules! fail_unbroadcast_htlcs {
 							// cannot currently change after channel initialization, so we don't
 							// need to here.
 							let confirmed_htlcs_iter: &mut Iterator<Item = (&HTLCOutputInCommitment, Option<&HTLCSource>)> = &mut $confirmed_htlcs_list;
+							let mut matched_htlc = false;
 							for (ref broadcast_htlc, ref broadcast_source) in confirmed_htlcs_iter {
-								if broadcast_htlc.transaction_output_index.is_some() && Some(&**source) == *broadcast_source {
+								if broadcast_htlc.transaction_output_index.is_some() &&
+									(Some(&**source) == *broadcast_source ||
+									 (broadcast_source.is_none() &&
+									  broadcast_htlc.payment_hash == htlc.payment_hash &&
+									  broadcast_htlc.amount_msat == htlc.amount_msat)) {
 									matched_htlc = true;
 									break;
 								}
@@ -1693,7 +1702,7 @@ macro_rules! fail_unbroadcast_htlcs {
 								}
 							});
 							let entry = OnchainEventEntry {
-								txid: *$txid,
+								txid: $commitment_txid_confirmed,
 								height: $commitment_tx_conf_height,
 								event: OnchainEvent::HTLCUpdate {
 									source: (**source).clone(),
@@ -1702,8 +1711,9 @@ macro_rules! fail_unbroadcast_htlcs {
 									commitment_tx_output_idx: None,
 								},
 							};
-							log_trace!($logger, "Failing HTLC with payment_hash {} from {} counterparty commitment tx due to broadcast of {} commitment transaction, waiting for confirmation (at height {})",
-								log_bytes!(htlc.payment_hash.0), $commitment_tx, $commitment_tx_type, entry.confirmation_threshold());
+							log_trace!($logger, "Failing HTLC with payment_hash {} from {} counterparty commitment tx due to broadcast of {} commitment transaction {}, waiting for confirmation (at height {})",
+								log_bytes!(htlc.payment_hash.0), $commitment_tx, $commitment_tx_type,
+								$commitment_txid_confirmed, entry.confirmation_threshold());
 							$self.onchain_events_awaiting_threshold_conf.push(entry);
 						}
 					}
@@ -2100,7 +2110,16 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 				}
 				self.counterparty_commitment_txn_on_chain.insert(commitment_txid, commitment_number);
 
-				fail_unbroadcast_htlcs!(self, "revoked counterparty", height, [].iter().map(|a| *a), logger);
+				if let Some(per_commitment_data) = per_commitment_option {
+					fail_unbroadcast_htlcs!(self, "revoked counterparty", commitment_txid, height,
+						per_commitment_data.iter().map(|(htlc, htlc_source)|
+							(htlc, htlc_source.as_ref().map(|htlc_source| htlc_source.as_ref()))
+						), logger);
+				} else {
+					debug_assert!(false, "We should have per-commitment option for any recognized old commitment txn");
+					fail_unbroadcast_htlcs!(self, "revoked counterparty", commitment_txid, height,
+						[].iter().map(|reference| *reference), logger);
+				}
 			}
 		} else if let Some(per_commitment_data) = per_commitment_option {
 			// While this isn't useful yet, there is a potential race where if a counterparty
@@ -2116,7 +2135,10 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 			self.counterparty_commitment_txn_on_chain.insert(commitment_txid, commitment_number);
 
 			log_info!(logger, "Got broadcast of non-revoked counterparty commitment transaction {}", commitment_txid);
-			fail_unbroadcast_htlcs!(self, "counterparty", height, per_commitment_data.iter().map(|(a, b)| (a, b.as_ref().map(|b| b.as_ref()))), logger);
+			fail_unbroadcast_htlcs!(self, "counterparty", commitment_txid, height,
+				per_commitment_data.iter().map(|(htlc, htlc_source)|
+					(htlc, htlc_source.as_ref().map(|htlc_source| htlc_source.as_ref()))
+				), logger);
 
 			let htlc_claim_reqs = self.get_counterparty_htlc_output_claim_reqs(commitment_number, commitment_txid, Some(tx));
 			for req in htlc_claim_reqs {
@@ -2272,7 +2294,9 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 			let res = self.get_broadcasted_holder_claims(&self.current_holder_commitment_tx, height);
 			let mut to_watch = self.get_broadcasted_holder_watch_outputs(&self.current_holder_commitment_tx, tx);
 			append_onchain_update!(res, to_watch);
-			fail_unbroadcast_htlcs!(self, "latest holder", height, self.current_holder_commitment_tx.htlc_outputs.iter().map(|(a, _, c)| (a, c.as_ref())), logger);
+			fail_unbroadcast_htlcs!(self, "latest holder", commitment_txid, height,
+				self.current_holder_commitment_tx.htlc_outputs.iter()
+				.map(|(htlc, _, htlc_source)| (htlc, htlc_source.as_ref())), logger);
 		} else if let &Some(ref holder_tx) = &self.prev_holder_signed_commitment_tx {
 			if holder_tx.txid == commitment_txid {
 				is_holder_tx = true;
@@ -2280,7 +2304,9 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 				let res = self.get_broadcasted_holder_claims(holder_tx, height);
 				let mut to_watch = self.get_broadcasted_holder_watch_outputs(holder_tx, tx);
 				append_onchain_update!(res, to_watch);
-				fail_unbroadcast_htlcs!(self, "previous holder", height, holder_tx.htlc_outputs.iter().map(|(a, _, c)| (a, c.as_ref())), logger);
+				fail_unbroadcast_htlcs!(self, "previous holder", commitment_txid, height,
+					holder_tx.htlc_outputs.iter().map(|(htlc, _, htlc_source)| (htlc, htlc_source.as_ref())),
+					logger);
 			}
 		}
 
@@ -2561,7 +2587,8 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 						matured_htlcs.push(source.clone());
 					}
 
-					log_debug!(logger, "HTLC {} failure update has got enough confirmations to be passed upstream", log_bytes!(payment_hash.0));
+					log_debug!(logger, "HTLC {} failure update in {} has got enough confirmations to be passed upstream",
+						log_bytes!(payment_hash.0), entry.txid);
 					self.pending_monitor_events.push(MonitorEvent::HTLCEvent(HTLCUpdate {
 						payment_hash,
 						payment_preimage: None,
@@ -2640,7 +2667,10 @@ impl<Signer: Sign> ChannelMonitorImpl<Signer> {
 		F::Target: FeeEstimator,
 		L::Target: Logger,
 	{
-		self.onchain_events_awaiting_threshold_conf.retain(|ref entry| entry.txid != *txid);
+		self.onchain_events_awaiting_threshold_conf.retain(|ref entry| if entry.txid == *txid {
+			log_info!(logger, "Removing onchain event with txid {}", txid);
+			false
+		} else { true });
 		self.onchain_tx_handler.transaction_unconfirmed(txid, broadcaster, fee_estimator, logger);
 	}
 
diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs
index a716d80677c..ddab3353e71 100644
--- a/lightning/src/ln/functional_tests.rs
+++ b/lightning/src/ln/functional_tests.rs
@@ -2515,10 +2515,10 @@ fn claim_htlc_outputs_shared_tx() {
 	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
 
 	// Rebalance the network to generate htlc in the two directions
-	send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
+	send_payment(&nodes[0], &[&nodes[1]], 8_000_000);
 	// node[0] is gonna to revoke an old state thus node[1] should be able to claim both offered/received HTLC outputs on top of commitment tx
-	let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
-	let (_payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000);
+	let payment_preimage_1 = route_payment(&nodes[0], &[&nodes[1]], 3_000_000).0;
+	let (_payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000);
 
 	// Get the will-be-revoked local txn from node[0]
 	let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
@@ -2541,9 +2541,9 @@ fn claim_htlc_outputs_shared_tx() {
 		check_added_monitors!(nodes[1], 1);
 		check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
 		connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
-		expect_payment_failed!(nodes[1], payment_hash_2, true);
+		assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
 
-		let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+		let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
 		assert_eq!(node_txn.len(), 2); // ChannelMonitor: penalty tx, ChannelManager: local commitment
 
 		assert_eq!(node_txn[0].input.len(), 3); // Claim the revoked output + both revoked HTLC outputs
@@ -2560,7 +2560,13 @@ fn claim_htlc_outputs_shared_tx() {
 
 		// Next nodes[1] broadcasts its current local tx state:
 		assert_eq!(node_txn[1].input.len(), 1);
-		assert_eq!(node_txn[1].input[0].previous_output.txid, chan_1.3.txid()); //Spending funding tx unique txouput, tx broadcasted by ChannelManager
+		check_spends!(node_txn[1], chan_1.3);
+
+		// Finally, mine the penalty transaction and check that we get an HTLC failure after
+		// ANTI_REORG_DELAY confirmations.
+		mine_transaction(&nodes[1], &node_txn[0]);
+		connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
+		expect_payment_failed!(nodes[1], payment_hash_2, true);
 	}
 	get_announce_close_broadcast_events(&nodes, 0, 1);
 	assert_eq!(nodes[0].node.list_channels().len(), 0);
@@ -2579,11 +2585,11 @@ fn claim_htlc_outputs_single_tx() {
 	let chan_1 = create_announced_chan_between_nodes(&nodes, 0, 1, InitFeatures::known(), InitFeatures::known());
 
 	// Rebalance the network to generate htlc in the two directions
-	send_payment(&nodes[0], &vec!(&nodes[1])[..], 8000000);
+	send_payment(&nodes[0], &[&nodes[1]], 8_000_000);
 	// node[0] is gonna to revoke an old state thus node[1] should be able to claim both offered/received HTLC outputs on top of commitment tx, but this
 	// time as two different claim transactions as we're gonna to timeout htlc with given a high current height
-	let payment_preimage_1 = route_payment(&nodes[0], &vec!(&nodes[1])[..], 3000000).0;
-	let (_payment_preimage_2, payment_hash_2, _payment_secret_2) = route_payment(&nodes[1], &vec!(&nodes[0])[..], 3000000);
+	let payment_preimage_1 = route_payment(&nodes[0], &[&nodes[1]], 3_000_000).0;
+	let (_payment_preimage_2, payment_hash_2, _payment_secret_2) = route_payment(&nodes[1], &[&nodes[0]], 3_000_000);
 
 	// Get the will-be-revoked local txn from node[0]
 	let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan_1.2);
@@ -2605,9 +2611,9 @@ fn claim_htlc_outputs_single_tx() {
 		}
 
 		connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
-		expect_payment_failed!(nodes[1], payment_hash_2, true);
+		assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
 
-		let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap();
+		let node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
 		assert!(node_txn.len() == 9 || node_txn.len() == 10);
 
 		// Check the pair local commitment and HTLC-timeout broadcast due to HTLC expiration
@@ -2635,6 +2641,14 @@ fn claim_htlc_outputs_single_tx() {
 		assert_eq!(*witness_lens.iter().skip(0).next().unwrap(), 77); // revoked to_local
 		assert_eq!(*witness_lens.iter().skip(1).next().unwrap(), OFFERED_HTLC_SCRIPT_WEIGHT); // revoked offered HTLC
 		assert_eq!(*witness_lens.iter().skip(2).next().unwrap(), ACCEPTED_HTLC_SCRIPT_WEIGHT); // revoked received HTLC
+
+		// Finally, mine the penalty transactions and check that we get an HTLC failure after
+		// ANTI_REORG_DELAY confirmations.
+		mine_transaction(&nodes[1], &node_txn[2]);
+		mine_transaction(&nodes[1], &node_txn[3]);
+		mine_transaction(&nodes[1], &node_txn[4]);
+		connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
+		expect_payment_failed!(nodes[1], payment_hash_2, true);
 	}
 	get_announce_close_broadcast_events(&nodes, 0, 1);
 	assert_eq!(nodes[0].node.list_channels().len(), 0);
@@ -7282,37 +7296,25 @@ fn do_test_sweep_outbound_htlc_failure_update(revoked: bool, local: bool) {
 		check_added_monitors!(nodes[0], 1);
 		check_closed_event!(nodes[0], 1, ClosureReason::CommitmentTxConfirmed);
 		assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
+		connect_blocks(&nodes[0], TEST_FINAL_CLTV - 1); // Confirm blocks until the HTLC expires
-		timeout_tx.push(nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap()[1].clone());
+		timeout_tx = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().drain(..)
+			.filter(|tx| tx.input[0].previous_output.txid == bs_commitment_tx[0].txid()).collect();
+		check_spends!(timeout_tx[0], bs_commitment_tx[0]);
+		// For both a revoked or non-revoked commitment transaction, after ANTI_REORG_DELAY the
+		// dust HTLC should have been failed.
+		expect_payment_failed!(nodes[0], dust_hash, true);
+
 		if !revoked {
-			expect_payment_failed!(nodes[0], dust_hash, true);
 			assert_eq!(timeout_tx[0].input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT);
-			// We fail non-dust-HTLC 2 by broadcast of local timeout tx on remote commitment tx
-			mine_transaction(&nodes[0], &timeout_tx[0]);
-			assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
-			connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
-			expect_payment_failed!(nodes[0], non_dust_hash, true);
 		} else {
-			// If revoked, both dust & non-dust HTLCs should have been failed after ANTI_REORG_DELAY confs of revoked
-			// commitment tx
-			let events = nodes[0].node.get_and_clear_pending_events();
-			assert_eq!(events.len(), 2);
-			let first;
-			match events[0] {
-				Event::PaymentPathFailed { payment_hash, .. } => {
-					if payment_hash == dust_hash { first = true; }
-					else { first = false; }
-				},
-				_ => panic!("Unexpected event"),
-			}
-			match events[1] {
-				Event::PaymentPathFailed { payment_hash, .. } => {
-					if first { assert_eq!(payment_hash, non_dust_hash); }
-					else { assert_eq!(payment_hash, dust_hash); }
-				},
-				_ => panic!("Unexpected event"),
-			}
+			assert_eq!(timeout_tx[0].lock_time, 0);
 		}
+		// We fail non-dust-HTLC 2 by broadcast of local timeout/revocation-claim tx
+		mine_transaction(&nodes[0], &timeout_tx[0]);
+		assert_eq!(nodes[0].node.get_and_clear_pending_events().len(), 0);
+		connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1);
+		expect_payment_failed!(nodes[0], non_dust_hash, true);
 	}
 }
 
diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs
index 492a6553770..7c46f7b6d28 100644
--- a/lightning/src/ln/monitor_tests.rs
+++ b/lightning/src/ln/monitor_tests.rs
@@ -94,6 +94,55 @@ fn test_spendable_output<'a, 'b, 'c, 'd>(node: &'a Node<'b, 'c, 'd>, spendable_t
 	} else { panic!(); }
 }
 
+#[test]
+fn revoked_output_htlc_resolution_timing() {
+	// Tests that HTLCs which were present in a broadcasted remote revoked commitment transaction
+	// are resolved only after a spend of the HTLC output reaches six confirmations. Previously
+	// they would resolve after the revoked commitment transaction itself reached six
+	// confirmations.
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000, InitFeatures::known(), InitFeatures::known());
+
+	let payment_hash_1 = route_payment(&nodes[1], &[&nodes[0]], 1_000_000).1;
+
+	// Get a commitment transaction which contains the HTLC we care about, but which we'll revoke
+	// before forwarding.
+	let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan.2);
+	assert_eq!(revoked_local_txn.len(), 1);
+
+	// Route a dust payment to revoke the above commitment transaction
+	route_payment(&nodes[0], &[&nodes[1]], 1_000);
+
+	// Confirm the revoked commitment transaction, closing the channel.
+	mine_transaction(&nodes[1], &revoked_local_txn[0]);
+	check_added_monitors!(nodes[1], 1);
+	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+	check_closed_broadcast!(nodes[1], true);
+
+	let bs_spend_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0);
+	assert_eq!(bs_spend_txn.len(), 2);
+	check_spends!(bs_spend_txn[0], revoked_local_txn[0]);
+	check_spends!(bs_spend_txn[1], chan.3);
+
+	// After the commitment transaction confirms, we should still wait on the HTLC spend
+	// transaction to confirm before resolving the HTLC.
+	connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+
+	// Spend the HTLC output, generating a HTLC failure event after ANTI_REORG_DELAY confirmations.
+	mine_transaction(&nodes[1], &bs_spend_txn[0]);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+
+	connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
+	expect_payment_failed!(nodes[1], payment_hash_1, true);
+}
+
 #[test]
 fn chanmon_claim_value_coop_close() {
 	// Tests `get_claimable_balances` returns the correct values across a simple cooperative claim.
diff --git a/lightning/src/ln/reorg_tests.rs b/lightning/src/ln/reorg_tests.rs
index 0025598e4c2..a7090e4e735 100644
--- a/lightning/src/ln/reorg_tests.rs
+++ b/lightning/src/ln/reorg_tests.rs
@@ -185,6 +185,77 @@ fn test_onchain_htlc_timeout_delay_remote_commitment() {
 	do_test_onchain_htlc_reorg(false, false);
 }
 
+#[test]
+fn test_counterparty_revoked_reorg() {
+	// Test what happens when a revoked counterparty transaction is broadcast but then reorg'd out
+	// of the main chain. Specifically, HTLCs in the latest commitment transaction which are not
+	// included in the revoked commitment transaction should not be considered failed, and should
+	// still be claim-from-able after the reorg.
+	let chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]);
+	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	let chan = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 500_000_000, InitFeatures::known(), InitFeatures::known());
+
+	// Get the initial commitment transaction for broadcast, before any HTLCs are added at all.
+	let revoked_local_txn = get_local_commitment_txn!(nodes[0], chan.2);
+	assert_eq!(revoked_local_txn.len(), 1);
+
+	// Now add two HTLCs in each direction, one dust and one not.
+	route_payment(&nodes[0], &[&nodes[1]], 5_000_000);
+	route_payment(&nodes[0], &[&nodes[1]], 5_000);
+	let (payment_preimage_3, payment_hash_3, ..) = route_payment(&nodes[1], &[&nodes[0]], 4_000_000);
+	let payment_hash_4 = route_payment(&nodes[1], &[&nodes[0]], 4_000).1;
+
+	nodes[0].node.claim_funds(payment_preimage_3);
+	let _ = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id());
+	check_added_monitors!(nodes[0], 1);
+	expect_payment_claimed!(nodes[0], payment_hash_3, 4_000_000);
+
+	let mut unrevoked_local_txn = get_local_commitment_txn!(nodes[0], chan.2);
+	assert_eq!(unrevoked_local_txn.len(), 3); // commitment + 2 HTLC txn
+	// Sort the unrevoked transactions in reverse order, ie commitment tx, then HTLC 1 then HTLC 3
+	unrevoked_local_txn.sort_unstable_by_key(|tx| 1_000_000 - tx.output.iter().map(|outp| outp.value).sum::<u64>());
+
+	// Now mine A's old commitment transaction, which should close the channel, but take no action
+	// on any of the HTLCs, at least until we get six confirmations (which we won't get).
+	mine_transaction(&nodes[1], &revoked_local_txn[0]);
+	check_added_monitors!(nodes[1], 1);
+	check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+	check_closed_broadcast!(nodes[1], true);
+
+	// Connect up to one block before the revoked transaction would be considered final, then do a
+	// reorg that disconnects the full chain and goes up to the height at which the revoked
+	// transaction would be final.
+	let theoretical_conf_height = nodes[1].best_block_info().1 + ANTI_REORG_DELAY - 1;
+	connect_blocks(&nodes[1], ANTI_REORG_DELAY - 2);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+
+	disconnect_all_blocks(&nodes[1]);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+
+	connect_blocks(&nodes[1], theoretical_conf_height);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+
+	// Now connect A's latest commitment transaction instead and resolve the HTLCs
+	mine_transaction(&nodes[1], &unrevoked_local_txn[0]);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+	assert!(nodes[1].node.get_and_clear_pending_events().is_empty());
+
+	// Connect the HTLC claim transaction for HTLC 3
+	mine_transaction(&nodes[1], &unrevoked_local_txn[2]);
+	expect_payment_sent!(nodes[1], payment_preimage_3);
+	assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty());
+
+	// Connect blocks to confirm the unrevoked commitment transaction
+	connect_blocks(&nodes[1], ANTI_REORG_DELAY - 2);
+	expect_payment_failed!(nodes[1], payment_hash_4, true);
+}
+
 fn do_test_unconf_chan(reload_node: bool, reorg_after_reload: bool, use_funding_unconfirmed: bool, connect_style: ConnectStyle) {
 	// After creating a chan between nodes, we disconnect all blocks previously seen to force a
 	// channel close on nodes[0] side. We also use this to provide very basic testing of logic