
Commit 1b6516c

Use new monitor persistence flow in funding_signed handling
In the previous commit, we moved all our `ChannelMonitorUpdate` pipelines to use a new async path via the `handle_new_monitor_update` macro. This avoids having two message sending pathways and simply sends messages in the "monitor update completed" flow, which is shared between sync and async monitor updates. Here we reuse the new macro for handling `funding_signed` messages when doing an initial `ChannelMonitor` persistence. This provides a similar benefit, simplifying the code a trivial amount, but importantly allows us to fully remove the original `handle_monitor_update_res` macro.
1 parent be5d67e · commit 1b6516c
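The "monitor update completed" flow described above can be sketched in isolation. This is a minimal, illustrative Rust sketch of the control flow, not LDK's actual macro: the enum variants match `ChannelMonitorUpdateStatus` as it appears in this diff, but the helper functions are invented for illustration.

enum ChannelMonitorUpdateStatus { Completed, InProgress, PermanentFailure }

fn handle_update_status(status: ChannelMonitorUpdateStatus) {
	match status {
		// A sync persister returns Completed immediately; an async persister
		// reaches this same arm later via its completion notification, so
		// messages are only ever sent from this one shared path.
		ChannelMonitorUpdateStatus::Completed => resume_channel(),
		// Async persistence still in flight: pause the channel and hold any
		// outbound messages until the completion flow resumes it.
		ChannelMonitorUpdateStatus::InProgress => pause_channel(),
		// Unrecoverable persistence failure: force-close the channel.
		ChannelMonitorUpdateStatus::PermanentFailure => force_close_channel(),
	}
}

fn resume_channel() { println!("monitor persisted; sending held messages"); }
fn pause_channel() { println!("persistence pending; holding messages"); }
fn force_close_channel() { println!("persistence failed; force-closing"); }

fn main() {
	handle_update_status(ChannelMonitorUpdateStatus::InProgress);
}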

2 files changed: +30 -106 lines changed

lightning/src/ln/channel.rs

Lines changed: 4 additions & 2 deletions
@@ -2301,7 +2301,7 @@ impl<Signer: Sign> Channel<Signer> {

 	/// Handles a funding_signed message from the remote end.
 	/// If this call is successful, broadcast the funding transaction (and not before!)
-	pub fn funding_signed<L: Deref>(&mut self, msg: &msgs::FundingSigned, best_block: BestBlock, logger: &L) -> Result<(ChannelMonitor<Signer>, Transaction, Option<msgs::ChannelReady>), ChannelError> where L::Target: Logger {
+	pub fn funding_signed<L: Deref>(&mut self, msg: &msgs::FundingSigned, best_block: BestBlock, logger: &L) -> Result<ChannelMonitor<Signer>, ChannelError> where L::Target: Logger {
 		if !self.is_outbound() {
 			return Err(ChannelError::Close("Received funding_signed for an inbound channel?".to_owned()));
 		}
@@ -2370,7 +2370,9 @@ impl<Signer: Sign> Channel<Signer> {

 		log_info!(logger, "Received funding_signed from peer for channel {}", log_bytes!(self.channel_id()));

-		Ok((channel_monitor, self.funding_transaction.as_ref().cloned().unwrap(), self.check_get_channel_ready(0)))
+		let need_channel_ready = self.check_get_channel_ready(0).is_some();
+		self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
+		Ok(channel_monitor)
 	}

 	/// Handles a channel_ready message from our peer. If we've already sent our channel_ready
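The channel.rs change inverts the old contract: rather than returning the funding transaction and the optional `channel_ready` for the caller to act on immediately, `funding_signed` now records the pending work and lets the monitor-persistence flow release it. An annotated restatement of the new tail of the function; the comments are editorial, with argument meanings inferred from the parameter names in the removed `handle_monitor_update_res!` macro below:

// Whether a channel_ready should eventually go out is captured as a flag...
let need_channel_ready = self.check_get_channel_ready(0).is_some();
// ...and stashed in the channel's paused-monitor state along with empty HTLC
// resolution lists. The arguments are (resend_raa, resend_commitment,
// resend_channel_ready, failed_forwards, failed_fails,
// failed_finalized_fulfills); they are replayed once persistence completes.
self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new());
// Only the monitor is returned; the caller hands it to watch_channel().
Ok(channel_monitor)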

lightning/src/ln/channelmanager.rs

Lines changed: 26 additions & 104 deletions
@@ -1477,69 +1477,6 @@ macro_rules! remove_channel {
 	}
 }

-macro_rules! handle_monitor_update_res {
-	($self: ident, $err: expr, $chan: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr, $chan_id: expr) => {
-		match $err {
-			ChannelMonitorUpdateStatus::PermanentFailure => {
-				log_error!($self.logger, "Closing channel {} due to monitor update ChannelMonitorUpdateStatus::PermanentFailure", log_bytes!($chan_id[..]));
-				update_maps_on_chan_removal!($self, $chan);
-				let res: Result<(), _> = Err(MsgHandleErrInternal::from_finish_shutdown("ChannelMonitor storage failure".to_owned(), *$chan_id, $chan.get_user_id(),
-					$chan.force_shutdown(false), $self.get_channel_update_for_broadcast(&$chan).ok() ));
-				(res, true)
-			},
-			ChannelMonitorUpdateStatus::InProgress => {
-				log_info!($self.logger, "Disabling channel {} due to monitor update in progress. On restore will send {} and process {} forwards, {} fails, and {} fulfill finalizations",
-					log_bytes!($chan_id[..]),
-					if $resend_commitment && $resend_raa {
-							match $action_type {
-								RAACommitmentOrder::CommitmentFirst => { "commitment then RAA" },
-								RAACommitmentOrder::RevokeAndACKFirst => { "RAA then commitment" },
-							}
-						} else if $resend_commitment { "commitment" }
-						else if $resend_raa { "RAA" }
-						else { "nothing" },
-					(&$failed_forwards as &Vec<(PendingHTLCInfo, u64)>).len(),
-					(&$failed_fails as &Vec<(HTLCSource, PaymentHash, HTLCFailReason)>).len(),
-					(&$failed_finalized_fulfills as &Vec<HTLCSource>).len());
-				if !$resend_commitment {
-					debug_assert!($action_type == RAACommitmentOrder::RevokeAndACKFirst || !$resend_raa);
-				}
-				if !$resend_raa {
-					debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst || !$resend_commitment);
-				}
-				$chan.monitor_updating_paused($resend_raa, $resend_commitment, $resend_channel_ready, $failed_forwards, $failed_fails, $failed_finalized_fulfills);
-				(Err(MsgHandleErrInternal::from_chan_no_close(ChannelError::Ignore("Failed to update ChannelMonitor".to_owned()), *$chan_id)), false)
-			},
-			ChannelMonitorUpdateStatus::Completed => {
-				(Ok(()), false)
-			},
-		}
-	};
-	($self: ident, $err: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $resend_channel_ready: expr, $failed_forwards: expr, $failed_fails: expr, $failed_finalized_fulfills: expr) => { {
-		let (res, drop) = handle_monitor_update_res!($self, $err, $entry.get_mut(), $action_type, $resend_raa, $resend_commitment, $resend_channel_ready, $failed_forwards, $failed_fails, $failed_finalized_fulfills, $entry.key());
-		if drop {
-			$entry.remove_entry();
-		}
-		res
-	} };
-	($self: ident, $err: expr, $entry: expr, $action_type: path, $chan_id: expr, COMMITMENT_UPDATE_ONLY) => { {
-		debug_assert!($action_type == RAACommitmentOrder::CommitmentFirst);
-		handle_monitor_update_res!($self, $err, $entry, $action_type, false, true, false, Vec::new(), Vec::new(), Vec::new(), $chan_id)
-	} };
-	($self: ident, $err: expr, $entry: expr, $action_type: path, $chan_id: expr, NO_UPDATE) => {
-		handle_monitor_update_res!($self, $err, $entry, $action_type, false, false, false, Vec::new(), Vec::new(), Vec::new(), $chan_id)
-	};
-	($self: ident, $err: expr, $entry: expr, $action_type: path, $resend_channel_ready: expr, OPTIONALLY_RESEND_FUNDING_LOCKED) => {
-		handle_monitor_update_res!($self, $err, $entry, $action_type, false, false, $resend_channel_ready, Vec::new(), Vec::new(), Vec::new())
-	};
-	($self: ident, $err: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr) => {
-		handle_monitor_update_res!($self, $err, $entry, $action_type, $resend_raa, $resend_commitment, false, Vec::new(), Vec::new(), Vec::new())
-	};
-	($self: ident, $err: expr, $entry: expr, $action_type: path, $resend_raa: expr, $resend_commitment: expr, $failed_forwards: expr, $failed_fails: expr) => {
-		handle_monitor_update_res!($self, $err, $entry, $action_type, $resend_raa, $resend_commitment, false, $failed_forwards, $failed_fails, Vec::new())
-	};
-}
-
 macro_rules! send_channel_ready {
 	($self: ident, $pending_msg_events: expr, $channel: expr, $channel_ready_msg: expr) => {{
 		$pending_msg_events.push(events::MessageSendEvent::SendChannelReady {
@@ -1597,9 +1534,9 @@ macro_rules! handle_new_monitor_update {
 				res
 			},
 			ChannelMonitorUpdateStatus::Completed => {
-				if $chan.get_next_monitor_update()
-					.expect("We can't be processing a monitor udpate if it isn't queued")
-					.update_id == $update_id &&
+				if ($update_id == 0 || $chan.get_next_monitor_update()
+					.expect("We can't be processing a monitor update if it isn't queued")
+					.update_id == $update_id) &&
 					$chan.get_latest_monitor_update_id() == $update_id
 				{
 					let mut updates = $chan.monitor_updating_restored(&$self.logger,
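This guard change is what lets the macro serve the funding_signed path: the initial ChannelMonitor persistence done via watch_channel is not a queued ChannelMonitorUpdate, so there is nothing for get_next_monitor_update to return, and update ID 0 acts as the sentinel for it. A self-contained restatement of the Completed-arm predicate; the accessor values and names are illustrative stand-ins (the real macro panics via expect if a nonzero ID has no queued update, where this sketch returns false):

fn update_fully_applied(update_id: u64, next_queued_update_id: Option<u64>,
	latest_update_id: u64) -> bool {
	// update_id == 0 marks the initial persistence via watch_channel(),
	// which never appears in the queue of ChannelMonitorUpdates.
	(update_id == 0 || next_queued_update_id == Some(update_id))
		&& latest_update_id == update_id
}

fn main() {
	// Initial persistence: no queued update, both IDs 0 -> channel restored.
	assert!(update_fully_applied(0, None, 0));
	// A queued update that is also the newest one -> channel restored.
	assert!(update_fully_applied(3, Some(3), 3));
	// Newer updates still pending -> keep the channel paused.
	assert!(!update_fully_applied(3, Some(3), 5));
}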
@@ -4877,45 +4814,30 @@ impl<M: Deref, T: Deref, K: Deref, F: Deref, L: Deref> ChannelManager<M, T, K, F
 	}

 	fn internal_funding_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::FundingSigned) -> Result<(), MsgHandleErrInternal> {
-		let funding_tx = {
-			let best_block = *self.best_block.read().unwrap();
-			let mut channel_lock = self.channel_state.lock().unwrap();
-			let channel_state = &mut *channel_lock;
-			match channel_state.by_id.entry(msg.channel_id) {
-				hash_map::Entry::Occupied(mut chan) => {
-					if chan.get().get_counterparty_node_id() != *counterparty_node_id {
-						return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
-					}
-					let (monitor, funding_tx, channel_ready) = match chan.get_mut().funding_signed(&msg, best_block, &self.logger) {
-						Ok(update) => update,
-						Err(e) => try_chan_entry!(self, Err(e), chan),
-					};
-					match self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor) {
-						ChannelMonitorUpdateStatus::Completed => {},
-						e => {
-							let mut res = handle_monitor_update_res!(self, e, chan, RAACommitmentOrder::RevokeAndACKFirst, channel_ready.is_some(), OPTIONALLY_RESEND_FUNDING_LOCKED);
-							if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
-								// We weren't able to watch the channel to begin with, so no updates should be made on
-								// it. Previously, full_stack_target found an (unreachable) panic when the
-								// monitor update contained within `shutdown_finish` was applied.
-								if let Some((ref mut shutdown_finish, _)) = shutdown_finish {
-									shutdown_finish.0.take();
-								}
-							}
-							return res
-						},
-					}
-					if let Some(msg) = channel_ready {
-						send_channel_ready!(self, channel_state.pending_msg_events, chan.get(), msg);
+		let best_block = *self.best_block.read().unwrap();
+		let mut channel_lock = self.channel_state.lock().unwrap();
+		let channel_state = &mut *channel_lock;
+		match channel_state.by_id.entry(msg.channel_id) {
+			hash_map::Entry::Occupied(mut chan) => {
+				if chan.get().get_counterparty_node_id() != *counterparty_node_id {
+					return Err(MsgHandleErrInternal::send_err_msg_no_close("Got a message for a channel from the wrong node!".to_owned(), msg.channel_id));
+				}
+				let monitor = try_chan_entry!(self,
+					chan.get_mut().funding_signed(&msg, best_block, &self.logger), chan);
+				let update_res = self.chain_monitor.watch_channel(chan.get().get_funding_txo().unwrap(), monitor);
+				let mut res = handle_new_monitor_update!(self, update_res, 0, channel_lock, channel_state.pending_msg_events, chan);
+				if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
+					// We weren't able to watch the channel to begin with, so no updates should be made on
+					// it. Previously, full_stack_target found an (unreachable) panic when the
+					// monitor update contained within `shutdown_finish` was applied.
+					if let Some((ref mut shutdown_finish, _)) = shutdown_finish {
+						shutdown_finish.0.take();
 					}
-					funding_tx
-				},
-				hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
-			}
-		};
-		log_info!(self.logger, "Broadcasting funding transaction with txid {}", funding_tx.txid());
-		self.tx_broadcaster.broadcast_transaction(&funding_tx);
-		Ok(())
+				}
+				res
+			},
+			hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id))
+		}
 	}

 	fn internal_channel_ready(&self, counterparty_node_id: &PublicKey, msg: &msgs::ChannelReady) -> Result<(), MsgHandleErrInternal> {
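Note what disappeared from internal_funding_signed: the log_info and tx_broadcaster.broadcast_transaction calls. Since funding_signed no longer returns the funding transaction, the broadcast presumably now happens in the shared "monitor update completed" flow the commit message describes (the monitor_updating_restored path driven by handle_new_monitor_update), so the transaction only goes out once the initial ChannelMonitor write is durable. A hedged sketch of the two persistence outcomes; the types and functions below are illustrative stand-ins, not LDK APIs:

enum Status { Completed, InProgress }

struct Transaction { txid: String }

fn monitor_updating_restored(funding_tx: &Transaction) {
	// Stand-in for the shared completion path that releases held work.
	println!("Broadcasting funding transaction with txid {}", funding_tx.txid);
}

fn on_initial_persist(status: Status, funding_tx: &Transaction) {
	match status {
		// A sync persister completes immediately, so the broadcast still
		// happens during funding_signed handling, just via the shared path.
		Status::Completed => monitor_updating_restored(funding_tx),
		// An async persister defers: its completion notification later
		// drives the same restored path, which then broadcasts.
		Status::InProgress => println!("persist pending; broadcast deferred"),
	}
}

fn main() {
	on_initial_persist(Status::Completed, &Transaction { txid: "deadbeef".into() });
}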
