Commit 20fb6b6

Rebroadcast channel update of a force-closed channel, if needed
- This commit adds the ability to rebroadcast the channel_update message for a force-closed channel if we fail to broadcast it to any peer at the time the channel is closed.
1 parent 6e40e5f commit 20fb6b6

File tree

1 file changed: +89, −3 lines changed

lightning/src/ln/channelmanager.rs

Lines changed: 89 additions & 3 deletions
@@ -1257,13 +1257,28 @@ where
 	pending_offers_messages: Mutex<Vec<PendingOnionMessage<OffersMessage>>>,
 
+	/// Tracks the channel_update messages that were not broadcast because
+	/// we were not connected to any peers.
+	pending_broadcast_messages: Mutex<PendingBroadcastMessages>,
+
 	entropy_source: ES,
 	node_signer: NS,
 	signer_provider: SP,
 
 	logger: L,
 }
 
+pub struct PendingBroadcastMessages {
+	/// The original broadcast events.
+	pub broadcast_message: Vec<MessageSendEvent>,
+	/// The number of ticks remaining before we retry broadcasting.
+	pub ticks_remaining: Option<i32>,
+}
+
+/// The number of ticks that may elapse before we try to rebroadcast
+/// the pending broadcast messages.
+const PENDING_BROADCAST_MESSAGES_TIMER_TICKS: i32 = 2;
+
 /// Chain-related parameters used to construct a new `ChannelManager`.
 ///
 /// Typically, the block-specific parameters are derived from the best block hash for the network,
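The two fields are coupled: the countdown is armed only when the queue transitions from empty to non-empty, so a single countdown covers any number of queued updates. A minimal sketch of that invariant (simplified stand-in types; `QueuedUpdate` and `queue_update` are illustrative names, not part of this commit):

```rust
/// Simplified stand-in for the real `MessageSendEvent`.
#[derive(Clone, Debug)]
struct QueuedUpdate(u64);

struct PendingBroadcastMessages {
    broadcast_message: Vec<QueuedUpdate>,
    ticks_remaining: Option<i32>,
}

const PENDING_BROADCAST_MESSAGES_TIMER_TICKS: i32 = 2;

impl PendingBroadcastMessages {
    /// Queue an update, arming the countdown only when the queue was empty,
    /// mirroring the force-close path added in this commit.
    fn queue_update(&mut self, update: QueuedUpdate) {
        if self.broadcast_message.is_empty() {
            self.ticks_remaining = Some(PENDING_BROADCAST_MESSAGES_TIMER_TICKS);
        }
        self.broadcast_message.push(update);
    }
}

fn main() {
    let mut pending = PendingBroadcastMessages {
        broadcast_message: Vec::new(),
        ticks_remaining: None,
    };
    pending.queue_update(QueuedUpdate(1));
    pending.queue_update(QueuedUpdate(2));
    // Only the first enqueue armed the timer; the second left it untouched.
    assert_eq!(pending.ticks_remaining, Some(PENDING_BROADCAST_MESSAGES_TIMER_TICKS));
    assert_eq!(pending.broadcast_message.len(), 2);
}
```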
@@ -2338,6 +2353,7 @@ where
 			funding_batch_states: Mutex::new(BTreeMap::new()),
 
 			pending_offers_messages: Mutex::new(Vec::new()),
+			pending_broadcast_messages: Mutex::new(PendingBroadcastMessages { broadcast_message: Vec::new(), ticks_remaining: None }),
 
 			entropy_source,
 			node_signer,
@@ -2841,15 +2857,44 @@ where
 		if let Some(update) = update_opt {
 			// Try to send the `BroadcastChannelUpdate` to the peer we just force-closed on, but if
 			// not try to broadcast it via whatever peer we have.
+			let broadcast_message_evt = events::MessageSendEvent::BroadcastChannelUpdate {
+				msg: update
+			};
+
 			let per_peer_state = self.per_peer_state.read().unwrap();
 			let a_peer_state_opt = per_peer_state.get(peer_node_id)
 				.ok_or(per_peer_state.values().next());
+
+			// If we were able to get the peer we just force-closed on.
 			if let Ok(a_peer_state_mutex) = a_peer_state_opt {
 				let mut a_peer_state = a_peer_state_mutex.lock().unwrap();
-				a_peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
-					msg: update
-				});
+				a_peer_state.pending_msg_events.push(broadcast_message_evt);
+			}
+			// Otherwise, fall back to some other peer we are connected to, if any.
+			else if let Err(a_peer_state_mutex) = a_peer_state_opt {
+				match a_peer_state_mutex {
+					// If we are connected to some peer.
+					Some(val) => {
+						let mut a_peer_state = val.lock().unwrap();
+						a_peer_state.pending_msg_events.push(broadcast_message_evt);
+						log_info!(self.logger,
+							"Not able to broadcast channel update to peer we force-closed on. Broadcasting to some random peer.");
+					},
+					// If we are connected to no one.
+					None => {
+						let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
+						if pending_broadcast_messages.broadcast_message.len() == 0 {
+							pending_broadcast_messages.ticks_remaining = Some(PENDING_BROADCAST_MESSAGES_TIMER_TICKS);
+						}
+						pending_broadcast_messages.broadcast_message.push(broadcast_message_evt);
+						log_info!(self.logger, "Not able to broadcast channel_update of force-closed channel right now. Will try rebroadcasting later.");
+					}
+				}
 			}
+
 		}
 
 		Ok(counterparty_node_id)
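The `get(peer_node_id).ok_or(per_peer_state.values().next())` pattern above folds three situations into one `Result`: `Ok(..)` when the peer we force-closed on is still connected, `Err(Some(..))` when only some other peer is, and `Err(None)` when we have no peers at all. A standalone sketch of just that pattern (a hypothetical `peers` map, not LDK types):

```rust
use std::collections::HashMap;

fn main() {
    let mut peers: HashMap<&str, u32> = HashMap::new();
    peers.insert("alice", 1);

    // The force-closed peer is connected: Ok(&state).
    assert_eq!(peers.get("alice").ok_or(peers.values().next()), Ok(&1));

    // It is not, but some other peer is: Err(Some(&state)).
    assert_eq!(peers.get("bob").ok_or(peers.values().next()), Err(Some(&1)));

    // No peers are connected at all: Err(None).
    let empty: HashMap<&str, u32> = HashMap::new();
    assert_eq!(empty.get("bob").ok_or(empty.values().next()), Err(None));
}
```

Note that `ok_or` evaluates its argument eagerly, so `values().next()` runs on every call; both borrows of the map are immutable, so this is harmless here.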
@@ -4915,6 +4960,45 @@ where
 		{
 			let per_peer_state = self.per_peer_state.read().unwrap();
+
+			{
+				// Grab the pending messages to be broadcast.
+				let mut pending_broadcast_messages = self.pending_broadcast_messages.lock().unwrap();
+
+				// If we have pending broadcast events,
+				if pending_broadcast_messages.ticks_remaining != None {
+					// and it is time to broadcast,
+					if pending_broadcast_messages.ticks_remaining == Some(0) {
+						// and we are connected to a non-zero number of peers, we can broadcast successfully.
+						if per_peer_state.len() > 0 {
+							let a_peer_state_mutex = per_peer_state.values().next().unwrap();
+							let mut a_peer_state = a_peer_state_mutex.lock().unwrap();
+
+							for broadcast_evt in pending_broadcast_messages.broadcast_message.iter() {
+								a_peer_state.pending_msg_events.push(broadcast_evt.clone());
+							}
+							// After broadcasting, clear the event vector
+							// and set the timer back to None.
+							pending_broadcast_messages.broadcast_message.clear();
+							pending_broadcast_messages.ticks_remaining = None;
+
+							log_info!(self.logger, "Successfully broadcast the pending channel update messages.");
+						}
+						// Otherwise we restart the counter and wait for the next chance to rebroadcast.
+						else {
+							pending_broadcast_messages.ticks_remaining = Some(PENDING_BROADCAST_MESSAGES_TIMER_TICKS);
+						}
+					}
+					// Otherwise we decrement the counter.
+					else {
+						pending_broadcast_messages.ticks_remaining = match pending_broadcast_messages.ticks_remaining {
+							Some(val) => Some(val - 1),
+							None => None
+						};
+					}
+				}
+			}
+
 			for (counterparty_node_id, peer_state_mutex) in per_peer_state.iter() {
 				let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 				let peer_state = &mut *peer_state_lock;
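The tick handling above is effectively a small countdown state machine: decrement until zero, broadcast and disarm if a peer is available, otherwise re-arm and wait for the next window. A condensed sketch of just that logic (the `tick` helper and `can_broadcast` flag are illustrative, not part of the commit; `can_broadcast` stands in for `per_peer_state.len() > 0`):

```rust
const PENDING_BROADCAST_MESSAGES_TIMER_TICKS: i32 = 2;

/// Returns true on the tick where the queued messages would actually be handed off.
fn tick(ticks_remaining: &mut Option<i32>, can_broadcast: bool) -> bool {
    match *ticks_remaining {
        // Nothing queued.
        None => false,
        // Time is up and at least one peer is connected: broadcast and disarm.
        Some(0) if can_broadcast => {
            *ticks_remaining = None;
            true
        }
        // Time is up but we still have no peers: re-arm and try again later.
        Some(0) => {
            *ticks_remaining = Some(PENDING_BROADCAST_MESSAGES_TIMER_TICKS);
            false
        }
        // Keep counting down.
        Some(n) => {
            *ticks_remaining = Some(n - 1);
            false
        }
    }
}

fn main() {
    let mut t = Some(PENDING_BROADCAST_MESSAGES_TIMER_TICKS);
    // With the counter starting at 2, the broadcast fires on the third tick.
    assert!(!tick(&mut t, true));
    assert!(!tick(&mut t, true));
    assert!(tick(&mut t, true));
    assert_eq!(t, None);
}
```

If `timer_tick_occurred` is driven roughly once per minute, as LDK suggests, this retries every few minutes until at least one peer connection exists.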
@@ -10810,6 +10894,8 @@ where
 			pending_offers_messages: Mutex::new(Vec::new()),
 
+			pending_broadcast_messages: Mutex::new(PendingBroadcastMessages { broadcast_message: Vec::new(), ticks_remaining: None }),
+
 			entropy_source: args.entropy_source,
 			node_signer: args.node_signer,
 			signer_provider: args.signer_provider,
