Skip to content

Commit 286d1db

Browse files
authored
Merge pull request #2521 from TheBlueMatt/2023-08-one-less-write
Avoid persisting ChannelManager in some cases and separate event from persist notifies
2 parents 3dcdb14 + 32e5903 commit 286d1db

File tree

3 files changed

+344
-188
lines changed

3 files changed

+344
-188
lines changed

fuzz/src/chanmon_consistency.rs

Lines changed: 18 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -125,7 +125,6 @@ struct TestChainMonitor {
125125
// "fails" if we ever force-close a channel, we avoid doing so, always saving the latest
126126
// fully-serialized monitor state here, as well as the corresponding update_id.
127127
pub latest_monitors: Mutex<HashMap<OutPoint, (u64, Vec<u8>)>>,
128-
pub should_update_manager: atomic::AtomicBool,
129128
}
130129
impl TestChainMonitor {
131130
pub fn new(broadcaster: Arc<TestBroadcaster>, logger: Arc<dyn Logger>, feeest: Arc<FuzzEstimator>, persister: Arc<TestPersister>, keys: Arc<KeyProvider>) -> Self {
@@ -135,7 +134,6 @@ impl TestChainMonitor {
135134
keys,
136135
persister,
137136
latest_monitors: Mutex::new(HashMap::new()),
138-
should_update_manager: atomic::AtomicBool::new(false),
139137
}
140138
}
141139
}
@@ -146,7 +144,6 @@ impl chain::Watch<TestChannelSigner> for TestChainMonitor {
146144
if let Some(_) = self.latest_monitors.lock().unwrap().insert(funding_txo, (monitor.get_latest_update_id(), ser.0)) {
147145
panic!("Already had monitor pre-watch_channel");
148146
}
149-
self.should_update_manager.store(true, atomic::Ordering::Relaxed);
150147
self.chain_monitor.watch_channel(funding_txo, monitor)
151148
}
152149

@@ -162,7 +159,6 @@ impl chain::Watch<TestChannelSigner> for TestChainMonitor {
162159
let mut ser = VecWriter(Vec::new());
163160
deserialized_monitor.write(&mut ser).unwrap();
164161
map_entry.insert((update.update_id, ser.0));
165-
self.should_update_manager.store(true, atomic::Ordering::Relaxed);
166162
self.chain_monitor.update_channel(funding_txo, update)
167163
}
168164

@@ -1101,11 +1097,9 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
11011097
if !chan_a_disconnected {
11021098
nodes[1].peer_disconnected(&nodes[0].get_our_node_id());
11031099
chan_a_disconnected = true;
1104-
drain_msg_events_on_disconnect!(0);
1105-
}
1106-
if monitor_a.should_update_manager.load(atomic::Ordering::Relaxed) {
1107-
node_a_ser.0.clear();
1108-
nodes[0].write(&mut node_a_ser).unwrap();
1100+
push_excess_b_events!(nodes[1].get_and_clear_pending_msg_events().drain(..), Some(0));
1101+
ab_events.clear();
1102+
ba_events.clear();
11091103
}
11101104
let (new_node_a, new_monitor_a) = reload_node!(node_a_ser, 0, monitor_a, keys_manager_a, fee_est_a);
11111105
nodes[0] = new_node_a;
@@ -1134,11 +1128,9 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
11341128
if !chan_b_disconnected {
11351129
nodes[1].peer_disconnected(&nodes[2].get_our_node_id());
11361130
chan_b_disconnected = true;
1137-
drain_msg_events_on_disconnect!(2);
1138-
}
1139-
if monitor_c.should_update_manager.load(atomic::Ordering::Relaxed) {
1140-
node_c_ser.0.clear();
1141-
nodes[2].write(&mut node_c_ser).unwrap();
1131+
push_excess_b_events!(nodes[1].get_and_clear_pending_msg_events().drain(..), Some(2));
1132+
bc_events.clear();
1133+
cb_events.clear();
11421134
}
11431135
let (new_node_c, new_monitor_c) = reload_node!(node_c_ser, 2, monitor_c, keys_manager_c, fee_est_c);
11441136
nodes[2] = new_node_c;
@@ -1304,15 +1296,18 @@ pub fn do_test<Out: Output>(data: &[u8], underlying_out: Out) {
13041296
_ => test_return!(),
13051297
}
13061298

1307-
node_a_ser.0.clear();
1308-
nodes[0].write(&mut node_a_ser).unwrap();
1309-
monitor_a.should_update_manager.store(false, atomic::Ordering::Relaxed);
1310-
node_b_ser.0.clear();
1311-
nodes[1].write(&mut node_b_ser).unwrap();
1312-
monitor_b.should_update_manager.store(false, atomic::Ordering::Relaxed);
1313-
node_c_ser.0.clear();
1314-
nodes[2].write(&mut node_c_ser).unwrap();
1315-
monitor_c.should_update_manager.store(false, atomic::Ordering::Relaxed);
1299+
if nodes[0].get_and_clear_needs_persistence() == true {
1300+
node_a_ser.0.clear();
1301+
nodes[0].write(&mut node_a_ser).unwrap();
1302+
}
1303+
if nodes[1].get_and_clear_needs_persistence() == true {
1304+
node_b_ser.0.clear();
1305+
nodes[1].write(&mut node_b_ser).unwrap();
1306+
}
1307+
if nodes[2].get_and_clear_needs_persistence() == true {
1308+
node_c_ser.0.clear();
1309+
nodes[2].write(&mut node_c_ser).unwrap();
1310+
}
13161311
}
13171312
}
13181313

lightning-background-processor/src/lib.rs

Lines changed: 9 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -315,7 +315,7 @@ macro_rules! define_run_body {
315315
// see `await_start`'s use below.
316316
let mut await_start = None;
317317
if $check_slow_await { await_start = Some($get_timer(1)); }
318-
let updates_available = $await;
318+
$await;
319319
let await_slow = if $check_slow_await { $timer_elapsed(&mut await_start.unwrap(), 1) } else { false };
320320

321321
// Exit the loop if the background processor was requested to stop.
@@ -324,7 +324,7 @@ macro_rules! define_run_body {
324324
break;
325325
}
326326

327-
if updates_available {
327+
if $channel_manager.get_and_clear_needs_persistence() {
328328
log_trace!($logger, "Persisting ChannelManager...");
329329
$persister.persist_manager(&*$channel_manager)?;
330330
log_trace!($logger, "Done persisting ChannelManager.");
@@ -655,16 +655,14 @@ where
655655
channel_manager, channel_manager.process_pending_events_async(async_event_handler).await,
656656
gossip_sync, peer_manager, logger, scorer, should_break, {
657657
let fut = Selector {
658-
a: channel_manager.get_persistable_update_future(),
658+
a: channel_manager.get_event_or_persistence_needed_future(),
659659
b: chain_monitor.get_update_future(),
660660
c: sleeper(if mobile_interruptable_platform { Duration::from_millis(100) } else { Duration::from_secs(FASTEST_TIMER) }),
661661
};
662662
match fut.await {
663-
SelectorOutput::A => true,
664-
SelectorOutput::B => false,
663+
SelectorOutput::A|SelectorOutput::B => {},
665664
SelectorOutput::C(exit) => {
666665
should_break = exit;
667-
false
668666
}
669667
}
670668
}, |t| sleeper(Duration::from_secs(t)),
@@ -787,10 +785,10 @@ impl BackgroundProcessor {
787785
define_run_body!(persister, chain_monitor, chain_monitor.process_pending_events(&event_handler),
788786
channel_manager, channel_manager.process_pending_events(&event_handler),
789787
gossip_sync, peer_manager, logger, scorer, stop_thread.load(Ordering::Acquire),
790-
Sleeper::from_two_futures(
791-
channel_manager.get_persistable_update_future(),
788+
{ Sleeper::from_two_futures(
789+
channel_manager.get_event_or_persistence_needed_future(),
792790
chain_monitor.get_update_future()
793-
).wait_timeout(Duration::from_millis(100)),
791+
).wait_timeout(Duration::from_millis(100)); },
794792
|_| Instant::now(), |time: &Instant, dur| time.elapsed().as_secs() > dur, false)
795793
});
796794
Self { stop_thread: stop_thread_clone, thread_handle: Some(handle) }
@@ -1326,7 +1324,7 @@ mod tests {
13261324
check_persisted_data!(nodes[0].node, filepath.clone());
13271325

13281326
loop {
1329-
if !nodes[0].node.get_persistence_condvar_value() { break }
1327+
if !nodes[0].node.get_event_or_persist_condvar_value() { break }
13301328
}
13311329

13321330
// Force-close the channel.
@@ -1335,7 +1333,7 @@ mod tests {
13351333
// Check that the force-close updates are persisted.
13361334
check_persisted_data!(nodes[0].node, filepath.clone());
13371335
loop {
1338-
if !nodes[0].node.get_persistence_condvar_value() { break }
1336+
if !nodes[0].node.get_event_or_persist_condvar_value() { break }
13391337
}
13401338

13411339
// Check network graph is persisted

0 commit comments

Comments (0)