@@ -1357,62 +1357,112 @@ mod tests {
 		}
 	}
 
+	macro_rules! do_test_not_pruning_network_graph_until_graph_sync_completion {
+		($nodes: expr, $receive: expr, $sleep: expr) => {
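+			// Shared test body: `$receive` waits for the persister's graph-persistence
+			// notification (i.e. the post-sync prune), and `$sleep` yields between polls of
+			// the test logger, so the same assertions can run under both the blocking and
+			// the async background processor.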
+			let features = ChannelFeatures::empty();
+			$nodes[0].network_graph.add_channel_from_partial_announcement(
+				42, 53, features, $nodes[0].node.get_our_node_id(), $nodes[1].node.get_our_node_id()
+			).expect("Failed to update channel from partial announcement");
+			let original_graph_description = $nodes[0].network_graph.to_string();
+			assert!(original_graph_description.contains("42: features: 0000, node_one:"));
+			assert_eq!($nodes[0].network_graph.read_only().channels().len(), 1);
+
+			loop {
+				$sleep;
+				let log_entries = $nodes[0].logger.lines.lock().unwrap();
+				let loop_counter = "Calling ChannelManager's timer_tick_occurred".to_string();
+				if *log_entries.get(&("lightning_background_processor".to_string(), loop_counter))
+					.unwrap_or(&0) > 1
+				{
+					// Wait until the loop has gone around at least twice.
+					break
+				}
+			}
+
+			let initialization_input = vec![
+				76, 68, 75, 1, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247,
+				79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 97, 227, 98, 218,
+				0, 0, 0, 4, 2, 22, 7, 207, 206, 25, 164, 197, 231, 230, 231, 56, 102, 61, 250, 251,
+				187, 172, 38, 46, 79, 247, 108, 44, 155, 48, 219, 238, 252, 53, 192, 6, 67, 2, 36, 125,
+				157, 176, 223, 175, 234, 116, 94, 248, 201, 225, 97, 235, 50, 47, 115, 172, 63, 136,
+				88, 216, 115, 11, 111, 217, 114, 84, 116, 124, 231, 107, 2, 158, 1, 242, 121, 152, 106,
+				204, 131, 186, 35, 93, 70, 216, 10, 237, 224, 183, 89, 95, 65, 3, 83, 185, 58, 138,
+				181, 64, 187, 103, 127, 68, 50, 2, 201, 19, 17, 138, 136, 149, 185, 226, 156, 137, 175,
+				110, 32, 237, 0, 217, 90, 31, 100, 228, 149, 46, 219, 175, 168, 77, 4, 143, 38, 128,
+				76, 97, 0, 0, 0, 2, 0, 0, 255, 8, 153, 192, 0, 2, 27, 0, 0, 0, 1, 0, 0, 255, 2, 68,
+				226, 0, 6, 11, 0, 1, 2, 3, 0, 0, 0, 2, 0, 40, 0, 0, 0, 0, 0, 0, 3, 232, 0, 0, 3, 232,
+				0, 0, 0, 1, 0, 0, 0, 0, 58, 85, 116, 216, 255, 8, 153, 192, 0, 2, 27, 0, 0, 25, 0, 0,
+				0, 1, 0, 0, 0, 125, 255, 2, 68, 226, 0, 6, 11, 0, 1, 5, 0, 0, 0, 0, 29, 129, 25, 192,
+			];
+			$nodes[0].rapid_gossip_sync.update_network_graph_no_std(&initialization_input[..], Some(1642291930)).unwrap();
+
+			// this should have added two channels
+			assert_eq!($nodes[0].network_graph.read_only().channels().len(), 3);
+
+			$receive.expect("Network graph not pruned within deadline");
+
+			// all channels should now be pruned
+			assert_eq!($nodes[0].network_graph.read_only().channels().len(), 0);
+		}
+	}
+
 	#[test]
 	fn test_not_pruning_network_graph_until_graph_sync_completion() {
+		let (sender, receiver) = std::sync::mpsc::sync_channel(1);
+
 		let nodes = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion".to_string());
 		let data_dir = nodes[0].persister.get_data_dir();
-		let (sender, receiver) = std::sync::mpsc::sync_channel(1);
 		let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));
-		let network_graph = nodes[0].network_graph.clone();
-		let features = ChannelFeatures::empty();
-		network_graph.add_channel_from_partial_announcement(42, 53, features, nodes[0].node.get_our_node_id(), nodes[1].node.get_our_node_id())
-			.expect("Failed to update channel from partial announcement");
-		let original_graph_description = network_graph.to_string();
-		assert!(original_graph_description.contains("42: features: 0000, node_one:"));
-		assert_eq!(network_graph.read_only().channels().len(), 1);
 
 		let event_handler = |_: _| {};
 		let background_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
 
-		loop {
-			let log_entries = nodes[0].logger.lines.lock().unwrap();
-			let loop_counter = "Calling ChannelManager's timer_tick_occurred".to_string();
-			if *log_entries.get(&("lightning_background_processor".to_string(), loop_counter))
-				.unwrap_or(&0) > 1
-			{
-				// Wait until the loop has gone around at least twice.
-				break
-			}
-		}
-
-		let initialization_input = vec![
-			76, 68, 75, 1, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247,
-			79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 97, 227, 98, 218,
-			0, 0, 0, 4, 2, 22, 7, 207, 206, 25, 164, 197, 231, 230, 231, 56, 102, 61, 250, 251,
-			187, 172, 38, 46, 79, 247, 108, 44, 155, 48, 219, 238, 252, 53, 192, 6, 67, 2, 36, 125,
-			157, 176, 223, 175, 234, 116, 94, 248, 201, 225, 97, 235, 50, 47, 115, 172, 63, 136,
-			88, 216, 115, 11, 111, 217, 114, 84, 116, 124, 231, 107, 2, 158, 1, 242, 121, 152, 106,
-			204, 131, 186, 35, 93, 70, 216, 10, 237, 224, 183, 89, 95, 65, 3, 83, 185, 58, 138,
-			181, 64, 187, 103, 127, 68, 50, 2, 201, 19, 17, 138, 136, 149, 185, 226, 156, 137, 175,
-			110, 32, 237, 0, 217, 90, 31, 100, 228, 149, 46, 219, 175, 168, 77, 4, 143, 38, 128,
-			76, 97, 0, 0, 0, 2, 0, 0, 255, 8, 153, 192, 0, 2, 27, 0, 0, 0, 1, 0, 0, 255, 2, 68,
-			226, 0, 6, 11, 0, 1, 2, 3, 0, 0, 0, 2, 0, 40, 0, 0, 0, 0, 0, 0, 3, 232, 0, 0, 3, 232,
-			0, 0, 0, 1, 0, 0, 0, 0, 58, 85, 116, 216, 255, 8, 153, 192, 0, 2, 27, 0, 0, 25, 0, 0,
-			0, 1, 0, 0, 0, 125, 255, 2, 68, 226, 0, 6, 11, 0, 1, 5, 0, 0, 0, 0, 29, 129, 25, 192,
-		];
-		nodes[0].rapid_gossip_sync.update_network_graph_no_std(&initialization_input[..], Some(1642291930)).unwrap();
-
-		// this should have added two channels
-		assert_eq!(network_graph.read_only().channels().len(), 3);
-
-		receiver
-			.recv_timeout(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER * 5))
-			.expect("Network graph not pruned within deadline");
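+		// Blocking variant: wait for the prune notification with a timed `recv_timeout`
+		// and yield between logger polls with `std::thread::sleep`.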
+		do_test_not_pruning_network_graph_until_graph_sync_completion!(nodes,
+			receiver.recv_timeout(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER * 5)),
+			std::thread::sleep(Duration::from_millis(1)));
 
 		background_processor.stop().unwrap();
+	}
+
+	#[tokio::test]
+	#[cfg(feature = "futures")]
+	async fn test_not_pruning_network_graph_until_graph_sync_completion_async() {
+		let (sender, receiver) = std::sync::mpsc::sync_channel(1);
+
+		let nodes = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion_async".to_string());
+		let data_dir = nodes[0].persister.get_data_dir();
+		let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));
 
-		// all channels should now be pruned
-		assert_eq!(network_graph.read_only().channels().len(), 0);
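+		// Async variant: drive `process_events_async` on a tokio `LocalSet`. The custom
+		// sleeper future resolves to `true` (exit) once `exit_sender` fires, and to
+		// `false` after an ordinary timer sleep otherwise.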
+		let (exit_sender, exit_receiver) = tokio::sync::watch::channel(());
+		let bp_future = super::process_events_async(
+			persister, |_: _| {async {}}, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
+			nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
+			Some(nodes[0].scorer.clone()), move |dur: Duration| {
+				let mut exit_receiver = exit_receiver.clone();
+				Box::pin(async move {
+					tokio::select! {
+						_ = tokio::time::sleep(dur) => false,
+						_ = exit_receiver.changed() => true,
+					}
+				})
+			}, false,
+		);
+		// TODO: Drop _local and simply spawn after #2003
+		let local_set = tokio::task::LocalSet::new();
+		local_set.spawn_local(bp_future);
+		local_set.spawn_local(async move {
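+			// Rather than blocking on `recv_timeout`, poll the notifier with `try_recv`
+			// between `tokio::time::sleep`s so the single-threaded `LocalSet` can keep
+			// driving the background processor future.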
+			do_test_not_pruning_network_graph_until_graph_sync_completion!(nodes, {
+				let mut i = 0;
+				loop {
+					tokio::time::sleep(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER)).await;
+					if let Ok(()) = receiver.try_recv() { break Ok::<(), ()>(()); }
+					assert!(i < 5);
+					i += 1;
+				}
+			}, tokio::time::sleep(Duration::from_millis(1)).await);
+			exit_sender.send(()).unwrap();
+		});
+		local_set.await;
 	}
 
 	macro_rules! do_test_payment_path_scoring {