@@ -151,6 +151,13 @@ impl AccessFuture {
	///
	/// This is identical to calling [`AccessFuture::resolve`] with a dummy `gossip`, disabling
	/// forwarding the validated gossip message onwards to peers.
+	///
+	/// Because this may cause the [`NetworkGraph`]'s [`processing_queue_high`] to flip, in order
+	/// to allow us to interact with peers again, you should call [`PeerManager::process_events`]
+	/// after this.
+	///
+	/// [`processing_queue_high`]: crate::ln::msgs::RoutingMessageHandler::processing_queue_high
+	/// [`PeerManager::process_events`]: crate::ln::peer_handler::PeerManager::process_events
	pub fn resolve_without_forwarding<L: Deref>(&self,
		graph: &NetworkGraph<L>, result: Result<TxOut, ChainAccessError>)
	where L::Target: Logger {
@@ -161,6 +168,13 @@ impl AccessFuture {
	///
	/// The given `gossip` is used to broadcast any validated messages onwards to all peers which
	/// have available buffer space.
+	///
+	/// Because this may cause the [`NetworkGraph`]'s [`processing_queue_high`] to flip, in order
+	/// to allow us to interact with peers again, you should call [`PeerManager::process_events`]
+	/// after this.
+	///
+	/// [`processing_queue_high`]: crate::ln::msgs::RoutingMessageHandler::processing_queue_high
+	/// [`PeerManager::process_events`]: crate::ln::peer_handler::PeerManager::process_events
	pub fn resolve<L: Deref, G: Deref<Target=NetworkGraph<L>>, C: Deref, GS: Deref<Target = P2PGossipSync<G, C, L>>>(&self,
		graph: &NetworkGraph<L>, gossip: GS, result: Result<TxOut, ChainAccessError>
	) where L::Target: Logger, C::Target: ChainAccess {
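
For both `resolve_without_forwarding` and `resolve`, the ordering the new docs ask for looks roughly like the sketch below. This is not code from this PR: the names (`future`, `network_graph`, `gossip_sync`, `peer_manager`, `lookup_result`) are placeholders, and the surrounding setup, imports, and type parameters are elided.

```rust
// Assumed already in scope: `future: AccessFuture`, handed out when the
// `channel_announcement` was deferred for an async UTXO lookup, plus
// `network_graph`, `gossip_sync`, `peer_manager`, and the completed lookup
// `lookup_result: Result<TxOut, ChainAccessError>`.

// Complete the deferred check; validated messages are forwarded to peers with
// available buffer space (use `resolve_without_forwarding` to skip forwarding).
future.resolve(&network_graph, &gossip_sync, lookup_result);

// Resolving may drop the pending-check count, flipping `processing_queue_high`
// back to false, so poke the PeerManager to resume reading gossip from peers.
peer_manager.process_events();
```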
@@ -515,4 +529,36 @@ impl PendingChecks {
			}
		}
	}
+
+	/// The maximum number of pending gossip checks before [`Self::too_many_checks_pending`]
+	/// returns `true`. Note that this isn't a strict upper-bound on the number of checks pending -
+	/// each peer may, at a minimum, read one more socket buffer worth of `channel_announcement`s
+	/// which we'll have to process. With a socket buffer of 4KB and a minimum
+	/// `channel_announcement` size of, roughly, 429 bytes, this may leave us with `10*our peer
+	/// count` messages to process beyond this limit. Because we'll probably have a few peers,
+	/// there's no reason for this constant to be materially less than 30 or so, and 32 in-flight
+	/// checks should be more than enough for decent parallelism.
+	const MAX_PENDING_LOOKUPS: usize = 32;
+
+	/// Returns true if there are a large number of async checks pending and future
+	/// `channel_announcement` messages should be delayed. Note that this is only a hint and
+	/// messages already in-flight may still have to be handled for various reasons.
+	pub(super) fn too_many_checks_pending(&self) -> bool {
+		let mut pending_checks = self.internal.lock().unwrap();
+		if pending_checks.channels.len() > Self::MAX_PENDING_LOOKUPS {
+			// If we have many channel checks pending, ensure we don't have any dangling checks
+			// (i.e. checks where the user told us they'd call back but drop'd the `AccessFuture`
+			// instead) before we commit to applying backpressure.
+			pending_checks.channels.retain(|_, chan| {
+				Weak::upgrade(&chan).is_some()
+			});
+			pending_checks.nodes.retain(|_, channels| {
+				channels.retain(|chan| Weak::upgrade(&chan).is_some());
+				!channels.is_empty()
+			});
+			pending_checks.channels.len() > Self::MAX_PENDING_LOOKUPS
+		} else {
+			false
+		}
+	}
}
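
Since `too_many_checks_pending` is `pub(super)`, its natural caller is the gossip handler's `processing_queue_high` implementation that the doc links above point at. (On the constant's docs: a 4 KB socket read holds roughly 4096 / 429 ≈ 9.5 minimum-size `channel_announcement`s, which is where the `10*our peer count` figure comes from.) A minimal sketch of that wiring follows; it assumes the `NetworkGraph`'s `PendingChecks` is reachable from `P2PGossipSync` via a `pending_checks` field, and the field name and impl shape are illustrative, not taken from this diff.

```rust
// Inside P2PGossipSync's RoutingMessageHandler implementation (shape assumed):
fn processing_queue_high(&self) -> bool {
	// While many async UTXO checks are in flight, hint the PeerManager to stop
	// reading further gossip from peers; the hint clears once resolved checks
	// bring the pending count back under MAX_PENDING_LOOKUPS.
	self.network_graph.pending_checks.too_many_checks_pending()
}
```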