@@ -148,6 +148,13 @@ impl UtxoFuture {
 	///
 	/// This is identical to calling [`UtxoFuture::resolve`] with a dummy `gossip`, disabling
 	/// forwarding the validated gossip message onwards to peers.
+	///
+	/// Because this may cause the [`NetworkGraph`]'s [`processing_queue_high`] to flip, in order
+	/// to allow us to interact with peers again, you should call [`PeerManager::process_events`]
+	/// after this.
+	///
+	/// [`processing_queue_high`]: crate::ln::msgs::RoutingMessageHandler::processing_queue_high
+	/// [`PeerManager::process_events`]: crate::ln::peer_handler::PeerManager::process_events
 	pub fn resolve_without_forwarding<L: Deref>(&self,
 		graph: &NetworkGraph<L>, result: Result<TxOut, UtxoLookupError>)
 	where L::Target: Logger {
@@ -158,6 +165,13 @@ impl UtxoFuture {
 	///
 	/// The given `gossip` is used to broadcast any validated messages onwards to all peers which
 	/// have available buffer space.
+	///
+	/// Because this may cause the [`NetworkGraph`]'s [`processing_queue_high`] to flip, in order
+	/// to allow us to interact with peers again, you should call [`PeerManager::process_events`]
+	/// after this.
+	///
+	/// [`processing_queue_high`]: crate::ln::msgs::RoutingMessageHandler::processing_queue_high
+	/// [`PeerManager::process_events`]: crate::ln::peer_handler::PeerManager::process_events
 	pub fn resolve<L: Deref, G: Deref<Target=NetworkGraph<L>>, U: Deref, GS: Deref<Target = P2PGossipSync<G, U, L>>>(&self,
 		graph: &NetworkGraph<L>, gossip: GS, result: Result<TxOut, UtxoLookupError>
 	) where L::Target: Logger, U::Target: UtxoLookup {
@@ -510,4 +524,36 @@ impl PendingChecks {
 			}
 		}
 	}
+
+	/// The maximum number of pending gossip checks before [`Self::too_many_checks_pending`]
+	/// returns `true`. Note that this isn't a strict upper-bound on the number of checks pending -
+	/// each peer may, at a minimum, read one more socket buffer worth of `channel_announcement`s
+	/// which we'll have to process. With a socket buffer of 4KB and a minimum
+	/// `channel_announcement` size of, roughly, 429 bytes, this may leave us with `10*our peer
+	/// count` messages to process beyond this limit. Because we'll probably have a few peers,
+	/// there's no reason for this constant to be materially less than 30 or so, and 32 in-flight
+	/// checks should be more than enough for decent parallelism.
+	const MAX_PENDING_LOOKUPS: usize = 32;
+
+	/// Returns true if there are a large number of async checks pending and future
+	/// `channel_announcement` messages should be delayed. Note that this is only a hint and
+	/// messages already in-flight may still have to be handled for various reasons.
+	pub(super) fn too_many_checks_pending(&self) -> bool {
+		let mut pending_checks = self.internal.lock().unwrap();
+		if pending_checks.channels.len() > Self::MAX_PENDING_LOOKUPS {
+			// If we have many channel checks pending, ensure we don't have any dangling checks
+			// (i.e. checks where the user told us they'd call back but drop'd the `AccessFuture`
+			// instead) before we commit to applying backpressure.
+			pending_checks.channels.retain(|_, chan| {
+				Weak::upgrade(&chan).is_some()
+			});
+			pending_checks.nodes.retain(|_, channels| {
+				channels.retain(|chan| Weak::upgrade(&chan).is_some());
+				!channels.is_empty()
+			});
+			pending_checks.channels.len() > Self::MAX_PENDING_LOOKUPS
+		} else {
+			false
+		}
+	}
 }