 static volatile int32_t _dispatch_workq_runnable_workers;

 /* The desired minimum number of runnable worker threads */
-static int32_t _dispatch_workq_target_runnable_workers;
+static int32_t _dispatch_workq_min_runnable_workers;

 /* The desired maximum number of runnable worker threads */
-static int32_t _dispatch_workq_target_max_runnable_workers;
+static int32_t _dispatch_workq_max_runnable_workers;

 /* Limit on the total number of worker threads that can be created. */
-static int32_t _dispatch_workq_max_total_workers;
+static int32_t _dispatch_workq_max_spawned_workers;

 #if DISPATCH_ENABLE_PWQ_KEXT
 /* Are we using user-level or kext based management? */
@@ -124,7 +124,7 @@ void _dispatch_workq_worker_register_kext(void);
 void _dispatch_workq_worker_unregister_kext(void);
 void _dispatch_workq_work_added_kext(void);
 void _dispatch_workq_worker_wait_kext(void);
-void *dispatch_workq_thread_monitor_main_kext(void *context DISPATCH_UNUSED);
+void *_dispatch_workq_thread_monitor_main_kext(void *context DISPATCH_UNUSED);
 #endif // DISPATCH_ENABLE_PWQ_KEXT

 #pragma mark Workqueue internal data structures and functions
@@ -258,14 +258,14 @@ _dispatch_workq_init_once(void *context DISPATCH_UNUSED)
             high_target = MIN(high_target, max);
         }
     }
-    _dispatch_workq_target_runnable_workers = low_target;
-    _dispatch_workq_target_max_runnable_workers = high_target;
-    _dispatch_workq_max_total_workers = max;
+    _dispatch_workq_min_runnable_workers = low_target;
+    _dispatch_workq_max_runnable_workers = high_target;
+    _dispatch_workq_max_spawned_workers = max;

     // Must come after _dispatch_workqueue_management_init() sets threadpool_verbose
     _dispatch_debug("workq: normal pool targets: %d...%d with max of %d",
-            _dispatch_workq_target_runnable_workers, _dispatch_workq_target_max_runnable_workers,
-            _dispatch_workq_max_total_workers);
+            _dispatch_workq_min_runnable_workers, _dispatch_workq_max_runnable_workers,
+            _dispatch_workq_max_spawned_workers);

     // Now that we have set threadpool parameters, initialize the management subsystem
     _dispatch_workq_management_init();
@@ -284,7 +284,7 @@ _dispatch_workq_init_once(void *context DISPATCH_UNUSED)
             WORKQ_NUM_PRIORITIES, false);

     // create initial set of normal workers
-    for (int i = 0; i < _dispatch_workq_target_runnable_workers; i++) {
+    for (int i = 0; i < _dispatch_workq_min_runnable_workers; i++) {
         _dispatch_workq_spawn_thread(_dispatch_workq_normal_worker_main);
     }
 }
@@ -335,7 +335,7 @@ dispatch_workq_additem_np(dispatch_workqueue_t workq,
     STAILQ_INSERT_TAIL(&workq->item_listhead, wi, item_entry);
     _dispatch_unfair_lock_unlock(&workq->lock);

-    if (unlikely(_dispatch_workq_runnable_workers < _dispatch_workq_target_max_runnable_workers)) {
+    if (unlikely(_dispatch_workq_runnable_workers < _dispatch_workq_max_runnable_workers)) {
         _dispatch_workq_work_added();
     }
 }
@@ -358,7 +358,7 @@ _dispatch_workq_add_control_item(void *op_code)
     STAILQ_INSERT_TAIL(&workq->item_listhead, wi, item_entry);
     _dispatch_unfair_lock_unlock(&workq->lock);

-    if (unlikely(_dispatch_workq_runnable_workers < _dispatch_workq_target_max_runnable_workers)) {
+    if (unlikely(_dispatch_workq_runnable_workers < _dispatch_workq_max_runnable_workers)) {
         _dispatch_workq_work_added();
     }
 }
@@ -558,7 +558,7 @@ _dispatch_workq_management_init(void)
     _dispatch_workq_kext_active = _dispatch_workqueue_management_init_kext();

     if (_dispatch_workq_kext_active) {
-        _dispatch_spawn_thread(dispatch_workq_thread_monitor_main_kext);
+        _dispatch_spawn_thread(_dispatch_workq_thread_monitor_main_kext);
         return;
     }
 #endif
@@ -573,7 +573,7 @@ _dispatch_workq_management_init(void)
     (void)dispatch_assume_zero(r);

     _dispatch_workq_manager.registered_workers =
-            _dispatch_calloc(_dispatch_workq_max_total_workers, sizeof(pid_t));
+            _dispatch_calloc(_dispatch_workq_max_spawned_workers, sizeof(pid_t));

     // spawn a thread to periodically estimate the number
     // of runnable workers and add/subtract to maintain target
@@ -593,7 +593,7 @@ _dispatch_workq_worker_register(void)
     int tid = syscall(SYS_gettid);
     int r = pthread_mutex_lock(&_dispatch_workq_manager.registered_worker_mutex);
     (void)dispatch_assume_zero(r);
-    if (_dispatch_workq_manager.num_registered_workers < _dispatch_workq_max_total_workers - 1) {
+    if (_dispatch_workq_manager.num_registered_workers < _dispatch_workq_max_spawned_workers - 1) {
         int worker_id = _dispatch_workq_manager.num_registered_workers++;
         _dispatch_workq_manager.registered_workers[worker_id] = tid;
         rc = 0;
@@ -640,7 +640,7 @@ _dispatch_workq_work_added(void)
         return;
     }
 #endif
-    if ((_dispatch_workq_runnable_workers < _dispatch_workq_target_max_runnable_workers)
+    if ((_dispatch_workq_runnable_workers < _dispatch_workq_max_runnable_workers)
             && (_dispatch_workq_manager.num_spare_workers > 0)) {
         int r = pthread_mutex_lock(&_dispatch_workq_manager.spare_worker_mutex);
         (void)dispatch_assume_zero(r);
@@ -794,10 +794,10 @@ _dispatch_workq_thread_monitor_main(void *context DISPATCH_UNUSED)

         _dispatch_debug("workq: %d runnable of %d total workers (target runnable: %d..%d)",
                 _dispatch_workq_runnable_workers, _dispatch_workq_manager.num_registered_workers,
-                _dispatch_workq_target_runnable_workers, _dispatch_workq_target_max_runnable_workers);
+                _dispatch_workq_min_runnable_workers, _dispatch_workq_max_runnable_workers);

         // Not enough workers running and there appears to be work in queues.
-        if ((_dispatch_workq_runnable_workers < _dispatch_workq_target_runnable_workers) &&
+        if ((_dispatch_workq_runnable_workers < _dispatch_workq_min_runnable_workers) &&
                 (os_atomic_load(&_dispatch_workq_normal_pool.mask, relaxed) != 0)) {
             r = pthread_mutex_lock(&_dispatch_workq_manager.spare_worker_mutex);
             (void)dispatch_assume_zero(r);
@@ -814,8 +814,8 @@ _dispatch_workq_thread_monitor_main(void *context DISPATCH_UNUSED)
         }

         // Too many runnable workers
-        if (_dispatch_workq_runnable_workers > _dispatch_workq_target_max_runnable_workers) {
-            int over = _dispatch_workq_runnable_workers - _dispatch_workq_target_max_runnable_workers;
+        if (_dispatch_workq_runnable_workers > _dispatch_workq_max_runnable_workers) {
+            int over = _dispatch_workq_runnable_workers - _dispatch_workq_max_runnable_workers;
             int pendingWaits = os_atomic_load(&_dispatch_workq_manager.num_pending_waits, relaxed);
             if (pendingWaits < over) {
                 os_atomic_inc(&_dispatch_workq_manager.num_pending_waits, relaxed);
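
Note on the renamed thresholds: _dispatch_workq_min_runnable_workers is the floor the monitor tries to keep runnable, _dispatch_workq_max_runnable_workers is the ceiling above which excess workers are asked to wait, and _dispatch_workq_max_spawned_workers caps how many worker threads may ever be created or registered. The standalone C sketch below is illustrative only (the monitor_tick() helper and the example values are assumptions, not libdispatch code); it mirrors the decisions visible in _dispatch_workq_work_added() and _dispatch_workq_thread_monitor_main() above.

/*
 * Minimal standalone sketch of how the three renamed thresholds interact.
 * The names mirror the diff; the values and monitor_tick() are hypothetical.
 */
#include <stdio.h>

static int min_runnable_workers = 2;   /* _dispatch_workq_min_runnable_workers */
static int max_runnable_workers = 4;   /* _dispatch_workq_max_runnable_workers */
static int max_spawned_workers  = 8;   /* _dispatch_workq_max_spawned_workers  */

/* Decide what the monitor would do for a given observed state. */
static const char *
monitor_tick(int runnable, int spawned, int pending_work)
{
    if (runnable < min_runnable_workers && pending_work > 0) {
        /* Under the floor with queued work: wake a spare worker,
         * or spawn a new one only if still under the spawn cap. */
        return (spawned < max_spawned_workers) ? "wake-or-spawn" : "wake-only";
    }
    if (runnable > max_runnable_workers) {
        /* Over the ceiling: ask (runnable - max_runnable_workers)
         * workers to wait, analogous to the pending-waits logic above. */
        return "park-excess";
    }
    return "steady";
}

int
main(void)
{
    printf("%s\n", monitor_tick(1, 3, 5));  /* under min with work -> wake-or-spawn */
    printf("%s\n", monitor_tick(6, 8, 0));  /* over max -> park-excess */
    printf("%s\n", monitor_tick(3, 5, 2));  /* within [min, max] -> steady */
    return 0;
}

Under this scheme the pool is steered toward the [min, max] runnable band while the total number of threads never exceeds the spawn cap.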