@@ -135,12 +135,12 @@ typedef struct dispatch_workq_item_s {
    void *func_arg;
} dispatch_workq_item_s, *dispatch_workq_item_t;

-typedef struct dispatch_workqueue_s {
+typedef struct dispatch_workq_workqueue_s {
    STAILQ_HEAD(,dispatch_workq_item_s) item_listhead;
    dispatch_unfair_lock_s lock;
    int priority;
    bool overcommit;
-} dispatch_workqueue_s, *dispatch_workqueue_t;
+} dispatch_workq_workqueue_s, *dispatch_workq_workqueue_t;

/*
 * The overcommit pool uses a simple coarse locking policy:
@@ -154,7 +154,7 @@ typedef struct dispatch_workqueue_s {
typedef struct dispatch_workq_overcommit_pool_s {
    uint32_t mask;
    int num_spares;
-   dispatch_workqueue_t queues[WORKQ_NUM_PRIORITIES];
+   dispatch_workq_workqueue_t wqs[WORKQ_NUM_PRIORITIES];
    pthread_mutex_t mutex;
    pthread_cond_t spare_workers;
} dispatch_workq_overcommit_pool_s, *dispatch_workq_overcommit_pool_t;
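For context, the "coarse locking policy" the comment above refers to means the single pool-wide mutex protects mask, wqs, and num_spares together, while idle workers park on the spare_workers condition variable. A minimal sketch of the producer side under that policy (enqueue_and_wake is a hypothetical helper, not part of this patch, and the num_spares bookkeeping shown is just one plausible convention):

static void
enqueue_and_wake(dispatch_workq_overcommit_pool_t oc_pool,
        dispatch_workq_workqueue_t wq, dispatch_workq_item_t wi)
{
    pthread_mutex_lock(&oc_pool->mutex);
    STAILQ_INSERT_TAIL(&wq->item_listhead, wi, item_entry);
    oc_pool->mask |= 0x1 << wq->priority; // mark this priority non-empty
    if (oc_pool->num_spares > 0) {
        oc_pool->num_spares--; // assumed convention: the waker claims the spare
        pthread_cond_signal(&oc_pool->spare_workers); // wake one idle worker
    }
    pthread_mutex_unlock(&oc_pool->mutex);
}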
@@ -171,7 +171,7 @@ static dispatch_workq_overcommit_pool_s _dispatch_workq_overcommit_pool;
 */
typedef struct dispatch_workq_pool_s {
    volatile uint32_t mask;
-   dispatch_workqueue_t queues[WORKQ_NUM_PRIORITIES];
+   dispatch_workq_workqueue_t wqs[WORKQ_NUM_PRIORITIES];
} dispatch_workq_pool_s, *dispatch_workq_pool_t;

static dispatch_workq_pool_s _dispatch_workq_normal_pool;
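By contrast, the normal pool avoids a pool-wide mutex: its mask is updated with atomic operations and each workqueue carries its own dispatch_unfair_lock_s. A hedged sketch of how a producer would publish work there (mark_queue_nonempty is illustrative only; os_atomic_or is libdispatch's atomic fetch-or shim):

static void
mark_queue_nonempty(dispatch_workq_pool_t pool, int priority)
{
    // publish "the queue at this priority has work" without a pool lock
    os_atomic_or(&pool->mask, 0x1 << priority, relaxed);
}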
@@ -209,10 +209,11 @@ _dispatch_workq_dealloc_item(dispatch_workq_item_t wi)


static void
-_dispatch_workq_allocate_workqueues(dispatch_workqueue_t *queues, int num_queues, bool overcommit)
+_dispatch_workq_allocate_workqueues(dispatch_workq_workqueue_t *queues,
+       int num_queues, bool overcommit)
{
    for (int i = 0; i < num_queues; i++) {
-       dispatch_workqueue_t wq;
+       dispatch_workq_workqueue_t wq;
        wq = _dispatch_calloc(1, ROUND_UP_TO_CACHELINE_SIZE(sizeof(*wq)));

        wq->priority = i;
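Each workqueue is padded out to a cache-line multiple here so that adjacent queues, which different workers touch concurrently, do not false-share a line. The real ROUND_UP_TO_CACHELINE_SIZE macro is defined elsewhere in libdispatch; a typical shape, assuming 64-byte lines:

#define CACHELINE_SIZE 64u
#define ROUND_UP_TO_CACHELINE_SIZE(x) \
        (((x) + (CACHELINE_SIZE - 1)) & ~(CACHELINE_SIZE - 1))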
@@ -271,7 +272,7 @@ _dispatch_workq_init_once(void *context DISPATCH_UNUSED)
    _dispatch_workq_management_init();

    // overcommit pool
-   _dispatch_workq_allocate_workqueues(_dispatch_workq_overcommit_pool.queues,
+   _dispatch_workq_allocate_workqueues(_dispatch_workq_overcommit_pool.wqs,
            WORKQ_NUM_PRIORITIES, true);
    r = pthread_mutex_init(&_dispatch_workq_overcommit_pool.mutex, NULL);
    (void)dispatch_assume_zero(r);
@@ -280,7 +281,7 @@ _dispatch_workq_init_once(void *context DISPATCH_UNUSED)
    (void)dispatch_assume_zero(r);

    // normal pool
-   _dispatch_workq_allocate_workqueues(_dispatch_workq_normal_pool.queues,
+   _dispatch_workq_allocate_workqueues(_dispatch_workq_normal_pool.wqs,
            WORKQ_NUM_PRIORITIES, false);

    // create initial set of normal workers
@@ -291,24 +292,24 @@ _dispatch_workq_init_once(void *context DISPATCH_UNUSED)


int
-dispatch_workq_get_wq(dispatch_workqueue_t *workqp,
+dispatch_workq_get_wq(dispatch_workq_workqueue_t *workqp,
        int priority, int overcommit)
{
    dispatch_once_f(&_dispatch_workq_init_once_pred, NULL,
            &_dispatch_workq_init_once);

    dispatch_assert(priority >= 0 && priority < WORKQ_NUM_PRIORITIES);
    if (overcommit) {
-       *workqp = _dispatch_workq_overcommit_pool.queues[priority];
+       *workqp = _dispatch_workq_overcommit_pool.wqs[priority];
    } else {
-       *workqp = _dispatch_workq_normal_pool.queues[priority];
+       *workqp = _dispatch_workq_normal_pool.wqs[priority];
    }

    return 0;
}

int
-dispatch_workq_additem_np(dispatch_workqueue_t workq,
+dispatch_workq_additem_np(dispatch_workq_workqueue_t workq,
        void (*item_func)(void *), void *item_arg)
{
    dispatch_workq_item_t wi = _dispatch_workq_alloc_item(item_func, item_arg);
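Taken together, these two entry points are the whole client-facing surface: fetch the queue for a (priority, overcommit) pair, then push items onto it. A hypothetical caller, sketched from the signatures above (my_work_func and my_work_arg are placeholders, not part of the patch):

static void my_work_func(void *arg); // placeholder work function

static void
example_submit(void *my_work_arg)
{
    dispatch_workq_workqueue_t wq;
    (void)dispatch_workq_get_wq(&wq, 0 /* highest priority */, 1 /* overcommit */);
    (void)dispatch_workq_additem_np(wq, my_work_func, my_work_arg);
}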
@@ -348,7 +349,7 @@ static void
_dispatch_workq_add_control_item(void *op_code)
{
    dispatch_workq_item_t wi = _dispatch_workq_alloc_item(NULL, op_code);
-   dispatch_workqueue_t workq = _dispatch_workq_normal_pool.queues[0];
+   dispatch_workq_workqueue_t workq = _dispatch_workq_normal_pool.wqs[0];
    unsigned int wq_index_bit = 1; // highest priority queue

    _dispatch_unfair_lock_lock(&workq->lock);
@@ -390,29 +391,30 @@ _dispatch_workq_overcommit_worker_main(void *context DISPATCH_UNUSED)
{
    int r;
    sigset_t mask;
+   dispatch_workq_overcommit_pool_t oc_pool = &_dispatch_workq_overcommit_pool;

    r = sigfillset(&mask);
    (void)dispatch_assume_zero(r);
    r = _dispatch_pthread_sigmask(SIG_BLOCK, &mask, NULL);
    (void)dispatch_assume_zero(r);

-   r = pthread_mutex_lock(&_dispatch_workq_overcommit_pool.mutex);
+   r = pthread_mutex_lock(&oc_pool->mutex);
    (void)dispatch_assume_zero(r);

    for (;;) {
-       unsigned int idx = __builtin_ffs(_dispatch_workq_overcommit_pool.mask);
+       unsigned int idx = __builtin_ffs(oc_pool->mask);
        if (idx > 0) {
-           dispatch_workqueue_t wq = _dispatch_workq_overcommit_pool.queues[idx - 1];
+           dispatch_workq_workqueue_t wq = oc_pool->wqs[idx - 1];
            dispatch_workq_item_t work = STAILQ_FIRST(&wq->item_listhead);
            if (work != NULL) {
                /* Remove the first work item */
                STAILQ_REMOVE_HEAD(&wq->item_listhead, item_entry);
                if (STAILQ_EMPTY(&wq->item_listhead)) {
-                   _dispatch_workq_overcommit_pool.mask &= ~(0x1 << wq->priority);
+                   oc_pool->mask &= ~(0x1 << wq->priority);
                }

                /* Release pool mutex */
-               r = pthread_mutex_unlock(&_dispatch_workq_overcommit_pool.mutex);
+               r = pthread_mutex_unlock(&oc_pool->mutex);
                (void)dispatch_assume_zero(r);

                /* Execute the work item */
@@ -423,16 +425,15 @@ _dispatch_workq_overcommit_worker_main(void *context DISPATCH_UNUSED)
                func(func_arg);

                /* Acquire pool mutex */
-               r = pthread_mutex_lock(&_dispatch_workq_overcommit_pool.mutex);
+               r = pthread_mutex_lock(&oc_pool->mutex);
                (void)dispatch_assume_zero(r);
                continue;
            }
        }

        /* Wait for more work to be available. */
-       _dispatch_workq_overcommit_pool.num_spares++;
-       r = pthread_cond_wait(&_dispatch_workq_overcommit_pool.spare_workers,
-               &_dispatch_workq_overcommit_pool.mutex);
+       oc_pool->num_spares++;
+       r = pthread_cond_wait(&oc_pool->spare_workers, &oc_pool->mutex);
    }
}
@@ -442,6 +443,7 @@ _dispatch_workq_normal_worker_main(void *context DISPATCH_UNUSED)
{
    int r;
    sigset_t mask;
+   dispatch_workq_pool_t pool = &_dispatch_workq_normal_pool;

    r = sigfillset(&mask);
    (void)dispatch_assume_zero(r);
@@ -456,9 +458,9 @@ _dispatch_workq_normal_worker_main(void *context DISPATCH_UNUSED)
    _dispatch_debug("workq: worker successfully registered");

    for (;;) {
-       int idx = __builtin_ffs(os_atomic_load(&_dispatch_workq_normal_pool.mask, relaxed));
+       int idx = __builtin_ffs(os_atomic_load(&pool->mask, relaxed));
        if (idx > 0) {
-           dispatch_workqueue_t wq = _dispatch_workq_normal_pool.queues[idx - 1];
+           dispatch_workq_workqueue_t wq = pool->wqs[idx - 1];
            _dispatch_unfair_lock_lock(&wq->lock);
            dispatch_workq_item_t work = STAILQ_FIRST(&wq->item_listhead);
            if (unlikely(work == NULL)) {
@@ -469,7 +471,7 @@ _dispatch_workq_normal_worker_main(void *context DISPATCH_UNUSED)
            // found work: remove it.
            STAILQ_REMOVE_HEAD(&wq->item_listhead, item_entry);
            if (STAILQ_EMPTY(&wq->item_listhead)) {
-               os_atomic_and(&_dispatch_workq_normal_pool.mask, ~(0x1 << wq->priority), relaxed);
+               os_atomic_and(&pool->mask, ~(0x1 << wq->priority), relaxed);
            }
            _dispatch_unfair_lock_unlock(&wq->lock);

@@ -501,7 +503,7 @@ _dispatch_workq_normal_worker_main(void *context DISPATCH_UNUSED)
        for (int i = 0; i < 5 && !found_work; i++) {
            r = pthread_yield();
            (void)dispatch_assume_zero(r);
-           found_work = os_atomic_load(&_dispatch_workq_normal_pool.mask, relaxed) != 0;
+           found_work = os_atomic_load(&pool->mask, relaxed) != 0;
        }

        if (!found_work) {