14
14
#include <string.h>
15
15
#include <threads.h>
16
16
17
#ifdef RUNTIME_STAT
/*
 * Runtime statistics counters (printed by do_analysis() at exit):
 * "rtry"  the number of retries in the __list_find function.
 * "cons"  the number of wait-free contains in the __list_find function that
 *         curr pointer pointed.
 * "trav"  the number of list element traversal in the __list_find function.
 * "fail"  the number of CAS() failures.
 * "del"   the number of list_delete operation failed and restart again.
 * "ins"   the number of list_insert operation failed and restart again.
 * "load"  the number of atomic_load operation in list_delete, list_insert
 *         and __list_find.
 * "store" the number of atomic_store operation in list_delete, list_insert
 *         and __list_find.
 */
static atomic_uint_fast64_t rtry = 0, cons = 0, trav = 0, fail = 0;
static atomic_uint_fast64_t del = 0, ins = 0;
static atomic_uint_fast64_t load = 0, store = 0;
static atomic_uint_fast64_t deletes = 0, inserts = 0;

/* Per-counter enable flags for TRACE_ATOMIC_FECTH_ADD(): any non-zero value
 * enables the increment; TRACE_NOTHING (0) disables it. */
enum {
    TRACE_NOTHING = 0,
    TRACE_TRIES,
    TRACE_WAIT_FREE_CONS,
    TRACE_TRAVERSAL,
    TRACE_DEL,
    TRACE_INS,
    TRACE_INSERTS,
    TRACE_DELETES,
};

/* Compare-and-swap wrapper that also counts failed CAS attempts. */
#define CAS(obj, expected, desired)                                          \
    ({                                                                       \
        bool __ret = atomic_compare_exchange_strong(obj, expected, desired); \
        if (!__ret)                                                          \
            atomic_fetch_add(&fail, 1);                                      \
        __ret;                                                               \
    })

/* atomic_load wrapper that counts every load performed. */
#define ATOMIC_LOAD(obj)            \
    ({                              \
        atomic_fetch_add(&load, 1); \
        atomic_load(obj);           \
    })

/* atomic_store_explicit wrapper that counts every store performed. */
#define ATOMIC_STORE_EXPLICIT(obj, desired, order)  \
    do {                                            \
        atomic_fetch_add(&store, 1);                \
        atomic_store_explicit(obj, desired, order); \
    } while (0)

/* Conditionally bump a statistics counter.
 * NOTE(review): "FECTH" is a typo for "FETCH"; the spelling is kept because
 * call sites elsewhere in this file use this name. */
#define TRACE_ATOMIC_FECTH_ADD(obj, arg, ops) \
    do {                                      \
        if (ops)                              \
            atomic_fetch_add(obj, arg);       \
    } while (0)

/* Print a legend and the final value of every statistics counter.
 * Installed as an atexit() handler by RUNTIME_STAT_INIT().
 * The counters are _Atomic objects: they are loaded explicitly and cast to
 * unsigned long long so each argument matches its printf conversion
 * (passing an atomic object straight to a variadic function, or printing a
 * uint_fast64_t with %ld, is not portable). */
void do_analysis(void)
{
    printf(
        "\"rtry\" is the number of retries in the __list_find function.\n");
    printf(
        "\"cons\" is the number of wait-free contains in the __list_find "
        "function that curr pointer pointed.\n");
    printf(
        "\"trav\" is the number of list element traversal in the "
        "__list_find function.\n");
    printf("\"fail\" is the number of CAS() failures.\n");
    printf(
        "\"del\" is the number of list_delete operation failed and "
        "restart again.\n");
    printf(
        "\"ins\" is the number of list_insert operation failed and "
        "restart again.\n");
    printf("\"deletes\" is the number of linked list elements deleted.\n");
    printf("\"inserts\" is the number of linked list elements created.\n");
    printf(
        "\"load\" is the number of atomic_load operation in list_delete, "
        "list_insert and __list_find.\n");
    printf(
        "\"store\" is the number of atomic_store operation in list_delete, "
        "list_insert and __list_find.\n");
    printf("\n%10s %10s %10s %10s %10s %10s %10s %10s %10s %10s\n", "rtry",
           "cons", "trav", "fail", "del", "ins", "load", "store", "deletes",
           "inserts");
    /* 10 columns x 10 chars + 9 separating spaces = 109-char rule. */
    for (int i = 0; i < 109; i++)
        printf("-");
    printf(
        "\n%10llu %10llu %10llu %10llu %10llu %10llu %10llu %10llu %10llu "
        "%10llu\n",
        (unsigned long long) atomic_load(&rtry),
        (unsigned long long) atomic_load(&cons),
        (unsigned long long) atomic_load(&trav),
        (unsigned long long) atomic_load(&fail),
        (unsigned long long) atomic_load(&del),
        (unsigned long long) atomic_load(&ins),
        (unsigned long long) atomic_load(&load),
        (unsigned long long) atomic_load(&store),
        (unsigned long long) atomic_load(&deletes),
        (unsigned long long) atomic_load(&inserts));
}

/* Register the statistics report to run at normal program termination. */
#define RUNTIME_STAT_INIT() atexit(do_analysis)

#else

/* Statistics disabled: the wrappers degrade to the bare atomic operations
 * and the trace/init hooks compile away to nothing. */
#define CAS(obj, expected, desired) \
    ({ atomic_compare_exchange_strong(obj, expected, desired); })
#define ATOMIC_LOAD(obj) ({ atomic_load(obj); })
#define ATOMIC_STORE_EXPLICIT(obj, desired, order)  \
    do {                                            \
        atomic_store_explicit(obj, desired, order); \
    } while (0)
#define TRACE_ATOMIC_FECTH_ADD(obj, arg, ops) \
    do {                                      \
    } while (0)
#define RUNTIME_STAT_INIT() \
    do {                    \
    } while (0)
#endif /* RUNTIME_STAT */
120
+
17
121
#define HP_MAX_THREADS 128
18
122
#define HP_MAX_HPS 5 /* This is named 'K' in the HP paper */
19
123
#define CLPAD (128 / sizeof(uintptr_t))
@@ -162,8 +266,6 @@ void list_hp_retire(list_hp_t *hp, uintptr_t ptr)
162
266
#define N_THREADS (128 / 2)
163
267
#define MAX_THREADS 128
164
268
165
- static atomic_uint_fast32_t deletes = 0 , inserts = 0 ;
166
-
167
269
enum { HP_NEXT = 0 , HP_CURR = 1 , HP_PREV };
168
270
169
271
#define is_marked (p ) (bool) ((uintptr_t)(p) &0x01)
@@ -195,7 +297,7 @@ list_node_t *list_node_new(list_key_t key)
195
297
list_node_t * node = aligned_alloc (128 , sizeof (* node ));
196
298
assert (node );
197
299
* node = (list_node_t ){.magic = LIST_MAGIC , .key = key };
198
- ( void ) atomic_fetch_add ( & inserts , 1 );
300
+ TRACE_ATOMIC_FECTH_ADD ( & inserts , 1 , TRACE_INSERTS );
199
301
return node ;
200
302
}
201
303
@@ -205,7 +307,7 @@ void list_node_destroy(list_node_t *node)
205
307
return ;
206
308
assert (node -> magic == LIST_MAGIC );
207
309
free (node );
208
- ( void ) atomic_fetch_add ( & deletes , 1 );
310
+ TRACE_ATOMIC_FECTH_ADD ( & deletes , 1 , TRACE_DELETES );
209
311
}
210
312
211
313
static void __list_node_delete (void * arg )
@@ -225,21 +327,31 @@ static bool __list_find(list_t *list,
225
327
226
328
try_again :
227
329
prev = & list -> head ;
228
- curr = (list_node_t * ) atomic_load (prev );
330
+ curr = (list_node_t * ) ATOMIC_LOAD (prev );
229
331
(void ) list_hp_protect_ptr (list -> hp , HP_CURR , (uintptr_t ) curr );
230
- if (atomic_load (prev ) != get_unmarked (curr ))
332
+ if (ATOMIC_LOAD (prev ) != get_unmarked (curr )) {
333
+ TRACE_ATOMIC_FECTH_ADD (& rtry , 1 , TRACE_TRIES );
231
334
goto try_again ;
335
+ }
232
336
while (true) {
233
- next = (list_node_t * ) atomic_load (& get_unmarked_node (curr )-> next );
337
+ #ifdef RUNTIME_STAT
338
+ if (is_marked (curr ))
339
+ TRACE_ATOMIC_FECTH_ADD (& cons , 1 , TRACE_WAIT_FREE_CONS );
340
+ #endif
341
+ next = (list_node_t * ) ATOMIC_LOAD (& get_unmarked_node (curr )-> next );
234
342
(void ) list_hp_protect_ptr (list -> hp , HP_NEXT , get_unmarked (next ));
235
343
/* On a CAS failure, the search function, "__list_find," will simply
236
344
* have to go backwards in the list until an unmarked element is found
237
345
* from which the search in increasing key order can be started.
238
346
*/
239
- if (atomic_load (& get_unmarked_node (curr )-> next ) != (uintptr_t ) next )
347
+ if (ATOMIC_LOAD (& get_unmarked_node (curr )-> next ) != (uintptr_t ) next ) {
348
+ TRACE_ATOMIC_FECTH_ADD (& rtry , 1 , TRACE_TRIES );
240
349
goto try_again ;
241
- if (atomic_load (prev ) != get_unmarked (curr ))
350
+ }
351
+ if (ATOMIC_LOAD (prev ) != get_unmarked (curr )) {
352
+ TRACE_ATOMIC_FECTH_ADD (& rtry , 1 , TRACE_TRIES );
242
353
goto try_again ;
354
+ }
243
355
if (get_unmarked_node (next ) == next ) {
244
356
if (!(get_unmarked_node (curr )-> key < * key )) {
245
357
* par_curr = curr ;
@@ -252,12 +364,15 @@ static bool __list_find(list_t *list,
252
364
get_unmarked (curr ));
253
365
} else {
254
366
uintptr_t tmp = get_unmarked (curr );
255
- if (!atomic_compare_exchange_strong (prev , & tmp , get_unmarked (next )))
367
+ if (!CAS (prev , & tmp , get_unmarked (next ))) {
368
+ TRACE_ATOMIC_FECTH_ADD (& rtry , 1 , TRACE_TRIES );
256
369
goto try_again ;
370
+ }
257
371
list_hp_retire (list -> hp , get_unmarked (curr ));
258
372
}
259
373
curr = next ;
260
374
(void ) list_hp_protect_release (list -> hp , HP_CURR , get_unmarked (next ));
375
+ TRACE_ATOMIC_FECTH_ADD (& trav , 1 , TRACE_TRAVERSAL );
261
376
}
262
377
}
263
378
@@ -274,13 +389,14 @@ bool list_insert(list_t *list, list_key_t key)
274
389
list_hp_clear (list -> hp );
275
390
return false;
276
391
}
277
- atomic_store_explicit (& node -> next , (uintptr_t ) curr ,
392
+ ATOMIC_STORE_EXPLICIT (& node -> next , (uintptr_t ) curr ,
278
393
memory_order_relaxed );
279
394
uintptr_t tmp = get_unmarked (curr );
280
- if (atomic_compare_exchange_strong (prev , & tmp , (uintptr_t ) node )) {
395
+ if (CAS (prev , & tmp , (uintptr_t ) node )) {
281
396
list_hp_clear (list -> hp );
282
397
return true;
283
398
}
399
+ TRACE_ATOMIC_FECTH_ADD (& ins , 1 , TRACE_INS );
284
400
}
285
401
}
286
402
@@ -296,12 +412,13 @@ bool list_delete(list_t *list, list_key_t key)
296
412
297
413
uintptr_t tmp = get_unmarked (next );
298
414
299
- if (!atomic_compare_exchange_strong (& curr -> next , & tmp ,
300
- get_marked ( next )))
415
+ if (!CAS (& curr -> next , & tmp , get_marked ( next ))) {
416
+ TRACE_ATOMIC_FECTH_ADD ( & del , 1 , TRACE_DEL );
301
417
continue ;
418
+ }
302
419
303
420
tmp = get_unmarked (curr );
304
- if (atomic_compare_exchange_strong (prev , & tmp , get_unmarked (next ))) {
421
+ if (CAS (prev , & tmp , get_unmarked (next ))) {
305
422
list_hp_clear (list -> hp );
306
423
list_hp_retire (list -> hp , get_unmarked (curr ));
307
424
} else {
@@ -364,6 +481,7 @@ static void *delete_thread(void *arg)
364
481
365
482
int main (void )
366
483
{
484
+ RUNTIME_STAT_INIT ();
367
485
list_t * list = list_new ();
368
486
369
487
pthread_t thr [N_THREADS ];
@@ -382,8 +500,5 @@ int main(void)
382
500
383
501
list_destroy (list );
384
502
385
- fprintf (stderr , "inserts = %zu, deletes = %zu\n" , atomic_load (& inserts ),
386
- atomic_load (& deletes ));
387
-
388
503
return 0 ;
389
504
}
0 commit comments