 #include <string.h>
 #include <threads.h>
 
+static atomic_uint_fast64_t deletes = 0, inserts = 0;
+
+#ifdef RUNTIME_STAT
+/*
+ * "rtry" is the number of retries in the __list_find function.
+ * "cons" is the number of wait-free contains in the __list_find function
+ *        that the curr pointer pointed to.
+ * "trav" is the number of list elements traversed in the __list_find function.
+ * "fail" is the number of CAS() failures.
+ * "del"  is the number of times a list_delete operation failed and restarted.
+ * "ins"  is the number of times a list_insert operation failed and restarted.
+ * "load" is the number of atomic_load operations in list_delete, list_insert
+ *        and __list_find.
+ * "store" is the number of atomic_store operations in list_delete, list_insert
+ *         and __list_find.
+ */
+struct runtime_stat {
+    atomic_uint_fast64_t rtry, cons, trav, fail;
+    atomic_uint_fast64_t del, ins;
+    atomic_uint_fast64_t load, store;
+};
+struct runtime_stat rts = {0};
+
+enum {
+    TRACE_NOTHING = 0,
+    TRACE_TRIES,
+    TRACE_WAIT_FREE_CONS,
+    TRACE_TRAVERSAL,
+    TRACE_DEL,
+    TRACE_INS,
+    TRACE_INSERTS,
+    TRACE_DELETES
+};
+
+#define CAS(obj, expected, desired)                                          \
+    ({                                                                       \
+        bool __ret = atomic_compare_exchange_strong(obj, expected, desired); \
+        if (!__ret)                                                          \
+            atomic_fetch_add(&rts.fail, 1);                                  \
+        __ret;                                                               \
+    })
+#define ATOMIC_LOAD(obj)                \
+    ({                                  \
+        atomic_fetch_add(&rts.load, 1); \
+        atomic_load(obj);               \
+    })
+#define ATOMIC_STORE_EXPLICIT(obj, desired, order)  \
+    do {                                            \
+        atomic_fetch_add(&rts.store, 1);            \
+        atomic_store_explicit(obj, desired, order); \
+    } while (0)
+#define ATOMIC_FETCH_ADD(obj, arg, ops) \
+    ({                                  \
+        if (TRACE_##ops)                \
+            atomic_fetch_add(obj, arg); \
+    })
+
+static void do_analysis(void)
+{
+    printf(
+        "\"rtry\" is the number of retries in the __list_find function.\n");
+    printf(
+        "\"cons\" is the number of wait-free contains in the __list_find "
+        "function that the curr pointer pointed to.\n");
+    printf(
+        "\"trav\" is the number of list elements traversed in the "
+        "__list_find function.\n");
+    printf("\"fail\" is the number of CAS() failures.\n");
+    printf(
+        "\"del\" is the number of times a list_delete operation failed and "
+        "restarted.\n");
+    printf(
+        "\"ins\" is the number of times a list_insert operation failed and "
+        "restarted.\n");
+    printf("\"deletes\" is the number of linked list elements deleted.\n");
+    printf("\"inserts\" is the number of linked list elements created.\n");
+    printf(
+        "\"load\" is the number of atomic_load operations in list_delete, "
+        "list_insert and __list_find.\n");
+    printf(
+        "\"store\" is the number of atomic_store operations in list_delete, "
+        "list_insert and __list_find.\n");
+    printf("\n%10s %10s %10s %10s %10s %10s %10s %10s %10s %10s\n", "rtry",
+           "cons", "trav", "fail", "del", "ins", "load", "store", "deletes",
+           "inserts");
+    for (int i = 0; i < 109; i++)
+        printf("-");
+    printf("\n%10ld %10ld %10ld %10ld %10ld %10ld %10ld %10ld %10ld %10ld\n",
+           atomic_load(&rts.rtry), atomic_load(&rts.cons),
+           atomic_load(&rts.trav), atomic_load(&rts.fail),
+           atomic_load(&rts.del), atomic_load(&rts.ins), atomic_load(&rts.load),
+           atomic_load(&rts.store), atomic_load(&deletes),
+           atomic_load(&inserts));
+}
+
+#define RUNTIME_STAT_INIT() atexit(do_analysis)
+
+#else
+
+#define CAS(obj, expected, desired) \
+    ({ atomic_compare_exchange_strong(obj, expected, desired); })
+#define ATOMIC_LOAD(obj) ({ atomic_load(obj); })
+#define ATOMIC_STORE_EXPLICIT(obj, desired, order)  \
+    do {                                            \
+        atomic_store_explicit(obj, desired, order); \
+    } while (0)
+#define ATOMIC_FETCH_ADD(obj, arg, ops) ({})
+
+static void do_analysis(void)
+{
+    fprintf(stderr, "inserts = %zu, deletes = %zu\n", atomic_load(&inserts),
+            atomic_load(&deletes));
+}
+
+#define RUNTIME_STAT_INIT() atexit(do_analysis)
+
+#endif /* RUNTIME_STAT */
+
 #define HP_MAX_THREADS 128
 #define HP_MAX_HPS 5 /* This is named 'K' in the HP paper */
 #define CLPAD (128 / sizeof(uintptr_t))
@@ -162,8 +280,6 @@ void list_hp_retire(list_hp_t *hp, uintptr_t ptr)
 #define N_THREADS (128 / 2)
 #define MAX_THREADS 128
 
-static atomic_uint_fast32_t deletes = 0, inserts = 0;
-
 enum { HP_NEXT = 0, HP_CURR = 1, HP_PREV };
 
 #define is_marked(p) (bool) ((uintptr_t)(p) & 0x01)
@@ -225,21 +341,29 @@ static bool __list_find(list_t *list,
 
 try_again:
     prev = &list->head;
-    curr = (list_node_t *) atomic_load(prev);
+    curr = (list_node_t *) ATOMIC_LOAD(prev);
     (void) list_hp_protect_ptr(list->hp, HP_CURR, (uintptr_t) curr);
-    if (atomic_load(prev) != get_unmarked(curr))
+    if (ATOMIC_LOAD(prev) != get_unmarked(curr)) {
+        ATOMIC_FETCH_ADD(&rts.rtry, 1, TRIES);
         goto try_again;
+    }
     while (true) {
-        next = (list_node_t *) atomic_load(&get_unmarked_node(curr)->next);
+        if (is_marked(curr))
+            ATOMIC_FETCH_ADD(&rts.cons, 1, WAIT_FREE_CONS);
+        next = (list_node_t *) ATOMIC_LOAD(&get_unmarked_node(curr)->next);
         (void) list_hp_protect_ptr(list->hp, HP_NEXT, get_unmarked(next));
         /* On a CAS failure, the search function, "__list_find," will simply
          * have to go backwards in the list until an unmarked element is found
          * from which the search in increasing key order can be started.
          */
-        if (atomic_load(&get_unmarked_node(curr)->next) != (uintptr_t) next)
+        if (ATOMIC_LOAD(&get_unmarked_node(curr)->next) != (uintptr_t) next) {
+            ATOMIC_FETCH_ADD(&rts.rtry, 1, TRIES);
             goto try_again;
-        if (atomic_load(prev) != get_unmarked(curr))
+        }
+        if (ATOMIC_LOAD(prev) != get_unmarked(curr)) {
+            ATOMIC_FETCH_ADD(&rts.rtry, 1, TRIES);
             goto try_again;
+        }
         if (get_unmarked_node(next) == next) {
             if (!(get_unmarked_node(curr)->key < *key)) {
                 *par_curr = curr;
@@ -252,12 +376,15 @@ static bool __list_find(list_t *list,
                                            get_unmarked(curr));
         } else {
             uintptr_t tmp = get_unmarked(curr);
-            if (!atomic_compare_exchange_strong(prev, &tmp, get_unmarked(next)))
+            if (!CAS(prev, &tmp, get_unmarked(next))) {
+                ATOMIC_FETCH_ADD(&rts.rtry, 1, TRIES);
                 goto try_again;
+            }
             list_hp_retire(list->hp, get_unmarked(curr));
         }
         curr = next;
         (void) list_hp_protect_release(list->hp, HP_CURR, get_unmarked(next));
+        ATOMIC_FETCH_ADD(&rts.trav, 1, TRAVERSAL);
     }
 }
 
@@ -274,13 +401,14 @@ bool list_insert(list_t *list, list_key_t key)
             list_hp_clear(list->hp);
             return false;
         }
-        atomic_store_explicit(&node->next, (uintptr_t) curr,
+        ATOMIC_STORE_EXPLICIT(&node->next, (uintptr_t) curr,
                               memory_order_relaxed);
         uintptr_t tmp = get_unmarked(curr);
-        if (atomic_compare_exchange_strong(prev, &tmp, (uintptr_t) node)) {
+        if (CAS(prev, &tmp, (uintptr_t) node)) {
             list_hp_clear(list->hp);
             return true;
         }
+        ATOMIC_FETCH_ADD(&rts.ins, 1, INS);
     }
 }
 
@@ -296,12 +424,13 @@ bool list_delete(list_t *list, list_key_t key)
 
         uintptr_t tmp = get_unmarked(next);
 
-        if (!atomic_compare_exchange_strong(&curr->next, &tmp,
-                                            get_marked(next)))
+        if (!CAS(&curr->next, &tmp, get_marked(next))) {
+            ATOMIC_FETCH_ADD(&rts.del, 1, DEL);
             continue;
+        }
 
         tmp = get_unmarked(curr);
-        if (atomic_compare_exchange_strong(prev, &tmp, get_unmarked(next))) {
+        if (CAS(prev, &tmp, get_unmarked(next))) {
             list_hp_clear(list->hp);
             list_hp_retire(list->hp, get_unmarked(curr));
         } else {
@@ -364,6 +493,7 @@ static void *delete_thread(void *arg)
 
 int main(void)
 {
+    RUNTIME_STAT_INIT();
     list_t *list = list_new();
 
     pthread_t thr[N_THREADS];
@@ -382,8 +512,5 @@ int main(void)
 
     list_destroy(list);
 
-    fprintf(stderr, "inserts = %zu, deletes = %zu\n", atomic_load(&inserts),
-            atomic_load(&deletes));
-
     return 0;
 }
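
The least obvious part of the patch is the ATOMIC_FETCH_ADD(obj, arg, ops) wrapper: TRACE_##ops is pasted into one of the enumerators above, so a counter is only bumped when that trace class is non-zero, and the never-taken branch for TRACE_NOTHING is removed by the compiler. The following stand-alone sketch illustrates the mechanism; the retries counter and main() here are illustrative only and are not part of the patch, and, like the macros above, it relies on GCC/Clang statement expressions.

#include <stdatomic.h>
#include <stdio.h>

/* Trace classes mirroring the enum in the patch: NOTHING is 0, the rest are non-zero. */
enum { TRACE_NOTHING = 0, TRACE_TRIES };

static atomic_uint_fast64_t retries = 0; /* hypothetical counter, for this demo only */

/* Same shape as the patch's macro: TRACE_##ops selects the trace class at compile time. */
#define ATOMIC_FETCH_ADD(obj, arg, ops) \
    ({                                  \
        if (TRACE_##ops)                \
            atomic_fetch_add(obj, arg); \
    })

int main(void)
{
    ATOMIC_FETCH_ADD(&retries, 1, TRIES);   /* TRACE_TRIES != 0: increment happens   */
    ATOMIC_FETCH_ADD(&retries, 1, NOTHING); /* TRACE_NOTHING == 0: branch never taken */
    printf("retries = %lu\n", (unsigned long) atomic_load(&retries));
    return 0;
}

Building with RUNTIME_STAT defined (e.g. passing -DRUNTIME_STAT to the compiler) makes RUNTIME_STAT_INIT() register do_analysis() through atexit(), so the full counter table is printed when the test program exits; without it, the wrappers reduce to the plain C11 atomics and only the insert/delete totals are reported on stderr.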