#include <string.h>
#include <threads.h>

+ #ifdef RUNTIME_STAT
+ /*
+  * "rtry" is the number of retries in the __list_find function.
+  * "cons" is the number of times __list_find traverses a logically deleted
+  *        (marked) node via the curr pointer, i.e. wait-free "contains" steps.
+  * "trav" is the number of list elements traversed in the __list_find
+  *        function.
+  * "fail" is the number of CAS() failures.
+  * "del" is the number of times a list_delete operation failed and restarted.
+  * "ins" is the number of times a list_insert operation failed and restarted.
+  * "load" is the number of atomic_load operations in list_delete, list_insert
+  *        and __list_find.
+  * "store" is the number of atomic_store operations in list_delete,
+  *        list_insert and __list_find.
+  * "deletes" is the number of linked list elements deleted.
+  * "inserts" is the number of linked list elements created.
+  */
+ static atomic_uint_fast64_t rtry = 0, cons = 0, trav = 0, fail = 0;
+ static atomic_uint_fast64_t del = 0, ins = 0;
+ static atomic_uint_fast64_t load = 0, store = 0;
+ static atomic_uint_fast64_t deletes = 0, inserts = 0;
+
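+ /* Each TRACE_* macro atomically bumps its counter; the variants in the #else
+  * branch below expand to empty statements, so the hot paths carry no tracing
+  * overhead when RUNTIME_STAT is not defined. */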
+ #define TRACE_ATOMIC_TRIES \
+     do { \
+         atomic_fetch_add(&rtry, 1); \
+     } while (0)
+ #define TRACE_WAIT_FREE_CONS \
+     do { \
+         atomic_fetch_add(&cons, 1); \
+     } while (0)
+ #define TRACE_TRAVERSAL \
+     do { \
+         atomic_fetch_add(&trav, 1); \
+     } while (0)
+ #define TRACE_DEL \
+     do { \
+         atomic_fetch_add(&del, 1); \
+     } while (0)
+ #define TRACE_INS \
+     do { \
+         atomic_fetch_add(&ins, 1); \
+     } while (0)
+ #define TRACE_DELETES \
+     do { \
+         atomic_fetch_add(&deletes, 1); \
+     } while (0)
+ #define TRACE_INSERTS \
+     do { \
+         atomic_fetch_add(&inserts, 1); \
+     } while (0)
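+ /* The wrappers below shadow the plain C11 atomics used by the list code:
+  * CAS() counts failed compare-and-swap attempts, ATOMIC_LOAD() counts loads,
+  * and ATOMIC_STORE_EXPLICIT() counts stores, while preserving the original
+  * results and memory-order arguments. */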
+ #define CAS(obj, expected, desired) \
+     ({ \
+         bool __ret = atomic_compare_exchange_strong(obj, expected, desired); \
+         if (!__ret) \
+             atomic_fetch_add(&fail, 1); \
+         __ret; \
+     })
+ #define ATOMIC_LOAD(obj) \
+     ({ \
+         atomic_fetch_add(&load, 1); \
+         atomic_load(obj); \
+     })
+ #define ATOMIC_STORE_EXPLICIT(obj, desired, order) \
+     do { \
+         atomic_fetch_add(&store, 1); \
+         atomic_store_explicit(obj, desired, order); \
+     } while (0)
+
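+ /* Print a legend for every counter followed by a single row of totals;
+  * main() calls this once at the end of the run. */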
+ void do_analysis(void)
+ {
+     printf("\"rtry\" is the number of retries in the __list_find function.\n");
+     printf(
+         "\"cons\" is the number of times __list_find traverses a logically "
+         "deleted (marked) node via the curr pointer.\n");
+     printf(
+         "\"trav\" is the number of list elements traversed in the "
+         "__list_find function.\n");
+     printf("\"fail\" is the number of CAS() failures.\n");
+     printf(
+         "\"del\" is the number of times a list_delete operation failed and "
+         "restarted.\n");
+     printf(
+         "\"ins\" is the number of times a list_insert operation failed and "
+         "restarted.\n");
+     printf("\"deletes\" is the number of linked list elements deleted.\n");
+     printf("\"inserts\" is the number of linked list elements created.\n");
+     printf(
+         "\"load\" is the number of atomic_load operations in list_delete, "
+         "list_insert and __list_find.\n");
+     printf(
+         "\"store\" is the number of atomic_store operations in list_delete, "
+         "list_insert and __list_find.\n");
+     printf("\n%10s %10s %10s %10s %10s %10s %10s %10s %10s %10s\n", "rtry",
+            "cons", "trav", "fail", "del", "ins", "load", "store", "deletes",
+            "inserts");
+     for (int i = 0; i < 109; i++)
+         printf("-");
+     printf("\n%10llu %10llu %10llu %10llu %10llu %10llu %10llu %10llu %10llu "
+            "%10llu\n",
+            (unsigned long long) rtry, (unsigned long long) cons,
+            (unsigned long long) trav, (unsigned long long) fail,
+            (unsigned long long) del, (unsigned long long) ins,
+            (unsigned long long) load, (unsigned long long) store,
+            (unsigned long long) deletes, (unsigned long long) inserts);
+ }
+
+ #else
+
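+ /* RUNTIME_STAT disabled: the tracing macros compile away and the atomic
+  * wrappers fall through to the plain C11 operations. */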
+ #define TRACE_ATOMIC_TRIES \
+     do { \
+     } while (0)
+ #define TRACE_WAIT_FREE_CONS \
+     do { \
+     } while (0)
+ #define TRACE_TRAVERSAL \
+     do { \
+     } while (0)
+ #define TRACE_DEL \
+     do { \
+     } while (0)
+ #define TRACE_INS \
+     do { \
+     } while (0)
+ #define TRACE_DELETES \
+     do { \
+     } while (0)
+ #define TRACE_INSERTS \
+     do { \
+     } while (0)
+ #define CAS(obj, expected, desired) \
+     ({ atomic_compare_exchange_strong(obj, expected, desired); })
+ #define ATOMIC_LOAD(obj) ({ atomic_load(obj); })
+ #define ATOMIC_STORE_EXPLICIT(obj, desired, order) \
+     do { \
+         atomic_store_explicit(obj, desired, order); \
+     } while (0)
+
+ #endif
+
#define HP_MAX_THREADS 128
#define HP_MAX_HPS 5 /* This is named 'K' in the HP paper */
#define CLPAD (128 / sizeof(uintptr_t))
@@ -162,8 +294,6 @@ void list_hp_retire(list_hp_t *hp, uintptr_t ptr)
#define N_THREADS (128 / 2)
#define MAX_THREADS 128

- static atomic_uint_fast32_t deletes = 0, inserts = 0;
-
enum { HP_NEXT = 0, HP_CURR = 1, HP_PREV };

#define is_marked(p) (bool) ((uintptr_t)(p) & 0x01)
@@ -195,7 +325,7 @@ list_node_t *list_node_new(list_key_t key)
    list_node_t *node = aligned_alloc(128, sizeof(*node));
    assert(node);
    *node = (list_node_t){.magic = LIST_MAGIC, .key = key};
-     (void) atomic_fetch_add(&inserts, 1);
+     TRACE_INSERTS;
    return node;
}

@@ -205,7 +335,7 @@ void list_node_destroy(list_node_t *node)
        return;
    assert(node->magic == LIST_MAGIC);
    free(node);
-     (void) atomic_fetch_add(&deletes, 1);
+     TRACE_DELETES;
}

static void __list_node_delete(void *arg)
@@ -225,21 +355,31 @@ static bool __list_find(list_t *list,

try_again:
    prev = &list->head;
-     curr = (list_node_t *) atomic_load(prev);
+     curr = (list_node_t *) ATOMIC_LOAD(prev);
    (void) list_hp_protect_ptr(list->hp, HP_CURR, (uintptr_t) curr);
-     if (atomic_load(prev) != get_unmarked(curr))
+     if (ATOMIC_LOAD(prev) != get_unmarked(curr)) {
+         TRACE_ATOMIC_TRIES;
        goto try_again;
+     }
    while (true) {
-         next = (list_node_t *) atomic_load(&get_unmarked_node(curr)->next);
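+         /* A marked curr pointer means this node has already been logically
+          * deleted; the traversal simply steps over it, which is what the
+          * "cons" (wait-free contains) counter records. */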
+ #ifdef RUNTIME_STAT
+         if (is_marked(curr))
+             TRACE_WAIT_FREE_CONS;
+ #endif
+         next = (list_node_t *) ATOMIC_LOAD(&get_unmarked_node(curr)->next);
        (void) list_hp_protect_ptr(list->hp, HP_NEXT, get_unmarked(next));
        /* On a CAS failure, the search function, "__list_find," will simply
         * have to go backwards in the list until an unmarked element is found
         * from which the search in increasing key order can be started.
         */
-         if (atomic_load(&get_unmarked_node(curr)->next) != (uintptr_t) next)
+         if (ATOMIC_LOAD(&get_unmarked_node(curr)->next) != (uintptr_t) next) {
+             TRACE_ATOMIC_TRIES;
            goto try_again;
-         if (atomic_load(prev) != get_unmarked(curr))
+         }
+         if (ATOMIC_LOAD(prev) != get_unmarked(curr)) {
+             TRACE_ATOMIC_TRIES;
            goto try_again;
+         }
        if (get_unmarked_node(next) == next) {
            if (!(get_unmarked_node(curr)->key < *key)) {
                *par_curr = curr;
@@ -252,12 +392,15 @@ static bool __list_find(list_t *list,
                                           get_unmarked(curr));
        } else {
            uintptr_t tmp = get_unmarked(curr);
-             if (!atomic_compare_exchange_strong(prev, &tmp, get_unmarked(next)))
+             if (!CAS(prev, &tmp, get_unmarked(next))) {
+                 TRACE_ATOMIC_TRIES;
                goto try_again;
+             }
            list_hp_retire(list->hp, get_unmarked(curr));
        }
        curr = next;
        (void) list_hp_protect_release(list->hp, HP_CURR, get_unmarked(next));
+         TRACE_TRAVERSAL;
    }
}

@@ -274,13 +417,14 @@ bool list_insert(list_t *list, list_key_t key)
            list_hp_clear(list->hp);
            return false;
        }
-         atomic_store_explicit(&node->next, (uintptr_t) curr,
+         ATOMIC_STORE_EXPLICIT(&node->next, (uintptr_t) curr,
                              memory_order_relaxed);
        uintptr_t tmp = get_unmarked(curr);
-         if (atomic_compare_exchange_strong(prev, &tmp, (uintptr_t) node)) {
+         if (CAS(prev, &tmp, (uintptr_t) node)) {
            list_hp_clear(list->hp);
            return true;
        }
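+         /* The CAS on prev lost to a concurrent update: record the failed
+          * insertion attempt before retrying the loop. */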
+         TRACE_INS;
    }
}

@@ -296,12 +440,13 @@ bool list_delete(list_t *list, list_key_t key)

        uintptr_t tmp = get_unmarked(next);

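+         /* Step 1: mark curr->next so curr becomes logically deleted; losing
+          * this race bumps TRACE_DEL and retries. Step 2 below swings prev to
+          * next, physically unlinking curr. */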
-         if (!atomic_compare_exchange_strong(&curr->next, &tmp,
-                                             get_marked(next)))
+         if (!CAS(&curr->next, &tmp, get_marked(next))) {
+             TRACE_DEL;
            continue;
+         }

        tmp = get_unmarked(curr);
-         if (atomic_compare_exchange_strong(prev, &tmp, get_unmarked(next))) {
+         if (CAS(prev, &tmp, get_unmarked(next))) {
            list_hp_clear(list->hp);
            list_hp_retire(list->hp, get_unmarked(curr));
        } else {
@@ -382,8 +527,8 @@ int main(void)

    list_destroy(list);

-     fprintf(stderr, "inserts = %zu, deletes = %zu\n", atomic_load(&inserts),
-             atomic_load(&deletes));
-
+ #ifdef RUNTIME_STAT
+     do_analysis();
+ #endif
    return 0;
}