#include <string.h>
#include <threads.h>

+ #ifdef RUNTIME_STAT
+ /*
+  * "rtry" is the number of retries in the __list_find function.
+  * "cons" is the number of wait-free contains in the __list_find function,
+  *        i.e. marked nodes that the curr pointer pointed to.
+  * "trav" is the number of list elements traversed in the __list_find function.
+  * "fail" is the number of CAS() failures.
+  * "del" is the number of times a list_delete operation failed and retried.
+  * "ins" is the number of times a list_insert operation failed and retried.
+  * "load" is the number of atomic_load operations in list_delete, list_insert
+  *        and __list_find.
+  * "store" is the number of atomic_store operations in list_delete,
+  *        list_insert and __list_find.
+  */
+ static atomic_uint_fast64_t rtry = 0, cons = 0, trav = 0, fail = 0;
+ static atomic_uint_fast64_t del = 0, ins = 0;
+ static atomic_uint_fast64_t load = 0, store = 0;
+ static atomic_uint_fast64_t deletes = 0, inserts = 0;
+
+ #define TRACE_GOTO_TRY_AGAIN        \
+     do {                            \
+         atomic_fetch_add(&rtry, 1); \
+         goto try_again;             \
+     } while (0)
+ #define TRACE_WAIT_FREE_CONS        \
+     do {                            \
+         atomic_fetch_add(&cons, 1); \
+     } while (0)
+ #define TRACE_TRAVERSAL             \
+     do {                            \
+         atomic_fetch_add(&trav, 1); \
+     } while (0)
+ #define TRACE_DEL                  \
+     do {                           \
+         atomic_fetch_add(&del, 1); \
+     } while (0)
+ #define TRACE_INS                  \
+     do {                           \
+         atomic_fetch_add(&ins, 1); \
+     } while (0)
+ #define TRACE_DELETES                  \
+     do {                               \
+         atomic_fetch_add(&deletes, 1); \
+     } while (0)
+ #define TRACE_INSERTS                  \
+     do {                               \
+         atomic_fetch_add(&inserts, 1); \
+     } while (0)
+ #define CAS(obj, expected, desired)                                          \
+     ({                                                                       \
+         bool __ret = atomic_compare_exchange_strong(obj, expected, desired); \
+         if (!__ret)                                                          \
+             atomic_fetch_add(&fail, 1);                                      \
+         __ret;                                                               \
+     })
+ #define ATOMIC_LOAD(obj)            \
+     ({                              \
+         atomic_fetch_add(&load, 1); \
+         atomic_load(obj);           \
+     })
+ #define ATOMIC_STORE_EXPLICIT(obj, desired, order)  \
+     do {                                            \
+         atomic_fetch_add(&store, 1);                \
+         atomic_store_explicit(obj, desired, order); \
+     } while (0)
+
+ void do_analysis(void)
+ {
+     printf("\"rtry\" is the number of retries in the __list_find function.\n");
+     printf(
+         "\"cons\" is the number of wait-free contains in the __list_find "
+         "function, i.e. marked nodes that the curr pointer pointed to.\n");
+     printf(
+         "\"trav\" is the number of list elements traversed in the "
+         "__list_find function.\n");
+     printf("\"fail\" is the number of CAS() failures.\n");
+     printf(
+         "\"del\" is the number of times a list_delete operation failed "
+         "and retried.\n");
+     printf(
+         "\"ins\" is the number of times a list_insert operation failed "
+         "and retried.\n");
+     printf("\"deletes\" is the number of linked list elements deleted.\n");
+     printf("\"inserts\" is the number of linked list elements created.\n");
+     printf(
+         "\"load\" is the number of atomic_load operations in list_delete, "
+         "list_insert and __list_find.\n");
+     printf(
+         "\"store\" is the number of atomic_store operations in list_delete, "
+         "list_insert and __list_find.\n");
+     printf("\n%10s %10s %10s %10s %10s %10s %10s %10s %10s %10s\n", "rtry",
+            "cons", "trav", "fail", "del", "ins", "load", "store", "deletes",
+            "inserts");
+     for (int i = 0; i < 109; i++)
+         printf("-");
+     printf(
+         "\n%10llu %10llu %10llu %10llu %10llu %10llu %10llu %10llu %10llu "
+         "%10llu\n",
+         (unsigned long long) atomic_load(&rtry),
+         (unsigned long long) atomic_load(&cons),
+         (unsigned long long) atomic_load(&trav),
+         (unsigned long long) atomic_load(&fail),
+         (unsigned long long) atomic_load(&del),
+         (unsigned long long) atomic_load(&ins),
+         (unsigned long long) atomic_load(&load),
+         (unsigned long long) atomic_load(&store),
+         (unsigned long long) atomic_load(&deletes),
+         (unsigned long long) atomic_load(&inserts));
+ }
+
+ #else
+
+ #define TRACE_GOTO_TRY_AGAIN \
+     do {                     \
+         goto try_again;      \
+     } while (0)
+ #define TRACE_WAIT_FREE_CONS \
+     do {                     \
+     } while (0)
+ #define TRACE_TRAVERSAL \
+     do {                \
+     } while (0)
+ #define TRACE_DEL \
+     do {          \
+     } while (0)
+ #define TRACE_INS \
+     do {          \
+     } while (0)
+ #define TRACE_DELETES \
+     do {              \
+     } while (0)
+ #define TRACE_INSERTS \
+     do {              \
+     } while (0)
+ #define CAS(obj, expected, desired) \
+     ({ atomic_compare_exchange_strong(obj, expected, desired); })
+ #define ATOMIC_LOAD(obj) ({ atomic_load(obj); })
+ #define ATOMIC_STORE_EXPLICIT(obj, desired, order)  \
+     do {                                            \
+         atomic_store_explicit(obj, desired, order); \
+     } while (0)
+
+ #endif
+
#define HP_MAX_THREADS 128
#define HP_MAX_HPS 5 /* This is named 'K' in the HP paper */
#define CLPAD (128 / sizeof(uintptr_t))
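The added block follows a common instrumentation pattern: every atomic operation in the list goes through a macro that, when RUNTIME_STAT is defined, bumps a shared counter around the real call and otherwise expands to the bare operation. A minimal, self-contained sketch of the same idea is below; the counter and macro names are illustrative rather than taken from the commit, and the statement expression is a GNU C extension, as in the patch.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Global tally of failed CAS attempts, shared by all threads. */
static atomic_uint_fast64_t cas_failures = 0;

/* Statement-expression wrapper: run the CAS, count a failure,
 * and still yield the boolean result to the caller. */
#define COUNTED_CAS(obj, expected, desired)                                \
    ({                                                                     \
        bool ok_ = atomic_compare_exchange_strong(obj, expected, desired); \
        if (!ok_)                                                          \
            atomic_fetch_add(&cas_failures, 1);                            \
        ok_;                                                               \
    })

int main(void)
{
    atomic_uintptr_t slot = 5;
    uintptr_t expected = 7; /* wrong guess: this CAS fails and is counted */
    (void) COUNTED_CAS(&slot, &expected, (uintptr_t) 9);
    expected = 5;           /* right guess: this CAS succeeds */
    (void) COUNTED_CAS(&slot, &expected, (uintptr_t) 9);
    printf("CAS failures: %llu\n",
           (unsigned long long) atomic_load(&cas_failures));
    return 0;
}

Because the wrapper still evaluates to the CAS result, call sites such as the retry loops in __list_find can switch from atomic_compare_exchange_strong() to the counting macro without any change in control flow.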
@@ -162,8 +296,6 @@ void list_hp_retire(list_hp_t *hp, uintptr_t ptr)
#define N_THREADS (128 / 2)
#define MAX_THREADS 128

- static atomic_uint_fast32_t deletes = 0, inserts = 0;
-
enum { HP_NEXT = 0, HP_CURR = 1, HP_PREV };

#define is_marked(p) (bool) ((uintptr_t)(p) & 0x01)
@@ -195,7 +327,7 @@ list_node_t *list_node_new(list_key_t key)
    list_node_t *node = aligned_alloc(128, sizeof(*node));
    assert(node);
    *node = (list_node_t){.magic = LIST_MAGIC, .key = key};
-     (void) atomic_fetch_add(&inserts, 1);
+     TRACE_INSERTS;
    return node;
}

@@ -205,7 +337,7 @@ void list_node_destroy(list_node_t *node)
        return;
    assert(node->magic == LIST_MAGIC);
    free(node);
-     (void) atomic_fetch_add(&deletes, 1);
+     TRACE_DELETES;
}

static void __list_node_delete(void *arg)
@@ -225,21 +357,25 @@ static bool __list_find(list_t *list,

try_again:
    prev = &list->head;
-     curr = (list_node_t *) atomic_load(prev);
+     curr = (list_node_t *) ATOMIC_LOAD(prev);
    (void) list_hp_protect_ptr(list->hp, HP_CURR, (uintptr_t) curr);
-     if (atomic_load(prev) != get_unmarked(curr))
-         goto try_again;
+     if (ATOMIC_LOAD(prev) != get_unmarked(curr))
+         TRACE_GOTO_TRY_AGAIN;
    while (true) {
-         next = (list_node_t *) atomic_load(&get_unmarked_node(curr)->next);
+ #ifdef RUNTIME_STAT
+         if (is_marked(curr))
+             TRACE_WAIT_FREE_CONS;
+ #endif
+         next = (list_node_t *) ATOMIC_LOAD(&get_unmarked_node(curr)->next);
        (void) list_hp_protect_ptr(list->hp, HP_NEXT, get_unmarked(next));
        /* On a CAS failure, the search function, "__list_find," will simply
         * have to go backwards in the list until an unmarked element is found
         * from which the search in increasing key order can be started.
         */
-         if (atomic_load(&get_unmarked_node(curr)->next) != (uintptr_t) next)
-             goto try_again;
-         if (atomic_load(prev) != get_unmarked(curr))
-             goto try_again;
+         if (ATOMIC_LOAD(&get_unmarked_node(curr)->next) != (uintptr_t) next)
+             TRACE_GOTO_TRY_AGAIN;
+         if (ATOMIC_LOAD(prev) != get_unmarked(curr))
+             TRACE_GOTO_TRY_AGAIN;
        if (get_unmarked_node(next) == next) {
            if (!(get_unmarked_node(curr)->key < *key)) {
                *par_curr = curr;
@@ -252,12 +388,13 @@ static bool __list_find(list_t *list,
                                           get_unmarked(curr));
        } else {
            uintptr_t tmp = get_unmarked(curr);
-             if (!atomic_compare_exchange_strong(prev, &tmp, get_unmarked(next)))
-                 goto try_again;
+             if (!CAS(prev, &tmp, get_unmarked(next)))
+                 TRACE_GOTO_TRY_AGAIN;
            list_hp_retire(list->hp, get_unmarked(curr));
        }
        curr = next;
        (void) list_hp_protect_release(list->hp, HP_CURR, get_unmarked(next));
+         TRACE_TRAVERSAL;
    }
}

@@ -274,13 +411,14 @@ bool list_insert(list_t *list, list_key_t key)
            list_hp_clear(list->hp);
            return false;
        }
-         atomic_store_explicit(&node->next, (uintptr_t) curr,
+         ATOMIC_STORE_EXPLICIT(&node->next, (uintptr_t) curr,
                              memory_order_relaxed);
        uintptr_t tmp = get_unmarked(curr);
-         if (atomic_compare_exchange_strong(prev, &tmp, (uintptr_t) node)) {
+         if (CAS(prev, &tmp, (uintptr_t) node)) {
            list_hp_clear(list->hp);
            return true;
        }
+         TRACE_INS;
    }
}

@@ -296,12 +434,13 @@ bool list_delete(list_t *list, list_key_t key)

        uintptr_t tmp = get_unmarked(next);

-         if (!atomic_compare_exchange_strong(&curr->next, &tmp,
-                                             get_marked(next)))
+         if (!CAS(&curr->next, &tmp, get_marked(next))) {
+             TRACE_DEL;
            continue;
+         }

        tmp = get_unmarked(curr);
-         if (atomic_compare_exchange_strong(prev, &tmp, get_unmarked(next))) {
+         if (CAS(prev, &tmp, get_unmarked(next))) {
            list_hp_clear(list->hp);
            list_hp_retire(list->hp, get_unmarked(curr));
        } else {
@@ -382,8 +521,8 @@ int main(void)

    list_destroy(list);

-     fprintf(stderr, "inserts = %zu, deletes = %zu\n", atomic_load(&inserts),
-             atomic_load(&deletes));
-
+ #ifdef RUNTIME_STAT
+     do_analysis();
+ #endif
    return 0;
}
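Because every trace point expands to a no-op (or the bare atomic call) when RUNTIME_STAT is undefined, the statistics cost nothing in a regular build; defining the flag at compile time, e.g. with -DRUNTIME_STAT, switches all call sites to the counting versions and enables do_analysis() at the end of main(). A stripped-down illustration of that dual definition, using illustrative names rather than the ones from the commit, can be built both with and without the flag:

#include <stdatomic.h>
#include <stdio.h>

#ifdef RUNTIME_STAT
static atomic_uint_fast64_t trav = 0;
#define TRACE_TRAVERSAL() atomic_fetch_add(&trav, 1)
#else
#define TRACE_TRAVERSAL() ((void) 0) /* compiles away entirely */
#endif

int main(void)
{
    for (int i = 0; i < 3; i++)
        TRACE_TRAVERSAL(); /* counted only in a -DRUNTIME_STAT build */
#ifdef RUNTIME_STAT
    printf("trav = %llu\n", (unsigned long long) atomic_load(&trav));
#else
    puts("statistics disabled");
#endif
    return 0;
}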