#include <string.h>
#include <threads.h>

+ #ifdef RUNTIME_STAT
+ /*
+  * "rtry" is the number of retries (restarts from the list head) in the
+  *        __list_find function.
+  * "cons" is the number of times __list_find steps over a logically deleted
+  *        (marked) node that curr points to, i.e. wait-free "contains" work.
+  * "trav" is the number of list elements traversed in the __list_find function.
+  * "fail" is the number of CAS() failures.
+  * "del"  is the number of times a list_delete operation failed and restarted.
+  * "ins"  is the number of times a list_insert operation failed and restarted.
+  */
+ static atomic_uint_fast64_t rtry = 0, cons = 0, trav = 0, fail = 0;
+ static atomic_uint_fast64_t del = 0, ins = 0;
+ static atomic_uint_fast64_t deletes = 0, inserts = 0;
+ 
+ #define TRACE_GOTO_TRY_AGAIN        \
+     do {                            \
+         atomic_fetch_add(&rtry, 1); \
+         goto try_again;             \
+     } while (0)
+ #define TRACE_WAIT_FREE_CONS        \
+     do {                            \
+         atomic_fetch_add(&cons, 1); \
+     } while (0)
+ #define TRACE_TRAVERSAL             \
+     do {                            \
+         atomic_fetch_add(&trav, 1); \
+     } while (0)
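+ /* CAS() forwards to atomic_compare_exchange_strong() inside a GNU statement
+  * expression, so it can sit in a condition while also counting every
+  * unsuccessful exchange in "fail". */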
+ #define CAS(obj, expected, desired)                                        \
+     ({                                                                     \
+         bool _ok = atomic_compare_exchange_strong(obj, expected, desired); \
+         if (!_ok)                                                          \
+             atomic_fetch_add(&fail, 1);                                    \
+         _ok;                                                               \
+     })
+ #define TRACE_DEL                      \
+     do {                               \
+         atomic_fetch_add(&del, 1);     \
+     } while (0)
+ #define TRACE_INS                      \
+     do {                               \
+         atomic_fetch_add(&ins, 1);     \
+     } while (0)
+ #define TRACE_DELETES                  \
+     do {                               \
+         atomic_fetch_add(&deletes, 1); \
+     } while (0)
+ #define TRACE_INSERTS                  \
+     do {                               \
+         atomic_fetch_add(&inserts, 1); \
+     } while (0)
+ 
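+ /* Print what each counter measures, followed by a one-row summary table. */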
+ void analysis_func(void)
+ {
+     printf("\"rtry\" is the number of retries (restarts from the list head) "
+            "in the __list_find function.\n");
+     printf("\"cons\" is the number of logically deleted (marked) nodes that "
+            "curr pointed to while __list_find traversed the list.\n");
+     printf("\"trav\" is the number of list elements traversed in the "
+            "__list_find function.\n");
+     printf("\"fail\" is the number of CAS() failures.\n");
+     printf("\"del\" is the number of times a list_delete operation failed and "
+            "restarted.\n");
+     printf("\"ins\" is the number of times a list_insert operation failed and "
+            "restarted.\n");
+     printf("\"deletes\" is the number of linked list elements deleted.\n");
+     printf("\"inserts\" is the number of linked list elements created.\n");
+     printf("\n%10s %10s %10s %10s %10s %10s %10s %10s\n", "rtry", "cons",
+            "trav", "fail", "del", "ins", "deletes", "inserts");
+     for (int i = 0; i < 87; i++)
+         printf("-");
+     printf("\n%10ju %10ju %10ju %10ju %10ju %10ju %10ju %10ju\n",
+            (uintmax_t) rtry, (uintmax_t) cons, (uintmax_t) trav,
+            (uintmax_t) fail, (uintmax_t) del, (uintmax_t) ins,
+            (uintmax_t) deletes, (uintmax_t) inserts);
+ }
+ 
+ #else
+ 
+ #define TRACE_GOTO_TRY_AGAIN \
+     do {                     \
+         goto try_again;      \
+     } while (0)
+ #define TRACE_WAIT_FREE_CONS \
+     do {                     \
+     } while (0)
+ #define TRACE_TRAVERSAL \
+     do {                \
+     } while (0)
+ #define CAS(obj, expected, desired) \
+     ({ atomic_compare_exchange_strong(obj, expected, desired); })
+ #define TRACE_DEL \
+     do {          \
+     } while (0)
+ #define TRACE_INS \
+     do {          \
+     } while (0)
+ #define TRACE_DELETES \
+     do {              \
+     } while (0)
+ #define TRACE_INSERTS \
+     do {              \
+     } while (0)
+ 
+ #endif
+ 
#define HP_MAX_THREADS 128
#define HP_MAX_HPS 5 /* This is named 'K' in the HP paper */
#define CLPAD (128 / sizeof(uintptr_t))
@@ -162,8 +266,6 @@ void list_hp_retire(list_hp_t *hp, uintptr_t ptr)
#define N_THREADS (128 / 2)
#define MAX_THREADS 128

- static atomic_uint_fast32_t deletes = 0, inserts = 0;
-
enum { HP_NEXT = 0, HP_CURR = 1, HP_PREV };

#define is_marked(p) (bool) ((uintptr_t)(p) & 0x01)
@@ -195,7 +297,7 @@ list_node_t *list_node_new(list_key_t key)
    list_node_t *node = aligned_alloc(128, sizeof(*node));
    assert(node);
    *node = (list_node_t){.magic = LIST_MAGIC, .key = key};
-     (void) atomic_fetch_add(&inserts, 1);
+     TRACE_INSERTS;
    return node;
}

@@ -205,7 +307,7 @@ void list_node_destroy(list_node_t *node)
        return;
    assert(node->magic == LIST_MAGIC);
    free(node);
-     (void) atomic_fetch_add(&deletes, 1);
+     TRACE_DELETES;
}

static void __list_node_delete(void *arg)
@@ -228,18 +330,22 @@ static bool __list_find(list_t *list,
    curr = (list_node_t *) atomic_load(prev);
    (void) list_hp_protect_ptr(list->hp, HP_CURR, (uintptr_t) curr);
    if (atomic_load(prev) != get_unmarked(curr))
-         goto try_again;
+         TRACE_GOTO_TRY_AGAIN;
    while (true) {
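+         /* Statistics only: a marked curr is a node that is already logically
+          * deleted, which a wait-free contains simply steps over. */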
+ #ifdef RUNTIME_STAT
+         if (is_marked(curr))
+             TRACE_WAIT_FREE_CONS;
+ #endif
        next = (list_node_t *) atomic_load(&get_unmarked_node(curr)->next);
        (void) list_hp_protect_ptr(list->hp, HP_NEXT, get_unmarked(next));
        /* On a CAS failure, the search function, "__list_find," will simply
         * have to go backwards in the list until an unmarked element is found
         * from which the search in increasing key order can be started.
         */
        if (atomic_load(&get_unmarked_node(curr)->next) != (uintptr_t) next)
-             goto try_again;
+             TRACE_GOTO_TRY_AGAIN;
        if (atomic_load(prev) != get_unmarked(curr))
-             goto try_again;
+             TRACE_GOTO_TRY_AGAIN;
        if (get_unmarked_node(next) == next) {
            if (!(get_unmarked_node(curr)->key < *key)) {
                *par_curr = curr;
@@ -252,12 +358,13 @@ static bool __list_find(list_t *list,
                                           get_unmarked(curr));
        } else {
            uintptr_t tmp = get_unmarked(curr);
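+             /* curr's next pointer is marked, so curr is logically deleted:
+              * try to unlink it from *prev; a failed CAS restarts the scan. */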
-             if (!atomic_compare_exchange_strong(prev, &tmp, get_unmarked(next)))
-                 goto try_again;
+             if (!CAS(prev, &tmp, get_unmarked(next)))
+                 TRACE_GOTO_TRY_AGAIN;
            list_hp_retire(list->hp, get_unmarked(curr));
        }
        curr = next;
        (void) list_hp_protect_release(list->hp, HP_CURR, get_unmarked(next));
+         TRACE_TRAVERSAL;
    }
}

@@ -277,10 +384,11 @@ bool list_insert(list_t *list, list_key_t key)
        atomic_store_explicit(&node->next, (uintptr_t) curr,
                              memory_order_relaxed);
        uintptr_t tmp = get_unmarked(curr);
-         if (atomic_compare_exchange_strong(prev, &tmp, (uintptr_t) node)) {
+         if (CAS(prev, &tmp, (uintptr_t) node)) {
            list_hp_clear(list->hp);
            return true;
        }
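+         /* The CAS lost the race with a concurrent update of *prev: count the
+          * failed insertion attempt and retry. */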
+         TRACE_INS;
    }
}

@@ -296,12 +404,13 @@ bool list_delete(list_t *list, list_key_t key)

        uintptr_t tmp = get_unmarked(next);

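+         /* Mark curr->next to logically delete curr; if the CAS fails, another
+          * thread raced us on this node, so count it and retry the search. */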
-         if (!atomic_compare_exchange_strong(&curr->next, &tmp,
-                                             get_marked(next)))
+         if (!CAS(&curr->next, &tmp, get_marked(next))) {
+             TRACE_DEL;
            continue;
+         }

        tmp = get_unmarked(curr);
-         if (atomic_compare_exchange_strong(prev, &tmp, get_unmarked(next))) {
+         if (CAS(prev, &tmp, get_unmarked(next))) {
            list_hp_clear(list->hp);
            list_hp_retire(list->hp, get_unmarked(curr));
        } else {
@@ -382,8 +491,8 @@ int main(void)

    list_destroy(list);

-     fprintf(stderr, "inserts = %zu, deletes = %zu\n", atomic_load(&inserts),
-             atomic_load(&deletes));
-
+ #ifdef RUNTIME_STAT
+     analysis_func();
+ #endif
    return 0;
}
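
For readers not familiar with the ({ ... }) construct used by the CAS() macro above, the following stand-alone sketch (GNU C statement expressions, accepted by GCC and Clang; all names here are illustrative and not part of the commit) shows how such a wrapper can bump a failure counter while still being usable inside an if condition:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static atomic_uint_fast64_t fail = 0;

    /* Count only unsuccessful compare-and-swap attempts, yet hand the
     * boolean result back to the caller via a statement expression. */
    #define CAS(obj, expected, desired)                                        \
        ({                                                                     \
            bool _ok = atomic_compare_exchange_strong(obj, expected, desired); \
            if (!_ok)                                                          \
                atomic_fetch_add(&fail, 1);                                    \
            _ok;                                                               \
        })

    int main(void)
    {
        atomic_uintptr_t slot = 0;
        uintptr_t expected = 1; /* deliberately wrong: this CAS fails */
        if (!CAS(&slot, &expected, 2))
            printf("first CAS failed, fail = %ju\n",
                   (uintmax_t) atomic_load(&fail));

        expected = 0; /* matches the stored value: this CAS succeeds */
        if (CAS(&slot, &expected, 2))
            printf("second CAS succeeded, slot = %ju\n",
                   (uintmax_t) atomic_load(&slot));
        return 0;
    }

Built with something like cc -std=gnu11 cas_sketch.c, it reports the failed attempt first and then the successful swap.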