14
14
#include <string.h>
15
15
#include <threads.h>
16
16
17
#ifdef RUNTIME_STAT
/*
 * Runtime statistics collected while the lock-free list runs:
 * "rtry"  the number of retries in the __list_find function.
 * "cons"  the number of wait-free contains in the __list_find function
 *         that the curr pointer pointed to.
 * "trav"  the number of list element traversals in the __list_find function.
 * "fail"  the number of CAS() failures.
 * "del"   the number of list_delete operations that failed and restarted.
 * "ins"   the number of list_insert operations that failed and restarted.
 * "load"  the number of atomic_load operations in list_delete, list_insert
 *         and __list_find.
 * "store" the number of atomic_store operations in list_delete, list_insert
 *         and __list_find.
 */
static atomic_uint_fast64_t rtry = 0, cons = 0, trav = 0, fail = 0;
static atomic_uint_fast64_t del = 0, ins = 0;
static atomic_uint_fast64_t load = 0, store = 0;
static atomic_uint_fast64_t deletes = 0, creates = 0;

/* Per-event switches passed as the "ops" argument of
 * TRACE_ATOMIC_FECTH_ADD(); set an individual one to 0 to disable it. */
#define TRACE_NOTHING 1
#define TRACE_TRIES 1
#define TRACE_WAIT_FREE_CONS 1
#define TRACE_TRAVERSAL 1
#define TRACE_DEL 1
#define TRACE_INS 1
#define TRACE_CREATES 1
#define TRACE_DELETES 1

/* Compare-and-swap wrapper that counts every failed attempt in "fail". */
#define CAS(obj, expected, desired)                                          \
    ({                                                                       \
        bool __ret = atomic_compare_exchange_strong(obj, expected, desired); \
        if (!__ret)                                                          \
            atomic_fetch_add(&fail, 1);                                      \
        __ret;                                                               \
    })

/* atomic_load wrapper that counts every load in "load". */
#define ATOMIC_LOAD(obj)            \
    ({                              \
        atomic_fetch_add(&load, 1); \
        atomic_load(obj);           \
    })

/* atomic_store_explicit wrapper that counts every store in "store". */
#define ATOMIC_STORE_EXPLICIT(obj, desired, order)  \
    do {                                            \
        atomic_fetch_add(&store, 1);                \
        atomic_store_explicit(obj, desired, order); \
    } while (0)

/* Conditionally bump a statistics counter.
 * NOTE(review): "FECTH" is a typo for "FETCH", kept because existing call
 * sites throughout the file use this spelling. */
#define TRACE_ATOMIC_FECTH_ADD(obj, arg, ops) \
    do {                                      \
        if (ops)                              \
            atomic_fetch_add(obj, arg);       \
    } while (0)

/* Print a legend for every counter followed by the collected values.
 * Intended to be registered with atexit() so it runs after the worker
 * threads have been joined. */
void do_analysis(void)
{
    printf(
        "\"rtry\" is the number of retries in the __list_find function.\n");
    printf(
        "\"cons\" is the number of wait-free contains in the __list_find "
        "function that curr pointer pointed.\n");
    printf(
        "\"trav\" is the number of list element traversal in the "
        "__list_find function.\n");
    printf("\"fail\" is the number of CAS() failures.\n");
    printf(
        "\"del\" is the number of list_delete operation failed and "
        "restart again.\n");
    printf(
        "\"ins\" is the number of list_insert operation failed and "
        "restart again.\n");
    printf("\"deletes\" is the number of linked list elements deleted.\n");
    printf("\"creates\" is the number of linked list elements created.\n");
    printf(
        "\"load\" is the number of atomic_load operation in list_delete, "
        "list_insert and __list_find.\n");
    printf(
        "\"store\" is the number of atomic_store operation in list_delete, "
        "list_insert and __list_find.\n");
    printf("\n%10s %10s %10s %10s %10s %10s %10s %10s %10s %10s\n", "rtry",
           "cons", "trav", "fail", "del", "ins", "load", "store", "deletes",
           "creates");
    /* 109 = 10 columns x 10 chars + 9 separating spaces. */
    for (int i = 0; i < 109; i++)
        printf("-");
    /* Read each counter with an explicit atomic_load and cast to a type
     * that matches the format specifier; "%ld" with uint_fast64_t is a
     * format/argument mismatch (undefined behavior). */
    printf("\n%10llu %10llu %10llu %10llu %10llu %10llu %10llu %10llu "
           "%10llu %10llu\n",
           (unsigned long long) atomic_load(&rtry),
           (unsigned long long) atomic_load(&cons),
           (unsigned long long) atomic_load(&trav),
           (unsigned long long) atomic_load(&fail),
           (unsigned long long) atomic_load(&del),
           (unsigned long long) atomic_load(&ins),
           (unsigned long long) atomic_load(&load),
           (unsigned long long) atomic_load(&store),
           (unsigned long long) atomic_load(&deletes),
           (unsigned long long) atomic_load(&creates));
}

#else

/* Statistics disabled: the wrappers reduce to the plain atomic operations
 * and the trace macro expands to a no-op statement. */
#define CAS(obj, expected, desired) \
    atomic_compare_exchange_strong(obj, expected, desired)
#define ATOMIC_LOAD(obj) atomic_load(obj)
#define ATOMIC_STORE_EXPLICIT(obj, desired, order)  \
    do {                                            \
        atomic_store_explicit(obj, desired, order); \
    } while (0)
#define TRACE_ATOMIC_FECTH_ADD(obj, arg, ops) \
    do {                                      \
    } while (0)

#endif /* RUNTIME_STAT */
114
+
17
115
#define HP_MAX_THREADS 128
18
116
#define HP_MAX_HPS 5 /* This is named 'K' in the HP paper */
19
117
#define CLPAD (128 / sizeof(uintptr_t))
@@ -162,8 +260,6 @@ void list_hp_retire(list_hp_t *hp, uintptr_t ptr)
162
260
#define N_THREADS (128 / 2)
163
261
#define MAX_THREADS 128
164
262
165
- static atomic_uint_fast32_t deletes = 0 , inserts = 0 ;
166
-
167
263
enum { HP_NEXT = 0 , HP_CURR = 1 , HP_PREV };
168
264
169
265
#define is_marked (p ) (bool) ((uintptr_t)(p) &0x01)
@@ -195,7 +291,7 @@ list_node_t *list_node_new(list_key_t key)
195
291
list_node_t * node = aligned_alloc (128 , sizeof (* node ));
196
292
assert (node );
197
293
* node = (list_node_t ){.magic = LIST_MAGIC , .key = key };
198
- ( void ) atomic_fetch_add ( & inserts , 1 );
294
+ TRACE_ATOMIC_FECTH_ADD ( & creates , 1 , TRACE_CREATES );
199
295
return node ;
200
296
}
201
297
@@ -205,7 +301,7 @@ void list_node_destroy(list_node_t *node)
205
301
return ;
206
302
assert (node -> magic == LIST_MAGIC );
207
303
free (node );
208
- ( void ) atomic_fetch_add ( & deletes , 1 );
304
+ TRACE_ATOMIC_FECTH_ADD ( & deletes , 1 , TRACE_DELETES );
209
305
}
210
306
211
307
static void __list_node_delete (void * arg )
@@ -225,21 +321,31 @@ static bool __list_find(list_t *list,
225
321
226
322
try_again :
227
323
prev = & list -> head ;
228
- curr = (list_node_t * ) atomic_load (prev );
324
+ curr = (list_node_t * ) ATOMIC_LOAD (prev );
229
325
(void ) list_hp_protect_ptr (list -> hp , HP_CURR , (uintptr_t ) curr );
230
- if (atomic_load (prev ) != get_unmarked (curr ))
326
+ if (ATOMIC_LOAD (prev ) != get_unmarked (curr )) {
327
+ TRACE_ATOMIC_FECTH_ADD (& rtry , 1 , TRACE_TRIES );
231
328
goto try_again ;
329
+ }
232
330
while (true) {
233
- next = (list_node_t * ) atomic_load (& get_unmarked_node (curr )-> next );
331
+ #ifdef RUNTIME_STAT
332
+ if (is_marked (curr ))
333
+ TRACE_ATOMIC_FECTH_ADD (& cons , 1 , TRACE_WAIT_FREE_CONS );
334
+ #endif
335
+ next = (list_node_t * ) ATOMIC_LOAD (& get_unmarked_node (curr )-> next );
234
336
(void ) list_hp_protect_ptr (list -> hp , HP_NEXT , get_unmarked (next ));
235
337
/* On a CAS failure, the search function, "__list_find," will simply
236
338
* have to go backwards in the list until an unmarked element is found
237
339
* from which the search in increasing key order can be started.
238
340
*/
239
- if (atomic_load (& get_unmarked_node (curr )-> next ) != (uintptr_t ) next )
341
+ if (ATOMIC_LOAD (& get_unmarked_node (curr )-> next ) != (uintptr_t ) next ) {
342
+ TRACE_ATOMIC_FECTH_ADD (& rtry , 1 , TRACE_TRIES );
240
343
goto try_again ;
241
- if (atomic_load (prev ) != get_unmarked (curr ))
344
+ }
345
+ if (ATOMIC_LOAD (prev ) != get_unmarked (curr )) {
346
+ TRACE_ATOMIC_FECTH_ADD (& rtry , 1 , TRACE_TRIES );
242
347
goto try_again ;
348
+ }
243
349
if (get_unmarked_node (next ) == next ) {
244
350
if (!(get_unmarked_node (curr )-> key < * key )) {
245
351
* par_curr = curr ;
@@ -252,12 +358,15 @@ static bool __list_find(list_t *list,
252
358
get_unmarked (curr ));
253
359
} else {
254
360
uintptr_t tmp = get_unmarked (curr );
255
- if (!atomic_compare_exchange_strong (prev , & tmp , get_unmarked (next )))
361
+ if (!CAS (prev , & tmp , get_unmarked (next ))) {
362
+ TRACE_ATOMIC_FECTH_ADD (& rtry , 1 , TRACE_TRIES );
256
363
goto try_again ;
364
+ }
257
365
list_hp_retire (list -> hp , get_unmarked (curr ));
258
366
}
259
367
curr = next ;
260
368
(void ) list_hp_protect_release (list -> hp , HP_CURR , get_unmarked (next ));
369
+ TRACE_ATOMIC_FECTH_ADD (& trav , 1 , TRACE_TRAVERSAL );
261
370
}
262
371
}
263
372
@@ -274,13 +383,14 @@ bool list_insert(list_t *list, list_key_t key)
274
383
list_hp_clear (list -> hp );
275
384
return false;
276
385
}
277
- atomic_store_explicit (& node -> next , (uintptr_t ) curr ,
386
+ ATOMIC_STORE_EXPLICIT (& node -> next , (uintptr_t ) curr ,
278
387
memory_order_relaxed );
279
388
uintptr_t tmp = get_unmarked (curr );
280
- if (atomic_compare_exchange_strong (prev , & tmp , (uintptr_t ) node )) {
389
+ if (CAS (prev , & tmp , (uintptr_t ) node )) {
281
390
list_hp_clear (list -> hp );
282
391
return true;
283
392
}
393
+ TRACE_ATOMIC_FECTH_ADD (& ins , 1 , TRACE_INS );
284
394
}
285
395
}
286
396
@@ -296,12 +406,13 @@ bool list_delete(list_t *list, list_key_t key)
296
406
297
407
uintptr_t tmp = get_unmarked (next );
298
408
299
- if (!atomic_compare_exchange_strong (& curr -> next , & tmp ,
300
- get_marked ( next )))
409
+ if (!CAS (& curr -> next , & tmp , get_marked ( next ))) {
410
+ TRACE_ATOMIC_FECTH_ADD ( & del , 1 , TRACE_DEL );
301
411
continue ;
412
+ }
302
413
303
414
tmp = get_unmarked (curr );
304
- if (atomic_compare_exchange_strong (prev , & tmp , get_unmarked (next ))) {
415
+ if (CAS (prev , & tmp , get_unmarked (next ))) {
305
416
list_hp_clear (list -> hp );
306
417
list_hp_retire (list -> hp , get_unmarked (curr ));
307
418
} else {
@@ -364,6 +475,10 @@ static void *delete_thread(void *arg)
364
475
365
476
int main (void )
366
477
{
478
+ #ifdef RUNTIME_STAT
479
+ atexit (do_analysis );
480
+ #endif
481
+
367
482
list_t * list = list_new ();
368
483
369
484
pthread_t thr [N_THREADS ];
@@ -382,8 +497,5 @@ int main(void)
382
497
383
498
list_destroy (list );
384
499
385
- fprintf (stderr , "inserts = %zu, deletes = %zu\n" , atomic_load (& inserts ),
386
- atomic_load (& deletes ));
387
-
388
500
return 0 ;
389
501
}
0 commit comments