 #include <string.h>
 #include <threads.h>
 
+static atomic_uint_fast64_t deletes = 0, inserts = 0;
+
+#ifdef RUNTIME_STAT
+
+/*
+ * Reference:
+ * A more Pragmatic Implementation of the Lock-free, Ordered, Linked List
+ * https://arxiv.org/abs/2010.15755
+ */
+
+enum {
+    TRACE_nothing = 0,
+    TRACE_retry,     /* the number of retries in the __list_find function. */
+    TRACE_contains,  /* the number of wait-free contains in the __list_find
+                        function that the curr pointer pointed to. */
+    TRACE_traversal, /* the number of list elements traversed in the
+                        __list_find function. */
+    TRACE_fail,      /* the number of CAS() failures. */
+    TRACE_del,       /* the number of list_delete operations that failed and
+                        restarted. */
+    TRACE_ins,       /* the number of list_insert operations that failed and
+                        restarted. */
+    TRACE_inserts,   /* the number of linked list elements created. */
+    TRACE_deletes    /* the number of linked list elements deleted. */
+};
+
+struct runtime_stat {
+    atomic_uint_fast64_t retry, contains, traversal, fail;
+    atomic_uint_fast64_t del, ins;
+    atomic_uint_fast64_t load, store;
+};
+static struct runtime_stat stats = {0};
+
+#define CAS(obj, expected, desired)                                          \
+    ({                                                                       \
+        bool __ret = atomic_compare_exchange_strong(obj, expected, desired); \
+        if (!__ret)                                                          \
+            atomic_fetch_add(&stats.fail, 1);                                \
+        __ret;                                                               \
+    })
+#define ATOMIC_LOAD(obj)                                                     \
+    ({                                                                       \
+        atomic_fetch_add(&stats.load, 1);                                    \
+        atomic_load(obj);                                                    \
+    })
+#define ATOMIC_STORE_EXPLICIT(obj, desired, order)                           \
+    do {                                                                     \
+        atomic_fetch_add(&stats.store, 1);                                   \
+        atomic_store_explicit(obj, desired, order);                          \
+    } while (0)
+#define TRACE(ops)                                                           \
+    do {                                                                     \
+        if (TRACE_##ops)                                                     \
+            atomic_fetch_add(&stats.ops, 1);                                 \
+    } while (0)
+
+static void do_analysis(void)
+{
+    __atomic_thread_fence(__ATOMIC_SEQ_CST);
+#define TRACE_PRINT(ops) printf("%-10s: %ld\n", #ops, stats.ops);
+    TRACE_PRINT(retry);
+    TRACE_PRINT(contains);
+    TRACE_PRINT(traversal);
+    TRACE_PRINT(fail);
+    TRACE_PRINT(del);
+    TRACE_PRINT(ins);
+    TRACE_PRINT(load);
+    TRACE_PRINT(store);
+#undef TRACE_PRINT
+#define TRACE_PRINT(val) printf("%-10s: %ld\n", #val, val);
+    TRACE_PRINT(deletes);
+    TRACE_PRINT(inserts);
+#undef TRACE_PRINT
+    printf(
+        "\"retry\" is the number of retries in the __list_find function.\n");
+    printf(
+        "\"contains\" is the number of wait-free contains in the __list_find "
+        "function that the curr pointer pointed to.\n");
+    printf(
+        "\"traversal\" is the number of list elements traversed in the "
+        "__list_find function.\n");
+    printf("\"fail\" is the number of CAS() failures.\n");
+    printf(
+        "\"del\" is the number of list_delete operations that failed and "
+        "restarted.\n");
+    printf(
+        "\"ins\" is the number of list_insert operations that failed and "
+        "restarted.\n");
+    printf("\"deletes\" is the number of linked list elements deleted.\n");
+    printf("\"inserts\" is the number of linked list elements created.\n");
+    printf(
+        "\"load\" is the number of atomic_load operations in list_delete, "
+        "list_insert and __list_find.\n");
+    printf(
+        "\"store\" is the number of atomic_store operations in list_delete, "
+        "list_insert and __list_find.\n");
+}
+
+#else
+
+#define CAS(obj, expected, desired) \
+    ({ atomic_compare_exchange_strong(obj, expected, desired); })
+#define ATOMIC_LOAD(obj) ({ atomic_load(obj); })
+#define ATOMIC_STORE_EXPLICIT(obj, desired, order)  \
+    do {                                            \
+        atomic_store_explicit(obj, desired, order); \
+    } while (0)
+#define TRACE(ops) \
+    do {           \
+    } while (0)
+
+static void do_analysis(void)
+{
+    __atomic_thread_fence(__ATOMIC_SEQ_CST);
+    fprintf(stderr, "inserts = %zu, deletes = %zu\n", inserts, deletes);
+}
+
+#endif /* RUNTIME_STAT */
+
+#define RUNTIME_STAT_INIT() atexit(do_analysis)
+
 #define HP_MAX_THREADS 128
 #define HP_MAX_HPS 5 /* This is named 'K' in the HP paper */
 #define CLPAD (128 / sizeof(uintptr_t))
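
Aside from the diff itself: the instrumented CAS() above uses a GNU statement expression so that every failed compare-and-swap bumps a shared counter while the call site still receives the boolean result. Below is a minimal standalone sketch of that pattern; the stats name mirrors the one in the commit, and everything else (file name, demo scenario) is illustrative.

/* cas_count_demo.c -- illustrative only; build with: cc -std=gnu11 cas_count_demo.c */
#include <inttypes.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static struct {
    atomic_uint_fast64_t fail; /* counts failed compare-and-swap attempts */
} stats;

#define CAS(obj, expected, desired)                                          \
    ({                                                                       \
        bool __ret = atomic_compare_exchange_strong(obj, expected, desired); \
        if (!__ret)                                                          \
            atomic_fetch_add(&stats.fail, 1);                                \
        __ret; /* the statement expression still yields the CAS result */    \
    })

int main(void)
{
    atomic_uintptr_t slot = 0;
    uintptr_t expected = 0;

    (void) CAS(&slot, &expected, 1); /* succeeds: slot goes 0 -> 1 */
    expected = 0;
    (void) CAS(&slot, &expected, 2); /* fails: slot is already 1 */

    printf("CAS failures: %" PRIuFAST64 "\n", atomic_load(&stats.fail)); /* 1 */
    return 0;
}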
@@ -162,8 +285,6 @@ void list_hp_retire(list_hp_t *hp, uintptr_t ptr)
 #define N_THREADS (128 / 2)
 #define MAX_THREADS 128
 
-static atomic_uint_fast32_t deletes = 0, inserts = 0;
-
 enum { HP_NEXT = 0, HP_CURR = 1, HP_PREV };
 
 #define is_marked(p) (bool) ((uintptr_t)(p) & 0x01)
@@ -225,21 +346,29 @@ static bool __list_find(list_t *list,
 
 try_again:
     prev = &list->head;
-    curr = (list_node_t *) atomic_load(prev);
+    curr = (list_node_t *) ATOMIC_LOAD(prev);
     (void) list_hp_protect_ptr(list->hp, HP_CURR, (uintptr_t) curr);
-    if (atomic_load(prev) != get_unmarked(curr))
+    if (ATOMIC_LOAD(prev) != get_unmarked(curr)) {
+        TRACE(retry);
         goto try_again;
+    }
     while (true) {
-        next = (list_node_t *) atomic_load(&get_unmarked_node(curr)->next);
+        if (is_marked(curr))
+            TRACE(contains);
+        next = (list_node_t *) ATOMIC_LOAD(&get_unmarked_node(curr)->next);
         (void) list_hp_protect_ptr(list->hp, HP_NEXT, get_unmarked(next));
         /* On a CAS failure, the search function, "__list_find," will simply
          * have to go backwards in the list until an unmarked element is found
          * from which the search in increasing key order can be started.
          */
-        if (atomic_load(&get_unmarked_node(curr)->next) != (uintptr_t) next)
+        if (ATOMIC_LOAD(&get_unmarked_node(curr)->next) != (uintptr_t) next) {
+            TRACE(retry);
             goto try_again;
-        if (atomic_load(prev) != get_unmarked(curr))
+        }
+        if (ATOMIC_LOAD(prev) != get_unmarked(curr)) {
+            TRACE(retry);
             goto try_again;
+        }
         if (get_unmarked_node(next) == next) {
             if (!(get_unmarked_node(curr)->key < *key)) {
                 *par_curr = curr;
@@ -252,12 +381,15 @@ static bool __list_find(list_t *list,
                                            get_unmarked(curr));
         } else {
             uintptr_t tmp = get_unmarked(curr);
-            if (!atomic_compare_exchange_strong(prev, &tmp, get_unmarked(next)))
+            if (!CAS(prev, &tmp, get_unmarked(next))) {
+                TRACE(retry);
                 goto try_again;
+            }
             list_hp_retire(list->hp, get_unmarked(curr));
         }
         curr = next;
         (void) list_hp_protect_release(list->hp, HP_CURR, get_unmarked(next));
+        TRACE(traversal);
     }
 }
 
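
A note on the TRACE() calls sprinkled through __list_find above: the macro argument names both an enum constant and a struct field, so TRACE(retry) tests TRACE_retry and increments stats.retry, and when RUNTIME_STAT is not defined the macro expands to an empty do/while. Below is a reduced standalone sketch of that naming trick; the counter names are copied from the commit, everything else is illustrative.

/* trace_name_demo.c -- illustrative only; build with: cc -std=c11 trace_name_demo.c */
#include <stdatomic.h>
#include <stdio.h>

enum { TRACE_nothing = 0, TRACE_retry, TRACE_traversal };

static struct {
    atomic_uint_fast64_t retry, traversal;
} stats;

/* The argument is pasted into TRACE_##ops and reused as stats.ops, so each
 * call site selects exactly one counter; the if () folds to a constant. */
#define TRACE(ops)                           \
    do {                                     \
        if (TRACE_##ops)                     \
            atomic_fetch_add(&stats.ops, 1); \
    } while (0)

int main(void)
{
    TRACE(retry);
    TRACE(traversal);
    TRACE(traversal);
    printf("retry=%lu traversal=%lu\n",
           (unsigned long) atomic_load(&stats.retry),
           (unsigned long) atomic_load(&stats.traversal)); /* prints 1 and 2 */
    return 0;
}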
@@ -274,13 +406,14 @@ bool list_insert(list_t *list, list_key_t key)
             list_hp_clear(list->hp);
             return false;
         }
-        atomic_store_explicit(&node->next, (uintptr_t) curr,
+        ATOMIC_STORE_EXPLICIT(&node->next, (uintptr_t) curr,
                               memory_order_relaxed);
         uintptr_t tmp = get_unmarked(curr);
-        if (atomic_compare_exchange_strong(prev, &tmp, (uintptr_t) node)) {
+        if (CAS(prev, &tmp, (uintptr_t) node)) {
             list_hp_clear(list->hp);
             return true;
         }
+        TRACE(ins);
     }
 }
 
@@ -296,12 +429,13 @@ bool list_delete(list_t *list, list_key_t key)
 
         uintptr_t tmp = get_unmarked(next);
 
-        if (!atomic_compare_exchange_strong(&curr->next, &tmp,
-                                            get_marked(next)))
+        if (!CAS(&curr->next, &tmp, get_marked(next))) {
+            TRACE(del);
             continue;
+        }
 
         tmp = get_unmarked(curr);
-        if (atomic_compare_exchange_strong(prev, &tmp, get_unmarked(next))) {
+        if (CAS(prev, &tmp, get_unmarked(next))) {
             list_hp_clear(list->hp);
             list_hp_retire(list->hp, get_unmarked(curr));
         } else {
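
The marked/unmarked helpers used throughout these hunks are the usual low-bit pointer tagging: bit 0 of a node's next pointer flags the node as logically deleted, and list_delete above sets it with a CAS before the node is physically unlinked. Here is a standalone sketch of that representation; is_marked matches the definition shown earlier, while get_marked/get_unmarked are written as the obvious counterparts and may differ in detail from the file's own definitions.

/* mark_bit_demo.c -- illustrative only; build with: cc -std=c11 mark_bit_demo.c */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

#define is_marked(p) (bool) ((uintptr_t)(p) & 0x01)
#define get_marked(p) ((uintptr_t)(p) | 0x01)
#define get_unmarked(p) ((uintptr_t)(p) & ~(uintptr_t) 0x01)

int main(void)
{
    void *node = malloc(64); /* heap pointers are at least 2-byte aligned,
                                so bit 0 is free to use as a mark */
    if (!node)
        return 1;

    uintptr_t marked = get_marked(node);
    assert(!is_marked(node));
    assert(is_marked(marked));
    assert(get_unmarked(marked) == (uintptr_t) node); /* the tag strips cleanly */

    free(node);
    return 0;
}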
@@ -364,6 +498,7 @@ static void *delete_thread(void *arg)
 
 int main(void)
 {
+    RUNTIME_STAT_INIT();
     list_t *list = list_new();
 
     pthread_t thr[N_THREADS];
@@ -382,8 +517,5 @@ int main(void)
 
     list_destroy(list);
 
-    fprintf(stderr, "inserts = %zu, deletes = %zu\n", atomic_load(&inserts),
-            atomic_load(&deletes));
-
     return 0;
 }
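
Finally, RUNTIME_STAT_INIT() in main() is just atexit(do_analysis), so whichever report was compiled in (the full statistics when RUNTIME_STAT is defined, otherwise only the insert/delete totals) is printed after main() returns. A trimmed sketch of that shutdown hook follows; the names are reused from the commit, the body is illustrative.

/* atexit_demo.c -- illustrative only; build with: cc -std=c11 atexit_demo.c */
#include <stdio.h>
#include <stdlib.h>

static void do_analysis(void)
{
    fprintf(stderr, "statistics would be reported here\n");
}

#define RUNTIME_STAT_INIT() atexit(do_analysis)

int main(void)
{
    RUNTIME_STAT_INIT(); /* register the report as an exit handler */
    puts("run the benchmark threads here");
    return 0; /* do_analysis() runs during normal process termination */
}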