
Commit bf72d0d

Add more operation analysis, including deletes and inserts variables.
Use the preprocessor to decide whether the analysis is compiled in. The following new counters record the operations:
- retry: the number of retries in the __list_find function.
- contains: the number of wait-free contains in the __list_find function that the curr pointer pointed to.
- traversal: the number of list elements traversed in the __list_find function.
- fail: the number of CAS() failures.
- del: the number of list_delete operations that failed and restarted.
- ins: the number of list_insert operations that failed and restarted.
- load: the number of atomic_load operations in list_delete, list_insert and __list_find.
- store: the number of atomic_store operations in list_delete, list_insert and __list_find.
1 parent ce237c8 commit bf72d0d

File tree

2 files changed: +155, -16 lines


hp_list/Makefile
Lines changed: 7 additions & 0 deletions

@@ -12,6 +12,13 @@ $(BIN): main.c
 all: CFLAGS += -O2
 all: $(BIN)
 
+# The RUNTIME_STAT flag enables runtime operation statistics by counting the
+# atomic operations called.
+# The recorded operations include the atomic operations, the linked list
+# insert and delete retries, and the element traversals in __list_find.
+analyze: CFLAGS += -D RUNTIME_STAT
+analyze: $(BIN)
+
 indent:
 	clang-format -i *.[ch]
 
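Note: with -D RUNTIME_STAT defined, main.c (next file) swaps the raw atomic calls for counting wrappers, e.g. a CAS() macro that bumps a failure counter whenever the compare-and-swap loses a race; without the flag the wrapper reduces to the plain atomic_compare_exchange_strong(). The following is a minimal, self-contained sketch of that wrapper pattern. It is not part of this commit: the counter name cas_failures is made up for the illustration, and it uses the same GCC statement-expression extension as the diff.

/* cas_sketch.c -- illustration of the RUNTIME_STAT-gated CAS wrapper.
 * Build:  cc -std=gnu11 cas_sketch.c                  (plain CAS)
 *         cc -std=gnu11 -D RUNTIME_STAT cas_sketch.c  (failed CAS are counted)
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#ifdef RUNTIME_STAT
static atomic_uint_fast64_t cas_failures; /* plays the role of stats.fail */
#define CAS(obj, expected, desired)                                          \
    ({                                                                       \
        bool __ret = atomic_compare_exchange_strong(obj, expected, desired); \
        if (!__ret)                                                          \
            atomic_fetch_add(&cas_failures, 1);                              \
        __ret;                                                               \
    })
#else
#define CAS(obj, expected, desired) \
    atomic_compare_exchange_strong(obj, expected, desired)
#endif

int main(void)
{
    atomic_uintptr_t slot = 0;
    uintptr_t expected = 1; /* wrong guess, so this CAS fails */
    if (!CAS(&slot, &expected, (uintptr_t) 2)) {
        /* lost the race: expected now holds the value actually seen in slot */
    }
#ifdef RUNTIME_STAT
    printf("CAS failures: %lu\n", (unsigned long) atomic_load(&cas_failures));
#endif
    return 0;
}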

hp_list/main.c
Lines changed: 148 additions & 16 deletions
@@ -14,6 +14,129 @@
 #include <string.h>
 #include <threads.h>
 
+static atomic_uint_fast64_t deletes = 0, inserts = 0;
+
+#ifdef RUNTIME_STAT
+
+/*
+ * Reference:
+ * A more Pragmatic Implementation of the Lock-free, Ordered, Linked List
+ * https://arxiv.org/abs/2010.15755
+ */
+
+enum {
+    TRACE_nothing = 0,
+    TRACE_retry,     /* the number of retries in the __list_find function */
+    TRACE_contains,  /* the number of wait-free contains in the __list_find
+                        function that the curr pointer pointed to */
+    TRACE_traversal, /* the number of list elements traversed in the
+                        __list_find function */
+    TRACE_fail,      /* the number of CAS() failures */
+    TRACE_del,       /* the number of list_delete operations that failed and
+                        restarted */
+    TRACE_ins,       /* the number of list_insert operations that failed and
+                        restarted */
+    TRACE_load,      /* the number of atomic_load operations in list_delete,
+                        list_insert and __list_find */
+    TRACE_store      /* the number of atomic_store operations in list_delete,
+                        list_insert and __list_find */
+};
+
+struct runtime_stat {
+    atomic_uint_fast64_t retry, contains, traversal, fail;
+    atomic_uint_fast64_t del, ins;
+    atomic_uint_fast64_t load, store;
+};
+static struct runtime_stat stats = {0};
+
+#define CAS(obj, expected, desired)                                          \
+    ({                                                                       \
+        bool __ret = atomic_compare_exchange_strong(obj, expected, desired); \
+        if (!__ret)                                                          \
+            atomic_fetch_add(&stats.fail, 1);                                \
+        __ret;                                                               \
+    })
+#define ATOMIC_LOAD(obj)                  \
+    ({                                    \
+        atomic_fetch_add(&stats.load, 1); \
+        atomic_load(obj);                 \
+    })
+#define ATOMIC_STORE_EXPLICIT(obj, desired, order)  \
+    do {                                            \
+        atomic_fetch_add(&stats.store, 1);          \
+        atomic_store_explicit(obj, desired, order); \
+    } while (0)
+#define TRACE(ops)                           \
+    do {                                     \
+        if (TRACE_##ops)                     \
+            atomic_fetch_add(&stats.ops, 1); \
+    } while (0)
+
+static void do_analysis(void)
+{
+    __atomic_thread_fence(__ATOMIC_SEQ_CST);
+#define TRACE_PRINT(ops) printf("%-10s: %ld\n", #ops, stats.ops);
+    TRACE_PRINT(retry);
+    TRACE_PRINT(contains);
+    TRACE_PRINT(traversal);
+    TRACE_PRINT(fail);
+    TRACE_PRINT(del);
+    TRACE_PRINT(ins);
+    TRACE_PRINT(load);
+    TRACE_PRINT(store);
+#undef TRACE_PRINT
+#define TRACE_PRINT(val) printf("%-10s: %ld\n", #val, val);
+    TRACE_PRINT(deletes);
+    TRACE_PRINT(inserts);
+#undef TRACE_PRINT
+    printf(
+        "\"retry\" is the number of retries in the __list_find function.\n");
+    printf(
+        "\"contains\" is the number of wait-free contains in the __list_find "
+        "function that the curr pointer pointed to.\n");
+    printf(
+        "\"traversal\" is the number of list elements traversed in the "
+        "__list_find function.\n");
+    printf("\"fail\" is the number of CAS() failures.\n");
+    printf(
+        "\"del\" is the number of list_delete operations that failed and "
+        "restarted.\n");
+    printf(
+        "\"ins\" is the number of list_insert operations that failed and "
+        "restarted.\n");
+    printf("\"deletes\" is the number of linked list elements deleted.\n");
+    printf("\"inserts\" is the number of linked list elements created.\n");
+    printf(
+        "\"load\" is the number of atomic_load operations in list_delete, "
+        "list_insert and __list_find.\n");
+    printf(
+        "\"store\" is the number of atomic_store operations in list_delete, "
+        "list_insert and __list_find.\n");
+}
+
+#else
+
+#define CAS(obj, expected, desired) \
+    ({ atomic_compare_exchange_strong(obj, expected, desired); })
+#define ATOMIC_LOAD(obj) ({ atomic_load(obj); })
+#define ATOMIC_STORE_EXPLICIT(obj, desired, order)  \
+    do {                                            \
+        atomic_store_explicit(obj, desired, order); \
+    } while (0)
+#define TRACE(ops) \
+    do {           \
+    } while (0)
+
+static void do_analysis(void)
+{
+    __atomic_thread_fence(__ATOMIC_SEQ_CST);
+    fprintf(stderr, "inserts = %zu, deletes = %zu\n", inserts, deletes);
+}
+
+#endif /* RUNTIME_STAT */
+
+#define RUNTIME_STAT_INIT() atexit(do_analysis)
+
 #define HP_MAX_THREADS 128
 #define HP_MAX_HPS 5 /* This is named 'K' in the HP paper */
 #define CLPAD (128 / sizeof(uintptr_t))
@@ -162,8 +285,6 @@ void list_hp_retire(list_hp_t *hp, uintptr_t ptr)
 #define N_THREADS (128 / 2)
 #define MAX_THREADS 128
 
-static atomic_uint_fast32_t deletes = 0, inserts = 0;
-
 enum { HP_NEXT = 0, HP_CURR = 1, HP_PREV };
 
 #define is_marked(p) (bool) ((uintptr_t)(p) &0x01)
@@ -225,21 +346,29 @@ static bool __list_find(list_t *list,
 
 try_again:
     prev = &list->head;
-    curr = (list_node_t *) atomic_load(prev);
+    curr = (list_node_t *) ATOMIC_LOAD(prev);
     (void) list_hp_protect_ptr(list->hp, HP_CURR, (uintptr_t) curr);
-    if (atomic_load(prev) != get_unmarked(curr))
+    if (ATOMIC_LOAD(prev) != get_unmarked(curr)) {
+        TRACE(retry);
         goto try_again;
+    }
     while (true) {
-        next = (list_node_t *) atomic_load(&get_unmarked_node(curr)->next);
+        if (is_marked(curr))
+            TRACE(contains);
+        next = (list_node_t *) ATOMIC_LOAD(&get_unmarked_node(curr)->next);
         (void) list_hp_protect_ptr(list->hp, HP_NEXT, get_unmarked(next));
         /* On a CAS failure, the search function, "__list_find," will simply
         * have to go backwards in the list until an unmarked element is found
         * from which the search in increasing key order can be started.
         */
-        if (atomic_load(&get_unmarked_node(curr)->next) != (uintptr_t) next)
+        if (ATOMIC_LOAD(&get_unmarked_node(curr)->next) != (uintptr_t) next) {
+            TRACE(retry);
             goto try_again;
-        if (atomic_load(prev) != get_unmarked(curr))
+        }
+        if (ATOMIC_LOAD(prev) != get_unmarked(curr)) {
+            TRACE(retry);
             goto try_again;
+        }
         if (get_unmarked_node(next) == next) {
             if (!(get_unmarked_node(curr)->key < *key)) {
                 *par_curr = curr;
@@ -252,12 +381,15 @@ static bool __list_find(list_t *list,
                                            get_unmarked(curr));
         } else {
             uintptr_t tmp = get_unmarked(curr);
-            if (!atomic_compare_exchange_strong(prev, &tmp, get_unmarked(next)))
+            if (!CAS(prev, &tmp, get_unmarked(next))) {
+                TRACE(retry);
                 goto try_again;
+            }
             list_hp_retire(list->hp, get_unmarked(curr));
         }
         curr = next;
         (void) list_hp_protect_release(list->hp, HP_CURR, get_unmarked(next));
+        TRACE(traversal);
     }
 }
 
@@ -274,13 +406,14 @@ bool list_insert(list_t *list, list_key_t key)
             list_hp_clear(list->hp);
             return false;
         }
-        atomic_store_explicit(&node->next, (uintptr_t) curr,
+        ATOMIC_STORE_EXPLICIT(&node->next, (uintptr_t) curr,
                               memory_order_relaxed);
         uintptr_t tmp = get_unmarked(curr);
-        if (atomic_compare_exchange_strong(prev, &tmp, (uintptr_t) node)) {
+        if (CAS(prev, &tmp, (uintptr_t) node)) {
             list_hp_clear(list->hp);
             return true;
         }
+        TRACE(ins);
     }
 }
 
@@ -296,12 +429,13 @@ bool list_delete(list_t *list, list_key_t key)
 
         uintptr_t tmp = get_unmarked(next);
 
-        if (!atomic_compare_exchange_strong(&curr->next, &tmp,
-                                            get_marked(next)))
+        if (!CAS(&curr->next, &tmp, get_marked(next))) {
+            TRACE(del);
             continue;
+        }
 
         tmp = get_unmarked(curr);
-        if (atomic_compare_exchange_strong(prev, &tmp, get_unmarked(next))) {
+        if (CAS(prev, &tmp, get_unmarked(next))) {
             list_hp_clear(list->hp);
             list_hp_retire(list->hp, get_unmarked(curr));
         } else {
@@ -364,6 +498,7 @@ static void *delete_thread(void *arg)
 
 int main(void)
 {
+    RUNTIME_STAT_INIT();
     list_t *list = list_new();
 
     pthread_t thr[N_THREADS];
@@ -382,8 +517,5 @@ int main(void)
 
     list_destroy(list);
 
-    fprintf(stderr, "inserts = %zu, deletes = %zu\n", atomic_load(&inserts),
-            atomic_load(&deletes));
-
     return 0;
 }
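For a view of the reporting flow in isolation: each TRACE() site is gated by the matching enum entry, the counters are plain atomic fetch-adds, and RUNTIME_STAT_INIT() registers do_analysis() with atexit() so the totals are printed when the process exits. Below is a small, self-contained sketch of that pattern. It is not part of this commit: it keeps only two of the counters (retry and fail) and prints with PRIuFAST64 rather than the %ld format used in the diff.

/* trace_sketch.c -- illustration of the counter/TRACE/atexit pattern.
 * Build:  cc -std=gnu11 trace_sketch.c                   (counters compiled out)
 *         cc -std=gnu11 -D RUNTIME_STAT trace_sketch.c   (counters printed at exit)
 */
#include <inttypes.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#ifdef RUNTIME_STAT
enum { TRACE_nothing = 0, TRACE_retry, TRACE_fail };

static struct {
    atomic_uint_fast64_t retry, fail;
} stats;

/* Bump stats.<ops>, but only for names with a non-zero TRACE_ enumerator. */
#define TRACE(ops)                           \
    do {                                     \
        if (TRACE_##ops)                     \
            atomic_fetch_add(&stats.ops, 1); \
    } while (0)

static void do_analysis(void)
{
    printf("retry: %" PRIuFAST64 "\n", atomic_load(&stats.retry));
    printf("fail : %" PRIuFAST64 "\n", atomic_load(&stats.fail));
}
#else
#define TRACE(ops) \
    do {           \
    } while (0)
static void do_analysis(void) {}
#endif

#define RUNTIME_STAT_INIT() atexit(do_analysis)

int main(void)
{
    RUNTIME_STAT_INIT(); /* same idea as the call added at the top of main() */
    TRACE(retry);        /* counted only in the RUNTIME_STAT build */
    TRACE(fail);
    return 0;
}

The RUNTIME_STAT build of this sketch prints retry: 1 and fail: 1 at exit, while the plain build prints nothing, mirroring how the analyze target toggles the statistics in this commit.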
