
Commit b877fc8

lf-queue: Replace the old lf-queue with lfq
Instead of the old implementation, this provides 'lfq', a concurrent queue that uses hazard pointers to release memory safely. The code has been rewritten with C11 atomics to support other processor architectures, and the data races have been fixed, so it passes ThreadSanitizer's runtime detection.
1 parent 342506f · commit b877fc8
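For reference, the API this commit introduces (shown in the lfq.h hunk below) can be exercised with a minimal single-threaded smoke test. This is an illustrative sketch, not a file from the commit:

#include <assert.h>
#include <stdio.h>
#include "lfq.h"

int main(void)
{
    struct lfq_ctx ctx;
    assert(lfq_init(&ctx, 4) == 0); /* room for up to 4 consumer threads */

    int v[3] = {1, 2, 3};
    for (int i = 0; i < 3; i++)
        lfq_enqueue(&ctx, &v[i]);

    for (int i = 0; i < 3; i++) /* FIFO order is preserved */
        printf("%d\n", *(int *) lfq_dequeue(&ctx));

    assert(lfq_dequeue(&ctx) == NULL); /* empty queue yields NULL */
    assert(lfq_release(&ctx) == 0);
    return 0;
}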

7 files changed: +415 additions, −704 deletions


lf-queue/Makefile

Mode changed: 100644 → 100755
Lines changed: 15 additions & 8 deletions
@@ -1,14 +1,21 @@
-CFLAGS = -Wall -O2 -g -I.
+CC = gcc
+CFLAGS = -std=gnu11 -O3 -Wall -Wextra
 LDFLAGS = -lpthread
 
-# Enable ThreadSanitizer
-# CFLAGS += -fsanitize=thread
-# LDFLAGS += -fsanitize=thread
+targets = main1 main2 main3 main4
 
-all: test
+all: $(targets)
 
-test: test.c
-	$(CC) $(CFLAGS) -o $@ $< $(LDFLAGS)
+tsan: CFLAGS += -fsanitize=thread -g
+tsan: all
 
 clean:
-	rm -f test
+	rm -f $(targets)
+
+main%: lfq.c test.c
+	$(CC) $(CFLAGS) $^ $(LDFLAGS) -o $@
+
+main1: CFLAGS += -D MAX_PRODUCER=1 -D MAX_CONSUMER=1
+main2: CFLAGS += -D MAX_PRODUCER=4 -D MAX_CONSUMER=4
+main3: CFLAGS += -D MAX_PRODUCER=100 -D MAX_CONSUMER=10
+main4: CFLAGS += -D MAX_PRODUCER=10 -D MAX_CONSUMER=100
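test.c is not among the hunks shown here, so the following is only a sketch of how the -D MAX_PRODUCER / -D MAX_CONSUMER flags above are presumably consumed, with hypothetical fallback defaults:

#include <pthread.h>

/* sketch: compile-time thread counts injected by the Makefile */
#ifndef MAX_PRODUCER
#define MAX_PRODUCER 4 /* hypothetical default when built without -D */
#endif
#ifndef MAX_CONSUMER
#define MAX_CONSUMER 4
#endif

static pthread_t producers[MAX_PRODUCER];
static pthread_t consumers[MAX_CONSUMER];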

lf-queue/README.md

Lines changed: 3 additions & 7 deletions
@@ -1,8 +1,4 @@
-# Multithreaded Bounded Lock-free Queue
+# lfq
+This program has been rewritten with C11 atomics to support other processor architectures and to fix its data races, enabling it to pass ThreadSanitizer's runtime detection.
 
-Features:
-* single header style for C11
-* strongly typed
-* multithreaded
-* bounded
-* lock-free
+It implements a concurrent queue and uses hazard pointers to release memory safely.

lf-queue/atomics.h

Lines changed: 27 additions & 0 deletions
@@ -0,0 +1,27 @@
+#pragma once
+
+#include <stdatomic.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <string.h>
+
+#define ATOMIC_SET atomic_flag_test_and_set
+#define ATOMIC_RELEASE atomic_flag_clear
+
+#define ATOMIC_SUB atomic_fetch_sub
+#define ATOMIC_SUB64 ATOMIC_SUB
+#define CAS atomic_compare_exchange_strong
+#define cmpxchg CAS
+/* The 2nd argument is limited to 1 on machines with TAS but not XCHG.
+ * On x86 it is an arbitrary value.
+ */
+#define XCHG atomic_exchange
+
+#define ATOMIC_ADD atomic_fetch_add
+#define ATOMIC_ADD64 ATOMIC_ADD
+#define mb() atomic_thread_fence(memory_order_seq_cst)
+
+/* Memory barriers */
+#define smp_mb() atomic_thread_fence(memory_order_seq_cst)
+#define smp_rmb() atomic_thread_fence(memory_order_acquire)
+#define smp_wmb() atomic_thread_fence(memory_order_release)
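These wrappers map the Linux-kernel barrier names onto C11 fences. As a reminder of how the release/acquire pair is meant to be used, here is a minimal message-passing sketch; the variables msg and ready are illustrative, not part of the commit:

#include "atomics.h"

static int msg;           /* payload, written before the flag */
static atomic_bool ready; /* flag, published after the payload */

void producer(void)
{
    msg = 42;  /* plain store */
    smp_wmb(); /* release fence: orders the store to msg before the flag */
    atomic_store_explicit(&ready, true, memory_order_relaxed);
}

void consumer(void)
{
    while (!atomic_load_explicit(&ready, memory_order_relaxed))
        ;      /* spin until the flag is set */
    smp_rmb(); /* acquire fence: pairs with the release fence above */
    /* the fence pair guarantees msg == 42 is visible here */
}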

lf-queue/lfq.c

Lines changed: 214 additions & 0 deletions
@@ -0,0 +1,214 @@
+#include <assert.h>
+#include <errno.h>
+#include <stdbool.h>
+
+#include "atomics.h"
+#include "lfq.h"
+
+#define MAX_FREE 150
+
+static bool in_hp(struct lfq_ctx *ctx, struct lfq_node *node)
+{
+    for (int i = 0; i < ctx->MAX_HP_SIZE; i++) {
+        if (atomic_load(&ctx->HP[i]) == node)
+            return true;
+    }
+    return false;
+}
+
+/* add to tail of the free list */
+static void insert_pool(struct lfq_ctx *ctx, struct lfq_node *node)
+{
+    atomic_store(&node->free_next, NULL);
+    struct lfq_node *old_tail = XCHG(&ctx->fpt, node); /* seq_cst */
+    atomic_store(&old_tail->free_next, node);
+}
+
+static void free_pool(struct lfq_ctx *ctx, bool freeall)
+{
+    bool old = 0;
+    if (!CAS(&ctx->is_freeing, &old, 1))
+        return;
+
+    for (int i = 0; i < MAX_FREE || freeall; i++) {
+        struct lfq_node *p = ctx->fph;
+        if ((!atomic_load(&p->can_free)) || (!atomic_load(&p->free_next)) ||
+            in_hp(ctx, (struct lfq_node *) p))
+            goto final;
+        ctx->fph = p->free_next;
+        free(p);
+    }
+final:
+    atomic_store(&ctx->is_freeing, false);
+    smp_mb();
+}
+
+static void safe_free(struct lfq_ctx *ctx, struct lfq_node *node)
+{
+    if (atomic_load(&node->can_free) && !in_hp(ctx, node)) {
+        /* free is not thread-safe */
+        bool old = 0;
+        if (CAS(&ctx->is_freeing, &old, 1)) {
+            /* poison the pointer to detect use-after-free */
+            node->next = (void *) -1;
+            free(node); /* we got the lock; actually free */
+            atomic_store(&ctx->is_freeing, false);
+            smp_mb();
+        } else /* we did not get the lock; only add to a freelist */
+            insert_pool(ctx, node);
+    } else
+        insert_pool(ctx, node);
+    free_pool(ctx, false);
+}
+
+static int alloc_tid(struct lfq_ctx *ctx)
+{
+    for (int i = 0; i < ctx->MAX_HP_SIZE; i++) {
+        if (ctx->tid_map[i] == 0) {
+            int old = 0;
+            if (CAS(&ctx->tid_map[i], &old, 1))
+                return i;
+        }
+    }
+
+    return -1;
+}
+
+static void free_tid(struct lfq_ctx *ctx, int tid)
+{
+    ctx->tid_map[tid] = 0;
+}
+
+int lfq_init(struct lfq_ctx *ctx, int max_consume_thread)
+{
+    struct lfq_node *tmp = calloc(1, sizeof(struct lfq_node));
+    if (!tmp)
+        return -errno;
+
+    struct lfq_node *node = calloc(1, sizeof(struct lfq_node));
+    if (!node)
+        return -errno;
+
+    tmp->can_free = node->can_free = true;
+    memset(ctx, 0, sizeof(struct lfq_ctx));
+    ctx->MAX_HP_SIZE = max_consume_thread;
+    ctx->HP = calloc(max_consume_thread, sizeof(struct lfq_node *));
+    ctx->tid_map = calloc(max_consume_thread, sizeof(int));
+    ctx->head = ctx->tail = tmp;
+    ctx->fph = ctx->fpt = node;
+
+    return 0;
+}
+
+long lfq_count_freelist(const struct lfq_ctx *ctx)
+{
+    long count = 0;
+    for (struct lfq_node *p = (struct lfq_node *) ctx->fph; p; p = p->free_next)
+        count++;
+    return count;
+}
+
+int lfq_release(struct lfq_ctx *ctx)
+{
+    if (ctx->tail && ctx->head) { /* if we still have nodes in the queue */
+        while ((struct lfq_node *) ctx->head) {
+            struct lfq_node *tmp = (struct lfq_node *) ctx->head->next;
+            safe_free(ctx, (struct lfq_node *) ctx->head);
+            ctx->head = tmp;
+        }
+        ctx->tail = 0;
+    }
+    if (ctx->fph && ctx->fpt) {
+        free_pool(ctx, true);
+        if (ctx->fph != ctx->fpt)
+            return -1;
+        free(ctx->fpt); /* free the empty node */
+        ctx->fph = ctx->fpt = 0;
+    }
+    if (ctx->fph || ctx->fpt)
+        return -1;
+
+    free(ctx->HP);
+    free(ctx->tid_map);
+    memset(ctx, 0, sizeof(struct lfq_ctx));
+
+    return 0;
+}
+
+int lfq_enqueue(struct lfq_ctx *ctx, void *data)
+{
+    struct lfq_node *insert_node = calloc(1, sizeof(struct lfq_node));
+    if (!insert_node)
+        return -errno;
+
+    insert_node->data = data;
+    struct lfq_node *old_tail = XCHG(&ctx->tail, insert_node);
+    /* We have claimed our spot in the insertion order by modifying tail;
+     * we are the only inserting thread with a pointer to the old tail.
+     *
+     * Now we can make it part of the list by overwriting the NULL pointer in
+     * the old tail. This is safe whether or not other threads have updated
+     * ->next in our insert_node.
+     */
+    assert(!old_tail->next && "the old tail's next pointer must be NULL");
+    atomic_store(&old_tail->next, insert_node);
+
+    return 0;
+}
+
+void *lfq_dequeue_tid(struct lfq_ctx *ctx, int tid)
+{
+    struct lfq_node *old_head, *new_head;
+
+    /* HP[tid] is necessary for deallocation. */
+    do {
+    retry:
+        /* 'goto retry' rather than 'continue': continue would jump to the
+         * loop condition and attempt the CAS with an uninitialized new_head.
+         */
+        old_head = atomic_load(&ctx->head);
+
+        /* seq-cst store.
+         * FIXME: use xchg instead of mov + mfence on x86.
+         */
+        atomic_store(&ctx->HP[tid], old_head);
+        mb();
+
+        /* another thread freed it before seeing our HP[tid] store */
+        if (old_head != atomic_load(&ctx->head))
+            goto retry;
+        new_head = atomic_load(&old_head->next);
+
+        if (new_head == 0) {
+            atomic_store(&ctx->HP[tid], 0);
+            return NULL; /* never remove the last node */
+        }
+    } while (!CAS(&ctx->head, &old_head, new_head));
+
+    /* We have atomically advanced head, and we are the thread that won the
+     * race to claim a node. We return the data from the *new* head. The list
+     * starts off with a dummy node, so the current head is always a node
+     * whose data has already been read.
+     */
+    atomic_store(&ctx->HP[tid], 0);
+    void *ret = new_head->data;
+    atomic_store(&new_head->can_free, true);
+
+    /* we must avoid freeing old_head until other readers are definitely not
+     * going to load its ->next in the CAS loop
+     */
+    safe_free(ctx, (struct lfq_node *) old_head);
+
+    return ret;
+}
+
+void *lfq_dequeue(struct lfq_ctx *ctx)
+{
+    int tid = alloc_tid(ctx);
+    /* all hazard-pointer slots are taken: too many threads racing */
+    if (tid == -1)
+        return (void *) -1;
+
+    void *ret = lfq_dequeue_tid(ctx, tid);
+    free_tid(ctx, tid);
+    return ret;
+}
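The publish-then-validate sequence inside lfq_dequeue_tid() is the heart of the hazard-pointer scheme. The same step, distilled into a standalone sketch (hp_protect is a hypothetical helper; slot stands for &ctx->HP[tid] and src for &ctx->head):

#include <stdatomic.h>

static void *hp_protect(void *_Atomic *slot, void *_Atomic *src)
{
    for (;;) {
        void *p = atomic_load(src); /* candidate node to protect */
        atomic_store(slot, p);      /* publish our hazard pointer */
        atomic_thread_fence(memory_order_seq_cst);

        /* Validate: if src still equals p, any reclaimer that wants to free
         * p must scan the hazard-pointer array after our store above, see
         * the published pointer, and defer the free. */
        if (p == atomic_load(src))
            return p; /* safe to dereference until the slot is cleared */
    }
}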

lf-queue/lfq.h

Lines changed: 69 additions & 0 deletions
@@ -0,0 +1,69 @@
+#pragma once
+
+#include <stdalign.h>
+#include <stdbool.h>
+
+struct lfq_node {
+    void *data;
+    union {
+        struct lfq_node *next;
+        struct lfq_node *free_next;
+    };
+    bool can_free;
+};
+
+struct lfq_ctx {
+    alignas(64) struct lfq_node *head;
+    int count;
+    struct lfq_node **HP; /* hazard pointers */
+    int *tid_map;
+    bool is_freeing;
+    struct lfq_node *fph, *fpt; /* free pool head/tail */
+
+    /* FIXME: get rid of struct. Make it configurable */
+    int MAX_HP_SIZE;
+
+    /* avoid cacheline contention */
+    alignas(64) struct lfq_node *tail;
+};
+
+/**
+ * lfq_init - Initialize lock-free queue.
+ * @ctx: Lock-free queue handler.
+ * @max_consume_thread: Maximum number of consumer threads (one hazard-pointer
+ * slot is allocated per thread).
+ * Return zero on success. On error, negative errno.
+ */
+int lfq_init(struct lfq_ctx *ctx, int max_consume_thread);
+
+/**
+ * lfq_release - Release the lock-free queue and its resources.
+ * @ctx: Lock-free queue handler.
+ * Return zero on success. On error, -1.
+ */
+int lfq_release(struct lfq_ctx *ctx);
+
+/* internal function */
+long lfq_count_freelist(const struct lfq_ctx *ctx);
+
+/**
+ * lfq_enqueue - Push data into queue.
+ * @ctx: Lock-free queue handler.
+ * @data: User data.
+ * Return zero on success. On error, negative errno.
+ */
+int lfq_enqueue(struct lfq_ctx *ctx, void *data);
+
+/**
+ * lfq_dequeue_tid - Pop data from queue.
+ * @ctx: Lock-free queue handler.
+ * @tid: Unique thread id.
+ * Return the dequeued data, or NULL if the queue is empty.
+ */
+void *lfq_dequeue_tid(struct lfq_ctx *ctx, int tid);
+
+/**
+ * lfq_dequeue - Pop data from queue, allocating a thread id internally.
+ * @ctx: Lock-free queue handler.
+ * Return the dequeued data, NULL if the queue is empty, or (void *) -1 if no
+ * thread id slot is available.
+ */
+void *lfq_dequeue(struct lfq_ctx *ctx);
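Since lfq_dequeue() allocates and releases a tid slot on every call, a long-running consumer can instead claim a fixed tid and call lfq_dequeue_tid() directly. A sketch, assuming each consumer is assigned a unique index below max_consume_thread:

#include <pthread.h>
#include "lfq.h"

struct consumer_arg {
    struct lfq_ctx *ctx;
    int tid; /* unique per consumer: 0 <= tid < max_consume_thread */
};

static void *consumer(void *p)
{
    struct consumer_arg *a = p;
    void *item;
    while (!(item = lfq_dequeue_tid(a->ctx, a->tid)))
        ; /* spin until a producer publishes an item */
    return item;
}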
