/*
 * Copyright 2012-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)queue.h	8.5 (Berkeley) 8/20/94
 * $FreeBSD$
 */

#ifndef CK_QUEUE_H
#define	CK_QUEUE_H

#include <ck_pr.h>

/*
 * This file defines three types of data structures: singly-linked lists,
 * singly-linked tail queues and lists.
 *
 * A singly-linked list is headed by a single forward pointer. The elements
 * are singly linked for minimum space and pointer manipulation overhead at
 * the expense of O(n) removal for arbitrary elements. New elements can be
 * added to the list after an existing element or at the head of the list.
 * Elements being removed from the head of the list should use the explicit
 * macro for this purpose for optimum efficiency. A singly-linked list may
 * only be traversed in the forward direction.  Singly-linked lists are ideal
 * for applications with large datasets and few or no removals or for
 * implementing a LIFO queue.
 *
 * A singly-linked tail queue is headed by a pair of pointers, one to the
 * head of the list and the other to the tail of the list. The elements are
 * singly linked for minimum space and pointer manipulation overhead at the
 * expense of O(n) removal for arbitrary elements. New elements can be added
 * to the list after an existing element, at the head of the list, or at the
 * end of the list. Elements being removed from the head of the tail queue
 * should use the explicit macro for this purpose for optimum efficiency.
 * A singly-linked tail queue may only be traversed in the forward direction.
 * Singly-linked tail queues are ideal for applications with large datasets
 * and few or no removals or for implementing a FIFO queue.
 *
 * A list is headed by a single forward pointer (or an array of forward
 * pointers for a hash table header). The elements are doubly linked
 * so that an arbitrary element can be removed without a need to
 * traverse the list. New elements can be added to the list before
 * or after an existing element or at the head of the list. A list
 * may only be traversed in the forward direction.
 *
 * It is safe to use _FOREACH/_FOREACH_SAFE in the presence of concurrent
 * modifications to the list. Writers to these lists must, on the other hand,
 * implement writer-side synchronization. The _SWAP operations are not atomic.
 * This facility is currently unsupported on architectures such as the Alpha
 * which require load-dependent memory fences.
 *
 *				CK_SLIST	CK_LIST	CK_STAILQ
 * _HEAD			+		+	+
 * _HEAD_INITIALIZER		+		+	+
 * _ENTRY			+		+	+
 * _INIT			+		+	+
 * _EMPTY			+		+	+
 * _FIRST			+		+	+
 * _NEXT			+		+	+
 * _FOREACH			+		+	+
 * _FOREACH_SAFE		+		+	+
 * _INSERT_HEAD			+		+	+
 * _INSERT_BEFORE		-		+	-
 * _INSERT_AFTER		+		+	+
 * _INSERT_TAIL			-		-	+
 * _REMOVE_AFTER		+		-	+
 * _REMOVE_HEAD			+		-	+
 * _REMOVE			+		+	+
 * _SWAP			+		+	+
 * _MOVE			+		+	+
 */
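
/*
 * Example (illustrative sketch, not part of this header): the usual pattern
 * is to let readers traverse a list lock-free with _FOREACH while writers
 * serialize among themselves with an ordinary lock. The struct "entry", the
 * list "head", the pthread mutex and the consume() routine below are
 * hypothetical placeholders.
 *
 *	struct entry {
 *		int value;
 *		CK_SLIST_ENTRY(entry) link;
 *	};
 *
 *	CK_SLIST_HEAD(entry_list, entry) head =
 *	    CK_SLIST_HEAD_INITIALIZER(head);
 *	pthread_mutex_t writer_lock = PTHREAD_MUTEX_INITIALIZER;
 *
 *	void
 *	reader(void)
 *	{
 *		struct entry *e;
 *
 *		CK_SLIST_FOREACH(e, &head, link)
 *			consume(e->value);
 *	}
 *
 *	void
 *	writer(struct entry *e)
 *	{
 *		pthread_mutex_lock(&writer_lock);
 *		CK_SLIST_INSERT_HEAD(&head, e, link);
 *		pthread_mutex_unlock(&writer_lock);
 *	}
 */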

/*
 * Singly-linked List declarations.
 */
#define	CK_SLIST_HEAD(name, type)						\
struct name {									\
	struct type *cslh_first;	/* first element */			\
}

#define	CK_SLIST_HEAD_INITIALIZER(head)						\
	{ NULL }

#define	CK_SLIST_ENTRY(type)							\
struct {									\
	struct type *csle_next;	/* next element */				\
}

/*
 * Singly-linked List functions.
 */
#define	CK_SLIST_EMPTY(head)							\
	(ck_pr_load_ptr(&(head)->cslh_first) == NULL)

#define	CK_SLIST_FIRST(head)							\
	(ck_pr_load_ptr(&(head)->cslh_first))

#define	CK_SLIST_NEXT(elm, field)						\
	ck_pr_load_ptr(&((elm)->field.csle_next))

#define	CK_SLIST_FOREACH(var, head, field)					\
	for ((var) = CK_SLIST_FIRST((head));					\
	    (var) && (ck_pr_fence_load(), 1);					\
	    (var) = CK_SLIST_NEXT((var), field))

#define	CK_SLIST_FOREACH_SAFE(var, head, field, tvar)				 \
	for ((var) = CK_SLIST_FIRST(head);					 \
	    (var) && (ck_pr_fence_load(), (tvar) = CK_SLIST_NEXT(var, field), 1);\
	    (var) = (tvar))

#define	CK_SLIST_FOREACH_PREVPTR(var, varp, head, field)			\
	for ((varp) = &(head)->cslh_first;					\
	    ((var) = ck_pr_load_ptr(varp)) != NULL && (ck_pr_fence_load(), 1);	\
	    (varp) = &(var)->field.csle_next)

#define	CK_SLIST_INIT(head) do {						\
	ck_pr_store_ptr(&(head)->cslh_first, NULL);				\
	ck_pr_fence_store();							\
} while (0)

#define	CK_SLIST_INSERT_AFTER(a, b, field) do {					\
	(b)->field.csle_next = (a)->field.csle_next;				\
	ck_pr_fence_store();							\
	ck_pr_store_ptr(&(a)->field.csle_next, b);				\
} while (0)

#define	CK_SLIST_INSERT_HEAD(head, elm, field) do {				\
	(elm)->field.csle_next = (head)->cslh_first;				\
	ck_pr_fence_store();							\
	ck_pr_store_ptr(&(head)->cslh_first, elm);				\
} while (0)

#define	CK_SLIST_INSERT_PREVPTR(prevp, slistelm, elm, field) do {		\
	(elm)->field.csle_next = (slistelm);					\
	ck_pr_fence_store();							\
	ck_pr_store_ptr(prevp, elm);						\
} while (0)

#define CK_SLIST_REMOVE_AFTER(elm, field) do {					\
	ck_pr_store_ptr(&(elm)->field.csle_next,				\
	    (elm)->field.csle_next->field.csle_next);				\
} while (0)

#define	CK_SLIST_REMOVE(head, elm, type, field) do {				\
	if ((head)->cslh_first == (elm)) {					\
		CK_SLIST_REMOVE_HEAD((head), field);				\
	} else {								\
		struct type *curelm = (head)->cslh_first;			\
		while (curelm->field.csle_next != (elm))			\
			curelm = curelm->field.csle_next;			\
		CK_SLIST_REMOVE_AFTER(curelm, field);				\
	}									\
} while (0)

#define	CK_SLIST_REMOVE_HEAD(head, field) do {					\
	ck_pr_store_ptr(&(head)->cslh_first,					\
	    (head)->cslh_first->field.csle_next);				\
} while (0)

#define CK_SLIST_REMOVE_PREVPTR(prevp, elm, field) do {				\
	ck_pr_store_ptr(prevp, (elm)->field.csle_next);				\
} while (0)
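
/*
 * Example (illustrative sketch): the _PREVPTR macros are meant to be used
 * together. A writer can walk the list with CK_SLIST_FOREACH_PREVPTR, which
 * tracks the address of the pointer leading to the current element, and then
 * unlink that element with CK_SLIST_REMOVE_PREVPTR without rescanning from
 * the head. The struct "entry", its "link" field and the predicate
 * is_stale() are hypothetical; writer-side locking is still the caller's
 * responsibility.
 *
 *	struct entry *e, **prevp;
 *
 *	CK_SLIST_FOREACH_PREVPTR(e, prevp, &head, link) {
 *		if (is_stale(e)) {
 *			CK_SLIST_REMOVE_PREVPTR(prevp, e, link);
 *			break;
 *		}
 *	}
 */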

#define CK_SLIST_MOVE(head1, head2, field) do {					\
	ck_pr_store_ptr(&(head1)->cslh_first, (head2)->cslh_first);		\
} while (0)

/*
 * This operation is not applied atomically.
 */
#define CK_SLIST_SWAP(a, b, type) do {						\
	struct type *swap_first = (a)->cslh_first;				\
	(a)->cslh_first = (b)->cslh_first;					\
	(b)->cslh_first = swap_first;						\
} while (0)

/*
 * Singly-linked Tail queue declarations.
 */
#define	CK_STAILQ_HEAD(name, type)					\
struct name {								\
	struct type *cstqh_first;/* first element */			\
	struct type **cstqh_last;/* addr of last next element */	\
}

#define	CK_STAILQ_HEAD_INITIALIZER(head)				\
	{ NULL, &(head).cstqh_first }

#define	CK_STAILQ_ENTRY(type)						\
struct {								\
	struct type *cstqe_next;	/* next element */		\
}

/*
 * Singly-linked Tail queue functions.
 */
#define	CK_STAILQ_CONCAT(head1, head2) do {					\
	if ((head2)->cstqh_first != NULL) {					\
		ck_pr_store_ptr((head1)->cstqh_last, (head2)->cstqh_first);	\
		ck_pr_fence_store();						\
		(head1)->cstqh_last = (head2)->cstqh_last;			\
		CK_STAILQ_INIT((head2));					\
	}									\
} while (0)

#define	CK_STAILQ_EMPTY(head)	(ck_pr_load_ptr(&(head)->cstqh_first) == NULL)

#define	CK_STAILQ_FIRST(head)	(ck_pr_load_ptr(&(head)->cstqh_first))

#define	CK_STAILQ_FOREACH(var, head, field)				\
	for ((var) = CK_STAILQ_FIRST((head));				\
	    (var) && (ck_pr_fence_load(), 1);				\
	    (var) = CK_STAILQ_NEXT((var), field))

#define	CK_STAILQ_FOREACH_SAFE(var, head, field, tvar)			\
	for ((var) = CK_STAILQ_FIRST((head));				\
	    (var) && (ck_pr_fence_load(), (tvar) =			\
		CK_STAILQ_NEXT((var), field), 1);			\
	    (var) = (tvar))

#define	CK_STAILQ_INIT(head) do {					\
	ck_pr_store_ptr(&(head)->cstqh_first, NULL);			\
	ck_pr_fence_store();						\
	(head)->cstqh_last = &(head)->cstqh_first;			\
} while (0)

#define	CK_STAILQ_INSERT_AFTER(head, tqelm, elm, field) do {			\
	(elm)->field.cstqe_next = (tqelm)->field.cstqe_next;			\
	ck_pr_fence_store();							\
	ck_pr_store_ptr(&(tqelm)->field.cstqe_next, elm);			\
	if ((elm)->field.cstqe_next == NULL)					\
		(head)->cstqh_last = &(elm)->field.cstqe_next;			\
} while (0)

#define	CK_STAILQ_INSERT_HEAD(head, elm, field) do {				\
	(elm)->field.cstqe_next = (head)->cstqh_first;				\
	ck_pr_fence_store();							\
	ck_pr_store_ptr(&(head)->cstqh_first, elm);				\
	if ((elm)->field.cstqe_next == NULL)					\
		(head)->cstqh_last = &(elm)->field.cstqe_next;			\
} while (0)

#define	CK_STAILQ_INSERT_TAIL(head, elm, field) do {				\
	(elm)->field.cstqe_next = NULL;						\
	ck_pr_fence_store();							\
	ck_pr_store_ptr((head)->cstqh_last, (elm));				\
	(head)->cstqh_last = &(elm)->field.cstqe_next;				\
} while (0)

#define	CK_STAILQ_NEXT(elm, field)						\
	(ck_pr_load_ptr(&(elm)->field.cstqe_next))

#define	CK_STAILQ_REMOVE(head, elm, type, field) do {				\
	if ((head)->cstqh_first == (elm)) {					\
		CK_STAILQ_REMOVE_HEAD((head), field);				\
	} else {								\
		struct type *curelm = (head)->cstqh_first;			\
		while (curelm->field.cstqe_next != (elm))			\
			curelm = curelm->field.cstqe_next;			\
		CK_STAILQ_REMOVE_AFTER(head, curelm, field);			\
	}									\
} while (0)

#define CK_STAILQ_REMOVE_AFTER(head, elm, field) do {				\
	ck_pr_store_ptr(&(elm)->field.cstqe_next,				\
	    (elm)->field.cstqe_next->field.cstqe_next);				\
	if ((elm)->field.cstqe_next == NULL)					\
		(head)->cstqh_last = &(elm)->field.cstqe_next;			\
} while (0)

#define	CK_STAILQ_REMOVE_HEAD(head, field) do {					\
	ck_pr_store_ptr(&(head)->cstqh_first,					\
	    (head)->cstqh_first->field.cstqe_next);				\
	if ((head)->cstqh_first == NULL)					\
		(head)->cstqh_last = &(head)->cstqh_first;			\
} while (0)

#define CK_STAILQ_MOVE(head1, head2, field) do {				\
	ck_pr_store_ptr(&(head1)->cstqh_first, (head2)->cstqh_first);		\
	(head1)->cstqh_last = (head2)->cstqh_last;				\
	if ((head2)->cstqh_last == &(head2)->cstqh_first)			\
		(head1)->cstqh_last = &(head1)->cstqh_first;			\
} while (0)

/*
 * This operation is not applied atomically.
 */
#define CK_STAILQ_SWAP(head1, head2, type) do {				\
	struct type *swap_first = (head1)->cstqh_first;			\
	struct type **swap_last = (head1)->cstqh_last;			\
	(head1)->cstqh_first = (head2)->cstqh_first;			\
	(head1)->cstqh_last = (head2)->cstqh_last;			\
	(head2)->cstqh_first = swap_first;				\
	(head2)->cstqh_last = swap_last;				\
	if (CK_STAILQ_EMPTY(head1))					\
		(head1)->cstqh_last = &(head1)->cstqh_first;		\
	if (CK_STAILQ_EMPTY(head2))					\
		(head2)->cstqh_last = &(head2)->cstqh_first;		\
} while (0)
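
/*
 * Example (illustrative sketch): CK_STAILQ lends itself to a FIFO work queue
 * in which readers may observe the queue lock-free via CK_STAILQ_FOREACH,
 * while enqueueing and dequeueing serialize on a writer-side lock. The
 * struct "task", the queue "work_queue" and the pthread mutex are
 * hypothetical placeholders.
 *
 *	struct task {
 *		void (*fn)(void *);
 *		void *arg;
 *		CK_STAILQ_ENTRY(task) link;
 *	};
 *
 *	CK_STAILQ_HEAD(task_queue, task) work_queue =
 *	    CK_STAILQ_HEAD_INITIALIZER(work_queue);
 *	pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
 *
 *	void
 *	enqueue(struct task *t)
 *	{
 *		pthread_mutex_lock(&queue_lock);
 *		CK_STAILQ_INSERT_TAIL(&work_queue, t, link);
 *		pthread_mutex_unlock(&queue_lock);
 *	}
 *
 *	struct task *
 *	dequeue(void)
 *	{
 *		struct task *t;
 *
 *		pthread_mutex_lock(&queue_lock);
 *		t = CK_STAILQ_FIRST(&work_queue);
 *		if (t != NULL)
 *			CK_STAILQ_REMOVE_HEAD(&work_queue, link);
 *		pthread_mutex_unlock(&queue_lock);
 *		return (t);
 *	}
 */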

/*
 * List declarations.
 */
#define	CK_LIST_HEAD(name, type)						\
struct name {									\
	struct type *clh_first;	/* first element */				\
}

#define	CK_LIST_HEAD_INITIALIZER(head)						\
	{ NULL }

#define	CK_LIST_ENTRY(type)							\
struct {									\
	struct type *cle_next;	/* next element */				\
	struct type **cle_prev;	/* address of previous next element */		\
}

#define	CK_LIST_FIRST(head)		ck_pr_load_ptr(&(head)->clh_first)
#define	CK_LIST_EMPTY(head)		(CK_LIST_FIRST(head) == NULL)
#define	CK_LIST_NEXT(elm, field)	ck_pr_load_ptr(&(elm)->field.cle_next)

#define	CK_LIST_FOREACH(var, head, field)					\
	for ((var) = CK_LIST_FIRST((head));					\
	    (var) && (ck_pr_fence_load(), 1);					\
	    (var) = CK_LIST_NEXT((var), field))

#define	CK_LIST_FOREACH_SAFE(var, head, field, tvar)				  \
	for ((var) = CK_LIST_FIRST((head));					  \
	    (var) && (ck_pr_fence_load(), (tvar) = CK_LIST_NEXT((var), field), 1);\
	    (var) = (tvar))

#define	CK_LIST_INIT(head) do {							\
	ck_pr_store_ptr(&(head)->clh_first, NULL);				\
	ck_pr_fence_store();							\
} while (0)

#define	CK_LIST_INSERT_AFTER(listelm, elm, field) do {				\
	(elm)->field.cle_next = (listelm)->field.cle_next;			\
	(elm)->field.cle_prev = &(listelm)->field.cle_next;			\
	ck_pr_fence_store();							\
	if ((listelm)->field.cle_next != NULL)					\
		(listelm)->field.cle_next->field.cle_prev = &(elm)->field.cle_next;\
	ck_pr_store_ptr(&(listelm)->field.cle_next, elm);			\
} while (0)

#define	CK_LIST_INSERT_BEFORE(listelm, elm, field) do {				\
	(elm)->field.cle_prev = (listelm)->field.cle_prev;			\
	(elm)->field.cle_next = (listelm);					\
	ck_pr_fence_store();							\
	ck_pr_store_ptr((listelm)->field.cle_prev, (elm));			\
	(listelm)->field.cle_prev = &(elm)->field.cle_next;			\
} while (0)

#define	CK_LIST_INSERT_HEAD(head, elm, field) do {				\
	(elm)->field.cle_next = (head)->clh_first;				\
	ck_pr_fence_store();							\
	if ((elm)->field.cle_next != NULL)					\
		(head)->clh_first->field.cle_prev = &(elm)->field.cle_next;	\
	ck_pr_store_ptr(&(head)->clh_first, elm);				\
	(elm)->field.cle_prev = &(head)->clh_first;				\
} while (0)

#define	CK_LIST_REMOVE(elm, field) do {						\
	ck_pr_store_ptr((elm)->field.cle_prev, (elm)->field.cle_next);		\
	if ((elm)->field.cle_next != NULL)					\
		(elm)->field.cle_next->field.cle_prev = (elm)->field.cle_prev;	\
} while (0)

#define CK_LIST_MOVE(head1, head2, field) do {					\
	ck_pr_store_ptr(&(head1)->clh_first, (head2)->clh_first);		\
	if ((head1)->clh_first != NULL)						\
		(head1)->clh_first->field.cle_prev = &(head1)->clh_first;	\
} while (0)

/*
 * This operation is not applied atomically.
 */
#define CK_LIST_SWAP(head1, head2, type, field) do {			\
	struct type *swap_tmp = (head1)->clh_first;			\
	(head1)->clh_first = (head2)->clh_first;			\
	(head2)->clh_first = swap_tmp;					\
	if ((swap_tmp = (head1)->clh_first) != NULL)			\
		swap_tmp->field.cle_prev = &(head1)->clh_first;		\
	if ((swap_tmp = (head2)->clh_first) != NULL)			\
		swap_tmp->field.cle_prev = &(head2)->clh_first;		\
} while (0)
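
/*
 * Example (illustrative sketch): because CK_LIST elements are doubly linked,
 * a writer can unlink an arbitrary element with CK_LIST_REMOVE without
 * rescanning from the head. The struct "node", the list "nodes" and the
 * predicate matches() are hypothetical; readers may run CK_LIST_FOREACH
 * concurrently, but writers must hold their own lock, and removed elements
 * may only be reclaimed once no reader can still hold a reference to them
 * (for example via a safe memory reclamation scheme such as ck_epoch).
 *
 *	struct node {
 *		int key;
 *		CK_LIST_ENTRY(node) link;
 *	};
 *
 *	CK_LIST_HEAD(node_list, node) nodes = CK_LIST_HEAD_INITIALIZER(nodes);
 *
 *	void
 *	unlink_matching(int key)
 *	{
 *		struct node *n, *tmp;
 *
 *		CK_LIST_FOREACH_SAFE(n, &nodes, link, tmp) {
 *			if (matches(n, key))
 *				CK_LIST_REMOVE(n, link);
 *		}
 *	}
 */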

#endif /* CK_QUEUE_H */