1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*-
29 * Copyright (c) 1991, 1993
30 * The Regents of the University of California. All rights reserved.
31 *
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
34 * are met:
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
40 * 4. Neither the name of the University nor the names of its contributors
41 * may be used to endorse or promote products derived from this software
42 * without specific prior written permission.
43 *
44 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
45 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
46 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
47 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
48 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
49 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
50 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
51 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
52 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
53 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
54 * SUCH DAMAGE.
55 *
56 * @(#)queue.h 8.5 (Berkeley) 8/20/94
57 */
58
59 #ifndef _SYS_QUEUE_H_
60 #define _SYS_QUEUE_H_
61
62 #ifdef KERNEL_PRIVATE
63 #include <kern/debug.h> /* panic function call */
64 #include <sys/cdefs.h> /* __improbable in kernelspace */
65 #else
66 #ifndef __improbable
67 #define __improbable(x) (x) /* noop in userspace */
68 #endif /* __improbable */
69 #endif /* KERNEL_PRIVATE */
70
71 /*
72 * This file defines five types of data structures: singly-linked lists,
73 * singly-linked tail queues, lists, tail queues, and circular queues.
74 *
75 * A singly-linked list is headed by a single forward pointer. The elements
76 * are singly linked for minimum space and pointer manipulation overhead at
77 * the expense of O(n) removal for arbitrary elements. New elements can be
78 * added to the list after an existing element or at the head of the list.
79 * Elements being removed from the head of the list should use the explicit
80 * macro for this purpose for optimum efficiency. A singly-linked list may
81 * only be traversed in the forward direction. Singly-linked lists are ideal
82 * for applications with large datasets and few or no removals or for
83 * implementing a LIFO queue.
84 *
85 * A singly-linked tail queue is headed by a pair of pointers, one to the
86 * head of the list and the other to the tail of the list. The elements are
87 * singly linked for minimum space and pointer manipulation overhead at the
88 * expense of O(n) removal for arbitrary elements. New elements can be added
89 * to the list after an existing element, at the head of the list, or at the
90 * end of the list. Elements being removed from the head of the tail queue
91 * should use the explicit macro for this purpose for optimum efficiency.
92 * A singly-linked tail queue may only be traversed in the forward direction.
93 * Singly-linked tail queues are ideal for applications with large datasets
94 * and few or no removals or for implementing a FIFO queue.
95 *
96 * A list is headed by a single forward pointer (or an array of forward
97 * pointers for a hash table header). The elements are doubly linked
98 * so that an arbitrary element can be removed without a need to
99 * traverse the list. New elements can be added to the list before
100 * or after an existing element or at the head of the list. A list
101 * may only be traversed in the forward direction.
102 *
103 * A tail queue is headed by a pair of pointers, one to the head of the
104 * list and the other to the tail of the list. The elements are doubly
105 * linked so that an arbitrary element can be removed without a need to
106 * traverse the list. New elements can be added to the list before or
107 * after an existing element, at the head of the list, or at the end of
108 * the list. A tail queue may be traversed in either direction.
109 *
110 * A circle queue is headed by a pair of pointers, one to the head of the
111 * list and the other to the tail of the list. The elements are doubly
112 * linked so that an arbitrary element can be removed without a need to
113 * traverse the list. New elements can be added to the list before or after
114 * an existing element, at the head of the list, or at the end of the list.
115  * A circle queue may be traversed in either direction, but end-of-list
116  * detection is more complex.
117 * Note that circle queues are deprecated, because, as the removal log
118 * in FreeBSD states, "CIRCLEQs are a disgrace to everything Knuth taught
119 * us in Volume 1 Chapter 2. [...] Use TAILQ instead, it provides the same
120 * functionality." Code using them will continue to compile, but they
121 * are no longer documented on the man page.
122 *
123  * For details on the use of these macros, see the queue(3) manual page; brief usage sketches also follow each group of macro definitions below.
124 *
125 *
126 * SLIST LIST STAILQ TAILQ CIRCLEQ
127 * _HEAD + + + + +
128 * _HEAD_INITIALIZER + + + + -
129 * _ENTRY + + + + +
130 * _INIT + + + + +
131 * _EMPTY + + + + +
132 * _FIRST + + + + +
133 * _NEXT + + + + +
134 * _PREV - - - + +
135 * _LAST - - + + +
136 * _FOREACH + + + + +
137 * _FOREACH_SAFE + + + + -
138 * _FOREACH_REVERSE - - - + -
139 * _FOREACH_REVERSE_SAFE - - - + -
140 * _INSERT_HEAD + + + + +
141 * _INSERT_BEFORE - + - + +
142 * _INSERT_AFTER + + + + +
143 * _INSERT_TAIL - - + + +
144 * _CONCAT - - + + -
145 * _REMOVE_AFTER + - + - -
146 * _REMOVE_HEAD + - + - -
147 * _REMOVE_HEAD_UNTIL - - + - -
148 * _REMOVE + + + + +
149 * _SWAP - + + + -
150 *
151 */
152 #ifdef QUEUE_MACRO_DEBUG
153 /* Store the last 2 places the queue element or head was altered */
154 struct qm_trace {
155 char * lastfile;
156 int lastline;
157 char * prevfile;
158 int prevline;
159 };
160
161 #define TRACEBUF struct qm_trace trace;
162 #define TRASHIT(x) do {(x) = (void *)-1;} while (0)
163
164 #define QMD_TRACE_HEAD(head) do { \
165 (head)->trace.prevline = (head)->trace.lastline; \
166 (head)->trace.prevfile = (head)->trace.lastfile; \
167 (head)->trace.lastline = __LINE__; \
168 (head)->trace.lastfile = __FILE__; \
169 } while (0)
170
171 #define QMD_TRACE_ELEM(elem) do { \
172 (elem)->trace.prevline = (elem)->trace.lastline; \
173 (elem)->trace.prevfile = (elem)->trace.lastfile; \
174 (elem)->trace.lastline = __LINE__; \
175 (elem)->trace.lastfile = __FILE__; \
176 } while (0)
177
178 #else
179 #define QMD_TRACE_ELEM(elem)
180 #define QMD_TRACE_HEAD(head)
181 #define TRACEBUF
182 #define TRASHIT(x)
183 #endif /* QUEUE_MACRO_DEBUG */
184
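/*
 * Example: a minimal sketch of how the QUEUE_MACRO_DEBUG machinery above
 * can be used.  Illustrative only; the names "node", "nodeq", "q", "n",
 * and "link" are hypothetical and not part of this header.  When
 * QUEUE_MACRO_DEBUG is defined before inclusion, TRACEBUF embeds a
 * struct qm_trace in every TAILQ head and entry, recording the last two
 * file/line pairs that modified it:
 *
 *	#define QUEUE_MACRO_DEBUG
 *	#include <sys/queue.h>
 *
 *	struct node {
 *		int value;
 *		TAILQ_ENTRY(node) link;
 *	};
 *	TAILQ_HEAD(nodeq, node) q = TAILQ_HEAD_INITIALIZER(q);
 *
 * After TAILQ_INSERT_TAIL(&q, n, link), q.trace.lastfile/lastline record
 * the most recent modification site and q.trace.prevfile/prevline the one
 * before it, which can be inspected from a debugger or a panic log.
 */
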
185 /*
186 * Horrible macros to enable use of code that was meant to be C-specific
187  * (and which prepend "struct" to "type") in C++; without these, C++ code
188 * that uses these macros in the context of a class will blow up
189  * due to "struct" being prepended to "type" by the macros, causing
190 * inconsistent use of tags.
191 *
192 * This approach is necessary because these are macros; we have to use
193 * these on a per-macro basis (because the queues are implemented as
194 * macros, disabling this warning in the scope of the header file is
195  * insufficient), which means we can't use #pragma, and have to use
196 * _Pragma. We only need to use these for the queue macros that
197 * prepend "struct" to "type" and will cause C++ to blow up.
198 */
199 #if defined(__clang__) && defined(__cplusplus)
200 #define __MISMATCH_TAGS_PUSH \
201 _Pragma("clang diagnostic push") \
202 _Pragma("clang diagnostic ignored \"-Wmismatched-tags\"")
203 #define __MISMATCH_TAGS_POP \
204 _Pragma("clang diagnostic pop")
205 #else
206 #define __MISMATCH_TAGS_PUSH
207 #define __MISMATCH_TAGS_POP
208 #endif
209
210 /*
211 * Singly-linked List declarations.
212 */
213 #define SLIST_HEAD(name, type) \
214 __MISMATCH_TAGS_PUSH \
215 struct name { \
216 struct type *slh_first; /* first element */ \
217 } \
218 __MISMATCH_TAGS_POP
219
220 #define SLIST_HEAD_INITIALIZER(head) \
221 { NULL }
222
223 #define SLIST_ENTRY(type) \
224 __MISMATCH_TAGS_PUSH \
225 struct { \
226 struct type *sle_next; /* next element */ \
227 } \
228 __MISMATCH_TAGS_POP
229
230 /*
231 * Singly-linked List functions.
232 */
233 #define SLIST_EMPTY(head) ((head)->slh_first == NULL)
234
235 #define SLIST_FIRST(head) ((head)->slh_first)
236
237 #define SLIST_FOREACH(var, head, field) \
238 for ((var) = SLIST_FIRST((head)); \
239 (var); \
240 (var) = SLIST_NEXT((var), field))
241
242 #define SLIST_FOREACH_SAFE(var, head, field, tvar) \
243 for ((var) = SLIST_FIRST((head)); \
244 (var) && ((tvar) = SLIST_NEXT((var), field), 1); \
245 (var) = (tvar))
246
247 #define SLIST_FOREACH_PREVPTR(var, varp, head, field) \
248 for ((varp) = &SLIST_FIRST((head)); \
249 ((var) = *(varp)) != NULL; \
250 (varp) = &SLIST_NEXT((var), field))
251
252 #define SLIST_INIT(head) do { \
253 SLIST_FIRST((head)) = NULL; \
254 } while (0)
255
256 #define SLIST_INSERT_AFTER(slistelm, elm, field) do { \
257 SLIST_NEXT((elm), field) = SLIST_NEXT((slistelm), field); \
258 SLIST_NEXT((slistelm), field) = (elm); \
259 } while (0)
260
261 #define SLIST_INSERT_HEAD(head, elm, field) do { \
262 SLIST_NEXT((elm), field) = SLIST_FIRST((head)); \
263 SLIST_FIRST((head)) = (elm); \
264 } while (0)
265
266 #define SLIST_NEXT(elm, field) ((elm)->field.sle_next)
267
268 #define SLIST_REMOVE(head, elm, type, field) \
269 __MISMATCH_TAGS_PUSH \
270 do { \
271 if (SLIST_FIRST((head)) == (elm)) { \
272 SLIST_REMOVE_HEAD((head), field); \
273 } \
274 else { \
275 struct type *curelm = SLIST_FIRST((head)); \
276 while (SLIST_NEXT(curelm, field) != (elm)) \
277 curelm = SLIST_NEXT(curelm, field); \
278 SLIST_REMOVE_AFTER(curelm, field); \
279 } \
280 TRASHIT((elm)->field.sle_next); \
281 } while (0) \
282 __MISMATCH_TAGS_POP
283
284 #define SLIST_REMOVE_AFTER(elm, field) do { \
285 SLIST_NEXT(elm, field) = \
286 SLIST_NEXT(SLIST_NEXT(elm, field), field); \
287 } while (0)
288
289 #define SLIST_REMOVE_HEAD(head, field) do { \
290 SLIST_FIRST((head)) = SLIST_NEXT(SLIST_FIRST((head)), field); \
291 } while (0)
292
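/*
 * Example: a minimal SLIST (LIFO) usage sketch, in the style of the
 * queue(3) manual page.  Illustrative userspace code only; the names
 * "entry", "slisthead", "entries", "head", "n1", "n2", "np", and
 * "np_temp" are hypothetical and not part of this header.
 *
 *	SLIST_HEAD(slisthead, entry) head = SLIST_HEAD_INITIALIZER(head);
 *	struct entry {
 *		int value;
 *		SLIST_ENTRY(entry) entries;	// embedded linkage
 *	} *n1, *n2, *np, *np_temp;
 *
 *	SLIST_INIT(&head);			// initialize the list
 *
 *	n1 = malloc(sizeof(struct entry));	// insert at the head
 *	SLIST_INSERT_HEAD(&head, n1, entries);
 *
 *	n2 = malloc(sizeof(struct entry));	// insert after n1
 *	SLIST_INSERT_AFTER(n1, n2, entries);
 *
 *	SLIST_FOREACH(np, &head, entries)	// forward traversal
 *		printf("%d\n", np->value);
 *
 *	SLIST_FOREACH_SAFE(np, &head, entries, np_temp) {
 *		SLIST_REMOVE(&head, np, entry, entries);	// removal is
 *		free(np);					// O(n) here
 *	}
 */
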
293 /*
294 * Singly-linked Tail queue declarations.
295 */
296 #define STAILQ_HEAD(name, type) \
297 __MISMATCH_TAGS_PUSH \
298 struct name { \
299 struct type *stqh_first;/* first element */ \
300 struct type **stqh_last;/* addr of last next element */ \
301 } \
302 __MISMATCH_TAGS_POP
303
304 #define STAILQ_HEAD_INITIALIZER(head) \
305 { NULL, &(head).stqh_first }
306
307 #define STAILQ_ENTRY(type) \
308 __MISMATCH_TAGS_PUSH \
309 struct { \
310 struct type *stqe_next; /* next element */ \
311 } \
312 __MISMATCH_TAGS_POP
313
314 /*
315 * Singly-linked Tail queue functions.
316 */
317 #define STAILQ_CONCAT(head1, head2) do { \
318 if (!STAILQ_EMPTY((head2))) { \
319 *(head1)->stqh_last = (head2)->stqh_first; \
320 (head1)->stqh_last = (head2)->stqh_last; \
321 STAILQ_INIT((head2)); \
322 } \
323 } while (0)
324
325 #define STAILQ_EMPTY(head) ((head)->stqh_first == NULL)
326
327 #define STAILQ_FIRST(head) ((head)->stqh_first)
328
329 #define STAILQ_FOREACH(var, head, field) \
330 	for ((var) = STAILQ_FIRST((head)); \
331 (var); \
332 (var) = STAILQ_NEXT((var), field))
333
334
335 #define STAILQ_FOREACH_SAFE(var, head, field, tvar) \
336 for ((var) = STAILQ_FIRST((head)); \
337 (var) && ((tvar) = STAILQ_NEXT((var), field), 1); \
338 (var) = (tvar))
339
340 #define STAILQ_INIT(head) do { \
341 STAILQ_FIRST((head)) = NULL; \
342 (head)->stqh_last = &STAILQ_FIRST((head)); \
343 } while (0)
344
345 #define STAILQ_INSERT_AFTER(head, tqelm, elm, field) do { \
346 if ((STAILQ_NEXT((elm), field) = STAILQ_NEXT((tqelm), field)) == NULL)\
347 (head)->stqh_last = &STAILQ_NEXT((elm), field); \
348 STAILQ_NEXT((tqelm), field) = (elm); \
349 } while (0)
350
351 #define STAILQ_INSERT_HEAD(head, elm, field) do { \
352 if ((STAILQ_NEXT((elm), field) = STAILQ_FIRST((head))) == NULL) \
353 (head)->stqh_last = &STAILQ_NEXT((elm), field); \
354 STAILQ_FIRST((head)) = (elm); \
355 } while (0)
356
357 #define STAILQ_INSERT_TAIL(head, elm, field) do { \
358 STAILQ_NEXT((elm), field) = NULL; \
359 *(head)->stqh_last = (elm); \
360 (head)->stqh_last = &STAILQ_NEXT((elm), field); \
361 } while (0)
362
363 #define STAILQ_LAST(head, type, field) \
364 __MISMATCH_TAGS_PUSH \
365 (STAILQ_EMPTY((head)) ? \
366 NULL : \
367 ((struct type *)(void *) \
368 ((char *)((head)->stqh_last) - __offsetof(struct type, field))))\
369 __MISMATCH_TAGS_POP
370
371 #define STAILQ_NEXT(elm, field) ((elm)->field.stqe_next)
372
373 #define STAILQ_REMOVE(head, elm, type, field) \
374 __MISMATCH_TAGS_PUSH \
375 do { \
376 if (STAILQ_FIRST((head)) == (elm)) { \
377 STAILQ_REMOVE_HEAD((head), field); \
378 } \
379 else { \
380 struct type *curelm = STAILQ_FIRST((head)); \
381 while (STAILQ_NEXT(curelm, field) != (elm)) \
382 curelm = STAILQ_NEXT(curelm, field); \
383 STAILQ_REMOVE_AFTER(head, curelm, field); \
384 } \
385 TRASHIT((elm)->field.stqe_next); \
386 } while (0) \
387 __MISMATCH_TAGS_POP
388
389 #define STAILQ_REMOVE_HEAD(head, field) do { \
390 if ((STAILQ_FIRST((head)) = \
391 STAILQ_NEXT(STAILQ_FIRST((head)), field)) == NULL) \
392 (head)->stqh_last = &STAILQ_FIRST((head)); \
393 } while (0)
394
395 #define STAILQ_REMOVE_HEAD_UNTIL(head, elm, field) do { \
396 if ((STAILQ_FIRST((head)) = STAILQ_NEXT((elm), field)) == NULL) \
397 (head)->stqh_last = &STAILQ_FIRST((head)); \
398 } while (0)
399
400 #define STAILQ_REMOVE_AFTER(head, elm, field) do { \
401 if ((STAILQ_NEXT(elm, field) = \
402 STAILQ_NEXT(STAILQ_NEXT(elm, field), field)) == NULL) \
403 (head)->stqh_last = &STAILQ_NEXT((elm), field); \
404 } while (0)
405
406 #define STAILQ_SWAP(head1, head2, type) \
407 __MISMATCH_TAGS_PUSH \
408 do { \
409 struct type *swap_first = STAILQ_FIRST(head1); \
410 struct type **swap_last = (head1)->stqh_last; \
411 STAILQ_FIRST(head1) = STAILQ_FIRST(head2); \
412 (head1)->stqh_last = (head2)->stqh_last; \
413 STAILQ_FIRST(head2) = swap_first; \
414 (head2)->stqh_last = swap_last; \
415 if (STAILQ_EMPTY(head1)) \
416 (head1)->stqh_last = &STAILQ_FIRST(head1); \
417 if (STAILQ_EMPTY(head2)) \
418 (head2)->stqh_last = &STAILQ_FIRST(head2); \
419 } while (0) \
420 __MISMATCH_TAGS_POP
421
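/*
 * Example: a minimal STAILQ (FIFO) usage sketch, in the style of the
 * queue(3) manual page.  Illustrative userspace code only; the names
 * "entry", "stailhead", "entries", "head", "n1", and "np" are
 * hypothetical and not part of this header.
 *
 *	STAILQ_HEAD(stailhead, entry) head = STAILQ_HEAD_INITIALIZER(head);
 *	struct entry {
 *		int value;
 *		STAILQ_ENTRY(entry) entries;	// embedded linkage
 *	} *n1, *np;
 *
 *	STAILQ_INIT(&head);			// initialize the queue
 *
 *	n1 = malloc(sizeof(struct entry));	// enqueue at the tail
 *	n1->value = 1;
 *	STAILQ_INSERT_TAIL(&head, n1, entries);
 *
 *	STAILQ_FOREACH(np, &head, entries)	// forward traversal
 *		printf("%d\n", np->value);
 *
 *	while (!STAILQ_EMPTY(&head)) {		// FIFO-style dequeue
 *		np = STAILQ_FIRST(&head);
 *		STAILQ_REMOVE_HEAD(&head, entries);
 *		free(np);
 *	}
 */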
422
423 /*
424 * List declarations.
425 */
426 #define LIST_HEAD(name, type) \
427 __MISMATCH_TAGS_PUSH \
428 struct name { \
429 struct type *lh_first; /* first element */ \
430 } \
431 __MISMATCH_TAGS_POP
432
433 #define LIST_HEAD_INITIALIZER(head) \
434 { NULL }
435
436 #define LIST_ENTRY(type) \
437 __MISMATCH_TAGS_PUSH \
438 struct { \
439 struct type *le_next; /* next element */ \
440 struct type **le_prev; /* address of previous next element */ \
441 } \
442 __MISMATCH_TAGS_POP
443
444 /*
445 * List functions.
446 */
447
448 #ifdef KERNEL_PRIVATE
449 #define LIST_CHECK_HEAD(head, field) do { \
450 if (__improbable( \
451 LIST_FIRST((head)) != NULL && \
452 LIST_FIRST((head))->field.le_prev != \
453 &LIST_FIRST((head)))) \
454 panic("Bad list head %p first->prev != head", (head)); \
455 } while (0)
456
457 #define LIST_CHECK_NEXT(elm, field) do { \
458 if (__improbable( \
459 LIST_NEXT((elm), field) != NULL && \
460 LIST_NEXT((elm), field)->field.le_prev != \
461 &((elm)->field.le_next))) \
462 panic("Bad link elm %p next->prev != elm", (elm)); \
463 } while (0)
464
465 #define LIST_CHECK_PREV(elm, field) do { \
466 if (__improbable(*(elm)->field.le_prev != (elm))) \
467 panic("Bad link elm %p prev->next != elm", (elm)); \
468 } while (0)
469 #else
470 #define LIST_CHECK_HEAD(head, field)
471 #define LIST_CHECK_NEXT(elm, field)
472 #define LIST_CHECK_PREV(elm, field)
473 #endif /* KERNEL_PRIVATE */
474
475 #define LIST_EMPTY(head) ((head)->lh_first == NULL)
476
477 #define LIST_FIRST(head) ((head)->lh_first)
478
479 #define LIST_FOREACH(var, head, field) \
480 for ((var) = LIST_FIRST((head)); \
481 (var); \
482 (var) = LIST_NEXT((var), field))
483
484 #define LIST_FOREACH_SAFE(var, head, field, tvar) \
485 for ((var) = LIST_FIRST((head)); \
486 (var) && ((tvar) = LIST_NEXT((var), field), 1); \
487 (var) = (tvar))
488
489 #define LIST_INIT(head) do { \
490 LIST_FIRST((head)) = NULL; \
491 } while (0)
492
493 #define LIST_INSERT_AFTER(listelm, elm, field) do { \
494 LIST_CHECK_NEXT(listelm, field); \
495 if ((LIST_NEXT((elm), field) = LIST_NEXT((listelm), field)) != NULL)\
496 LIST_NEXT((listelm), field)->field.le_prev = \
497 &LIST_NEXT((elm), field); \
498 LIST_NEXT((listelm), field) = (elm); \
499 (elm)->field.le_prev = &LIST_NEXT((listelm), field); \
500 } while (0)
501
502 #define LIST_INSERT_BEFORE(listelm, elm, field) do { \
503 LIST_CHECK_PREV(listelm, field); \
504 (elm)->field.le_prev = (listelm)->field.le_prev; \
505 LIST_NEXT((elm), field) = (listelm); \
506 *(listelm)->field.le_prev = (elm); \
507 (listelm)->field.le_prev = &LIST_NEXT((elm), field); \
508 } while (0)
509
510 #define LIST_INSERT_HEAD(head, elm, field) do { \
511 LIST_CHECK_HEAD((head), field); \
512 if ((LIST_NEXT((elm), field) = LIST_FIRST((head))) != NULL) \
513 LIST_FIRST((head))->field.le_prev = &LIST_NEXT((elm), field);\
514 LIST_FIRST((head)) = (elm); \
515 (elm)->field.le_prev = &LIST_FIRST((head)); \
516 } while (0)
517
518 #define LIST_NEXT(elm, field) ((elm)->field.le_next)
519
520 #define LIST_REMOVE(elm, field) do { \
521 LIST_CHECK_NEXT(elm, field); \
522 LIST_CHECK_PREV(elm, field); \
523 if (LIST_NEXT((elm), field) != NULL) \
524 LIST_NEXT((elm), field)->field.le_prev = \
525 (elm)->field.le_prev; \
526 *(elm)->field.le_prev = LIST_NEXT((elm), field); \
527 TRASHIT((elm)->field.le_next); \
528 TRASHIT((elm)->field.le_prev); \
529 } while (0)
530
531 #define LIST_SWAP(head1, head2, type, field) \
532 __MISMATCH_TAGS_PUSH \
533 do { \
534 struct type *swap_tmp = LIST_FIRST((head1)); \
535 LIST_FIRST((head1)) = LIST_FIRST((head2)); \
536 LIST_FIRST((head2)) = swap_tmp; \
537 if ((swap_tmp = LIST_FIRST((head1))) != NULL) \
538 swap_tmp->field.le_prev = &LIST_FIRST((head1)); \
539 if ((swap_tmp = LIST_FIRST((head2))) != NULL) \
540 swap_tmp->field.le_prev = &LIST_FIRST((head2)); \
541 } while (0) \
542 __MISMATCH_TAGS_POP
543
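/*
 * Example: a minimal LIST usage sketch, in the style of the queue(3)
 * manual page.  Illustrative userspace code only; the names "entry",
 * "listhead", "entries", "head", "n1", "n2", and "np" are hypothetical
 * and not part of this header.
 *
 *	LIST_HEAD(listhead, entry) head = LIST_HEAD_INITIALIZER(head);
 *	struct entry {
 *		int value;
 *		LIST_ENTRY(entry) entries;	// embedded linkage
 *	} *n1, *n2, *np;
 *
 *	LIST_INIT(&head);
 *
 *	n1 = malloc(sizeof(struct entry));	// insert at the head
 *	LIST_INSERT_HEAD(&head, n1, entries);
 *
 *	n2 = malloc(sizeof(struct entry));	// insert before n1
 *	LIST_INSERT_BEFORE(n1, n2, entries);
 *
 *	LIST_FOREACH(np, &head, entries)	// forward traversal
 *		printf("%d\n", np->value);
 *
 *	LIST_REMOVE(n2, entries);		// O(1) removal of an
 *	free(n2);				// arbitrary element
 */
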
544 /*
545 * Tail queue declarations.
546 */
547 #define TAILQ_HEAD(name, type) \
548 __MISMATCH_TAGS_PUSH \
549 struct name { \
550 struct type *tqh_first; /* first element */ \
551 struct type **tqh_last; /* addr of last next element */ \
552 TRACEBUF \
553 } \
554 __MISMATCH_TAGS_POP
555
556 #define TAILQ_HEAD_INITIALIZER(head) \
557 { NULL, &(head).tqh_first }
558
559 #define TAILQ_ENTRY(type) \
560 __MISMATCH_TAGS_PUSH \
561 struct { \
562 struct type *tqe_next; /* next element */ \
563 struct type **tqe_prev; /* address of previous next element */ \
564 TRACEBUF \
565 } \
566 __MISMATCH_TAGS_POP
567
568 /*
569 * Tail queue functions.
570 */
571 #ifdef KERNEL_PRIVATE
572 #define TAILQ_CHECK_HEAD(head, field) do { \
573 if (__improbable( \
574 TAILQ_FIRST((head)) != NULL && \
575 TAILQ_FIRST((head))->field.tqe_prev != \
576 &TAILQ_FIRST((head)))) \
577 panic("Bad tailq head %p first->prev != head", (head)); \
578 } while (0)
579
580 #define TAILQ_CHECK_NEXT(elm, field) do { \
581 if (__improbable( \
582 TAILQ_NEXT((elm), field) != NULL && \
583 TAILQ_NEXT((elm), field)->field.tqe_prev != \
584 &((elm)->field.tqe_next))) \
585 panic("Bad tailq elm %p next->prev != elm", (elm)); \
586 } while(0)
587
588 #define TAILQ_CHECK_PREV(elm, field) do { \
589 if (__improbable(*(elm)->field.tqe_prev != (elm))) \
590 panic("Bad tailq elm %p prev->next != elm", (elm)); \
591 } while(0)
592 #else
593 #define TAILQ_CHECK_HEAD(head, field)
594 #define TAILQ_CHECK_NEXT(elm, field)
595 #define TAILQ_CHECK_PREV(elm, field)
596 #endif /* KERNEL_PRIVATE */
597
598 #define TAILQ_CONCAT(head1, head2, field) do { \
599 if (!TAILQ_EMPTY(head2)) { \
600 *(head1)->tqh_last = (head2)->tqh_first; \
601 (head2)->tqh_first->field.tqe_prev = (head1)->tqh_last; \
602 (head1)->tqh_last = (head2)->tqh_last; \
603 TAILQ_INIT((head2)); \
604 QMD_TRACE_HEAD(head1); \
605 QMD_TRACE_HEAD(head2); \
606 } \
607 } while (0)
608
609 #define TAILQ_EMPTY(head) ((head)->tqh_first == NULL)
610
611 #define TAILQ_FIRST(head) ((head)->tqh_first)
612
613 #define TAILQ_FOREACH(var, head, field) \
614 for ((var) = TAILQ_FIRST((head)); \
615 (var); \
616 (var) = TAILQ_NEXT((var), field))
617
618 #define TAILQ_FOREACH_SAFE(var, head, field, tvar) \
619 for ((var) = TAILQ_FIRST((head)); \
620 (var) && ((tvar) = TAILQ_NEXT((var), field), 1); \
621 (var) = (tvar))
622
623 #define TAILQ_FOREACH_REVERSE(var, head, headname, field) \
624 for ((var) = TAILQ_LAST((head), headname); \
625 (var); \
626 (var) = TAILQ_PREV((var), headname, field))
627
628 #define TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, tvar) \
629 for ((var) = TAILQ_LAST((head), headname); \
630 (var) && ((tvar) = TAILQ_PREV((var), headname, field), 1); \
631 (var) = (tvar))
632
633 #define TAILQ_INIT(head) do { \
634 TAILQ_FIRST((head)) = NULL; \
635 (head)->tqh_last = &TAILQ_FIRST((head)); \
636 QMD_TRACE_HEAD(head); \
637 } while (0)
638
639
640 #define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
641 TAILQ_CHECK_NEXT(listelm, field); \
642 if ((TAILQ_NEXT((elm), field) = TAILQ_NEXT((listelm), field)) != NULL)\
643 TAILQ_NEXT((elm), field)->field.tqe_prev = \
644 &TAILQ_NEXT((elm), field); \
645 else { \
646 (head)->tqh_last = &TAILQ_NEXT((elm), field); \
647 QMD_TRACE_HEAD(head); \
648 } \
649 TAILQ_NEXT((listelm), field) = (elm); \
650 (elm)->field.tqe_prev = &TAILQ_NEXT((listelm), field); \
651 QMD_TRACE_ELEM(&(elm)->field); \
652 QMD_TRACE_ELEM(&listelm->field); \
653 } while (0)
654
655 #define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
656 TAILQ_CHECK_PREV(listelm, field); \
657 (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
658 TAILQ_NEXT((elm), field) = (listelm); \
659 *(listelm)->field.tqe_prev = (elm); \
660 (listelm)->field.tqe_prev = &TAILQ_NEXT((elm), field); \
661 QMD_TRACE_ELEM(&(elm)->field); \
662 QMD_TRACE_ELEM(&listelm->field); \
663 } while (0)
664
665 #define TAILQ_INSERT_HEAD(head, elm, field) do { \
666 TAILQ_CHECK_HEAD(head, field); \
667 if ((TAILQ_NEXT((elm), field) = TAILQ_FIRST((head))) != NULL) \
668 TAILQ_FIRST((head))->field.tqe_prev = \
669 &TAILQ_NEXT((elm), field); \
670 else \
671 (head)->tqh_last = &TAILQ_NEXT((elm), field); \
672 TAILQ_FIRST((head)) = (elm); \
673 (elm)->field.tqe_prev = &TAILQ_FIRST((head)); \
674 QMD_TRACE_HEAD(head); \
675 QMD_TRACE_ELEM(&(elm)->field); \
676 } while (0)
677
678 #define TAILQ_INSERT_TAIL(head, elm, field) do { \
679 TAILQ_NEXT((elm), field) = NULL; \
680 (elm)->field.tqe_prev = (head)->tqh_last; \
681 *(head)->tqh_last = (elm); \
682 (head)->tqh_last = &TAILQ_NEXT((elm), field); \
683 QMD_TRACE_HEAD(head); \
684 QMD_TRACE_ELEM(&(elm)->field); \
685 } while (0)
686
687 #define TAILQ_LAST(head, headname) \
688 __MISMATCH_TAGS_PUSH \
689 (*(((struct headname *)((head)->tqh_last))->tqh_last)) \
690 __MISMATCH_TAGS_POP
691
692 #define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
693
694 #define TAILQ_PREV(elm, headname, field) \
695 __MISMATCH_TAGS_PUSH \
696 (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last)) \
697 __MISMATCH_TAGS_POP
698
699 #define TAILQ_REMOVE(head, elm, field) do { \
700 TAILQ_CHECK_NEXT(elm, field); \
701 TAILQ_CHECK_PREV(elm, field); \
702 if ((TAILQ_NEXT((elm), field)) != NULL) \
703 TAILQ_NEXT((elm), field)->field.tqe_prev = \
704 (elm)->field.tqe_prev; \
705 else { \
706 (head)->tqh_last = (elm)->field.tqe_prev; \
707 QMD_TRACE_HEAD(head); \
708 } \
709 *(elm)->field.tqe_prev = TAILQ_NEXT((elm), field); \
710 TRASHIT((elm)->field.tqe_next); \
711 TRASHIT((elm)->field.tqe_prev); \
712 QMD_TRACE_ELEM(&(elm)->field); \
713 } while (0)
714
715 /*
716  * Swap the contents of two tail queue heads.
717 */
718 #define TAILQ_SWAP(head1, head2, type, field) \
719 __MISMATCH_TAGS_PUSH \
720 do { \
721 struct type *swap_first = (head1)->tqh_first; \
722 struct type **swap_last = (head1)->tqh_last; \
723 (head1)->tqh_first = (head2)->tqh_first; \
724 (head1)->tqh_last = (head2)->tqh_last; \
725 (head2)->tqh_first = swap_first; \
726 (head2)->tqh_last = swap_last; \
727 if ((swap_first = (head1)->tqh_first) != NULL) \
728 swap_first->field.tqe_prev = &(head1)->tqh_first; \
729 else \
730 (head1)->tqh_last = &(head1)->tqh_first; \
731 if ((swap_first = (head2)->tqh_first) != NULL) \
732 swap_first->field.tqe_prev = &(head2)->tqh_first; \
733 else \
734 (head2)->tqh_last = &(head2)->tqh_first; \
735 } while (0) \
736 __MISMATCH_TAGS_POP
737
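/*
 * Example: a minimal TAILQ usage sketch, in the style of the queue(3)
 * manual page.  Illustrative userspace code only; the names "entry",
 * "tailhead", "entries", "head", "n1", "np", and "np_temp" are
 * hypothetical and not part of this header.
 *
 *	TAILQ_HEAD(tailhead, entry) head = TAILQ_HEAD_INITIALIZER(head);
 *	struct entry {
 *		int value;
 *		TAILQ_ENTRY(entry) entries;	// embedded linkage
 *	} *n1, *np, *np_temp;
 *
 *	TAILQ_INIT(&head);
 *
 *	n1 = malloc(sizeof(struct entry));	// insert at the tail
 *	TAILQ_INSERT_TAIL(&head, n1, entries);
 *
 *	TAILQ_FOREACH(np, &head, entries)	// forward traversal
 *		printf("%d\n", np->value);
 *
 *	TAILQ_FOREACH_REVERSE(np, &head, tailhead, entries)
 *		printf("%d\n", np->value);	// reverse traversal
 *
 *	TAILQ_FOREACH_SAFE(np, &head, entries, np_temp) {
 *		TAILQ_REMOVE(&head, np, entries);	// safe removal
 *		free(np);				// while iterating
 *	}
 */
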
738 /*
739 * Circular queue definitions.
740 */
741 #define CIRCLEQ_HEAD(name, type) \
742 __MISMATCH_TAGS_PUSH \
743 struct name { \
744 struct type *cqh_first; /* first element */ \
745 struct type *cqh_last; /* last element */ \
746 } \
747 __MISMATCH_TAGS_POP
748
749 #define CIRCLEQ_ENTRY(type) \
750 __MISMATCH_TAGS_PUSH \
751 struct { \
752 struct type *cqe_next; /* next element */ \
753 struct type *cqe_prev; /* previous element */ \
754 } \
755 __MISMATCH_TAGS_POP
756
757 /*
758 * Circular queue functions.
759 */
760 #ifdef KERNEL_PRIVATE
761 #define CIRCLEQ_CHECK_HEAD(head, field) do { \
762 if (__improbable( \
763 CIRCLEQ_FIRST((head)) != ((void*)(head)) && \
764 CIRCLEQ_FIRST((head))->field.cqe_prev != ((void*)(head))))\
765 panic("Bad circleq head %p first->prev != head", (head)); \
766 } while(0)
767 #define CIRCLEQ_CHECK_NEXT(head, elm, field) do { \
768 if (__improbable( \
769 CIRCLEQ_NEXT((elm), field) != ((void*)(head)) && \
770 CIRCLEQ_NEXT((elm), field)->field.cqe_prev != (elm))) \
771 panic("Bad circleq elm %p next->prev != elm", (elm)); \
772 } while(0)
773 #define CIRCLEQ_CHECK_PREV(head, elm, field) do { \
774 if (__improbable( \
775 CIRCLEQ_PREV((elm), field) != ((void*)(head)) && \
776 CIRCLEQ_PREV((elm), field)->field.cqe_next != (elm))) \
777 panic("Bad circleq elm %p prev->next != elm", (elm)); \
778 } while(0)
779 #else
780 #define CIRCLEQ_CHECK_HEAD(head, field)
781 #define CIRCLEQ_CHECK_NEXT(head, elm, field)
782 #define CIRCLEQ_CHECK_PREV(head, elm, field)
783 #endif /* KERNEL_PRIVATE */
784
785 #define CIRCLEQ_EMPTY(head) ((head)->cqh_first == (void *)(head))
786
787 #define CIRCLEQ_FIRST(head) ((head)->cqh_first)
788
789 #define CIRCLEQ_FOREACH(var, head, field) \
790 	for ((var) = (head)->cqh_first; \
791 (var) != (void *)(head); \
792 (var) = (var)->field.cqe_next)
793
794 #define CIRCLEQ_INIT(head) do { \
795 (head)->cqh_first = (void *)(head); \
796 (head)->cqh_last = (void *)(head); \
797 } while (0)
798
799 #define CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
800 CIRCLEQ_CHECK_NEXT(head, listelm, field); \
801 (elm)->field.cqe_next = (listelm)->field.cqe_next; \
802 (elm)->field.cqe_prev = (listelm); \
803 if ((listelm)->field.cqe_next == (void *)(head)) \
804 (head)->cqh_last = (elm); \
805 else \
806 (listelm)->field.cqe_next->field.cqe_prev = (elm); \
807 (listelm)->field.cqe_next = (elm); \
808 } while (0)
809
810 #define CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do { \
811 CIRCLEQ_CHECK_PREV(head, listelm, field); \
812 (elm)->field.cqe_next = (listelm); \
813 (elm)->field.cqe_prev = (listelm)->field.cqe_prev; \
814 if ((listelm)->field.cqe_prev == (void *)(head)) \
815 (head)->cqh_first = (elm); \
816 else \
817 (listelm)->field.cqe_prev->field.cqe_next = (elm); \
818 (listelm)->field.cqe_prev = (elm); \
819 } while (0)
820
821 #define CIRCLEQ_INSERT_HEAD(head, elm, field) do { \
822 CIRCLEQ_CHECK_HEAD(head, field); \
823 (elm)->field.cqe_next = (head)->cqh_first; \
824 (elm)->field.cqe_prev = (void *)(head); \
825 if ((head)->cqh_last == (void *)(head)) \
826 (head)->cqh_last = (elm); \
827 else \
828 (head)->cqh_first->field.cqe_prev = (elm); \
829 (head)->cqh_first = (elm); \
830 } while (0)
831
832 #define CIRCLEQ_INSERT_TAIL(head, elm, field) do { \
833 (elm)->field.cqe_next = (void *)(head); \
834 (elm)->field.cqe_prev = (head)->cqh_last; \
835 if ((head)->cqh_first == (void *)(head)) \
836 (head)->cqh_first = (elm); \
837 else \
838 (head)->cqh_last->field.cqe_next = (elm); \
839 (head)->cqh_last = (elm); \
840 } while (0)
841
842 #define CIRCLEQ_LAST(head) ((head)->cqh_last)
843
844 #define CIRCLEQ_NEXT(elm, field) ((elm)->field.cqe_next)
845
846 #define CIRCLEQ_PREV(elm, field) ((elm)->field.cqe_prev)
847
848 #define CIRCLEQ_REMOVE(head, elm, field) do { \
849 CIRCLEQ_CHECK_NEXT(head, elm, field); \
850 CIRCLEQ_CHECK_PREV(head, elm, field); \
851 if ((elm)->field.cqe_next == (void *)(head)) \
852 (head)->cqh_last = (elm)->field.cqe_prev; \
853 else \
854 (elm)->field.cqe_next->field.cqe_prev = \
855 (elm)->field.cqe_prev; \
856 if ((elm)->field.cqe_prev == (void *)(head)) \
857 (head)->cqh_first = (elm)->field.cqe_next; \
858 else \
859 (elm)->field.cqe_prev->field.cqe_next = \
860 (elm)->field.cqe_next; \
861 } while (0)
862
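/*
 * Example: a minimal CIRCLEQ usage sketch.  CIRCLEQs are deprecated (see
 * the note at the top of this file); new code should use TAILQ instead.
 * Illustrative userspace code only; the names "entry", "circlehead",
 * "entries", "head", "n1", and "np" are hypothetical and not part of
 * this header.
 *
 *	CIRCLEQ_HEAD(circlehead, entry) head;
 *	struct entry {
 *		int value;
 *		CIRCLEQ_ENTRY(entry) entries;	// embedded linkage
 *	} *n1, *np;
 *
 *	CIRCLEQ_INIT(&head);			// head points at itself
 *
 *	n1 = malloc(sizeof(struct entry));	// insert at the head
 *	CIRCLEQ_INSERT_HEAD(&head, n1, entries);
 *
 *	CIRCLEQ_FOREACH(np, &head, entries)	// note: termination compares
 *		printf("%d\n", np->value);	// against the head, not NULL
 *
 *	CIRCLEQ_REMOVE(&head, n1, entries);
 *	free(n1);
 */
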
863 #ifdef _KERNEL
864
865 #if NOTFB31
866
867 /*
868 * XXX insque() and remque() are an old way of handling certain queues.
869  * They bogusly assume that all queue heads look alike.
870 */
871
872 struct quehead {
873 struct quehead *qh_link;
874 struct quehead *qh_rlink;
875 };
876
877 #ifdef __GNUC__
878 #ifdef KERNEL_PRIVATE
879 static __inline void
880 chkquenext(void *a)
881 {
882 struct quehead *element = (struct quehead *)a;
883 if (__improbable(element->qh_link != NULL &&
884 element->qh_link->qh_rlink != element)) {
885 panic("Bad que elm %p next->prev != elm", a);
886 }
887 }
888
889 static __inline void
890 chkqueprev(void *a)
891 {
892 struct quehead *element = (struct quehead *)a;
893 if (__improbable(element->qh_rlink != NULL &&
894 element->qh_rlink->qh_link != element)) {
895 panic("Bad que elm %p prev->next != elm", a);
896 }
897 }
898 #else /* !KERNEL_PRIVATE */
899 #define chkquenext(a)
900 #define chkqueprev(a)
901 #endif /* KERNEL_PRIVATE */
902
903 static __inline void
904 insque(void *a, void *b)
905 {
906 struct quehead *element = (struct quehead *)a,
907 *head = (struct quehead *)b;
908 chkquenext(head);
909
910 element->qh_link = head->qh_link;
911 element->qh_rlink = head;
912 head->qh_link = element;
913 element->qh_link->qh_rlink = element;
914 }
915
916 static __inline void
917 remque(void *a)
918 {
919 struct quehead *element = (struct quehead *)a;
920 chkquenext(element);
921 chkqueprev(element);
922
923 element->qh_link->qh_rlink = element->qh_rlink;
924 element->qh_rlink->qh_link = element->qh_link;
925 element->qh_rlink = 0;
926 }
927
928 #else /* !__GNUC__ */
929
930 void insque(void *a, void *b);
931 void remque(void *a);
932
933 #endif /* __GNUC__ */
934
935 #endif /* NOTFB31 */
936 #endif /* _KERNEL */
937
938 #endif /* !_SYS_QUEUE_H_ */